"""Composite volatility observer for Sigil micro-scanner v3 decision data.

Non-trading observer that computes a composite market-quality score for each
evaluated symbol during a scan.  The score is logged to ``micro_scan_rollup``
and written to ``trade_audit_log`` (when ``audit_log_rejections=True``) so that
after one week of data we can compare Camp A (6-gate) vs Camp B (composite)
pass rates and decide whether v3 should swap entry models.

Design ref: final-design.md §2.2.  No trading decisions are made here:
``compute_composite_score`` is a pure function with zero side-effects, and the
depth-ratio helpers only maintain an in-module rolling buffer.
"""

from __future__ import annotations

import time
from collections import deque

from src.micro_scanner.indicators import calculate_linear_regression_slope

# Public surface of this module; ``_clamp`` and the depth-history dict are
# module-internal.
__all__ = [
    "compute_composite_score",
    "record_depth_ratio",
    "get_depth_slope",
]

# ---------------------------------------------------------------------------
# Depth-ratio rolling history
# ---------------------------------------------------------------------------
#
# Per-symbol rolling buffer of (timestamp, bid_depth / ask_depth) tuples.
# Populated externally (e.g. from ``_maybe_evaluate_entry`` in main.py when
# depth is fetched) and consumed by ``gate_depth_slope`` in signal_engine.
# ``deque(maxlen=30)`` bounds memory and makes the latest 30 samples the
# natural rolling window.

# Maximum number of samples retained per symbol (rolling window length).
_DEPTH_HISTORY_MAXLEN: int = 30
# symbol -> deque of (unix_timestamp, bid_depth / ask_depth), newest last;
# bounded to _DEPTH_HISTORY_MAXLEN entries by the deque's maxlen.
_depth_ratio_history: dict[str, deque[tuple[float, float]]] = {}

# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------


def _clamp(value: float, lo: float, hi: float) -> float:
    """Return *value* clamped to [lo, hi]."""
    return max(lo, min(hi, value))


# ---------------------------------------------------------------------------
# Public API
# ---------------------------------------------------------------------------


def compute_composite_score(
    candles: list[list[float]],
    ticker: dict,
    spread_pct: float,
) -> dict:
    """Compute a composite market-quality score from candles and a ccxt ticker.

    Parameters
    ----------
    candles:
        OHLCV 1-minute candles as ``[timestamp, open, high, low, close, volume]``
        rows, **most-recent last**.  At least 10 rows are required for a valid
        score; at least 20 rows are needed for a meaningful ``volume_conviction``
        (falls back gracefully with fewer).
    ticker:
        ccxt-style ticker dict.  Keys consumed: ``high`` (24h high), ``low``
        (24h low).  The ``close`` is taken from the last candle rather than
        ``ticker['last']`` so the sub-scores are internally consistent.
    spread_pct:
        Already-computed spread percentage (e.g. 0.15 means 0.15 %).

    Returns
    -------
    dict with keys:
        ``composite_score`` – float in [0, 1]
        ``trend_strength``  – float in [0, 1]
        ``volume_conviction`` – float in [0, 1]
        ``spread_health``   – float in [0, 1]
        ``momentum_position`` – float in [0, 1]
        ``insufficient_data`` – bool; True when candles < 10 or 24h range = 0
    """
    _empty = {
        "composite_score": 0.0,
        "trend_strength": 0.0,
        "volume_conviction": 0.0,
        "spread_health": 0.0,
        "momentum_position": 0.0,
        "insufficient_data": True,
    }

    # ------------------------------------------------------------------ guard
    if len(candles) < 10:
        return _empty

    # ------------------------------------------------------------------ §2.2 sub-scores

    # 1. trend_strength — % of last 10 candles that are green (close > open)
    last_10 = candles[-10:]
    green_count = sum(1 for c in last_10 if c[4] >= c[1])  # close >= open
    trend_strength = green_count / 10.0  # already in [0, 1]

    # 2. volume_conviction — clamp(last_bar_vol / 20-bar-mean, 0, 3) / 3
    last_vol = candles[-1][5]
    if len(candles) >= 20:
        vol_window = candles[-20:]
    else:
        vol_window = candles  # use whatever we have (>=10 at this point)

    mean_vol = sum(c[5] for c in vol_window) / len(vol_window)
    if mean_vol > 0:
        raw_ratio = last_vol / mean_vol
    else:
        raw_ratio = 0.0
    volume_conviction = _clamp(raw_ratio, 0.0, 3.0) / 3.0

    # 3. spread_health — clamp(1 - spread_pct / 2%, 0, 1)
    spread_health = _clamp(1.0 - spread_pct / 2.0, 0.0, 1.0)

    # 4. momentum_position — clamp((close - low_24h) / (high_24h - low_24h), 0, 1)
    #    Flat-market guard: if range == 0, return 0.5 (neutral) per spec.
    try:
        high_24h = float(ticker.get("high") or 0.0)
        low_24h = float(ticker.get("low") or 0.0)
    except (TypeError, ValueError):
        high_24h = 0.0
        low_24h = 0.0

    range_24h = high_24h - low_24h
    if range_24h == 0.0 or high_24h == low_24h:
        # Flat market or missing data — return neutral, no division
        momentum_position = 0.5
        # Flag as insufficient if 24h range is genuinely zero (design §2.2)
        return {
            "composite_score": 0.0,
            "trend_strength": trend_strength,
            "volume_conviction": volume_conviction,
            "spread_health": spread_health,
            "momentum_position": 0.5,
            "insufficient_data": True,
        }

    close = candles[-1][4]
    momentum_position = _clamp((close - low_24h) / range_24h, 0.0, 1.0)

    # ------------------------------------------------------------------ composite
    composite_score = (
        0.30 * trend_strength
        + 0.30 * volume_conviction
        + 0.20 * spread_health
        + 0.20 * momentum_position
    )
    # Paranoid clamp — floating-point should stay in [0,1] but guard anyway
    composite_score = _clamp(composite_score, 0.0, 1.0)

    return {
        "composite_score": composite_score,
        "trend_strength": trend_strength,
        "volume_conviction": volume_conviction,
        "spread_health": spread_health,
        "momentum_position": momentum_position,
        "insufficient_data": False,
    }


# ---------------------------------------------------------------------------
# Depth-ratio history API
# ---------------------------------------------------------------------------


def record_depth_ratio(
    symbol: str,
    bid_depth: float,
    ask_depth: float,
) -> None:
    """Store a (timestamp, bid_depth/ask_depth) sample for *symbol*.

    Samples with ``ask_depth <= 0`` are dropped silently: the ratio cannot
    be formed, and a bogus value must not poison the rolling slope.
    Timestamps are ``time.time()`` UNIX seconds so window queries can trim
    by wall-clock minutes.
    """
    if ask_depth <= 0.0:
        return
    history = _depth_ratio_history.setdefault(
        symbol, deque(maxlen=_DEPTH_HISTORY_MAXLEN)
    )
    history.append((time.time(), bid_depth / ask_depth))


def get_depth_slope(
    symbol: str,
    window_minutes: int = 30,
) -> float | None:
    """Return the linear-regression slope of *symbol*'s depth ratios over the
    last *window_minutes*, or None when there is insufficient data.

    Consumes the rolling buffer filled by ``record_depth_ratio``.  Only the
    ratio values enter the regression (x is the implicit 0..n-1 sample
    index); at least two samples must fall inside the window.
    """
    history = _depth_ratio_history.get(symbol)
    if history is None or len(history) < 2:
        return None

    earliest = time.time() - 60 * window_minutes
    recent = [ratio for stamp, ratio in history if stamp >= earliest]
    if len(recent) < 2:
        return None

    # The helper requires len(values) >= window, so pass the sample count.
    return calculate_linear_regression_slope(recent, window=len(recent))
