"""Shared admin router + helpers (session, serializers, validators).

Every `admin/<domain>.py` module imports `router` from here and registers its
endpoints on it. The aggregated router is re-exported by `admin/__init__.py`.
"""
from __future__ import annotations

import logging
from collections.abc import Iterator
from pathlib import Path
from typing import Any

from fastapi import APIRouter, HTTPException
from sqlalchemy import select
from sqlalchemy.orm import Session

from govcrawler.db import get_sessionmaker
from govcrawler.models import (
    CrawlJob,
    CrawlSite,
    CrawlTarget,
    LocalDepartment,
    MasterColumnRegistry,
    SiteDepartment,
)
from govcrawler.repositories import targets as targets_repo
from govcrawler.timeutil import iso_cn

log = logging.getLogger(__name__)

router = APIRouter(prefix="/admin", tags=["admin"])

STATIC_DIR = Path(__file__).parent.parent / "static"
DASHBOARD_HTML = STATIC_DIR / "admin.html"


def _session() -> Iterator[Session]:
    """FastAPI dependency that yields a DB session scoped to one request.

    The sessionmaker's context manager closes the session when the request
    handler finishes (FastAPI drives generator dependencies as context
    managers). Annotated as an iterator: this is a generator function, so a
    plain ``-> Session`` annotation would be incorrect.
    """
    SM = get_sessionmaker()
    with SM() as s:
        yield s

def _normalize_str(value: Any) -> str | None:
    if value is None:
        return None
    if not isinstance(value, str):
        raise HTTPException(400, "field must be string or null")
    value = value.strip()
    return value or None


def _normalize_bool(value: Any, *, default: bool | None = None) -> bool | None:
    if value is None:
        return default
    if isinstance(value, bool):
        return value
    raise HTTPException(400, "field must be boolean or null")


def _normalize_int(
    value: Any, *,
    field: str,
    minimum: int | None = None,
    default: int | None = None,
) -> int | None:
    if value is None:
        return default
    if not isinstance(value, int):
        raise HTTPException(400, f"{field} must be integer or null")
    if minimum is not None and value < minimum:
        raise HTTPException(400, f"{field} must be >= {minimum}")
    return value


def _job_total(result_json: dict[str, Any] | None) -> int | None:
    if not isinstance(result_json, dict):
        return None
    total = result_json.get("items_seen")
    if isinstance(total, int):
        return total
    parts = [
        result_json.get("items_new"),
        result_json.get("items_skipped"),
        result_json.get("items_failed"),
    ]
    if all(isinstance(v, int) for v in parts):
        return int(sum(parts))
    return None


def _serialize_crawl_job_summary(row: CrawlJob | None) -> dict[str, Any] | None:
    """Flatten a CrawlJob (or None) into a JSON-friendly summary dict."""
    if row is None:
        return None
    payload = row.result_json
    result: dict[str, Any] = payload if isinstance(payload, dict) else {}
    summary: dict[str, Any] = {
        "job_id": row.job_id,
        "source": row.source,
        "status": row.status,
        "force": bool(row.force),
    }
    # Lifecycle timestamps rendered via the project's CN ISO formatter.
    summary["enqueued_at"] = iso_cn(row.enqueued_at)
    summary["started_at"] = iso_cn(row.started_at)
    summary["finished_at"] = iso_cn(row.finished_at)
    # Item counters come from the worker-written result payload.
    summary["items_seen"] = _job_total(result)
    summary["items_new"] = result.get("items_new")
    summary["items_skipped"] = result.get("items_skipped")
    summary["items_failed"] = result.get("items_failed")
    summary["result_status"] = result.get("status")
    # Page progress defaults to 0 when NULL/absent.
    summary["current_page"] = row.current_page or 0
    summary["last_completed_page"] = row.last_completed_page or 0
    summary["error_msg"] = row.error_msg
    return summary


def _serialize_target(
    t: CrawlTarget,
    *,
    run_stats: dict[str, Any] | None = None,
) -> dict[str, Any]:
    """JSON view of a crawl target, optionally enriched with recent-job stats.

    ``run_stats`` may carry ``last_crawl_job`` / ``last_full_crawl_job``
    CrawlJob rows; both are summarized via _serialize_crawl_job_summary.
    """
    stats = run_stats or {}
    last_job = stats.get("last_crawl_job")
    last_full_job = stats.get("last_full_crawl_job")
    data: dict[str, Any] = {
        "target_code": t.target_code,
        "target_name": t.target_name,
        "entry_url": t.entry_url,
        "sample_article_url": t.sample_article_url,
        "dept_id": t.dept_id,
        "site_department_id": t.site_department_id,
        "channel_name": t.channel_name,
        "channel_path": t.channel_path,
        "content_category": t.content_category,
        "content_subcategory": t.content_subcategory,
        "expected_cadence_days": t.expected_cadence_days,
        "interval_sec": t.interval_sec,
        "interval_jitter_sec": t.interval_jitter_sec,
        "track_checkpoint": bool(t.track_checkpoint),
        "enabled": t.enabled,
    }
    # Timestamps in the project's canonical CN ISO format.
    data["last_crawled_at"] = iso_cn(t.last_crawled_at)
    data["last_article_time"] = iso_cn(t.last_article_time)
    data["created_at"] = iso_cn(t.created_at)
    data["last_crawl_job"] = _serialize_crawl_job_summary(last_job)
    data["last_full_crawl_job"] = _serialize_crawl_job_summary(last_full_job)
    # A target counts as fully crawled once any full-crawl job is recorded.
    data["has_completed_full_crawl"] = bool(last_full_job)
    return data


def _serialize_site(
    site: CrawlSite,
    targets: list[CrawlTarget],
    *,
    target_run_stats: dict[str, dict[str, Any]] | None = None,
) -> dict[str, Any]:
    """JSON view of a crawl site plus its serialized targets.

    ``target_run_stats`` maps target_code -> run-stats dict passed through to
    _serialize_target.
    """
    stats_by_code = target_run_stats or {}

    def _window(value: Any) -> str | None:
        # Crawl-window columns are time objects; render as "HH:MM" or null.
        return value.strftime("%H:%M") if value else None

    serialized_targets = [
        _serialize_target(t, run_stats=stats_by_code.get(t.target_code))
        for t in targets
    ]
    return {
        "site_code": site.site_code,
        "site_name": site.site_name,
        "base_url": site.base_url,
        "site_role": site.site_role,
        "cms_adapter": site.cms_adapter,
        "adapter_params_json": site.adapter_params_json,
        "default_fetch_strategy": site.default_fetch_strategy,
        "strategy_override_reason": site.strategy_override_reason,
        "respect_robots": site.respect_robots,
        "yaml_path": site.yaml_path,
        # Older rows may predate the managed_by column; default to "yaml".
        "managed_by": getattr(site, "managed_by", "yaml"),
        "enabled": site.enabled,
        "remark": site.remark,
        "crawl_window_start": _window(site.crawl_window_start),
        "crawl_window_end": _window(site.crawl_window_end),
        "daily_max_requests": site.daily_max_requests,
        "weekend_enabled": site.weekend_enabled,
        "backoff_min_sec": site.backoff_min_sec,
        "backoff_max_sec": site.backoff_max_sec,
        "schedule_cron": site.schedule_cron,
        "targets": serialized_targets,
    }


def _normalize_hhmm(val: Any, *, field: str) -> "time | None":
    """Accept "HH:MM" / "HH:MM:SS" / null and return a datetime.time."""
    from datetime import time as _time
    if val is None or val == "":
        return None
    if not isinstance(val, str):
        raise HTTPException(400, f"{field} must be HH:MM string or null")
    parts = val.strip().split(":")
    try:
        hh = int(parts[0]); mm = int(parts[1]) if len(parts) > 1 else 0
        ss = int(parts[2]) if len(parts) > 2 else 0
        return _time(hour=hh, minute=mm, second=ss)
    except (ValueError, IndexError):
        raise HTTPException(400, f"{field} must be HH:MM string")


def _validate_site_payload(payload: dict[str, Any], *, partial: bool) -> dict[str, Any]:
    """Validate and normalize an admin site payload.

    Rejects unknown keys with 400. With partial=False (create), site_code and
    base_url are mandatory. When cms_adapter or yaml_path appears, exactly one
    of the two must be non-null. Returns only the keys present in the payload,
    normalized.
    """
    allowed = {
        "site_code",
        "site_name",
        "base_url",
        "site_role",
        "cms_adapter",
        "adapter_params_json",
        "default_fetch_strategy",
        "strategy_override_reason",
        "respect_robots",
        "yaml_path",
        "enabled",
        "remark",
        "crawl_window_start", "crawl_window_end",
        "daily_max_requests", "weekend_enabled",
        "backoff_min_sec", "backoff_max_sec",
        "schedule_cron",
    }
    extra = set(payload) - allowed
    if extra:
        raise HTTPException(400, f"unknown keys: {sorted(extra)}")

    out: dict[str, Any] = {}
    string_fields = (
        "site_code",
        "site_name",
        "base_url",
        "site_role",
        "cms_adapter",
        "default_fetch_strategy",
        "strategy_override_reason",
        "yaml_path",
        "remark",
    )
    for key in string_fields:
        if key in payload:
            out[key] = _normalize_str(payload.get(key))
    if "adapter_params_json" in payload:
        params = payload.get("adapter_params_json")
        if params is not None and not isinstance(params, dict):
            raise HTTPException(400, "adapter_params_json must be object or null")
        out["adapter_params_json"] = params
    for bkey in ("respect_robots", "enabled", "weekend_enabled"):
        if bkey in payload:
            out[bkey] = _normalize_bool(payload.get(bkey))
    for tkey in ("crawl_window_start", "crawl_window_end"):
        if tkey in payload:
            out[tkey] = _normalize_hhmm(payload.get(tkey), field=tkey)
    for ikey in ("daily_max_requests", "backoff_min_sec", "backoff_max_sec"):
        if ikey in payload:
            out[ikey] = _normalize_int(payload.get(ikey), field=ikey, minimum=0)
    if "schedule_cron" in payload:
        # Light validation: 5 whitespace-separated fields. Don't try to
        # parse the cron — apscheduler will reject malformed values when
        # the scheduler reloads, and over-strict validation here would
        # block edge cases like '*/30 9-17 * * 1-5'.
        cron = _normalize_str(payload.get("schedule_cron"))
        if cron is not None and len(cron.split()) != 5:
            raise HTTPException(400, "schedule_cron must be a 5-field cron expression or null")
        out["schedule_cron"] = cron

    if not partial:
        for required in ("site_code", "base_url"):
            if not out.get(required):
                raise HTTPException(400, f"{required} is required")

    if "cms_adapter" in out or "yaml_path" in out:
        has_adapter = out.get("cms_adapter") is not None
        has_yaml = out.get("yaml_path") is not None
        if has_adapter == has_yaml:
            raise HTTPException(400, "exactly one of cms_adapter / yaml_path must be set")
    return out


def _validate_target_payload(payload: dict[str, Any], *, partial: bool) -> dict[str, Any]:
    """Validate and normalize an admin crawl-target payload.

    Rejects unknown keys with 400. With partial=False (create), site_code,
    target_code and entry_url are mandatory. Returns only the keys present in
    the payload, normalized.
    """
    allowed = {
        "site_code",
        "target_code",
        "target_name",
        "entry_url",
        "sample_article_url",
        "dept_id",
        "site_department_id",
        "channel_name",
        "channel_path",
        "content_category",
        "content_subcategory",
        "expected_cadence_days",
        "interval_sec",
        "interval_jitter_sec",
        "track_checkpoint",
        "enabled",
    }
    extra = set(payload) - allowed
    if extra:
        raise HTTPException(400, f"unknown keys: {sorted(extra)}")

    out: dict[str, Any] = {}
    string_fields = (
        "site_code",
        "target_code",
        "target_name",
        "entry_url",
        "sample_article_url",
        "channel_name",
        "channel_path",
        "content_category",
        "content_subcategory",
    )
    for key in string_fields:
        if key in payload:
            out[key] = _normalize_str(payload.get(key))
    # Integer fields with their inclusive lower bounds: ids and cadence must
    # be positive; intervals may be zero.
    int_floors = {
        "dept_id": 1,
        "site_department_id": 1,
        "expected_cadence_days": 1,
        "interval_sec": 0,
        "interval_jitter_sec": 0,
    }
    for ikey, floor in int_floors.items():
        if ikey in payload:
            out[ikey] = _normalize_int(payload.get(ikey), field=ikey, minimum=floor)
    for bkey in ("track_checkpoint", "enabled"):
        if bkey in payload:
            out[bkey] = _normalize_bool(payload.get(bkey))

    if not partial:
        for field in ("site_code", "target_code", "entry_url"):
            if not out.get(field):
                raise HTTPException(400, f"{field} is required")
    return out


def _infer_legacy_validator_key(target_code: str) -> tuple[str, str] | None:
    parts = target_code.split("__")
    if len(parts) >= 2:
        return parts[0], parts[-1]
    return None


def _get_registry_row(s: Session, registry_id: int) -> tuple[MasterColumnRegistry, CrawlSite]:
    """Fetch a column-registry row joined with its owning site.

    Raises HTTP 404 when no registry row matches *registry_id*.
    """
    stmt = (
        select(MasterColumnRegistry, CrawlSite)
        .join(CrawlSite, CrawlSite.id == MasterColumnRegistry.site_id)
        .where(MasterColumnRegistry.id == registry_id)
    )
    pair = s.execute(stmt).first()
    if pair is None:
        raise HTTPException(404, f"column registry row not found: {registry_id}")
    registry, site = pair[0], pair[1]
    return registry, site


def _render_entry_url_template(
    *,
    site: CrawlSite,
    registry: MasterColumnRegistry,
    template: str | None = None,
    dept_path: str | None = None,
) -> str | None:
    base_url = (site.base_url or "").rstrip("/")
    if not base_url:
        return None
    dept_path = (dept_path or "").strip("/")
    if template:
        return (
            template.replace("{base_url}", base_url)
            .replace("{column_id}", registry.column_id)
            .replace("{dept_path}", dept_path)
        )
    if site.cms_adapter == "gkmlpt":
        if dept_path:
            return f"{base_url}/{dept_path}/gkmlpt/index#{registry.column_id}"
        return f"{base_url}/gkmlpt/index#{registry.column_id}"
    return None


def _suggest_target_code(
    *,
    site_code: str,
    column_id: str,
    dept_path: str | None = None,
) -> str:
    if dept_path:
        return f"{site_code}__{dept_path}__{column_id}"
    return f"{site_code}__{column_id}"


def _build_registry_subscription_preview(
    *,
    s: Session,
    registry: MasterColumnRegistry,
    site: CrawlSite,
    payload: dict[str, Any],
) -> dict[str, Any]:
    """Build the preview for subscribing a registry column as a crawl target.

    Resolves each field from the payload first, then falls back to values
    derived from the registry row, the owning site, and (optionally) a bound
    site department. The result is a plain dict with the resolved fields plus
    a `ready` flag and user-facing warning strings.

    Raises:
        HTTPException 404: `site_department_id` refers to no SiteDepartment.
        HTTPException 400: the site department belongs to a different site,
            or a payload field fails normalization.
    """
    # Optional site-department binding: must exist and belong to this site.
    site_department = None
    site_department_id = payload.get("site_department_id")
    if site_department_id is not None:
        site_department_id = _normalize_int(
            site_department_id, field="site_department_id", minimum=1
        )
        site_department = s.get(SiteDepartment, site_department_id)
        if site_department is None:
            raise HTTPException(404, f"site_department not found: {site_department_id}")
        if site_department.site_id != site.id:
            raise HTTPException(400, "site_department does not belong to registry site")
    # dept_path: explicit payload value wins; else taken from the department.
    dept_path = _normalize_str(payload.get("dept_path")) or (
        site_department.dept_path if site_department is not None else None
    )
    # entry_url: explicit value, else rendered from a template / adapter default.
    entry_url = _normalize_str(payload.get("entry_url")) or _render_entry_url_template(
        site=site,
        registry=registry,
        template=_normalize_str(payload.get("entry_url_template")),
        dept_path=dept_path,
    )
    # target_code: explicit value, else the conventional site/dept/column code.
    target_code = _normalize_str(payload.get("target_code")) or _suggest_target_code(
        site_code=site.site_code,
        column_id=registry.column_id,
        dept_path=dept_path,
    )
    # dept_id: explicit value, else the department's linked local-dept id.
    dept_id = payload.get("dept_id")
    if dept_id is not None:
        dept_id = _normalize_int(dept_id, field="dept_id", minimum=1)
    elif site_department is not None and site_department.local_dept_id is not None:
        dept_id = int(site_department.local_dept_id)

    # Detect an already-subscribed target: first by the registry's recorded
    # subscription id, then by the (possibly suggested) target code.
    existing_target: CrawlTarget | None = None
    if registry.subscribed_target_id is not None:
        existing_target = targets_repo.get_by_id(s, registry.subscribed_target_id)
    if existing_target is None:
        existing_target = targets_repo.get_by_code(s, target_code)

    return {
        "registry_id": registry.id,
        "site_code": site.site_code,
        "site_name": site.site_name,
        "adapter_id": registry.adapter_id,
        "column_id": registry.column_id,
        "column_name": registry.column_name,
        "column_path": registry.column_path,
        "topic_tags": registry.topic_tags or [],
        "site_department_id": site_department.id if site_department is not None else None,
        "dept_path": dept_path,
        "dept_id": dept_id,
        "entry_url": entry_url,
        "target_code": target_code,
        # Display names default to the registry column's own name/path.
        "target_name": _normalize_str(payload.get("target_name")) or registry.column_name,
        "channel_name": _normalize_str(payload.get("channel_name")) or registry.column_name,
        "channel_path": _normalize_str(payload.get("channel_path")) or registry.column_path,
        "content_category": _normalize_str(payload.get("content_category")),
        "content_subcategory": _normalize_str(payload.get("content_subcategory")),
        "expected_cadence_days": _normalize_int(
            payload.get("expected_cadence_days"),
            field="expected_cadence_days",
            minimum=1,
            default=30,
        ),
        "interval_sec": _normalize_int(
            payload.get("interval_sec"),
            field="interval_sec",
            minimum=0,
            default=5,
        ),
        "enabled": _normalize_bool(payload.get("enabled"), default=True),
        "sample_article_url": _normalize_str(payload.get("sample_article_url")),
        "existing_target_code": existing_target.target_code if existing_target is not None else None,
        "subscribed_target_id": registry.subscribed_target_id,
        # Subscription can proceed only with both an entry URL and a code.
        "ready": bool(entry_url and target_code),
        # User-facing warnings (Chinese, displayed verbatim in the admin UI —
        # TODO confirm): missing entry_url / missing local-department binding.
        "warnings": [msg for msg in [
            None if entry_url else "缺少 entry_url，需要指定 entry_url、entry_url_template 或可推导的 dept_path",
            None if dept_id is not None else "未绑定本地部门，后续文章将不带 dept_id",
        ] if msg],
    }
