"""Adapter for 国家法律法规数据库 (flk.npc.gov.cn).

The site is a Vue SPA. Its visible pages are thin shells over JSON APIs:

  * POST /law-search/search/list       -> paginated law rows
  * GET  /law-search/search/flfgDetails -> detail metadata + OFD/DOCX paths
  * GET  /law-search/amazonFile/previewLink + flkofd reader/text -> body text

This adapter intentionally crawls the API rather than scraping the SPA DOM.
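
Typical flow (a minimal sketch; ``rt`` is a framework-supplied runtime object
that this adapter accepts but never uses, so ``None`` suffices here):

    list_url, items, fr = fetch_list_page(None, page_num=1)
    for item in items:
        detail_fr, fields = fetch_detail(None, url=item.url, list_item=item)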
"""
from __future__ import annotations

import json
import logging
import time
from datetime import date, datetime, timezone
from typing import Any
from urllib.parse import parse_qs, urlencode, urlparse

import httpx

from govcrawler.adapters.contract import CrawlItem, FetchStrategy
from govcrawler.fetcher.browser import FetchResult
from govcrawler.parser.detail_parser import DetailFields
from govcrawler.utils.url_norm import url_hash as compute_url_hash

log = logging.getLogger(__name__)

ADAPTER_ID = "flk_npc"
DEFAULT_INTERVAL_SEC: float = 5.0

BASE_URL = "https://flk.npc.gov.cn"
LIST_URL = f"{BASE_URL}/law-search/search/list"
DETAIL_URL = f"{BASE_URL}/law-search/search/flfgDetails"
PREVIEW_URL = f"{BASE_URL}/law-search/amazonFile/previewLink"
OFD_READER_BASE = "https://flkofd.npc.gov.cn"

# Whole "法律" top-level section, including law interpretations, decisions,
# amendments, and amendment/repeal decisions. Use only 110..170 for narrow
# sectoral laws.
LAW_SECTION_CODE_IDS = [110, 120, 130, 140, 150, 155, 160, 170, 180, 190, 195, 200]
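
# To crawl a narrower subset, pass ids through ``params`` (a usage sketch;
# see fetch_list_page):
#   fetch_list_page(None, page_num=1,
#                   params={"flfg_code_ids": [110, 120, 130, 140, 150, 155, 160, 170]})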

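# Minimal browser-like headers; Origin/Referer mimic requests made by the SPA.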
_HEADERS = {
    "User-Agent": "Mozilla/5.0 GovCrawler",
    "Accept": "application/json, text/plain, */*",
    "Origin": BASE_URL,
    "Referer": f"{BASE_URL}/search",
}


def _make_fetch_result(url: str, status: int, body: str, *, t0: float) -> FetchResult:
    return FetchResult(
        url=url,
        final_url=url,
        status=status,
        html=body or "",
        fetched_at=time.time(),
        duration_ms=int((time.time() - t0) * 1000),
        is_challenge=False,
        strategy=FetchStrategy.HTTPX.value,
    )


def _parse_date(value: Any) -> date | None:
    if not value:
        return None
    s = str(value).strip()
    for fmt in ("%Y-%m-%d", "%Y.%m.%d", "%Y/%m/%d"):
        try:
            return datetime.strptime(s, fmt).date()
        except ValueError:
            continue
    return None


def _parse_datetime(value: Any) -> datetime | None:
    d = _parse_date(value)
    if d is None:
        return None
    return datetime(d.year, d.month, d.day, tzinfo=timezone.utc)


VALIDITY_STATE_LABELS = {
    1: "已废止",  # repealed
    2: "已修改",  # amended
    3: "有效",  # in force
    4: "尚未生效",  # not yet in force
}


def _validity_state_code(sxx: Any) -> int | None:
    try:
        return int(sxx)
    except (TypeError, ValueError):
        return None


def _validity_state_label(sxx: Any) -> str | None:
    state = _validity_state_code(sxx)
    if state is None:
        return None
    return VALIDITY_STATE_LABELS.get(state)


def _is_effective(sxx: Any) -> bool | None:
    """Map FLK validity state.

    Observed values:
      3 -> 有效
      4 -> 尚未生效
    Other historical states are treated as not currently effective when seen.
    """
    state = _validity_state_code(sxx)
    if state is None:
        return None
    if state == 3:
        return True
    if state in (1, 2, 4):
        return False
    return None


def _detail_page_url(bbbs: str, title: str | None = None) -> str:
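    """Build the public SPA detail-page URL for a record's ``bbbs`` identifier."""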
    params = {"id": bbbs, "fileId": "", "type": ""}
    if title:
        params["title"] = title
    return f"{BASE_URL}/detail?{urlencode(params)}"


def _list_body(page_num: int, page_size: int, code_ids: list[int]) -> dict[str, Any]:
    return {
        "searchRange": 1,
        "sxrq": [],  # 施行日期 (effective-date) filter
        "gbrq": [],  # 公布日期 (publication-date) filter
        "searchType": 2,
        "sxx": [],  # 时效性 (validity-state) filter
        "gbrqYear": [],
        "flfgCodeId": code_ids,  # law/regulation category codes
        "zdjgCodeId": [],  # 制定机关 (enacting-body) codes
        "searchContent": "",
        "orderByParam": {"order": "-1", "sort": ""},
        "pageNum": page_num,
        "pageSize": page_size,
    }


def _row_to_item(row: dict[str, Any]) -> CrawlItem:
    bbbs = str(row.get("bbbs") or "").strip()
    title = str(row.get("title") or "").strip() or "(无标题)"
    url = _detail_page_url(bbbs, title)
    publish_dt = _parse_datetime(row.get("gbrq"))
    effective_date = _parse_date(row.get("sxrq"))
    validity_state = _validity_state_code(row.get("sxx"))
    validity_label = _validity_state_label(row.get("sxx"))
    return CrawlItem(
        site_id="flk_npc",
        native_post_id=bbbs or None,
        url=url,
        url_hash=compute_url_hash(url),
        title=title,
        publish_time=publish_dt,
        source_raw=row.get("zdjgName") or None,
        publisher=row.get("zdjgName") or None,
        publish_date=_parse_date(row.get("gbrq")),
        effective_date=effective_date,
        is_effective=_is_effective(row.get("sxx")),
        content_category=row.get("flxz") or "法律",
        content_subcategory=str(row.get("flfgCodeId")) if row.get("flfgCodeId") else None,
        open_category=row.get("flxz") or None,
        metadata_json={
            "raw": row,
            "flk_bbbs": bbbs,
            "flfgCodeId": row.get("flfgCodeId"),
            "zdjgCodeId": row.get("zdjgCodeId"),
            "sxx": row.get("sxx"),
            "validity_state_code": validity_state,
            "validity_state_label": validity_label,
            "publish_date": str(row.get("gbrq") or ""),
            "effective_date": str(row.get("sxrq") or ""),
        },
    )


def fetch_list_page(
    rt,
    *,
    page_num: int,
    params: dict[str, Any] | None = None,
    page_size: int | None = None,
    interval_sec: float | None = None,
) -> tuple[str, list[CrawlItem], FetchResult]:
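    """Fetch one page of law rows from the list API.

    Returns ``(synthetic list URL, parsed items, FetchResult)``; the URL is a
    GET-style identifier even though the real request is a POST. ``rt`` and
    ``interval_sec`` are accepted for call-signature compatibility but are
    unused here.
    """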
    params = params or {}
    code_ids = params.get("flfg_code_ids") or LAW_SECTION_CODE_IDS
    code_ids = [int(x) for x in code_ids]
    size = int(page_size or params.get("page_size") or 20)
    body = _list_body(page_num, size, code_ids)

    t0 = time.time()
    r = httpx.post(LIST_URL, json=body, headers=_HEADERS, timeout=20)
    r.raise_for_status()
    payload = r.json()
    if payload.get("code") != 200:
        raise ValueError(f"flk list failed: {payload.get('msg') or payload}")
    rows = payload.get("rows") or []
    items = [_row_to_item(row) for row in rows if row.get("bbbs")]
    list_url = f"{LIST_URL}?pageNum={page_num}&pageSize={size}&flfgCodeId={','.join(map(str, code_ids))}"
    return list_url, items, _make_fetch_result(list_url, r.status_code, r.text, t0=t0)


def _preview_file_param(file_path: str) -> str | None:
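    """Exchange an OSS file path for the ``file`` token the flkofd reader expects.

    Returns None when the previewLink call fails or yields no reader URL.
    """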
    r = httpx.get(PREVIEW_URL, params={"filePath": file_path}, headers=_HEADERS, timeout=20)
    r.raise_for_status()
    payload = r.json()
    if payload.get("code") != 200:
        return None
    reader_url = (payload.get("data") or {}).get("url") or ""
    parsed = urlparse(reader_url)
    return (parse_qs(parsed.query).get("file") or [None])[0]


def _extract_page_text(payload: dict[str, Any]) -> str:
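    """Flatten one page of ``/reader/text`` JSON (areas -> lines -> chars) into text."""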
    lines: list[str] = []
    for area in payload.get("areas") or []:
        for line in area.get("lines") or []:
            chars = line.get("chars") or []
            text = "".join(str(ch.get("char") or "") for ch in chars)
            if text.strip():
                lines.append(text.rstrip())
    return "\n".join(lines)


def _fetch_ofd_text(file_path: str, *, max_pages: int = 300) -> str:
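    """Fetch the full body text of an OFD document via the flkofd reader API.

    Two-step protocol: ``/reader/info`` describes the document (one ``area``
    entry per page), then ``/reader/text`` is fetched once per page index
    ``_i``, capped at ``max_pages``. ``_b``/``_v`` appear to be reader client
    version markers. Returns "" when the file token or page list cannot be
    resolved.
    """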
    file_param = _preview_file_param(file_path)
    if not file_param:
        return ""
    common = {"file": file_param, "_b": "3.2.0", "_v": "1"}
    t = str(int(time.time() * 1000))
    info = httpx.get(
        f"{OFD_READER_BASE}/reader/info",
        params={**common, "_": t},
        timeout=20,
    )
    info.raise_for_status()
    info_payload = info.json()
    page_count = len(info_payload.get("area") or [])
    if page_count <= 0:
        return ""
    page_count = min(page_count, max_pages)
    chunks: list[str] = []
    for idx in range(page_count):
        r = httpx.get(
            f"{OFD_READER_BASE}/reader/text",
            params={**common, "_": str(int(time.time() * 1000)), "_i": idx},
            timeout=20,
        )
        r.raise_for_status()
        text = _extract_page_text(r.json())
        if text:
            chunks.append(text)
    return "\n\n".join(chunks).strip()


def _bbbs_from_url(url: str) -> str | None:
    parsed = urlparse(url)
    return (parse_qs(parsed.query).get("id") or [None])[0]


def fetch_detail(rt, *, url: str, list_item: CrawlItem | None = None) -> tuple[FetchResult, DetailFields]:
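    """Fetch one law's detail metadata plus, best effort, its OFD body text.

    ``rt`` is accepted for call-signature compatibility but unused here.
    """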
    bbbs = (list_item.native_post_id if list_item else None) or _bbbs_from_url(url)
    if not bbbs:
        raise ValueError(f"missing flk bbbs in url={url!r}")

    t0 = time.time()
    r = httpx.get(DETAIL_URL, params={"bbbs": bbbs}, headers=_HEADERS, timeout=20)
    r.raise_for_status()
    payload = r.json()
    if payload.get("code") != 200:
        raise ValueError(f"flk detail failed: {payload.get('msg') or payload}")
    data = payload.get("data") or {}
    oss = data.get("ossFile") or {}

    title = data.get("title") or (list_item.title if list_item else "") or "(无标题)"
    detail_url = _detail_page_url(bbbs, title)
    ofd_path = oss.get("ossWordOfdPath") or oss.get("ossPdfOfdPath")
    content_text = ""
    if ofd_path:
        try:
            content_text = _fetch_ofd_text(str(ofd_path))
        except Exception as e:
            log.warning("flk ofd text fetch failed bbbs=%s path=%s err=%s", bbbs, ofd_path, e)

    header_fields = [
        ("制定机关", data.get("zdjgName")),
        ("法律法规分类", data.get("flxz")),
        ("时效性", _validity_state_label(data.get("sxx"))),
        ("公布日期", data.get("gbrq")),
        ("施行日期", data.get("sxrq")),
    ]
    # Keep the title plus only those metadata lines that actually have a value.
    header = [str(title)] + [f"{label}：{value}" for label, value in header_fields if value]
    full_text = "\n".join(header)
    if content_text:
        full_text += "\n\n" + content_text

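    # The detail endpoint returns JSON, not HTML; keep the raw payload in the
    # html slot so the exact upstream response is preserved.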
    raw_html = json.dumps(payload, ensure_ascii=False)
    fr = _make_fetch_result(detail_url, r.status_code, raw_html, t0=t0)
    fields = DetailFields(
        title=str(title),
        publish_time_raw=str(data.get("gbrq") or ""),
        source=str(data.get("zdjgName") or ""),
        content_html=raw_html,
        content_text=full_text,
        attachment_urls=[],
        used_fallback=False,
        fallback_engine=None,
        publisher=data.get("zdjgName") or None,
        publish_date=_parse_date(data.get("gbrq")),
        effective_date=_parse_date(data.get("sxrq")),
        is_effective=_is_effective(data.get("sxx")),
        open_category=data.get("flxz") or None,
        content_category=data.get("flxz") or "法律",
        content_subcategory=str(data.get("flfgCodeId")) if data.get("flfgCodeId") else None,
        public_meta={
            "flk_bbbs": str(bbbs),
            "flxz": str(data.get("flxz") or ""),
            "zdjgName": str(data.get("zdjgName") or ""),
            "gbrq": str(data.get("gbrq") or ""),
            "sxrq": str(data.get("sxrq") or ""),
            "sxx": str(data.get("sxx") or ""),
            "validity_state_code": str(_validity_state_code(data.get("sxx")) or ""),
            "validity_state_label": str(_validity_state_label(data.get("sxx")) or ""),
            "publish_date": str(data.get("gbrq") or ""),
            "effective_date": str(data.get("sxrq") or ""),
            "ossWordPath": str(oss.get("ossWordPath") or ""),
            "ossWordOfdPath": str(oss.get("ossWordOfdPath") or ""),
            "ossPdfPath": str(oss.get("ossPdfPath") or ""),
            "ossPdfOfdPath": str(oss.get("ossPdfOfdPath") or ""),
        },
    )
    return fr, fields


def build_list_url(site, column_id: str, page: int = 1) -> str:
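    """Return a synthetic, stable list URL for bookkeeping; the real list
    request is a POST (see fetch_list_page)."""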
    return f"{LIST_URL}?pageNum={page}"


def parse_list_response(site, column_id: str, body: str) -> list[CrawlItem]:
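    """Parse a raw list-API JSON body into CrawlItems; rows without ``bbbs`` are skipped."""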
    payload = json.loads(body)
    return [_row_to_item(row) for row in (payload.get("rows") or []) if row.get("bbbs")]
