"""Adapter for 国家标准全文公开系统 (openstd.samr.gov.cn).

Scope for the first target is intentionally narrow: collect metadata for
mandatory national standards (强制性国家标准). Full-text preview/download is
behind a CAPTCHA on c.gb688.cn, so this adapter does not attempt attachment
downloads.
"""
from __future__ import annotations

import json
import re
import time
from datetime import date, datetime, timezone
from typing import Any
from urllib.parse import urlencode

import httpx
from lxml import html

from govcrawler.adapters.contract import CrawlItem, FetchStrategy
from govcrawler.fetcher.browser import FetchResult
from govcrawler.parser.detail_parser import DetailFields
from govcrawler.utils.url_norm import url_hash as compute_url_hash

# Stable identifier for this adapter (also used as CrawlItem.site_id).
ADAPTER_ID = "openstd_samr"
# Default politeness delay between requests, in seconds.
DEFAULT_INTERVAL_SEC: float = 2.0

# Site endpoints: paginated search listing and per-standard detail page.
BASE_URL = "https://openstd.samr.gov.cn"
LIST_PATH = "/bzgk/std/std_list_type"
DETAIL_PATH = "/bzgk/std/newGbInfo"

# Browser-like request headers. The Referer imitates arriving from the
# mandatory-standard list sorted by circulation date — NOTE(review): presumably
# required by the site's anti-scraping checks; confirm if requests start failing.
_HEADERS = {
    "User-Agent": "Mozilla/5.0 GovCrawler",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    "Referer": f"{BASE_URL}{LIST_PATH}?p.p1=1&p.p90=circulation_date&p.p91=desc",
}


def _clean(value: str | None) -> str:
    return re.sub(r"\s+", " ", value or "").strip()


def _parse_date(value: Any) -> date | None:
    s = _clean(str(value or ""))
    m = re.search(r"\d{4}-\d{1,2}-\d{1,2}", s)
    if not m:
        return None
    try:
        return datetime.strptime(m.group(0), "%Y-%m-%d").date()
    except ValueError:
        return None


def _parse_datetime(value: Any) -> datetime | None:
    """Like :func:`_parse_date`, but lifted to a midnight-UTC datetime."""
    parsed = _parse_date(value)
    if parsed is not None:
        return datetime(parsed.year, parsed.month, parsed.day, tzinfo=timezone.utc)
    return None


def _is_effective(status: str | None) -> bool | None:
    s = _clean(status)
    if not s:
        return None
    if "现行" in s:
        return True
    if "即将实施" in s or "废止" in s or "被代替" in s:
        return False
    return None


def _list_url(page_num: int, page_size: int, std_type: str) -> str:
    """Build a paginated list URL sorted by circulation date, newest first."""
    query = urlencode(
        {
            "page": page_num,
            "pageSize": page_size,
            "p.p1": std_type,  # standard type code (1 = mandatory national standard)
            "p.p90": "circulation_date",  # sort column
            "p.p91": "desc",  # sort direction
        }
    )
    return f"{BASE_URL}{LIST_PATH}?{query}"


def _detail_url(hcno: str) -> str:
    """Return the detail-page URL for the record identified by *hcno*."""
    return BASE_URL + DETAIL_PATH + "?hcno=" + hcno


def _make_fetch_result(url: str, status: int, body: str, *, t0: float) -> FetchResult:
    """Package a plain-httpx response into the adapter-contract FetchResult.

    *t0* is the caller's ``time.time()`` start stamp used to derive
    ``duration_ms``. ``final_url`` is set to the requested URL because no
    redirect tracking is done here; the strategy is always recorded as HTTPX
    (no browser fallback for this site).
    """
    return FetchResult(
        url=url,
        final_url=url,
        status=status,
        html=body or "",
        fetched_at=time.time(),
        duration_ms=int((time.time() - t0) * 1000),
        is_challenge=False,
        strategy=FetchStrategy.HTTPX.value,
    )


def _row_to_item(row, *, std_type_label: str) -> CrawlItem | None:
    """Convert one ``<tr>`` of the list table into a :class:`CrawlItem`.

    Returns None when the row carries no ``showInfo('<hcno>')`` onclick
    handler or fewer than the 7 expected cells. Cell layout (0-based, as
    consumed below): 1 = standard number, 3 = Chinese title, 4 = status
    label, 5 = publish date, 6 = effective date.
    """
    onclick = " ".join(row.xpath(".//@onclick"))
    m = re.search(r"showInfo\('([A-Fa-f0-9]+)'\)", onclick)
    if not m:
        return None
    # Upper-case the hex id so native_post_id / url_hash stay stable across pages.
    hcno = m.group(1).upper()
    cells = [_clean(td.text_content()) for td in row.xpath("./td")]
    if len(cells) < 7:
        return None

    std_no = cells[1]
    title = cells[3] or std_no or "(无标题)"
    status = cells[4]
    publish_date = _parse_date(cells[5])
    effective_date = _parse_date(cells[6])
    url = _detail_url(hcno)
    return CrawlItem(
        # Consistency fix: use the module-level ADAPTER_ID instead of
        # duplicating the "openstd_samr" literal here.
        site_id=ADAPTER_ID,
        native_post_id=hcno,
        url=url,
        url_hash=compute_url_hash(url),
        title=title,
        publish_time=_parse_datetime(cells[5]),
        source_raw="国家标准全文公开系统",
        publisher=None,
        doc_no=std_no or None,
        publish_date=publish_date,
        effective_date=effective_date,
        is_effective=_is_effective(status),
        channel_name=std_type_label,
        channel_path=f"国家标准全文公开系统/{std_type_label}",
        content_category="国家标准",
        content_subcategory=std_type_label,
        open_category=status or None,
        metadata_json={
            "openstd_hcno": hcno,
            "std_no": std_no,
            "standard_status": status,
            "std_type": std_type_label,
            "raw_cells": cells,
        },
    )


def _parse_list_items(body: str, *, std_type_label: str) -> list[CrawlItem]:
    """Parse a list-page HTML body into CrawlItems, skipping malformed rows."""
    doc = html.fromstring(body or "")
    rows = doc.xpath("//tr[.//*[contains(@onclick, 'showInfo')]]")
    converted = (_row_to_item(row, std_type_label=std_type_label) for row in rows)
    return [item for item in converted if item is not None]


def fetch_list_page(
    rt,
    *,
    page_num: int,
    params: dict[str, Any] | None = None,
    page_size: int | None = None,
    interval_sec: float | None = None,
) -> tuple[str, list[CrawlItem], FetchResult]:
    """Fetch and parse one page of the standards listing.

    ``params`` may carry ``std_type`` (site type code, default "1"),
    ``std_type_label`` (human-readable label, default 强制性国家标准) and
    ``page_size``. ``rt`` and ``interval_sec`` are not used by this
    implementation — NOTE(review): presumably kept for the adapter contract.

    Returns ``(url, items, fetch_result)``; raises ``httpx.HTTPStatusError``
    on a non-2xx response.
    """
    opts = params or {}
    std_type = str(opts.get("std_type") or "1")
    label = str(opts.get("std_type_label") or "强制性国家标准")
    size = int(page_size or opts.get("page_size") or 10)
    url = _list_url(page_num, size, std_type)
    started = time.time()
    response = httpx.get(url, headers=_HEADERS, timeout=30)
    response.raise_for_status()
    items = _parse_list_items(response.text, std_type_label=label)
    result = _make_fetch_result(url, response.status_code, response.text, t0=started)
    return url, items, result


def _value_after_label(doc, label: str) -> str:
    """Return the cleaned text of the ``.content`` div that immediately
    follows the ``.title`` div whose text contains *label*; "" if absent.

    ``json.dumps`` is used only to produce a safely quoted XPath string
    literal for the (possibly non-ASCII) label.
    """
    quoted = json.dumps(label, ensure_ascii=False)
    expr = (
        "//div[contains(concat(' ', normalize-space(@class), ' '), ' title ') "
        f"and contains(normalize-space(.), {quoted})]"
        "/following-sibling::div[contains(concat(' ', normalize-space(@class), ' '), ' content ')][1]"
    )
    matches = doc.xpath(expr)
    if not matches:
        return ""
    return _clean(matches[0].text_content())


def _detail_metadata(body: str) -> dict[str, str]:
    """Scrape the labelled metadata fields from a detail-page HTML body.

    Every value is a whitespace-normalized string; fields that cannot be
    located come back as "".
    """
    doc = html.fromstring(body or "")

    # The standard number sits in an <h1> prefixed with "标准号:"; strip the label.
    raw_no = _clean(" ".join(doc.xpath("//h1[contains(., '标准号')]/text()")))
    std_no = re.sub(r"^标准号[:：]\s*", "", raw_no).strip()

    # English title shares its cell with the "英文标准名称:" label; strip it too.
    raw_en = _clean(" ".join(doc.xpath("//td[contains(., '英文标准名称')]/text()")))
    en_title = re.sub(r"^英文标准名称[:：]\s*", "", raw_en).strip()

    meta = {
        "std_no": std_no,
        "title": _clean(" ".join(doc.xpath("//td[contains(., '中文标准名称')]/b/text()"))),
        "english_title": en_title,
        "standard_status": _clean(" ".join(doc.xpath("//td[contains(., '标准状态')]/span/text()"))),
    }
    # Remaining fields all follow the title/content sibling-div pattern.
    labelled_fields = {
        "ccs": "中国标准分类号",
        "ics": "国际标准分类号",
        "publish_date": "发布日期",
        "effective_date": "实施日期",
        "主管部门": "主管部门",
        "归口部门": "归口部门",
        "发布单位": "发布单位",
        "备注": "备注",
    }
    for key, label in labelled_fields.items():
        meta[key] = _value_after_label(doc, label)
    return meta


def _content_text(meta: dict[str, str]) -> str:
    labels = [
        ("标准号", "std_no"),
        ("中文标准名称", "title"),
        ("英文标准名称", "english_title"),
        ("标准状态", "standard_status"),
        ("中国标准分类号（CCS）", "ccs"),
        ("国际标准分类号（ICS）", "ics"),
        ("发布日期", "publish_date"),
        ("实施日期", "effective_date"),
        ("主管部门", "主管部门"),
        ("归口部门", "归口部门"),
        ("发布单位", "发布单位"),
        ("备注", "备注"),
    ]
    lines = [f"{label}：{meta[key]}" for label, key in labels if _clean(meta.get(key))]
    return "\n".join(lines)


def fetch_detail(rt, *, url: str, list_item: CrawlItem | None = None) -> tuple[FetchResult, DetailFields]:
    """Fetch a standard's detail page and extract structured DetailFields.

    No attachments are collected: full-text preview/download sits behind a
    CAPTCHA on c.gb688.cn, which is recorded in ``public_meta`` for
    downstream visibility. ``rt`` is unused by this implementation.

    Raises ``httpx.HTTPStatusError`` on a non-2xx response.
    """
    t0 = time.time()
    r = httpx.get(url, headers={**_HEADERS, "Referer": url}, timeout=30)
    r.raise_for_status()
    meta = _detail_metadata(r.text)
    title = meta.get("title") or (list_item.title if list_item else "") or "(无标题)"
    publish_date = _parse_date(meta.get("publish_date"))
    effective_date = _parse_date(meta.get("effective_date"))
    status = meta.get("standard_status") or ""
    # Fix: carry the subcategory from the list item when available so that
    # non-mandatory standard types (std_type params on the list side) are not
    # mislabelled; fall back to the original first-target default.
    subcategory = (list_item.content_subcategory if list_item else None) or "强制性国家标准"
    public_meta = {
        k: str(v or "")
        for k, v in {
            **meta,
            "openstd_hcno": (list_item.native_post_id if list_item else "") or "",
            "detail_url": url,
            "fulltext_download_note": "download/preview requires CAPTCHA on c.gb688.cn",
        }.items()
    }
    fields = DetailFields(
        title=title,
        publish_time_raw=meta.get("publish_date") or "",
        source=meta.get("发布单位") or "国家标准全文公开系统",
        content_html=r.text,
        content_text=_content_text(meta),
        attachment_urls=[],
        used_fallback=False,
        fallback_engine=None,
        publisher=meta.get("发布单位") or None,
        doc_no=meta.get("std_no") or (list_item.doc_no if list_item else None),
        publish_date=publish_date,
        effective_date=effective_date,
        is_effective=_is_effective(status),
        open_category=status or None,
        content_category="国家标准",
        content_subcategory=subcategory,
        topic_words=meta.get("ics") or None,
        public_meta=public_meta,
    )
    return _make_fetch_result(url, r.status_code, r.text, t0=t0), fields
