"""Attachments — read-only list of downloaded files."""
from __future__ import annotations

from typing import Any

from fastapi import Depends, Query
from sqlalchemy import desc, func, select
from sqlalchemy.orm import Session

from govcrawler.models import Article, Attachment, CrawlSite, CrawlTarget

from ._common import _session, iso_cn, router


@router.get("/api/attachments")
def list_attachments(
    site: str | None = Query(None, description="crawl_site.site_code"),
    target: str | None = Query(None, description="crawl_target.target_code"),
    ext: str | None = Query(None, description="file extension, e.g. pdf"),
    q: str | None = Query(None, description="substring of file_name"),
    limit: int = Query(20, ge=1, le=200),
    offset: int = Query(0, ge=0),
    s: Session = Depends(_session),
) -> dict[str, Any]:
    """Server-side pagination — same contract as /api/articles/search.
    Returns `total` so UI renders proper page count without an
    additional COUNT round-trip."""
    from sqlalchemy import func as _f
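    # Shared filter list; the same WHERE clauses are applied to both the
    # COUNT query and the page query so `total` matches the result set.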
    filters = []
    if site:
        filters.append(CrawlSite.site_code == site)
    if target:
        filters.append(CrawlTarget.target_code == target)
    if ext:
        filters.append(Attachment.file_ext == ext.lstrip("."))
    if q:
        filters.append(Attachment.file_name.ilike(f"%{q}%"))

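    # COUNT over the same joins as the page query. Outer joins keep
    # attachments whose article has no matching site or target row.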
    count_stmt = (
        select(func.count(Attachment.id))
        .select_from(Attachment)
        .join(Article, Article.id == Attachment.article_id)
        .join(CrawlSite, CrawlSite.id == Article.site_id, isouter=True)
        .join(CrawlTarget, CrawlTarget.id == Article.target_id, isouter=True)
        .where(*filters)
    )
    total = s.execute(count_stmt).scalar() or 0

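    # Page query: newest attachments first (descending surrogate id), joined
    # to article/site/target for the display columns returned to the client.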
    stmt = (
        select(
            Attachment,
            Article.title.label("article_title"),
            Article.url.label("article_url"),
            CrawlSite.site_code.label("site_code"),
            CrawlTarget.target_code.label("target_code"),
        )
        .select_from(Attachment)
        .join(Article, Article.id == Attachment.article_id)
        .join(CrawlSite, CrawlSite.id == Article.site_id, isouter=True)
        .join(CrawlTarget, CrawlTarget.id == Article.target_id, isouter=True)
        .where(*filters)
        .order_by(desc(Attachment.id))
        .offset(offset)
        .limit(limit)
    )
    rows = s.execute(stmt).all()
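    # Each row carries the Attachment ORM instance plus the labelled scalar
    # columns selected above (article_title, article_url, site_code, target_code).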
    return {
        "count": len(rows),
        "total": total,
        "limit": limit,
        "offset": offset,
        "items": [
            {
                "id": r.Attachment.id,
                "article_id": r.Attachment.article_id,
                "article_title": r.article_title,
                "article_url": r.article_url,
                "site_code": r.site_code,
                "target_code": r.target_code,
                "file_name": r.Attachment.file_name,
                "file_ext": r.Attachment.file_ext,
                "size_bytes": r.Attachment.size_bytes,
                "file_path": r.Attachment.file_path,
                "file_hash": r.Attachment.file_hash,
                "downloaded_at": iso_cn(r.Attachment.downloaded_at),
                "source_url": r.Attachment.source_url
            }
            for r in rows
        ],
    }

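# Example of paging through this endpoint from a client (illustrative sketch;
# the base URL is an assumption — point it at wherever the API is deployed):
#
#   import requests
#
#   page = requests.get(
#       "http://localhost:8000/api/attachments",
#       params={"ext": "pdf", "limit": 50, "offset": 0},
#   ).json()
#   total_pages = -(-page["total"] // page["limit"])  # ceil division
#   for n in range(1, total_pages):
#       next_page = requests.get(
#           "http://localhost:8000/api/attachments",
#           params={"ext": "pdf", "limit": 50, "offset": n * 50},
#       ).json()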