from __future__ import annotations

import hashlib
import re
from pathlib import Path
from typing import Any

from ..config import DEFAULT_STORAGE_ROOT, DOCUMENT_SUBFOLDERS, SUPPORTED_CATEGORIES
from ..utils.fs_utils import copy_file, ensure_dir
from ..utils.json_utils import read_json, write_json
from ..utils.time_utils import now_iso


class IntakeService:
    """Handles applicant creation and intake scanning/copying.

    Per-applicant storage layout (rooted at ``storage_root/<applicant_id>``)::

        <category>/<subcategory>/<doc_folder>/{original,extracted,...,meta}/
        intake/      # drop zone scanned by :meth:`scan_intake`
        generated/   # placeholder reports created at init time
    """

    def __init__(self, storage_root: Path | None = None):
        # Fall back to the project-wide default root when none is supplied.
        self.storage_root = Path(storage_root or DEFAULT_STORAGE_ROOT)

    def applicant_path(self, applicant_id: str) -> Path:
        """Return the base directory for *applicant_id* under the storage root."""
        return self.storage_root / applicant_id

    def init_applicant(
        self,
        applicant_id: str,
        full_name: str = "",
        sponsor_name: str = "",
        nominated_occupation: str = "",
        visa_support_focus: tuple[str, ...] | list[str] = ("482", "186"),
    ) -> Path:
        """Create the folder skeleton and metadata for an applicant (idempotent).

        Args:
            applicant_id: Folder name / primary key for the applicant.
            full_name: Optional display name stored in ``applicant.json``.
            sponsor_name: Optional sponsoring employer name.
            nominated_occupation: Optional nominated occupation title.
            visa_support_focus: Visa subclasses recorded in ``applicant.json``;
                the default preserves the previously hard-coded ``["482", "186"]``.

        Returns:
            The applicant's base directory.
        """
        base = ensure_dir(self.applicant_path(applicant_id))

        for category in SUPPORTED_CATEGORIES:
            ensure_dir(base / category)

        ensure_dir(base / "intake")
        ensure_dir(base / "generated")

        # Never overwrite an existing profile; re-running init is a no-op here.
        applicant_json = base / "applicant.json"
        if not applicant_json.exists():
            # Single timestamp so created_at and updated_at match exactly.
            timestamp = now_iso()
            write_json(
                applicant_json,
                {
                    "applicant_id": applicant_id,
                    "full_name": full_name,
                    "sponsor_name": sponsor_name,
                    "nominated_occupation": nominated_occupation,
                    "visa_support_focus": list(visa_support_focus),
                    "created_at": timestamp,
                    "updated_at": timestamp,
                },
            )

        self._init_generated_placeholders(base)
        return base

    def scan_intake(self, applicant_id: str) -> list[dict[str, Any]]:
        """Register every file found under ``<base>/intake/<category>/...``.

        Returns:
            The manifest dict written for each registered file; an empty list
            when the intake folder does not exist yet.
        """
        base = self.applicant_path(applicant_id)
        intake_root = base / "intake"
        scanned: list[dict[str, Any]] = []

        if not intake_root.exists():
            return scanned

        for category in SUPPORTED_CATEGORIES:
            if category == "generated":
                continue  # "generated" holds outputs, never intake material
            category_input = intake_root / category
            if not category_input.exists():
                continue

            # Sorted so registration order is deterministic across runs.
            for file_path in sorted(category_input.glob("**/*")):
                if not file_path.is_file():
                    continue
                scanned.append(self._register_document(base, intake_root, applicant_id, file_path))

        return scanned

    def list_document_manifests(self, applicant_id: str) -> list[Path]:
        """Return all ``meta/manifest.json`` paths for an applicant, sorted per category."""
        base = self.applicant_path(applicant_id)
        manifests: list[Path] = []
        for category in SUPPORTED_CATEGORIES:
            if category == "generated":
                continue
            category_dir = base / category
            if not category_dir.exists():
                continue
            manifests.extend(sorted(category_dir.rglob("meta/manifest.json")))
        return manifests

    def _register_document(
        self,
        applicant_base: Path,
        intake_root: Path,
        applicant_id: str,
        source_file: Path,
    ) -> dict[str, Any]:
        """Copy one intake file into its document folder and (re)write its manifest.

        The file is stored under a normalized name (``original/original<ext>``).
        On re-scan, ``created_at`` from a pre-existing manifest is preserved so
        registration history is not rewritten.
        """
        rel = source_file.relative_to(intake_root)
        category, subcategory, document_folder = self._parse_intake_relpath(rel)
        file_hash = self._sha1(source_file)
        doc_id = self._build_document_id(applicant_id, category, subcategory, document_folder, file_hash)
        doc_type = self._extract_document_type(document_folder, fallback=subcategory)
        doc_dir = applicant_base / category / subcategory / document_folder

        for folder in DOCUMENT_SUBFOLDERS:
            ensure_dir(doc_dir / folder)

        # Predictable stored name: only the (lowercased) extension is kept.
        normalized_original_name = f"original{source_file.suffix.lower()}"
        destination = doc_dir / "original" / normalized_original_name
        copy_file(source_file, destination)

        # Single timestamp so created_at and updated_at match on first write.
        timestamp = now_iso()
        storage_path = str(Path(category) / subcategory / document_folder)
        manifest = {
            "document_id": doc_id,
            "doc_id": doc_id,
            "applicant_id": applicant_id,
            "document_type": doc_type,
            "category": category,
            "subcategory": subcategory,
            "storage_path": storage_path,
            "source": str(source_file),
            "stored_original": str(destination),
            "filename": normalized_original_name,
            "original_filename": source_file.name,
            "extension": source_file.suffix.lower(),
            "bytes": source_file.stat().st_size,
            "sha1": file_hash,
            "files": {
                "original": f"original/{normalized_original_name}",
                "document_json": "extracted/document.json",
                "entities_json": "extracted/entities.json",
                "quality_json": "extracted/quality.json",
                "summary": "summary/summary.md",
            },
            "status": "scanned",
            "created_at": timestamp,
            "updated_at": timestamp,
        }

        manifest_path = doc_dir / "meta" / "manifest.json"
        existing = read_json(manifest_path, default={}) or {}
        if existing:
            # Keep the original registration time when re-scanning.
            manifest["created_at"] = existing.get("created_at", manifest["created_at"])
        write_json(manifest_path, manifest)
        return manifest

    def _init_generated_placeholders(self, applicant_base: Path) -> None:
        """Seed ``generated/`` with placeholder reports; existing files are kept."""
        generated = applicant_base / "generated"
        ensure_dir(generated)

        placeholders = {
            "timeline.md": "# Timeline\n\nPending generation.\n",
            "checklist.md": "# Evidence Checklist\n\nPending generation.\n",
            "occupation_analysis.md": "# Occupation Analysis\n\nPending generation.\n",
            "cv.md": "# Applicant CV\n\nPending generation.\n",
            "chatgpt_briefing_pack.md": "# ChatGPT Briefing Pack\n\nPending generation.\n",
        }

        for filename, content in placeholders.items():
            path = generated / filename
            if not path.exists():
                path.write_text(content, encoding="utf-8")

    @staticmethod
    def _sha1(path: Path) -> str:
        """Return the hex SHA-1 of *path*, read in 64 KiB chunks.

        SHA-1 is used here only as a content fingerprint for dedup/IDs,
        not for any security purpose.
        """
        h = hashlib.sha1()
        with path.open("rb") as f:
            for chunk in iter(lambda: f.read(65536), b""):
                h.update(chunk)
        return h.hexdigest()

    @staticmethod
    def _slug(value: str) -> str:
        """Lowercase *value*, collapsing runs of unsafe chars to ``-`` ("unknown" if empty)."""
        slug = re.sub(r"[^a-zA-Z0-9_-]+", "-", value.strip()).strip("-").lower()
        return slug or "unknown"

    def _parse_intake_relpath(self, rel: Path) -> tuple[str, str, str]:
        """Derive ``(category, subcategory, document_folder)`` from an intake-relative path.

        Supported layouts (relative to ``intake/``)::

            <category>/<subcategory>/<doc_folder>/original/<file>
            <category>/<subcategory>/<doc_folder>/<file>
            <category>/<subcategory>/<file>
            <category>/<file>
            <file>

        Missing pieces fall back to "uncategorized" / filename-derived slugs.
        """
        parts = rel.parts
        category = self._slug(parts[0]) if len(parts) >= 1 else "uncategorized"
        subcategory = "uncategorized"
        document_folder = ""

        if len(parts) >= 4:
            # Both deep layouts (with or without an explicit original/ folder)
            # resolve identically, so a single branch covers them.
            subcategory = self._slug(parts[1])
            document_folder = self._slug(parts[2])
        elif len(parts) == 3:
            # No per-document folder: reuse the subcategory as the folder name.
            subcategory = self._slug(parts[1])
            document_folder = self._slug(parts[1])
        elif len(parts) == 2:
            # Bare file in a category: folder named after the file stem.
            document_folder = self._slug(parts[1].rsplit(".", 1)[0])
        else:
            document_folder = "unclassified-document"

        return category, subcategory, document_folder

    def _build_document_id(
        self,
        applicant_id: str,
        category: str,
        subcategory: str,
        document_folder: str,
        file_hash: str,
    ) -> str:
        """Build a stable, slugged document ID; the hash prefix disambiguates duplicates."""
        return self._slug(f"{applicant_id}_{category}_{subcategory}_{document_folder}_{file_hash[:8]}")

    @staticmethod
    def _extract_document_type(document_folder: str, fallback: str) -> str:
        """Return the snake_case document type from a ``YYYY-MM-DD_<type>`` folder name.

        Falls back to *fallback* (usually the subcategory) when the folder name
        carries no ``_``-separated type suffix.
        """
        # Expected shape: YYYY-MM-DD_passport-bio-page
        if "_" in document_folder:
            return document_folder.split("_", 1)[1].replace("-", "_")
        return fallback.replace("-", "_")
