"""
- Call /messages on the Anthropic API.
- Handle streaming and non-streaming requests by passing them straight through to Anthropic; nothing special is needed here.
- Ensure requests are logged in the DB, for both streaming and non-streaming calls.
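
Example (illustrative sketch of a streaming call; the model id and prompt are
placeholders, and credentials are assumed to come from the environment):

    stream = await anthropic_messages(
        model="anthropic/claude-3-5-sonnet-20240620",
        max_tokens=256,
        messages=[{"role": "user", "content": "Hi"}],
        stream=True,
    )
    async for chunk in stream:
        ...  # each chunk follows Anthropic's streaming event format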

"""

import asyncio
import contextvars
from functools import partial
from typing import Any, AsyncIterator, Coroutine, Dict, List, Optional, Union

import litellm
from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
from litellm.llms.base_llm.anthropic_messages.transformation import (
    BaseAnthropicMessagesConfig,
)
from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler
from litellm.llms.custom_httpx.llm_http_handler import BaseLLMHTTPHandler
from litellm.types.llms.anthropic_messages.anthropic_request import AnthropicMetadata
from litellm.types.llms.anthropic_messages.anthropic_response import (
    AnthropicMessagesResponse,
)
from litellm.types.router import GenericLiteLLMParams
from litellm.utils import ProviderConfigManager, client

from ..adapters.handler import LiteLLMMessagesToCompletionTransformationHandler
from ..responses_adapters.handler import LiteLLMMessagesToResponsesAPIHandler
from .utils import AnthropicMessagesRequestUtils, mock_response

# Providers that are routed directly to the OpenAI Responses API instead of
# going through chat/completions.
_RESPONSES_API_PROVIDERS = frozenset({"openai"})


def _should_route_to_responses_api(custom_llm_provider: Optional[str]) -> bool:
    """Return True when the provider should use the Responses API path.

    Set ``litellm.use_chat_completions_url_for_anthropic_messages = True`` to
    opt out and route OpenAI/Azure requests through chat/completions instead.
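
    Example (illustrative; provider strings mirror ``_RESPONSES_API_PROVIDERS``):

        litellm.use_chat_completions_url_for_anthropic_messages = False
        _should_route_to_responses_api("openai")     # True
        _should_route_to_responses_api("anthropic")  # False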
    """
    if litellm.use_chat_completions_url_for_anthropic_messages:
        return False
    return custom_llm_provider in _RESPONSES_API_PROVIDERS


####### MODULE-LEVEL INSTANCES ###################
# Shared HTTP handler reused across /messages requests
base_llm_http_handler = BaseLLMHTTPHandler()
#################################################


async def _execute_pre_request_hooks(
    model: str,
    messages: List[Dict],
    tools: Optional[List[Dict]],
    stream: Optional[bool],
    custom_llm_provider: Optional[str],
    **kwargs,
) -> Dict:
    """
    Execute pre-request hooks from CustomLogger callbacks.

    Allows CustomLoggers to modify request parameters before the API call.
    Used for WebSearch tool conversion, stream modification, etc.

    Args:
        model: Model name
        messages: List of messages
        tools: Optional tools list
        stream: Optional stream flag
        custom_llm_provider: Provider name (if not set, will be extracted from model)
        **kwargs: Additional request parameters

    Returns:
        Dict containing all (potentially modified) request parameters, including `tools` and `stream`
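
    Example (illustrative sketch; the hook class is hypothetical):

        from litellm.integrations.custom_logger import CustomLogger

        class ForceNonStreaming(CustomLogger):
            async def async_pre_request_hook(self, model, messages, request_kwargs):
                # Force every request to be non-streaming.
                request_kwargs["stream"] = False
                return request_kwargs

        litellm.callbacks = [ForceNonStreaming()]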
    """
    # If custom_llm_provider not provided, extract from model
    if not custom_llm_provider:
        try:
            _, custom_llm_provider, _, _ = litellm.get_llm_provider(model=model)
        except Exception:
            # If extraction fails, continue without provider
            pass

    # Build complete request kwargs dict
    request_kwargs = {
        "tools": tools,
        "stream": stream,
        "litellm_params": {
            "custom_llm_provider": custom_llm_provider,
        },
        **kwargs,
    }

    if not litellm.callbacks:
        return request_kwargs

    from litellm.integrations.custom_logger import CustomLogger as _CustomLogger

    for callback in litellm.callbacks:
        if not isinstance(callback, _CustomLogger):
            continue

        # Call the pre-request hook
        modified_kwargs = await callback.async_pre_request_hook(
            model, messages, request_kwargs
        )

        # If hook returned modified kwargs, use them
        if modified_kwargs is not None:
            request_kwargs = modified_kwargs

    return request_kwargs


@client
async def anthropic_messages(
    max_tokens: int,
    messages: List[Dict],
    model: str,
    metadata: Optional[Dict] = None,
    stop_sequences: Optional[List[str]] = None,
    stream: Optional[bool] = False,
    system: Optional[str] = None,
    temperature: Optional[float] = None,
    thinking: Optional[Dict] = None,
    tool_choice: Optional[Dict] = None,
    tools: Optional[List[Dict]] = None,
    top_k: Optional[int] = None,
    top_p: Optional[float] = None,
    api_key: Optional[str] = None,
    api_base: Optional[str] = None,
    client: Optional[AsyncHTTPHandler] = None,
    custom_llm_provider: Optional[str] = None,
    **kwargs,
) -> Union[AnthropicMessagesResponse, AsyncIterator]:
    """
    Async: Make an LLM API request in the Anthropic /messages API spec.
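
    Example (illustrative; the model id and prompt are placeholders):

        response = await anthropic_messages(
            model="anthropic/claude-3-5-sonnet-20240620",
            max_tokens=100,
            messages=[{"role": "user", "content": "Hello"}],
        )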
    """
    # Execute pre-request hooks to allow CustomLoggers to modify request
    request_kwargs = await _execute_pre_request_hooks(
        model=model,
        messages=messages,
        tools=tools,
        stream=stream,
        custom_llm_provider=custom_llm_provider,
        **kwargs,
    )

    # Extract modified parameters
    tools = request_kwargs.pop("tools", tools)
    stream = request_kwargs.pop("stream", stream)
    # Remove litellm_params from kwargs (only needed for hooks)
    request_kwargs.pop("litellm_params", None)
    # Merge back any other modifications
    kwargs.update(request_kwargs)

    loop = asyncio.get_running_loop()
    kwargs["is_async"] = True

    func = partial(
        anthropic_messages_handler,
        max_tokens=max_tokens,
        messages=messages,
        model=model,
        metadata=metadata,
        stop_sequences=stop_sequences,
        stream=stream,
        system=system,
        temperature=temperature,
        thinking=thinking,
        tool_choice=tool_choice,
        tools=tools,
        top_k=top_k,
        top_p=top_p,
        api_key=api_key,
        api_base=api_base,
        client=client,
        custom_llm_provider=custom_llm_provider,
        **kwargs,
    )
    # Copy the current context so contextvars (e.g., request-scoped state)
    # propagate into the executor thread.
    ctx = contextvars.copy_context()
    func_with_context = partial(ctx.run, func)
    init_response = await loop.run_in_executor(None, func_with_context)

    if asyncio.iscoroutine(init_response):
        response = await init_response
    else:
        response = init_response
    return response


def validate_anthropic_api_metadata(metadata: Optional[Dict] = None) -> Optional[Dict]:
    """
    Validate Anthropic API metadata. This ensures only allowed `metadata` fields are passed to the Anthropic API.

    If there are any litellm-specific metadata fields, pass them under the `litellm_metadata` key instead.
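
    Example (illustrative; assumes ``user_id`` is an allowed Anthropic metadata field):

        validate_anthropic_api_metadata({"user_id": "user-123"})
        # -> {"user_id": "user-123"}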
    """
    if metadata is None:
        return None
    anthropic_metadata_obj = AnthropicMetadata(**metadata)
    return anthropic_metadata_obj.model_dump(exclude_none=True)


def anthropic_messages_handler(
    max_tokens: int,
    messages: List[Dict],
    model: str,
    metadata: Optional[Dict] = None,
    stop_sequences: Optional[List[str]] = None,
    stream: Optional[bool] = False,
    system: Optional[str] = None,
    temperature: Optional[float] = None,
    thinking: Optional[Dict] = None,
    tool_choice: Optional[Dict] = None,
    tools: Optional[List[Dict]] = None,
    top_k: Optional[int] = None,
    top_p: Optional[float] = None,
    container: Optional[Dict] = None,
    api_key: Optional[str] = None,
    api_base: Optional[str] = None,
    client: Optional[AsyncHTTPHandler] = None,
    custom_llm_provider: Optional[str] = None,
    **kwargs,
) -> Union[
    AnthropicMessagesResponse,
    AsyncIterator[Any],
    Coroutine[Any, Any, Union[AnthropicMessagesResponse, AsyncIterator[Any]]],
]:
    """
    Makes Anthropic `/v1/messages` API calls in the Anthropic API spec.

    Args:
        container: Container config with skills for code execution
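
    Example (illustrative; the container shape is an assumption based on the
    description above; consult Anthropic's docs for the exact spec):

        anthropic_messages_handler(
            model="anthropic/claude-3-5-sonnet-20240620",
            max_tokens=1024,
            messages=[{"role": "user", "content": "Summarize this file"}],
            container={"skills": [...]},  # assumed shape; skills list elided
        )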
    """
    from litellm.types.utils import LlmProviders

    metadata = validate_anthropic_api_metadata(metadata)

    # Snapshot the local args now; used later to collect the optional request params.
    local_vars = locals()
    is_async = kwargs.pop("is_async", False)
    # Use provided client or create a new one
    litellm_logging_obj: LiteLLMLoggingObj = kwargs.get("litellm_logging_obj")  # type: ignore

    # Store original model name before get_llm_provider strips the provider prefix
    # This is needed by agentic hooks (e.g., websearch_interception) to make follow-up requests
    original_model = model

    litellm_params = GenericLiteLLMParams(
        **kwargs,
        api_key=api_key,
        api_base=api_base,
        custom_llm_provider=custom_llm_provider,
    )
    (
        model,
        custom_llm_provider,
        dynamic_api_key,
        dynamic_api_base,
    ) = litellm.get_llm_provider(
        model=model,
        custom_llm_provider=custom_llm_provider,
        api_base=litellm_params.api_base,
        api_key=litellm_params.api_key,
    )
    
    # Store agentic loop params in logging object for agentic hooks
    # This provides original request context needed for follow-up calls
    if litellm_logging_obj is not None:
        litellm_logging_obj.model_call_details["agentic_loop_params"] = {
            "model": original_model,
            "custom_llm_provider": custom_llm_provider,
        }
        
        # Check if stream was converted for WebSearch interception
        # This is set in the async wrapper above when stream=True is converted to stream=False
        if kwargs.get("_websearch_interception_converted_stream", False):
            litellm_logging_obj.model_call_details["websearch_interception_converted_stream"] = True

    if litellm_params.mock_response and isinstance(litellm_params.mock_response, str):
        # Short-circuit with a canned response when a mock is configured (e.g., in tests).
        return mock_response(
            model=model,
            messages=messages,
            max_tokens=max_tokens,
            mock_response=litellm_params.mock_response,
        )

    anthropic_messages_provider_config: Optional[BaseAnthropicMessagesConfig] = None

    if custom_llm_provider is not None and custom_llm_provider in [
        provider.value for provider in LlmProviders
    ]:
        anthropic_messages_provider_config = (
            ProviderConfigManager.get_provider_anthropic_messages_config(
                model=model,
                provider=litellm.LlmProviders(custom_llm_provider),
            )
        )
    if anthropic_messages_provider_config is None:
        # Route to the Responses API for providers in _RESPONSES_API_PROVIDERS;
        # use chat/completions for everything else.
        _shared_kwargs = dict(
            max_tokens=max_tokens,
            messages=messages,
            model=model,
            metadata=metadata,
            stop_sequences=stop_sequences,
            stream=stream,
            system=system,
            temperature=temperature,
            thinking=thinking,
            tool_choice=tool_choice,
            tools=tools,
            top_k=top_k,
            top_p=top_p,
            _is_async=is_async,
            api_key=api_key,
            api_base=api_base,
            client=client,
            custom_llm_provider=custom_llm_provider,
            **kwargs,
        )
        if _should_route_to_responses_api(custom_llm_provider):
            return LiteLLMMessagesToResponsesAPIHandler.anthropic_messages_handler(
                **_shared_kwargs
            )
        return LiteLLMMessagesToCompletionTransformationHandler.anthropic_messages_handler(
            **_shared_kwargs
        )

    if custom_llm_provider is None:
        raise ValueError(
            f"custom_llm_provider is required for Anthropic messages, passed in model={model}, custom_llm_provider={custom_llm_provider}"
        )

    local_vars.update(kwargs)
    anthropic_messages_optional_request_params = (
        AnthropicMessagesRequestUtils.get_requested_anthropic_messages_optional_param(
            params=local_vars
        )
    )
    return base_llm_http_handler.anthropic_messages_handler(
        model=model,
        messages=messages,
        anthropic_messages_provider_config=anthropic_messages_provider_config,
        anthropic_messages_optional_request_params=dict(
            anthropic_messages_optional_request_params
        ),
        _is_async=is_async,
        client=client,
        custom_llm_provider=custom_llm_provider,
        litellm_params=litellm_params,
        logging_obj=litellm_logging_obj,
        api_key=api_key,
        api_base=api_base,
        stream=stream,
        kwargs=kwargs,
    )
