# What is this?
## Tests `litellm.transcription` endpoint. Outside litellm module b/c of audio file used in testing (it's ~700kb).

import asyncio
import logging
import os
import sys
import time
import traceback
from typing import Optional

import aiohttp
import dotenv
import pytest
from dotenv import load_dotenv
from openai import AsyncOpenAI

import litellm
from litellm.integrations.custom_logger import CustomLogger

# Get the current directory of the file being run
pwd = os.path.dirname(os.path.realpath(__file__))
print(pwd)

# Primary audio fixture (~700kb speech sample) used by most tests below.
file_path = os.path.join(pwd, "gettysburg.wav")

# NOTE(review): opened at import time and never closed; this single handle is
# shared by every test in the module, so its file position may be at EOF
# after a prior test has read it.
audio_file = open(file_path, "rb")


# Second audio fixture with different content, used to force a cache miss in
# test_transcription_caching. Same never-closed/shared-handle caveat applies.
file2_path = os.path.join(pwd, "eagle.wav")
audio_file2 = open(file2_path, "rb")

load_dotenv()

sys.path.insert(
    0, os.path.abspath("../")
)  # Adds the parent directory to the system path
import litellm
from litellm import Router


@pytest.mark.parametrize(
    "model, api_key, api_base",
    [
        ("whisper-1", None, None),
        (
            "azure/whisper",
            os.getenv("AZURE_WHISPER_API_KEY"),
            os.getenv("AZURE_WHISPER_API_BASE"),
        ),
    ],
)
@pytest.mark.parametrize(
    "response_format, timestamp_granularities",
    [("json", None), ("vtt", None), ("verbose_json", ["word"])],
)
@pytest.mark.asyncio
@pytest.mark.flaky(retries=3, delay=1)
async def test_transcription(
    model, api_key, api_base, response_format, timestamp_granularities
):
    """Smoke-test litellm.atranscription across providers and response formats.

    Uses the module-level ``audio_file`` handle, which is shared by every
    parametrized run of this test (and by other tests in this module), so it
    must be rewound before each use.
    """
    # Rewind the shared handle: a previous parametrized run may have left the
    # file position at EOF, which would upload an empty body.
    audio_file.seek(0)

    transcript = await litellm.atranscription(
        model=model,
        file=audio_file,
        api_key=api_key,
        api_base=api_base,
        response_format=response_format,
        timestamp_granularities=timestamp_granularities,
        drop_params=True,  # drop params the target provider doesn't support
    )
    print(f"transcript: {transcript.model_dump()}")
    print(f"transcript hidden params: {transcript._hidden_params}")

    assert transcript.text is not None


@pytest.mark.asyncio()
async def test_transcription_caching():
    """Verify transcription caching: an identical second call is a cache hit,
    while a different audio file is a cache miss.

    The body is wrapped in try/finally so a failing assertion cannot leak the
    globally-enabled cache into unrelated tests.
    """
    import litellm
    from litellm.caching.caching import Cache

    litellm.set_verbose = True
    litellm.cache = Cache()

    try:
        # make raw llm api call
        audio_file.seek(0)  # shared module-level handle; rewind before reuse
        response_1 = await litellm.atranscription(
            model="whisper-1",
            file=audio_file,
        )

        # give the async cache write time to land before the second call
        await asyncio.sleep(5)

        # cache hit: same model + same file content
        audio_file.seek(0)
        response_2 = await litellm.atranscription(
            model="whisper-1",
            file=audio_file,
        )

        print("response_1", response_1)
        print("response_2", response_2)
        print("response2 hidden params", response_2._hidden_params)
        assert response_2._hidden_params["cache_hit"] is True

        # cache miss: a different audio file must not reuse the cached result
        audio_file2.seek(0)
        response_3 = await litellm.atranscription(
            model="whisper-1",
            file=audio_file2,
        )
        print("response_3", response_3)
        print("response3 hidden params", response_3._hidden_params)
        assert response_3._hidden_params.get("cache_hit") is not True
        assert response_3.text != response_2.text
    finally:
        # Always disable caching so other tests in the session are unaffected,
        # even if one of the assertions above failed.
        litellm.cache = None


@pytest.mark.asyncio
async def test_whisper_log_pre_call():
    """Ensure a registered CustomLogger's log_pre_api_call hook fires exactly
    once for an async transcription request."""
    from unittest.mock import patch

    from litellm.integrations.custom_logger import CustomLogger

    custom_logger = CustomLogger()

    # Register the logger globally; restore the previous value afterwards so
    # this module-level mutation does not affect other tests.
    previous_callbacks = litellm.callbacks
    litellm.callbacks = [custom_logger]

    try:
        with patch.object(custom_logger, "log_pre_api_call") as mock_log_pre_call:
            audio_file.seek(0)  # shared module-level handle; rewind before reuse
            await litellm.atranscription(
                model="whisper-1",
                file=audio_file,
            )
            mock_log_pre_call.assert_called_once()
    finally:
        litellm.callbacks = previous_callbacks


# NOTE(review): an exact byte-for-byte duplicate of test_whisper_log_pre_call
# was defined here. Because Python binds the name at definition time, the
# duplicate shadowed the earlier definition and only one copy was ever
# collected by pytest. The redundant definition has been removed.


@pytest.mark.asyncio
async def test_gpt_4o_transcribe():
    """Smoke-test the openai/gpt-4o-transcribe model via litellm.atranscription."""
    audio_file.seek(0)  # shared module-level handle; rewind before reuse
    response = await litellm.atranscription(
        model="openai/gpt-4o-transcribe", file=audio_file, response_format="json"
    )
    # The original test only verified that the call did not raise; also check
    # that a transcription actually came back.
    assert response.text is not None


@pytest.mark.asyncio
async def test_gpt_4o_transcribe_model_mapping():
    """Test that GPT-4o transcription models are correctly mapped and not hardcoded to whisper-1"""
    # Each (requested model, expected mapped model) pair runs the exact same
    # checks; the loop replaces the three copy-pasted blocks of the original.
    cases = [
        ("openai/gpt-4o-mini-transcribe", "gpt-4o-mini-transcribe"),
        ("openai/gpt-4o-transcribe", "gpt-4o-transcribe"),
        # Traditional whisper-1 must keep working unchanged.
        ("openai/whisper-1", "whisper-1"),
    ]

    for requested_model, expected_model in cases:
        audio_file.seek(0)  # shared module-level handle; rewind before reuse
        response = await litellm.atranscription(
            model=requested_model,
            file=audio_file,
            response_format="json",
        )

        # The hidden params must carry the provider-mapped model name, not a
        # hardcoded "whisper-1".
        assert response._hidden_params is not None
        assert response._hidden_params["model"] == expected_model
        assert response._hidden_params["custom_llm_provider"] == "openai"
        assert response.text is not None


@pytest.mark.asyncio
async def test_azure_transcribe_model_mapping():
    """
    Test that Azure transcription models are correctly mapped and not hardcoded to whisper-1.
    This test validates that the request body contains the correct model parameter.
    """
    from unittest.mock import AsyncMock, patch, MagicMock
    from openai import AsyncAzureOpenAI

    # Create a mock response that looks like OpenAI's transcription response (as a BaseModel)
    from pydantic import BaseModel as PydanticBaseModel

    class MockTranscriptionResponse(PydanticBaseModel):
        text: str

    mock_transcription_response = MockTranscriptionResponse(text="This is a test transcription")

    # Create mock raw response with headers and parse() method
    # (the SDK's with_raw_response wrapper exposes the parsed body via .parse())
    mock_raw_response = MagicMock()
    mock_raw_response.headers = {"content-type": "application/json"}
    mock_raw_response.parse = MagicMock(return_value=mock_transcription_response)

    # Create a mock Azure client instance. spec=AsyncAzureOpenAI keeps
    # attribute access honest; api_key/_base_url are read by litellm's logging.
    mock_azure_client = MagicMock(spec=AsyncAzureOpenAI)
    mock_azure_client.audio.transcriptions.with_raw_response.create = AsyncMock(return_value=mock_raw_response)
    mock_azure_client.api_key = "test-api-key"
    mock_azure_client._base_url = MagicMock()
    mock_azure_client._base_url._uri_reference = "https://my-endpoint-europe-berri-992.openai.azure.com/"

    # Mock the get_azure_openai_client method to return our mock client, so no
    # real network request is made.
    with patch("litellm.llms.azure.audio_transcriptions.AzureAudioTranscription.get_azure_openai_client", return_value=mock_azure_client):
        # Make the transcription call
        response = await litellm.atranscription(
            model="azure/whisper-1",
            file=audio_file,
            response_format="json",
            api_key="test-api-key",
            api_base="https://my-endpoint-europe-berri-992.openai.azure.com/",
            api_version="2024-02-15-preview",
            drop_params=True
        )

        # Verify the create method was called
        mock_azure_client.audio.transcriptions.with_raw_response.create.assert_called_once()

        # Get the call arguments to validate the model parameter
        call_kwargs = mock_azure_client.audio.transcriptions.with_raw_response.create.call_args.kwargs

        # Assert that the model parameter is "whisper-1" (not hardcoded incorrectly)
        assert call_kwargs["model"] == "whisper-1", f"Expected model 'whisper-1', got {call_kwargs['model']}"
        assert "file" in call_kwargs
        assert call_kwargs["response_format"] == "json"

        # Check that the response contains the correct model in hidden params
        assert response._hidden_params is not None
        assert response._hidden_params["model"] == "whisper-1"
        assert response._hidden_params["custom_llm_provider"] == "azure"
        assert response.text is not None
