import asyncio
import json
import os
import sys

import pytest
from fastapi.testclient import TestClient

sys.path.insert(
    0, os.path.abspath("../../../../..")
)  # Adds the parent directory to the system path
from unittest.mock import MagicMock, patch

import litellm
from litellm import ModelResponse, RateLimitError, completion
from litellm.llms.bedrock.chat.converse_transformation import AmazonConverseConfig
from litellm.types.llms.bedrock import ConverseTokenUsageBlock


def test_transform_usage():
    """Verify Converse token usage maps onto the OpenAI usage schema,
    including prompt-caching read/write token accounting."""
    raw_usage = {
        "cacheReadInputTokenCount": 0,
        "cacheReadInputTokens": 0,
        "cacheWriteInputTokenCount": 1789,
        "cacheWriteInputTokens": 1789,
        "inputTokens": 3,
        "outputTokens": 401,
        "totalTokens": 2193,
    }
    usage = ConverseTokenUsageBlock(**raw_usage)
    openai_usage = AmazonConverseConfig()._transform_usage(usage)

    # prompt_tokens aggregates direct input plus cache read/write tokens.
    expected_prompt_tokens = (
        usage["inputTokens"]
        + usage["cacheReadInputTokens"]
        + usage["cacheWriteInputTokens"]
    )
    assert openai_usage.prompt_tokens == expected_prompt_tokens
    assert openai_usage.completion_tokens == usage["outputTokens"]
    assert openai_usage.total_tokens == usage["totalTokens"]

    # Cache-specific fields are surfaced both in prompt_tokens_details and
    # via the private anthropic-style attributes.
    cached = usage["cacheReadInputTokens"]
    assert openai_usage.prompt_tokens_details.cached_tokens == cached
    assert openai_usage._cache_creation_input_tokens == usage["cacheWriteInputTokens"]
    assert openai_usage._cache_read_input_tokens == cached


def test_transform_system_message():
    """Exercise _transform_system_message across string content, list
    content, cache_control handling, and non-system pass-through."""
    config = AmazonConverseConfig()

    def transform(msgs):
        # Copy so each case starts from a pristine message list.
        return config._transform_system_message(msgs.copy())

    # Case 1: a plain-string system message is popped out of the message
    # list and surfaced as a single system content block.
    remaining, blocks = transform(
        [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Hello!"},
        ]
    )
    assert len(remaining) == 1
    assert remaining[0]["role"] == "user"
    assert len(blocks) == 1
    assert blocks[0]["text"] == "You are a helpful assistant."

    # Case 2: list-style system content yields one block per text part.
    remaining, blocks = transform(
        [
            {
                "role": "system",
                "content": [
                    {"type": "text", "text": "System prompt 1"},
                    {"type": "text", "text": "System prompt 2"},
                ],
            },
            {"role": "user", "content": "Hi!"},
        ]
    )
    assert len(remaining) == 1
    assert remaining[0]["role"] == "user"
    assert len(blocks) == 2
    assert [b["text"] for b in blocks] == ["System prompt 1", "System prompt 2"]

    # Case 3: message-level cache_control appends a cachePoint block
    # right after the text block.
    remaining, blocks = transform(
        [
            {
                "role": "system",
                "content": "Cache this!",
                "cache_control": {"type": "ephemeral"},
            },
            {"role": "user", "content": "Hi!"},
        ]
    )
    assert len(remaining) == 1
    assert len(blocks) == 2
    assert blocks[0]["text"] == "Cache this!"
    assert "cachePoint" in blocks[1]
    assert blocks[1]["cachePoint"]["type"] == "default"

    # Case 3b: only the part carrying cache_control gets a cachePoint
    # inserted directly after it; the other part is left alone.
    remaining, blocks = transform(
        [
            {
                "role": "system",
                "content": [
                    {
                        "type": "text",
                        "text": "Cache this!",
                        "cache_control": {"type": "ephemeral"},
                    },
                    {"type": "text", "text": "Don't cache this!"},
                ],
            },
            {"role": "user", "content": "Hi!"},
        ]
    )
    assert len(remaining) == 1
    assert len(blocks) == 3
    assert blocks[0]["text"] == "Cache this!"
    assert "cachePoint" in blocks[1]
    assert blocks[1]["cachePoint"]["type"] == "default"
    assert blocks[2]["text"] == "Don't cache this!"

    # Case 4: messages without a system role pass through untouched and
    # produce no system blocks.
    remaining, blocks = transform(
        [
            {"role": "user", "content": "Hello!"},
            {"role": "assistant", "content": "Hi!"},
        ]
    )
    assert len(remaining) == 2
    assert [m["role"] for m in remaining] == ["user", "assistant"]
    assert blocks == []


def test_transform_thinking_blocks_with_redacted_content():
    """Reasoning text becomes a `thinking` block while redacted content
    becomes a `redacted_thinking` block, preserving order."""
    converse_blocks = [
        {
            "reasoningText": {
                "text": "This is a test",
                "signature": "test_signature",
            }
        },
        {
            "redactedContent": "This is a redacted content",
        },
    ]
    transformed = AmazonConverseConfig()._transform_thinking_blocks(converse_blocks)
    assert len(transformed) == 2
    assert [block["type"] for block in transformed] == [
        "thinking",
        "redacted_thinking",
    ]


def test_apply_tool_call_transformation_if_needed():
    """A JSON tool payload embedded in message content is lifted into a
    structured tool call matching the declared tool."""
    from litellm.types.utils import Message

    config = AmazonConverseConfig()
    declared_tools = [
        {
            "type": "function",
            "function": {
                "name": "test_function",
                "arguments": "test_arguments",
            },
        },
    ]
    tool_payload = {
        "type": "function",
        "name": "test_function",
        "parameters": {"test": "test"},
    }
    # The tool invocation arrives serialized inside the message content.
    incoming = Message(role="user", content=json.dumps(tool_payload))

    transformed, _ = config.apply_tool_call_transformation_if_needed(
        incoming, declared_tools
    )

    assert len(transformed.tool_calls) == 1
    call = transformed.tool_calls[0]
    assert call.function.name == "test_function"
    # Arguments are re-serialized from the payload's `parameters` object.
    assert call.function.arguments == json.dumps(tool_payload["parameters"])


def test_transform_tool_call_with_cache_control():
    """A tool carrying cache_control expands into its toolSpec entry plus a
    trailing cachePoint entry inside toolConfig."""
    config = AmazonConverseConfig()

    messages = [{"role": "user", "content": "Am I lost?"}]

    tools = [
        {
            "type": "function",
            "function": {
                "name": "get_location",
                "description": "Get the user's location",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "location": {
                            "type": "string",
                            "description": "The city and state, e.g. San Francisco, CA",
                        },
                        "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                    },
                    "required": ["location"],
                },
            },
            "cache_control": {"type": "ephemeral"},
        },
    ]

    result = config.transform_request(
        model="bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
        messages=messages,
        optional_params={"tools": tools},
        litellm_params={},
        headers={},
    )

    assert "toolConfig" in result
    assert "tools" in result["toolConfig"]
    converse_tools = result["toolConfig"]["tools"]

    # One entry for the function spec, one for the cache point.
    assert len(converse_tools) == 2

    spec_entry = converse_tools[0]
    print(spec_entry)
    tool_spec = spec_entry["toolSpec"]
    assert tool_spec["name"] == "get_location"
    assert tool_spec["description"] == "Get the user's location"
    location_prop = tool_spec["inputSchema"]["json"]["properties"]["location"]
    assert location_prop["type"] == "string"

    cache_entry = converse_tools[1]
    assert "cachePoint" in cache_entry
    assert cache_entry["cachePoint"]["type"] == "default"


def test_reasoning_with_forced_tool_choice_switches_to_auto():
    """With reasoning_effort set, a forced tool_choice of "required" must be
    mapped down to {"auto": {}}."""
    config = AmazonConverseConfig()

    request_params = {
        "tools": [
            {
                "type": "function",
                "function": {"name": "get_current_weather", "parameters": {}},
            }
        ],
        "tool_choice": "required",
        "reasoning_effort": "low",
    }

    mapped = config.map_openai_params(
        model="bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0",
        non_default_params=request_params,
        optional_params={},
        drop_params=False,
    )

    assert mapped["tool_choice"] == {"auto": {}}

def test_get_supported_openai_params():
    """Claude Sonnet 4 via the converse route must expose tool-calling and
    reasoning-related OpenAI params."""
    supported = AmazonConverseConfig().get_supported_openai_params(
        model="bedrock/converse/us.anthropic.claude-sonnet-4-20250514-v1:0"
    )
    for param in ("tools", "tool_choice", "thinking", "reasoning_effort"):
        assert param in supported


def test_get_supported_openai_params_bedrock_converse():
    """
    Test that all documented bedrock converse models have the same set of supported openai params when using
    `bedrock/converse/` or `bedrock/` prefix.

    Note: This test is critical for routing, if we ever remove `litellm.BEDROCK_CONVERSE_MODELS`,
    please update this test to read `bedrock_converse` models from the model cost map.
    """
    config = AmazonConverseConfig()
    for model in litellm.BEDROCK_CONVERSE_MODELS:
        print(f"Testing model: {model}")
        supported_params_without_prefix = config.get_supported_openai_params(model=model)
        supported_params_with_prefix = config.get_supported_openai_params(
            model=f"bedrock/converse/{model}"
        )
        # Order may differ between routes; only the set of params matters.
        assert set(supported_params_without_prefix) == set(supported_params_with_prefix), f"Supported params mismatch for model: {model}. Without prefix: {supported_params_without_prefix}, With prefix: {supported_params_with_prefix}"
        print(f"✅ Passed for model: {model}")


def test_transform_request_helper_includes_anthropic_beta_and_tools():
    """Test _transform_request_helper includes anthropic_beta for computer tools."""
    computer_tool = {
        "type": "computer_20241022",
        "name": "computer",
        "display_height_px": 768,
        "display_width_px": 1024,
        "display_number": 0,
    }
    data = AmazonConverseConfig()._transform_request_helper(
        model="anthropic.claude-3-5-sonnet-20241022-v2:0",
        system_content_blocks=[],
        optional_params={
            "anthropic_beta": ["computer-use-2024-10-22"],
            "tools": [computer_tool],
            "some_other_param": 123,
        },
        messages=None,
    )

    assert "additionalModelRequestFields" in data
    fields = data["additionalModelRequestFields"]

    # The computer-use beta flag must be propagated verbatim.
    assert "anthropic_beta" in fields
    assert fields["anthropic_beta"] == ["computer-use-2024-10-22"]

    # The computer tool itself must also be forwarded.
    assert "tools" in fields
    forwarded_tools = fields["tools"]
    assert len(forwarded_tools) == 1
    assert forwarded_tools[0]["type"] == "computer_20241022"


def test_transform_response_with_computer_use_tool():
    """Test response transformation with computer use tool call.

    A Converse `toolUse` block for the computer tool must be surfaced as an
    OpenAI-style tool call with JSON-encoded arguments.
    """
    # NOTE: previously this test also imported httpx, ConverseResponseBlock
    # and ConverseTokenUsageBlock without using them; those imports were
    # removed. AmazonConverseConfig comes from the module-level import.
    from litellm.types.utils import ModelResponse

    # Simulate a Bedrock Converse response with a computer-use tool call
    response_json = {
        "additionalModelResponseFields": {},
        "metrics": {"latencyMs": 100.0},
        "output": {
            "message": {
                "role": "assistant",
                "content": [
                    {
                        "toolUse": {
                            "toolUseId": "tooluse_123",
                            "name": "computer",
                            "input": {
                                "display_height_px": 768,
                                "display_width_px": 1024,
                                "display_number": 0,
                            },
                        }
                    }
                ]
            }
        },
        "stopReason": "tool_use",
        "usage": {
            "inputTokens": 10,
            "outputTokens": 5,
            "totalTokens": 15,
            "cacheReadInputTokenCount": 0,
            "cacheReadInputTokens": 0,
            "cacheWriteInputTokenCount": 0,
            "cacheWriteInputTokens": 0,
        },
    }

    # Minimal stand-in for httpx.Response: only .json() and .text are used
    # by the transformation.
    class MockResponse:
        def json(self):
            return response_json

        @property
        def text(self):
            return json.dumps(response_json)

    config = AmazonConverseConfig()
    model_response = ModelResponse()
    optional_params = {
        "tools": [
            {
                "type": "computer_20241022",
                "function": {
                    "name": "computer",
                    "parameters": {
                        "display_height_px": 768,
                        "display_width_px": 1024,
                        "display_number": 0,
                    },
                },
            }
        ]
    }

    # Call the transformation logic
    result = config._transform_response(
        model="bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
        response=MockResponse(),
        model_response=model_response,
        stream=False,
        logging_obj=None,
        optional_params=optional_params,
        api_key=None,
        data=None,
        messages=[],
        encoding=None,
    )

    # Check that the tool call is present in the returned message
    assert result.choices[0].message.tool_calls is not None
    assert len(result.choices[0].message.tool_calls) == 1
    tool_call = result.choices[0].message.tool_calls[0]
    assert tool_call.function.name == "computer"
    # Arguments round-trip through JSON with the original input values.
    args = json.loads(tool_call.function.arguments)
    assert args["display_height_px"] == 768
    assert args["display_width_px"] == 1024
    assert args["display_number"] == 0


def test_transform_response_with_bash_tool():
    """Test response transformation with bash tool call.

    A Converse `toolUse` block for the bash tool must be surfaced as an
    OpenAI-style tool call whose arguments contain the shell command.
    """
    # NOTE: previously this test also imported httpx, ConverseResponseBlock
    # and ConverseTokenUsageBlock without using them; those imports were
    # removed. AmazonConverseConfig comes from the module-level import.
    from litellm.types.utils import ModelResponse

    # Simulate a Bedrock Converse response with a bash tool call
    response_json = {
        "additionalModelResponseFields": {},
        "metrics": {"latencyMs": 100.0},
        "output": {
            "message": {
                "role": "assistant",
                "content": [
                    {
                        "toolUse": {
                            "toolUseId": "tooluse_456",
                            "name": "bash",
                            "input": {
                                "command": "ls -la *.py"
                            },
                        }
                    }
                ]
            }
        },
        "stopReason": "tool_use",
        "usage": {
            "inputTokens": 8,
            "outputTokens": 3,
            "totalTokens": 11,
            "cacheReadInputTokenCount": 0,
            "cacheReadInputTokens": 0,
            "cacheWriteInputTokenCount": 0,
            "cacheWriteInputTokens": 0,
        },
    }

    # Minimal stand-in for httpx.Response: only .json() and .text are used
    # by the transformation.
    class MockResponse:
        def json(self):
            return response_json

        @property
        def text(self):
            return json.dumps(response_json)

    config = AmazonConverseConfig()
    model_response = ModelResponse()
    optional_params = {
        "tools": [
            {
                "type": "bash_20241022",
                "function": {
                    "name": "bash",
                    "parameters": {},
                },
            }
        ]
    }

    # Call the transformation logic
    result = config._transform_response(
        model="bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
        response=MockResponse(),
        model_response=model_response,
        stream=False,
        logging_obj=None,
        optional_params=optional_params,
        api_key=None,
        data=None,
        messages=[],
        encoding=None,
    )

    # Check that the tool call is present in the returned message
    assert result.choices[0].message.tool_calls is not None
    assert len(result.choices[0].message.tool_calls) == 1
    tool_call = result.choices[0].message.tool_calls[0]
    assert tool_call.function.name == "bash"
    args = json.loads(tool_call.function.arguments)
    assert args["command"] == "ls -la *.py"


def test_transform_response_with_structured_response_being_called():
    """Test response transformation with structured response.

    With json_mode enabled, a `json_tool_call` toolUse (the synthetic tool
    used for structured output) must be unwrapped: its input becomes the
    message content and no tool_calls are emitted.
    """
    from litellm.llms.bedrock.chat.converse_transformation import AmazonConverseConfig
    from litellm.types.utils import ModelResponse

    # Simulate a Bedrock Converse response whose only content block is a
    # `json_tool_call` toolUse carrying the structured payload.
    response_json = {
        "additionalModelResponseFields": {},
        "metrics": {"latencyMs": 100.0},
        "output": {
            "message": {
                "role": "assistant",
                "content": [
                    {
                        "toolUse": {
                            "toolUseId": "tooluse_456",
                            "name": "json_tool_call",
                            "input": {
                                "Current_Temperature": 62,
                                "Weather_Explanation": "San Francisco typically has mild, cool weather year-round due to its coastal location and marine influence. The city is known for its fog, moderate temperatures, and relatively stable climate with little seasonal variation."},
                        }
                    }
                ]
            }
        },
        "stopReason": "tool_use",
        "usage": {
            "inputTokens": 8,
            "outputTokens": 3,
            "totalTokens": 11,
            "cacheReadInputTokenCount": 0,
            "cacheReadInputTokens": 0,
            "cacheWriteInputTokenCount": 0,
            "cacheWriteInputTokens": 0,
        },
    }
    # Mock httpx.Response: only .json() and .text are used by the transform.
    class MockResponse:
        def json(self):
            return response_json
        @property
        def text(self):
            return json.dumps(response_json)

    config = AmazonConverseConfig()
    model_response = ModelResponse()
    # json_mode=True plus a declared `json_tool_call` tool mirrors how
    # structured output (response_format) is requested on Bedrock.
    optional_params = {
        "json_mode": True,
        "tools": [
            {
                'type': 'function',
                'function': {
                    'name': 'get_weather',
                    'description': 'Get the current weather in a given location',
                    'parameters': {
                        'type': 'object',
                        'properties': {
                            'location': {
                                'type': 'string',
                                'description': 'The city and state, e.g. San Francisco, CA'
                            },
                            'unit': {
                                'type': 'string',
                                'enum': ['celsius', 'fahrenheit']
                            }
                        },
                        'required': ['location']
                    }
                }
            },
            {
                'type': 'function',
                'function': {
                    'name': 'json_tool_call',
                    'parameters': {
                        '$schema': 'http://json-schema.org/draft-07/schema#',
                        'type': 'object',
                        'required': ['Weather_Explanation', 'Current_Temperature'],
                        'properties': {
                            'Weather_Explanation': {
                                'type': ['string', 'null'],
                                'description': '1-2 sentences explaining the weather in the location'
                            },
                            'Current_Temperature': {
                                'type': ['number', 'null'],
                                'description': 'Current temperature in the location'
                            }
                        },
                        'additionalProperties': False
                    }
                }
            }
        ]
    }
    # Call the transformation logic
    result = config._transform_response(
        model="bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
        response=MockResponse(),
        model_response=model_response,
        stream=False,
        logging_obj=None,
        optional_params=optional_params,
        api_key=None,
        data=None,
        messages=[],
        encoding=None,
    )
    # The json_tool_call must be collapsed into plain content: no tool_calls
    # remain, and content is the JSON-serialized tool input.
    assert result.choices[0].message.tool_calls is None

    assert result.choices[0].message.content is not None
    assert result.choices[0].message.content ==  '{"Current_Temperature": 62, "Weather_Explanation": "San Francisco typically has mild, cool weather year-round due to its coastal location and marine influence. The city is known for its fog, moderate temperatures, and relatively stable climate with little seasonal variation."}'

def test_transform_response_with_structured_response_calling_tool():
    """Test response transformation with structured response.

    Even with json_mode enabled, a toolUse for a *real* tool (here
    `get_weather`, not the synthetic `json_tool_call`) must still surface as
    a regular tool call rather than being unwrapped into content.
    """
    from litellm.llms.bedrock.chat.converse_transformation import AmazonConverseConfig
    from litellm.types.utils import ModelResponse

    # Simulate a Bedrock Converse response containing text plus a
    # `get_weather` toolUse block.
    response_json = {
        "metrics": {
            "latencyMs": 1148
        },
        "output": {
            "message":
            {
                "content": [
                    {
                        "text": "I\'ll check the current weather in San Francisco for you."
                    },
                    {
                        "toolUse": {
                            "input": {
                                "location": "San Francisco, CA",
                                "unit": "celsius"
                            },
                            "name": "get_weather",
                            "toolUseId": "tooluse_oKk__QrqSUmufMw3Q7vGaQ"
                        }
                    }
                ],
                "role": "assistant"
            }
        },
        "stopReason": "tool_use",
        "usage": {
            "cacheReadInputTokenCount": 0,
            "cacheReadInputTokens": 0,
            "cacheWriteInputTokenCount": 0,
            "cacheWriteInputTokens": 0,
            "inputTokens": 534,
            "outputTokens": 69,
            "totalTokens": 603
        }
    }
    # Mock httpx.Response: only .json() and .text are used by the transform.
    class MockResponse:
        def json(self):
            return response_json
        @property
        def text(self):
            return json.dumps(response_json)

    config = AmazonConverseConfig()
    model_response = ModelResponse()
    # json_mode=True plus a declared `json_tool_call` tool mirrors how
    # structured output (response_format) is requested on Bedrock.
    optional_params = {
        "json_mode": True,
        "tools": [
            {
                'type': 'function',
                'function': {
                    'name': 'get_weather',
                    'description': 'Get the current weather in a given location',
                    'parameters': {
                        'type': 'object',
                        'properties': {
                            'location': {
                                'type': 'string',
                                'description': 'The city and state, e.g. San Francisco, CA'
                            },
                            'unit': {
                                'type': 'string',
                                'enum': ['celsius', 'fahrenheit']
                            }
                        },
                        'required': ['location']
                    }
                }
            },
            {
                'type': 'function',
                'function': {
                    'name': 'json_tool_call',
                    'parameters': {
                        '$schema': 'http://json-schema.org/draft-07/schema#',
                        'type': 'object',
                        'required': ['Weather_Explanation', 'Current_Temperature'],
                        'properties': {
                            'Weather_Explanation': {
                                'type': ['string', 'null'],
                                'description': '1-2 sentences explaining the weather in the location'
                            },
                            'Current_Temperature': {
                                'type': ['number', 'null'],
                                'description': 'Current temperature in the location'
                            }
                        },
                        'additionalProperties': False
                    }
                }
            }
        ]
    }
    # Call the transformation logic
    result = config._transform_response(
        model="bedrock/eu.anthropic.claude-sonnet-4-20250514-v1:0",
        response=MockResponse(),
        model_response=model_response,
        stream=False,
        logging_obj=None,
        optional_params=optional_params,
        api_key=None,
        data=None,
        messages=[],
        encoding=None,
    )
    # Check that the tool call is present in the returned message
    assert result.choices[0].message.tool_calls is not None
    assert len(result.choices[0].message.tool_calls) == 1
    assert result.choices[0].message.tool_calls[0].function.name == "get_weather"
    assert result.choices[0].message.tool_calls[0].function.arguments == '{"location": "San Francisco, CA", "unit": "celsius"}'


@pytest.mark.asyncio
async def test_bedrock_bash_tool_acompletion():
    """Test Bedrock with bash tool for ls command using acompletion.

    A dummy API key is supplied on purpose: if request transformation of the
    bash tool succeeds, the call must fail with an auth/credentials-style
    error. Any other error suggests broken tool handling, and a successful
    response is impossible with the dummy key.
    """
    # Test with bash tool instead of computer tool
    tools = [
        {
            "type": "bash_20241022",
            "name": "bash",
        }
    ]

    messages = [
        {
            "role": "user",
            "content": "run ls command and find all python files"
        }
    ]

    try:
        await litellm.acompletion(
            model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0",
            messages=messages,
            tools=tools,
            # Using dummy API key - test should fail with auth error, proving request formatting works
            api_key="dummy-key-for-testing",
        )
    except Exception as e:
        error_str = str(e).lower()

        # Substrings that identify an expected authentication/credentials error
        auth_error_indicators = [
            "credentials", "authentication", "unauthorized", "access denied",
            "aws", "region", "profile", "token", "invalid", "signature"
        ]

        if not any(indicator in error_str for indicator in auth_error_indicators):
            # Unexpected error - might be a tool handling issue.
            # pytest.fail is used instead of `assert False`, which would be
            # stripped when Python runs with -O.
            pytest.fail(f"Unexpected error (might be tool handling issue): {e}")
    else:
        # Reaching here means the dummy key was somehow accepted.
        pytest.fail("Expected authentication error but got successful response")


@pytest.mark.asyncio
async def test_bedrock_computer_use_acompletion():
    """Test Bedrock computer use with acompletion function.

    A dummy API key is supplied on purpose: if request transformation of the
    computer-use tool (including the image content block) succeeds, the call
    must fail with an auth/credentials-style error. Any other error suggests
    broken tool handling, and a successful response is impossible with the
    dummy key.
    """
    # Test with computer use tool
    tools = [
        {
            "type": "computer_20241022",
            "name": "computer",
            "display_height_px": 768,
            "display_width_px": 1024,
            "display_number": 0,
        }
    ]

    # Mixed text + image content exercises multimodal request formatting.
    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": "Go to the bedrock console"
                },
                {
                    "type": "image_url",
                    "image_url": {
                        "url": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8/5+hHgAHggJ/PchI7wAAAABJRU5ErkJggg=="
                    }
                }
            ]
        }
    ]

    try:
        await litellm.acompletion(
            model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0",
            messages=messages,
            tools=tools,
            # Using dummy API key - test should fail with auth error, proving request formatting works
            api_key="dummy-key-for-testing",
        )
    except Exception as e:
        error_str = str(e).lower()

        # Substrings that identify an expected authentication/credentials error
        auth_error_indicators = [
            "credentials", "authentication", "unauthorized", "access denied",
            "aws", "region", "profile", "token", "invalid", "signature"
        ]

        if not any(indicator in error_str for indicator in auth_error_indicators):
            # Unexpected error - might be a tool handling issue.
            # pytest.fail is used instead of `assert False`, which would be
            # stripped when Python runs with -O.
            pytest.fail(f"Unexpected error (might be tool handling issue): {e}")
    else:
        # Reaching here means the dummy key was somehow accepted.
        pytest.fail("Expected authentication error but got successful response")


def test_transformation_directly():
    """Test the transformation directly to verify the request structure.

    Note: this was previously declared async with @pytest.mark.asyncio but
    contained no awaits, so it now runs as a plain synchronous test.
    """
    config = AmazonConverseConfig()

    tools = [
        {
            "type": "computer_20241022",
            "name": "computer",
            "display_height_px": 768,
            "display_width_px": 1024,
            "display_number": 0,
        },
        {
            "type": "bash_20241022",
            "name": "bash",
        }
    ]

    messages = [
        {
            "role": "user",
            "content": "run ls command and find all python files"
        }
    ]

    # Transform request
    request_data = config.transform_request(
        model="anthropic.claude-3-5-sonnet-20241022-v2:0",
        messages=messages,
        optional_params={"tools": tools},
        litellm_params={},
        headers={}
    )

    # Verify the structure
    assert "additionalModelRequestFields" in request_data
    additional_fields = request_data["additionalModelRequestFields"]

    # Check that anthropic_beta is set correctly for computer use
    assert "anthropic_beta" in additional_fields
    assert additional_fields["anthropic_beta"] == ["computer-use-2024-10-22"]

    # Check that tools are present
    assert "tools" in additional_fields
    assert len(additional_fields["tools"]) == 2

    # Verify tool types
    tool_types = [tool.get("type") for tool in additional_fields["tools"]]
    assert "computer_20241022" in tool_types
    assert "bash_20241022" in tool_types


def test_transform_request_helper_includes_anthropic_beta_and_tools_bash():
    """Test _transform_request_helper includes anthropic_beta for bash tools."""
    bash_tool = {
        "type": "bash_20241022",
        "name": "bash",
    }
    data = AmazonConverseConfig()._transform_request_helper(
        model="anthropic.claude-3-5-sonnet-20241022-v2:0",
        system_content_blocks=[],
        optional_params={
            "anthropic_beta": ["computer-use-2024-10-22"],
            "tools": [bash_tool],
            "some_other_param": 123,
        },
        messages=None,
    )

    assert "additionalModelRequestFields" in data
    fields = data["additionalModelRequestFields"]

    # The beta flag must be propagated verbatim.
    assert "anthropic_beta" in fields
    assert fields["anthropic_beta"] == ["computer-use-2024-10-22"]

    # The bash tool itself must also be forwarded.
    assert "tools" in fields
    forwarded_tools = fields["tools"]
    assert len(forwarded_tools) == 1
    assert forwarded_tools[0]["type"] == "bash_20241022"


def test_transform_request_with_multiple_tools():
    """Test transformation with multiple tools including computer, bash, and function tools."""
    config = AmazonConverseConfig()

    # Use the exact payload from the user's error
    computer_tool = {
        "type": "computer_20241022",
        "function": {
            "name": "computer",
            "parameters": {
                "display_height_px": 768,
                "display_width_px": 1024,
                "display_number": 0,
            },
        },
    }
    bash_tool = {
        "type": "bash_20241022",
        "name": "bash",
    }
    editor_tool = {
        "type": "text_editor_20241022",
        "name": "str_replace_editor",
    }
    function_tool = {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    },
                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                },
                "required": ["location"],
            },
        }
    }

    request_data = config.transform_request(
        model="anthropic.claude-3-5-sonnet-20241022-v2:0",
        messages=[
            {"role": "user", "content": "run ls command and find all python files"}
        ],
        optional_params={"tools": [computer_tool, bash_tool, editor_tool, function_tool]},
        litellm_params={},
        headers={},
    )

    # Computer-use style tools are routed into additionalModelRequestFields.
    assert "additionalModelRequestFields" in request_data
    additional_fields = request_data["additionalModelRequestFields"]

    # The computer-use beta flag is enabled for these tools.
    assert "anthropic_beta" in additional_fields
    assert additional_fields["anthropic_beta"] == ["computer-use-2024-10-22"]

    assert "tools" in additional_fields
    assert len(additional_fields["tools"]) == 3  # computer, bash, text_editor tools

    observed_types = [tool.get("type") for tool in additional_fields["tools"]]
    for expected_type in ("computer_20241022", "bash_20241022", "text_editor_20241022"):
        assert expected_type in observed_types

    # Function tools are processed separately and not included in computer use tools;
    # they would be in toolConfig if present.


def test_transform_request_with_computer_tool_only():
    """Test transformation with only computer tool."""
    config = AmazonConverseConfig()

    computer_tool = {
        "type": "computer_20241022",
        "name": "computer",
        "display_height_px": 768,
        "display_width_px": 1024,
        "display_number": 0,
    }
    user_message = {
        "role": "user",
        "content": [
            {
                "type": "text",
                "text": "Go to the bedrock console"
            },
            {
                "type": "image_url",
                "image_url": {
                    "url": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8/5+hHgAHggJ/PchI7wAAAABJRU5ErkJggg=="
                }
            }
        ]
    }

    request_data = config.transform_request(
        model="anthropic.claude-3-5-sonnet-20241022-v2:0",
        messages=[user_message],
        optional_params={"tools": [computer_tool]},
        litellm_params={},
        headers={},
    )

    # The computer tool triggers the computer-use beta flag.
    assert "additionalModelRequestFields" in request_data
    extra = request_data["additionalModelRequestFields"]
    assert "anthropic_beta" in extra
    assert extra["anthropic_beta"] == ["computer-use-2024-10-22"]

    # Exactly one computer-use tool is forwarded.
    assert "tools" in extra
    assert len(extra["tools"]) == 1
    assert extra["tools"][0]["type"] == "computer_20241022"


def test_transform_request_with_bash_tool_only():
    """Test transformation with only bash tool."""
    config = AmazonConverseConfig()

    request_data = config.transform_request(
        model="anthropic.claude-3-5-sonnet-20241022-v2:0",
        messages=[
            {"role": "user", "content": "run ls command and find all python files"}
        ],
        optional_params={"tools": [{"type": "bash_20241022", "name": "bash"}]},
        litellm_params={},
        headers={},
    )

    # A bash tool alone still enables the computer-use beta flag.
    assert "additionalModelRequestFields" in request_data
    extra = request_data["additionalModelRequestFields"]
    assert "anthropic_beta" in extra
    assert extra["anthropic_beta"] == ["computer-use-2024-10-22"]

    # The single bash tool is passed through.
    assert "tools" in extra
    assert len(extra["tools"]) == 1
    assert extra["tools"][0]["type"] == "bash_20241022"


def test_transform_request_with_text_editor_tool():
    """Test transformation with text editor tool."""
    config = AmazonConverseConfig()

    request_data = config.transform_request(
        model="anthropic.claude-3-5-sonnet-20241022-v2:0",
        messages=[{"role": "user", "content": "Edit this text file"}],
        optional_params={
            "tools": [{"type": "text_editor_20241022", "name": "str_replace_editor"}]
        },
        litellm_params={},
        headers={},
    )

    # Text-editor tools also enable the computer-use beta flag.
    assert "additionalModelRequestFields" in request_data
    extra = request_data["additionalModelRequestFields"]
    assert "anthropic_beta" in extra
    assert extra["anthropic_beta"] == ["computer-use-2024-10-22"]

    # The single text-editor tool is passed through.
    assert "tools" in extra
    assert len(extra["tools"]) == 1
    assert extra["tools"][0]["type"] == "text_editor_20241022"


def test_transform_request_with_function_tool():
    """Test transformation with function tool."""
    config = AmazonConverseConfig()

    weather_tool = {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    },
                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                },
                "required": ["location"],
            },
        }
    }

    request_data = config.transform_request(
        model="anthropic.claude-3-5-sonnet-20241022-v2:0",
        messages=[
            {"role": "user", "content": "What's the weather like in San Francisco?"}
        ],
        optional_params={"tools": [weather_tool]},
        litellm_params={},
        headers={},
    )

    # additionalModelRequestFields is still present in the payload.
    assert "additionalModelRequestFields" in request_data

    # Function tools are not computer use tools, so they don't get anthropic_beta;
    # they are processed through the regular tool config instead.
    assert "toolConfig" in request_data
    tool_config = request_data["toolConfig"]
    assert "tools" in tool_config
    assert len(tool_config["tools"]) == 1
    assert tool_config["tools"][0]["toolSpec"]["name"] == "get_weather"


def test_map_openai_params_with_response_format():
    """Test map_openai_params with response_format."""
    config = AmazonConverseConfig()

    weather_tool = {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    },
                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                },
                "required": ["location"],
            },
        }
    }

    json_schema = {
        "type": "json_schema",
        "json_schema": {
            "name": "WeatherResult",
            "schema": {
                "$schema": "http://json-schema.org/draft-07/schema#",
                "type": "object",
                "required": ["Weather_Explanation", "Current_Temperature"],
                "properties": {
                    "Weather_Explanation": {
                        "type": ["string", "null"],
                        "description": "1-2 sentences explaining the weather in the location",
                    },
                    "Current_Temperature": {
                        "type": ["number", "null"],
                        "description": "Current temperature in the location",
                    },
                },
                "additionalProperties": False,
            },
            "strict": False,
        },
    }

    mapped_params = config.map_openai_params(
        non_default_params={"response_format": json_schema},
        optional_params={"tools": [weather_tool]},
        model="eu.anthropic.claude-sonnet-4-20250514-v1:0",
        drop_params=False,
    )

    # response_format is converted into a synthetic "json_tool_call" function
    # tool appended after the user-supplied tool.
    assert "tools" in mapped_params
    assert len(mapped_params["tools"]) == 2
    synthetic_tool = mapped_params["tools"][1]
    assert synthetic_tool["type"] == "function"
    assert synthetic_tool["function"]["name"] == "json_tool_call"


@pytest.mark.asyncio
async def test_assistant_message_cache_control():
    """Test that assistant messages with cache_control generate cachePoint blocks.

    Verifies that the sync and async Bedrock Converse message transforms
    agree, and that an assistant message with string content plus a
    message-level cache_control is emitted as a text block followed by a
    cachePoint block.
    """
    from litellm.litellm_core_utils.prompt_templates.factory import (
        BedrockConverseMessagesProcessor,
        _bedrock_converse_messages_pt,
    )

    # Test assistant message with string content and cache_control
    messages = [
        {"role": "user", "content": "Hello"},
        {
            "role": "assistant",
            "content": "Hi there!",
            "cache_control": {"type": "ephemeral"}
        }
    ]

    result = _bedrock_converse_messages_pt(
        messages=messages,
        model="bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
        llm_provider="bedrock_converse"
    )

    # The async implementation must produce the same transformation.
    # (The original test invoked this twice with identical arguments;
    # the duplicated call was redundant and has been removed.)
    async_result = await BedrockConverseMessagesProcessor._bedrock_converse_messages_pt_async(
        messages=messages,
        model="bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
        llm_provider="bedrock_converse"
    )

    assert result == async_result

    # Should have user message and assistant message
    assert len(result) == 2
    assert result[0]["role"] == "user"
    assert result[1]["role"] == "assistant"

    # Assistant message should have text content followed by a cachePoint
    assistant_content = result[1]["content"]
    assert len(assistant_content) == 2
    assert assistant_content[0]["text"] == "Hi there!"
    assert "cachePoint" in assistant_content[1]
    assert assistant_content[1]["cachePoint"]["type"] == "default"


@pytest.mark.asyncio
async def test_assistant_message_list_content_cache_control():
    """Test assistant messages with list content and cache_control."""
    from litellm.litellm_core_utils.prompt_templates.factory import (
        BedrockConverseMessagesProcessor,
        _bedrock_converse_messages_pt,
    )

    cached_text_block = {
        "type": "text",
        "text": "This should be cached",
        "cache_control": {"type": "ephemeral"}
    }
    transform_kwargs = {
        "messages": [
            {"role": "user", "content": "Hello"},
            {"role": "assistant", "content": [cached_text_block]},
        ],
        "model": "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
        "llm_provider": "bedrock_converse",
    }

    sync_result = _bedrock_converse_messages_pt(**transform_kwargs)
    async_result = await BedrockConverseMessagesProcessor._bedrock_converse_messages_pt_async(
        **transform_kwargs
    )

    # Sync and async transforms must agree.
    assert sync_result == async_result

    # Assistant message should have its text content followed by a cachePoint.
    assistant_content = sync_result[1]["content"]
    assert len(assistant_content) == 2
    assert assistant_content[0]["text"] == "This should be cached"
    assert "cachePoint" in assistant_content[1]
    assert assistant_content[1]["cachePoint"]["type"] == "default"


@pytest.mark.asyncio
async def test_tool_message_cache_control():
    """Test that tool messages with cache_control generate cachePoint blocks."""
    from litellm.litellm_core_utils.prompt_templates.factory import (
        BedrockConverseMessagesProcessor,
        _bedrock_converse_messages_pt,
    )

    cached_tool_output = {
        "type": "text",
        "text": "Weather data: sunny, 25°C",
        "cache_control": {"type": "ephemeral"}
    }
    transform_kwargs = {
        "messages": [
            {"role": "user", "content": "What's the weather?"},
            {
                "role": "assistant",
                "content": None,
                "tool_calls": [
                    {
                        "id": "call_123",
                        "type": "function",
                        "function": {"name": "get_weather", "arguments": "{}"}
                    }
                ]
            },
            {
                "role": "tool",
                "tool_call_id": "call_123",
                "content": [cached_tool_output]
            },
        ],
        "model": "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
        "llm_provider": "bedrock_converse",
    }

    sync_result = _bedrock_converse_messages_pt(**transform_kwargs)
    async_result = await BedrockConverseMessagesProcessor._bedrock_converse_messages_pt_async(
        **transform_kwargs
    )

    # Sync and async transforms must agree.
    assert sync_result == async_result

    # Should have user, assistant, and user (tool results) messages.
    assert len(sync_result) == 3

    # Last message contains the tool result followed by a cachePoint.
    tool_message_content = sync_result[2]["content"]
    assert len(tool_message_content) == 2
    assert "toolResult" in tool_message_content[0]
    assert tool_message_content[0]["toolResult"]["content"][0]["text"] == "Weather data: sunny, 25°C"
    assert "cachePoint" in tool_message_content[1]
    assert tool_message_content[1]["cachePoint"]["type"] == "default"


@pytest.mark.asyncio
async def test_tool_message_string_content_cache_control():
    """Test tool messages with string content and message-level cache_control."""
    from litellm.litellm_core_utils.prompt_templates.factory import (
        BedrockConverseMessagesProcessor,
        _bedrock_converse_messages_pt,
    )

    transform_kwargs = {
        "messages": [
            {"role": "user", "content": "What's the weather?"},
            {
                "role": "assistant",
                "content": None,
                "tool_calls": [
                    {
                        "id": "call_123",
                        "type": "function",
                        "function": {"name": "get_weather", "arguments": "{}"}
                    }
                ]
            },
            {
                "role": "tool",
                "tool_call_id": "call_123",
                "content": "Weather: sunny, 25°C",
                "cache_control": {"type": "ephemeral"}
            },
        ],
        "model": "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
        "llm_provider": "bedrock_converse",
    }

    sync_result = _bedrock_converse_messages_pt(**transform_kwargs)
    async_result = await BedrockConverseMessagesProcessor._bedrock_converse_messages_pt_async(
        **transform_kwargs
    )

    # Sync and async transforms must agree.
    assert sync_result == async_result

    # Last message: the tool result first, then the cachePoint block.
    tool_message_content = sync_result[2]["content"]
    assert len(tool_message_content) == 2
    assert "toolResult" in tool_message_content[0]
    assert tool_message_content[0]["toolResult"]["content"][0]["text"] == "Weather: sunny, 25°C"
    assert "cachePoint" in tool_message_content[1]
    assert tool_message_content[1]["cachePoint"]["type"] == "default"


@pytest.mark.asyncio
async def test_assistant_tool_calls_cache_control():
    """Test that assistant tool_calls with cache_control generate cachePoint blocks."""
    from litellm.litellm_core_utils.prompt_templates.factory import (
        BedrockConverseMessagesProcessor,
        _bedrock_converse_messages_pt,
    )

    cached_tool_call = {
        "id": "call_proxy_123",
        "type": "function",
        "function": {"name": "calc", "arguments": "{}"},
        "cache_control": {"type": "ephemeral"}
    }
    transform_kwargs = {
        "messages": [
            {"role": "user", "content": "Calculate 2+2"},
            {"role": "assistant", "content": None, "tool_calls": [cached_tool_call]},
        ],
        "model": "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
        "llm_provider": "bedrock_converse",
    }

    sync_result = _bedrock_converse_messages_pt(**transform_kwargs)
    async_result = await BedrockConverseMessagesProcessor._bedrock_converse_messages_pt_async(
        **transform_kwargs
    )

    # Sync and async transforms must agree.
    assert sync_result == async_result

    # Assistant message: the toolUse block first, then the cachePoint.
    assistant_content = sync_result[1]["content"]
    assert len(assistant_content) == 2
    assert "toolUse" in assistant_content[0]
    assert assistant_content[0]["toolUse"]["name"] == "calc"
    assert assistant_content[0]["toolUse"]["toolUseId"] == "call_proxy_123"
    assert "cachePoint" in assistant_content[1]
    assert assistant_content[1]["cachePoint"]["type"] == "default"


@pytest.mark.asyncio
async def test_multiple_tool_calls_with_mixed_cache_control():
    """Test multiple tool calls where only some have cache_control."""
    from litellm.litellm_core_utils.prompt_templates.factory import (
        BedrockConverseMessagesProcessor,
        _bedrock_converse_messages_pt,
    )

    cached_call = {
        "id": "call_1",
        "type": "function",
        "function": {"name": "calc", "arguments": '{"expr": "2+2"}'},
        "cache_control": {"type": "ephemeral"}
    }
    uncached_call = {
        "id": "call_2",
        "type": "function",
        "function": {"name": "calc", "arguments": '{"expr": "3+3"}'}
        # No cache_control
    }
    transform_kwargs = {
        "messages": [
            {"role": "user", "content": "Do multiple calculations"},
            {
                "role": "assistant",
                "content": None,
                "tool_calls": [cached_call, uncached_call],
            },
        ],
        "model": "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
        "llm_provider": "bedrock_converse",
    }

    sync_result = _bedrock_converse_messages_pt(**transform_kwargs)
    async_result = await BedrockConverseMessagesProcessor._bedrock_converse_messages_pt_async(
        **transform_kwargs
    )

    # Sync and async transforms must agree.
    assert sync_result == async_result

    # Assistant message should contain: toolUse1, cachePoint, toolUse2.
    assistant_content = sync_result[1]["content"]
    assert len(assistant_content) == 3

    # First tool use (cached) is immediately followed by its cachePoint.
    assert "toolUse" in assistant_content[0]
    assert assistant_content[0]["toolUse"]["toolUseId"] == "call_1"
    assert "cachePoint" in assistant_content[1]
    assert assistant_content[1]["cachePoint"]["type"] == "default"

    # Second tool use has no cachePoint after it.
    assert "toolUse" in assistant_content[2]
    assert assistant_content[2]["toolUse"]["toolUseId"] == "call_2"


@pytest.mark.asyncio
async def test_no_cache_control_no_cache_point():
    """Test that messages without cache_control don't generate cachePoint blocks."""
    from litellm.litellm_core_utils.prompt_templates.factory import (
        BedrockConverseMessagesProcessor,
        _bedrock_converse_messages_pt,
    )

    transform_kwargs = {
        "messages": [
            {"role": "user", "content": "Hello"},
            {"role": "assistant", "content": "Hi there!"},  # No cache_control
            {
                "role": "tool",
                "tool_call_id": "call_123",
                "content": "Tool result"  # No cache_control
            },
        ],
        "model": "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
        "llm_provider": "bedrock_converse",
    }

    sync_result = _bedrock_converse_messages_pt(**transform_kwargs)
    async_result = await BedrockConverseMessagesProcessor._bedrock_converse_messages_pt_async(
        **transform_kwargs
    )

    # Sync and async transforms must agree.
    assert sync_result == async_result

    # Assistant message carries only its text block — no cachePoint.
    assistant_content = sync_result[1]["content"]
    assert len(assistant_content) == 1
    assert assistant_content[0]["text"] == "Hi there!"

    # Tool message carries only the toolResult block — no cachePoint.
    tool_content = sync_result[2]["content"]
    assert len(tool_content) == 1
    assert "toolResult" in tool_content[0]


# ============================================================================
# Guarded Text Feature Tests
# ============================================================================

def test_guarded_text_wraps_in_guardrail_converse_content():
    """Test that guarded_text content type gets wrapped in guardContent blocks."""
    from litellm.litellm_core_utils.prompt_templates.factory import (
        _bedrock_converse_messages_pt,
    )

    user_content = [
        {"type": "text", "text": "Regular text content"},
        {"type": "guarded_text", "text": "This should be guarded"},
        {"type": "text", "text": "More regular text"}
    ]

    result = _bedrock_converse_messages_pt(
        messages=[{"role": "user", "content": user_content}],
        model="us.amazon.nova-pro-v1:0",
        llm_provider="bedrock_converse"
    )

    # One user message with all three content blocks preserved in order.
    assert len(result) == 1
    assert result[0]["role"] == "user"
    content = result[0]["content"]
    assert len(content) == 3

    # The surrounding blocks stay as plain text.
    assert "text" in content[0]
    assert content[0]["text"] == "Regular text content"
    assert "text" in content[2]
    assert content[2]["text"] == "More regular text"

    # The guarded_text block is wrapped in guardContent.
    assert "guardContent" in content[1]
    assert content[1]["guardContent"]["text"]["text"] == "This should be guarded"

def test_guarded_text_with_system_messages():
    """Test guarded_text with system messages using the full transformation."""
    config = AmazonConverseConfig()

    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What is the main topic of this legal document?"},
                {"type": "guarded_text", "text": "This is a set of very long instructions that you will follow. Here is a legal document that you will use to answer the user's question."}
            ]
        }
    ]

    result = config._transform_request(
        model="us.amazon.nova-pro-v1:0",
        messages=messages,
        optional_params={
            "guardrailConfig": {
                "guardrailIdentifier": "gr-abc123",
                "guardrailVersion": "DRAFT"
            }
        },
        litellm_params={},
        headers={}
    )

    # The system message is hoisted into top-level system content blocks.
    assert "system" in result
    assert len(result["system"]) == 1
    assert result["system"][0]["text"] == "You are a helpful assistant."

    # Only the user message remains in the messages list.
    assert "messages" in result
    assert len(result["messages"]) == 1

    user_message = result["messages"][0]
    assert user_message["role"] == "user"
    content = user_message["content"]
    assert len(content) == 2

    # Plain text stays plain; guarded_text is wrapped in guardContent.
    assert "text" in content[0]
    assert content[0]["text"] == "What is the main topic of this legal document?"
    assert "guardContent" in content[1]
    assert content[1]["guardContent"]["text"]["text"] == "This is a set of very long instructions that you will follow. Here is a legal document that you will use to answer the user's question."


def test_guarded_text_with_mixed_content_types():
    """Test guarded_text with mixed content types including images."""
    from litellm.litellm_core_utils.prompt_templates.factory import (
        _bedrock_converse_messages_pt,
    )

    mixed_content = [
        {"type": "text", "text": "Look at this image"},
        {"type": "image_url", "image_url": {"url": "data:image/png;base64,test"}},
        {"type": "guarded_text", "text": "This sensitive content should be guarded"}
    ]

    result = _bedrock_converse_messages_pt(
        messages=[{"role": "user", "content": mixed_content}],
        model="us.amazon.nova-pro-v1:0",
        llm_provider="bedrock_converse"
    )

    # A single user message with one output block per input content item.
    assert len(result) == 1
    assert result[0]["role"] == "user"
    content = result[0]["content"]
    assert len(content) == 3

    # text -> text, image_url -> image, guarded_text -> guardContent.
    assert "text" in content[0]
    assert content[0]["text"] == "Look at this image"
    assert "image" in content[1]
    assert "guardContent" in content[2]
    assert content[2]["guardContent"]["text"]["text"] == "This sensitive content should be guarded"

@pytest.mark.asyncio
async def test_async_guarded_text():
    """Test async version of guarded_text processing."""
    from litellm.litellm_core_utils.prompt_templates.factory import (
        BedrockConverseMessagesProcessor,
    )

    user_message = {
        "role": "user",
        "content": [
            {"type": "text", "text": "Hello"},
            {"type": "guarded_text", "text": "This should be guarded"}
        ]
    }

    result = await BedrockConverseMessagesProcessor._bedrock_converse_messages_pt_async(
        messages=[user_message],
        model="us.amazon.nova-pro-v1:0",
        llm_provider="bedrock_converse"
    )

    # One user message carrying both content blocks.
    assert len(result) == 1
    assert result[0]["role"] == "user"
    content = result[0]["content"]
    assert len(content) == 2

    # Plain text is preserved; guarded_text becomes guardContent.
    assert "text" in content[0]
    assert content[0]["text"] == "Hello"
    assert "guardContent" in content[1]
    assert content[1]["guardContent"]["text"]["text"] == "This should be guarded"


def test_guarded_text_with_tool_calls():
    """Test guarded_text with tool calls in the conversation."""
    from litellm.litellm_core_utils.prompt_templates.factory import (
        _bedrock_converse_messages_pt,
    )

    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What's the weather?"},
                {"type": "guarded_text", "text": "Please be careful with sensitive information"}
            ]
        },
        {
            "role": "assistant",
            "content": None,
            "tool_calls": [
                {
                    "id": "call_123",
                    "type": "function",
                    "function": {"name": "get_weather", "arguments": "{}"}
                }
            ]
        },
        {
            "role": "tool",
            "tool_call_id": "call_123",
            "content": "It's sunny and 25°C"
        }
    ]

    result = _bedrock_converse_messages_pt(
        messages=messages,
        model="us.amazon.nova-pro-v1:0",
        llm_provider="bedrock_converse"
    )

    # All three messages survive the transform.
    assert len(result) == 3

    # The user message keeps its plain text and gains a guardContent block.
    user_message = result[0]
    assert user_message["role"] == "user"
    content = user_message["content"]
    assert len(content) == 2
    assert "text" in content[0]
    assert content[0]["text"] == "What's the weather?"
    assert "guardContent" in content[1]
    assert content[1]["guardContent"]["text"]["text"] == "Please be careful with sensitive information"

    # No guardContent leaks into the assistant/tool messages.
    for later_message in result[1:]:
        for block in later_message["content"]:
            assert "guardContent" not in block


def test_guarded_text_guardrail_config_preserved():
    """Test that guardrailConfig is preserved when using guarded_text."""
    config = AmazonConverseConfig()

    guardrail_config = {
        "guardrailIdentifier": "gr-abc123",
        "guardrailVersion": "DRAFT"
    }

    result = config._transform_request(
        model="us.amazon.nova-pro-v1:0",
        messages=[
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "Hello"},
                    {"type": "guarded_text", "text": "This should be guarded"}
                ]
            }
        ],
        optional_params={"guardrailConfig": guardrail_config},
        litellm_params={},
        headers={}
    )

    # guardrailConfig must survive at the top level of the request...
    assert "guardrailConfig" in result
    assert result["guardrailConfig"]["guardrailIdentifier"] == "gr-abc123"

    # ...and also be mirrored inside inferenceConfig.
    assert "inferenceConfig" in result
    assert "guardrailConfig" in result["inferenceConfig"]
    assert result["inferenceConfig"]["guardrailConfig"]["guardrailIdentifier"] == "gr-abc123"


def test_auto_convert_last_user_message_to_guarded_text():
    """The trailing user message is auto-wrapped as guarded_text when a guardrailConfig is supplied."""
    config = AmazonConverseConfig()

    guardrail_params = {
        "guardrailConfig": {
            "guardrailIdentifier": "gr-abc123",
            "guardrailVersion": "1",
        }
    }
    input_messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What is the main topic of this legal document?"}
            ],
        }
    ]

    result = config._convert_consecutive_user_messages_to_guarded_text(
        input_messages, guardrail_params
    )

    # Exactly one message, whose single content block is now guarded_text.
    assert len(result) == 1
    only_message = result[0]
    assert only_message["role"] == "user"
    blocks = only_message["content"]
    assert len(blocks) == 1
    assert blocks[0]["type"] == "guarded_text"
    assert blocks[0]["text"] == "What is the main topic of this legal document?"


def test_auto_convert_last_user_message_string_content():
    """A trailing user message with plain string content is wrapped into a single guarded_text block."""
    config = AmazonConverseConfig()

    guardrail_params = {
        "guardrailConfig": {
            "guardrailIdentifier": "gr-abc123",
            "guardrailVersion": "1",
        }
    }

    result = config._convert_consecutive_user_messages_to_guarded_text(
        [{"role": "user", "content": "What is the main topic of this legal document?"}],
        guardrail_params,
    )

    # The string content becomes a one-element guarded_text content list.
    assert len(result) == 1
    message = result[0]
    assert message["role"] == "user"
    blocks = message["content"]
    assert len(blocks) == 1
    assert blocks[0]["type"] == "guarded_text"
    assert blocks[0]["text"] == "What is the main topic of this legal document?"


def test_no_conversion_when_no_guardrail_config():
    """Without a guardrailConfig in optional_params, messages must pass through untouched."""
    config = AmazonConverseConfig()

    original_messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What is the main topic of this legal document?"}
            ],
        }
    ]

    # Empty optional_params -> no guardrail configured -> no rewriting.
    result = config._convert_consecutive_user_messages_to_guarded_text(
        original_messages, {}
    )

    assert result == original_messages


def test_no_conversion_when_guarded_text_already_present():
    """Messages whose last user turn already carries guarded_text are left exactly as-is."""
    config = AmazonConverseConfig()

    already_guarded = [
        {
            "role": "user",
            "content": [{"type": "guarded_text", "text": "This is already guarded"}],
        }
    ]
    guardrail_params = {
        "guardrailConfig": {
            "guardrailIdentifier": "gr-abc123",
            "guardrailVersion": "1",
        }
    }

    result = config._convert_consecutive_user_messages_to_guarded_text(
        already_guarded, guardrail_params
    )

    # Nothing should have been rewritten.
    assert result == already_guarded


def test_auto_convert_with_mixed_content():
    """Only text parts are rewritten to guarded_text; non-text parts (e.g. images) survive unchanged."""
    config = AmazonConverseConfig()

    mixed_messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What is the main topic of this legal document?"},
                {"type": "image_url", "image_url": {"url": "https://example.com/image.jpg"}},
            ],
        }
    ]
    guardrail_params = {
        "guardrailConfig": {
            "guardrailIdentifier": "gr-abc123",
            "guardrailVersion": "1",
        }
    }

    converted = config._convert_consecutive_user_messages_to_guarded_text(
        mixed_messages, guardrail_params
    )

    assert len(converted) == 1
    message = converted[0]
    assert message["role"] == "user"
    assert len(message["content"]) == 2
    text_part, image_part = message["content"]

    # The text element became guarded_text...
    assert text_part["type"] == "guarded_text"
    assert text_part["text"] == "What is the main topic of this legal document?"

    # ...while the image element is untouched.
    assert image_part["type"] == "image_url"
    assert image_part["image_url"]["url"] == "https://example.com/image.jpg"


def test_auto_convert_in_full_transformation():
    """End-to-end: _transform_request emits guardContent for the auto-converted user turn."""
    config = AmazonConverseConfig()

    result = config._transform_request(
        model="anthropic.claude-3-sonnet-20240229-v1:0",
        messages=[
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "What is the main topic of this legal document?"}
                ],
            }
        ],
        optional_params={
            "guardrailConfig": {
                "guardrailIdentifier": "gr-abc123",
                "guardrailVersion": "1",
            }
        },
        litellm_params={},
        headers={},
    )

    # Exactly one transformed message comes back.
    assert "messages" in result
    assert len(result["messages"]) == 1

    # Its sole content block is a guardContent wrapper around the prompt text.
    transformed = result["messages"][0]
    assert "content" in transformed
    blocks = transformed["content"]
    assert len(blocks) == 1
    assert "guardContent" in blocks[0]
    assert blocks[0]["guardContent"]["text"]["text"] == "What is the main topic of this legal document?"


def test_convert_consecutive_user_messages_to_guarded_text():
    """Only the trailing run of user messages is converted; earlier turns stay untouched."""
    config = AmazonConverseConfig()

    conversation = [
        {"role": "user", "content": [{"type": "text", "text": "First user message"}]},
        {"role": "assistant", "content": "Assistant response"},
        {"role": "user", "content": [{"type": "text", "text": "Second user message"}]},
        {"role": "user", "content": [{"type": "text", "text": "Third user message"}]},
    ]
    guardrail_params = {
        "guardrailConfig": {
            "guardrailIdentifier": "gr-abc123",
            "guardrailVersion": "1",
        }
    }

    converted = config._convert_consecutive_user_messages_to_guarded_text(
        conversation, guardrail_params
    )

    assert len(converted) == 4

    # The leading user turn is not part of the trailing run -> keeps plain text.
    leading = converted[0]
    assert leading["role"] == "user"
    assert leading["content"][0]["type"] == "text"
    assert leading["content"][0]["text"] == "First user message"

    # The assistant turn is untouched.
    assert converted[1]["role"] == "assistant"
    assert converted[1]["content"] == "Assistant response"

    # Both messages in the trailing consecutive user run are now guarded.
    for idx, expected_text in ((2, "Second user message"), (3, "Third user message")):
        message = converted[idx]
        assert message["role"] == "user"
        block = message["content"][0]
        assert block["type"] == "guarded_text"
        assert block["text"] == expected_text


def test_convert_all_user_messages_when_all_consecutive():
    """When every message is a user message, the whole list is one trailing run and is fully converted."""
    config = AmazonConverseConfig()

    texts = ["First user message", "Second user message", "Third user message"]
    conversation = [
        {"role": "user", "content": [{"type": "text", "text": t}]} for t in texts
    ]
    guardrail_params = {
        "guardrailConfig": {
            "guardrailIdentifier": "gr-abc123",
            "guardrailVersion": "1",
        }
    }

    converted = config._convert_consecutive_user_messages_to_guarded_text(
        conversation, guardrail_params
    )

    # Every one of the three user turns must now be guarded, in order.
    assert len(converted) == 3
    for message, expected_text in zip(converted, texts):
        assert message["role"] == "user"
        first_block = message["content"][0]
        assert first_block["type"] == "guarded_text"
        assert first_block["text"] == expected_text


def test_convert_consecutive_user_messages_with_string_content():
    """Trailing user messages given as plain strings are each wrapped into a guarded_text block."""
    config = AmazonConverseConfig()

    conversation = [
        {"role": "assistant", "content": "Assistant response"},
        {"role": "user", "content": "First user message"},
        {"role": "user", "content": "Second user message"},
    ]
    guardrail_params = {
        "guardrailConfig": {
            "guardrailIdentifier": "gr-abc123",
            "guardrailVersion": "1",
        }
    }

    converted = config._convert_consecutive_user_messages_to_guarded_text(
        conversation, guardrail_params
    )

    assert len(converted) == 3

    # The assistant turn passes through untouched.
    assert converted[0]["role"] == "assistant"
    assert converted[0]["content"] == "Assistant response"

    # Each trailing user turn becomes a single guarded_text content block.
    for idx, expected_text in ((1, "First user message"), (2, "Second user message")):
        message = converted[idx]
        assert message["role"] == "user"
        assert len(message["content"]) == 1
        assert message["content"][0]["type"] == "guarded_text"
        assert message["content"][0]["text"] == expected_text


def test_skip_consecutive_user_messages_with_existing_guarded_text():
    """A user message already containing guarded_text is skipped; its neighbors are still converted."""
    config = AmazonConverseConfig()

    conversation = [
        {
            "role": "user",
            "content": [{"type": "guarded_text", "text": "Already guarded"}],
        },
        {
            "role": "user",
            "content": [{"type": "text", "text": "Should be converted"}],
        },
    ]
    guardrail_params = {
        "guardrailConfig": {
            "guardrailIdentifier": "gr-abc123",
            "guardrailVersion": "1",
        }
    }

    converted = config._convert_consecutive_user_messages_to_guarded_text(
        conversation, guardrail_params
    )

    assert len(converted) == 2

    # The pre-guarded message is untouched.
    untouched = converted[0]
    assert untouched["role"] == "user"
    assert untouched["content"][0]["type"] == "guarded_text"
    assert untouched["content"][0]["text"] == "Already guarded"

    # The plain-text message got wrapped.
    wrapped = converted[1]
    assert wrapped["role"] == "user"
    assert wrapped["content"][0]["type"] == "guarded_text"
    assert wrapped["content"][0]["text"] == "Should be converted"


def test_request_metadata_parameter_support():
    """requestMetadata must be advertised among the supported OpenAI params for Converse models."""
    supported = AmazonConverseConfig().get_supported_openai_params(
        model="bedrock/converse/us.anthropic.claude-sonnet-4-20250514-v1:0"
    )
    assert "requestMetadata" in supported


def test_request_metadata_transformation():
    """requestMetadata from optional_params must surface as a top-level request field, unchanged."""
    config = AmazonConverseConfig()

    metadata = {
        "cost_center": "engineering",
        "user_id": "user123",
        "session_id": "sess_abc123",
    }

    request_data = config.transform_request(
        model="anthropic.claude-3-5-sonnet-20240620-v1:0",
        messages=[{"role": "user", "content": "Hello!"}],
        optional_params={"requestMetadata": metadata},
        litellm_params={},
        headers={},
    )

    # The metadata dict must be passed through verbatim at the top level.
    assert "requestMetadata" in request_data
    assert request_data["requestMetadata"] == metadata


def test_request_metadata_validation():
    """Validate requestMetadata size constraints: a small valid dict passes, >16 items raises.

    Rewritten from try/except + `assert False` to the idiomatic `pytest.raises`
    (the bare `assert False` pattern is stripped under `python -O` and is harder
    to read than the context-manager form).
    """
    config = AmazonConverseConfig()

    messages = [{"role": "user", "content": "Hello!"}]

    # A small, well-formed metadata dict must transform without error.
    valid_metadata = {
        "cost_center": "engineering",
        "user_id": "user123",
    }
    config.transform_request(
        model="anthropic.claude-3-5-sonnet-20240620-v1:0",
        messages=messages,
        optional_params={"requestMetadata": valid_metadata},
        litellm_params={},
        headers={},
    )

    # More than the allowed 16 entries must be rejected.
    too_many_items = {f"key_{i}": f"value_{i}" for i in range(17)}
    with pytest.raises(Exception) as excinfo:
        config.transform_request(
            model="anthropic.claude-3-5-sonnet-20240620-v1:0",
            messages=messages,
            optional_params={"requestMetadata": too_many_items},
            litellm_params={},
            headers={},
        )
    assert "maximum of 16 items" in str(excinfo.value).lower()


def test_request_metadata_key_constraints():
    """Key constraints: keys longer than 256 characters and empty keys must be rejected.

    Rewritten from try/except + `assert False` to the idiomatic `pytest.raises`
    (the bare `assert False` pattern is stripped under `python -O`).
    """
    config = AmazonConverseConfig()

    messages = [{"role": "user", "content": "Hello!"}]

    # Key exceeding the 256-character limit must raise.
    long_key = "a" * 257
    with pytest.raises(Exception) as excinfo:
        config.transform_request(
            model="anthropic.claude-3-5-sonnet-20240620-v1:0",
            messages=messages,
            optional_params={"requestMetadata": {long_key: "value"}},
            litellm_params={},
            headers={},
        )
    err = str(excinfo.value).lower()
    assert "key length" in err or "256 characters" in err

    # An empty key must also be rejected.
    with pytest.raises(Exception) as excinfo:
        config.transform_request(
            model="anthropic.claude-3-5-sonnet-20240620-v1:0",
            messages=messages,
            optional_params={"requestMetadata": {"": "value"}},
            litellm_params={},
            headers={},
        )
    err = str(excinfo.value).lower()
    assert "key length" in err or "empty" in err


def test_request_metadata_value_constraints():
    """Value constraints: values longer than 256 characters are rejected; empty values are allowed.

    Rewritten from try/except + `assert False` to the idiomatic `pytest.raises`
    (the bare `assert False` pattern is stripped under `python -O`).
    """
    config = AmazonConverseConfig()

    messages = [{"role": "user", "content": "Hello!"}]

    # Value exceeding the 256-character limit must raise.
    long_value = "a" * 257
    with pytest.raises(Exception) as excinfo:
        config.transform_request(
            model="anthropic.claude-3-5-sonnet-20240620-v1:0",
            messages=messages,
            optional_params={"requestMetadata": {"key": long_value}},
            litellm_params={},
            headers={},
        )
    err = str(excinfo.value).lower()
    assert "value length" in err or "256 characters" in err

    # An empty value is explicitly allowed and must not raise.
    config.transform_request(
        model="anthropic.claude-3-5-sonnet-20240620-v1:0",
        messages=messages,
        optional_params={"requestMetadata": {"key": ""}},
        litellm_params={},
        headers={},
    )


def test_request_metadata_character_pattern():
    """Keys and values containing the allowed special characters must be accepted."""
    config = AmazonConverseConfig()

    metadata_with_special_chars = {
        "cost-center_2024": "engineering@team#1",
        "user:id": "$100.00",
        "session+token": "/path/to=resource",
    }

    # The transformation must complete without raising for any of these characters.
    config.transform_request(
        model="anthropic.claude-3-5-sonnet-20240620-v1:0",
        messages=[{"role": "user", "content": "Hello!"}],
        optional_params={"requestMetadata": metadata_with_special_chars},
        litellm_params={},
        headers={},
    )


def test_request_metadata_with_other_params():
    """requestMetadata must coexist with tools, max_tokens and temperature in a single request."""
    config = AmazonConverseConfig()

    metadata = {
        "experiment": "test_A",
        "user_type": "premium"
    }
    weather_tool = {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather",
            "parameters": {
                "type": "object",
                "properties": {"location": {"type": "string"}},
                "required": ["location"],
            },
        },
    }

    request_data = config.transform_request(
        model="anthropic.claude-3-5-sonnet-20240620-v1:0",
        messages=[{"role": "user", "content": "What's the weather?"}],
        optional_params={
            "requestMetadata": metadata,
            "tools": [weather_tool],
            "max_tokens": 100,
            "temperature": 0.7,
        },
        litellm_params={},
        headers={},
    )

    # Metadata surfaces at the top level...
    assert "requestMetadata" in request_data
    assert request_data["requestMetadata"] == metadata

    # ...while the other params still land in their usual places.
    assert "toolConfig" in request_data
    assert "inferenceConfig" in request_data
    assert request_data["inferenceConfig"]["temperature"] == 0.7


def test_request_metadata_empty():
    """An empty requestMetadata dict is legal and passes through unchanged."""
    request_data = AmazonConverseConfig().transform_request(
        model="anthropic.claude-3-5-sonnet-20240620-v1:0",
        messages=[{"role": "user", "content": "Hello!"}],
        optional_params={"requestMetadata": {}},
        litellm_params={},
        headers={},
    )

    # The empty dict is preserved, not dropped.
    assert "requestMetadata" in request_data
    assert request_data["requestMetadata"] == {}


def test_request_metadata_not_provided():
    """When the caller supplies no requestMetadata, none should appear in the request."""
    request_data = AmazonConverseConfig().transform_request(
        model="anthropic.claude-3-5-sonnet-20240620-v1:0",
        messages=[{"role": "user", "content": "Hello!"}],
        optional_params={},
        litellm_params={},
        headers={},
    )

    # No spurious requestMetadata field may be injected.
    assert "requestMetadata" not in request_data


def test_empty_assistant_message_handling():
    """
    Test that empty assistant messages are handled correctly by replacing
    empty or whitespace-only content with a placeholder to prevent AWS Bedrock
    Converse API 400 Bad Request errors.

    Covers four cases under modify_params=True: empty string content,
    whitespace-only content, an empty text block inside list content, and
    (as a control) normal content that must remain untouched.
    """
    from litellm.litellm_core_utils.prompt_templates.factory import (
        _bedrock_converse_messages_pt,
    )
    # Import the litellm module that factory.py uses to ensure we patch the correct reference
    import litellm.litellm_core_utils.prompt_templates.factory as factory_module

    # Test case 1: Empty string content - test with modify_params=True to prevent merging
    messages = [
        {"role": "user", "content": "Hello"},
        {"role": "assistant", "content": ""},  # Empty content
        {"role": "user", "content": "How are you?"}
    ]

    # Use patch to ensure we modify the litellm reference that factory.py actually uses
    # This avoids issues with module reloading during parallel test execution
    with patch.object(factory_module.litellm, "modify_params", True):
        result = _bedrock_converse_messages_pt(
            messages=messages,
            model="anthropic.claude-3-5-sonnet-20240620-v1:0",
            llm_provider="bedrock_converse"
        )

        # Should have 3 messages: user, assistant (with placeholder), user
        assert len(result) == 3
        assert result[0]["role"] == "user"
        assert result[1]["role"] == "assistant"
        assert result[2]["role"] == "user"

        # Assistant message should have placeholder text instead of empty content
        # When modify_params=True, empty assistant messages get replaced with DEFAULT_ASSISTANT_CONTINUE_MESSAGE
        assert len(result[1]["content"]) == 1
        assert result[1]["content"][0]["text"] == "Please continue."

        # Test case 2: Whitespace-only content (still runs inside the patch context)
        messages = [
            {"role": "user", "content": "Hello"},
            {"role": "assistant", "content": "   "},  # Whitespace-only content
            {"role": "user", "content": "How are you?"}
        ]

        result = _bedrock_converse_messages_pt(
            messages=messages,
            model="anthropic.claude-3-5-sonnet-20240620-v1:0",
            llm_provider="bedrock_converse"
        )

        # Assistant message should have placeholder text instead of whitespace
        assert len(result[1]["content"]) == 1
        assert result[1]["content"][0]["text"] == "Please continue."

        # Test case 3: Empty list content
        messages = [
            {"role": "user", "content": "Hello"},
            {"role": "assistant", "content": [{"type": "text", "text": ""}]},  # Empty text in list
            {"role": "user", "content": "How are you?"}
        ]

        result = _bedrock_converse_messages_pt(
            messages=messages,
            model="anthropic.claude-3-5-sonnet-20240620-v1:0",
            llm_provider="bedrock_converse"
        )

        # Assistant message should have placeholder text instead of empty text
        assert len(result[1]["content"]) == 1
        assert result[1]["content"][0]["text"] == "Please continue."

        # Test case 4: Normal content should not be affected (control case)
        messages = [
            {"role": "user", "content": "Hello"},
            {"role": "assistant", "content": "I'm doing well, thank you!"},  # Normal content
            {"role": "user", "content": "How are you?"}
        ]

        result = _bedrock_converse_messages_pt(
            messages=messages,
            model="anthropic.claude-3-5-sonnet-20240620-v1:0",
            llm_provider="bedrock_converse"
        )

        # Assistant message should keep original content
        assert len(result[1]["content"]) == 1
        assert result[1]["content"][0]["text"] == "I'm doing well, thank you!"


def test_is_nova_2_model():
    """Test the _is_nova_2_model() method for detecting Nova 2 models.

    Only the nova-2-lite family (optionally with a regional prefix) should
    match; all other Nova and non-Nova model IDs must return False.
    """
    config = AmazonConverseConfig()

    # Base Nova 2 lite model should match
    assert config._is_nova_2_model("amazon.nova-2-lite-v1:0") is True

    # Regional prefixes (us/eu/apac) should still match
    assert config._is_nova_2_model("us.amazon.nova-2-lite-v1:0") is True
    assert config._is_nova_2_model("eu.amazon.nova-2-lite-v1:0") is True
    assert config._is_nova_2_model("apac.amazon.nova-2-lite-v1:0") is True

    # Nova pro/micro "1-5" variants should NOT match (only nova-2-lite counts)
    assert config._is_nova_2_model("amazon.nova-pro-1-5-v1:0") is False
    assert config._is_nova_2_model("amazon.nova-micro-1-5-v1:0") is False
    assert config._is_nova_2_model("us.amazon.nova-pro-1-5-v1:0") is False
    assert config._is_nova_2_model("eu.amazon.nova-micro-1-5-v1:0") is False

    # Original (non-"2") Nova lite/pro/micro models should return False
    assert config._is_nova_2_model("amazon.nova-lite-v1:0") is False
    assert config._is_nova_2_model("amazon.nova-pro-v1:0") is False
    assert config._is_nova_2_model("amazon.nova-micro-v1:0") is False

    # Regional non-"2" Nova models should also return False
    assert config._is_nova_2_model("us.amazon.nova-lite-v1:0") is False
    assert config._is_nova_2_model("eu.amazon.nova-pro-v1:0") is False

    # Completely different model families should return False
    assert config._is_nova_2_model("anthropic.claude-3-5-sonnet-20240620-v1:0") is False
    assert config._is_nova_2_model("meta.llama3-70b-instruct-v1:0") is False
    assert config._is_nova_2_model("mistral.mistral-7b-instruct-v0:2") is False


def test_thinking_with_max_completion_tokens():
    """Test that thinking respects max_completion_tokens parameter.

    When the caller supplies max_completion_tokens or max_tokens, that value
    must be used for maxTokens as-is; only when neither is given should
    maxTokens be auto-derived from budget_tokens + DEFAULT_MAX_TOKENS.
    """
    config = AmazonConverseConfig()

    # Test case 1: max_completion_tokens is specified - the caller's value is used
    # for maxTokens; it must NOT be auto-derived from budget_tokens
    non_default_params_with_max_completion = {
        "thinking": {"type": "enabled", "budget_tokens": 5000},
        "max_completion_tokens": 10000,
    }
    optional_params = {}

    result = config.map_openai_params(
        non_default_params=non_default_params_with_max_completion,
        optional_params=optional_params,
        model="us.anthropic.claude-3-7-sonnet-20250219-v1:0",
        drop_params=False,
    )

    # Should have maxTokens set to max_completion_tokens value
    assert "maxTokens" in result
    assert result["maxTokens"] == 10000
    # Should have thinking config
    assert "thinking" in result
    assert result["thinking"]["type"] == "enabled"
    assert result["thinking"]["budget_tokens"] == 5000

    # Test case 2: max_tokens is specified - likewise, the caller's value wins
    # and no auto-derivation from budget_tokens happens
    non_default_params_with_max_tokens = {
        "thinking": {"type": "enabled", "budget_tokens": 5000},
        "max_tokens": 8000,
    }
    optional_params = {}

    result = config.map_openai_params(
        non_default_params=non_default_params_with_max_tokens,
        optional_params=optional_params,
        model="us.anthropic.claude-3-7-sonnet-20250219-v1:0",
        drop_params=False,
    )

    # Should have maxTokens set to max_tokens value
    assert "maxTokens" in result
    assert result["maxTokens"] == 8000
    # Should have thinking config
    assert "thinking" in result
    assert result["thinking"]["type"] == "enabled"
    assert result["thinking"]["budget_tokens"] == 5000

    # Test case 3: Neither max_tokens nor max_completion_tokens specified - should set maxTokens automatically
    from litellm.constants import DEFAULT_MAX_TOKENS

    non_default_params_without_max = {
        "thinking": {"type": "enabled", "budget_tokens": 5000},
    }
    optional_params = {}

    result = config.map_openai_params(
        non_default_params=non_default_params_without_max,
        optional_params=optional_params,
        model="us.anthropic.claude-3-7-sonnet-20250219-v1:0",
        drop_params=False,
    )

    # Should have maxTokens set to budget_tokens + DEFAULT_MAX_TOKENS
    assert "maxTokens" in result
    assert result["maxTokens"] == 5000 + DEFAULT_MAX_TOKENS
    # Should have thinking config
    assert "thinking" in result
    assert result["thinking"]["type"] == "enabled"
    assert result["thinking"]["budget_tokens"] == 5000

def _simulate_thinking_drop(optional_params: dict, messages: list) -> None:
    """Replicate the guard used by _transform_request_helper.

    Drops the 'thinking' key from ``optional_params`` in place when
    ``litellm.modify_params`` is enabled and the last assistant message
    with tool_calls has no thinking_blocks.
    """
    from litellm.utils import last_assistant_with_tool_calls_has_no_thinking_blocks

    if (
        optional_params.get("thinking") is not None
        and messages is not None
        and last_assistant_with_tool_calls_has_no_thinking_blocks(messages)
    ):
        if litellm.modify_params:
            optional_params.pop("thinking", None)


def test_drop_thinking_param_when_thinking_blocks_missing():
    """
    Test that thinking param is dropped when modify_params=True and
    thinking_blocks are missing from assistant message with tool_calls.

    This prevents the Anthropic/Bedrock error:
    "Expected thinking or redacted_thinking, but found tool_use"

    Related issue: https://github.com/BerriAI/litellm/issues/14194
    """
    from litellm.utils import last_assistant_with_tool_calls_has_no_thinking_blocks

    # Save original modify_params so the global flag is restored even if an
    # assertion below fails.
    original_modify_params = litellm.modify_params

    try:
        # Test case 1: thinking should be dropped when modify_params=True
        # and assistant message has tool_calls but no thinking_blocks
        litellm.modify_params = True

        messages_without_thinking_blocks = [
            {"role": "user", "content": "Search for weather"},
            {
                "role": "assistant",
                "content": "",
                "tool_calls": [
                    {
                        "id": "call_123",
                        "type": "function",
                        "function": {"name": "search", "arguments": "{}"},
                    }
                ],
                # No thinking_blocks - simulates OpenAI-compatible client
            },
            {"role": "tool", "content": "Weather is sunny", "tool_call_id": "call_123"},
        ]

        optional_params = {"thinking": {"type": "enabled", "budget_tokens": 1000}}

        # Verify the condition is detected
        assert last_assistant_with_tool_calls_has_no_thinking_blocks(
            messages_without_thinking_blocks
        ), "Should detect missing thinking_blocks"

        _simulate_thinking_drop(optional_params, messages_without_thinking_blocks)

        assert "thinking" not in optional_params, (
            "thinking param should be dropped when modify_params=True "
            "and thinking_blocks are missing"
        )

        # Test case 2: thinking should NOT be dropped when thinking_blocks are present
        messages_with_thinking_blocks = [
            {"role": "user", "content": "Search for weather"},
            {
                "role": "assistant",
                "content": "",
                "tool_calls": [
                    {
                        "id": "call_123",
                        "type": "function",
                        "function": {"name": "search", "arguments": "{}"},
                    }
                ],
                "thinking_blocks": [
                    {"type": "thinking", "thinking": "Let me search for weather..."}
                ],
            },
            {"role": "tool", "content": "Weather is sunny", "tool_call_id": "call_123"},
        ]

        optional_params_with_thinking = {
            "thinking": {"type": "enabled", "budget_tokens": 1000}
        }

        # Verify the condition is NOT detected when thinking_blocks are present
        assert not last_assistant_with_tool_calls_has_no_thinking_blocks(
            messages_with_thinking_blocks
        ), "Should NOT detect missing thinking_blocks when they are present"

        _simulate_thinking_drop(
            optional_params_with_thinking, messages_with_thinking_blocks
        )

        assert "thinking" in optional_params_with_thinking, (
            "thinking param should NOT be dropped when thinking_blocks are present"
        )

        # Test case 3: thinking should NOT be dropped when modify_params=False
        litellm.modify_params = False

        optional_params_no_modify = {
            "thinking": {"type": "enabled", "budget_tokens": 1000}
        }

        _simulate_thinking_drop(
            optional_params_no_modify, messages_without_thinking_blocks
        )

        assert "thinking" in optional_params_no_modify, (
            "thinking param should NOT be dropped when modify_params=False"
        )

    finally:
        # Restore original modify_params setting
        litellm.modify_params = original_modify_params


def test_supports_native_structured_outputs():
    """Test model detection for native structured outputs support."""
    config = AmazonConverseConfig()

    # Models expected to support native structured outputs.
    supported_models = [
        "anthropic.claude-sonnet-4-5-20250929-v1:0",
        "anthropic.claude-haiku-4-5-20251001-v1:0",
        "anthropic.claude-opus-4-6-v1:0",
        "eu.anthropic.claude-opus-4-5-20260101-v1:0",
        "qwen.qwen3-235b-instruct-v1:0",
        "mistral.mistral-large-3-v1:0",
        "deepseek.deepseek-v3.1-v1:0",
    ]
    for model_id in supported_models:
        assert config._supports_native_structured_outputs(model_id), model_id

    # Unsupported models — should fall back to tool-call approach.
    # The last two are excluded despite AWS listing them: broken constrained
    # decoding on Bedrock.
    unsupported_models = [
        "anthropic.claude-3-5-sonnet-20241022-v2:0",
        "anthropic.claude-sonnet-4-20250514-v1:0",
        "meta.llama3-3-70b-instruct-v1:0",
        "amazon.nova-pro-v1:0",
        "openai.gpt-oss-120b-1:0",
        "mistral.magistral-small-2509",
    ]
    for model_id in unsupported_models:
        assert not config._supports_native_structured_outputs(model_id), model_id


def test_create_output_config_for_response_format():
    """Test outputConfig dict creation from JSON schema."""
    config = AmazonConverseConfig()

    person_schema = {
        "type": "object",
        "properties": {
            "name": {"type": "string"},
            "age": {"type": "integer"},
        },
        "required": ["name", "age"],
    }

    result = config._create_output_config_for_response_format(
        json_schema=person_schema,
        name="PersonInfo",
        description="A person's info",
    )

    assert "textFormat" in result
    fmt = result["textFormat"]
    assert fmt["type"] == "json_schema"
    assert "structure" in fmt

    json_schema_def = fmt["structure"]["jsonSchema"]
    assert json_schema_def["name"] == "PersonInfo"
    assert json_schema_def["description"] == "A person's info"
    # schema field must be a JSON string, not a dict
    schema_str = json_schema_def["schema"]
    assert isinstance(schema_str, str)
    # additionalProperties: false is injected by normalization
    assert json.loads(schema_str) == {**person_schema, "additionalProperties": False}


def test_translate_response_format_native_output_config():
    """For supported models, _translate_response_format_param should produce outputConfig."""
    config = AmazonConverseConfig()

    weather_schema = {
        "type": "object",
        "properties": {
            "temp": {"type": "number"},
        },
        "required": ["temp"],
    }
    response_format = {
        "type": "json_schema",
        "json_schema": {
            "name": "WeatherResult",
            "description": "Weather info",
            "schema": weather_schema,
        },
    }

    translated = config._translate_response_format_param(
        value=response_format,
        model="anthropic.claude-sonnet-4-5-20250929-v1:0",
        optional_params={},
        non_default_params={"response_format": response_format},
        is_thinking_enabled=False,
    )

    # Native path: outputConfig is emitted instead of the tool-call fallback.
    assert "outputConfig" in translated
    assert "tools" not in translated
    assert "tool_choice" not in translated
    assert translated["json_mode"] is True
    # No fake_stream for native approach
    assert "fake_stream" not in translated

    # Verify the schema content (additionalProperties: false is added by normalization)
    json_schema_def = translated["outputConfig"]["textFormat"]["structure"]["jsonSchema"]
    assert json_schema_def["name"] == "WeatherResult"
    assert json.loads(json_schema_def["schema"]) == {
        **weather_schema,
        "additionalProperties": False,
    }


def test_translate_response_format_fallback_tool_call():
    """For unsupported models, should fall back to tool-call approach."""
    config = AmazonConverseConfig()

    fmt = {
        "type": "json_schema",
        "json_schema": {
            "name": "WeatherResult",
            "schema": {
                "type": "object",
                "properties": {
                    "temp": {"type": "number"},
                },
            },
        },
    }

    translated = config._translate_response_format_param(
        value=fmt,
        model="anthropic.claude-3-5-sonnet-20241022-v2:0",
        optional_params={},
        non_default_params={"response_format": fmt},
        is_thinking_enabled=False,
    )

    # Should use tool-call approach, NOT outputConfig
    assert "outputConfig" not in translated
    assert "tools" in translated
    assert translated["json_mode"] is True


def test_native_structured_output_no_fake_stream():
    """When using native structured outputs with streaming, fake_stream should NOT be set."""
    config = AmazonConverseConfig()

    answer_schema = {
        "type": "object",
        "properties": {
            "answer": {"type": "string"},
        },
    }
    response_format = {
        "type": "json_schema",
        "json_schema": {"name": "Result", "schema": answer_schema},
    }

    translated = config._translate_response_format_param(
        value=response_format,
        model="anthropic.claude-sonnet-4-5-20250929-v1:0",
        optional_params={},
        non_default_params={"response_format": response_format, "stream": True},
        is_thinking_enabled=False,
    )

    assert "outputConfig" in translated
    assert translated["json_mode"] is True
    # No fake_stream for native approach
    assert "fake_stream" not in translated

    # Verify the schema content (additionalProperties: false added by normalization)
    schema_str = translated["outputConfig"]["textFormat"]["structure"]["jsonSchema"]["schema"]
    assert json.loads(schema_str) == {**answer_schema, "additionalProperties": False}


def test_transform_request_with_output_config():
    """Test that outputConfig flows through _transform_request_helper into the final request."""
    from litellm.types.llms.bedrock import (
        JsonSchemaDefinition,
        OutputConfigBlock,
        OutputFormat,
        OutputFormatStructure,
    )

    config = AmazonConverseConfig()

    cfg_block = OutputConfigBlock(
        textFormat=OutputFormat(
            type="json_schema",
            structure=OutputFormatStructure(
                jsonSchema=JsonSchemaDefinition(
                    schema='{"type": "object", "properties": {"x": {"type": "string"}}, "additionalProperties": false}',
                    name="TestSchema",
                )
            ),
        )
    )

    request_body = config._transform_request(
        model="anthropic.claude-sonnet-4-5-20250929-v1:0",
        messages=[{"role": "user", "content": "test"}],
        optional_params={"outputConfig": cfg_block, "json_mode": True},
        litellm_params={},
        headers={},
    )

    assert "outputConfig" in request_body
    text_format = request_body["outputConfig"]["textFormat"]
    assert text_format["type"] == "json_schema"
    assert text_format["structure"]["jsonSchema"]["name"] == "TestSchema"


def test_transform_response_native_structured_output():
    """Test response handling when model returns JSON as text content (native structured output)."""
    # Raw Converse payload: the structured output arrives as a plain text
    # content block, with no toolUse entry.
    response_json = {
        "output": {
            "message": {
                "role": "assistant",
                "content": [
                    {
                        "text": '{"temp": 62, "description": "Mild and foggy"}'
                    }
                ],
            }
        },
        "stopReason": "end_turn",
        "usage": {
            "inputTokens": 10,
            "outputTokens": 20,
            "totalTokens": 30,
        },
    }

    class MockResponse:
        # Test double exposing only the .json() and .text members that
        # _transform_response is handed here.
        def json(self):
            return response_json

        @property
        def text(self):
            return json.dumps(response_json)

    config = AmazonConverseConfig()
    model_response = ModelResponse()
    # json_mode=True but no tool_call in response — native structured output path
    optional_params = {"json_mode": True}

    result = config._transform_response(
        model="anthropic.claude-sonnet-4-5-20250929-v1:0",
        response=MockResponse(),
        model_response=model_response,
        stream=False,
        logging_obj=None,
        optional_params=optional_params,
        api_key=None,
        data={},
        messages=[],
        encoding=None,
    )

    # Content should be the JSON text directly
    assert result.choices[0].message.content == '{"temp": 62, "description": "Mild and foggy"}'
    # Should NOT have tool_calls
    assert result.choices[0].message.tool_calls is None
    assert result.choices[0].finish_reason == "stop"


def test_add_additional_properties_simple_object():
    """Object schemas without additionalProperties get it set to false."""
    input_schema = {
        "type": "object",
        "properties": {
            "city": {"type": "string"},
            "country": {"type": "string"},
        },
        "required": ["city", "country"],
    }
    normalized = AmazonConverseConfig._add_additional_properties_to_schema(input_schema)
    assert normalized["additionalProperties"] is False
    # Original should not be mutated
    assert "additionalProperties" not in input_schema


def test_add_additional_properties_already_set():
    """If additionalProperties is already set, don't overwrite it."""
    open_schema = {
        "type": "object",
        "properties": {"x": {"type": "string"}},
        "additionalProperties": True,
    }
    normalized = AmazonConverseConfig._add_additional_properties_to_schema(open_schema)
    # Explicit True must survive normalization.
    assert normalized["additionalProperties"] is True


def test_add_additional_properties_nested():
    """Recursively processes nested object types in properties, items, $defs, anyOf."""
    input_schema = {
        "type": "object",
        "properties": {
            "address": {
                "type": "object",
                "properties": {
                    "street": {"type": "string"},
                    "zip": {"type": "string"},
                },
            },
            "tags": {
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {"name": {"type": "string"}},
                },
            },
        },
        "$defs": {
            "Metadata": {
                "type": "object",
                "properties": {"key": {"type": "string"}},
            }
        },
        "anyOf": [
            {
                "type": "object",
                "properties": {"variant": {"type": "string"}},
            }
        ],
    }
    out = AmazonConverseConfig._add_additional_properties_to_schema(input_schema)
    # Every object node — top level, nested property, array items,
    # $defs entry, and anyOf variant — must be closed.
    closed_nodes = (
        out,
        out["properties"]["address"],
        out["properties"]["tags"]["items"],
        out["$defs"]["Metadata"],
        out["anyOf"][0],
    )
    for node in closed_nodes:
        assert node["additionalProperties"] is False


def test_add_additional_properties_non_object():
    """Non-object schemas are returned unchanged."""
    string_schema = {"type": "string"}
    out = AmazonConverseConfig._add_additional_properties_to_schema(string_schema)
    assert out == {"type": "string"}
    assert "additionalProperties" not in out


def test_add_additional_properties_definitions():
    """Recursively processes object types inside 'definitions' (not just '$defs')."""
    input_schema = {
        "type": "object",
        "properties": {
            "item": {"$ref": "#/definitions/Item"},
        },
        "definitions": {
            "Item": {
                "type": "object",
                "properties": {
                    "name": {"type": "string"},
                    "details": {
                        "type": "object",
                        "properties": {"weight": {"type": "number"}},
                    },
                },
            }
        },
    }
    out = AmazonConverseConfig._add_additional_properties_to_schema(input_schema)
    item_def = out["definitions"]["Item"]
    # Top level, the definitions entry, and the object nested inside it
    # must all be closed.
    for node in (out, item_def, item_def["properties"]["details"]):
        assert node["additionalProperties"] is False


def test_json_object_no_schema_falls_back_to_tool_call():
    """response_format: {type: json_object} with no schema should use tool-call fallback,
    even for models that support native structured outputs."""
    config = AmazonConverseConfig()
    json_object_format = {"type": "json_object"}

    translated = config._translate_response_format_param(
        value=json_object_format,
        model="anthropic.claude-sonnet-4-5-20250929-v1:0",
        optional_params={},
        non_default_params={"response_format": json_object_format},
        is_thinking_enabled=False,
    )

    # Should NOT use native outputConfig (no schema provided)
    assert "outputConfig" not in translated
    # Should use tool-call fallback
    assert "tools" in translated
    assert translated["json_mode"] is True


def test_output_config_applies_additional_properties():
    """_create_output_config_for_response_format normalizes the schema."""
    nested_schema = {
        "type": "object",
        "properties": {
            "name": {"type": "string"},
            "nested": {
                "type": "object",
                "properties": {"val": {"type": "integer"}},
            },
        },
    }
    result = AmazonConverseConfig._create_output_config_for_response_format(
        json_schema=nested_schema, name="test_schema"
    )
    normalized = json.loads(result["textFormat"]["structure"]["jsonSchema"]["schema"])
    # Both the top-level object and the nested one get additionalProperties: false.
    assert normalized["additionalProperties"] is False
    assert normalized["properties"]["nested"]["additionalProperties"] is False



# Shared OpenAI-style tool definition used by the parallel_tool_calls tests.
_TOOL_PARAM = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the weather",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The location to get weather for",
                    }
                },
                "required": ["location"],
            },
        },
    }
]


def test_parallel_tool_calls_newer_model_adds_disable_flag():
    """Newer Claude models (4.5+) should get disable_parallel_tool_use in additionalModelRequestFields."""
    config = AmazonConverseConfig()
    model_id = "anthropic.claude-sonnet-4-5-20250929-v1:0"

    mapped_params = config.map_openai_params(
        non_default_params={"parallel_tool_calls": False, "tools": _TOOL_PARAM},
        optional_params={},
        model=model_id,
        drop_params=False,
    )

    request_data = config.transform_request(
        model=model_id,
        messages=[{"role": "user", "content": "What's the weather in SF and NYC?"}],
        optional_params=mapped_params,
        litellm_params={},
        headers={},
    )

    assert "additionalModelRequestFields" in request_data
    extra_fields = request_data["additionalModelRequestFields"]
    # Converse expects the Anthropic-native tool_choice flag, not the
    # OpenAI parallel_tool_calls name.
    assert "tool_choice" in extra_fields
    assert extra_fields["tool_choice"]["disable_parallel_tool_use"] is True
    assert "parallel_tool_calls" not in extra_fields


def test_parallel_tool_calls_older_model_drops_disable_flag():
    """Older Claude models (pre-4.5) must NOT receive disable_parallel_tool_use — Bedrock rejects it."""
    config = AmazonConverseConfig()
    model_id = "anthropic.claude-3-5-sonnet-20241022-v2:0"

    mapped_params = config.map_openai_params(
        non_default_params={"parallel_tool_calls": False, "tools": _TOOL_PARAM},
        optional_params={},
        model=model_id,
        drop_params=False,
    )

    request_data = config.transform_request(
        model=model_id,
        messages=[{"role": "user", "content": "What's the weather in SF and NYC?"}],
        optional_params=mapped_params,
        litellm_params={},
        headers={},
    )

    # Neither spelling of the flag may reach the request body.
    extra_fields = request_data.get("additionalModelRequestFields", {})
    assert "tool_choice" not in extra_fields
    assert "parallel_tool_calls" not in extra_fields


class TestBedrockMinThinkingBudgetTokens:
    """Test that thinking.budget_tokens is clamped to the Bedrock minimum (1024)."""

    def _map_params(
        self, thinking_value, model="anthropic.claude-3-7-sonnet-20250219-v1:0"
    ):
        """Helper to call map_openai_params with the given thinking value."""
        config = AmazonConverseConfig()
        # Supply the value through both parameter dicts passed to the mapper.
        non_default_params = {"thinking": thinking_value}
        optional_params = {"thinking": thinking_value}
        return config.map_openai_params(
            non_default_params=non_default_params,
            optional_params=optional_params,
            model=model,
            drop_params=False,
        )

    def test_budget_tokens_below_minimum_is_clamped(self):
        """budget_tokens < 1024 should be clamped to 1024."""
        result = self._map_params({"type": "enabled", "budget_tokens": 499})
        assert result["thinking"]["budget_tokens"] == 1024

    def test_budget_tokens_at_minimum_is_unchanged(self):
        """budget_tokens == 1024 should remain 1024."""
        result = self._map_params({"type": "enabled", "budget_tokens": 1024})
        assert result["thinking"]["budget_tokens"] == 1024

    def test_budget_tokens_above_minimum_is_unchanged(self):
        """budget_tokens > 1024 should remain unchanged."""
        result = self._map_params({"type": "enabled", "budget_tokens": 2048})
        assert result["thinking"]["budget_tokens"] == 2048

    def test_no_thinking_param_does_not_error(self):
        """When thinking is not provided, map_openai_params should not raise."""
        config = AmazonConverseConfig()
        result = config.map_openai_params(
            non_default_params={},
            optional_params={},
            model="anthropic.claude-3-7-sonnet-20250219-v1:0",
            drop_params=False,
        )
        # Either absent or explicitly None is acceptable here.
        assert "thinking" not in result or result.get("thinking") is None

def test_transform_response_with_both_json_tool_call_and_real_tool():
    """
    When Bedrock returns BOTH json_tool_call AND a real tool (get_weather),
    only the real tool should remain in tool_calls. The json_tool_call should be filtered out.
    Fixes https://github.com/BerriAI/litellm/issues/18381
    """
    from litellm.llms.bedrock.chat.converse_transformation import AmazonConverseConfig
    from litellm.types.utils import ModelResponse

    # Converse payload with two toolUse blocks: the synthetic json_tool_call
    # plus a genuine get_weather call.
    response_json = {
        "metrics": {"latencyMs": 200},
        "output": {
            "message": {
                "role": "assistant",
                "content": [
                    {
                        "toolUse": {
                            "toolUseId": "tooluse_json_001",
                            "name": "json_tool_call",
                            "input": {
                                "Current_Temperature": 62,
                                "Weather_Explanation": "Mild and cool.",
                            },
                        }
                    },
                    {
                        "toolUse": {
                            "toolUseId": "tooluse_weather_001",
                            "name": "get_weather",
                            "input": {
                                "location": "San Francisco, CA",
                                "unit": "fahrenheit",
                            },
                        }
                    },
                ],
            }
        },
        "stopReason": "tool_use",
        "usage": {
            "inputTokens": 100,
            "outputTokens": 50,
            "totalTokens": 150,
            "cacheReadInputTokenCount": 0,
            "cacheReadInputTokens": 0,
            "cacheWriteInputTokenCount": 0,
            "cacheWriteInputTokens": 0,
        },
    }

    class MockResponse:
        # Test double exposing only the .json() and .text members used here.
        def json(self):
            return response_json

        @property
        def text(self):
            return json.dumps(response_json)

    config = AmazonConverseConfig()
    model_response = ModelResponse()
    # json_mode=True is what triggers filtering of the json_tool_call entry.
    optional_params = {"json_mode": True}

    result = config._transform_response(
        model="bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
        response=MockResponse(),
        model_response=model_response,
        stream=False,
        logging_obj=None,
        optional_params=optional_params,
        api_key=None,
        data=None,
        messages=[],
        encoding=None,
    )

    # Only real tool should remain
    assert result.choices[0].message.tool_calls is not None
    assert len(result.choices[0].message.tool_calls) == 1
    assert result.choices[0].message.tool_calls[0].function.name == "get_weather"
    assert (
        result.choices[0].message.tool_calls[0].function.arguments
        == '{"location": "San Francisco, CA", "unit": "fahrenheit"}'
    )

    # json_tool_call content should be preserved as message text
    content = result.choices[0].message.content
    assert content is not None
    parsed = json.loads(content)
    assert parsed["Current_Temperature"] == 62
    assert parsed["Weather_Explanation"] == "Mild and cool."


def test_transform_response_does_not_mutate_optional_params():
    """
    Verify that optional_params still contains json_mode after _transform_response.
    Previously, .pop() was used which mutated the caller's dict.
    """
    from litellm.llms.bedrock.chat.converse_transformation import AmazonConverseConfig
    from litellm.types.utils import ModelResponse

    # Minimal Converse payload whose only content block is a json_tool_call.
    response_json = {
        "metrics": {"latencyMs": 50},
        "output": {
            "message": {
                "role": "assistant",
                "content": [
                    {
                        "toolUse": {
                            "toolUseId": "tooluse_001",
                            "name": "json_tool_call",
                            "input": {"result": "ok"},
                        }
                    }
                ],
            }
        },
        "stopReason": "tool_use",
        "usage": {
            "inputTokens": 10,
            "outputTokens": 5,
            "totalTokens": 15,
            "cacheReadInputTokenCount": 0,
            "cacheReadInputTokens": 0,
            "cacheWriteInputTokenCount": 0,
            "cacheWriteInputTokens": 0,
        },
    }

    class MockResponse:
        # Test double exposing only the .json() and .text members used here.
        def json(self):
            return response_json

        @property
        def text(self):
            return json.dumps(response_json)

    config = AmazonConverseConfig()
    model_response = ModelResponse()
    # other_key guards against unrelated keys being dropped as well.
    optional_params = {"json_mode": True, "other_key": "value"}

    config._transform_response(
        model="bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
        response=MockResponse(),
        model_response=model_response,
        stream=False,
        logging_obj=None,
        optional_params=optional_params,
        api_key=None,
        data=None,
        messages=[],
        encoding=None,
    )

    # json_mode should still be in optional_params (not popped)
    assert "json_mode" in optional_params
    assert optional_params["json_mode"] is True
    assert optional_params["other_key"] == "value"


def test_streaming_filters_json_tool_call_with_real_tools():
    """
    Simulate streaming chunks where both json_tool_call and a real tool arrive.
    Verify json_tool_call chunks are converted to text content while real tool
    chunks pass through normally.
    """
    from litellm.llms.bedrock.chat.invoke_handler import AWSEventStreamDecoder
    from litellm.types.llms.bedrock import (
        ContentBlockDeltaEvent,
        ContentBlockStartEvent,
    )

    # json_mode=True activates the json_tool_call handling under test.
    decoder = AWSEventStreamDecoder(model="test-model", json_mode=True)

    # Chunk 1: json_tool_call start
    json_start = ContentBlockStartEvent(
        toolUse={
            "toolUseId": "tooluse_json_001",
            "name": "json_tool_call",
        }
    )
    tool_use_1, _, _ = decoder._handle_converse_start_event(json_start)
    # json_tool_call start should be suppressed (return None tool_use)
    assert tool_use_1 is None
    # tool_calls_index should NOT have been incremented
    assert decoder.tool_calls_index is None

    # Chunk 2: json_tool_call delta — should become text, not tool_use
    json_delta = ContentBlockDeltaEvent(toolUse={"input": '{"temp": 62}'})
    text_2, tool_use_2, _, _, _ = decoder._handle_converse_delta_event(
        json_delta, index=0
    )
    assert text_2 == '{"temp": 62}'
    assert tool_use_2 is None

    # Chunk 3: json_tool_call stop
    stop_tool = decoder._handle_converse_stop_event(index=0)
    assert stop_tool is None
    # _current_tool_name should be reset
    assert decoder._current_tool_name is None

    # Chunk 4: real tool start
    real_start = ContentBlockStartEvent(
        toolUse={
            "toolUseId": "tooluse_weather_001",
            "name": "get_weather",
        }
    )
    tool_use_4, _, _ = decoder._handle_converse_start_event(real_start)
    assert tool_use_4 is not None
    assert tool_use_4["function"]["name"] == "get_weather"
    # First real tool takes index 0 — the suppressed json_tool_call did not
    # consume an index.
    assert decoder.tool_calls_index == 0

    # Chunk 5: real tool delta
    real_delta = ContentBlockDeltaEvent(
        toolUse={"input": '{"location": "SF"}'}
    )
    text_5, tool_use_5, _, _, _ = decoder._handle_converse_delta_event(
        real_delta, index=1
    )
    assert text_5 == ""
    assert tool_use_5 is not None
    assert tool_use_5["function"]["arguments"] == '{"location": "SF"}'


def test_streaming_without_json_mode_passes_all_tools():
    """
    Verify backward compatibility: when json_mode=False, all tools
    (including json_tool_call if present) pass through unchanged.
    """
    from litellm.llms.bedrock.chat.invoke_handler import AWSEventStreamDecoder
    from litellm.types.llms.bedrock import (
        ContentBlockDeltaEvent,
        ContentBlockStartEvent,
    )

    # json_mode disabled: the decoder should not special-case json_tool_call.
    decoder = AWSEventStreamDecoder(model="test-model", json_mode=False)

    # json_tool_call start — should pass through when json_mode=False
    json_start = ContentBlockStartEvent(
        toolUse={
            "toolUseId": "tooluse_json_001",
            "name": "json_tool_call",
        }
    )
    tool_use, _, _ = decoder._handle_converse_start_event(json_start)
    assert tool_use is not None
    assert tool_use["function"]["name"] == "json_tool_call"
    assert decoder.tool_calls_index == 0

    # json_tool_call delta — should be a tool_use, not text
    json_delta = ContentBlockDeltaEvent(toolUse={"input": '{"data": 1}'})
    text, tool_use_delta, _, _, _ = decoder._handle_converse_delta_event(
        json_delta, index=0
    )
    assert text == ""
    assert tool_use_delta is not None
    assert tool_use_delta["function"]["arguments"] == '{"data": 1}'
