package com.gzzm.lobster.llm;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

/**
 * LlmResponse —— 模型响应 / Response returned by the LLM adapter.
 */
public final class LlmResponse {

    /** Plain assistant text; never {@code null} (normalized to {@code ""}). */
    private final String assistantText;
    /** Tool calls requested by the model; never {@code null}, unmodifiable. */
    private final List<ToolCall> toolCalls;
    private final int inputTokens;
    private final int outputTokens;
    private final int promptCacheHitTokens;
    private final int promptCacheMissTokens;
    /** Provider-reported finish reason (e.g. "stop", "tool_calls"); may be {@code null}. */
    private final String finishReason;
    /** Identifier of the model that produced this response; may be {@code null}. */
    private final String modelId;
    /** Raw response string, kept for diagnostics; may be {@code null}. */
    private final String rawText;
    /**
     * Reasoning/thinking content (thinking-mode models such as deepseek-v4-flash,
     * deepseek-reasoner, Qwen-QwQ). In multi-turn conversations the historical
     * reasoning_content must be sent back to the API, otherwise DeepSeek returns 400.
     * May be {@code null} for models without a thinking mode.
     */
    private final String reasoningContent;

    /**
     * Convenience constructor without reasoning content or cache-token counts.
     *
     * @see #LlmResponse(String, List, int, int, String, String, String, String, int, int)
     */
    public LlmResponse(String assistantText, List<ToolCall> toolCalls,
                       int inputTokens, int outputTokens, String finishReason,
                       String modelId, String rawText) {
        this(assistantText, toolCalls, inputTokens, outputTokens, finishReason, modelId, rawText, null);
    }

    /**
     * Convenience constructor without prompt-cache token counts (both default to 0).
     *
     * @see #LlmResponse(String, List, int, int, String, String, String, String, int, int)
     */
    public LlmResponse(String assistantText, List<ToolCall> toolCalls,
                       int inputTokens, int outputTokens, String finishReason,
                       String modelId, String rawText, String reasoningContent) {
        this(assistantText, toolCalls, inputTokens, outputTokens, finishReason, modelId, rawText,
                reasoningContent, 0, 0);
    }

    /**
     * Full constructor.
     *
     * @param assistantText         assistant message text; {@code null} is normalized to {@code ""}
     * @param toolCalls             tool calls requested by the model; {@code null} is treated as empty.
     *                              The list is defensively copied, so later caller-side mutation
     *                              does not affect this instance.
     * @param inputTokens           prompt token count as reported by the provider
     * @param outputTokens          completion token count as reported by the provider
     * @param finishReason          provider finish reason; may be {@code null}
     * @param modelId               model identifier; may be {@code null}
     * @param rawText               raw provider response for diagnostics; may be {@code null}
     * @param reasoningContent      thinking-mode reasoning content; may be {@code null}
     * @param promptCacheHitTokens  cached-prompt token count; negative values are clamped to 0
     * @param promptCacheMissTokens uncached-prompt token count; negative values are clamped to 0
     */
    public LlmResponse(String assistantText, List<ToolCall> toolCalls,
                       int inputTokens, int outputTokens, String finishReason,
                       String modelId, String rawText, String reasoningContent,
                       int promptCacheHitTokens, int promptCacheMissTokens) {
        this.assistantText = assistantText == null ? "" : assistantText;
        // Defensive copy wrapped unmodifiable: this class is an immutable value object,
        // so neither the original caller list nor the getter result may mutate our state.
        this.toolCalls = (toolCalls == null || toolCalls.isEmpty())
                ? Collections.<ToolCall>emptyList()
                : Collections.unmodifiableList(new ArrayList<ToolCall>(toolCalls));
        this.inputTokens = inputTokens;
        this.outputTokens = outputTokens;
        this.promptCacheHitTokens = Math.max(0, promptCacheHitTokens);
        this.promptCacheMissTokens = Math.max(0, promptCacheMissTokens);
        this.finishReason = finishReason;
        this.modelId = modelId;
        this.rawText = rawText;
        this.reasoningContent = reasoningContent;
    }

    /** @return assistant message text; never {@code null} */
    public String getAssistantText() { return assistantText; }

    /** @return unmodifiable list of tool calls; never {@code null} */
    public List<ToolCall> getToolCalls() { return toolCalls; }

    public int getInputTokens() { return inputTokens; }

    public int getOutputTokens() { return outputTokens; }

    public int getPromptCacheHitTokens() { return promptCacheHitTokens; }

    public int getPromptCacheMissTokens() { return promptCacheMissTokens; }

    /** @return provider finish reason; may be {@code null} */
    public String getFinishReason() { return finishReason; }

    /** @return model identifier; may be {@code null} */
    public String getModelId() { return modelId; }

    /** @return raw provider response; may be {@code null} */
    public String getRawText() { return rawText; }

    /** @return thinking-mode reasoning content; may be {@code null} */
    public String getReasoningContent() { return reasoningContent; }

    /** @return {@code true} if the model requested at least one tool call */
    public boolean hasToolCalls() { return !toolCalls.isEmpty(); }
}
