o
    )iq,                    @   sD  d dl Z d dlmZ d dlmZ d dlmZmZmZm	Z	m
Z
mZmZmZ d dlZd dlmZ d dlmZ d dlmZ d dlmZmZ d dlmZ d dlmZmZmZmZ d d	lm Z m!Z!m"Z"m#Z# d d
l$m%Z%m&Z&m'Z'm(Z(m)Z) d dl*m+Z+ d dl,m-Z-m.Z.m/Z/m0Z0m1Z1m2Z2 d dl3m4Z4m5Z5m6Z6m7Z7m8Z8m9Z9 d dl:m;Z;m<Z< d dl=m>Z>m?Z?m@Z@mAZA d dlBmCZC d dlDmEZE d dlFmGZG d dlHmIZI d dlJmKZKmLZLmMZMmNZNmOZO d dlPmQZQ d dlRmSZSmTZTmUZU d dlVmWZW d dlXmYZYmZZZm[Z[ d dl\m]Z] d dl^m_Z_m`Z`maZambZb d dlcmdZd erd dlemfZf eEegZhededZiG dd  d ZjdS )!    N)Sequence)contextmanager)TYPE_CHECKINGAnyCallableClassVarOptionalUnioncastoverload)ValidationError)tqdm)TypeVar
deprecated)BeamSearchInstanceBeamSearchOutputBeamSearchSequencecreate_sort_beams_key_function)CompilationConfig
ModelDTypeTokenizerModeis_init_field)ConvertOption
EngineArgsHfOverridesPoolerConfigRunnerOption	LLMEngine)ChatCompletionMessageParamChatTemplateContentFormatOptionapply_hf_chat_templateapply_mistral_chat_templateparse_chat_messages$resolve_chat_template_content_format)ScoreContentPartParamScoreMultiModalParam_cosine_similarity_validate_score_input_lenscompress_token_type_idsget_score_prompt)_validate_truncation_sizelog_non_default_args)
PromptTypeSingletonPrompt
TextPromptTokensPrompt)parse_and_batch_prompt)init_logger)LoRARequest)QuantizationMethods)ClassificationRequestOutputEmbeddingRequestOutputPoolingRequestOutputRequestOutputScoringRequestOutput)PoolingParams)BeamSearchParamsRequestOutputKindSamplingParams)PoolingTask)AnyTokenizerMistralTokenizerget_cached_tokenizer)UsageContext)CounterDevicedeprecate_kwargs
is_list_of)LogitsProcessor)Metric_R)defaultc                <   @   s  e Zd ZU dZdZee ed< 	 ee	dd Z
ddddddd	d
ddddddddddddddddddddedededee dededededededee dee dee dee deded ed!ed"ed#ed$ed%eeeef  d&ee d'eeeef  d(ee d)eeeeeef ef  d*eeeeee f   d+df8d,d-Z	dd.ee d+efd/d0Z ded+dfd1d2Z!d+e"fd3d4Z#e$	dddd5d6eee"e%e" f  d7ee&e%e& f d8eee'd9e(f f d.eeee ef  d+ee) f
d:d;Z*e$e+d<				dd7ed6eee"ee" f  d=eee  d8eee'd9e(f f d.eeee ef  d+ee) fd>d;Z*e$e+d<				dd7ee d6eee"ee" f  d=eeee   d8eee'd9e(f f d.eeee ef  d+ee) fd?d;Z*e$e+d<		dddd5d7ee d6eee"ee" f  d=ee d8eee'd9e(f f d.eeee ef  d+ee) fd@d;Z*e$e+d<		dddd5d7eee  d6eee"ee" f  d=eee  d8eee'd9e(f f d.eeee ef  d+ee) fdAd;Z*e$e+d<		dd7dd6dd=eee eee  f d8eee'd9e(f f d.eeee ef  d+ee) fdBd;Z*e,d=dCdD dEdF						dd7eee&e%e& f eeeee f  f d6eee"e%e" f  d=eeee eee  f  d8eee'd9e(f f d.eeee ef  dGeee  d+ee) fdHd;Z*dIee&e%e& f d.eeee ef  fdJdKZ-dLe&d.ee dMeeeef  fdNdOZ.		P	ddQeee'd9e/f f dRee dSe0dTeeeef  d+ee/ f
dUdVZ1dWe'e2j3ge/f d+ee/ fdXdYZ4d.eeee ef  d7eee5e6f  d+eee  fdZd[Z7		dd7eee5e6f  d\e8d.eeee ef  d8ed+ee9 f
d]d^Z:										dd_eee; eee;  f d6eee"ee" f  d8eee'd9e(f f d.ee d`ee dae<dbedceddeeeeef   deeeeef  d'eeeef  d+ee) fdfdgZ=e$	dddddhddidjeee>e%e> f  d7ee&e%e& f dkee d8eee'd9e(f f d.eeee ef  dle?dmeeeef  d+ee@ fdndoZAe$e+d<						h	dd7edjeee>e%e> f  d=eee  dkee d8eee'd9e(f f d.eeee ef  dle?dmeeeef  d+ee@ fdpdoZAe$e+d<						h	dd7ee djeee>e%e> f  d=eeee   dkee d8eee'd9e(f f d.eeee ef  dle?dmeeeef  d+ee@ fdqdoZAe$e+d<		dddddhddid7ee djeee>e%e> f  d=ee dkee d8eee'd9e(f f d.eeee ef  dle?dmeeeef  d+ee@ fdrdoZAe$e+d<		dddddhddid7eee  djeee>e%e> f  d=eee  dkee d8eee'd9e(f f d.eeee ef  dle?dmeeeef  d+ee@ fdsdoZAe$e+d<				h	dd7ddjdd=eee eee  f dkee d8eee'd9e(f f d.eeee ef  dle?dmeeeef  d+ee@ fdtdoZAe,d=dudD dEdF								dd7eee&e%e& f eeeee f  f djeee>e%e> f  d=eeee eee  f  dkee d8eee'd9e(f f d.eeee ef  dlee? dmeeeef  d+ee@ fdvdoZAdddddwd7ee&e%e& f dkee d8eee'd9e(f f djeee>e%e> f  d.eeee ef  d+eeB fdxdyZCddddzd7ee&e%e& f d8eee'd9e(f f djeee>e%e> f  d.eeee ef  d+eeD f
d{d|ZEdddddwd7ee&e%e& f dkee d8eee'd9e(f f djeee>e%e> f  d.eeee ef  d+ee@ fd}d~ZF				ddedeeee6e5f  deeee6e5f  dkee d8eee'd9e(f f djee> d.eeee ef  d+eeG fddZH				ddedeee eeI f deee eeI f dkee d8eee'd9e(f f djee> d.eeee ef  d+eeG fddZJdddddwdeeKe%eK eLf deeKe%eK eLf dkee d8eee'd9e(f f djee> d.eeee ef  d+eeG fddZMdddZNdddZOddeeP d+efddZQddefddZRddeee  fddZSd+ed fddZTd7eeeee f  d=eeee eee  f  fddZUddddd7ee&e%e& f d\ee"e%e" e>e%e> f d8eee'd9e(f f d.eee%e ef  dmeeeef  dGeee  d+dfddZV			dde&d\ee"e>f dmeeeef  d.ee dGed+dfddZWddd8eee'd9e(f f d+eee)e@f  fddZXdS )LLMa  An LLM for generating texts from given prompts and sampling parameters.

    This class includes a tokenizer, a language model (possibly distributed
    across multiple GPUs), and GPU memory space allocated for intermediate
    states (aka KV cache). Given a batch of prompts and sampling parameters,
    this class generates texts from the model, using an intelligent batching
    mechanism and efficient memory management.

    Args:
        model: The name or path of a HuggingFace Transformers model.
        tokenizer: The name or path of a HuggingFace Transformers tokenizer.
        tokenizer_mode: The tokenizer mode. "auto" will use the fast tokenizer
            if available, and "slow" will always use the slow tokenizer.
        skip_tokenizer_init: If true, skip initialization of tokenizer and
            detokenizer. Expect valid prompt_token_ids and None for prompt
            from the input.
        trust_remote_code: Trust remote code (e.g., from HuggingFace) when
            downloading the model and tokenizer.
        allowed_local_media_path: Allowing API requests to read local images
            or videos from directories specified by the server file system.
            This is a security risk. Should only be enabled in trusted
            environments.
        tensor_parallel_size: The number of GPUs to use for distributed
            execution with tensor parallelism.
        dtype: The data type for the model weights and activations. Currently,
            we support `float32`, `float16`, and `bfloat16`. If `auto`, we use
            the `torch_dtype` attribute specified in the model config file.
            However, if the `torch_dtype` in the config is `float32`, we will
            use `float16` instead.
        quantization: The method used to quantize the model weights. Currently,
            we support "awq", "gptq", and "fp8" (experimental).
            If None, we first check the `quantization_config` attribute in the
            model config file. If that is None, we assume the model weights are
            not quantized and use `dtype` to determine the data type of
            the weights.
        revision: The specific model version to use. It can be a branch name,
            a tag name, or a commit id.
        tokenizer_revision: The specific tokenizer version to use. It can be a
            branch name, a tag name, or a commit id.
        seed: The seed to initialize the random number generator for sampling.
        gpu_memory_utilization: The ratio (between 0 and 1) of GPU memory to
            reserve for the model weights, activations, and KV cache. Higher
            values will increase the KV cache size and thus improve the model's
            throughput. However, if the value is too high, it may cause out-of-
            memory (OOM) errors.
        swap_space: The size (GiB) of CPU memory per GPU to use as swap space.
            This can be used for temporarily storing the states of the requests
            when their `best_of` sampling parameters are larger than 1. If all
            requests will have `best_of=1`, you can safely set this to 0.
            Noting that `best_of` is only supported in V0. Otherwise, too small
            values may cause out-of-memory (OOM) errors.
        cpu_offload_gb: The size (GiB) of CPU memory to use for offloading
            the model weights. This virtually increases the GPU memory space
            you can use to hold the model weights, at the cost of CPU-GPU data
            transfer for every forward pass.
        enforce_eager: Whether to enforce eager execution. If True, we will
            disable CUDA graph and always execute the model in eager mode.
            If False, we will use CUDA graph and eager execution in hybrid.
        max_seq_len_to_capture: Maximum sequence len covered by CUDA graphs.
            When a sequence has context length larger than this, we fall back
            to eager mode. Additionally for encoder-decoder models, if the
            sequence length of the encoder input is larger than this, we fall
            back to the eager mode.
        disable_custom_all_reduce: See
            [ParallelConfig][vllm.config.ParallelConfig].
        disable_async_output_proc: Disable async output processing.
            This may result in lower performance.
        hf_token: The token to use as HTTP bearer authorization for remote files
            . If `True`, will use the token generated when running
            `huggingface-cli login` (stored in `~/.huggingface`).
        hf_overrides: If a dictionary, contains arguments to be forwarded to the
            HuggingFace config. If a callable, it is called to update the
            HuggingFace config.
        mm_processor_kwargs: Arguments to be forwarded to the model's processor
            for multi-modal data, e.g., image processor. Overrides for the
            multi-modal processor obtained from `AutoProcessor.from_pretrained`.
            The available overrides depend on the model that is being run.
            For example, for Phi-3-Vision: `{"num_crops": 4}`.
        override_pooler_config: Initialize non-default pooling config or
            override default pooling config for the pooling model.
            e.g. `PoolerConfig(pooling_type="mean", normalize=False)`.
        compilation_config: Either an integer or a dictionary. If it is an
            integer, it is used as the level of compilation optimization. If it
            is a dictionary, it can specify the full compilation configuration.
        **kwargs: Arguments for [`EngineArgs`][vllm.EngineArgs].

    Note:
        This class is intended to be used for offline inference. For online
        serving, use the [AsyncLLMEngine][vllm.AsyncLLMEngine] class instead.
    TDEPRECATE_LEGACYc                 c   s    d| _ d V  d| _ d S )NTF)rL   )cls rN   `/home/app/PaddleOCR-VL-test/.venv_paddleocr/lib/python3.10/site-packages/vllm/entrypoints/llm.pydeprecate_legacy_api   s   
zLLM.deprecate_legacy_apiautoNF    g?   r   i    )runnerconvert	tokenizertokenizer_modeskip_tokenizer_inittrust_remote_codeallowed_local_media_pathtensor_parallel_sizedtypequantizationrevisiontokenizer_revisionseedgpu_memory_utilization
swap_spacecpu_offload_gbenforce_eagermax_seq_len_to_capturedisable_custom_all_reducedisable_async_output_prochf_tokenhf_overridesmm_processor_kwargsoverride_pooler_configcompilation_configlogits_processorsmodelrU   rV   rW   rX   rY   rZ   r[   r\   r]   r^   r_   r`   ra   rb   rc   rd   re   rf   rg   rh   ri   rj   rk   rl   rm   rn   returnc          %   
   K   s.  d|vrd|d< d|v r|d }t |trt||d< d|v rXt |d trXddlm} |d }z|d*i ||d< W n tyW }  zt	d||  t
d|  | d	} ~ ww |d	u r^i }|d	urt |trmt|d
}!nt |trdd }"td*i tt|"| }!n|}!nt }!td*i d|d|d|d|d|d|d|d|d|	d|
d|d|d|d|d|d|d|d|d|d |d!|d"|d#|d$|d%|d&|!d'||}#t|# tj|#tjd(| _t| j| _t | _d	| _tjr| j }$n| jjj}$td)|$ |$| _d	S )+zLLM constructor.Zdisable_log_statsT
worker_clsZkv_transfer_configr   )KVTransferConfigz[Failed to convert 'kv_transfer_config' dict to KVTransferConfig object. Dict: %s. Error: %sz'Invalid 'kv_transfer_config' provided: Nlevelc                 S   s   t t| d S )Nr   )r   r   xrN   rN   rO   <lambda>   s    zLLM.__init__.<locals>.<lambda>ro   rU   rV   rW   rX   rY   rZ   r[   r\   r]   r^   r_   r`   ra   rb   rc   rd   re   rf   rg   rh   ri   rj   rk   rl   rm   rn   )engine_argsZusage_contextzSupported_tasks: %srN   ) 
isinstancetypecloudpickledumpsdictvllm.configrr   r   loggererror
ValueErrorintr   filteritemsr   r,   r   Zfrom_engine_argsrB   Z	LLM_CLASS
llm_engineengine_classrC   request_counterdefault_sampling_paramsenvsVLLM_USE_V1Zget_supported_tasksmodel_configsupported_tasksinfo)%selfro   rU   rV   rW   rX   rY   rZ   r[   r\   r]   r^   r_   r`   ra   rb   rc   rd   re   rf   rg   rh   ri   rj   rk   rl   rm   rn   kwargsrq   rr   Zraw_config_dicteZcompilation_config_instance	predicaterx   r   rN   rN   rO   __init__   s   $




	


zLLM.__init__lora_requestc                 C   s   | j  |S N)r   get_tokenizer_groupZget_lora_tokenizer)r   r   rN   rN   rO   get_tokenizer.  s   
zLLM.get_tokenizerc                 C   s0   | j  }|jjdr||_d S t||_d S )NZCached)r   r   	__class____name__
startswithrW   rA   )r   rW   Ztokenizer_grouprN   rN   rO   set_tokenizer5  s   

zLLM.set_tokenizerc                 C   s6   | j d u r| jj | _ | j rtjdi | j S t S )NrN   )r   r   r   Zget_diff_sampling_paramr=   Zfrom_optionalr   rN   rN   rO   get_default_sampling_params@  s   

zLLM.get_default_sampling_params)use_tqdmr   sampling_paramspromptsr   .c               C      d S r   rN   )r   r   r   r   r   rN   rN   rO   generateH     zLLM.generatez0'prompt_token_ids' will become part of 'prompts'prompt_token_idsc                 C   r   r   rN   r   r   r   r   r   r   rN   rN   rO   r   U  r   c                 C   r   r   rN   r   rN   rN   rO   r   b  r   c                C   r   r   rN   r   rN   rN   rO   r   o     c                C   r   r   rN   r   rN   rN   rO   r   }  r   c                 C   r   r   rN   r   rN   rN   rO   r     s   
c                   C      t jS r   rK   rL   rN   rN   rN   rO   rw         zLLM.<lambda>z+Please use the 'prompts' parameter instead.)Zis_deprecatedZadditional_messagepriorityc                 C   s   | j j}|j}|dkrtd|dur&| jttttt	t f  ||d}	nttt
tt
 f |}	|du r9|  }i }
d}t|trE|j}t|j||
 | |	|}| j|	||||
|d | j|d}| j|tS )a  Generates the completions for the input prompts.

        This class automatically batches the given prompts, considering
        the memory constraint. For the best performance, put all of your prompts
        into a single list and pass it to this method.

        Args:
            prompts: The prompts to the LLM. You may pass a sequence of prompts
                for batch inference. See [PromptType][vllm.inputs.PromptType]
                for more details about the format of each prompts.
            sampling_params: The sampling parameters for text generation. If
                None, we use the default sampling parameters.
                When it is a single value, it is applied to every prompt.
                When it is a list, the list must have the same length as the
                prompts and it is paired one by one with the prompt.
            use_tqdm: If `True`, shows a tqdm progress bar.
                If a callable (e.g., `functools.partial(tqdm, leave=False)`),
                it is used to create the progress bar.
                If `False`, no progress bar is created.
            lora_request: LoRA request to use for generation, if any.
            priority: The priority of the requests, if any.
                Only applicable when priority scheduling policy is enabled.

        Returns:
            A list of `RequestOutput` objects containing the
            generated completions in the same order as the input prompts.

        Note:
            Using `prompts` and `prompt_token_ids` as keyword parameters is
            considered legacy and may be deprecated in the future. You should
            instead pass them via the `inputs` parameter.
        r   zLLM.generate() is only supported for generative models. Try passing `--runner generate` to use the model as a generative model.Nr   r   )r   paramsr   r   tokenization_kwargsr   r   )r   r   runner_typer   _convert_v1_inputsr
   r   r	   strlistr-   r   r   ry   r=   truncate_prompt_tokensr+   max_model_len _get_modality_specific_lora_reqs_validate_and_add_requests_run_enginer   validate_outputsr8   )r   r   r   r   r   r   r   r   r   parsed_promptsr   r   outputsrN   rN   rO   r     sH   0
	r   c                    st   j jj  d u sj jjr r jd u r|S t|ts|g}t|ts+|gt| n|} fddt	||D S )Nc                    s    g | ]\}} || jqS rN   )_resolve_single_prompt_mm_loradefault_mm_loras).0parsed_promptZopt_lora_reqlora_configr   rN   rO   
<listcomp>	  s    z8LLM._get_modality_specific_lora_reqs.<locals>.<listcomp>)
r   Zvllm_configr   r   is_multimodal_modelr   ry   r   lenzip)r   r   r   Zoptional_lorasrN   r   rO   r     s&   


z$LLM._get_modality_specific_lora_reqsr   r   c                 C   s   |rt |trd|vr|S ttttf |}t|d  | }|s'|S t	|dkr5t
d| |S | }|| }t||d }|rT|j|krRt
d |S t|||S )Nmulti_modal_datarS   zMultiple modality specific loras were registered and would be used by a single prompt consuming several modalities;  currently we only support one lora per request; as such, lora(s) registered with modalities: %s will be skippedzA modality with a registered lora and a lora_request with a different ID were provided; falling back to the lora_request as we only apply one LoRARequest per prompt)ry   r}   r
   r	   r/   r0   setkeysintersectionr   r   warningpopsortedindexZlora_int_idr3   )r   r   r   r   r   Zmodality_nameZmodality_lora_pathZmodality_lora_idrN   rN   rO   r     sB   

z"LLM._resolve_single_prompt_mm_lorarN   methodtimeoutargsr   c                 C   s   | j ||||S )a  
        Execute an RPC call on all workers.

        Args:
            method: Name of the worker method to execute, or a callable that
                is serialized and sent to all workers to execute.

                If the method is a callable, it should accept an additional
                `self` argument, in addition to the arguments passed in `args`
                and `kwargs`. The `self` argument will be the worker object.
            timeout: Maximum time in seconds to wait for execution. Raises a
                [`TimeoutError`][] on timeout. `None` means wait indefinitely.
            args: Positional arguments to pass to the worker method.
            kwargs: Keyword arguments to pass to the worker method.

        Returns:
            A list containing the results from each worker.

        Note:
            It is recommended to use this API to only pass control messages,
            and set up data-plane communication to pass data.
        )r   collective_rpc)r   r   r   r   r   rN   rN   rO   r   A  s   zLLM.collective_rpcfuncc                 C   s   | j j}||S )zy
        Run a function directly on the model inside each worker,
        returning the result for each of them.
        )r   Zmodel_executorapply_model)r   r   executorrN   rN   rO   r   _  s   
zLLM.apply_modelc                 C   sT   t |trt|t|krtd|du st |tr!|gt| S tdt| )z;Get the optional lora request corresponding to each prompt.z:Lora request list should be the same length as the promptsNzInvalid lora_request type )ry   r   r   r   r3   	TypeErrorrz   )r   r   r   rN   rN   rO   _get_beam_search_lora_requestsg  s   z"LLM._get_beam_search_lora_requestsr   c           +         s   |j }|j}|j}|j}|j}	| ||}
|  }t|j|	}dt	dt
fdd td| d|d}g }t|
|D ]=\}}i }d|v rH|d |d< d	|v rR|d	 |d	< d
|v r`tt
|}|d
 }n||d }|t|f|dd| q8t|}|rt|dddd}td |D ]}ttdd |D g }dgttdd |D  }tt|dd |dd }t|dkr nt fdd|D  \}}| j||d|d}t||D ]p\\}}}g }t||D ]T}|| } || }!|!jd jdur;|!jd jd }"|" D ]4\}#}$t	| j|#g | j|"g | j| j |$j! | j"| j#d}%|#|jkr4|s4|j$|% q||% qqt%||dd}&|&d| |_&qqg }'|D ]-}|j$'|j& t%|j$|dd}(|(d| })|)D ]
}*|(|*j|*_)qi|'t*|)d qP|'S )ao  
        Generate sequences using beam search.

        Args:
            prompts: A list of prompts. Each prompt can be a string or a list
                of token IDs.
            params: The beam search parameters.
            lora_request: LoRA request to use for generation, if any.
            use_tqdm: Whether to use tqdm to display the progress bar.
        beamrp   c                 S   s@   d| j i}| jd ur| j|d< | jd ur| j|d< tdi |S )Nr   r   rk   rN   )tokensr   rk   r0   )r   Ztoken_prompt_kwargsrN   rN   rO   create_tokens_prompt_from_beam  s   


z7LLM.beam_search.<locals>.create_tokens_prompt_from_beam   rS   )logprobs
max_tokenstemperaturer   rk   r   promptN)r   r   zBeam searchtokenF)descunitZ
unit_scalezThe progress bar shows the upper bound on token steps and may finish early due to stopping conditions. It does not reflect instance-level progress.c                 s   s    | ]}|j V  qd S r   )beamsr   instancerN   rN   rO   	<genexpr>  s    z"LLM.beam_search.<locals>.<genexpr>r   c                 s       | ]}t |jV  qd S r   )r   r   r   rN   rN   rO   r         

c                    s   g | ]	} ||j fqS rN   )r   )r   r   r   rN   rO   r     s    z#LLM.beam_search.<locals>.<listcomp>r   r   r   )r   r   r   cum_logprobr   rk   T)keyreverse)	sequences)+
beam_widthr   r   
ignore_eoslength_penaltyr   r   r   Zeos_token_idr   r0   r=   r   r
   encodeappendr   ranger   r   r   r   sum	itertools
accumulater   r   r   r   r   r   r   r   Zlogprobr   rk   	completedr   r   extenddecodetextr   )+r   r   r   r   r   r   r   r   r   r   Zlora_requestsrW   Zsort_beams_keyZbeam_search_paramsZ	instancesZlora_reqr   Z	mm_kwargsZprompt_tokensZ
token_iter_Z	all_beamsposZinstance_start_and_endZprompts_batchZlora_req_batchoutputstartendr   Zinstance_new_beamsiZcurrent_beamresultr   Ztoken_idZlogprob_objZnew_beamZsorted_beamsr   Zsorted_completedZ
best_beamsr   rN   r   rO   beam_searchw  s   







!zLLM.beam_searchmessageschat_templatechat_template_content_formatadd_generation_promptcontinue_final_messagetoolschat_template_kwargsc                 C   s  t |trtttt  |}nttt |g}| |}| j }t||	|||d}t||||	d}|	|
p7i  g }|D ]H}t
||||d\}}t|trYt|fd|i|}ntd|||d|}|j|dd}t|d}|d	urx||d
< |d	ur||d< || q=| j||||dS )a	  
        Generate responses for a chat conversation.

        The chat conversation is converted into a text prompt using the
        tokenizer and calls the [generate][] method to generate the
        responses.

        Multi-modal inputs can be passed in the same way you would pass them
        to the OpenAI API.

        Args:
            messages: A list of conversations or a single conversation.

                - Each conversation is represented as a list of messages.
                - Each message is a dictionary with 'role' and 'content' keys.

            sampling_params: The sampling parameters for text generation.
                If None, we use the default sampling parameters. When it
                is a single value, it is applied to every prompt. When it
                is a list, the list must have the same length as the
                prompts and it is paired one by one with the prompt.
            use_tqdm: If `True`, shows a tqdm progress bar.
                If a callable (e.g., `functools.partial(tqdm, leave=False)`),
                it is used to create the progress bar.
                If `False`, no progress bar is created.
            lora_request: LoRA request to use for generation, if any.
            chat_template: The template to use for structuring the chat.
                If not provided, the model's default chat template will be used.
            chat_template_content_format: The format to render message content.

                - "string" will render the content as a string.
                  Example: `"Who are you?"`
                - "openai" will render the content as a list of dictionaries,
                  similar to OpenAI schema.
                  Example: `[{"type": "text", "text": "Who are you?"}]`

            add_generation_prompt: If True, adds a generation template
                to each message.
            continue_final_message: If True, continues the final message in
                the conversation instead of starting a new one. Cannot be
                `True` if `add_generation_prompt` is also `True`.
            chat_template_kwargs: Additional kwargs to pass to the chat
                template.
            mm_processor_kwargs: Multimodal processor kwarg overrides for this
                chat request. Only used for offline requests.

        Returns:
            A list of `RequestOutput` objects containing the generated
            responses in the same order as the input messages.
        r   )r  r  r  r  )Zcontent_formatr  )rW   conversationr   F)Zadd_special_tokensr   Nr   rk   r   rN   )rF   r   r
   r   r   r   Zget_model_configr$   r}   updater#   ry   r@   r"   r!   r   r0   r   r   )r   r  r   r   r   r  r  r  r  r  r  rk   Zlist_of_messagesrW   r   Zresolved_content_formatZ_chat_template_kwargsr   Zmsgsr
  Zmm_datar   Z
prompt_strr   rN   rN   rO   chat  sx   
E




zLLM.chatr   )r   r   r   pooling_taskr   pooling_paramsr   r  r   c               C   r   r   rN   )r   r   r  r   r   r   r  r   rN   rN   rO   r        z
LLM.encodec	           	      C   r   r   rN   	r   r   r  r   r   r   r   r  r   rN   rN   rO   r     r  c	           	      C   r   r   rN   r  rN   rN   rO   r     r  c          	      C   r   r   rN   r  rN   rN   rO   r        c          	      C   r   r   rN   r  rN   rN   rO   r     r  c	           	      C   r   r   rN   r  rN   rN   rO   r     s   c                   C   r   r   r   rN   rN   rN   rO   rw     r   c	                 C   s4  |du rd| j v rd}nd}td| | jj}	|	j}
|
dkr#td|| j vr1td| j  d|durH| jtt	t
ttt f  ||d	}ntt
ttt f |}|du rZt }t|trf|||	 n|D ]}|||	 qh|du rtttf  }t|	j|| | j|||||d
 | j|d}| j|tS )a  Apply pooling to the hidden states corresponding to the input
        prompts.

        This class automatically batches the given prompts, considering
        the memory constraint. For the best performance, put all of your prompts
        into a single list and pass it to this method.

        Args:
            prompts: The prompts to the LLM. You may pass a sequence of prompts
                for batch inference. See [PromptType][vllm.inputs.PromptType]
                for more details about the format of each prompts.
            pooling_params: The pooling parameters for pooling. If None, we
                use the default pooling parameters.
            use_tqdm: If `True`, shows a tqdm progress bar.
                If a callable (e.g., `functools.partial(tqdm, leave=False)`),
                it is used to create the progress bar.
                If `False`, no progress bar is created.
            lora_request: LoRA request to use for generation, if any.
            pooling_task: Override the pooling task to use.

        Returns:
            A list of `PoolingRequestOutput` objects containing the
            pooled hidden states in the same order as the input prompts.

        Note:
            Using `prompts` and `prompt_token_ids` as keyword parameters is
            considered legacy and may be deprecated in the future. You should
            instead pass them via the `inputs` parameter.
        Nembedr   a  `LLM.encode` is currently using `pooling_task = %s`.
Please use one of the more specific methods or set the task directly when using `LLM.encode`:
  - For embeddings, use `LLM.embed(...)` or `pooling_task="embed"`.
  - For classification logits, use `LLM.classify(...)` or `pooling_task="classify"`.
  - For rewards, use `LLM.reward(...)` or `pooling_task="reward"`
  - For similarity scores, use `LLM.score(...)`.poolingzvLLM.encode() is only supported for pooling models. Try passing `--runner pooling` to use the model as a pooling model.zpooling_task must be one of .r   )r   r   r   r   r   r   )r   r   Zwarning_oncer   r   r   r   r   r
   r   r	   r   r   r-   r   r:   ry   verifyr}   r   r+   r   r   r   r   r   r7   )r   r   r  r   r   r   r   r  r   r   r   r   Zpooling_paramr   rN   rN   rO   r   	  sb   /



)r   r   r  r   c               C   s6   d| j vr	td| j|||||dd}dd |D S )a]  
        Generate an embedding vector for each prompt.

        This class automatically batches the given prompts, considering
        the memory constraint. For the best performance, put all of your prompts
        into a single list and pass it to this method.

        Args:
            prompts: The prompts to the LLM. You may pass a sequence of prompts
                for batch inference. See [PromptType][vllm.inputs.PromptType]
                for more details about the format of each prompts.
            pooling_params: The pooling parameters for pooling. If None, we
                use the default pooling parameters.
            use_tqdm: If `True`, shows a tqdm progress bar.
                If a callable (e.g., `functools.partial(tqdm, leave=False)`),
                it is used to create the progress bar.
                If `False`, no progress bar is created.
            lora_request: LoRA request to use for generation, if any.

        Returns:
            A list of `EmbeddingRequestOutput` objects containing the
            embedding vectors in the same order as the input prompts.
        r  z_Embedding API is not supported by this model. Try converting the model using `--convert embed`.)r   r   r  r   r  c                 S      g | ]}t |qS rN   )r6   	from_baser   itemrN   rN   rO   r         zLLM.embed.<locals>.<listcomp>r   r   r   )r   r   r   r   r  r   r   rN   rN   rO   r  |  s   
"	z	LLM.embed)r   r  r   c               C   s4   d| j vr	td| j||||dd}dd |D S )aZ  
        Generate class logits for each prompt.

        This class automatically batches the given prompts, considering
        the memory constraint. For the best performance, put all of your prompts
        into a single list and pass it to this method.

        Args:
            prompts: The prompts to the LLM. You may pass a sequence of prompts
                for batch inference. See [PromptType][vllm.inputs.PromptType]
                for more details about the format of each prompts.
            use_tqdm: If `True`, shows a tqdm progress bar.
                If a callable (e.g., `functools.partial(tqdm, leave=False)`),
                it is used to create the progress bar.
                If `False`, no progress bar is created.
            lora_request: LoRA request to use for generation, if any.
            pooling_params: The pooling parameters for pooling. If None, we
                use the default pooling parameters.
        Returns:
            A list of `ClassificationRequestOutput` objects containing the
            embedding vectors in the same order as the input prompts.
        classifyzgClassification API is not supported by this model. Try converting the model using `--convert classify`.)r   r  r   r  c                 S   r  rN   )r5   r  r  rN   rN   rO   r     r  z LLM.classify.<locals>.<listcomp>r  )r   r   r   r  r   r   rN   rN   rO   r    s   
 zLLM.classifyc               C   s   | j |||||ddS )a  
        Generate rewards for each prompt.

        Args:
            prompts: The prompts to the LLM. You may pass a sequence of prompts
                for batch inference. See [PromptType][vllm.inputs.PromptType]
                for more details about the format of each prompts.
            use_tqdm: If `True`, shows a tqdm progress bar.
                If a callable (e.g., `functools.partial(tqdm, leave=False)`),
                it is used to create the progress bar.
                If `False`, no progress bar is created.
            lora_request: LoRA request to use for generation, if any.
            pooling_params: The pooling parameters for pooling. If None, we
                use the default pooling parameters.
        Returns:
            A list of `PoolingRequestOutput` objects containing the
            pooled hidden states in the same order as the input prompts.
        r   )r   r   r  r   r  )r   )r   r   r   r   r  r   rN   rN   rO   reward  s   z
LLM.rewardtext_1text_2c                 C   s|   | j || ||||dd}|dt| }	|t|d  }
t|	dkr)|	t|
 }	t||	|
d}| j|t}dd |D S )Nr  )r   r   r   r  r  r   rS   )rW   Zembed_1Zembed_2c                 S   r  rN   r9   r  r  rN   rN   rO   r   &  r  z(LLM._embedding_score.<locals>.<listcomp>)r   r   r'   r   r   r7   )r   rW   r  r   r   r   r  r   Zencoded_outputZencoded_output_1Zencoded_output_2Zscoresr   rN   rN   rO   _embedding_score  s0   	

zLLM._embedding_scoredata_1data_2c                 C   sB  | j j}t|trtdt|dkr|t| }|d u r"tdd}| j j}|d| tt  }	i }
t	|j
||
 g }dd t||D }| j j}|D ]7\}}t|||||
d\}}tjry|dd  }ry| }t|}d	|i|_|	| n|	| || qL| j||	||d
 | j|d}| j|t}dd |D S )Nz0Score API is not supported for Mistral tokenizerrS   score)taskc                 S   s   g | ]\}}||fqS rN   rN   )r   t1t2rN   rN   rO   r   I  s    z-LLM._cross_encoding_score.<locals>.<listcomp>)r   r#  r$  rW   r   token_type_idsZcompressed_token_type_ids)r   r   r   r   r   c                 S   r  rN   r!  r  rN   rN   rO   r   l  r  )r   r   ry   r@   r   r   r:   r  r   r+   r   r   r*   r   r   r   cloner)   extra_kwargsr   r   r   r   r   r7   )r   rW   r#  r$  r   r   r  r   r   Zpooling_params_listr   r   Zinput_pairsqdr   Zengine_promptr)  r   
compressedr   r   rN   rN   rO   _cross_encoding_score(  s^   







zLLM._cross_encoding_scorec         	   	      s  | j jj}|dkrtd| jtfdddD r!tdjr1tjddd	kr1td
| 	 j
sdtttt tf ffdd}|| || dtffdd t|ttfrc|g} fdd|D }t|ttfrv|g} fdd|D }t|trd|v r|d}nt|tr|g}t|trd|v r|d}nt|tr|g}t|| jr| ||||||S | ||||||S )a  Generate similarity scores for all pairs `<text,text_pair>` or
          `<multi-modal data, multi-modal data pair>`.

        The inputs can be `1 -> 1`, `1 -> N` or `N -> N`.
        In the `1 - N` case the `data_1` input will be replicated `N`
        times to pair with the `data_2` inputs.
        The input pairs are used to build a list of prompts for the
        cross encoder model. This class automatically batches the prompts,
        considering the memory constraint. For the best performance, put all
        of your inputs into a single list and pass it to this method.

        Supports both text and multi-modal data (images, etc.) when used with
        appropriate multi-modal models. For multi-modal inputs, ensure the
        prompt structure matches the model's expected input format.

        Args:
            data_1: Can be a single prompt, a list of prompts or
                `ScoreMultiModalParam`, which can contain either text or
                multi-modal data. When a list, it must have the same length as
                the `data_2` list.
            data_2: The data to pair with the query to form the input to
                the LLM. Can be text or multi-modal data. See [PromptType]
                [vllm.inputs.PromptType] for more details about the format of
                each prompt.
            use_tqdm: If `True`, shows a tqdm progress bar.
                If a callable (e.g., `functools.partial(tqdm, leave=False)`),
                it is used to create the progress bar.
                If `False`, no progress bar is created.
            lora_request: LoRA request to use for generation, if any.
            pooling_params: The pooling parameters for pooling. If None, we
                use the default pooling parameters.
        Returns:
            A list of `ScoringRequestOutput` objects containing the
            generated scores in the same order as the input prompts.
        r  zuLLM.score() is only supported for pooling models. Try passing `--runner pooling` to use the model as a pooling model.c                 3   s    | ]}| vV  qd S r   rN   r   t)r   rN   rO   r     s    zLLM.score.<locals>.<genexpr>)r  r  zsScore API is not supported by this model. Try converting the model using `--convert embed` or `--convert classify`.Z
num_labelsr   rS   z.Score API is only enabled for num_labels == 1.datac                    s*   t | trd| v rtd j d S d S )Ncontentz*ScoreMultiModalParam is not supported for )ry   r}   r   architecture)r2  r	  rN   rO   check_data_type  s
   z"LLM.score.<locals>.check_data_typer   c                    sb   t | tr'd| v rtdd| v r tt| d } nd| v r'tt| d } t| tu s/J | S )Nr   z/Multi-modal prompt is not supported for scoringr   r   )	ry   r}   r   r   r
   r0   r/   rz   r   r   )rW   rN   rO   
ensure_str  s   
zLLM.score.<locals>.ensure_strc                       g | ]} |qS rN   rN   r0  r7  rN   rO   r         zLLM.score.<locals>.<listcomp>c                    r8  rN   rN   r0  r9  rN   rO   r     r:  r3  )r   r   r   r   r   allZis_cross_encodergetattrZ	hf_configr   r   r	   r.   r   r&   ry   r   r}   getr(   r/  r"  )	r   r#  r$  r   r   r  r   r   r5  rN   )r7  r   r   rW   rO   r%  n  sn   0


	z	LLM.scorec                 C      | j   d S r   )r   start_profiler   rN   rN   rO   r?       zLLM.start_profilec                 C   r>  r   )r   stop_profiler   rN   rN   rO   rA    r@  zLLM.stop_profiledevicec                 C   s   | j |S r   )r   reset_prefix_cache)r   rB  rN   rN   rO   rC    s   zLLM.reset_prefix_cachert   c                 C   s   |    | jj|d dS )a'  
        Put the engine to sleep. The engine should not process any requests.
        The caller should guarantee that no requests are being processed
        during the sleep period, before `wake_up` is called.

        Args:
            level: The sleep level. Level 1 sleep will offload the model
                weights and discard the kv cache. The content of kv cache
                is forgotten. Level 1 sleep is good for sleeping and waking
                up the engine to run the same model again. The model weights
                are backed up in CPU memory. Please make sure there's enough
                CPU memory to store the model weights. Level 2 sleep will
                discard both the model weights and the kv cache. The content
                of both the model weights and kv cache is forgotten. Level 2
                sleep is good for sleeping and waking up the engine to run a
                different model or update the model, where previous model
                weights are not needed. It reduces CPU memory pressure.
        rs   N)rC  r   sleep)r   rt   rN   rN   rO   rD    s   z	LLM.sleeptagsc                 C   s   | j | dS )a  
        Wake up the engine from sleep mode. See the [sleep][] method
        for more details.

        Args:
            tags: An optional list of tags to reallocate the engine memory
                for specific memory allocations. Values must be in
                `("weights", "kv_cache")`. If None, all memory is reallocated.
                wake_up should be called with all tags (or None) before the
                engine is used again.
        N)r   wake_up)r   rE  rN   rN   rO   rF    s   zLLM.wake_uprH   c                 C   s&   ddl m} t| j|sJ | j S )a!  Return a snapshot of aggregated metrics from Prometheus.

        Returns:
            A ``MetricSnapshot`` instance capturing the current state
            of all aggregated metrics from Prometheus.

        Note:
            This method is only available with the V1 LLM engine.
        r   r   )Zvllm.v1.engine.llm_enginer   ry   r   get_metrics)r   ZV1LLMEnginerN   rN   rO   rG  &  s   

zLLM.get_metricsc                 C   s   |d u r|d u rt d|d ur |d ur t|t|kr t d|d ur-dd t|D }|d ur:dd t|D }|d urCt|}n|d urKt|}g }t|D ]!}|d ur_t|| d}n|d urkt|| d}nt|| qQ|S )Nz4Either prompts or prompt_token_ids must be provided.z=The lengths of prompts and prompt_token_ids must be the same.c                 S      g | ]}|d  qS r3  rN   r   prN   rN   rO   r   F  r:  z*LLM._convert_v1_inputs.<locals>.<listcomp>c                 S   rH  rI  rN   rJ  rN   rN   rO   r   H  s    r6  r  )r   r   r1   r   r/   r0   AssertionErrorr   )r   r   r   num_requestsr   r   r  rN   rN   rO   r   5  s8   
zLLM._convert_v1_inputs)r   r   r   c                C   s  t |ttfr
|g}t|}t |trt||krtdt |tr,t||kr,tdt |tr3|n|fD ]}t |trAtj|_	q6|}	|rTt
|rL|nt}
|
|	dd}	t|	D ]&\}}| j|t |trh|| n||t |trs|| n||rz|| ndd qXd S )Nz3The lengths of prompts and params must be the same.z9The lengths of prompts and lora_request must be the same.zAdding requests)r   r   )r   r   r   )ry   r   r}   r   r   r   r=   r<   Z
FINAL_ONLYZoutput_kindcallabler   	enumerate_add_request)r   r   r   r   r   r   r   rM  spit	tqdm_funcr   r   rN   rN   rO   r   ^  s<   
zLLM._validate_and_add_requestsr   c                 C   s*   t t| j}| jj||||||d d S )N)r   r   r   )r   nextr   r   Zadd_request)r   r   r   r   r   r   
request_idrN   rN   rO   rP    s   
zLLM._add_requestr   c             	   C   sL  |r | j  }t|r|nt}||dddddddddd}g }d}d}| j  r| j  }|D ]`}	|	jr||	 |rt|	t	rt
|	j}
|	jd usOJ |t
|	j|
 7 }||jd	  }|td
d |	jD 7 }||jd	  }d|dd|dd|_||
 n|d |j|kr|  q2| j  s+|r|  t|dd dS )NzProcessed promptsTzest. speed input: r   z.2fz toks/s, output: z toks/s)totalr   Zdynamic_ncolspostfixelapsedc                 s   r   r   )r   Z	token_ids)r   ZstprN   rN   rO   r     r   z"LLM._run_engine.<locals>.<genexpr>rS   c                 S   s
   t | jS r   )r   rU  ru   rN   rN   rO   rw     s   
 z!LLM._run_engine.<locals>.<lambda>)r   )r   Zget_num_unfinished_requestsrN  r   Zhas_unfinished_requestsstepfinishedr   ry   r8   r   r   r   Zformat_dictr   rW  r  nrefreshcloser   )r   r   rM  rS  Zpbarr   Ztotal_in_toksZtotal_out_toksZstep_outputsr   r[  Zin_spdZout_spdrN   rN   rO   r     sX   

	










zLLM._run_enginer   )NNTN)NN)TN)NNNTNN)NrN   N)NF)
NTNNrQ   TFNNN)NNNTNr   N)NTNr   N)NNNNTNNN)NTNN)rp   N)rS   )NNr   )Yr   
__module____qualname____doc__rL   r   bool__annotations__classmethodr   rP   r   r   r   r   r   r   r   r4   floatr	   r   r}   r   r   r   r   rz   rG   r   r3   r?   r   r   r=   r   r   r   r-   r   r   r8   r   r   rE   r   r   rI   tupler   nnModuler   r0   r/   r   r;   r   r  r   r    r  r:   r>   r7   r   r6   r  r5   r  r  r9   r"  r%   r/  r.   r&   r%  r?  rA  rD   rC  rD  rF  rG  r   r   rP  r   rN   rN   rN   rO   rK   D   s  
 [	
!
 
	
	

	

	
X

1
"


 &
	

 	

	
	
	


	
	
	
s	

7	
4	

,	
)	
N

	

 


/
	

1

rK   )kr   collections.abcr   
contextlibr   typingr   r   r   r   r   r	   r
   r   r{   Ztorch.nnrf  Zpydanticr   Z	tqdm.autor   Ztyping_extensionsr   r   Z	vllm.envsr   Zvllm.beam_searchr   r   r   r   r~   r   r   r   r   Zvllm.engine.arg_utilsr   r   r   r   r   Zvllm.engine.llm_enginer   Zvllm.entrypoints.chat_utilsr   r    r!   r"   r#   r$   Zvllm.entrypoints.score_utilsr%   r&   r'   r(   r)   r*   Zvllm.entrypoints.utilsr+   r,   Zvllm.inputsr-   r.   r/   r0   Zvllm.inputs.parser1   Zvllm.loggerr2   Zvllm.lora.requestr3   Z'vllm.model_executor.layers.quantizationr4   Zvllm.outputsr5   r6   r7   r8   r9   Zvllm.pooling_paramsr:   Zvllm.sampling_paramsr;   r<   r=   Z
vllm.tasksr>   Z!vllm.transformers_utils.tokenizerr?   r@   rA   Zvllm.usage.usage_librB   Z
vllm.utilsrC   rD   rE   rF   Zvllm.v1.sample.logits_processorrG   Zvllm.v1.metrics.readerrH   r   r   rI   rK   rN   rN   rN   rO   <module>   sF   (  