# Enterprise LiteLLM proxy hook: content moderation via Meta's Llama Guard model.
import os
import sys
from collections.abc import Iterable

sys.path.insert(0, os.path.abspath("../.."))  # Adds the parent directory to the system path

from typing import Literal, Optional

from fastapi import HTTPException

import litellm
from litellm._logging import verbose_proxy_logger
from litellm.integrations.custom_logger import CustomLogger
from litellm.proxy._types import UserAPIKeyAuth
from litellm.types.utils import CallTypes, Choices, ModelResponse


class _ENTERPRISE_LlamaGuard(CustomLogger):
    def __init__(self, model_name: Optional[str] = None):
        _model = model_name or litellm.llamaguard_model_name
        if _model is None:
            raise ValueError("model_name not set for LlamaGuard")
        self.model = _model

        # Optional file listing the unsafe-content categories injected into the prompt.
        file_path = litellm.llamaguard_unsafe_content_categories
        data = None
        if file_path is not None:
            try:
                with open(file_path, "r") as file:
                    data = file.read()
            except FileNotFoundError:
                raise Exception(f"File not found. file_path={file_path}")
            except Exception as e:
                raise Exception(f"An error occurred: {str(e)}, file_path={file_path}")
        self.unsafe_content_categories = data
        verbose_proxy_logger.debug(
            f"self.unsafe_content_categories: {self.unsafe_content_categories}"
        )

    def print_verbose(self, print_statement):
        try:
            verbose_proxy_logger.debug(print_statement)
            if litellm.set_verbose:
                print(print_statement)  # noqa
        except Exception:
            pass

    def set_custom_prompt_template(self, messages: list):
        if self.model is not None and self.unsafe_content_categories is not None:
            # Llama Guard grades either the "User" or the "Agent" turn, depending on parity.
            role = "Agent" if len(messages) % 2 == 0 else "User"
            litellm.register_prompt_template(
                model=self.model,
                initial_prompt_value=f"""[INST] Task: Check if there is unsafe content in '{role}' messages in conversations according our safety policy with the below categories.

                <BEGIN UNSAFE CONTENT CATEGORIES>
                {self.unsafe_content_categories}
                <END UNSAFE CONTENT CATEGORIES>

                <BEGIN CONVERSATION>

                """,
                roles={
                    "system": {"pre_message": "User: ", "post_message": "\n\n"},
                    "user": {"pre_message": "User: ", "post_message": "\n\n"},
                    "assistant": {"pre_message": "Agent: ", "post_message": "\n\n"},
                },
                final_prompt_value="""<END CONVERSATION>

        Provide your safety assessment for {role} in the above conversation:
        - First line must read 'safe' or 'unsafe'.
        - If unsafe, a second line must include a comma-separated list of violated categories. [/INST]""",
            )

    async def async_moderation_hook(
        self,
        data: dict,
        user_api_key_dict: UserAPIKeyAuth,
        call_type: CallTypes,
    ):
        """
        - Calls the Llama Guard Endpoint
        - Rejects request if it fails safety check

        The llama guard prompt template is applied automatically in factory.py
        """
        if "messages" in data:
            # Only the most recent message is sent to Llama Guard for grading.
            safety_check_messages = data["messages"][-1]
            response = await litellm.acompletion(
                model=self.model,
                messages=[safety_check_messages],
                hf_model_name="meta-llama/LlamaGuard-7b",
            )
            if (
                isinstance(response, ModelResponse)
                and isinstance(response.choices[0], Choices)
                and isinstance(response.choices[0].message.content, str)
                and "unsafe" in response.choices[0].message.content
            ):
                raise HTTPException(
                    status_code=400,
                    detail={"error": "Violated content safety policy"},
                )
        return data
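

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): shows how the
# hook could be instantiated and invoked directly. The model id below is a
# placeholder assumption, and running this requires valid provider credentials
# for whichever Llama Guard deployment is configured.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import asyncio

    # Assumed settings -- substitute a real Llama Guard deployment and, optionally,
    # a path to a file with custom unsafe-content categories.
    litellm.llamaguard_model_name = "huggingface/meta-llama/LlamaGuard-7b"
    litellm.llamaguard_unsafe_content_categories = None

    guard = _ENTERPRISE_LlamaGuard()

    async def _demo() -> None:
        request_data = {
            "messages": [{"role": "user", "content": "How do I bake a chocolate cake?"}]
        }
        # Raises fastapi.HTTPException(status_code=400) if Llama Guard answers "unsafe";
        # otherwise the request data is returned unchanged.
        checked = await guard.async_moderation_hook(
            data=request_data,
            user_api_key_dict=UserAPIKeyAuth(),
            call_type=CallTypes.completion,
        )
        print(checked)

    asyncio.run(_demo())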