# paddleocr/_common_args.py

from paddlex.inference import PaddlePredictorOption
from paddlex.utils.device import get_default_device, parse_device

from ._constants import (
    DEFAULT_CPU_THREADS,
    DEFAULT_DEVICE,
    DEFAULT_ENABLE_MKLDNN,
    DEFAULT_MKLDNN_CACHE_CAPACITY,
    DEFAULT_PRECISION,
    DEFAULT_USE_TENSORRT,
    SUPPORTED_PRECISION_LIST,
    DEFAULT_USE_CINN,
)
from ._utils.cli import str2bool


def parse_common_args(kwargs, *, default_enable_hpi):
    # Common inference options shared by the CLI entry points, with defaults.
    default_vals = {
        "device": DEFAULT_DEVICE,
        "enable_hpi": default_enable_hpi,
        "use_tensorrt": DEFAULT_USE_TENSORRT,
        "precision": DEFAULT_PRECISION,
        "enable_mkldnn": DEFAULT_ENABLE_MKLDNN,
        "mkldnn_cache_capacity": DEFAULT_MKLDNN_CACHE_CAPACITY,
        "cpu_threads": DEFAULT_CPU_THREADS,
        "enable_cinn": DEFAULT_USE_CINN,
    }
    unknown_names = kwargs.keys() - default_vals.keys()
    for name in unknown_names:
        raise ValueError(f"Unknown argument: {name}")
    kwargs = {**default_vals, **kwargs}
    if kwargs["precision"] not in SUPPORTED_PRECISION_LIST:
        raise ValueError(
            f"Invalid precision: {kwargs['precision']}. "
            f"Supported values are: {SUPPORTED_PRECISION_LIST}."
        )
    # Rename the TensorRT-related options to the names used internally.
    kwargs["use_pptrt"] = kwargs.pop("use_tensorrt")
    kwargs["pptrt_precision"] = kwargs.pop("precision")
    return kwargs


def prepare_common_init_args(model_name, common_args):
    device = common_args["device"]
    if device is None:
        device = get_default_device()
    device_type, _ = parse_device(device)

    init_kwargs = {}
    init_kwargs["device"] = device
    init_kwargs["use_hpip"] = common_args["enable_hpi"]

    pp_option = PaddlePredictorOption()
    if device_type == "gpu":
        if common_args["use_pptrt"]:
            if common_args["pptrt_precision"] == "fp32":
                pp_option.run_mode = "trt_fp32"
            else:
                assert common_args["pptrt_precision"] == "fp16"
                pp_option.run_mode = "trt_fp16"
        else:
            pp_option.run_mode = "paddle"
    elif device_type == "cpu":
        enable_mkldnn = common_args["enable_mkldnn"]
        if enable_mkldnn:
            # Keep the predictor's default run mode; only set the cache capacity.
            pp_option.mkldnn_cache_capacity = common_args["mkldnn_cache_capacity"]
        else:
            pp_option.run_mode = "paddle"
        pp_option.cpu_threads = common_args["cpu_threads"]
    else:
        pp_option.run_mode = "paddle"
    pp_option.enable_cinn = common_args["enable_cinn"]
    init_kwargs["pp_option"] = pp_option

    return init_kwargs


def add_common_cli_opts(parser, *, default_enable_hpi, allow_multiple_devices):
    if allow_multiple_devices:
        help_ = (
            "Device(s) to use for inference, e.g., `cpu`, `gpu`, `npu`, `gpu:0`, "
            "`gpu:0,1`. If multiple devices are specified, inference will be "
            "performed in parallel. Note that parallel inference is not always "
            "supported. By default, GPU 0 will be used if available; otherwise, "
            "the CPU will be used."
        )
    else:
        help_ = (
            "Device to use for inference, e.g., `cpu`, `gpu`, `npu`, `gpu:0`. "
            "By default, GPU 0 will be used if available; otherwise, the CPU "
            "will be used."
        )
    parser.add_argument("--device", type=str, default=DEFAULT_DEVICE, help=help_)
    parser.add_argument(
        "--enable_hpi",
        type=str2bool,
        default=default_enable_hpi,
        help="Enable the high performance inference.",
    )
    parser.add_argument(
        "--use_tensorrt",
        type=str2bool,
        default=DEFAULT_USE_TENSORRT,
        help=(
            "Whether to use the Paddle Inference TensorRT subgraph engine. If "
            "the model does not support TensorRT acceleration, even if this "
            "flag is set, acceleration will not be used."
        ),
    )
    parser.add_argument(
        "--precision",
        type=str,
        default=DEFAULT_PRECISION,
        choices=SUPPORTED_PRECISION_LIST,
        help=(
            "Precision for TensorRT when using the Paddle Inference TensorRT "
            "subgraph engine."
        ),
    )
    parser.add_argument(
        "--enable_mkldnn",
        type=str2bool,
        default=DEFAULT_ENABLE_MKLDNN,
        help=(
            "Enable MKL-DNN acceleration for inference. If MKL-DNN is "
            "unavailable or the model does not support it, acceleration will "
            "not be used even if this flag is set."
        ),
    )
    parser.add_argument(
        "--mkldnn_cache_capacity",
        type=int,
        default=DEFAULT_MKLDNN_CACHE_CAPACITY,
        help="MKL-DNN cache capacity.",
    )
    parser.add_argument(
        "--cpu_threads",
        type=int,
        default=DEFAULT_CPU_THREADS,
        help="Number of threads to use for inference on CPUs.",
    )
    parser.add_argument(
        "--enable_cinn",
        type=str2bool,
        default=DEFAULT_USE_CINN,
        help="Whether to use the CINN compiler.",
    )