from __future__ import annotations

from typing import TYPE_CHECKING, Any

import paddle
from paddle.distributed.communication import stream
from paddle.distributed.communication.group import (
    _get_global_group,
    _warn_cur_rank_not_in_group,
)
from paddle.distributed.communication.serialization_utils import (
    convert_object_to_tensor,
)

if TYPE_CHECKING:
    from paddle import Tensor
    from paddle.base.core import task
    from paddle.distributed.communication.group import Group


def send(
    tensor: Tensor,
    dst: int = 0,
    group: Group | None = None,
    sync_op: bool = True,
) -> task | None:
    """
    Send a tensor to the receiver.

    Args:
        tensor (Tensor): The Tensor to send. Its data type
            should be float16, float32, float64, int32, int64, int8, uint8, bool or bfloat16.
        dst (int): The destination rank id.
        group (Group, optional): The group instance returned by new_group, or None for the global default group. Default: None.
        sync_op (bool, optional): Whether this op is a sync op. The default value is True.

    Returns:
        Return a task object.

    Examples:
        .. code-block:: python

            >>> # doctest: +REQUIRES(env: DISTRIBUTED)
            >>> import paddle
            >>> import paddle.distributed as dist

            >>> dist.init_parallel_env()
            >>> if dist.get_rank() == 0:
            ...     data = paddle.to_tensor([7, 8, 9])
            ...     dist.send(data, dst=1)
            ... else:
            ...     data = paddle.to_tensor([1, 2, 3])
            ...     dist.recv(data, src=0)
            >>> print(data)
            >>> # [7, 8, 9] (2 GPUs)
    F)r   r   r   Zuse_calc_stream)r   send)r   r   r   r    r   q/home/app/PaddleOCR-VL-test/.venv_paddleocr/lib/python3.10/site-packages/paddle/distributed/communication/send.pyr   #   s   $
r   c                 C  s   t | ||ddS )aB  
    Send a tensor asynchronously.

    Args:
        tensor (Tensor): The Tensor to send. Its data type
            should be float16, float32, float64, int32, int64, int8, uint8, bool or bfloat16.
        dst (int): The destination rank.
        group (Group, optional): The group instance returned by new_group, or None for the global default group. Default: None.

    Returns:
        Return a task object.

    Warning:
        This API only supports the dygraph mode.

    Examples:
        .. code-block:: python

            >>> # doctest: +REQUIRES(env: DISTRIBUTED)
            >>> import paddle
            >>> import paddle.distributed as dist

            >>> dist.init_parallel_env()
            >>> if dist.get_rank() == 0:
            ...     data = paddle.to_tensor([7, 8, 9])
            ...     task = dist.isend(data, dst=1)
            ... else:
            ...     data = paddle.to_tensor([1, 2, 3])
            ...     task = dist.irecv(data, src=0)
            >>> task.wait()  # type: ignore[union-attr]
            >>> print(data)
            >>> # [7, 8, 9] (2 GPUs)

    F)r   )r   )r   r   r   r   r   r   isendL   s   #r   object_list	list[Any]
int | Nonedst_in_groupc           	      C  s   | du s
t | dkrtd|du rt n|}t|rdS |dur/|dur)td||}n|du r5dn|}tdd | D  \}}dd |D }tj|dd	}t|||d
 t |dkrb|d }nt	|}t|||d
 dS )a[  
    Send a list of Python objects to the receiver.

    Args:
        object_list (list): The list of Python objects to send.
        dst (int, optional): The destination rank id. Default: 0.
        group (Group, optional): The group instance returned by new_group, or None for the global default group. Default: None.
        dst_in_group (int, optional): The destination rank within the group. Cannot be specified together with dst. Default: None.

    Returns:
        This function does not return any value.

    Examples:
        .. code-block:: python

            >>> # doctest: +REQUIRES(env: DISTRIBUTED)
            >>> import paddle
            >>> import paddle.distributed as dist

            >>> dist.init_parallel_env()
            >>> if dist.get_rank() == 0:
            ...     data = ["hello", {"key": 100}, [1, 2, 3]]
            ...     dist.send_object_list(data, dst=1)
            ... else:
            ...     data = [None] * 3  # type: ignore
            ...     dist.recv_object_list(data, src=0)
            >>> print(data)
            >>> # ["hello", {"key": 100}, [1, 2, 3]] (2 GPUs)
    Nr   z#object_list cannot be None or emptyz7Cannot specify both 'dst' and 'dst_in_group' arguments.c                 S  s   g | ]}t |qS r   r   ).0objr   r   r   
<listcomp>       z$send_object_list.<locals>.<listcomp>c                 S  s   g | ]}|  qS r   )item)r   sizer   r   r   r       r!   Zint64)Zdtype)r   r      )
len
ValueErrorr   r   Zget_global_rankzippaddleZ	to_tensorr   concat)	r   r   r   r   Ztensor_listZ	size_listZsize_list_valuesZobject_sizes_tensorZobject_tensorr   r   r   send_object_listr   s,   #

r*   )r   NT)
r   r
   r   r   r   r   r   r   r   r   )N)r   r
   r   r   r   r   r   r   )NNN)r   r   r   r   r   r   r   r   )
__future__r   typingr   r   r(   Z paddle.distributed.communicationr   Z&paddle.distributed.communication.groupr   r   Z4paddle.distributed.communication.serialization_utilsr	   r
   Zpaddle.base.corer   r   r   r   r*   r   r   r   r   <module>   s&   )(