o
    + i)                    @  st  d dl mZ d dlZd dlZd dlZd dlZd dlZd dlZd dlm	Z	 d dlm
Z
mZmZmZ d dlZd dlmZ d dlZd dlmZmZmZ d dlmZ d dlmZmZmZ d d	lmZ d d
lmZ d dl m!Z!m"Z"m#Z#m$Z$ d dl%m&Z& d dl'm(Z(m)Z) d dl*m+Z+m,Z,m-Z.m/Z/m0Z0m1Z1m2Z2m3Z3m4Z4 d dl5m6Z6 d dl7m8Z8m9Z9 d dl:m;Z; d dl<m=Z= d dl>m?Z? e
rd dl@mAZAmBZB d dlCmDZDmEZEmFZFmGZG d dlHmIZI g ZJedegef ZKedeegef ZLeeMeNef ej	eNef f ZOeeOgdf ZPeQdZReQdZSdd ZTdd ZUdd ZVd d! ZWd"d# ZXd$d% ZYG d&d' d'e6ZZG d(d) d)Z[G d*d+ d+Z\G d,d dZ]dS )-    )annotationsN)OrderedDict)TYPE_CHECKINGAnyCallableUnion)Self)Tensornnprofiler)ValueSet)core	frameworkunique_name)VarDesc)no_grad)_convert_into_variablein_declarative_modein_sot_simulation_modein_to_static_mode)_append_activation_in_dygraph)Executorglobal_scope)		ParameterProgram_current_expected_placeconvert_np_dtype_to_dtype_default_main_programin_dygraph_modein_pir_modename_structpaddle_type_to_proto_type)LayerHelperBase)ShardedStateDictbuild_sharded_state_dict)	ParamAttr)in_profiler_mode)
deprecated)IterableSequence)	DTypeLikeParamAttrLike	PlaceLike	ShapeLike)InitializerLayerz(.)([A-Z][a-z]+)z([a-z])([A-Z])c                 C  sT   t  s(| jjdk rtt  j| j_d| j_dS d| j_t	| j
 d dS dS )zC
    A pre-hook to mark op numbers before enter layer.forward.
    r   TFzX has recorded the op information before. Please check whether you call this layer twice.N)r   _op_recorderstartlenr   current_blockopsis_validwarningswarn
_full_name)layerinputs r;   b/home/app/PaddleOCR-VL-test/.venv_paddleocr/lib/python3.10/site-packages/paddle/nn/layer/layers.pyrecord_program_ops_pre_hookU   s   

r=   c           
      C  s   t  sO| jjrQ| jj}tt  j}|dkr||ksJ t  j|| }|| j_|| j_|D ]}| j	
 D ]
\}}||| q8q1| jjD ]
}	|	  qHdS dS dS )zd
    A post-hook to append customized attributes into all operators generated in current layer.
    r   N)r   r0   r5   r1   r2   r   r3   r4   end_customized_attrsitemsZ	_set_attrhooksremove)
r9   r:   outputsr1   r>   r4   op	attr_namevalZhook_helperr;   r;   r<   !set_op_customized_attrs_post_hookf   s   
rG   c                 C  s   dddd}| | | S )NZlinearZ	embedding)Zrow_parallel_linearZcolumn_parallel_linearZvocab_parallel_embedding)get)Z
dist_scopemappingr;   r;   r<   _scope_dist2single|   s
   rJ   c                 C  s   t d| }td| S )Nz\1_\2)_first_cap_resub_all_cap_relower)names1r;   r;   r<   _convert_camel_to_snake   s   rQ   c                 C  sd   |  d}t|dkr| S g }t|D ]\}}|dkr&|t|d |  q|d d d| S )N
   r    )splitr2   	enumerateappendstrjoin)stringindentrP   s2idxliner;   r;   r<   
_addindent   s   
r_   c                 C  s$   t | |v rd S | j|ddd d S )NTF)dtypefloating_onlyinclude_sublayers)type_to_impl)r9   r`   excluded_layersr;   r;   r<   _layer_trans_dtype   s   rf   c                      sv   e Zd Z fddZ					dddZdd Zdd	 Zdd
dZdddZdd Z	dd Z
dddZdd Z  ZS )LayerObjectHelperc                   s   t  j||d d S )N)
layer_type)super__init__selfrO   	__class__r;   r<   rj      s   zLayerObjectHelper.__init__Nc                 C  s   | j  j|||||dS )a  append an operator for this layer object.

           Args:
               type: operator type
               inputs: input variable of the operator
               dtype: data type of this parameter
               is_bias: if this is a bias parameter
               default_initializer: set the default initializer for this parameter

        Returns created parameter Variable.
        )rc   r:   rC   attrsstop_gradient)main_programr3   	append_op)rl   rc   r:   rC   ro   rp   r;   r;   r<   rr      s   
zLayerObjectHelper.append_opc                 C  sH   |}g }t |ttfr|D ]
}|| | q|S || | |S N)
isinstancelisttuplerW   Zto_variable)rl   	inputs_inr:   retinpr;   r;   r<   _multiple_input   s   z!LayerObjectHelper._multiple_inputc                 C  s*   |  |}t|dkr| j d|d S )NrS   z layer only takes one input inr   )rz   r2   rh   )rl   rw   r:   r;   r;   r<   _input   s   
zLayerObjectHelper._inputc                 C  s   |}t |tr
|g}t|dkrt||krtd| j t|dkr?|dkr?d g| }t|D ]}t|d ||< q1|}|S )NrS   zparameter number mismatch in r   )rt   r%   r2   
ValueErrorrO   rangecopydeepcopy)rl   lengthparam_attr_inZ
param_attrtmpir;   r;   r<   _multiple_param_attr   s   

z&LayerObjectHelper._multiple_param_attrc                 c  sd    t |}t|trtd| j |dur|ng }| |}| t||}t	||E dH  dS )zAccess all inputs and params one by one

           Args:
               inputs_in: inputs to be iter
               param_attr_in: param_attr to be iter

        Returns input, param_attr
        z"Param_attr should not be False in N)
r%   Z_to_attrrt   boolr|   rO   rz   r   r2   zip)rl   rw   r   r:   Zparam_attrsr;   r;   r<   iter_inputs_and_params   s   
	

z(LayerObjectHelper.iter_inputs_and_paramsc                 C  sd   |dur|ng }|  |}d}|D ]}|du r|j}q||jkr/td| d|j d| j q|S )zGet input data type

           Args:
               inputs_in: inputs wanted know the data type

        Returns dtype of the input
        NzData Type mismatch: z to z in )rz   r`   r|   rO   )rl   rw   r:   r`   Zeachr;   r;   r<   input_dtype   s   

zLayerObjectHelper.input_dtypec                 C  s4   | j  |}t|tstd| d| j |S )z}Get parameter specifically

           Args:
               name: parameter's name

        Returns target parameter
        zno Parameter name z
 found in )rq   global_blockvarrt   r   r|   rO   )rl   rO   paramr;   r;   r<   get_parameter  s   
zLayerObjectHelper.get_parameterc                 C  s   |}|du r|S t |trd|i}n
t| d| j |dur&|r&||d< |d}t r6t|||}|S | j|jd}| j	|d|gid|gi|d |S )	a%  Append activation

            Args:
                input_var: the input variable. The len(input_var.shape) is
                larger or equal than 2.
                act: activation type
                use_cudnn: if use cudnn

        Return the Variable of after append activation
        Nrc   z should be unicode or str in 	use_cudnnr`   XZOut)rc   r:   rC   ro   )
rt   rX   	TypeErrorrO   popr   r   Z"create_variable_for_type_inferencer`   rr   )rl   Z	input_varZactr   Zact_typeresr   r;   r;   r<   append_activation  s(   


z#LayerObjectHelper.append_activationc                 C  s(   |}t ||std|| j|j| jdS )zCheck if the input parameter is instance of input class

            Args:
                param: parameter to be check
                cls: class of the parameter

        Return result of the check (True or False)
        z?The input {0} parameter of method {1} must be {2}, in layer {3}N)rt   r   rh   __name__rO   )rl   r   clsr;   r;   r<   is_instance2  s   	
zLayerObjectHelper.is_instance)NNNNNrs   )NN)r   
__module____qualname__rj   rr   rz   r{   r   r   r   r   r   r   __classcell__r;   r;   rm   r<   rg      s     



#rg   c                   @  s   e Zd ZdZdddZdS )LayerOpsRecorderz=
    Record generated operators information in nn.Layer.
    NFc                 C  s"   || _ || _|| _|| _|| _d S rs   )r1   r>   r4   r5   rA   )rl   r1   r>   r4   r5   rA   r;   r;   r<   rj   K  s
   
zLayerOpsRecorder.__init__)r   r   NFN)r   r   r   __doc__rj   r;   r;   r;   r<   r   F  s    r   c                   @  s8   e Zd ZU dZdZded< dddddZdddZdS )HookRemoveHelperz3A HookRemoveHelper that can be used to remove hook.r   intnext_hook_idNextra_hook_dictrA   +typing.OrderedDict[int, Callable[..., Any]]r   r   returnNonec                C  sD   t || _tj| _t jd7  _d | _|d ur t || _d S d S )NrS   )weakrefref
_hooks_refr   r   _hook_id_extra_hooks_ref)rl   rA   r   r;   r;   r<   rj   X  s   zHookRemoveHelper.__init__c                 C  s^   |   }|d ur| j|v r|| j= | jd ur)|  }|d ur+| j|v r-|| j= d S d S d S d S rs   )r   r   r   )rl   rA   Zextra_hooksr;   r;   r<   rB   f  s   
zHookRemoveHelper.remove)rA   r   r   r   r   r   )r   r   )r   r   r   r   r   __annotations__rj   rB   r;   r;   r;   r<   r   S  s   
 r   c                   @  s  e Zd ZU dZded< 	ddddZdddZdddZdddZdddZ	dddZ
dd dd#d$Z					ddd0d1Zed2d3d4d5			ddd9d:Z			ddd;d<Zddd@dAZdddBdCZddEdFZddHdIZdddLdMZ	N	=	=dddRdSZ	N			=dddVdWZ	=dddYdZZddd[d\Z	N	=	=ddd]d^Zddd`daZddedfZddhdiZddjdkZddldmZddndoZddqdrZ ddtduZ!dvdw Z"ddydzZ#dd|d}Z$dd~dZ%dddZ&dddZ'dddZ(dddZ)dddZ*dddZ+		=	NddddZ,		=	N		=	=ddddZ-		=	N	=	=ddddZ.		=	N	=	=ddddZ/	NddddZ0e1j2	=ddddZ3			ddddZ4	=ddddZ5dddZ6				=	ddddZ7dddZ8e3Z9e3Z:	ddddZ;	ddddZ<	ddddZ=dS )r/   a  
    Dynamic graph Layer based on OOD, includes the parameters of the layer, the structure of the forward graph and so on.

    Parameters:
        name_scope (str, optional): prefix name used by the layer to name parameters.
            If prefix is "my_layer", parameter name in MyLayer
            can be "my_layer_0.w_n", where "w" is the parameter
            base name and "n" is an unique suffix auto-generated.
            If None, prefix name will be snake cased class name. Default: None.
        dtype(str, optional): data type of this parameter.
                If set str, it can be "bool",  "float16", "float32", "float64",
                "int8", "int16", "int32", "int64", "uint8" or "uint16".
                Default: "float32"

    Returns:
        None

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> paddle.seed(100)

            >>> class MyLayer(paddle.nn.Layer):
            ...     def __init__(self):
            ...         super().__init__()
            ...         self._linear = paddle.nn.Linear(1, 1)
            ...         self._dropout = paddle.nn.Dropout(p=0.5)
            ...
            ...     def forward(self, input):
            ...         temp = self._linear(input)
            ...         temp = self._dropout(temp)
            ...         return temp
            ...
            >>> x = paddle.randn([10, 1], 'float32')
            >>> mylayer = MyLayer()
            >>> mylayer.eval()  # set mylayer._dropout to eval mode
            >>> out = mylayer(x)
            >>> mylayer.train()  # set mylayer._dropout to train mode
            >>> out = mylayer(x)
            >>> print(out)
            Tensor(shape=[10, 1], dtype=float32, place=Place(cpu), stop_gradient=False,
            [[-3.44879317],
             [ 0.        ],
             [ 0.        ],
             [-0.73825276],
             [ 0.        ],
             [ 0.        ],
             [ 0.64444798],
             [-3.22185946],
             [ 0.        ],
             [-0.68077987]])
    r   trainingNfloat32
name_scope
str | Noner`   r*   r   r   c                 C  s   d| _ |d u rt| jj}t|}t|| _t| j| _	d| _
|| _t | _t | _t | _t | _t | _t | _tg g d| _i | _t | _t | _t | _d| _t | _t | _d S )NTF)r4   rA   )r   rQ   rn   r   rJ   r   generater8   rg   _helper_built_dtyper   Z_init_in_dynamic_moder   _parameters_buffersset!_non_persistable_buffer_names_set_sub_layers_loaddict_holderr   r0   r?   _forward_pre_hooks_forward_post_hooks#_forward_pre_hooks_with_kwargs_flagZ_cast_to_low_precision_state_dict_hooksZ_original_funcs)rl   r   r`   r;   r;   r<   rj     s4   zLayer.__init__r   c                 C  0   t  r	t   d| _|  D ]}d|_q| S )a=  

        Sets this Layer and all its sublayers to training mode.
        This only effects certain modules like `Dropout` and `BatchNorm`.

        Returns:
            Layer: self

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> paddle.seed(100)

                >>> class MyLayer(paddle.nn.Layer):
                ...     def __init__(self):
                ...         super().__init__()
                ...         self._linear = paddle.nn.Linear(1, 1)
                ...         self._dropout = paddle.nn.Dropout(p=0.5)
                ...
                ...     def forward(self, input):
                ...         temp = self._linear(input)
                ...         temp = self._dropout(temp)
                ...         return temp
                ...
                >>> x = paddle.randn([10, 1], 'float32')
                >>> mylayer = MyLayer()
                >>> mylayer.eval()  # set mylayer._dropout to eval mode
                >>> out = mylayer(x)
                >>> mylayer.train()  # set mylayer._dropout to train mode
                >>> out = mylayer(x)
                >>> print(out)
                Tensor(shape=[10, 1], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[-3.44879317],
                 [ 0.        ],
                 [ 0.        ],
                 [-0.73825276],
                 [ 0.        ],
                 [ 0.        ],
                 [ 0.64444798],
                 [-3.22185946],
                 [ 0.        ],
                 [-0.68077987]])

        T)r   r   _dygraph_tracerZ
train_moder   	sublayersrl   r9   r;   r;   r<   train  s   1zLayer.trainc                 C  r   )a  
        Sets this Layer and all its sublayers to evaluation mode.
        This only effects certain modules like `Dropout` and `BatchNorm`.

        Returns:
            Layer: self

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> paddle.seed(100)
                >>> class MyLayer(paddle.nn.Layer):
                ...     def __init__(self):
                ...         super().__init__()
                ...         self._linear = paddle.nn.Linear(1, 1)
                ...         self._dropout = paddle.nn.Dropout(p=0.5)
                ...
                ...     def forward(self, input):
                ...         temp = self._linear(input)
                ...         temp = self._dropout(temp)
                ...         return temp
                ...
                >>> x = paddle.randn([10, 1], 'float32')
                >>> mylayer = MyLayer()
                >>> mylayer.eval()  # set mylayer._dropout to eval mode
                >>> out = mylayer(x)
                >>> print(out)
                Tensor(shape=[10, 1], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[-1.72439659],
                 [ 0.31532824],
                 [ 0.01192369],
                 [-0.36912638],
                 [-1.63426113],
                 [-0.93169814],
                 [ 0.32222399],
                 [-1.61092973],
                 [ 0.77209264],
                 [-0.34038994]])

        F)r   r   r   Z	eval_moder   r   r   r;   r;   r<   eval  s   -z
Layer.evalfnCallable[[Self], None]c                 C  s$   |   D ]}|| q||  | S )a	  

        Applies ``fn`` recursively to every sublayer (as returned by ``.sublayers()``)
        as well as self. Typical use includes initializing the parameters of a model.

        Parameters:
            fn (function): a function to be applied to each sublayer

        Returns:
            Layer, self

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> import paddle.nn as nn
                >>> paddle.seed(2023)

                >>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))

                >>> def init_weights(layer):
                ...     if type(layer) == nn.Linear:
                ...         print('before init weight:', layer.weight.numpy())
                ...         new_weight = paddle.full(shape=layer.weight.shape, dtype=layer.weight.dtype, fill_value=0.9)
                ...         layer.weight.set_value(new_weight)
                ...         print('after init weight:', layer.weight.numpy())
                ...
                >>> net.apply(init_weights)

                >>> print(net.state_dict())
                before init weight: [[ 0.89611185  0.04935038]
                                     [-0.5888344   0.99266374]]
                after init weight: [[0.9 0.9]
                                    [0.9 0.9]]
                before init weight: [[-0.18615901 -0.22924072]
                                     [ 1.1517721   0.59859073]]
                after init weight: [[0.9 0.9]
                                    [0.9 0.9]]
                OrderedDict([('0.weight', Parameter containing:
                Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[0.89999998, 0.89999998],
                 [0.89999998, 0.89999998]])), ('0.bias', Parameter containing:
                Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=False,
                [0., 0.])), ('1.weight', Parameter containing:
                Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[0.89999998, 0.89999998],
                 [0.89999998, 0.89999998]])), ('1.bias', Parameter containing:
                Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=False,
                [0., 0.]))])
        )childrenapply)rl   r   r9   r;   r;   r<   r   E  s   3zLayer.applyrX   c                 C     | j S )a  

        Full name for this layer, composed by name_scope + "/" + MyLayer.__class__.__name__

        Returns:
            str, full name of this layer.

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> class LinearNet(paddle.nn.Layer):
                ...     def __init__(self):
                ...         super().__init__(name_scope = "demo_linear_net")
                ...         self._linear = paddle.nn.Linear(1, 1)
                ...
                ...     def forward(self, x):
                ...         return self._linear(x)
                ...
                >>> linear_net = LinearNet()
                >>> print(linear_net.full_name())
                demo_linear_net_0

        )r8   rl   r;   r;   r<   	full_name  s   zLayer.full_namehook_ForwardPostHookr   c                 C     t | j}|| j|j< |S )aW  

        Register a forward post-hook for Layer. The hook will be called after `forward` function has been computed.

        It should have the following form, `input` and `output` of the `hook` is `input` and `output` of the `Layer` respectively.
        User can use forward post-hook to change the output of the Layer or perform information statistics tasks on the Layer.

        hook(Layer, input, output) -> None or modified output

        Parameters:
            hook(function): a function registered as a forward post-hook

        Returns:
            HookRemoveHelper, a HookRemoveHelper object that can be used to remove the added hook by calling `hook_remove_helper.remove()` .

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> import numpy as np

                >>> # the forward_post_hook change the output of the layer: output = output * 2
                >>> def forward_post_hook(layer, input, output):
                ...     # user can use layer, input and output for information statistics tasks
                ...
                ...     # change the output
                ...     return output * 2
                ...
                >>> linear = paddle.nn.Linear(13, 5)

                >>> # register the hook
                >>> forward_post_hook_handle = linear.register_forward_post_hook(forward_post_hook)

                >>> value1 = np.arange(26).reshape(2, 13).astype("float32")
                >>> in1 = paddle.to_tensor(value1)

                >>> out0 = linear(in1)

                >>> # remove the hook
                >>> forward_post_hook_handle.remove()

                >>> out1 = linear(in1)

                >>> # hook change the linear's output to output * 2, so out0 is equal to out1 * 2.
                >>> assert (out0.numpy() == (out1.numpy()) * 2).any()

        )r   r   r   rl   r   hook_remove_helperr;   r;   r<   register_forward_post_hook  s   
2z Layer.register_forward_post_hookF)with_kwargs_ForwardPreHookr   c                C  s0   t | j| jd}|| j|j< |rd| j|j< |S )aO  

        Register a forward pre-hook for Layer. The hook will be called before `forward` function has been computed.

        It should have the following form, `input` of the `hook` is `input` of the `Layer`,
        hook can either return a tuple or a single modified value in the hook. We will wrap the value into a tuple if
        a single value is returned(unless that value is already a tuple).
        User can use forward pre-hook to change the input of the Layer or perform information statistics tasks on the Layer.

        hook(Layer, input) -> None or modified input

        Parameters:
            hook(function): a function registered as a forward pre-hook

        Returns:
            HookRemoveHelper, a HookRemoveHelper object that can be used to remove the added hook by calling `hook_remove_helper.remove()` .

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> import numpy as np

                >>> # the forward_pre_hook change the input of the layer: input = input * 2
                >>> def forward_pre_hook(layer, input):
                ...     # user can use layer and input for information statistics tasks
                ...
                ...     # change the input
                ...     input_return = (input[0] * 2)
                ...     return input_return
                ...
                >>> linear = paddle.nn.Linear(13, 5)

                >>> # register the hook
                >>> forward_pre_hook_handle = linear.register_forward_pre_hook(forward_pre_hook)

                >>> value0 = np.arange(26).reshape(2, 13).astype("float32")
                >>> in0 = paddle.to_tensor(value0)
                >>> out0 = linear(in0)

                >>> # remove the hook
                >>> forward_pre_hook_handle.remove()

                >>> value1 = value0 * 2
                >>> in1 = paddle.to_tensor(value1)
                >>> out1 = linear(in1)

                >>> # hook change the linear's input to input * 2, so out0 is equal to out1.
                >>> assert (out0.numpy() == out1.numpy()).any()
        r   T)r   r   r   r   )rl   r   r   r   r;   r;   r<   register_forward_pre_hook  s   5zLayer.register_forward_pre_hookshaper-   attrParamAttrLike | NoneDTypeLike | Noneis_biasdefault_initializerInitializer | NonedevicePlaceLike | Noner	   c                 C  s8   t |}t|tr|dkrd}| jj||||||dS )aB	  Create parameters for this layer.

        Parameters:
            shape(list): Shape of the parameter. The data type in the list must be int.
            attr(ParamAttr, optional): Parameter attribute of weight. Please refer to :ref:`api_paddle_ParamAttr`. Default: None.
            dtype(str, optional): Data type of this parameter.
                If set str, it can be "bool",  "float16", "float32", "float64",
                "int8", "int16", "int32", "int64", "uint8" or "uint16". Default: "float32".
            is_bias(bool, optional): if this is a bias parameter. Default: False.
            default_initializer(Initializer, optional): the default initializer for this parameter.
                If set None, default initializer will be set to paddle.nn.initializer.Xavier and paddle.nn.initializer.Constant
                for non-bias and bias parameter, respectively. Default: None.
            device(PlaceLike, optional): the device place for the parameter. Default: None.

        Returns:
            :Tensor, created parameter.

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> paddle.seed(2023)

                >>> class MyLayer(paddle.nn.Layer):
                ...     def __init__(self):
                ...         super().__init__()
                ...         self._linear = paddle.nn.Linear(1, 1)
                ...         w_tmp = self.create_parameter([1,1])
                ...         self.add_parameter("w_tmp", w_tmp)
                ...
                ...     def forward(self, input):
                ...         return self._linear(input)
                ...
                >>> mylayer = MyLayer()
                >>> for name, param in mylayer.named_parameters():
                ...     print(name, param)      # will print w_tmp,_linear.weight,_linear.bias
                w_tmp Parameter containing:
                Tensor(shape=[1, 1], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[0.06979191]])
                _linear.weight Parameter containing:
                Tensor(shape=[1, 1], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[1.26729357]])
                _linear.bias Parameter containing:
                Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=False,
                [0.])
         N)r   )r~   r   rt   rX   r   create_parameter)rl   r   r   r`   r   r   r   Z	temp_attrr;   r;   r<   r     s   
7zLayer.create_parameterz2.0.0zpaddle.nn.Layer.create_tensorz(New api in create_tensor, easier to use.)ZsinceZ	update_toreasonrO   persistablebool | Nonec                 C  P   |durd | j|g}ntd | jdg}| jj j|||tj	j
jdS )a,  

        Create Tensor for this layer.

        Parameters:
            name(str, optional): name of the tensor. Please refer to :ref:`api_guide_Name` . Default: None

            persistable(bool, optional): if set this tensor persistable. Default: False

            dtype(str, optional): data type of this parameter. If set str, it can be "bool", "float16", "float32", "float64","int8", "int16", "int32", "int64", "uint8" or "uint16". If set None, it will be "float32". Default: None

        Returns:
            Tensor, created Tensor.

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> class MyLinear(paddle.nn.Layer):
                ...     def __init__(self,
                ...                 in_features,
                ...                 out_features):
                ...         super().__init__()
                ...         self.linear = paddle.nn.Linear( 10, 10)
                ...
                ...         self.back_var = self.create_variable(name = "linear_tmp_0", dtype=self._dtype)
                ...
                ...     def forward(self, input):
                ...         out = self.linear(input)
                ...         paddle.assign( out, self.back_var)
                ...
                ...         return out

        N._generated_varrO   r   r`   rc   rY   r8   r   r   r   rq   r3   Z
create_varr   r   VarTypeZDENSE_TENSORrl   rO   r   r`   var_namer;   r;   r<   create_variableO  s   .zLayer.create_variablec                 C  r   )a]  

        Create Tensor for this layer.

        Parameters:
            name(str, optional): name of the tensor. Please refer to :ref:`api_guide_Name` . Default: None.
            persistable(bool, optional): if set this tensor persistable. Default: False.
            dtype(str, optional): data type of this parameter.
                If set str, it can be "bool",  "float16", "float32", "float64",
                "int8", "int16", "int32", "int64", "uint8" or "uint16".
                If set None, it will be "float32". Default: None.

        Returns:
            Tensor, created Tensor.

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> class MyLinear(paddle.nn.Layer):
                ...     def __init__(self,
                ...                  in_features,
                ...                  out_features):
                ...         super().__init__()
                ...         self.linear = paddle.nn.Linear(10, 10)
                ...
                ...         self.back_var = self.create_tensor(name = "linear_tmp_0", dtype=self._dtype)
                ...
                ...     def forward(self, input):
                ...         out = self.linear(input)
                ...         paddle.assign(out, self.back_var)
                ...
                ...         return out

        Nr   r   r   r   r   r;   r;   r<   create_tensor  s   *zLayer.create_tensorTrb   list[Tensor]c                 C     dd | j |dD }|S )a  

        Returns a list of all Parameters from current layer and its sub-layers.

        Parameters:
            include_sublayers (bool, optional): Whether to return the parameters of the sublayer.
                If True, the returned list contains the parameters of the sublayer.
                Default: True.

        Returns:
            list, list of Tensor, a list of Parameters.

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> paddle.seed(100)

                >>> linear = paddle.nn.Linear(1, 1)
                >>> print(linear.parameters())
                [Parameter containing:
                Tensor(shape=[1, 1], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[0.18551230]]), Parameter containing:
                Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=False,
                [0.])]

        c                 S     g | ]\}}|qS r;   r;   ).0_r   r;   r;   r<   
<listcomp>      z$Layer.parameters.<locals>.<listcomp>rb   )named_parametersrl   rb   rx   r;   r;   r<   
parameters  s   zLayer.parametersc                 C  s   g d}t |tjtjfst|tu rU||v rUt |ttjfr$t|}|| _| 	 D ]}||_q+| j
ddD ]
\}}|d| q7| jddD ]
\}}|d| qH| S tdt| )a  

        Casts all parameters and buffers to dtype and then return the Layer.

        Parameters:
            dtype(str|paddle.dtype|numpy.dtype): target data type of layer.
                If set str, it can be "bool", "bfloat16", "float16", "float32", "float64",
                "int8", "int16", "int32", "int64", "uint8", "complex64", "complex128".
                Default: None

        Returns:
            Layer, self

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> import paddle.nn as nn
                >>> weight_attr = paddle.ParamAttr(name="weight",initializer=paddle.nn.initializer.Constant(value=1.5))
                >>> bias_attr = paddle.ParamAttr(name="bias",initializer=paddle.nn.initializer.Constant(value=2.5))

                >>> linear = paddle.nn.Linear(2, 2, weight_attr=weight_attr, bias_attr=bias_attr).to(device="cpu",dtype="float32")
                >>> print(linear)
                Linear(in_features=2, out_features=2, dtype=float32)
                >>> print(linear.parameters())
                [Parameter containing:
                Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
                    [[1.50000000, 1.50000000],
                        [1.50000000, 1.50000000]]), Parameter containing:
                Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=False,
                    [2.50000000, 2.50000000])]

                >>> linear=linear.astype("int8")
                >>> print(linear)
                Linear(in_features=2, out_features=2, dtype=paddle.int8)
                >>> print(linear.parameters())
                [Parameter containing:
                Tensor(shape=[2, 2], dtype=int8, place=Place(cpu), stop_gradient=False,
                    [[1, 1],
                        [1, 1]]), Parameter containing:
                Tensor(shape=[2], dtype=int8, place=Place(cpu), stop_gradient=False,
                    [2, 2])]

        )bfloat16float16r   Zfloat64Zint8Zint16Zint32Zint64Zuint8Z	complex64Z
complex128r   Tr   Nzdtype value error, must be 'bfloat16', 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8', 'complex64', 'complex128', 'bool', or paddle.dtype, numpy.dtype, but receive )rt   paddler`   nprc   rX   r   r   r   r   r   Z_tonamed_bufferstor|   )rl   r`   Zvalid_dtypesr9   r   r   bufferr;   r;   r<   astype  s(   -
zLayer.astypeIterable[Layer]c                 c  s    |   D ]\}}|V  qdS )ar  

        Returns an iterator over immediate children layers.

        Yields:
            Layer: a child layer

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> linear1 = paddle.nn.Linear(10, 3)
                >>> linear2 = paddle.nn.Linear(3, 10, bias_attr=False)
                >>> model = paddle.nn.Sequential(linear1, linear2)

                >>> layer_list = list(model.children())

                >>> print(layer_list)
                [Linear(in_features=10, out_features=3, dtype=float32), Linear(in_features=3, out_features=10, dtype=float32)]

        N)named_children)rl   r   r9   r;   r;   r<   r   8  s   zLayer.childrenIterable[tuple[str, Layer]]c                 c  sD    t  }| j D ]\}}|dur||vr|| ||fV  q	dS )a  Returns an iterator over immediate children layers, yielding both
        the name of the layer as well as the layer itself.

        Yields:
            (string, Layer): Tuple containing a name and child layer

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> linear1 = paddle.nn.Linear(10, 3)
                >>> linear2 = paddle.nn.Linear(3, 10, bias_attr=False)
                >>> model = paddle.nn.Sequential(linear1, linear2)
                >>> for prefix, layer in model.named_children():
                ...     print(prefix, layer)
                0 Linear(in_features=10, out_features=3, dtype=float32)
                1 Linear(in_features=3, out_features=10, dtype=float32)
        N)r   r   r@   add)rl   memorO   r9   r;   r;   r<   r   R  s   

zLayer.named_childreninclude_selflist[Layer]c                 C  r   )a  

        Returns a list of sub layers.

        Parameters:
            include_self(bool, optional): Whether return self as sublayers. Default: False.

        Returns:
            list of Layer, a list of sub layers.

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> class MyLayer(paddle.nn.Layer):
                ...     def __init__(self):
                ...         super().__init__()
                ...         self._linear = paddle.nn.Linear(1, 1)
                ...         self._dropout = paddle.nn.Dropout(p=0.5)
                ...
                ...     def forward(self, input):
                ...         temp = self._linear(input)
                ...         temp = self._dropout(temp)
                ...         return temp
                ...
                >>> mylayer = MyLayer()
                >>> print(mylayer.sublayers())
                [Linear(in_features=1, out_features=1, dtype=float32), Dropout(p=0.5, axis=None, mode=upscale_in_train)]

        c                 S  r   r;   r;   )r   r   r9   r;   r;   r<   r     r   z#Layer.sublayers.<locals>.<listcomp>)r  )named_sublayers)rl   r  rx   r;   r;   r<   r   l  s    
zLayer.sublayersr   prefixremove_duplicateIterable[tuple[str, Tensor]]c                 c  s    t  r
t s
t nt }|r| j|d|dnt|g| g}|D ]/\}}|j }|D ]#\}	}
|
du s7|
|v r8q+|r?||
 ||rDdnd |	 }||
fV  q+q dS )a	  
        Returns an iterator over all parameters in the Layer, yielding tuple of name and parameter.

        Parameters:
            prefix(str, optional): Prefix to prepend to all parameter names. Default: ''.
            include_sublayers(bool, optional): Whether include the parameters of sublayers.
                If True, also include the named parameters from sublayers. Default: True.
            remove_duplicate(bool, optional): Whether to remove duplicated parameters in the result.
                Default: True.

        Yields:
            (string, Parameter): Tuple of name and Parameter

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> paddle.seed(100)

                >>> fc1 = paddle.nn.Linear(10, 3)
                >>> fc2 = paddle.nn.Linear(3, 10, bias_attr=False)
                >>> model = paddle.nn.Sequential(fc1, fc2)
                >>> for name, param in model.named_parameters():
                ...     print(name, param)
                0.weight Parameter containing:
                Tensor(shape=[10, 3], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[ 0.07276392, -0.39791510, -0.66356444],
                 [ 0.02143478, -0.18519843, -0.32485050],
                 [-0.42249614,  0.08450919, -0.66838276],
                 [ 0.38208580, -0.24303678,  0.55127048],
                 [ 0.47745085,  0.62117910, -0.08336520],
                 [-0.28653207,  0.47237599, -0.05868882],
                 [-0.14385653,  0.29945642,  0.12832761],
                 [-0.21237159,  0.38539791, -0.62760031],
                 [ 0.02637231,  0.20621127,  0.43255770],
                 [-0.19984481, -0.26259184, -0.29696006]])
                0.bias Parameter containing:
                Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=False,
                [0., 0., 0.])
                1.weight Parameter containing:
                Tensor(shape=[3, 10], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[ 0.01985580, -0.40268910,  0.41172385, -0.47249708, -0.09002256,
                 -0.00533628, -0.52048630,  0.62360322,  0.20848787, -0.02033746],
                 [ 0.58281910,  0.12841827,  0.12907702,  0.02325618, -0.07746267,
                 0.31950659, -0.37924835, -0.59209681, -0.11732036, -0.58378261],
                 [-0.62100595,  0.22293305,  0.28229684, -0.03687060, -0.59323978,
                 0.08411229,  0.53275704,  0.40431368,  0.03171402, -0.17922515]])
        Tr  r  r  Nr   r   )	r   r   r   r   r  r   r   r@   r  )rl   r  rb   r  Z
params_setr  layer_prefixsublayerparamskeyr   rO   r;   r;   r<   r     s,   7		

zLayer.named_parameters
layers_setset[Layer] | Nonec                 c  s    |du rt  }|r| |vr|r||  || fV  | j D ]\}}|du r(q||r-dnd | }|j|d||dE dH  qdS )a		  
        Returns an iterator over all sublayers in the Layer, yielding tuple of name and sublayer.
        The duplicate sublayer will only be yielded once.

        Parameters:
            prefix(str, optional): Prefix to prepend to all parameter names. Default: ''.
            include_self(bool, optional): Whether include the Layer itself. Default: False.
            layers_set(set, optional): The set to record duplicate sublayers. Default: None.
            remove_duplicate(bool, optional): Whether to remove duplicated sublayers in the result.
                Default: True.

        Yields:
            (string, Layer): Tuple of name and Layer

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> fc1 = paddle.nn.Linear(10, 3)
                >>> fc2 = paddle.nn.Linear(3, 10, bias_attr=False)
                >>> model = paddle.nn.Sequential(fc1, fc2)
                >>> for prefix, layer in model.named_sublayers():
                ...     print(prefix, layer)
                0 Linear(in_features=10, out_features=3, dtype=float32)
                1 Linear(in_features=3, out_features=10, dtype=float32)

                >>> l = paddle.nn.Linear(10, 3)
                >>> model = paddle.nn.Sequential(l, l)
                >>> for prefix, layer in model.named_sublayers(include_self=True, remove_duplicate=True):
                ...     print(prefix, layer)
                 Sequential(
                  (0): Linear(in_features=10, out_features=3, dtype=float32)
                  (1): Linear(in_features=10, out_features=3, dtype=float32)
                )
                0 Linear(in_features=10, out_features=3, dtype=float32)

                >>> l = paddle.nn.Linear(10, 3)
                >>> model = paddle.nn.Sequential(l, l)
                >>> for prefix, layer in model.named_sublayers(include_self=True, remove_duplicate=False):
                ...     print(prefix, layer)
                 Sequential(
                  (0): Linear(in_features=10, out_features=3, dtype=float32)
                  (1): Linear(in_features=10, out_features=3, dtype=float32)
                )
                0 Linear(in_features=10, out_features=3, dtype=float32)
                1 Linear(in_features=10, out_features=3, dtype=float32)

        Nr   r   T)r  r  r  r  )r   r  r   r@   r  )rl   r  r  r  r  r  r9   r
  r;   r;   r<   r    s$   8

zLayer.named_sublayerstensorc                 C  s   d| j vr	tdt|tstdt|j dd|v r!td|dkr)tdt| |r;|| j	vr;td| d	|d
urRt|t
jjksRtdt|j d|| j	|< |ra| j| d
S | j| d
S )a  
        Registers a tensor as buffer into the layer.

        `buffer` is a non-trainable tensor and will not be updated by optimizer,
        but is necessary for evaluation and inference. For example, the mean and variance in BatchNorm layers.
        The registered buffer is persistable by default, and will be saved into
        `state_dict` alongside parameters. If set persistable=False, it registers
        a non-persistable buffer, so that it will not be a part of `state_dict` .

        Buffers can be accessed as attributes using given names.

        Parameters:
            name (string): name of the buffer. The buffer can be accessed
                from this layer using the given name
            tensor (Tensor): the tensor to be registered as buffer.
            persistable (bool): whether the buffer is part of this layer's
                state_dict.

        Returns:
            None

        Examples:
            .. code-block:: python

                >>> import numpy as np
                >>> import paddle

                >>> linear = paddle.nn.Linear(10, 3)
                >>> value = np.array([0]).astype("float32")
                >>> buffer = paddle.to_tensor(value)
                >>> linear.register_buffer("buf_name", buffer, persistable=True)

                >>> # get the buffer by attribute.
                >>> print(linear.buf_name)
                Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
                [0.])

        r   )super().__init__() should be called firstz4The name of buffer should be a string, but received r   zThe name of buffer can not contain `.`, because when you access the newly added buffer in the form of `self.**.**`, it will cause AttributeError.r   z$The name of buffer can not be empty.zattribute '' already exists.Nz>The registered buffer should be a Paddle.Tensor, but received )__dict__r|   rt   rX   r   rc   r   KeyErrorhasattrr   r   eagerr	   r   discardr  )rl   rO   r  r   r;   r;   r<   register_buffer'  s,   
*

zLayer.register_bufferc                 C  r   )a\  

        Returns a list of all buffers from current layer and its sub-layers.

        Parameters:
            include_sublayers(bool, optional): Whether include the buffers of sublayers. If True, also include the buffers from sublayers. Default: True.

        Returns:
            list of Tensor, a list of buffers.

        Examples:
            .. code-block:: python

                >>> import numpy as np
                >>> import paddle

                >>> linear = paddle.nn.Linear(10, 3)
                >>> value = np.array([0]).astype("float32")
                >>> buffer = paddle.to_tensor(value)
                >>> linear.register_buffer("buf_name", buffer, persistable=True)

                >>> print(linear.buffers())
                [Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
                [0.])]

        c                 S  r   r;   r;   )r   r   r   r;   r;   r<   r     r   z!Layer.buffers.<locals>.<listcomp>r   )r   r   r;   r;   r<   buffersl  s   zLayer.buffersc                 c  s    t  }|r| j|d|dnt|g| g}|D ]/\}}|j }|D ]#\}	}
|
du s.|
|v r/q"|r6||
 ||r;dnd |	 }||
fV  q"qdS )a  
        Returns an iterator over all buffers in the Layer, yielding tuple of name and Tensor.

        Parameters:
            prefix(str, optional): Prefix to prepend to all buffer names. Default: ''.
            include_sublayers(bool, optional): Whether include the buffers of sublayers.
                If True, also include the named buffers from sublayers. Default: True.
            remove_duplicate(bool, optional): Whether to remove duplicated buffers in the result.
                Default: True.

        Yields:
            (string, Tensor): Tuple of name and tensor

        Examples:
            .. code-block:: python

                >>> import numpy as np
                >>> import paddle

                >>> fc1 = paddle.nn.Linear(10, 3)
                >>> buffer1 = paddle.to_tensor(np.array([0]).astype("float32"))
                >>> # register a tensor as buffer by specific `persistable`
                >>> fc1.register_buffer("buf_name_1", buffer1, persistable=True)

                >>> fc2 = paddle.nn.Linear(3, 10)
                >>> buffer2 = paddle.to_tensor(np.array([1]).astype("float32"))
                >>> # register a buffer by assigning an attribute with Tensor.
                >>> # The `persistable` can only be False by this way.
                >>> fc2.buf_name_2 = buffer2

                >>> model = paddle.nn.Sequential(fc1, fc2)

                >>> # get all named buffers
                >>> for name, buffer in model.named_buffers():
                ...     print(name, buffer)
                0.buf_name_1 Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
                [0.])
                1.buf_name_2 Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
                [1.])
        Tr	  Nr   r   )r   r  r   r   r@   r  )rl   r  rb   r  Zbuffers_setr  r
  r  r  r  r   rO   r;   r;   r<   r     s*   .	

zLayer.named_buffersset_to_zeroc                 C  s"   |   D ]
}|jr|| qdS )ad  
        Clear the gradients of all parameters for this layer.

        Args:
            set_to_zero (bool, optional): Whether to set the trainable parameters'
                gradients to zero or None. Default is True.

        Returns:
            None

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> import numpy as np

                >>> value = np.arange(26).reshape(2, 13).astype("float32")
                >>> a = paddle.to_tensor(value)
                >>> linear = paddle.nn.Linear(13, 5)
                >>> adam = paddle.optimizer.Adam(learning_rate=0.01,
                ...                              parameters=linear.parameters())
                >>> out = linear(a)
                >>> out.backward()
                >>> adam.step()
                >>> linear.clear_gradients()

        N)r   Z	trainableZclear_gradient)rl   r  pr;   r;   r<   clear_gradients  s
   
zLayer.clear_gradientsargsr   kwargsc                 O  s   d S rs   r;   )rl   r  r  r;   r;   r<   _build_once  s   zLayer._build_oncer:   c           	      O  sZ  | j  D ]?\}}|| jv r1|| ||}|d ur0t|tr(t|dkr(|\}}qtd| dq|| |}|d urDt|tsB|f}|}q| jsS| j|i | d| _t	 ryt
| jjt
jj | j|i |}W d    n1 ssw   Y  nt| jj | j|i |}W d    n1 sw   Y  | j D ]}|| ||}|d ur|}q|S )N   zPforward pre-hook must return None or a tuple of (new_args, new_kwargs), but got r   T)r   r@   r   rt   rv   r2   RuntimeErrorr   r  r&   r   ZRecordEventrn   r   ZTracerEventTypeForwardforwardr    r   values)	rl   r:   r  Zhook_idZforward_pre_hookZargs_kwargs_resulthook_resultrC   Zforward_post_hookr;   r;   r<   _dygraph_call_func  sN   



zLayer._dygraph_call_funcc                 O  sX   t  s$| js$| js$| jjtju s| jr$t r$t rt	 r$| j
|i |S | j|i |S rs   )r   r   r   rn   r  r/   r   r   r&   r   r#  r&  rl   r:   r  r;   r;   r<   __call__  s   zLayer.__call__c                 O  s   t )z
        Defines the computation performed at every call.
        Should be overridden by all subclasses.

        Parameters:
            *inputs(tuple): unpacked tuple arguments
            **kwargs(dict): unpacked dict arguments
        )NotImplementedErrorr'  r;   r;   r<   r#  ,  s   	zLayer.forwardc                 G  s   t d)Nz"Layer shouldn't implement backward)r|   )rl   r:   r;   r;   r<   backward7  s   zLayer.backwardr  c                 C  s$   t |ts|du sJ || j|< |S )a+  

        Adds a sub Layer instance.

        Added sublayer can be accessed by self.name

        Parameters:
            name(str): name of this sublayer.
            sublayer(Layer): an instance of Layer.
        Returns:
            Layer, the sublayer passed in.

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> class MySequential(paddle.nn.Layer):
                ...     def __init__(self, *layers):
                ...         super().__init__()
                ...         if len(layers) > 0 and isinstance(layers[0], tuple):
                ...             for name, layer in layers:
                ...                 self.add_sublayer(name, layer)
                ...         else:
                ...             for idx, layer in enumerate(layers):
                ...                 self.add_sublayer(str(idx), layer)
                ...
                ...     def forward(self, input):
                ...         for layer in self._sub_layers.values():
                ...             input = layer(input)
                ...         return input
                ...
                >>> fc1 = paddle.nn.Linear(10, 3)
                >>> fc2 = paddle.nn.Linear(3, 10, bias_attr=False)
                >>> model = MySequential(fc1, fc2)
                >>> for prefix, layer in model.named_sublayers():
                ...     print(prefix, layer)
                0 Linear(in_features=10, out_features=3, dtype=float32)
                1 Linear(in_features=3, out_features=10, dtype=float32)
        N)rt   r/   r   )rl   rO   r  r;   r;   r<   add_sublayer:  s   )
zLayer.add_sublayer	parameterc                 C  s  d| j vr	tdt|tstdt|j dd|v r!td|dkr)tdt| |r;|| j	vr;td| d	|d
urTt|t
jtjjfsTtdt|j d|d
u r]d
| j	|< t| jdkr||j| jv ssJ d|j d|| j|j  || j	|< |S )a  Adds a Parameter instance.

        Added parameter can be accessed by self.name

        Parameters:
            name(str): name of this sublayer.
            parameter(Parameter): an instance of Parameter.
        Returns:
            Parameter, the parameter passed in.
        Examples:
            .. code-block:: python

                >>> import paddle
                >>> paddle.seed(100)

                >>> class MyLayer(paddle.nn.Layer):
                ...     def __init__(self):
                ...         super().__init__()
                ...         self._linear = paddle.nn.Linear(1, 1)
                ...         w_tmp = self.create_parameter([1,1])
                ...         self.add_parameter("w_tmp", w_tmp)
                ...
                ...     def forward(self, input):
                ...         return self._linear(input)
                ...
                >>> mylayer = MyLayer()
                >>> for name, param in mylayer.named_parameters():
                ...     print(name, param)
                w_tmp Parameter containing:
                Tensor(shape=[1, 1], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[-1.01448846]])
                _linear.weight Parameter containing:
                Tensor(shape=[1, 1], dtype=float32, place=Place(cpu), stop_gradient=False,
                [[0.18551230]])
                _linear.bias Parameter containing:
                Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=False,
                [0.])
        r   z,super().__init__() should be called firstly.z7The name of parameter should be a string, but received r   zThe name of parameter can not contain `.`, because when you access the newly added parameter in the form of `self.**.**`, it will cause AttributeError.r   z'The name of parameter can not be empty.zThe parameter 'r  Nz>The parameter to be added should be a Parameter, but received r   &Parameter not found, Can't not find [  ] in state_dict)r  r!  rt   rX   r   rc   r   r  r  r   r   r   r   pirValuer2   r   rO   	set_value)rl   rO   r,  r;   r;   r<   add_parameterh  s:   
'



zLayer.add_parameterc                   s    fdd}t |tstdt|j  j| |dds4 t}t	 j
jdks/J |g j
_|ddsa t}t	 jdkrN jj|jdd	 t	 j
jdksXJ  j
j| d
S d
S )aY  
        Add customized attribute while append_op. In case of quantization, we want to save
        some attributes into op_desc while exporting inference model by @to_static.

        Arguments:
            attrs(dict): customized attributes that will be added into op_descs.

        NOTE: The interface is only exposed to developers.
        c                   s@   | r j n j}| rtnt}d}|rtt|}|| |k}|S )NF)r   r   r=   rG   nextreversed)is_pre_hookZlayers_hooksZcandidate_hookZalready_registeredlast_keyr   r;   r<   is_already_registered  s   z2Layer._set_op_attrs.<locals>.is_already_registeredz)attrs should be type(dict), but received T)r5  r   FrS   )lastN)rt   dictr   rc   r   r?   updater   r=   r2   r0   rA   r   rG   r   move_to_endr   rW   )rl   ro   r7  Zpre_hook_helperZpost_hook_helperr;   r   r<   _set_op_attrs  s.   



zLayer._set_op_attrsdict[str, Any]c                 C  r   rs   )r  r   r;   r;   r<   __getstate__  s   zLayer.__getstate__statec                 C  s   | j | d S rs   )r  r:  )rl   r?  r;   r;   r<   __setstate__  s   zLayer.__setstate__c                 C  s   d| j v r| j d }|| jv rt rt| j| S | j| S d| j v r2| j d }|| jv r2| j| S d| j v rM| j d }||v rMt rIt|| S || S t| |S )Nr   r   r   )r  r   r   r   r   object__getattribute__)rl   rO   r   r   r   r;   r;   r<   __getattr__  s"   









zLayer.__getattr__valuec                   sp   fdd}t |tjjjjrt|  |  |_d S t t	t
|  d tr-t|  | | jdd }t |tjrp|d u rBtdt| jdkra|j| jv sXJ d|j d|| j|j  || j| j| j || < d S t |tjjr|  dkr|d u rtd|| j| j| j || < d S |d ur |v r|d urtd	  d
t
|j dd | < d S | jdd }t |tr|d u rtd|| j| j| j || < d S |d ur |v r|d urtd  dt
|j dd | < d S | jdd }t |tjj r8|d u rtd|| j| j| j  | jvr&| j!"  |js2t#$d  |_|| < d S |d ur |v rt
|tj%ksRt |tjjrddlm&} t' rq|  d u rqt(d  d  d  d|  d u st
t	|  tjj kr||| < d S ||t	|   d S |d urtd  dt
|j dd | < d S t|  | d S )Nc                    s   | D ]	} |v r| = qd S rs   r;   )ZdictsdrO   r;   r<   _remove_if_exist	  s
   z+Layer.__setattr__.<locals>._remove_if_existr   r  r   r-  r.  zbuiltin.parameterzassignment to parameter 'z0' should be of type Parameter or None, but got ''r   zassignment to sublayer 'z,' should be of type Layer or None, but got 'r   Z	_buffers_)assignzIn Dy2stat, self.z is a buffer and self.z0 is not allowed to be set to Variable when self.z	 is None.zassignment to buffers 'z7' should be of type core.DenseTensor or None, but got '))rt   r   ZjitZ	dy2staticZprogram_translatorZStaticFunctionrA  __setattr__Z_patched_namegetattrrc   propertyr  rH   r   r   r|   r2   r   rO   r1  r   r   r/  r0  Zget_defining_opr   r   r/   r   r   r  r	   r   r  r   r   VariablerI  r   r!  )rl   rO   rD  rG  r  Zlayersr   rI  r;   rF  r<   rJ    s   


zLayer.__setattr__c                 C  s^   || j v r| j |= d S || jv r| j|= d S || jv r'| j|= | j| d S t| | d S rs   )r   r   r   r   r  rA  __delattr__rk   r;   r;   r<   rN  v  s   


zLayer.__delattr__	list[str]c                 C  sZ   t | j}t| j }t| j }t| j }t| j }|| | | | }|S )a  
        Return a list. Get all parameters, buffers(non-parameter tensors), sublayers, method and attr of Layer.

        Examples:
            .. code-block:: python
                >>> import paddle
                >>> import numpy as np

                >>> class Mylayer(paddle.nn.Layer):
                ...     def __init__(self):
                ...         super().__init__()
                ...         self.linear1 = paddle.nn.Linear(10, 10)
                ...         self.linear2 = paddle.nn.Linear(5, 5)
                ...         self.conv2d = paddle.nn.Conv2D(3, 2, 3)
                ...         self.embedding = paddle.nn.Embedding(128, 16)
                ...         self.h_0 = paddle.to_tensor(np.zeros([10, 10]).astype('float32'))
                ...
                >>> mylayer = Mylayer()
                >>> print(dir(mylayer))
                ['__call__', '__class__', '__delattr__', '__dict__', ..., 'training']
        )dirrn   ru   r  keysr   r   r   )rl   methodro   r   r   r  rQ  r;   r;   r<   __dir__  s   
zLayer.__dir__c                 C  s   dS )zs
        Extra representation of this layer, you can have custom implementation
        of your own layer.
        r   r;   r   r;   r;   r<   
extra_repr  s   zLayer.extra_reprc                 C  s   g }|   }|d}g }| j D ]\}}t|}t|d}|d| d |  q| jjd }|rQt	|dkrE|dd
| d 7 }nt	|dkrQ||d 7 }|r^|dd
| d 7 }|d7 }|S )	NrR   r   (z): rS   z
  r   ))rT  rU   r   r@   reprr_   rW   rn   r   r2   rY   )rl   extra_linesrT  Zsublayer_linesrO   r9   Zsublayer_strZ	final_strr;   r;   r<   __repr__  s$   

zLayer.__repr___StateDictHookc                 C  r   rs   )r   r   r   r   r;   r;   r<   register_state_dict_hook  s   
zLayer.register_state_dict_hookdestination_StateDict | Nonestructured_name_prefix
_StateDictc           	      C  s   |du rt  }| j D ]\}}|dur|||| < q| j D ]\}}|dur3|| jvr3|||| < q |rO| j D ]\}}|durN||||| d  q;|S )z
        The difference from state_dict() is that state_dict_hook will not be called,
        but the original types of parameters and buffers will be maintained.
        Nr   )r   r   r@   r   r   r   _obtain_parameters_buffers)	rl   r\  rb   r^  rO   datar   
layer_name
layer_itemr;   r;   r<   r`    s*   


z Layer._obtain_parameters_buffersinclude_non_persistable_bufferuse_hook	keep_varsc              	   C  s  |du rt  }| j D ]\}}|dur |r|n| ||| < q| j D ],\}}	|sB|	durA|| jvrA|r9|	n|	 ||| < q&|	durR|rJ|	n|	 ||| < q&|rq| j D ]\}
}|durp|||||
 d ||| qZ|r| j	 D ]}||}|dur|}qx|S )a|  
        Get all parameters and persistable buffers of current layer and its sub-layers. And set them into a dict

        Parameters:
            destination(dict, optional) : If provide, all the parameters and persistable buffers will be set to this dict . Default: None.
            include_sublayers(bool, optional) : If true, also include the parameters and persistable buffers from sublayers. Default: True.
            include_non_persistable_buffer(bool, optional): If true, include non persistable buffers of current layer and its sub-layers, it is used in pure fp16 and jit.save. Default: False.
            use_hook(bool, optional) : If true, the operations contained in _state_dict_hooks will be appended to the destination. Default: True.
            keep_vars(bool, optional) : If false, the returned tensors in the state dict are detached from autograd. Default: True.
        Nr   )
r   r   r@   detachr   r   r   _state_dict_implr   r$  )rl   r\  rb   r^  rd  re  rf  rO   ra  r   rb  rc  Zstate_dict_hookr%  r;   r;   r<   rh    sJ   




	zLayer._state_dict_implc                 C     | j |||d||dS )a"  

        Get all parameters and buffers of current layer and its sub-layers. And set them into a dict

        Parameters:
            destination(dict, optional) : If provide, all the parameters and persistable buffers will be set to this dict . Default: None.
            include_sublayers(bool, optional) : If true, also include the parameters and persistable buffers from sublayers. Default: True.
            use_hook(bool, optional) : If true, the operations contained in _state_dict_hooks will be appended to the destination. Default: True.
            keep_vars(bool, optional) : If false, the returned tensors in the state dict are detached from autograd. Default: True.

        Returns:
            dict, a dict contains all the parameters and persistable buffers.

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> emb = paddle.nn.Embedding(10, 10)

                >>> state_dict = emb.to_static_state_dict()
                >>> paddle.save( state_dict, "paddle_dy.pdparams")

        Tr\  rb   r^  rd  re  rf  rh  rl   r\  rb   r^  re  rf  r;   r;   r<   to_static_state_dict$  s    zLayer.to_static_state_dictc                 C  ri  )a"  
        Get all parameters and persistable buffers of current layer and its sub-layers. And set them into a dict

        Parameters:
            destination(dict, optional) : If provide, all the parameters and persistable buffers will be set to this dict . Default: None.
            include_sublayers(bool, optional) : If true, also include the parameters and persistable buffers from sublayers. Default: True.
            use_hook(bool, optional) : If true, the operations contained in _state_dict_hooks will be appended to the destination. Default: True.
            keep_vars(bool, optional) : If false, the returned tensors in the state dict are detached from autograd. Default: True.

        Returns:
            dict: a dict contains all the parameters and persistable buffers.

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> emb = paddle.nn.Embedding(10, 10)

                >>> state_dict = emb.state_dict()
                >>> paddle.save(state_dict, "paddle_dy.pdparams")

        Frj  rk  rl  r;   r;   r<   
state_dictM  s   zLayer.state_dictr#   c                 C  sj   i }| j ddd}t|d|d}|| | j D ]\}}|dur2|j| | dd}|| q|S )a  Recursively builds a sharded state dictionary for the model and its sub-layers.

        Args:
            structured_name_prefix: Prefix to prepend to all tensor names for hierarchical naming.

        Returns:
            Dictionary mapping tensor names to ShardedWeight.
            The dictionary contains both the current layer's parameters and all sub-layer parameters.
        r   F)r^  rb   N)rn  Zshard_rulesr  r   )r^  )rn  r$   r:  r   r@   sharded_state_dict)rl   r^  ro  rn  Zcurrent_sharded_dictrb  rc  Zsub_shardedr;   r;   r<   ro  u  s&   

zLayer.sharded_state_dictrn  use_structured_nametuple[list[str], list[str]]c                   s  g t   g } fdd}g }| jdd D ]6\}}|r!|n|j}z|||}	||	 W q tyO }
 ztd| dt|
  W Y d}
~
qd}
~
ww 	 D ]}| vr_|| qTt
 rs|D ]	\}}|| qe|fS dd	 }zAt rttjj j}tjjjd
d |D t | ntt j}tdd |D t | |D ]	\}}||| qW |fS  ty } ztdd}~w ty } ztdd}~ww )a  
        Set parameters and persistable buffers from state_dict. All the parameters and buffers will be reset by the tensor in the state_dict

        Parameters:
            state_dict(dict) : Dict contains all the parameters and persistable buffers.
            use_structured_name(bool, optional) : If true, use structured name as key, otherwise, use parameter or buffer name as key.
                                                  Default: True.
        Returns:
            missing_keys(list):A list of str containing the missing keys
            unexpected_keys(list):A list of str containing the unexpected keys

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> emb = paddle.nn.Embedding(10, 10)

                >>> state_dict = emb.state_dict()
                >>> paddle.save(state_dict, "paddle_dy.pdparams")
                >>> para_state_dict = paddle.load("paddle_dy.pdparams")
                >>> emb.set_state_dict(para_state_dict)

        c                   s    | d }|d u r|  t|  dt|ttfrCt|t|kr:|  t|  dt| dt|  |  ||fS t	|j
rM|
 n|j
}t|t|j
krp|  t|  dt| dt|j
 d |  ||fS )Nz# is not found in the provided dict.z receives the length of z, but the expected shape is z receives a shape r   )rH   rW   r|   rt   r9  ru   r2   r  inspectismethodr   )r  r   r?  Zstate_shapeZ
match_keysZmissing_keysrn  r;   r<   _check_match  s2   






z*Layer.set_state_dict.<locals>._check_matchF)re  zSkip loading for z. Nc                 S  s   t  | j }| }| rt }nR| rt	 }nI|
 r6t }||  t| }n2| rVt }||  ttj dd | }nt }||  t| }||| d S )N:r   )r   Zfind_varrO   
get_tensorZ_placeZis_cpu_placer   CPUPlaceZis_cuda_pinned_placeCUDAPinnedPlaceZis_xpu_placeZPlaceZ	set_placeXPUPlaceZxpu_device_idZis_custom_placeZCustomPlacer   r   Z
get_devicerU   Zcustom_device_id	CUDAPlaceZgpu_device_idr   )r   Zndarraytr  placer;   r;   r<   _set_var  s*   

z&Layer.set_state_dict.<locals>._set_varc                 S     g | ]\}}|qS r;   r;   r   r   r?  r;   r;   r<   r   
	      z(Layer.set_state_dict.<locals>.<listcomp>c                 S  r  r;   r;   r  r;   r;   r<   r   	  r  zThis error might happens in dy2static, while calling 'set_state_dict' dynamically in 'forward', which is not supported. If you only need call 'set_state_dict' once, move it to '__init__'.)r   rh  r@   rO   rW   r|   r6   r7   rX   rQ  r   r1  r   r   r   baser   Z_current_expected_place__default_executorZ	libpaddler/  Zcreate_loaded_parameterr   _get_devicer   Z_create_loaded_parameterr   )rl   rn  rp  Zunexpected_keysru  Zmatched_param_stater  r   Zkey_nameZ	match_reserrr?  r~  executorer;   rt  r<   set_state_dict  sp   
&
8

zLayer.set_state_dictblockingc                 C  s   | j |||dddS )a	  
        Cast the parameters and buffers of Layer by the give device, dtype and blocking.

        Parameters:
            device(str|paddle.CPUPlace()|paddle.CUDAPlace()|paddle.CUDAPinnedPlace()|paddle.XPUPlace()|None, optional): The device of the Layer which want to be stored.
            If None, the device is the same with the original Tensor. If device is string, it can be ``cpu``, ``gpu:x`` and ``xpu:x``, where ``x`` is the
            index of the GPUs or XPUs. Default: None.

            dtype(str|numpy.dtype|paddle.dtype|None, optional): The type of the data. If None, the dtype is the same with the original Tensor. Default: None.

            blocking(bool|None, optional): If False and the source is in pinned memory, the copy will be
              asynchronous with respect to the host. Otherwise, the argument has no effect. If None, the blocking is set True. Default: None.

        Returns:
            self

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> paddle.seed(2023)

                >>> linear=paddle.nn.Linear(2, 2)
                >>> linear.weight
                >>> print(linear.weight)
                Parameter containing:
                Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=False,
                [[ 0.89611185,  0.04935038],
                 [-0.58883440,  0.99266374]])

                >>> linear.to(dtype='float64')
                >>> linear.weight
                >>> print(linear.weight)
                Parameter containing:
                Tensor(shape=[2, 2], dtype=float64, place=Place(gpu:0), stop_gradient=False,
                [[ 0.89611185,  0.04935038],
                 [-0.58883440,  0.99266374]])

                >>> linear.to(device='cpu')
                >>> linear.weight
                >>> print(linear.weight)
                Parameter containing:
                Tensor(shape=[2, 2], dtype=float64, place=Place(cpu), stop_gradient=False,
                [[ 0.89611185,  0.04935038],
                 [-0.58883440,  0.99266374]])

                >>> # doctest: +REQUIRES(env:GPU)
                >>> linear.to(device=paddle.CUDAPinnedPlace(), blocking=False)
                >>> linear.weight
                >>> print(linear.weight)
                Parameter containing:
                Tensor(shape=[2, 2], dtype=float64, place=Place(gpu_pinned), stop_gradient=False,
                [[ 0.89611185,  0.04935038],
                 [-0.58883440,  0.99266374]])

        TF)r   r`   r  rb   ra   )rd   )rl   r   r`   r  r;   r;   r<   r   "	  s   >zLayer.tofuncICallable[[Tensor, PlaceLike | None, DTypeLike | None, bool | None], None]c              	   C  s   |r|   D ]}|||||| q| j D ]C\}}|d urZt  |||||}	W d    n1 s4w   Y  |jd urZt  || |||}
W d    n1 sUw   Y  q| j D ]\}}|d urr|||||| j|< q`|| _d S rs   )	r   _applyr   r@   r   ZgradZ
_grad_ivarr   r   )rl   r  r   r`   r  rb   r9   r  r   Zparam_appliedZgrad_appliedbufr;   r;   r<   r  h	  s(   


zLayer._applyr|  c                 C  sv  |d u r|j }|d u r|j}t|tjtjfst|}|j  r]t|tjr*t	| n|}t
|}t|j| d d d d }t }||k rZ|t |}	|    n|}	n|}	|d ur||	jkrtjjj|	j d |	j|d}
W d    n1 sw   Y  n|	}
|d ur|
j |s|
||}n|
}|  }|  }| r|| |S || |S )N   rS   g333333?)r}  r   )r}  r`   rt   r   r   r   ZDataTyper   Zis_gpu_placer!   Zsize_of_dtyper   prodr   gpu_memory_availableZ_copy_tor   rx  rD  rw  _clearr  r   Z_dygraph_place_guardcastZ_equalsZ_is_initializedZ_share_data_withZ_share_data_nocheck_with)rl   r|  r   r`   r  Zproto_dtypeZ
size_dtypeZwaiting_alloc_memoryr  Zt_usedZt_castedZnew_tZ
dst_tensorZ
src_tensorr;   r;   r<   
_transform	  sL   





zLayer._transformra   c                   s   |du r|du r|du rS |dur5t |trtj|}nt |tjtjtjtj	fr,n	t
dt|j |du r<d}n	t |tsEJ d fdd}t  tjdtd ||||| W d   n1 skw   Y  |_S )	a  
        Cast the parameters and buffers of Layer by the give device, dtype and blocking.

        Parameters:
            device(str|paddle.CPUPlace()|paddle.CUDAPlace()|paddle.CUDAPinnedPlace()|paddle.XPUPlace()|None, optional): The device of the Layer which want to be stored.
            If None, the device is the same with the original Tensor. If device is string, it can be ``cpu``, ``gpu:x`` and ``xpu:x``, where ``x`` is the
            index of the GPUs or XPUs. Default: None.

            dtype(str|numpy.dtype|paddle.dtype|None, optional): The type of the data. If None, the dtype is the same with the original Tensor. Default: None.

            blocking(bool|None, optional): If False and the source is in pinned memory, the copy will be
              asynchronous with respect to the host. Otherwise, the argument has no effect. If None, the blocking is set True. Default: None.

            include_sublayers(bool, optional): If True, deal with self and all sublayers parameters and buffers, if not only deal with self parameters and buffers. Default: True.

            floating_only(bool, optional): If True, only cast all floating point parameters and buffers of Layer by the give device, dtype and blocking.

        Returns:
            self

        Nzdevice value error, must be str, paddle.CPUPlace(), paddle.CUDAPlace(), paddle.CUDAPinnedPlace() or paddle.XPUPlace(), but the type of device is Tz5blocking value error, must be the True, False or Nonec                   s"    r	t | s	| S | |||S rs   )r   Zis_floating_pointr  )r|  r   r`   r  ra   rl   r;   r<   	transform
  s   z!Layer._to_impl.<locals>.transformignore)category)rt   rX   r   r   Z_convert_to_placer   rx  r{  ry  rz  r|   rc   r   r   r6   catch_warningsfilterwarningsUserWarningr  r   )rl   r   r`   r  rb   ra   r  r;   r  r<   rd   	  s>   
	
zLayer._to_implr   c                 C  sp   t j }t j }t j|| |  D ]	}||  qW d   n1 s*w   Y  t j r6|S |S )z
        Return startup program containing initialization operations of all parameters.

        NOTE(dev): This is a very low level API and only for inner developer.
        N)	r   r  r   Zprogram_guardr   Z_create_init_opr   r   Zuse_pir_api)rl   Zstartup_programrq   r   r;   r;   r<   _startup_program
  s   


zLayer._startup_programre   Layer | Sequence[Layer] | Nonec                   sf    du rg n  t  tr g nt  ttfrt  ntdt j d fdd}| |S )a  
        Casts all floating point parameters and buffers to ``float`` data type.

        Parameters:
            excluded_layers(nn.Layer|list|tuple|None, optional): Specify the layers that need to be kept original data type. if excluded_layers is None, casts all floating point parameters and buffers. Default: None.

        Returns:
            Layer: self

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> class Model(paddle.nn.Layer):
                ...     def __init__(self):
                ...         super().__init__()
                ...         self.linear = paddle.nn.Linear(1, 1)
                ...         self.dropout = paddle.nn.Dropout(p=0.5)
                ...
                ...     def forward(self, input):
                ...         out = self.linear(input)
                ...         out = self.dropout(out)
                ...         return out
                ...
                >>> model = Model()
                >>> model.float()
                Model(
                    (linear): Linear(in_features=1, out_features=1, dtype=paddle.float32)
                    (dropout): Dropout(p=0.5, axis=None, mode=upscale_in_train)
                )
        N9excluded_layers should be type nn.Layer or list, but got r   c                      t | tj  d S rs   )rf   r   r   r9   re   r;   r<   layer_transS
     z Layer.float.<locals>.layer_trans)rt   rc   ru   rv   r   r   r   rl   re   r  r;   r  r<   float$
  s   $


zLayer.floatc                      t j du rtd | S  du rtjgn  t tr! g nt t	t
fr-t	  ntdt j d fdd}| |S )a  
        Casts all floating point parameters and buffers to ``float16`` data type.


        .. note::
            ``nn.BatchNorm`` does not support ``bfloat16`` weights, so it would not be converted by default.


        Parameters:
           excluded_layers(nn.Layer|list|tuple|None, optional): Specify the layers that need to be kept original data type. if excluded_layers is None, casts all floating point parameters and buffers except ``nn.BatchNorm``. Default: None.

        Returns:
            Layer: self

        Examples:
            .. code-block:: python

                >>> # doctest: +SKIP('Paddle compiled by the user does not support float16, so keep original data type.')
                >>> import paddle

                >>> class Model(paddle.nn.Layer):
                ...     def __init__(self):
                ...         super().__init__()
                ...         self.linear = paddle.nn.Linear(1, 1)
                ...         self.dropout = paddle.nn.Dropout(p=0.5)
                ...
                ...     def forward(self, input):
                ...         out = self.linear(input)
                ...         out = self.dropout(out)
                ...         return out
                ...
                >>> model = Model()
                >>> model.float16()
                Model(
                    (linear): Linear(in_features=1, out_features=1, dtype=float32)
                    (dropout): Dropout(p=0.5, axis=None, mode=upscale_in_train)
                )
        FzQPaddle compiled by the user does not support float16, so keep original data type.Nr  r   c                   r  rs   )rf   r   r   r  r  r;   r<   r  
  r  z"Layer.float16.<locals>.layer_trans)r   ampZis_float16_supportedr6   r7   r
   	BatchNormrt   rc   ru   rv   r   r   r   r  r;   r  r<   r   X
  s    *


zLayer.float16c                   r  )a/  
        Casts all floating point parameters and buffers to ``bfloat16`` data type.


        .. note::
            ``nn.BatchNorm`` does not support ``bfloat16`` weights, so it would not be converted by default.


        Parameters:
            excluded_layers(nn.Layer|list|tuple|None, optional): Specify the layers that need to be kept original data type. if excluded_layers is None, casts all floating point parameters and buffers except ``nn.BatchNorm``. Default: None.

        Returns:
            Layer: self

        Examples:
            .. code-block:: python

                >>> # doctest: +SKIP('bfloat need V100 compile')
                >>> import paddle

                >>> class Model(paddle.nn.Layer):
                ...     def __init__(self):
                ...         super().__init__()
                ...         self.linear = paddle.nn.Linear(1, 1)
                ...         self.dropout = paddle.nn.Dropout(p=0.5)
                ...
                ...     def forward(self, input):
                ...         out = self.linear(input)
                ...         out = self.dropout(out)
                ...         return out
                ...
                >>> model = Model()
                >>> model.bfloat16()
                >>> #UserWarning: Paddle compiled by the user does not support bfloat16, so keep original data type.
                Model(
                    (linear): Linear(in_features=1, out_features=1, dtype=float32)
                    (dropout): Dropout(p=0.5, axis=None, mode=upscale_in_train)
                )
        FzRPaddle compiled by the user does not support bfloat16, so keep original data type.Nr  r   c                   r  rs   )rf   r   r   r  r  r;   r<   r  
  r  z#Layer.bfloat16.<locals>.layer_trans)r   r  Zis_bfloat16_supportedr6   r7   r
   r  rt   rc   ru   rv   r   r   r   r  r;   r  r<   r   
  s    +


zLayer.bfloat16)Nr   )r   r   r`   r*   r   r   )r   r   )r   r   r   r   )r   rX   )r   r   r   r   )r   r   r   r   r   r   )NNFNN)r   r-   r   r   r`   r   r   r   r   r   r   r   r   r	   )NNN)rO   r   r   r   r`   r   r   r	   )T)rb   r   r   r   rs   )r`   r   r   r   )r   r   )r   r   )F)r  r   r   r  )r   TT)r  rX   rb   r   r  r   r   r  )r   FNT)
r  rX   r  r   r  r  r  r   r   r   )rO   rX   r  r	   r   r   r   r   )r  r   r   r   )r  r   r  r   r   r   )r:   r   r  r   r   r   )r:   r   r   r   )rO   rX   r  r/   r   r/   )rO   rX   r,  r	   r   r	   )r   r=  )r?  r=  r   r   )rO   rX   r   r   )rO   rX   rD  r   r   r   )rO   rX   r   r   )r   rO  )r   rZ  r   r   )NTr   )r\  r]  rb   r   r^  rX   r   r_  )NTr   FTT)r\  r]  rb   r   r^  rX   rd  r   re  r   rf  r   r   r_  )NTr   TT)r\  r]  rb   r   r^  rX   re  r   rf  r   r   r_  )r   )r^  rX   r   r#   )rn  r_  rp  r   r   rq  )r   r   r`   r   r  r   r   r   )r  r  r   r   r`   r   r  r   rb   r   r   r   )
r|  r	   r   r   r`   r   r  r   r   r	   )NNNTF)
r   r   r`   r   r  r   rb   r   ra   r   )r   r   )re   r  r   r   )>r   r   r   r   r   rj   r   r   r   r   r   r   r   r'   r   r   r   r   r   r   r   r   r  r  r  r   r  r  r&  r(  r#  r*  r+  r2  r<  r>  r@  rC  rJ  rN  rS  rT  rY  r[  r`  rh  rm  rn  ro  r   Zdeprecate_stat_dictr  r   r  r  rd   r  Zset_dict	load_dictr  r   r   r;   r;   r;   r<   r/   q  s   
 6
+
:
6
:
7C>:8$
P
(NJE%B
 

+



.K
<



n

 

	#@+*& 	N
D
G5C)^
__future__r   r~   rr  retypingr6   r   collectionsr   r   r   r   r   numpyr   Ztyping_extensionsr   r   r	   r
   r   Zpaddle.autograd.backward_utilsr   Zpaddle.baser   r   r   Zpaddle.base.corer   Zpaddle.base.dygraphr   Zpaddle.base.dygraph.baser   r   r   r   Zpaddle.base.dygraph_utilsr   Zpaddle.base.executorr   r   Zpaddle.base.frameworkr   r   r   r  r   r   r   r   r    r!   Zpaddle.base.layer_helper_baser"   Z5paddle.distributed.flex_checkpoint.dcp.sharded_weightr#   r$   Zpaddle.frameworkr%   Zpaddle.profiler.utilsr&   Zpaddle.utilsr'   collections.abcr(   r)   Zpaddle._typingr*   r+   r,   r-   Zpaddle.nn.initializerr.   __all__r   r   r9  rX   r_  rZ  compilerK   rM   r=   rG   rJ   rQ   r_   rf   rg   r   r   r/   r;   r;   r;   r<   <module>   sh   ,


 )