o
    piw                     @  s>  U d dl mZ d dlmZ d dlZd dlmZ d dlmZ d dl	m
Z
 erd dlmZ d dlmZ d dlZd dlmZ d d	lmZ d d
lmZ d dlmZ eeef Zded< eejejejejf Zded< eeee ee ej e ef Z!ded< eeee ee ej eejejf  ef Z"ded< G dd de
Z#dS )    )annotations)TYPE_CHECKINGN)Normal)ExpTransform)TransformedDistribution)Sequence)Union)	TypeAlias)Tensor)NestedSequencer	   _LognormalLocBase_LognormalLocNDArray_LognormalLoc_LognormalScalec                      sn   e Zd ZU dZded< ded< d fd	d
ZedddZedddZdddZ	dddZ
dddZ  ZS )	LogNormala  The LogNormal distribution with location `loc` and `scale` parameters.

    .. math::

        X \sim Normal(\mu, \sigma)

        Y = exp(X) \sim LogNormal(\mu, \sigma)


    Due to LogNormal distribution is based on the transformation of Normal distribution, we call that :math:`Normal(\mu, \sigma)` is the underlying distribution of :math:`LogNormal(\mu, \sigma)`

    Mathematical details

    The probability density function (pdf) is

    .. math::
        pdf(x; \mu, \sigma) = \frac{1}{\sigma x \sqrt{2\pi}}e^{(-\frac{(ln(x) - \mu)^2}{2\sigma^2})}

    In the above equation:

    * :math:`loc = \mu`: is the means of the underlying Normal distribution.
    * :math:`scale = \sigma`: is the stddevs of the underlying Normal distribution.

    Args:
        loc(int|float|complex|list|tuple|numpy.ndarray|Tensor): The means of the underlying Normal distribution.The data type is float32, float64, complex64 and complex128.
        scale(int|float|list|tuple|numpy.ndarray|Tensor): The stddevs of the underlying Normal distribution.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> from paddle.distribution import LogNormal

            >>> # Define a single scalar LogNormal distribution.
            >>> dist = LogNormal(loc=0., scale=3.)
            >>> # Define a batch of two scalar valued LogNormals.
            >>> # The underlying Normal of first has mean 1 and standard deviation 11, the underlying Normal of second 2 and 22.
            >>> dist = LogNormal(loc=[1., 2.], scale=[11., 22.])
            >>> # Get 3 samples, returning a 3 x 2 tensor.
            >>> dist.sample((3, ))

            >>> # Define a batch of two scalar valued LogNormals.
            >>> # Their underlying Normal have mean 1, but different standard deviations.
            >>> dist = LogNormal(loc=1., scale=[11., 22.])

            >>> # Complete example
            >>> value_tensor = paddle.to_tensor([0.8], dtype="float32")

            >>> lognormal_a = LogNormal([0.], [1.])
            >>> lognormal_b = LogNormal([0.5], [2.])
            >>> sample = lognormal_a.sample((2, ))
            >>> # a random tensor created by lognormal distribution with shape: [2, 1]
            >>> entropy = lognormal_a.entropy()
            >>> print(entropy)
            Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
                [1.41893852])
            >>> lp = lognormal_a.log_prob(value_tensor)
            >>> print(lp)
            Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
                [-0.72069150])
            >>> p = lognormal_a.probs(value_tensor)
            >>> print(p)
            Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
                [0.48641577])
            >>> kl = lognormal_a.kl_divergence(lognormal_b)
            >>> print(kl)
            Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
                [0.34939718])
    r
   locscaler   r   returnNonec                   s:   t ||d| _| jj| _| jj| _t | jt g d S )N)r   r   )r   _baser   r   super__init__r   )selfr   r   	__class__ d/home/app/PaddleOCR-VL/.venv_paddleocr/lib/python3.10/site-packages/paddle/distribution/lognormal.pyr      s   

zLogNormal.__init__c                 C  s   t | jj| jjd  S )zZMean of lognormal distribution.

        Returns:
            Tensor: mean value.
           )paddleexpr   meanvariancer   r   r   r   r       s   zLogNormal.meanc                 C  s(   t | jjt d| jj | jj  S )zbVariance of lognormal distribution.

        Returns:
            Tensor: variance value.
        r   )r   expm1r   r!   r   r    r"   r   r   r   r!      s   zLogNormal.variancec                 C  s   | j  | j j S )a  Shannon entropy in nats.

        The entropy is

        .. math::

            entropy(\sigma) = 0.5 \log (2 \pi e \sigma^2) + \mu

        In the above equation:

        * :math:`loc = \mu`: is the mean of the underlying Normal distribution.
        * :math:`scale = \sigma`: is the stddevs of the underlying Normal distribution.

        Returns:
          Tensor: Shannon entropy of lognormal distribution.

        )r   entropyr    r"   r   r   r   r$      s   zLogNormal.entropyvaluec                 C  s   t | |S )zProbability density/mass function.

        Args:
          value (Tensor): The input tensor.

        Returns:
          Tensor: probability.The data type is same with :attr:`value` .

        )r   r   Zlog_prob)r   r%   r   r   r   probs   s   
zLogNormal.probsotherc                 C  s   | j |j S )a
  The KL-divergence between two lognormal distributions.

        The probability density function (pdf) is

        .. math::

            KL\_divergence(\mu_0, \sigma_0; \mu_1, \sigma_1) = 0.5 (ratio^2 + (\frac{diff}{\sigma_1})^2 - 1 - 2 \ln {ratio})

        .. math::

            ratio = \frac{\sigma_0}{\sigma_1}

        .. math::

            diff = \mu_1 - \mu_0

        In the above equation:

        * :math:`loc = \mu_0`: is the means of current underlying Normal distribution.
        * :math:`scale = \sigma_0`: is the stddevs of current underlying Normal distribution.
        * :math:`loc = \mu_1`: is the means of other underlying Normal distribution.
        * :math:`scale = \sigma_1`: is the stddevs of other underlying Normal distribution.
        * :math:`ratio`: is the ratio of scales.
        * :math:`diff`: is the difference between means.

        Args:
            other (LogNormal): instance of LogNormal.

        Returns:
            Tensor: kl-divergence between two lognormal distributions.

        )r   kl_divergence)r   r'   r   r   r   r(      s   !zLogNormal.kl_divergence)r   r   r   r   r   r   )r   r
   )r%   r
   r   r
   )r'   r   r   r
   )__name__
__module____qualname____doc____annotations__r   propertyr    r!   r$   r&   r(   __classcell__r   r   r   r   r   6   s   
 F


r   )$
__future__r   typingr   r   Zpaddle.distribution.normalr   Zpaddle.distribution.transformr   Z,paddle.distribution.transformed_distributionr   collections.abcr   r   numpynpZnumpy.typingZnptZtyping_extensionsr	   r
   Zpaddle._typingr   floatcomplexr   r-   Zfloat32Zfloat64Z	complex64Z
complex128r   ZNDArrayr   r   r   r   r   r   r   <module>   sF   	