from __future__ import annotations

from typing import TYPE_CHECKING, Literal

import numpy as np

import paddle

from .line_search import strong_wolfe
from .utils import (
    _value_and_gradient,
    check_initial_inverse_hessian_estimate,
    check_input_type,
)

if TYPE_CHECKING:
    from collections.abc import Callable

    from paddle import Tensor


def minimize_bfgs(
    objective_func: Callable[[Tensor], Tensor],
    initial_position: Tensor,
    max_iters: int = 50,
    tolerance_grad: float = 1e-7,
    tolerance_change: float = 1e-9,
    initial_inverse_hessian_estimate: Tensor | None = None,
    line_search_fn: Literal['strong_wolfe'] = 'strong_wolfe',
    max_line_search_iters: int = 50,
    initial_step_length: float = 1.0,
    dtype: Literal['float32', 'float64'] = 'float32',
    name: str | None = None,
) -> tuple[bool, int, Tensor, Tensor, Tensor, Tensor]:
    r"""
    Minimizes a differentiable function ``objective_func`` using the BFGS method.
    BFGS is a quasi-Newton method for unconstrained optimization of a differentiable function.
    Closely related is the Newton method for minimization. Consider the iterate update formula:

    .. math::
        x_{k+1} = x_{k} - H_k \nabla{f_k}

    If :math:`H_k` is the inverse Hessian of :math:`f` at :math:`x_k`, then it's the Newton method.
    If :math:`H_k` is symmetric and positive definite and is used as an approximation of the
    inverse Hessian, then it's a quasi-Newton method. In practice, the approximate inverse
    Hessians are built from gradients alone, over either the whole search history (BFGS)
    or only its most recent part (L-BFGS).
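
    At each iteration, BFGS refines the estimate from the most recent position and gradient
    differences, :math:`s_k = x_{k+1} - x_k` and :math:`y_k = \nabla{f_{k+1}} - \nabla{f_k}`,
    through the rank-two update implemented below:

    .. math::
        H_{k+1} = (I - \rho_k s_k y_k^T) H_k (I - \rho_k y_k s_k^T) + \rho_k s_k s_k^T,
        \qquad \rho_k = \frac{1}{y_k^T s_k}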

    Reference:
        Jorge Nocedal, Stephen J. Wright, Numerical Optimization, 2nd Edition, 2006. pp. 140: Algorithm 6.1 (BFGS Method).

    Args:
        objective_func: the objective function to minimize. ``objective_func`` accepts a 1D Tensor and returns a scalar.
        initial_position (Tensor): the starting point of the iterates, with the same shape as the input of ``objective_func``.
        max_iters (int, optional): the maximum number of minimization iterations. Default value: 50.
        tolerance_grad (float, optional): terminates if the gradient norm is smaller than this value. The gradient norm is currently computed with the infinity norm. Default value: 1e-7.
        tolerance_change (float, optional): terminates if the change of function value/position/parameter between two iterations is smaller than this value. Default value: 1e-9.
        initial_inverse_hessian_estimate (Tensor, optional): the initial inverse Hessian approximation at ``initial_position``. It must be symmetric and positive definite. If not given, an identity matrix of order N is used, where N is the size of ``initial_position``. Default value: None.
        line_search_fn (str, optional): indicates which line search method to use; only ``'strong_wolfe'`` is supported right now (the conditions it enforces are stated in the note after this list). The Hager-Zhang method may be supported in the future. Default value: 'strong_wolfe'.
        max_line_search_iters (int, optional): the maximum number of line search iterations. Default value: 50.
        initial_step_length (float, optional): the step length used in the first iteration of line search. A different ``initial_step_length`` may lead to a different result. For methods like Newton and quasi-Newton, the initial trial step length should always be 1.0. Default value: 1.0.
        dtype ('float32' | 'float64', optional): the data type used in the algorithm; the data type of the input tensors must be consistent with it. Default value: 'float32'.
        name (str, optional): Name for the operation. For more information, please refer to :ref:`api_guide_Name`. Default value: None.
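
    Note:
        With ``line_search_fn='strong_wolfe'``, each iteration chooses a step size
        :math:`\alpha_k` along the search direction :math:`p_k = -H_k \nabla{f_k}` that
        satisfies the strong Wolfe conditions, for constants :math:`0 < c_1 < c_2 < 1`:

        .. math::
            f(x_k + \alpha_k p_k) \leq f(x_k) + c_1 \alpha_k \nabla{f_k}^T p_k, \qquad
            \left|\nabla{f}(x_k + \alpha_k p_k)^T p_k\right| \leq c_2 \left|\nabla{f_k}^T p_k\right|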

    Returns:
        output(tuple):

            - is_converge (bool): indicates whether the minimum was found within the given tolerances.
            - num_func_calls (int): the number of times the objective function was called.
            - position (Tensor): the position of the last iteration. If the search converged, this is the argmin of the objective function reached from the initial position.
            - objective_value (Tensor): objective function value at the `position`.
            - objective_gradient (Tensor): objective function gradient at the `position`.
            - inverse_hessian_estimate (Tensor): the estimate of the inverse Hessian at the `position`.

    Examples:
        .. code-block:: python
            :name: code-example1

            >>> # Example1: 1D Grid Parameters
            >>> import paddle
            >>> # Randomly simulate a batch of input data
            >>> inputs = paddle.normal(shape=(100, 1))
            >>> labels = inputs * 2.0
            >>> # define the loss function
            >>> def loss(w):
            ...     y = w * inputs
            ...     return paddle.nn.functional.square_error_cost(y, labels).mean()
            >>> # Initialize weight parameters
            >>> w = paddle.normal(shape=(1,))
            >>> # Call the bfgs method to solve the weight that makes the loss the smallest, and update the parameters
            >>> for epoch in range(0, 10):
            ...     # Call the bfgs method to optimize the loss, note that the third parameter returned represents the weight
            ...     w_update = paddle.incubate.optimizer.functional.minimize_bfgs(loss, w)[2]
            ...     # Use paddle.assign to update parameters in place
            ...     paddle.assign(w_update, w)

        .. code-block:: python
            :name: code-example2

            >>> # Example2: Multidimensional Grid Parameters
            >>> import paddle
            >>> def flatten(x):
            ...     return x.flatten()
            >>> def unflatten(x):
            ...     return x.reshape((2,2))
            >>> # Assume the network parameters are more than one dimension
            >>> def net(x):
            ...     assert len(x.shape) > 1
            ...     return x.square().mean()
            >>> # function to be optimized
            >>> def bfgs_f(flatten_x):
            ...     return net(unflatten(flatten_x))
            >>> x = paddle.rand([2,2])
            >>> for i in range(0, 10):
            ...     # Flatten x before using minimize_bfgs
            ...     x_update = paddle.incubate.optimizer.functional.minimize_bfgs(bfgs_f, flatten(x))[2]
            ...     # unflatten x_update, then update parameters
            ...     paddle.assign(unflatten(x_update), x)
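
        As a minimal sketch with an arbitrary convex objective ``quad`` (an illustrative
        function, not part of the library), the full six-element return tuple can be
        unpacked directly:

        .. code-block:: python
            :name: code-example3

            >>> # Example3: unpack the full return tuple
            >>> import paddle
            >>> # An illustrative convex objective; any differentiable scalar function works
            >>> def quad(x):
            ...     return (x * x).sum()
            >>> x0 = paddle.rand([3])
            >>> results = paddle.incubate.optimizer.functional.minimize_bfgs(quad, x0)
            >>> is_converge, num_func_calls, position, value, gradient, inv_hessian = results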
    """
    if dtype not in ['float32', 'float64']:
        raise ValueError(
            f"The dtype must be 'float32' or 'float64', but the specified is {dtype}."
        )

    op_name = 'minimize_bfgs'
    check_input_type(initial_position, 'initial_position', op_name)

    I = paddle.eye(initial_position.shape[0], dtype=dtype)
    if initial_inverse_hessian_estimate is None:
        Hk = I
    else:
        check_input_type(
            initial_inverse_hessian_estimate,
            'initial_inverse_hessian_estimate',
            op_name,
        )
        check_initial_inverse_hessian_estimate(initial_inverse_hessian_estimate)
        Hk = paddle.assign(initial_inverse_hessian_estimate)

    xk = paddle.assign(initial_position.detach())
    value, g1 = _value_and_gradient(objective_func, xk)

    k = paddle.full(shape=[1], fill_value=0, dtype='int64')
    done = paddle.full(shape=[1], fill_value=False, dtype='bool')
    is_converge = paddle.full(shape=[1], fill_value=False, dtype='bool')
    num_func_calls = paddle.full(shape=[1], fill_value=1, dtype='int64')

    def cond(k, done, is_converge, num_func_calls, xk, value, g1, Hk):
        return (k < max_iters) & ~done

    def body(k, done, is_converge, num_func_calls, xk, value, g1, Hk):
        # Compute the search direction from the current inverse Hessian estimate.
        pk = -paddle.matmul(Hk, g1)

        # Compute the step size alpha by line search.
        if line_search_fn == 'strong_wolfe':
            alpha, value, g2, ls_func_calls = strong_wolfe(
                f=objective_func,
                xk=xk,
                pk=pk,
                max_iters=max_line_search_iters,
                initial_step_length=initial_step_length,
                dtype=dtype,
            )
        else:
            raise NotImplementedError(
                f"Currently only support line_search_fn = 'strong_wolfe', but the specified is '{line_search_fn}'"
            )
        num_func_calls += ls_func_calls

        # Update Hk with the BFGS rank-two formula (Nocedal & Wright, eq. 6.17).
        sk = alpha * pk
        yk = g2 - g1

        xk = xk + sk
        g1 = g2

        sk = paddle.unsqueeze(sk, 0)
        yk = paddle.unsqueeze(yk, 0)

        rhok_inv = paddle.dot(yk, sk)
        # Guard against division by zero when the curvature term degenerates.
        rhok = paddle.static.nn.cond(
            rhok_inv == 0.0,
            lambda: paddle.full(shape=[1], fill_value=1000.0, dtype=dtype),
            lambda: 1.0 / rhok_inv,
        )

        Vk_transpose = I - rhok * sk * yk.t()
        Vk = I - rhok * yk * sk.t()
        Hk = (
            paddle.matmul(paddle.matmul(Vk_transpose, Hk), Vk)
            + rhok * sk * sk.t()
        )

        k += 1

        # Check convergence: small gradient norm or small position change.
        gnorm = paddle.linalg.norm(g1, p=np.inf)
        pk_norm = paddle.linalg.norm(pk, p=np.inf)
        paddle.assign(
            done | (gnorm < tolerance_grad) | (pk_norm < tolerance_change), done
        )
        paddle.assign(done, is_converge)
        # When alpha is 0, xk can no longer change; stop without marking convergence.
        paddle.assign(done | (alpha == 0.0), done)

        return [k, done, is_converge, num_func_calls, xk, value, g1, Hk]

    paddle.static.nn.while_loop(
        cond=cond,
        body=body,
        loop_vars=[k, done, is_converge, num_func_calls, xk, value, g1, Hk],
    )
    return is_converge, num_func_calls, xk, value, g1, Hk