from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer


@keras_export(["keras.optimizers.Adam"])
class Adam(optimizer.Optimizer):
    """Optimizer that implements the Adam algorithm.

    Adam optimization is a stochastic gradient descent method that is based on
    adaptive estimation of first-order and second-order moments.

    According to
    [Kingma et al., 2014](http://arxiv.org/abs/1412.6980),
    the method is "*computationally
    efficient, has little memory requirement, invariant to diagonal rescaling of
    gradients, and is well suited for problems that are large in terms of
    data/parameters*".

    Args:
        learning_rate: A float, a
            `keras.optimizers.schedules.LearningRateSchedule` instance, or
            a callable that takes no arguments and returns the actual value to
            use. The learning rate. Defaults to `0.001`.
        beta_1: A float value or a constant float tensor, or a callable
            that takes no arguments and returns the actual value to use. The
            exponential decay rate for the 1st moment estimates. Defaults to
            `0.9`.
        beta_2: A float value or a constant float tensor, or a callable
            that takes no arguments and returns the actual value to use. The
            exponential decay rate for the 2nd moment estimates. Defaults to
            `0.999`.
        epsilon: A small constant for numerical stability. This epsilon is
            "epsilon hat" in the Kingma and Ba paper (in the formula just before
            Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults
            to `1e-7`.
        amsgrad: Boolean. Whether to apply AMSGrad variant of this algorithm
            from the paper "On the Convergence of Adam and beyond". Defaults
            to `False`.
        {{base_optimizer_keyword_args}}
    """

    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        amsgrad=False,
        weight_decay=None,
        clipnorm=None,
        clipvalue=None,
        global_clipnorm=None,
        use_ema=False,
        ema_momentum=0.99,
        ema_overwrite_frequency=None,
        loss_scale_factor=None,
        gradient_accumulation_steps=None,
        name="adam",
        **kwargs,
    ):
        super().__init__(
            learning_rate=learning_rate,
            name=name,
            weight_decay=weight_decay,
            clipnorm=clipnorm,
            clipvalue=clipvalue,
            global_clipnorm=global_clipnorm,
            use_ema=use_ema,
            ema_momentum=ema_momentum,
            ema_overwrite_frequency=ema_overwrite_frequency,
            loss_scale_factor=loss_scale_factor,
            gradient_accumulation_steps=gradient_accumulation_steps,
            **kwargs,
        )
        self.beta_1 = beta_1
        self.beta_2 = beta_2
        self.epsilon = epsilon
        self.amsgrad = amsgrad

    def build(self, var_list):
        """Initialize optimizer variables.

        Adam optimizer has 3 types of variables: momentums, velocities and
        velocity_hat (only set when amsgrad is applied).

        Args:
            var_list: list of model variables to build Adam variables on.
        """
        if self.built:
            return
        super().build(var_list)
        self._momentums, self._velocities = self.add_optimizer_variables(
            var_list, ["momentum", "velocity"]
        )
        if self.amsgrad:
            self._velocity_hats = self.add_optimizer_variables(
                var_list, "velocity_hat"
            )

    def update_step(self, gradient, variable, learning_rate):
        """Update step given gradient and the associated model variable."""
        lr = ops.cast(learning_rate, variable.dtype)
        gradient = ops.cast(gradient, variable.dtype)
        local_step = ops.cast(self.iterations + 1, variable.dtype)
        beta_1_power = ops.power(
            ops.cast(self.beta_1, variable.dtype), local_step
        )
        beta_2_power = ops.power(
            ops.cast(self.beta_2, variable.dtype), local_step
        )

        m = self._momentums[self._get_variable_index(variable)]
        v = self._velocities[self._get_variable_index(variable)]

        # Bias-corrected step size: lr * sqrt(1 - beta_2^t) / (1 - beta_1^t).
        alpha = lr * ops.sqrt(1 - beta_2_power) / (1 - beta_1_power)

        # Moment updates: m += (1 - beta_1) * (g - m); v += (1 - beta_2) * (g^2 - v).
        self.assign_add(
            m, ops.multiply(ops.subtract(gradient, m), 1 - self.beta_1)
        )
        self.assign_add(
            v,
            ops.multiply(
                ops.subtract(ops.square(gradient), v), 1 - self.beta_2
            ),
        )
        if self.amsgrad:
            # AMSGrad keeps the running maximum of the second-moment estimate.
            v_hat = self._velocity_hats[self._get_variable_index(variable)]
            self.assign(v_hat, ops.maximum(v_hat, v))
            v = v_hat
        self.assign_sub(
            variable,
            ops.divide(
                ops.multiply(m, alpha), ops.add(ops.sqrt(v), self.epsilon)
            ),
        )

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "beta_1": self.beta_1,
                "beta_2": self.beta_2,
                "epsilon": self.epsilon,
                "amsgrad": self.amsgrad,
            }
        )
        return config


Adam.__doc__ = Adam.__doc__.replace(
    "{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
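

# Minimal usage sketch (assumes the public `keras` package and NumPy are
# installed). The toy model, random data, and hyperparameter values below are
# hypothetical and chosen only to illustrate the documented `Adam` arguments;
# the guard keeps this from running on import.
if __name__ == "__main__":
    import numpy as np

    import keras

    # Small regression model: 4 inputs -> 1 output.
    model = keras.Sequential([keras.Input(shape=(4,)), keras.layers.Dense(1)])

    # Adam with the defaults documented above, spelled out explicitly.
    model.compile(
        optimizer=keras.optimizers.Adam(
            learning_rate=1e-3, beta_1=0.9, beta_2=0.999, epsilon=1e-7
        ),
        loss="mse",
    )

    # Random toy data, just to exercise one optimization pass.
    x = np.random.rand(32, 4).astype("float32")
    y = np.random.rand(32, 1).astype("float32")
    model.fit(x, y, epochs=1, verbose=0)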