from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer


@keras_export("keras.optimizers.RMSprop")
class RMSprop(optimizer.Optimizer):
    """Optimizer that implements the RMSprop algorithm.

    The gist of RMSprop is to:

    - Maintain a moving (discounted) average of the square of gradients
    - Divide the gradient by the root of this average

    This implementation of RMSprop uses plain momentum, not Nesterov momentum.

    The centered version additionally maintains a moving average of the
    gradients, and uses that average to estimate the variance.
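
    A rough per-variable sketch of the plain (uncentered, momentum-free)
    update, using illustrative names (`rms` for the squared-gradient
    average, `grad` for the gradient, `lr` for the learning rate) rather
    than API symbols; see `update_step` for the exact implementation:

        rms = rho * rms + (1 - rho) * grad ** 2
        w = w - lr * grad / sqrt(rms + epsilon)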

    Args:
        learning_rate: A float, a
            `keras.optimizers.schedules.LearningRateSchedule` instance, or
            a callable that takes no arguments and returns the actual value to
            use. The learning rate. Defaults to `0.001`.
        rho: float, defaults to 0.9. Discounting factor for the old gradients.
        momentum: float, defaults to 0.0. If not 0.0, the optimizer tracks the
            momentum value, with a decay rate equal to `1 - momentum`.
        epsilon: A small constant for numerical stability. This epsilon is
            "epsilon hat" in the Kingma and Ba paper (in the formula just before
            Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults
            to 1e-7.
        centered: Boolean. If `True`, gradients are normalized by the estimated
            variance of the gradient; if `False`, by the uncentered second moment.
            Setting this to `True` may help with training, but is slightly more
            expensive in terms of computation and memory. Defaults to `False`.
        {{base_optimizer_keyword_args}}

    Example:

    >>> opt = keras.optimizers.RMSprop(learning_rate=0.1)
    >>> var1 = keras.backend.Variable(10.0)
    >>> loss = lambda: (var1 ** 2) / 2.0  # d(loss) / d(var1) = var1
    >>> opt.minimize(loss, [var1])
    >>> var1
    9.683772

    Reference:

    - [Hinton, 2012](
        http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)
    """

    def __init__(
        self,
        learning_rate=0.001,
        rho=0.9,
        momentum=0.0,
        epsilon=1e-7,
        centered=False,
        weight_decay=None,
        clipnorm=None,
        clipvalue=None,
        global_clipnorm=None,
        use_ema=False,
        ema_momentum=0.99,
        ema_overwrite_frequency=None,
        loss_scale_factor=None,
        gradient_accumulation_steps=None,
        name="rmsprop",
        **kwargs,
    ):
        super().__init__(
            learning_rate=learning_rate,
            weight_decay=weight_decay,
            clipnorm=clipnorm,
            clipvalue=clipvalue,
            global_clipnorm=global_clipnorm,
            use_ema=use_ema,
            ema_momentum=ema_momentum,
            ema_overwrite_frequency=ema_overwrite_frequency,
            loss_scale_factor=loss_scale_factor,
            gradient_accumulation_steps=gradient_accumulation_steps,
            name=name,
            **kwargs,
        )
        self.rho = rho
        self.momentum = momentum
        self.epsilon = epsilon
        self.centered = centered

    def build(self, var_list):
        if self.built:
            return
        super().build(var_list)
        # One accumulator per trainable variable for the moving average of
        # squared gradients.
        self._velocities = self.add_optimizer_variables(var_list, "velocity")

        self._momentums = []
        if self.momentum > 0:
            self._momentums = self.add_optimizer_variables(
                var_list, "momentum"
            )

        self._average_gradients = []
        if self.centered:
            self._average_gradients = self.add_optimizer_variables(
                var_list, "average_gradient"
            )

    def update_step(self, gradient, variable, learning_rate):
        """Update step given gradient and the associated model variable."""
        lr = ops.cast(learning_rate, variable.dtype)
        gradient = ops.cast(gradient, variable.dtype)

        velocity = self._velocities[self._get_variable_index(variable)]
        momentum = None
        if self.momentum > 0:
            momentum = self._momentums[self._get_variable_index(variable)]
        average_grad = None
        if self.centered:
            average_grad = self._average_gradients[
                self._get_variable_index(variable)
            ]

        rho = self.rho

        # velocity <- rho * velocity + (1 - rho) * gradient ** 2
        self.assign(
            velocity,
            ops.add(
                ops.multiply(rho, velocity),
                ops.multiply(1 - rho, ops.square(gradient)),
            ),
        )
        if self.centered:
            # Track the (non-squared) gradient average and use it to
            # estimate the variance for the denominator.
            self.assign(
                average_grad,
                ops.add(
                    ops.multiply(rho, average_grad),
                    ops.multiply(1 - rho, gradient),
                ),
            )
            denominator = ops.add(
                ops.subtract(velocity, ops.square(average_grad)), self.epsilon
            )
        else:
            denominator = ops.add(velocity, self.epsilon)
        increment = ops.divide(
            ops.multiply(lr, gradient), ops.sqrt(denominator)
        )
        if self.momentum > 0:
            # momentum <- self.momentum * momentum + increment
            self.assign(
                momentum,
                ops.add(ops.multiply(self.momentum, momentum), increment),
            )
            self.assign_sub(variable, momentum)
        else:
            self.assign_sub(variable, increment)

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "rho": self.rho,
                "momentum": self.momentum,
                "epsilon": self.epsilon,
                "centered": self.centered,
            }
        )
        return config


RMSprop.__doc__ = RMSprop.__doc__.replace(
    "{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)

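
if __name__ == "__main__":
    # Minimal smoke-test sketch, assuming `keras` and `numpy` are available
    # in the environment. The tiny regression task is illustrative only: it
    # just confirms the optimizer builds its slot variables and trains.
    import numpy as np

    import keras

    x = np.random.rand(256, 4).astype("float32")
    y = x.sum(axis=1, keepdims=True)

    model = keras.Sequential([keras.layers.Dense(1)])
    model.compile(optimizer=RMSprop(learning_rate=0.05), loss="mse")
    history = model.fit(x, y, epochs=5, verbose=0)
    print("loss per epoch:", [round(v, 4) for v in history.history["loss"]])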