from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer


@keras_export("keras.optimizers.SGD")
class SGD(optimizer.Optimizer):
    """Gradient descent (with momentum) optimizer.

    Update rule for parameter `w` with gradient `g` when `momentum` is 0:

    ```python
    w = w - learning_rate * g
    ```

    Update rule when `momentum` is larger than 0:

    ```python
    velocity = momentum * velocity - learning_rate * g
    w = w + velocity
    ```

    When `nesterov=True`, this rule becomes:

    ```python
    velocity = momentum * velocity - learning_rate * g
    w = w + momentum * velocity - learning_rate * g
    ```

    Args:
        learning_rate: A float, a
            `keras.optimizers.schedules.LearningRateSchedule` instance, or
            a callable that takes no arguments and returns the actual value to
            use. The learning rate. Defaults to `0.01`.
        momentum: float hyperparameter >= 0 that accelerates gradient descent in
            the relevant direction and dampens oscillations. 0 is vanilla
            gradient descent. Defaults to `0.0`.
        nesterov: boolean. Whether to apply Nesterov momentum.
            Defaults to `False`.
        {{base_optimizer_keyword_args}}
    ç{®Gáz„?ç        FNç®Gáz®ï?c                    s\   t ƒ jd||||||||	|
||dœ|¤Ž t|tƒr"|dk s"|dkr&tdƒ‚|| _|| _d S )N)Úlearning_rateÚnameÚweight_decayÚclipnormÚ	clipvalueÚglobal_clipnormÚuse_emaÚema_momentumÚema_overwrite_frequencyÚloss_scale_factorÚgradient_accumulation_stepsr   é   z*`momentum` must be a float between [0, 1].© )ÚsuperÚ__init__Ú
isinstanceÚfloatÚ
ValueErrorÚmomentumÚnesterov)Úselfr	   r   r   r   r   r   r   r   r   r   r   r   r
   Úkwargs©Ú	__class__r   úS/var/www/html/chatgem/venv/lib/python3.10/site-packages/keras/src/optimizers/sgd.pyr   +   s&   õô
zSGD.__init__c                    s<   | j rdS tƒ  |¡ g | _| jdkr|  |d¡| _dS dS )záInitialize optimizer variables.

        SGD optimizer has one variable `momentums`, only set if `self.momentum`
        is not 0.

        Args:
          variables: list of model variables to build SGD variables on.
        """
        if self.built:
            return
        super().build(variables)
        self.momentums = []
        if self.momentum != 0:
            self.momentums = self.add_optimizer_variables(
                variables, "momentum"
            )

    def update_step(self, gradient, variable, learning_rate):
        """Update step given gradient and the associated model variable."""
        learning_rate = ops.cast(learning_rate, variable.dtype)
        gradient = ops.cast(gradient, variable.dtype)

        m = None
        if self.momentum != 0:
            m = self.momentums[self._get_variable_index(variable)]

        if m is not None:
            momentum = ops.cast(self.momentum, variable.dtype)
            self.assign(
                m,
                ops.subtract(
                    ops.multiply(m, momentum),
                    ops.multiply(gradient, learning_rate),
                ),
            )
            if self.nesterov:
                self.assign_add(
                    variable,
                    ops.subtract(
                        ops.multiply(m, momentum),
                        ops.multiply(gradient, learning_rate),
                    ),
                )
            else:
                self.assign_add(variable, m)
        else:
            self.assign_sub(variable, ops.multiply(gradient, learning_rate))

    def get_config(self):
        config = super().get_config()

        config.update(
            {
                "momentum": self.momentum,
                "nesterov": self.nesterov,
            }
        )
        return config


SGD.__doc__ = SGD.__doc__.replace(
    "{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
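

if __name__ == "__main__":
    # Illustrative sketch only, not part of the upstream Keras module: it
    # numerically replays the three update rules quoted in the class docstring
    # for a single scalar parameter, assuming a fixed gradient g = 1.0 and a
    # single optimization step starting from w = 1.0.
    learning_rate, momentum, g = 0.01, 0.9, 1.0

    # Vanilla SGD: w = w - learning_rate * g
    w = 1.0
    w = w - learning_rate * g
    print("vanilla SGD after one step:", w)  # 0.99

    # Momentum: velocity = momentum * velocity - learning_rate * g
    w, velocity = 1.0, 0.0
    velocity = momentum * velocity - learning_rate * g
    w = w + velocity
    print("momentum after one step:", w)  # 0.99 (velocity starts at zero)

    # Nesterov: the parameter update additionally looks one step ahead along
    # the velocity direction.
    w, velocity = 1.0, 0.0
    velocity = momentum * velocity - learning_rate * g
    w = w + momentum * velocity - learning_rate * g
    print("nesterov after one step:", w)  # 0.981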