from keras.src import initializers
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer


@keras_export(["keras.optimizers.Ftrl"])
class Ftrl(optimizer.Optimizer):
    """Optimizer that implements the FTRL algorithm.

    "Follow The Regularized Leader" (FTRL) is an optimization algorithm
    developed at Google for click-through rate prediction in the early 2010s. It
    is most suitable for shallow models with large and sparse feature spaces.
    The algorithm is described by
    [McMahan et al., 2013](https://research.google.com/pubs/archive/41159.pdf).
    The Keras version has support for both online L2 regularization
    (the L2 regularization described in the paper
    above) and shrinkage-type L2 regularization
    (which is the addition of an L2 penalty to the loss function).

    Initialization:

    ```python
    n = 0
    sigma = 0
    z = 0
    ```

    Update rule for one variable `w`:

    ```python
    prev_n = n
    n = n + g ** 2
    sigma = (n ** -lr_power - prev_n ** -lr_power) / lr
    z = z + g - sigma * w
    if abs(z) < lambda_1:
      w = 0
    else:
      w = (sgn(z) * lambda_1 - z) / ((beta + sqrt(n)) / alpha + lambda_2)
    ```

    Notation:

    - `lr` is the learning rate
    - `g` is the gradient for the variable
    - `lambda_1` is the L1 regularization strength
    - `lambda_2` is the L2 regularization strength
    - `lr_power` is the power used to scale `n`
    - `alpha` is the learning rate `lr` and `beta` is the `beta` argument,
      as in the paper

    See the documentation for the `l2_shrinkage_regularization_strength`
    parameter below for more details on shrinkage. When shrinkage is enabled,
    the gradient `g` above is replaced with a gradient with shrinkage.
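
    Concretely, the implementation below uses
    `g + 2 * l2_shrinkage_regularization_strength * w` as that shrinkage
    gradient. As an illustration, here is a minimal pure-Python sketch of one
    update of the rule above for a single scalar weight (an illustrative
    helper, not part of the Keras API; it assumes `alpha` in the formula is
    the learning rate `lr`):

    ```python
    import math

    def ftrl_scalar_step(
        w, g, n, z, lr=0.1, lr_power=-0.5, lambda_1=0.0, lambda_2=0.0,
        beta=0.0
    ):
        # One FTRL step for a scalar weight, mirroring the pseudocode.
        prev_n = n
        n = n + g**2  # accumulate the squared gradient
        sigma = (n**-lr_power - prev_n**-lr_power) / lr
        z = z + g - sigma * w
        if abs(z) < lambda_1:
            w = 0.0  # L1 sparsification zeroes small coordinates
        else:
            w = (math.copysign(lambda_1, z) - z) / (
                (beta + math.sqrt(n)) / lr + lambda_2
            )
        return w, n, z
    ```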

    Args:
        learning_rate: A float, a
            `keras.optimizers.schedules.LearningRateSchedule` instance, or
            a callable that takes no arguments and returns the actual value to
            use. The learning rate. Defaults to `0.001`.
        learning_rate_power: A float value; must be less than or equal to
            zero. Controls how the learning rate decreases during training.
            Use zero for a fixed learning rate.
        initial_accumulator_value: The starting value for accumulators. Only
            zero or positive values are allowed.
        l1_regularization_strength: A float value, must be greater than or equal
            to zero. Defaults to `0.0`.
        l2_regularization_strength: A float value, must be greater than or equal
            to zero. Defaults to `0.0`.
        l2_shrinkage_regularization_strength: A float value, must be greater
            than or equal to zero. This differs from L2 above in that the L2
            above is a stabilization penalty, whereas this L2 shrinkage is a
            magnitude penalty. When the input is sparse, shrinkage will only
            happen on the active weights.
        beta: A float value, representing the beta value from the paper.
            Defaults to `0.0`.
        {{base_optimizer_keyword_args}}
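
    Example:

    A minimal usage sketch (assuming a typical Keras workflow; `model` is a
    stand-in for an already-built `keras.Model`):

    ```python
    import keras

    optimizer = keras.optimizers.Ftrl(learning_rate=0.001)
    model.compile(optimizer=optimizer, loss="binary_crossentropy")
    ```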
    """

    def __init__(
        self,
        learning_rate=0.001,
        learning_rate_power=-0.5,
        initial_accumulator_value=0.1,
        l1_regularization_strength=0.0,
        l2_regularization_strength=0.0,
        l2_shrinkage_regularization_strength=0.0,
        beta=0.0,
        weight_decay=None,
        clipnorm=None,
        clipvalue=None,
        global_clipnorm=None,
        use_ema=False,
        ema_momentum=0.99,
        ema_overwrite_frequency=None,
        loss_scale_factor=None,
        gradient_accumulation_steps=None,
        name="ftrl",
        **kwargs,
    ):
        super().__init__(
            learning_rate=learning_rate,
            name=name,
            weight_decay=weight_decay,
            clipnorm=clipnorm,
            clipvalue=clipvalue,
            global_clipnorm=global_clipnorm,
            use_ema=use_ema,
            ema_momentum=ema_momentum,
            ema_overwrite_frequency=ema_overwrite_frequency,
            loss_scale_factor=loss_scale_factor,
            gradient_accumulation_steps=gradient_accumulation_steps,
            **kwargs,
        )

        # Validate hyperparameters up front so misconfigurations fail fast.
        if initial_accumulator_value < 0.0:
            raise ValueError(
                "`initial_accumulator_value` needs to be positive or zero. "
                "Received: initial_accumulator_value="
                f"{initial_accumulator_value}."
            )
        if learning_rate_power > 0.0:
            raise ValueError(
                "`learning_rate_power` needs to be negative or zero. "
                f"Received: learning_rate_power={learning_rate_power}."
            )
        if l1_regularization_strength < 0.0:
            raise ValueError(
                "`l1_regularization_strength` needs to be positive or zero. "
                "Received: l1_regularization_strength="
                f"{l1_regularization_strength}."
            )
        if l2_regularization_strength < 0.0:
            raise ValueError(
                "`l2_regularization_strength` needs to be positive or zero. "
                "Received: l2_regularization_strength="
                f"{l2_regularization_strength}."
            )
        if l2_shrinkage_regularization_strength < 0.0:
            raise ValueError(
                "`l2_shrinkage_regularization_strength` needs to be positive "
                "or zero. Received: l2_shrinkage_regularization_strength="
                f"{l2_shrinkage_regularization_strength}."
            )

        self.learning_rate_power = learning_rate_power
        self.initial_accumulator_value = initial_accumulator_value
        self.l1_regularization_strength = l1_regularization_strength
        self.l2_regularization_strength = l2_regularization_strength
        self.l2_shrinkage_regularization_strength = (
            l2_shrinkage_regularization_strength
        )
        self.beta = beta

    def build(self, var_list):
        """Initialize optimizer variables.

        Args:
            var_list: list of model variables to build Ftrl variables on.
        Naccumulatorlinearzeros)initializer)	builtr   buildr   Constantr   add_optimizer_variables_accumulators_linears)r#   var_listaccumulator_initializerr%   r   r'   r-      s   z
Ftrl.buildc                 C   s(  t ||j}t ||j}| j| | }| j| | }| j}| j}|| jd|   }t 	|t 
d| j |}	t 	|t |}
| |t |	t 
t t t |
| t || || t 	t t |
| |d| }t || j | j}| |t t ||| | ||
 dS )z=Update step given gradient and the associated model variable.g       @   N)r   castdtyper0   _get_variable_indexr1   r   r    r"   addmultiplyr!   square
assign_addsubtractdividepowerclipr   assign)r#   gradientvariabler   lraccumr)   lr_powerl2_reggrad_to_use	new_accum	quadraticlinear_clippedr   r   r'   update_step   sV   
zFtrl.update_stepc              	      s2   t   }|| j| j| j| j| j| jd |S )N)r   r   r   r    r!   r"   )	r   
get_configupdater   r   r   r    r!   r"   )r#   configr%   r   r'   rL      s   

zFtrl.get_config)r   r   r	   r
   r
   r
   r
   NNNNFr   NNNr   )	__name__
__module____qualname____doc__r   r-   rK   rL   __classcell__r   r   r%   r'   r      s.    GK2r   z{{base_optimizer_keyword_args}}N)	keras.srcr   r   keras.src.api_exportr   keras.src.optimizersr   	Optimizerr   rR   replacebase_optimizer_keyword_argsr   r   r   r'   <module>   s     f
