"""GradientDescent for TensorFlow."""
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_training_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.training import optimizer
from tensorflow.python.util.tf_export import tf_export


@tf_export(v1=["train.GradientDescentOptimizer"])
class GradientDescentOptimizer(optimizer.Optimizer):
  """Optimizer that implements the gradient descent algorithm."""

  def __init__(self, learning_rate, use_locking=False, name="GradientDescent"):
    """Construct a new gradient descent optimizer.

    Args:
      learning_rate: A Tensor or a floating point value.  The learning
        rate to use.
      use_locking: If True use locks for update operations.
      name: Optional name prefix for the operations created when applying
        gradients. Defaults to "GradientDescent".

    @compatibility(eager)
    When eager execution is enabled, `learning_rate` can be a callable that
    takes no arguments and returns the actual value to use. This can be useful
    for changing these values across different invocations of optimizer
    functions.
    @end_compatibility
    """
    super(GradientDescentOptimizer, self).__init__(use_locking, name)
    self._learning_rate = learning_rate
    self._learning_rate_tensor = None

  def _apply_dense(self, grad, var):
    return gen_training_ops.apply_gradient_descent(
        var,
        math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
        grad,
        use_locking=self._use_locking).op

  def _resource_apply_dense(self, grad, handle):
    return gen_training_ops.resource_apply_gradient_descent(
        handle.handle,
        math_ops.cast(self._learning_rate_tensor, grad.dtype.base_dtype),
        grad,
        use_locking=self._use_locking)

  def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):
    return resource_variable_ops.resource_scatter_add(
        handle.handle, indices,
        -grad * math_ops.cast(self._learning_rate_tensor,
                              grad.dtype.base_dtype))

  def _apply_sparse_duplicate_indices(self, grad, var):
    delta = indexed_slices.IndexedSlices(
        grad.values *
        math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
        grad.indices, grad.dense_shape)
    return var.scatter_sub(delta, use_locking=self._use_locking)

  def _prepare(self):
    learning_rate = self._call_if_callable(self._learning_rate)
    self._learning_rate_tensor = ops.convert_to_tensor(
        learning_rate, name="learning_rate")
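

# Illustrative usage sketch (not part of the original TensorFlow module): a
# minimal TF1-style graph-mode training loop that drives
# GradientDescentOptimizer on a toy quadratic loss. It assumes TensorFlow is
# installed; the names `w`, `loss`, and `train_op` are hypothetical and chosen
# only for this example.
if __name__ == "__main__":
  import tensorflow.compat.v1 as tf

  tf.disable_eager_execution()

  w = tf.Variable(5.0)   # single scalar parameter
  loss = tf.square(w)    # minimum at w == 0
  # Each step applies w <- w - learning_rate * d(loss)/dw.
  train_op = GradientDescentOptimizer(learning_rate=0.1).minimize(loss)

  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(50):
      sess.run(train_op)
    print("w after 50 steps:", sess.run(w))  # close to 0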