from keras.src import backend
from keras.src import dtype_policies
from keras.src import ops
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.saving.keras_saveable import KerasSaveable
from keras.src.utils.naming import auto_name


@keras_export(["keras.Loss", "keras.losses.Loss"])
class Loss(KerasSaveable):
    """Loss base class.

    This is the class to subclass in order to create new custom losses.

    Args:
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`
            (see the worked example after this argument list).
        name: Optional name for the loss instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is
            a `"float32"` unless set to a different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.
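
    For example, given per-sample losses `[1.0, 3.0]` and
    `sample_weight=[1.0, 0.5]`, the weighted losses are `[1.0, 1.5]`:
    `"sum"` returns `2.5`, `"sum_over_batch_size"` and `"mean"` return
    `1.25` (dividing by the batch size of 2), and
    `"mean_with_sample_weight"` returns `2.5 / 1.5` (dividing by the sum
    of the sample weights).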

    To be implemented by subclasses:

    * `call()`: Contains the logic for loss calculation using `y_true`,
        `y_pred`.

    Example subclass implementation:

    ```python
    class MeanSquaredError(Loss):
        def call(self, y_true, y_pred):
            return ops.mean(ops.square(y_pred - y_true), axis=-1)
    ```
    """

    def __init__(self, name=None, reduction="sum_over_batch_size", dtype=None):
        self.name = name or auto_name(self.__class__.__name__)
        self.reduction = standardize_reduction(reduction)
        self._dtype_policy = dtype_policies.get(dtype or backend.floatx())
        self._dtype = self._dtype_policy.compute_dtype

    @property
    def dtype(self):
        return self._dtype

    def __call__(self, y_true, y_pred, sample_weight=None):
        in_mask = backend.get_keras_mask(y_pred)

        with ops.name_scope(self.name):
            y_pred = tree.map_structure(
                lambda x: ops.convert_to_tensor(x, dtype=self.dtype), y_pred
            )
            y_true = tree.map_structure(
                lambda x: ops.convert_to_tensor(x, dtype=self.dtype), y_true
            )

            losses = self.call(y_true, y_pred)
            out_mask = backend.get_keras_mask(losses)

            if in_mask is not None and out_mask is not None:
                mask = in_mask & out_mask
            elif in_mask is not None:
                mask = in_mask
            elif out_mask is not None:
                mask = out_mask
            else:
                mask = None

            return reduce_weighted_values(
                losses,
                sample_weight=sample_weight,
                mask=mask,
                reduction=self.reduction,
                dtype=self.dtype,
            )

    def call(self, y_true, y_pred):
        raise NotImplementedError

    def get_config(self):
        return {"name": self.name, "reduction": self.reduction}

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def _obj_type(self):
        return "Loss"


def standardize_reduction(reduction):
    allowed = {
        "sum_over_batch_size",
        "sum",
        None,
        "none",
        "mean",
        "mean_with_sample_weight",
    }
    if reduction not in allowed:
        raise ValueError(
            "Invalid value for argument `reduction`. "
            f"Expected one of {allowed}. Received: reduction={reduction}"
        )
    return reduction


def squeeze_or_expand_to_same_rank(x1, x2, expand_rank_1=True):
    """Squeeze/expand last dim if ranks differ from expected by exactly 1."""
    x1_rank = len(x1.shape)
    x2_rank = len(x2.shape)
    if x1_rank == x2_rank:
        return x1, x2
    if x1_rank == x2_rank + 1 and x1.shape[-1] == 1:
        if x2_rank == 1 and expand_rank_1:
            x2 = ops.expand_dims(x2, axis=-1)
        else:
            x1 = ops.squeeze(x1, axis=-1)
    if x2_rank == x1_rank + 1 and x2.shape[-1] == 1:
        if x1_rank == 1 and expand_rank_1:
            x1 = ops.expand_dims(x1, axis=-1)
        else:
            x2 = ops.squeeze(x2, axis=-1)
    return x1, x2


def reduce_values(values, sample_weight=None, reduction="sum_over_batch_size"):
    if (
        reduction is None
        or reduction == "none"
        or tuple(values.shape) == ()
        or tuple(values.shape) == (0,)
    ):
        return values
    loss = ops.sum(values)
    if reduction in ("sum_over_batch_size", "mean", "mean_with_sample_weight"):
        if reduction == "mean_with_sample_weight" and sample_weight is not None:
            divisor = ops.cast(ops.sum(sample_weight), loss.dtype)
        else:
            divisor = ops.cast(
                ops.prod(ops.convert_to_tensor(ops.shape(values), dtype="int32")),
                loss.dtype,
            )
        loss = ops.divide_no_nan(loss, divisor)
        loss = scale_loss_for_distribution(loss)
    return loss


def reduce_weighted_values(
    values,
    sample_weight=None,
    mask=None,
    reduction="sum_over_batch_size",
    dtype=None,
):
    reduction = standardize_reduction(reduction)

    values = ops.convert_to_tensor(values, dtype=dtype)
    if sample_weight is not None:
        sample_weight = ops.convert_to_tensor(sample_weight, dtype=dtype)
    if mask is not None:
        mask = ops.convert_to_tensor(mask, dtype=dtype)

    # Merge the mask and the sample weight into a single sample weight.
    sample_weight = apply_mask(
        sample_weight, mask, dtype=values.dtype, reduction=reduction
    )

    if sample_weight is not None:
        sample_weight = ops.cast(sample_weight, values.dtype)
        # Update dimensions of `sample_weight` to match `values`.
        values, sample_weight = squeeze_or_expand_to_same_rank(
            values, sample_weight
        )
        values = values * sample_weight

    # Apply the reduction to the individual weighted losses.
    loss = reduce_values(values, sample_weight, reduction)
    return loss


def apply_mask(sample_weight, mask, dtype, reduction):
    """Applies any mask on predictions to sample weights."""
    if mask is not None:
        mask = ops.cast(mask, dtype=dtype)
        if reduction in ("mean", "sum_over_batch_size"):
            # Rescale so that masked-out entries contribute 0 and the mean is
            # effectively taken over the valid (unmasked) entries only.
            total = ops.cast(
                ops.prod(ops.convert_to_tensor(ops.shape(mask), dtype="int32")),
                dtype,
            )
            valid = ops.sum(mask)  # May be 0!
            mask *= total / (valid + backend.epsilon())

        if sample_weight is not None:
            sample_weight = ops.cast(sample_weight, dtype=dtype)
            mask, sample_weight = squeeze_or_expand_to_same_rank(
                mask, sample_weight
            )
            sample_weight *= mask
        else:
            sample_weight = mask
    return sample_weight


def scale_loss_for_distribution(value):
    """Scales the given value by the number of replicas in the strategy.

    Currently, this function is only effective when using the tensorflow
    backend and `tf.distribute`.
    """
    if backend.backend() == "tensorflow":
        import tensorflow as tf

        num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
        if num_replicas > 1:
            value = ops.multiply(
                value, ops.cast(1.0 / num_replicas, value.dtype)
            )
    return value


def unscale_loss_for_distribution(value):
    """Unscales the given value by the number of replicas in the strategy.

    Currently, this function is only effective when using the tensorflow
    backend and `tf.distribute`.
    """
    if backend.backend() == "tensorflow":
        import tensorflow as tf

        num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
        if num_replicas > 1:
            value = ops.multiply(value, ops.cast(num_replicas, value.dtype))
    return value