import warnings

from keras.src import initializers
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.losses.loss import squeeze_or_expand_to_same_rank
from keras.src.losses.losses import log_cosh
from keras.src.losses.losses import mean_absolute_error
from keras.src.losses.losses import mean_absolute_percentage_error
from keras.src.losses.losses import mean_squared_error
from keras.src.losses.losses import mean_squared_logarithmic_error
from keras.src.metrics import reduction_metrics
from keras.src.utils.numerical_utils import normalize


@keras_export("keras.metrics.MeanSquaredError")
class MeanSquaredError(reduction_metrics.MeanMetricWrapper):
    """Computes the mean squared error between `y_true` and `y_pred`.

    Formula:

    ```python
    loss = mean(square(y_true - y_pred))
    ```

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Example:
    >>> m = keras.metrics.MeanSquaredError()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result()
    0.25
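
    Usage with `compile()` API, mirroring the other metrics in this module:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[keras.metrics.MeanSquaredError()])
    ```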
    """

    def __init__(self, name="mean_squared_error", dtype=None):
        super().__init__(fn=mean_squared_error, name=name, dtype=dtype)
        # Metric should be minimized during optimization.
        self._direction = "down"

    def get_config(self):
        return {"name": self.name, "dtype": self.dtype}


@keras_export("keras.metrics.MeanAbsoluteError")
class MeanAbsoluteError(reduction_metrics.MeanMetricWrapper):
    """Computes the mean absolute error between the labels and predictions.

    Formula:

    ```python
    loss = mean(abs(y_true - y_pred))
    ```

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Examples:

    >>> m = keras.metrics.MeanAbsoluteError()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result()
    0.25

    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
    ...                sample_weight=[1, 0])
    >>> m.result()
    0.5

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[keras.metrics.MeanAbsoluteError()])
    ```
    """

    def __init__(self, name="mean_absolute_error", dtype=None):
        super().__init__(fn=mean_absolute_error, name=name, dtype=dtype)
        # Metric should be minimized during optimization.
        self._direction = "down"

    def get_config(self):
        return {"name": self.name, "dtype": self.dtype}


@keras_export("keras.metrics.MeanAbsolutePercentageError")
class MeanAbsolutePercentageError(reduction_metrics.MeanMetricWrapper):
    """Computes mean absolute percentage error between `y_true` and `y_pred`.

    Formula:

    ```python
    loss = 100 * mean(abs((y_true - y_pred) / y_true))
    ```

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Examples:
    >>> m = keras.metrics.MeanAbsolutePercentageError()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result()
    250000000.0

    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
    ...                sample_weight=[1, 0])
    >>> m.result()
    500000000.0

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[keras.metrics.MeanAbsolutePercentageError()])
    ```
    """

    def __init__(self, name="mean_absolute_percentage_error", dtype=None):
        super().__init__(
            fn=mean_absolute_percentage_error, name=name, dtype=dtype
        )
        # Metric should be minimized during optimization.
        self._direction = "down"

    def get_config(self):
        return {"name": self.name, "dtype": self.dtype}


@keras_export("keras.metrics.MeanSquaredLogarithmicError")
class MeanSquaredLogarithmicError(reduction_metrics.MeanMetricWrapper):
    """Computes mean squared logarithmic error between `y_true` and `y_pred`.

    Formula:

    ```python
    loss = mean(square(log(y_true + 1) - log(y_pred + 1)))
    ```

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Examples:

    >>> m = keras.metrics.MeanSquaredLogarithmicError()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result()
    0.12011322

    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
    ...                sample_weight=[1, 0])
    >>> m.result()
    0.24022643

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[keras.metrics.MeanSquaredLogarithmicError()])
    ```
    """

    def __init__(self, name="mean_squared_logarithmic_error", dtype=None):
        super().__init__(
            fn=mean_squared_logarithmic_error, name=name, dtype=dtype
        )
        # Metric should be minimized during optimization.
        self._direction = "down"

    def get_config(self):
        return {"name": self.name, "dtype": self.dtype}


@keras_export("keras.metrics.RootMeanSquaredError")
class RootMeanSquaredError(reduction_metrics.Mean):
    """Computes root mean squared error metric between `y_true` and `y_pred`.

    Formula:

    ```python
    loss = sqrt(mean((y_pred - y_true) ** 2))
    ```

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Examples:

    >>> m = keras.metrics.RootMeanSquaredError()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result()
    0.5

    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
    ...                sample_weight=[1, 0])
    >>> m.result()
    0.70710677

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[keras.metrics.RootMeanSquaredError()])
    ```
    """

    def __init__(self, name="root_mean_squared_error", dtype=None):
        super().__init__(name, dtype=dtype)
        # Metric should be minimized during optimization.
        self._direction = "down"

    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulates root mean squared error statistics.

        Args:
            y_true: The ground truth values.
            y_pred: The predicted values.
            sample_weight: Optional weighting of each example. Can
                be a `Tensor` whose rank is either 0, or the same rank as
                `y_true`, and must be broadcastable to `y_true`.
                Defaults to `1`.

        Returns:
            Update op.
        """
        y_true = ops.convert_to_tensor(y_true, self._dtype)
        y_pred = ops.convert_to_tensor(y_pred, self._dtype)
        y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)
        error_sq = ops.square(y_true - y_pred)
        return super().update_state(error_sq, sample_weight=sample_weight)

    def result(self):
        return ops.sqrt(super().result())


@keras_export("keras.metrics.CosineSimilarity")
class CosineSimilarity(reduction_metrics.MeanMetricWrapper):
    """Computes the cosine similarity between the labels and predictions.

    Formula:

    ```python
    loss = sum(l2_norm(y_true) * l2_norm(y_pred))
    ```
    See: [Cosine Similarity](https://en.wikipedia.org/wiki/Cosine_similarity).
    This metric keeps the average cosine similarity between `predictions` and
    `labels` over a stream of data.

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.
        axis: (Optional) Defaults to `-1`. The dimension along which the cosine
            similarity is computed.

    Examples:

    >>> # l2_norm(y_true) = [[0., 1.], [1./1.414, 1./1.414]]
    >>> # l2_norm(y_pred) = [[1., 0.], [1./1.414, 1./1.414]]
    >>> # l2_norm(y_true) . l2_norm(y_pred) = [[0., 0.], [0.5, 0.5]]
    >>> # result = mean(sum(l2_norm(y_true) . l2_norm(y_pred), axis=1))
    >>> #        = ((0. + 0.) +  (0.5 + 0.5)) / 2
    >>> m = keras.metrics.CosineSimilarity(axis=1)
    >>> m.update_state([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]])
    >>> m.result()
    0.49999997

    >>> m.reset_state()
    >>> m.update_state([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]],
    ...                sample_weight=[0.3, 0.7])
    >>> m.result()
    0.6999999

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[keras.metrics.CosineSimilarity(axis=1)])
    ```
    """

    def __init__(self, name="cosine_similarity", dtype=None, axis=-1):
        super().__init__(cosine_similarity, name, dtype=dtype, axis=axis)
        # Metric should be maximized during optimization.
        self._direction = "up"

    def get_config(self):
        return {"name": self.name, "dtype": self.dtype}


@keras_export("keras.metrics.LogCoshError")
class LogCoshError(reduction_metrics.MeanMetricWrapper):
    """Computes the logarithm of the hyperbolic cosine of the prediction error.

    Formula:

    ```python
    error = y_pred - y_true
    logcosh = mean(log((exp(error) + exp(-error))/2), axis=-1)
    ```

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Examples:

    >>> m = keras.metrics.LogCoshError()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result()
    0.10844523

    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
    ...                sample_weight=[1, 0])
    >>> m.result()
    0.21689045

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='mse',
                  metrics=[keras.metrics.LogCoshError()])
    ```
    """

    def __init__(self, name="logcosh", dtype=None):
        super().__init__(fn=log_cosh, name=name, dtype=dtype)
        # Metric should be minimized during optimization.
        self._direction = "down"

    def get_config(self):
        return {"name": self.name, "dtype": self.dtype}


@keras_export("keras.metrics.R2Score")
class R2Score(reduction_metrics.Metric):
    """Computes R2 score.

    Formula:

    ```python
    sum_squares_residuals = sum((y_true - y_pred) ** 2)
    sum_squares = sum((y_true - mean(y_true)) ** 2)
    R2 = 1 - sum_squares_residuals / sum_squares
    ```

    This is also called the
    [coefficient of determination](
    https://en.wikipedia.org/wiki/Coefficient_of_determination).

    It indicates how close the fitted regression line
    is to ground-truth data.

    - The highest score possible is 1.0. It indicates that the predictors
        perfectly account for variation in the target.
    - A score of 0.0 indicates that the predictors do not
        account for variation in the target.
    - It can also be negative if the model is worse than random.

    This metric can also compute the "Adjusted R2" score.

    Args:
        class_aggregation: Specifies how to aggregate scores corresponding to
            different output classes (or target dimensions),
            i.e. different dimensions on the last axis of the predictions.
            Equivalent to `multioutput` argument in Scikit-Learn.
            Should be one of
            `None` (no aggregation), `"uniform_average"`,
            `"variance_weighted_average"`.
        num_regressors: Number of independent regressors used
            ("Adjusted R2" score). 0 is the standard R2 score.
            Defaults to `0`.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Example:

    >>> y_true = np.array([[1], [4], [3]], dtype=np.float32)
    >>> y_pred = np.array([[2], [4], [4]], dtype=np.float32)
    >>> metric = keras.metrics.R2Score()
    >>> metric.update_state(y_true, y_pred)
    >>> result = metric.result()
    >>> result
    0.57142854
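
    Usage with `compile()` API, mirroring the other metrics in this module:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[keras.metrics.R2Score()])
    ```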
    """

    def __init__(
        self,
        class_aggregation="uniform_average",
        num_regressors=0,
        name="r2_score",
        dtype=None,
    ):
        super().__init__(name=name, dtype=dtype)
        # Metric should be maximized during optimization.
        self._direction = "up"
        valid_class_aggregation_values = (
            None,
            "uniform_average",
            "variance_weighted_average",
        )
        if class_aggregation not in valid_class_aggregation_values:
            raise ValueError(
                "Invalid value for argument `class_aggregation`. Expected "
                f"one of {valid_class_aggregation_values}. "
                f"Received: class_aggregation={class_aggregation}"
            )
        if num_regressors < 0:
            raise ValueError(
                "Invalid value for argument `num_regressors`. "
                "Expected a value >= 0. "
                f"Received: num_regressors={num_regressors}"
            )
        self.class_aggregation = class_aggregation
        self.num_regressors = num_regressors
        self.num_samples = self.add_variable(
            shape=(),
            initializer=initializers.Zeros(),
            name="num_samples",
        )
        self._built = False

    def _build(self, y_true_shape, y_pred_shape):
        if len(y_pred_shape) != 2 or len(y_true_shape) != 2:
            raise ValueError(
                "R2Score expects 2D inputs with shape "
                "(batch_size, output_dim). Received input "
                f"shapes: y_pred.shape={y_pred_shape} and "
                f"y_true.shape={y_true_shape}."
            )
        if y_pred_shape[-1] is None or y_true_shape[-1] is None:
            raise ValueError(
                "R2Score expects 2D inputs with shape "
                "(batch_size, output_dim), with output_dim fully "
                "defined (not None). Received input "
                f"shapes: y_pred.shape={y_pred_shape} and "
                f"y_true.shape={y_true_shape}."
            )
        num_classes = y_pred_shape[-1]
        self.squared_sum = self.add_variable(
            name="squared_sum",
            shape=[num_classes],
            initializer=initializers.Zeros(),
        )
        self.sum = self.add_variable(
            name="sum",
            shape=[num_classes],
            initializer=initializers.Zeros(),
        )
        self.total_mse = self.add_variable(
            name="residual",
            shape=[num_classes],
            initializer=initializers.Zeros(),
        )
        self.count = self.add_variable(
            name="count",
            shape=[num_classes],
            initializer=initializers.Zeros(),
        )
        self._built = True

    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulates statistics for the R2 score.

        Args:
            y_true: The ground truth values.
            y_pred: The predicted values.
            sample_weight: Optional weighting of each example. Can
                be a `Tensor` whose rank is either 0, or the same rank as
                `y_true`, and must be broadcastable to `y_true`.
                Defaults to `1`.

        Returns:
            Update op.
        """
        y_true = ops.convert_to_tensor(y_true, dtype=self._dtype)
        y_pred = ops.convert_to_tensor(y_pred, dtype=self._dtype)
        y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)
        if not self._built:
            self._build(y_true.shape, y_pred.shape)

        if sample_weight is None:
            sample_weight = 1
        sample_weight = ops.convert_to_tensor(sample_weight, dtype=self._dtype)
        if len(sample_weight.shape) == 1:
            # Make sure there is a features dimension.
            sample_weight = ops.expand_dims(sample_weight, axis=1)
        sample_weight = ops.broadcast_to(sample_weight, ops.shape(y_true))

        weighted_y_true = y_true * ops.cast(sample_weight, y_true.dtype)
        self.sum.assign(self.sum + ops.sum(weighted_y_true, axis=0))
        self.squared_sum.assign(
            self.squared_sum + ops.sum(y_true * weighted_y_true, axis=0)
        )
        self.total_mse.assign(
            self.total_mse
            + ops.sum(
                (y_true - y_pred) ** 2 * ops.cast(sample_weight, y_true.dtype),
                axis=0,
            )
        )
        self.count.assign(self.count + ops.sum(sample_weight, axis=0))
        self.num_samples.assign(self.num_samples + ops.size(y_true))

    def result(self):
        mean = self.sum / self.count
        total = self.squared_sum - self.sum * mean
        raw_scores = 1 - (self.total_mse / total)
        raw_scores = ops.where(ops.isinf(raw_scores), 0.0, raw_scores)

        if self.class_aggregation == "uniform_average":
            r2_score = ops.mean(raw_scores)
        elif self.class_aggregation == "variance_weighted_average":
            weighted_sum = ops.sum(total * raw_scores)
            sum_of_weights = ops.sum(total)
            r2_score = weighted_sum / sum_of_weights
        else:
            r2_score = raw_scores

        if self.num_regressors != 0:
            if self.num_regressors > self.num_samples - 1:
                warnings.warn(
                    "More independent predictors than datapoints "
                    "in adjusted R2 score. Falling back to standard R2 score.",
                    stacklevel=2,
                )
            elif self.num_regressors == self.num_samples - 1:
                warnings.warn(
                    "Division by zero in Adjusted R2 score. "
                    "Falling back to standard R2 score.",
                    stacklevel=2,
                )
            else:
                n = ops.convert_to_tensor(self.num_samples, dtype="float32")
                p = ops.convert_to_tensor(self.num_regressors, dtype="float32")
                num = ops.multiply(
                    ops.subtract(1.0, r2_score), ops.subtract(n, 1.0)
                )
                den = ops.subtract(ops.subtract(n, p), 1.0)
                r2_score = ops.subtract(1.0, ops.divide(num, den))
        return r2_score

    def reset_state(self):
        for v in self.variables:
            v.assign(ops.zeros(v.shape, dtype=v.dtype))

    def get_config(self):
        config = {
            "name": self.name,
            "dtype": self.dtype,
            "class_aggregation": self.class_aggregation,
            "num_regressors": self.num_regressors,
        }
        base_config = super().get_config()
        return {**base_config, **config}


def cosine_similarity(y_true, y_pred, axis=-1):
    """Computes the cosine similarity between labels and predictions.

    Formula:

    ```python
    loss = sum(l2_norm(y_true) * l2_norm(y_pred))
    ```

    Args:
        y_true: Tensor of true targets.
        y_pred: Tensor of predicted targets.
        axis: Axis along which to determine similarity. Defaults to `-1`.

    Returns:
        Cosine similarity tensor.

    Example:

    >>> y_true = [[0., 1.], [1., 1.], [1., 1.]]
    >>> y_pred = [[1., 0.], [1., 1.], [-1., -1.]]
    >>> loss = keras.losses.cosine_similarity(y_true, y_pred, axis=-1)
    [0., 0.99999994, -0.99999994]
    """
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
    y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)
    y_pred = normalize(y_pred, axis=axis)
    y_true = normalize(y_true, axis=axis)
    return ops.sum(y_true * y_pred, axis=axis)