o
    2h                     @   s  d dl Zd dlmZ d dlmZ d dlmZ d dlmZ d dlmZ d dl	m
Z
 d dlmZ d d	lmZ G d
d deZedG dd deZedG dd deZedG dd deZedG dd deZedG dd deZedG dd deZG dd deZed G d!d" d"eZed#G d$d% d%eZed&G d'd( d(eZed)G d*d+ d+eZed,G d-d. d.eZdS )/    N)activations)backend)initializers)ops)keras_export)metrics_utils)Metric)to_listc                       sB   e Zd ZdZ	d fdd	ZdddZdd Z fd	d
Z  ZS )_ConfusionMatrixConditionCounta  Calculates the number of the given confusion matrix condition.

    Args:
        confusion_matrix_cond: One of `metrics_utils.ConfusionMatrix`
            conditions.
        thresholds: (Optional) Defaults to `0.5`. A float value or a python list
            / tuple of float threshold values in `[0, 1]`. A threshold is
            compared with prediction values to determine the truth value of
            predictions (i.e., above the threshold is `True`, below is `False`).
            One metric value is generated for each threshold value.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.
    Nc                    s\   t  j||d || _|| _tj|dd| _t| j| _| j	t
| jft dd| _d S )Nnamedtype      ?default_thresholdaccumulatorshapeinitializerr   )super__init___confusion_matrix_condinit_thresholdsr   parse_init_thresholds
thresholds is_evenly_distributed_thresholds_thresholds_distributed_evenlyadd_variablelenr   Zerosr   )selfconfusion_matrix_condr   r   r   	__class__ ^/var/www/html/chatgem/venv/lib/python3.10/site-packages/keras/src/metrics/confusion_metrics.pyr      s   

z'_ConfusionMatrixConditionCount.__init__c                 C   s"   t j| j| ji||| j| j|dS )an  Accumulates the metric statistics.

        Args:
            y_true: The ground truth values.
            y_pred: The predicted values.
            sample_weight: Optional weighting of each example. Defaults to `1`.
                Can be a tensor whose rank is either 0, or the same rank as
                `y_true`, and must be broadcastable to `y_true`.
        )r   thresholds_distributed_evenlysample_weight)r   !update_confusion_matrix_variablesr   r   r   r   r    y_truey_predr'   r$   r$   r%   update_state.   s   

z+_ConfusionMatrixConditionCount.update_statec                 C   s*   t | jdkr| jd }n| j}t|S N   r   )r   r   r   r   convert_to_tensorr    resultr$   r$   r%   r1   A   s   
z%_ConfusionMatrixConditionCount.resultc                        d| j i}t  }i ||S )Nr   )r   r   
get_configr    configbase_configr"   r$   r%   r3   H      

z)_ConfusionMatrixConditionCount.get_configNNNN)	__name__
__module____qualname____doc__r   r,   r1   r3   __classcell__r$   r$   r"   r%   r
      s    
r
   zkeras.metrics.FalsePositivesc                       "   e Zd ZdZd fdd	Z  ZS )FalsePositivesa  Calculates the number of false positives.

    If `sample_weight` is given, calculates the sum of the weights of
    false positives. This metric creates one local variable, `accumulator`
    that is used to keep track of the number of false positives.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
        thresholds: (Optional) Defaults to `0.5`. A float value, or a Python
            list/tuple of float threshold values in `[0, 1]`. A threshold is
            compared with prediction values to determine the truth value of
            predictions (i.e., above the threshold is `True`, below is `False`).
            If used with a loss function that sets `from_logits=True` (i.e. no
            sigmoid applied to predictions), `thresholds` should be set to 0.
            One metric value is generated for each threshold value.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Examples:

    >>> m = keras.metrics.FalsePositives()
    >>> m.update_state([0, 1, 0, 0], [0, 0, 1, 1])
    >>> m.result()
    2.0

    >>> m.reset_state()
    >>> m.update_state([0, 1, 0, 0], [0, 0, 1, 1], sample_weight=[0, 0, 1, 0])
    >>> m.result()
    1.0
    Nc                       t  jtjj|||d d S N)r!   r   r   r   )r   r   r   ConfusionMatrixFALSE_POSITIVESr    r   r   r   r"   r$   r%   r   q      
zFalsePositives.__init__r8   r:   r;   r<   r=   r   r>   r$   r$   r"   r%   r@   N       !r@   zkeras.metrics.FalseNegativesc                       r?   )FalseNegativesa  Calculates the number of false negatives.

    If `sample_weight` is given, calculates the sum of the weights of
    false negatives. This metric creates one local variable, `accumulator`
    that is used to keep track of the number of false negatives.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
        thresholds: (Optional) Defaults to `0.5`. A float value, or a Python
            list/tuple of float threshold values in `[0, 1]`. A threshold is
            compared with prediction values to determine the truth value of
            predictions (i.e., above the threshold is `True`, below is `False`).
            If used with a loss function that sets `from_logits=True` (i.e. no
            sigmoid applied to predictions), `thresholds` should be set to 0.
            One metric value is generated for each threshold value.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Example:

    >>> m = keras.metrics.FalseNegatives()
    >>> m.update_state([0, 1, 1, 1], [0, 1, 0, 0])
    >>> m.result()
    2.0

    >>> m.reset_state()
    >>> m.update_state([0, 1, 1, 1], [0, 1, 0, 0], sample_weight=[0, 0, 1, 0])
    >>> m.result()
    1.0
    Nc                    rA   rB   )r   r   r   rC   FALSE_NEGATIVESrE   r"   r$   r%   r      rF   zFalseNegatives.__init__r8   rG   r$   r$   r"   r%   rI   z   rH   rI   zkeras.metrics.TrueNegativesc                       r?   )TrueNegativesa  Calculates the number of true negatives.

    If `sample_weight` is given, calculates the sum of the weights of
    true negatives. This metric creates one local variable, `accumulator`
    that is used to keep track of the number of true negatives.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
        thresholds: (Optional) Defaults to `0.5`. A float value, or a Python
            list/tuple of float threshold values in `[0, 1]`. A threshold is
            compared with prediction values to determine the truth value of
            predictions (i.e., above the threshold is `True`, below is `False`).
            If used with a loss function that sets `from_logits=True` (i.e. no
            sigmoid applied to predictions), `thresholds` should be set to 0.
            One metric value is generated for each threshold value.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Example:

    >>> m = keras.metrics.TrueNegatives()
    >>> m.update_state([0, 1, 0, 0], [1, 1, 0, 0])
    >>> m.result()
    2.0

    >>> m.reset_state()
    >>> m.update_state([0, 1, 0, 0], [1, 1, 0, 0], sample_weight=[0, 0, 1, 0])
    >>> m.result()
    1.0
    Nc                    rA   rB   )r   r   r   rC   TRUE_NEGATIVESrE   r"   r$   r%   r      rF   zTrueNegatives.__init__r8   rG   r$   r$   r"   r%   rK      rH   rK   zkeras.metrics.TruePositivesc                       r?   )TruePositivesa  Calculates the number of true positives.

    If `sample_weight` is given, calculates the sum of the weights of
    true positives. This metric creates one local variable, `true_positives`
    that is used to keep track of the number of true positives.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
        thresholds: (Optional) Defaults to `0.5`. A float value, or a Python
            list/tuple of float threshold values in `[0, 1]`. A threshold is
            compared with prediction values to determine the truth value of
            predictions (i.e., above the threshold is `True`, below is `False`).
            If used with a loss function that sets `from_logits=True` (i.e. no
            sigmoid applied to predictions), `thresholds` should be set to 0.
            One metric value is generated for each threshold value.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Example:

    >>> m = keras.metrics.TruePositives()
    >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
    >>> m.result()
    2.0

    >>> m.reset_state()
    >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0])
    >>> m.result()
    1.0
    Nc                    rA   rB   )r   r   r   rC   TRUE_POSITIVESrE   r"   r$   r%   r      rF   zTruePositives.__init__r8   rG   r$   r$   r"   r%   rM      rH   rM   zkeras.metrics.Precisionc                       J   e Zd ZdZ	d fdd	ZdddZdd Zd	d
 Z fddZ  Z	S )	Precisiona  Computes the precision of the predictions with respect to the labels.

    The metric creates two local variables, `true_positives` and
    `false_positives` that are used to compute the precision. This value is
    ultimately returned as `precision`, an idempotent operation that simply
    divides `true_positives` by the sum of `true_positives` and
    `false_positives`.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    If `top_k` is set, we'll calculate precision as how often on average a class
    among the top-k classes with the highest predicted values of a batch entry
    is correct and can be found in the label for that entry.

    If `class_id` is specified, we calculate precision by considering only the
    entries in the batch for which `class_id` is above the threshold and/or in
    the top-k highest predictions, and computing the fraction of them for which
    `class_id` is indeed a correct label.

    Args:
        thresholds: (Optional) A float value, or a Python list/tuple of float
            threshold values in `[0, 1]`. A threshold is compared with
            prediction values to determine the truth value of predictions (i.e.,
            above the threshold is `True`, below is `False`). If used with a
            loss function that sets `from_logits=True` (i.e. no sigmoid applied
            to predictions), `thresholds` should be set to 0. One metric value
            is generated for each threshold value. If neither `thresholds` nor
            `top_k` are set, the default is to calculate precision with
            `thresholds=0.5`.
        top_k: (Optional) Unset by default. An int value specifying the top-k
            predictions to consider when calculating precision.
        class_id: (Optional) Integer class ID for which we want binary metrics.
            This must be in the half-open interval `[0, num_classes)`, where
            `num_classes` is the last dimension of predictions.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Example:

    >>> m = keras.metrics.Precision()
    >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
    >>> m.result()
    0.6666667

    >>> m.reset_state()
    >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0])
    >>> m.result()
    1.0

    >>> # With top_k=2, it will calculate precision over y_true[:2]
    >>> # and y_pred[:2]
    >>> m = keras.metrics.Precision(top_k=2)
    >>> m.update_state([0, 0, 1, 1], [1, 1, 1, 1])
    >>> m.result()
    0.0

    >>> # With top_k=4, it will calculate precision over y_true[:4]
    >>> # and y_pred[:4]
    >>> m = keras.metrics.Precision(top_k=4)
    >>> m.update_state([0, 0, 1, 1], [1, 1, 1, 1])
    >>> m.result()
    0.5

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='binary_crossentropy',
                  metrics=[keras.metrics.Precision()])
    ```

    Usage with a loss with `from_logits=True`:

    ```python
    model.compile(optimizer='adam',
                  loss=keras.losses.BinaryCrossentropy(from_logits=True),
                  metrics=[keras.metrics.Precision(thresholds=0)])
    ```
    Nc                       t  j||d d| _|| _|| _|| _|d u rdntj}tj||d| _	t
| j	| _| jt| j	ft dd| _| jt| j	ft dd| _d S )Nr   upr   r   true_positivesr   false_positives)r   r   
_directionr   top_kclass_idr   NEG_INFr   r   r   r   r   r   r   r   rS   rT   r    r   rV   rW   r   r   r   r"   r$   r%   r   Q  *   


zPrecision.__init__c              
   C   :   t jt jj| jt jj| ji||| j| j| j	| j
|d dS )a  Accumulates true positive and false positive statistics.

        Args:
            y_true: The ground truth values, with the same dimensions as
                `y_pred`. Will be cast to `bool`.
            y_pred: The predicted values. Each element must be in the range
                `[0, 1]`.
            sample_weight: Optional weighting of each example. Defaults to `1`.
                Can be a tensor whose rank is either 0, or the same rank as
                `y_true`, and must be broadcastable to `y_true`.
        r   r&   rV   rW   r'   N)r   r(   rC   rN   rS   rD   rT   r   r   rV   rW   r)   r$   r$   r%   r,   n     


zPrecision.update_statec                 C   4   t | jt | j| j}t| jdkr|d S |S r-   )r   divide_no_nanrS   addrT   r   r   r0   r$   r$   r%   r1     
   zPrecision.resultc                 C   :   t t| j}| jt|f | jt|f d S r9   )r   r	   r   rS   assignr   zerosrT   r    num_thresholdsr$   r$   r%   reset_state     zPrecision.reset_statec                    (   | j | j| jd}t  }i ||S N)r   rV   rW   r   rV   rW   r   r3   r4   r"   r$   r%   r3        
zPrecision.get_configNNNNNr9   
r:   r;   r<   r=   r   r,   r1   rg   r3   r>   r$   r$   r"   r%   rP      s    R
rP   zkeras.metrics.Recallc                       rO   )Recalla
  Computes the recall of the predictions with respect to the labels.

    This metric creates two local variables, `true_positives` and
    `false_negatives`, that are used to compute the recall. This value is
    ultimately returned as `recall`, an idempotent operation that simply divides
    `true_positives` by the sum of `true_positives` and `false_negatives`.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    If `top_k` is set, recall will be computed as how often on average a class
    among the labels of a batch entry is in the top-k predictions.

    If `class_id` is specified, we calculate recall by considering only the
    entries in the batch for which `class_id` is in the label, and computing the
    fraction of them for which `class_id` is above the threshold and/or in the
    top-k predictions.

    Args:
        thresholds: (Optional) A float value, or a Python list/tuple of float
            threshold values in `[0, 1]`. A threshold is compared with
            prediction values to determine the truth value of predictions (i.e.,
            above the threshold is `True`, below is `False`). If used with a
            loss function that sets `from_logits=True` (i.e. no sigmoid
            applied to predictions), `thresholds` should be set to 0.
            One metric value is generated for each threshold value.
            If neither `thresholds` nor `top_k` are set,
            the default is to calculate recall with `thresholds=0.5`.
        top_k: (Optional) Unset by default. An int value specifying the top-k
            predictions to consider when calculating recall.
        class_id: (Optional) Integer class ID for which we want binary metrics.
            This must be in the half-open interval `[0, num_classes)`, where
            `num_classes` is the last dimension of predictions.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Example:

    >>> m = keras.metrics.Recall()
    >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
    >>> m.result()
    0.6666667

    >>> m.reset_state()
    >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0])
    >>> m.result()
    1.0

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='binary_crossentropy',
                  metrics=[keras.metrics.Recall()])
    ```

    Usage with a loss with `from_logits=True`:

    ```python
    model.compile(optimizer='adam',
                  loss=keras.losses.BinaryCrossentropy(from_logits=True),
                  metrics=[keras.metrics.Recall(thresholds=0)])
    ```
    Nc                    rQ   )Nr   rR   r   r   rS   r   false_negatives)r   r   rU   r   rV   rW   r   rX   r   r   r   r   r   r   r   r   rS   rp   rY   r"   r$   r%   r     rZ   zRecall.__init__c              
   C   r[   )a  Accumulates true positive and false negative statistics.

        Args:
            y_true: The ground truth values, with the same dimensions as
                `y_pred`. Will be cast to `bool`.
            y_pred: The predicted values. Each element must be in the range
                `[0, 1]`.
            sample_weight: Optional weighting of each example. Defaults to `1`.
                Can be a tensor whose rank is either 0, or the same rank as
                `y_true`, and must be broadcastable to `y_true`.
        r\   N)r   r(   rC   rN   rS   rJ   rp   r   r   rV   rW   r)   r$   r$   r%   r,     r]   zRecall.update_statec                 C   r^   r-   )r   r_   rS   r`   rp   r   r   r0   r$   r$   r%   r1     ra   zRecall.resultc                 C   rb   r9   )r   r	   r   rS   rc   r   rd   rp   re   r$   r$   r%   rg     rh   zRecall.reset_statec                    ri   rj   rk   r4   r"   r$   r%   r3   $  rl   zRecall.get_configrm   r9   rn   r$   r$   r"   r%   ro     s    B
ro   c                       sJ   e Zd ZdZ	d fdd	ZdddZdd	 Z fd
dZdd Z  Z	S )SensitivitySpecificityBasezAbstract base class for computing sensitivity and specificity.

    For additional information about specificity and sensitivity, see
    [the following](https://en.wikipedia.org/wiki/Sensitivity_and_specificity).
       Nc                    s   t  j||d d| _ dkrtd  || _|| _ dkr(dg| _d| _n fdd	t d
 D }dg| dg | _d| _| j	t
| jft dd| _| j	t
| jft dd| _| j	t
| jft dd| _| j	t
| jft dd| _d S )Nr   rR   r   zKArgument `num_thresholds` must be an integer > 0. Received: num_thresholds=r.   r   Fc                        g | ]}|d  d  d   qS r.         ?r$   .0irf   r$   r%   
<listcomp>I      z7SensitivitySpecificityBase.__init__.<locals>.<listcomp>           ru   TrS   r   rT   true_negativesrp   )r   r   rU   
ValueErrorvaluerW   r   r   ranger   r   r   r   rS   rT   r~   rp   )r    r   rf   rW   r   r   r   r"   ry   r%   r   5  sL   





z#SensitivitySpecificityBase.__init__c              	   C   sJ   t jt jj| jt jj| jt jj| jt jj	| j
i||| j| j| j|d dS )at  Accumulates confusion matrix statistics.

        Args:
            y_true: The ground truth values.
            y_pred: The predicted values.
            sample_weight: Optional weighting of each example. Defaults to `1`.
                Can be a tensor whose rank is either 0, or the same rank as
                `y_true`, and must be broadcastable to `y_true`.
        )r   r&   rW   r'   N)r   r(   rC   rN   rS   rL   r~   rD   rT   rJ   rp   r   r   rW   r)   r$   r$   r%   r,   e  s   





z'SensitivitySpecificityBase.update_statec                 C   s^   t | j}| jt|f | jt|f | jt|f | jt|f d S r9   )	r   r   rS   rc   r   rd   rT   r~   rp   re   r$   r$   r%   rg   ~  s
   
z&SensitivitySpecificityBase.reset_statec                    r2   )NrW   )rW   r   r3   r4   r"   r$   r%   r3     r7   z%SensitivitySpecificityBase.get_configc                 C   sH   t ||| j}t t |d}t jt ||dd}t ||dS )a  Returns the maximum of dependent_statistic that satisfies the
        constraint.

        Args:
            constrained: Over these values the constraint is specified. A rank-1
                tensor.
            dependent: From these values the maximum that satiesfies the
                constraint is selected. Values in this tensor and in
                `constrained` are linked by having the same threshold at each
                position, hence this tensor must have the same shape.
            predicate: A binary boolean functor to be applied to arguments
                `constrained` and `self.value`, e.g. `ops.greater`.

        Returns:
            maximal dependent value, if no value satisfies the constraint 0.0.
        r   )initialr}   )r   nonzeror   greatersizemaxtakewhere)r    constrained	dependent	predicatefeasiblefeasible_existsmax_dependentr$   r$   r%   _find_max_under_constraint  s   z5SensitivitySpecificityBase._find_max_under_constraintrr   NNNr9   )
r:   r;   r<   r=   r   r,   rg   r3   r   r>   r$   r$   r"   r%   rq   .  s    
0rq   z&keras.metrics.SensitivityAtSpecificityc                       >   e Zd ZdZ				d
 fdd	Zdd Z fdd	Z  ZS )SensitivityAtSpecificitya  Computes best sensitivity where specificity is >= specified value.

    `Sensitivity` measures the proportion of actual positives that are correctly
    identified as such `(tp / (tp + fn))`.
    `Specificity` measures the proportion of actual negatives that are correctly
    identified as such `(tn / (tn + fp))`.

    This metric creates four local variables, `true_positives`,
    `true_negatives`, `false_positives` and `false_negatives` that are used to
    compute the sensitivity at the given specificity. The threshold for the
    given specificity value is computed and used to evaluate the corresponding
    sensitivity.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    If `class_id` is specified, we calculate precision by considering only the
    entries in the batch for which `class_id` is above the threshold
    predictions, and computing the fraction of them for which `class_id` is
    indeed a correct label.

    For additional information about specificity and sensitivity, see
    [the following](https://en.wikipedia.org/wiki/Sensitivity_and_specificity).

    Args:
        specificity: A scalar value in range `[0, 1]`.
        num_thresholds: (Optional) Defaults to 200. The number of thresholds to
            use for matching the given specificity.
        class_id: (Optional) Integer class ID for which we want binary metrics.
            This must be in the half-open interval `[0, num_classes)`, where
            `num_classes` is the last dimension of predictions.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Example:

    >>> m = keras.metrics.SensitivityAtSpecificity(0.5)
    >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8])
    >>> m.result()
    0.5

    >>> m.reset_state()
    >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8],
    ...                sample_weight=[1, 1, 2, 2, 1])
    >>> m.result()
    0.333333

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='binary_crossentropy',
        metrics=[keras.metrics.SensitivityAtSpecificity(specificity=0.5)])
    ```
    rr   Nc                    D   |dk s|dkrt d| || _|| _t j|||||d d S )Nr   r.   zJArgument `specificity` must be in the range [0, 1]. Received: specificity=rf   rW   r   r   )r   specificityrf   r   r   )r    r   rf   rW   r   r   r"   r$   r%   r        
z!SensitivityAtSpecificity.__init__c                 C   sD   t | jt | j| j}t | jt | j| j}| ||t jS r9   	r   r_   rS   r`   rp   r~   rT   r   greater_equalr    sensitivitiesspecificitiesr$   r$   r%   r1        zSensitivityAtSpecificity.resultc                    $   | j | jd}t  }i ||S )N)rf   r   )rf   r   r   r3   r4   r"   r$   r%   r3     
   
z#SensitivityAtSpecificity.get_configr   r:   r;   r<   r=   r   r1   r3   r>   r$   r$   r"   r%   r         <r   z&keras.metrics.SpecificityAtSensitivityc                       r   )SpecificityAtSensitivitya  Computes best specificity where sensitivity is >= specified value.

    `Sensitivity` measures the proportion of actual positives that are correctly
    identified as such `(tp / (tp + fn))`.
    `Specificity` measures the proportion of actual negatives that are correctly
    identified as such `(tn / (tn + fp))`.

    This metric creates four local variables, `true_positives`,
    `true_negatives`, `false_positives` and `false_negatives` that are used to
    compute the specificity at the given sensitivity. The threshold for the
    given sensitivity value is computed and used to evaluate the corresponding
    specificity.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    If `class_id` is specified, we calculate precision by considering only the
    entries in the batch for which `class_id` is above the threshold
    predictions, and computing the fraction of them for which `class_id` is
    indeed a correct label.

    For additional information about specificity and sensitivity, see
    [the following](https://en.wikipedia.org/wiki/Sensitivity_and_specificity).

    Args:
        sensitivity: A scalar value in range `[0, 1]`.
        num_thresholds: (Optional) Defaults to 200. The number of thresholds to
            use for matching the given sensitivity.
        class_id: (Optional) Integer class ID for which we want binary metrics.
            This must be in the half-open interval `[0, num_classes)`, where
            `num_classes` is the last dimension of predictions.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Example:

    >>> m = keras.metrics.SpecificityAtSensitivity(0.5)
    >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8])
    >>> m.result()
    0.66666667

    >>> m.reset_state()
    >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8],
    ...                sample_weight=[1, 1, 2, 2, 2])
    >>> m.result()
    0.5

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='binary_crossentropy',
        metrics=[keras.metrics.SpecificityAtSensitivity(sensitivity=0.3)])
    ```
    rr   Nc                    r   )Nr   r.   zJArgument `sensitivity` must be in the range [0, 1]. Received: sensitivity=r   )r   sensitivityrf   r   r   )r    r   rf   rW   r   r   r"   r$   r%   r   E  r   z!SpecificityAtSensitivity.__init__c                 C   sD   t | jt | j| j}t | jt | j| j}| ||t jS r9   r   r   r$   r$   r%   r1   \  r   zSpecificityAtSensitivity.resultc                    r   )N)rf   r   )rf   r   r   r3   r4   r"   r$   r%   r3   i  r   z#SpecificityAtSensitivity.get_configr   r   r$   r$   r"   r%   r   
  r   r   zkeras.metrics.PrecisionAtRecallc                       s8   e Zd ZdZ	d
 fdd	Zdd Z fdd	Z  ZS )PrecisionAtRecalla  Computes best precision where recall is >= specified value.

    This metric creates four local variables, `true_positives`,
    `true_negatives`, `false_positives` and `false_negatives` that are used to
    compute the precision at the given recall. The threshold for the given
    recall value is computed and used to evaluate the corresponding precision.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    If `class_id` is specified, we calculate precision by considering only the
    entries in the batch for which `class_id` is above the threshold
    predictions, and computing the fraction of them for which `class_id` is
    indeed a correct label.

    Args:
        recall: A scalar value in range `[0, 1]`.
        num_thresholds: (Optional) Defaults to 200. The number of thresholds to
            use for matching the given recall.
        class_id: (Optional) Integer class ID for which we want binary metrics.
            This must be in the half-open interval `[0, num_classes)`, where
            `num_classes` is the last dimension of predictions.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Example:

    >>> m = keras.metrics.PrecisionAtRecall(0.5)
    >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8])
    >>> m.result()
    0.5

    >>> m.reset_state()
    >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8],
    ...                sample_weight=[2, 2, 2, 1, 1])
    >>> m.result()
    0.33333333

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='binary_crossentropy',
        metrics=[keras.metrics.PrecisionAtRecall(recall=0.8)])
    ```
    rr   Nc                    r   )Nr   r.   z@Argument `recall` must be in the range [0, 1]. Received: recall=r   rf   rW   r   r   )r   recallrf   r   r   )r    r   rf   rW   r   r   r"   r$   r%   r     s   
zPrecisionAtRecall.__init__c                 C   sD   t | jt | j| j}t | jt | j| j}| ||t jS r9   r   r_   rS   r`   rp   rT   r   r   r    recalls
precisionsr$   r$   r%   r1     r   zPrecisionAtRecall.resultc                    r   )N)rf   r   )rf   r   r   r3   r4   r"   r$   r%   r3     s   
zPrecisionAtRecall.get_configr   r   r$   r$   r"   r%   r   r  s    1r   zkeras.metrics.RecallAtPrecisionc                       r   )RecallAtPrecisionag  Computes best recall where precision is >= specified value.

    For a given score-label-distribution the required precision might not
    be achievable, in this case 0.0 is returned as recall.

    This metric creates four local variables, `true_positives`,
    `true_negatives`, `false_positives` and `false_negatives` that are used to
    compute the recall at the given precision. The threshold for the given
    precision value is computed and used to evaluate the corresponding recall.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    If `class_id` is specified, we calculate precision by considering only the
    entries in the batch for which `class_id` is above the threshold
    predictions, and computing the fraction of them for which `class_id` is
    indeed a correct label.

    Args:
        precision: A scalar value in range `[0, 1]`.
        num_thresholds: (Optional) Defaults to 200. The number of thresholds
            to use for matching the given precision.
        class_id: (Optional) Integer class ID for which we want binary metrics.
            This must be in the half-open interval `[0, num_classes)`, where
            `num_classes` is the last dimension of predictions.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Example:

    >>> m = keras.metrics.RecallAtPrecision(0.8)
    >>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])
    >>> m.result()
    0.5

    >>> m.reset_state()
    >>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9],
    ...                sample_weight=[1, 0, 0, 1])
    >>> m.result()
    1.0

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='binary_crossentropy',
        metrics=[keras.metrics.RecallAtPrecision(precision=0.8)])
    ```
    rr   Nc                    r   )Nr   r.   zFArgument `precision` must be in the range [0, 1]. Received: precision=r   )r   	precisionrf   r   r   )r    r   rf   rW   r   r   r"   r$   r%   r     r   zRecallAtPrecision.__init__c                 C   sD   t | jt | j| j}t | jt | j| j}| ||t jS r9   r   r   r$   r$   r%   r1     r   zRecallAtPrecision.resultc                    r   )N)rf   r   )rf   r   r   r3   r4   r"   r$   r%   r3   "  r   zRecallAtPrecision.get_configr   r   r$   r$   r"   r%   r     s    6r   zkeras.metrics.AUCc                       sx   e Zd ZdZ										d fdd	Zed	d
 Zdd ZdddZdd Z	dd Z
dd Z fddZ  ZS )AUCa  Approximates the AUC (Area under the curve) of the ROC or PR curves.

    The AUC (Area under the curve) of the ROC (Receiver operating
    characteristic; default) or PR (Precision Recall) curves are quality
    measures of binary classifiers. Unlike the accuracy, and like cross-entropy
    losses, ROC-AUC and PR-AUC evaluate all the operational points of a model.

    This class approximates AUCs using a Riemann sum. During the metric
    accumulation phrase, predictions are accumulated within predefined buckets
    by value. The AUC is then computed by interpolating per-bucket averages.
    These buckets define the evaluated operational points.

    This metric creates four local variables, `true_positives`,
    `true_negatives`, `false_positives` and `false_negatives` that are used to
    compute the AUC.  To discretize the AUC curve, a linearly spaced set of
    thresholds is used to compute pairs of recall and precision values. The area
    under the ROC-curve is therefore computed using the height of the recall
    values by the false positive rate, while the area under the PR-curve is the
    computed using the height of the precision values by the recall.

    This value is ultimately returned as `auc`, an idempotent operation that
    computes the area under a discretized curve of precision versus recall
    values (computed using the aforementioned variables). The `num_thresholds`
    variable controls the degree of discretization with larger numbers of
    thresholds more closely approximating the true AUC. The quality of the
    approximation may vary dramatically depending on `num_thresholds`. The
    `thresholds` parameter can be used to manually specify thresholds which
    split the predictions more evenly.

    For a best approximation of the real AUC, `predictions` should be
    distributed approximately uniformly in the range `[0, 1]` (if
    `from_logits=False`). The quality of the AUC approximation may be poor if
    this is not the case. Setting `summation_method` to 'minoring' or 'majoring'
    can help quantify the error in the approximation by providing lower or upper
    bound estimate of the AUC.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
        num_thresholds: (Optional) The number of thresholds to
            use when discretizing the roc curve. Values must be > 1.
            Defaults to `200`.
        curve: (Optional) Specifies the name of the curve to be computed,
            `'ROC'` (default) or `'PR'` for the Precision-Recall-curve.
        summation_method: (Optional) Specifies the [Riemann summation method](
              https://en.wikipedia.org/wiki/Riemann_sum) used.
              'interpolation' (default) applies mid-point summation scheme for
              `ROC`.  For PR-AUC, interpolates (true/false) positives but not
              the ratio that is precision (see Davis & Goadrich 2006 for
              details); 'minoring' applies left summation for increasing
              intervals and right summation for decreasing intervals; 'majoring'
              does the opposite.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.
        thresholds: (Optional) A list of floating point values to use as the
            thresholds for discretizing the curve. If set, the `num_thresholds`
            parameter is ignored. Values should be in `[0, 1]`. Endpoint
            thresholds equal to {`-epsilon`, `1+epsilon`} for a small positive
            epsilon value will be automatically included with these to correctly
            handle predictions equal to exactly 0 or 1.
        multi_label: boolean indicating whether multilabel data should be
            treated as such, wherein AUC is computed separately for each label
            and then averaged across labels, or (when `False`) if the data
            should be flattened into a single label before AUC computation. In
            the latter case, when multilabel data is passed to AUC, each
            label-prediction pair is treated as an individual data point. Should
            be set to `False` for multi-class data.
        num_labels: (Optional) The number of labels, used when `multi_label` is
            True. If `num_labels` is not specified, then state variables get
            created on the first call to `update_state`.
        label_weights: (Optional) list, array, or tensor of non-negative weights
            used to compute AUCs for multilabel data. When `multi_label` is
            True, the weights are applied to the individual label AUCs when they
            are averaged to produce the multi-label AUC. When it's False, they
            are used to weight the individual label predictions in computing the
            confusion matrix on the flattened data. Note that this is unlike
            `class_weights` in that `class_weights` weights the example
            depending on the value of its label, whereas `label_weights` depends
            only on the index of that label before flattening; therefore
            `label_weights` should not be used for multi-class data.
        from_logits: boolean indicating whether the predictions (`y_pred` in
        `update_state`) are probabilities or sigmoid logits. As a rule of thumb,
        when using a keras loss, the `from_logits` constructor argument of the
        loss should match the AUC `from_logits` constructor argument.

    Example:

    >>> m = keras.metrics.AUC(num_thresholds=3)
    >>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])
    >>> # threshold values are [0 - 1e-7, 0.5, 1 + 1e-7]
    >>> # tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
    >>> # tp_rate = recall = [1, 0.5, 0], fp_rate = [1, 0, 0]
    >>> # auc = ((((1 + 0.5) / 2) * (1 - 0)) + (((0.5 + 0) / 2) * (0 - 0)))
    >>> #     = 0.75
    >>> m.result()
    0.75

    >>> m.reset_state()
    >>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9],
    ...                sample_weight=[1, 0, 0, 1])
    >>> m.result()
    1.0

    Usage with `compile()` API:

    ```python
    # Reports the AUC of a model outputting a probability.
    model.compile(optimizer='sgd',
                  loss=keras.losses.BinaryCrossentropy(),
                  metrics=[keras.metrics.AUC()])

    # Reports the AUC of a model outputting a logit.
    model.compile(optimizer='sgd',
                  loss=keras.losses.BinaryCrossentropy(from_logits=True),
                  metrics=[keras.metrics.AUC(from_logits=True)])
    ```
    rr   ROCinterpolationNFc                    s  d| _ t|tjr|ttjvrtd| dttj t|tjr7|ttjvr7td| dttj |d u| _|d ur[t|d | _	t
|}ttdg| dg | _n dkrftd	   | _	 fd
dt d D }d| _tdt  g| dt  g | _t|tjr|| _ntj|| _t|tjr|| _ntj|| _t j||d || _|| _|	d urtj|	| jd}	|	| _nd | _|
| _d| _| jr|rd |g}| | d S d S |rtd| d  d S )NrR   z Invalid `curve` argument value "z". Expected one of: z+Invalid `summation_method` argument value "r|   r}   ru   r.   zKArgument `num_thresholds` must be an integer > 1. Received: num_thresholds=c                    rs   rt   r$   rv   ry   r$   r%   rz     r{   z AUC.__init__.<locals>.<listcomp>Tr   )r   Fz7`num_labels` is needed only when `multi_label` is True.) rU   
isinstancer   AUCCurvelistr   AUCSummationMethod_init_from_thresholdsr   rf   sortedr   nparrayr   r   r   epsilon_thresholdscurvefrom_strsummation_methodr   r   multi_label
num_labelsr   r   label_weights_from_logits_built_build)r    rf   r   r   r   r   r   r   r   r   from_logitsr   r"   ry   r%   r     s   


zAUC.__init__c                 C   s
   t | jS )z'The thresholds used for evaluating AUC.)r   r   )r    r$   r$   r%   r   
  s   
zAUC.thresholdsc                 C   s   | j r!t|dkrtdt| d| |d | _| j| jg}n| jg}|| _| j|t dd| _	| j|t dd| _
| j|t dd| _| j|t d	d| _d
| _dS )zKInitialize TP, FP, TN, and FN tensors, given the shape of the
        data.r|   z>`y_pred` must have rank 2 when `multi_label=True`. Found rank z$. Full shape received for `y_pred`: r.   rS   r   rT   r~   rp   TN)r   r   r   _num_labelsrf   _build_input_shaper   r   r   rS   rT   r~   rp   r   )r    r   variable_shaper$   r$   r%   r     sD   

z
AUC._buildc              
   C   s~   | j s	| |j | jrdn| j}| jrt|}tj	tj
j| jtj
j| jtj
j| jtj
j| ji||| j| j|| j|d dS )a  Accumulates confusion matrix statistics.

        Args:
            y_true: The ground truth values.
            y_pred: The predicted values.
            sample_weight: Optional weighting of each example. Can
                be a tensor whose rank is either 0, or the same rank as
                `y_true`, and must be broadcastable to `y_true`. Defaults to
                `1`.
        N)r&   r'   r   r   )r   r   r   r   r   r   r   sigmoidr   r(   rC   rN   rS   rL   r~   rD   rT   rJ   rp   r   r   )r    r*   r+   r'   r   r$   r$   r%   r,   7  s&   





zAUC.update_statec           	      C   s  t | jd| jd  | jdd }t | j| j}t |d| jd  |dd }t |t |d}t | jdd t ||dd }t 	t 
|d| jd  dk|dd dkt |d| jd  t |dd dt |dd }t t |t |t |t |t t | jdd | jdd d}| jrt j|dd}| jdu rt |S t t t || jt | jS t |S )a  Interpolation formula inspired by section 4 of Davis & Goadrich 2006.

        https://www.biostat.wisc.edu/~page/rocpr.pdf

        Note here we derive & use a closed formula not present in the paper
        as follows:

            Precision = TP / (TP + FP) = TP / P

        Modeling all of TP (true positive), FP (false positive) and their sum
        P = TP + FP (predicted positive) as varying linearly within each
        interval [A, B] between successive thresholds, we get

            Precision slope = dTP / dP
                            = (TP_B - TP_A) / (P_B - P_A)
                            = (TP - TP_A) / (P - P_A)
            Precision = (TP_A + slope * (P - P_A)) / P

        The area within the interval is (slope / total_pos_weight) times

            int_A^B{Precision.dP} = int_A^B{(TP_A + slope * (P - P_A)) * dP / P}
            int_A^B{Precision.dP} = int_A^B{slope * dP + intercept * dP / P}

        where intercept = TP_A - slope * P_A = TP_B - slope * P_B, resulting in

            int_A^B{Precision.dP} = TP_B - TP_A + intercept * log(P_B / P_A)

        Bringing back the factor (slope / total_pos_weight) we'd put aside, we
        get

            slope * [dTP + intercept *  log(P_B / P_A)] / total_pos_weight

        where dTP == TP_B - TP_A.

        Note that when P_A == 0 the above calculation simplifies into

            int_A^B{Precision.dTP} = int_A^B{slope * dTP}
                                   = slope * (TP_B - TP_A)

        which is really equivalent to imputing constant precision throughout the
        first bucket having >0 true positives.

        Returns:
            pr_auc: an approximation of the area under the P-R curve.
        Nr.   r   axis)r   subtractrS   rf   r`   rT   r_   maximummultiplyr   logical_and	ones_likelogrp   r   sumr   mean)	r    dtppdp
prec_slope	interceptsafe_p_ratiopr_auc_incrementby_label_aucr$   r$   r%   interpolate_pr_auc]  sF   /"(" 




zAUC.interpolate_pr_aucc                 C   sL  | j tjjkr| jtjjkr|  S t	| j
t| j
| j}| j tjjkr8t	| jt| j| j}|}|}ne| j tjjkrQt	| j
t| j
| j}|}|}nLt	t| j
| jt| j| j}d|t	| j| j
  }d|t	| j| j
  }tt| j
dd|}tt| j
dd|}t|d}|}|}| jtjjkrtt|d | jd  |dd  d}	n*| jtjjkrt|d | jd  |dd  }	nt|d | jd  |dd  }	tt|d | jd  |dd  |	}
| jr!tj|
dd}| jd u rt|S t	tt|| jt| jS t|
S )Nru   r}   r.   g       @r   r   )r   r   r   PRr   r   INTERPOLATIONr   r   r_   rS   r`   rp   r   rT   r~   r   equalr   dividerf   MINORINGminimumr   r   r   r   r   r   )r    r   fp_ratexyr   scaling_factorrecall_gainprecision_gainheightsriemann_termsr   r$   r$   r%   r1     s~   "$""


z
AUC.resultc                 C   sr   | j r7| jr| j| jf}n| jf}| jt| | jt| | j	t| | j
t| d S d S r9   )r   r   rf   r   rS   rc   r   rd   rT   r~   rp   )r    r   r$   r$   r%   rg     s   zAUC.reset_statec                    sX   | j }| j| jj| jj| j| j|| jd}| jr!| j	dd |d< t
  }i ||S )N)rf   r   r   r   r   r   r   r.   r   )r   rf   r   r   r   r   r   r   r   r   r   r3   )r    r   r5   r6   r"   r$   r%   r3   *  s   
zAUC.get_config)
rr   r   r   NNNFNNFr9   )r:   r;   r<   r=   r   propertyr   r   r,   r   r1   rg   r3   r>   r$   r$   r"   r%   r   +  s*    yf

(&Zgr   )numpyr   	keras.srcr   r   r   r   keras.src.api_exportr   keras.src.metricsr   keras.src.metrics.metricr   keras.src.utils.python_utilsr	   r
   r@   rI   rK   rM   rP   ro   rq   r   r   r   r   r   r$   r$   r$   r%   <module>   sF    A++++   tggVa