"""The Beta distribution class."""

import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export


__all__ = [
    "Beta",
    "BetaWithSoftplusConcentration",
]


_beta_sample_note = """Note: `x` must have dtype `self.dtype` and be in
`[0, 1].` It must have a shape compatible with `self.batch_shape()`."""


@tf_export(v1=["distributions.Beta"])
class Beta(distribution.Distribution):
  """Beta distribution.

  The Beta distribution is defined over the `(0, 1)` interval using parameters
  `concentration1` (aka "alpha") and `concentration0` (aka "beta").

  #### Mathematical Details

  The probability density function (pdf) is,

  ```none
  pdf(x; alpha, beta) = x**(alpha - 1) (1 - x)**(beta - 1) / Z
  Z = Gamma(alpha) Gamma(beta) / Gamma(alpha + beta)
  ```

  where:

  * `concentration1 = alpha`,
  * `concentration0 = beta`,
  * `Z` is the normalization constant, and,
  * `Gamma` is the [gamma function](
    https://en.wikipedia.org/wiki/Gamma_function).

  The concentration parameters represent mean total counts of a `1` or a `0`,
  i.e.,

  ```none
  concentration1 = alpha = mean * total_concentration
  concentration0 = beta  = (1. - mean) * total_concentration
  ```

  where `mean` in `(0, 1)` and `total_concentration` is a positive real number
  representing a mean `total_count = concentration1 + concentration0`.
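
  For instance, here is a minimal sketch of that reparameterization (the
  numeric values are illustrative only, and `tfd` is the alias defined in the
  Examples below):

  ```python
  mean = 0.3
  total_concentration = 10.
  dist = tfd.Beta(concentration1=mean * total_concentration,
                  concentration0=(1. - mean) * total_concentration)
  # dist.mean() == 0.3, and the two concentrations sum to 10.
  ```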

  Distribution parameters are automatically broadcast in all functions; see
  examples for details.

  Warning: The samples can be zero due to finite precision.
  This happens more often when some of the concentrations are very small.
  Make sure to round the samples to `np.finfo(dtype).tiny` before computing the
  density.
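
  A small sketch of that workaround (here `dist` is a `tfd.Beta` as in the
  Examples below; clipping with `tf.maximum` is one way to implement the
  rounding, not part of this class):

  ```python
  import numpy as np
  import tensorflow as tf

  samples = dist.sample(10)
  # Keep samples away from exactly 0 before evaluating the density.
  safe_samples = tf.maximum(samples, np.finfo(np.float32).tiny)
  log_prob = dist.log_prob(safe_samples)
  ```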

  Samples of this distribution are reparameterized (pathwise differentiable).
  The derivatives are computed using the approach described in
  (Figurnov et al., 2018).

  #### Examples

  ```python
  import tensorflow_probability as tfp
  tfd = tfp.distributions

  # Create a batch of three Beta distributions.
  alpha = [1, 2, 3]
  beta = [1, 2, 3]
  dist = tfd.Beta(alpha, beta)

  dist.sample([4, 5])  # Shape [4, 5, 3]

  # `x` has three batch entries, each with two samples.
  x = [[.1, .4, .5],
       [.2, .3, .5]]
  # Calculate the probability of each pair of samples under the corresponding
  # distribution in `dist`.
  dist.prob(x)         # Shape [2, 3]
  ```

  ```python
  # Create batch_shape=[2, 3] via parameter broadcast:
  alpha = [[1.], [2]]      # Shape [2, 1]
  beta = [3., 4, 5]        # Shape [3]
  dist = tfd.Beta(alpha, beta)

  # alpha broadcast as: [[1., 1, 1,],
  #                      [2, 2, 2]]
  # beta broadcast as:  [[3., 4, 5],
  #                      [3, 4, 5]]
  # batch_shape [2, 3]
  dist.sample([4, 5])  # Shape [4, 5, 2, 3]

  x = [.2, .3, .5]
  # x will be broadcast as [[.2, .3, .5],
  #                         [.2, .3, .5]],
  # thus matching batch_shape [2, 3].
  dist.prob(x)         # Shape [2, 3]
  ```

  Compute the gradients of samples w.r.t. the parameters:

  ```python
  alpha = tf.constant(1.0)
  beta = tf.constant(2.0)
  dist = tfd.Beta(alpha, beta)
  samples = dist.sample(5)  # Shape [5]
  loss = tf.reduce_mean(tf.square(samples))  # Arbitrary loss function
  # Unbiased stochastic gradients of the loss function
  grads = tf.gradients(loss, [alpha, beta])
  ```

  References:
    Implicit Reparameterization Gradients:
      [Figurnov et al., 2018]
      (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients)
      ([pdf]
      (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients.pdf))
  """

  @deprecation.deprecated(
      "2019-01-01",
      "The TensorFlow Distributions library has moved to "
      "TensorFlow Probability "
      "(https://github.com/tensorflow/probability). You "
      "should update all references to use `tfp.distributions` "
      "instead of `tf.distributions`.",
      warn_once=True)
  def __init__(self,
               concentration1=None,
               concentration0=None,
               validate_args=False,
               allow_nan_stats=True,
               name="Beta"):
    """Initialize a batch of Beta distributions.

    Args:
      concentration1: Positive floating-point `Tensor` indicating mean
        number of successes; aka "alpha". Implies `self.dtype` and
        `self.batch_shape`, i.e.,
        `concentration1.shape = [N1, N2, ..., Nm] = self.batch_shape`.
      concentration0: Positive floating-point `Tensor` indicating mean
        number of failures; aka "beta". Otherwise has same semantics as
        `concentration1`.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.
    """
    parameters = dict(locals())
    with ops.name_scope(name, values=[concentration1, concentration0]) as name:
      self._concentration1 = self._maybe_assert_valid_concentration(
          ops.convert_to_tensor(concentration1, name="concentration1"),
          validate_args)
      self._concentration0 = self._maybe_assert_valid_concentration(
          ops.convert_to_tensor(concentration0, name="concentration0"),
          validate_args)
      check_ops.assert_same_float_dtype([
          self._concentration1, self._concentration0])
      self._total_concentration = self._concentration1 + self._concentration0
    super(Beta, self).__init__(
        dtype=self._total_concentration.dtype,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        reparameterization_type=distribution.FULLY_REPARAMETERIZED,
        parameters=parameters,
        graph_parents=[self._concentration1,
                       self._concentration0,
                       self._total_concentration],
        name=name)

  @staticmethod
  def _param_shapes(sample_shape):
    return dict(zip(
        ["concentration1", "concentration0"],
        [ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)] * 2))

  @property
  def concentration1(self):
    """Concentration parameter associated with a `1` outcome."""
    return self._concentration1

  @property
  def concentration0(self):
    """Concentration parameter associated with a `0` outcome."""
    return self._concentration0

  @property
  def total_concentration(self):
    """Sum of concentration parameters."""
    return self._total_concentration

  def _batch_shape_tensor(self):
    return array_ops.shape(self.total_concentration)

  def _batch_shape(self):
    return self.total_concentration.get_shape()

  def _event_shape_tensor(self):
    return constant_op.constant([], dtype=dtypes.int32)

  def _event_shape(self):
    return tensor_shape.TensorShape([])

  def _sample_n(self, n, seed=None):
    # A Beta sample is a ratio of Gamma samples: X1 / (X1 + X2) with
    # X1 ~ Gamma(concentration1) and X2 ~ Gamma(concentration0).
    expanded_concentration1 = array_ops.ones_like(
        self.total_concentration, dtype=self.dtype) * self.concentration1
    expanded_concentration0 = array_ops.ones_like(
        self.total_concentration, dtype=self.dtype) * self.concentration0
    gamma1_sample = random_ops.random_gamma(
        shape=[n],
        alpha=expanded_concentration1,
        dtype=self.dtype,
        seed=seed)
    gamma2_sample = random_ops.random_gamma(
        shape=[n],
        alpha=expanded_concentration0,
        dtype=self.dtype,
        seed=distribution_util.gen_new_seed(seed, "beta"))
    beta_sample = gamma1_sample / (gamma1_sample + gamma2_sample)
    return beta_sample

  @distribution_util.AppendDocstring(_beta_sample_note)
  def _log_prob(self, x):
    return self._log_unnormalized_prob(x) - self._log_normalization()

  @distribution_util.AppendDocstring(_beta_sample_note)
  def _prob(self, x):
    return math_ops.exp(self._log_prob(x))

  @distribution_util.AppendDocstring(_beta_sample_note)
  def _log_cdf(self, x):
    return math_ops.log(self._cdf(x))

  @distribution_util.AppendDocstring(_beta_sample_note)
  def _cdf(self, x):
    return math_ops.betainc(self.concentration1, self.concentration0, x)

  def _log_unnormalized_prob(self, x):
    x = self._maybe_assert_valid_sample(x)
    return (math_ops.xlogy(self.concentration1 - 1., x) +
            (self.concentration0 - 1.) * math_ops.log1p(-x))

  def _log_normalization(self):
    return (math_ops.lgamma(self.concentration1)
            + math_ops.lgamma(self.concentration0)
            - math_ops.lgamma(self.total_concentration))

  def _entropy(self):
    return (
        self._log_normalization()
        - (self.concentration1 - 1.) * math_ops.digamma(self.concentration1)
        - (self.concentration0 - 1.) * math_ops.digamma(self.concentration0)
        + ((self.total_concentration - 2.) *
           math_ops.digamma(self.total_concentration)))

  def _mean(self):
    return self._concentration1 / self._total_concentration

  def _variance(self):
    return self._mean() * (1. - self._mean()) / (1. + self.total_concentration)

  @distribution_util.AppendDocstring(
      """Note: The mode is undefined when `concentration1 <= 1` or
      `concentration0 <= 1`. If `self.allow_nan_stats` is `True`, `NaN`
      is used for undefined modes. If `self.allow_nan_stats` is `False` an
      exception is raised when one or more modes are undefined.""")
  def _mode(self):
    mode = (self.concentration1 - 1.) / (self.total_concentration - 2.)
    if self.allow_nan_stats:
      nan = array_ops.fill(
          self.batch_shape_tensor(),
          np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
          name="nan")
      is_defined = math_ops.logical_and(self.concentration1 > 1.,
                                        self.concentration0 > 1.)
      return array_ops.where_v2(is_defined, mode, nan)
    return control_flow_ops.with_dependencies([
        check_ops.assert_less(
            array_ops.ones([], dtype=self.dtype),
            self.concentration1,
            message="Mode undefined for concentration1 <= 1."),
        check_ops.assert_less(
            array_ops.ones([], dtype=self.dtype),
            self.concentration0,
            message="Mode undefined for concentration0 <= 1."),
    ], mode)

  def _maybe_assert_valid_concentration(self, concentration, validate_args):
    """Checks the validity of a concentration parameter."""
    if not validate_args:
      return concentration
    return control_flow_ops.with_dependencies([
        check_ops.assert_positive(
            concentration,
            message="Concentration parameter must be positive."),
    ], concentration)

  def _maybe_assert_valid_sample(self, x):
    """Checks the validity of a sample."""
    if not self.validate_args:
      return x
    return control_flow_ops.with_dependencies([
        check_ops.assert_positive(x, message="sample must be positive"),
        check_ops.assert_less(
            x,
            array_ops.ones([], self.dtype),
            message="sample must be less than `1`."),
    ], x)


class BetaWithSoftplusConcentration(Beta):
  """Beta with softplus transform of `concentration1` and `concentration0`."""

  @deprecation.deprecated(
      "2019-01-01",
      "Use `tfd.Beta(tf.nn.softplus(concentration1), "
      "tf.nn.softplus(concentration2))` instead.",
      warn_once=True)
  def __init__(self,
               concentration1,
               concentration0,
               validate_args=False,
               allow_nan_stats=True,
               name="BetaWithSoftplusConcentration"):
    parameters = dict(locals())
    with ops.name_scope(name, values=[concentration1,
                                      concentration0]) as name:
      super(BetaWithSoftplusConcentration, self).__init__(
          concentration1=nn.softplus(concentration1,
                                     name="softplus_concentration1"),
          concentration0=nn.softplus(concentration0,
                                     name="softplus_concentration0"),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=name)
    self._parameters = parameters


@kullback_leibler.RegisterKL(Beta, Beta)
def _kl_beta_beta(d1, d2, name=None):
  """Calculate the batchwise KL divergence KL(d1 || d2) with d1 and d2 Beta.

  Args:
    d1: instance of a Beta distribution object.
    d2: instance of a Beta distribution object.
    name: (optional) Name to use for created operations.
      default is "kl_beta_beta".

  Returns:
    Batchwise KL(d1 || d2)
  """
  def delta(fn, is_property=True):
    # Difference of a parameter (or method value) between d2 and d1.
    fn1 = getattr(d1, fn)
    fn2 = getattr(d2, fn)
    return (fn2 - fn1) if is_property else (fn2() - fn1())
  with ops.name_scope(name, "kl_beta_beta", values=[
      d1.concentration1,
      d1.concentration0,
      d1.total_concentration,
      d2.concentration1,
      d2.concentration0,
      d2.total_concentration]):
    return (delta("_log_normalization", is_property=False)
            - math_ops.digamma(d1.concentration1) * delta("concentration1")
            - math_ops.digamma(d1.concentration0) * delta("concentration0")
            + (math_ops.digamma(d1.total_concentration)
               * delta("total_concentration")))