"""Gradients for operators defined in random_ops.py."""

import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_random_ops
from tensorflow.python.ops import math_ops


def add_leading_unit_dimensions(x, num_dimensions):
  """Returns `x` reshaped with `num_dimensions` leading unit dimensions."""
  new_shape = array_ops.concat(
      [array_ops.ones([num_dimensions], dtype=dtypes.int32),
       array_ops.shape(x)], axis=0)
  return array_ops.reshape(x, new_shape)


@ops.RegisterGradient("RandomGamma")
def _RandomGammaGrad(op: ops.Operation, grad):
  """Returns the gradient of a Gamma sample w.r.t. alpha.

  The gradient is computed using implicit differentiation
  (Figurnov et al., 2018).

  Args:
    op: A `RandomGamma` operation. We assume that the inputs to the operation
      are `shape` and `alpha` tensors, and the output is the `sample` tensor.
    grad: The incoming gradient `dloss / dsample` of the same shape as
      `op.outputs[0]`.

  Returns:
    A `Tensor` with derivatives `dloss / dalpha`.
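
  Example:
    A minimal sketch of how this gradient is reached (the function itself is
    private and is invoked by the gradient machinery rather than called
    directly); `tf.random.gamma` lowers to the `RandomGamma` op:

    >>> import tensorflow as tf
    >>> alpha = tf.constant([2.0, 3.0])
    >>> with tf.GradientTape() as tape:
    ...   tape.watch(alpha)
    ...   sample = tf.random.gamma([5], alpha)
    >>> tape.gradient(sample, alpha).shape
    TensorShape([2])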

  References:
    Implicit Reparameterization Gradients:
      [Figurnov et al., 2018]
      (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients)
      ([pdf]
      (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients.pdf))
  """
  shape = op.inputs[0]
  alpha = op.inputs[1]
  sample = op.outputs[0]

  with ops.control_dependencies([grad]):
    # Make alpha broadcastable against the samples by prepending one unit
    # dimension per sample dimension.
    num_sample_dimensions = array_ops.shape(shape)[0]
    alpha_broadcastable = add_leading_unit_dimensions(
        alpha, num_sample_dimensions)
    partial_a = gen_random_ops.random_gamma_grad(alpha_broadcastable, sample)

    # The `shape` input receives no gradient; the alpha gradient is summed
    # over the sample dimensions.
    return (None, math_ops.reduce_sum(
        grad * partial_a, axis=math_ops.range(num_sample_dimensions)))


@ops.RegisterGradient("StatelessRandomGammaV2")
def _StatelessRandomGammaV2Grad(op: ops.Operation, grad):
  """Returns the gradient of a Gamma sample w.r.t. alpha.

  The gradient is computed using implicit differentiation
  (Figurnov et al., 2018).

  Args:
    op: A `StatelessRandomGamma` operation. We assume that the inputs to the
      operation are `shape`, `seed` and `alpha` tensors, and the output is the
      `sample` tensor.
    grad: The incoming gradient `dloss / dsample` of the same shape as
      `op.outputs[0]`.

  Returns:
    A `Tensor` with derivatives `dloss / dalpha`.
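
  Example:
    A minimal sketch; depending on the TensorFlow version,
    `tf.random.stateless_gamma` lowers to this op or to
    `StatelessRandomGammaV3`:

    >>> import tensorflow as tf
    >>> alpha = tf.constant(2.0)
    >>> with tf.GradientTape() as tape:
    ...   tape.watch(alpha)
    ...   sample = tf.random.stateless_gamma([3], seed=[1, 2], alpha=alpha)
    >>> tape.gradient(sample, alpha).shape
    TensorShape([])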

  References:
    Implicit Reparameterization Gradients:
      [Figurnov et al., 2018]
      (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients)
      ([pdf]
      (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients.pdf))
  """
  shape = op.inputs[0]
  alpha = op.inputs[2]
  sample = op.outputs[0]

  with ops.control_dependencies([grad]):
    # The inputs are (shape, seed, alpha); only alpha receives a gradient.
    return (None, None,
            _StatelessGammaGradAlpha(shape, alpha, sample, grad))


@ops.RegisterGradient("StatelessRandomGammaV3")
def _StatelessRandomGammaV3Grad(op: ops.Operation, grad):
  """Returns the gradient of a Gamma sample w.r.t. alpha.

  The gradient is computed using implicit differentiation
  (Figurnov et al., 2018).

  Args:
    op: A `StatelessRandomGamma` operation. We assume that the inputs to the
      operation are `shape`, `key`, `counter`, `alg`, and `alpha` tensors, and
      the output is the `sample` tensor.
    grad: The incoming gradient `dloss / dsample` of the same shape as
      `op.outputs[0]`.

  Returns:
    A `Tensor` with derivatives `dloss / dalpha`.
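
  Example:
    A minimal sketch showing that the alpha gradient is reduced back to the
    shape of `alpha` (whether `tf.random.stateless_gamma` lowers to this op
    or to `StatelessRandomGammaV2` depends on the TensorFlow version):

    >>> import tensorflow as tf
    >>> alpha = tf.constant([[1.0], [2.0]])  # shape [2, 1]
    >>> with tf.GradientTape() as tape:
    ...   tape.watch(alpha)
    ...   sample = tf.random.stateless_gamma(
    ...       [4, 2, 1], seed=[3, 4], alpha=alpha)
    >>> tape.gradient(sample, alpha).shape
    TensorShape([2, 1])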

  References:
    Implicit Reparameterization Gradients:
      [Figurnov et al., 2018]
      (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients)
      ([pdf]
      (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients.pdf))
  """
  shape = op.inputs[0]
  alpha = op.inputs[4]
  sample = op.outputs[0]

  with ops.control_dependencies([grad]):
    # The inputs are (shape, key, counter, alg, alpha); only alpha receives
    # a gradient.
    return (None, None, None, None,
            _StatelessGammaGradAlpha(shape, alpha, sample, grad))


def _StatelessGammaGradAlpha(shape, alpha, sample, grad):
  """Returns gradients of a gamma sampler wrt alpha."""
  # Unlike the stateful case, `shape` here is the full output shape, so the
  # number of sample dimensions is its size minus the rank of alpha.
  num_sample_dimensions = array_ops.shape(shape)[0] - array_ops.rank(alpha)
  alpha_broadcastable = add_leading_unit_dimensions(alpha,
                                                    num_sample_dimensions)
  partial_a = gen_random_ops.random_gamma_grad(alpha_broadcastable, sample)

  return math_ops.reduce_sum(
      grad * partial_a, axis=math_ops.range(num_sample_dimensions))


def _Ndtr(x):
  """Normal distribution function."""
  half_sqrt_2 = constant_op.constant(
      0.5 * np.sqrt(2.), dtype=x.dtype, name="half_sqrt_2")
  w = x * half_sqrt_2
  z = math_ops.abs(w)
  y = array_ops.where(
      z < half_sqrt_2,
      1. + math_ops.erf(w),
      array_ops.where(
          w > 0., 2. - math_ops.erfc(z), math_ops.erfc(z)))
  return 0.5 * y


@ops.RegisterGradient("StatelessParameterizedTruncatedNormal")
def _StatelessParameterizedTruncatedNormalGrad(op: ops.Operation, grad):
  """Returns the gradient of a TruncatedNormal sample w.r.t. parameters.

  The gradient is computed using implicit differentiation
  (Figurnov et al., 2018).

  Args:
    op: A `StatelessParameterizedTruncatedNormal` operation. We assume that the
      inputs to the operation are `shape`, `seed`, `mean`, `stddev`, `minval`,
      and `maxval` tensors, and the output is the `sample` tensor.
    grad: The incoming gradient `dloss / dsample` of the same shape as
      `op.outputs[0]`.

  Returns:
    A list of `Tensor` with derivatives with respect to each parameter.
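
  Example:
    A minimal sketch; `tf.random.stateless_parameterized_truncated_normal`
    lowers to this op, so all four distribution parameters receive implicit
    reparameterization gradients:

    >>> import tensorflow as tf
    >>> mean = tf.constant(0.0)
    >>> stddev = tf.constant(1.0)
    >>> with tf.GradientTape() as tape:
    ...   tape.watch([mean, stddev])
    ...   sample = tf.random.stateless_parameterized_truncated_normal(
    ...       [3], seed=[1, 2], means=mean, stddevs=stddev,
    ...       minvals=-1.0, maxvals=1.0)
    >>> [g.shape for g in tape.gradient(sample, [mean, stddev])]
    [TensorShape([]), TensorShape([])]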

  References:
    Implicit Reparameterization Gradients:
      [Figurnov et al., 2018]
      (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients)
      ([pdf]
      (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients.pdf))
  """
  shape = op.inputs[0]
  mean = op.inputs[2]
  stddev = op.inputs[3]
  minval = op.inputs[4]
  maxval = op.inputs[5]
  sample = op.outputs[0]

  with ops.control_dependencies([grad]):
    minval_std = (minval - mean) / stddev
    maxval_std = (maxval - mean) / stddev
    sample_std = (sample - mean) / stddev

    cdf_sample = (_Ndtr(sample_std) - _Ndtr(minval_std)) / (
        _Ndtr(maxval_std) - _Ndtr(minval_std))

    # Clip to avoid a zero argument in the log CDF expressions below.
    tiny = np.finfo(mean.dtype.as_numpy_dtype).tiny
    eps = np.finfo(mean.dtype.as_numpy_dtype).eps
    cdf_sample = clip_ops.clip_by_value(cdf_sample, tiny, 1 - eps)

    dmaxval = math_ops.exp(0.5 * (sample_std**2 - maxval_std**2) +
                           math_ops.log(cdf_sample))
    dminval = math_ops.exp(0.5 * (sample_std**2 - minval_std**2) +
                           math_ops.log1p(-cdf_sample))
    dmean = array_ops.ones_like(sample_std)
    dstddev = sample_std

    # Reduce over extra dimensions caused by `shape`: the difference in rank
    # between `shape` and the broadcasted parameter shape.
    mean_shape = array_ops.shape(mean)
    stddev_shape = array_ops.shape(stddev)
    minval_shape = array_ops.shape(minval)
    maxval_shape = array_ops.shape(maxval)

    broadcast_shape = array_ops.broadcast_dynamic_shape(
        mean_shape, stddev_shape)
    broadcast_shape = array_ops.broadcast_dynamic_shape(
        minval_shape, broadcast_shape)
    broadcast_shape = array_ops.broadcast_dynamic_shape(
        maxval_shape, broadcast_shape)
    extra_dims = math_ops.range(
        array_ops.size(shape) - array_ops.size(broadcast_shape))

    grad_mean = math_ops.reduce_sum(grad * dmean, axis=extra_dims)
    grad_stddev = math_ops.reduce_sum(grad * dstddev, axis=extra_dims)
    grad_minval = math_ops.reduce_sum(grad * dminval, axis=extra_dims)
    grad_maxval = math_ops.reduce_sum(grad * dmaxval, axis=extra_dims)

    # Undo any broadcasting between the individual parameters.
    _, rmean = gen_array_ops.broadcast_gradient_args(
        broadcast_shape, mean_shape)
    _, rstddev = gen_array_ops.broadcast_gradient_args(
        broadcast_shape, stddev_shape)
    _, rminval = gen_array_ops.broadcast_gradient_args(
        broadcast_shape, minval_shape)
    _, rmaxval = gen_array_ops.broadcast_gradient_args(
        broadcast_shape, maxval_shape)

    grad_mean = array_ops.reshape(
        math_ops.reduce_sum(grad_mean, axis=rmean, keepdims=True), mean_shape)
    grad_stddev = array_ops.reshape(
        math_ops.reduce_sum(grad_stddev, axis=rstddev, keepdims=True),
        stddev_shape)
    grad_minval = array_ops.reshape(
        math_ops.reduce_sum(grad_minval, axis=rminval, keepdims=True),
        minval_shape)
    grad_maxval = array_ops.reshape(
        math_ops.reduce_sum(grad_maxval, axis=rmaxval, keepdims=True),
        maxval_shape)

    # The first two inputs (`shape` and `seed`) receive no gradient.
    return None, None, grad_mean, grad_stddev, grad_minval, grad_maxval