"""Gradients for operators defined in sparse_ops.py."""
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops


ops.NotDifferentiable("SparseAddGrad")
ops.NotDifferentiable("SparseConcat")


@ops.RegisterGradient("SparseReorder")
def _SparseReorderGrad(op: ops.Operation, unused_output_indices_grad,
                       output_values_grad):
  """Gradients for the SparseReorder op.

  Args:
    op: the SparseReorder op
    unused_output_indices_grad: the incoming gradients of the output indices
    output_values_grad: the incoming gradients of the output values

  Returns:
    Gradient for each of the 3 input tensors:
      (input_indices, input_values, input_shape)
    The gradients for input_indices and input_shape are None.
  """
  input_indices = op.inputs[0]
  input_shape = op.inputs[2]

  num_entries = array_ops.shape(input_indices)[0]
  entry_indices = math_ops.range(num_entries)
  sp_unordered = sparse_tensor.SparseTensor(input_indices, entry_indices,
                                            input_shape)
  sp_ordered = sparse_ops.sparse_reorder(sp_unordered)
  inverted_permutation = array_ops.invert_permutation(sp_ordered.values)

  return (None, array_ops.gather(output_values_grad, inverted_permutation),
          None)
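
# Editor's note (not in the original source): SparseReorder only permutes the
# (index, value) pairs into canonical row-major order, so its gradient is just
# the inverse of that permutation applied to the incoming values gradient. The
# permutation is recovered above by reordering a SparseTensor whose values are
# simply range(num_entries).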


@ops.RegisterGradient("SparseAdd")
def _SparseAddGrad(op: ops.Operation, *grads):
  """The backward operator for the SparseAdd op.

  The SparseAdd op calculates A + B, where A, B, and the sum are all represented
  as `SparseTensor` objects.  This op takes in the upstream gradient w.r.t.
  non-empty values of the sum, and outputs the gradients w.r.t. the non-empty
  values of A and B.

  Args:
    op: the SparseAdd op
    *grads: the incoming gradients, one element per output of `op`

  Returns:
    Gradient for each of the 7 input tensors of SparseAdd:
      (a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh)
    The gradients for the indices, shapes, and the threshold are None.
  """
  val_grad = grads[1]
  a_indices = op.inputs[0]
  b_indices = op.inputs[3]
  sum_indices = op.outputs[0]

  a_val_grad, b_val_grad = gen_sparse_ops.sparse_add_grad(
      val_grad, a_indices, b_indices, sum_indices)
  a_val_grad.set_shape(op.inputs[1].get_shape())
  b_val_grad.set_shape(op.inputs[4].get_shape())
  # (a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh)
  return (None, a_val_grad, None, None, b_val_grad, None, None)
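
# Editor's illustrative sketch (not part of the original module): the SparseAdd
# gradient above only flows into the `values` of each operand, e.g.
#
#   a_vals = tf.constant([1.0, 2.0])
#   b_vals = tf.constant([3.0])
#   with tf.GradientTape() as tape:
#     tape.watch([a_vals, b_vals])
#     a = tf.sparse.SparseTensor([[0, 0], [1, 1]], a_vals, [2, 2])
#     b = tf.sparse.SparseTensor([[0, 1]], b_vals, [2, 2])
#     total = tf.sparse.reduce_sum(tf.sparse.add(a, b))
#   # Both gradients are all-ones; indices and shapes receive no gradient.
#   grad_a_vals, grad_b_vals = tape.gradient(total, [a_vals, b_vals])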


@ops.RegisterGradient("SparseTensorDenseAdd")
def _SparseTensorDenseAddGrad(op: ops.Operation, out_grad):
  sp_indices = op.inputs[0]
  # (sparse_indices, sparse_values, sparse_shape, dense)
  return (None, array_ops.gather_nd(out_grad, sp_indices), None, out_grad)


@ops.RegisterGradient("SparseReduceSum")
def _SparseReduceSumGrad(op: ops.Operation, out_grad):
  """Similar to gradient for the Sum Op (i.e. tf.reduce_sum())."""
  sp_indices = op.inputs[0]
  sp_shape = op.inputs[2]
  output_shape_kept_dims = math_ops.reduced_shape(sp_shape, op.inputs[3])
  out_grad_reshaped = array_ops.reshape(out_grad, output_shape_kept_dims)
  scale = sp_shape // math_ops.cast(output_shape_kept_dims, dtypes.int64)
  # (sparse_indices, sparse_values, sparse_shape, reduction_axes)
  return (None, array_ops.gather_nd(out_grad_reshaped, sp_indices // scale),
          None, None)


@ops.RegisterGradient("SparseSlice")
def _SparseSliceGrad(op: ops.Operation, *grads):
  """The backward operator for the SparseSlice op.

  This op takes in the upstream gradient w.r.t. non-empty values of
  the sliced `SparseTensor`, and outputs the gradients w.r.t.
  the non-empty values of input `SparseTensor`.

  Args:
    op: the SparseSlice op
    *grads: the incoming gradients, one element per output of `op`

  Returns:
    Gradient for each of the 5 input tensors of SparseSlice:
      (indices, values, shape, start, size)
    The gradients for the indices, shape, start and the size are None.
  """
  backprop_val_grad = grads[1]
  input_indices = op.inputs[0]
  input_start = op.inputs[3]
  output_indices = op.outputs[0]

  val_grad = gen_sparse_ops.sparse_slice_grad(backprop_val_grad, input_indices,
                                              input_start, output_indices)
  val_grad.set_shape(op.inputs[1].get_shape())
  # (indices, values, shape, start, size)
  return (None, val_grad, None, None, None)


@ops.RegisterGradient("SparseTensorDenseMatMul")
def _SparseTensorDenseMatMulGrad(op: ops.Operation, grad):
  """Gradients for the dense tensor in the SparseTensorDenseMatMul op.

  Args:
    op: the SparseTensorDenseMatMul op
    grad: the incoming gradient

  Returns:
    Gradient for each of the 4 input tensors:
      (sparse_indices, sparse_values, sparse_shape, dense_tensor)
    The gradients for indices and shape are None.

  Raises:
    TypeError: When the two operands don't have the same type.
  """
  a_indices, a_values, a_shape = op.inputs[:3]
  b = op.inputs[3]
  adj_a = op.get_attr("adjoint_a")
  adj_b = op.get_attr("adjoint_b")

  a_type = a_values.dtype.base_dtype
  b_type = b.dtype.base_dtype
  if a_type != b_type:
    raise TypeError(
        "SparseTensorDenseMatMul op received operands with different types: "
        f"`{a_type}` and `{b_type}`.")

  # gradient w.r.t. dense
  b_grad = gen_sparse_ops.sparse_tensor_dense_mat_mul(
      a_indices, a_values, a_shape, grad, adjoint_a=not adj_a)
  if adj_b:
    b_grad = array_ops.matrix_transpose(b_grad, conjugate=True)

  # gradient w.r.t. sparse values: batched 1x1 matmuls over the rows of `grad`
  # and `b` selected by the non-zero positions of `a`
  rows = a_indices[:, 0]
  cols = a_indices[:, 1]
  parts_a = array_ops.gather(grad, rows if not adj_a else cols)
  parts_b = array_ops.gather(
      b if not adj_b else array_ops.transpose(b), cols if not adj_a else rows)

  if not adj_a and not adj_b:
    a_values_grad = math_ops.matmul(
        array_ops.expand_dims(parts_a, 1),
        array_ops.expand_dims(parts_b, 1),
        adjoint_b=True)
  elif adj_a and not adj_b:
    a_values_grad = math_ops.matmul(
        array_ops.expand_dims(parts_a, 2),
        array_ops.expand_dims(parts_b, 2),
        adjoint_a=True)
  elif not adj_a and adj_b:
    a_values_grad = math_ops.matmul(
        array_ops.expand_dims(parts_a, 1),
        array_ops.expand_dims(parts_b, 2))
  else:
    a_values_grad = math_ops.matmul(
        array_ops.expand_dims(parts_a, 2),
        array_ops.expand_dims(parts_b, 1),
        adjoint_a=True,
        adjoint_b=True)

  # gradients w.r.t. (a_indices, a_values, a_shape, b)
  return (None, array_ops.squeeze(a_values_grad, [1, 2]), None, b_grad)
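
# Editor's note (not from the original source): for the common case
# adjoint_a=False, adjoint_b=False, the branch above computes, for each
# non-zero position k of `a`,
#
#   a_values_grad[k] = sum_j grad[rows[k], j] * conj(b[cols[k], j])
#
# i.e. the ordinary matmul gradient restricted to the sparsity pattern of `a`;
# the other branches handle the transposed/conjugated operand layouts.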


@ops.RegisterGradient("SparseDenseCwiseAdd")
def _SparseDenseCwiseAddGrad(unused_op, unused_grad):
  raise NotImplementedError("Gradient for SparseDenseCwiseAdd is not "
                            "implemented.")


def _SparseDenseCwiseMulOrDivGrad(op: ops.Operation, grad, is_mul):
  """Common code for SparseDenseCwise{Mul,Div} gradients."""
  x_indices = op.inputs[0]
  x_shape = op.inputs[2]
  y = op.inputs[3]

  y_shape = math_ops.cast(array_ops.shape(y), dtypes.int64)
  num_added_dims = array_ops.expand_dims(
      array_ops.size(x_shape) - array_ops.size(y_shape), 0)
  augmented_y_shape = array_ops.concat(
      [array_ops.ones(num_added_dims, dtypes.int64), y_shape], 0)

  scaling = x_shape // augmented_y_shape
  scaled_indices = x_indices // scaling
  scaled_indices = array_ops.slice(scaled_indices,
                                   array_ops.concat([[0], num_added_dims], 0),
                                   [-1, -1])
  dense_vals = array_ops.gather_nd(y, scaled_indices)

  if is_mul:
    dx = grad * dense_vals
    dy_val = grad * op.inputs[1]
  else:
    dx = grad / dense_vals
    dy_val = grad * (-op.inputs[1] / math_ops.square(dense_vals))
  # indices can repeat after scaling, so we can't use sparse_to_dense().
  dy = sparse_ops.sparse_add(
      array_ops.zeros_like(y),
      sparse_tensor.SparseTensor(scaled_indices, dy_val, y_shape))

  # (sp_indices, sp_vals, sp_shape, dense)
  return (None, dx, None, dy)


@ops.RegisterGradient("SparseDenseCwiseMul")
def _SparseDenseCwiseMulGrad(op: ops.Operation, grad):
  """Gradients for SparseDenseCwiseMul."""
  return _SparseDenseCwiseMulOrDivGrad(op, grad, True)


@ops.RegisterGradient("SparseDenseCwiseDiv")
def _SparseDenseCwiseDivGrad(op: ops.Operation, grad):
  """Gradients for SparseDenseCwiseDiv."""
  return _SparseDenseCwiseMulOrDivGrad(op, grad, False)


@ops.RegisterGradient("SparseSoftmax")
def _SparseSoftmaxGrad(op: ops.Operation, grad):
  """Gradients for SparseSoftmax.

  The calculation is the same as SoftmaxGrad:

    grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax

  where we now only operate on the non-zero values present in the SparseTensors.

  Args:
    op: the SparseSoftmax op.
    grad: the upstream gradient w.r.t. the non-zero SparseSoftmax output values.

  Returns:
    Gradients w.r.t. the input (sp_indices, sp_values, sp_shape).
  """
  indices, shape = op.inputs[0], op.inputs[2]
  out_vals = op.outputs[0]
  sp_output = sparse_tensor.SparseTensor(indices, out_vals, shape)
  sp_grad = sparse_tensor.SparseTensor(indices, grad, shape)
  sp_product = sparse_tensor.SparseTensor(indices,
                                          sp_output.values * sp_grad.values,
                                          shape)

  # [..., B, 1], dense.
  sum_reduced = -sparse_ops.sparse_reduce_sum(sp_product, [-1], keepdims=True)
  # sparse [..., B, C] + dense [..., B, 1] with broadcast; outputs sparse.
  sp_sum = sparse_ops.sparse_dense_cwise_add(sp_grad, sum_reduced)

  grad_x = sp_sum.values * sp_output.values
  return [None, grad_x, None]
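
# Editor's illustrative sketch (not part of the original module): exercising
# the SparseSoftmax gradient registered above.
#
#   vals = tf.constant([1.0, 2.0, 3.0])
#   with tf.GradientTape() as tape:
#     tape.watch(vals)
#     sp = tf.sparse.SparseTensor([[0, 0], [0, 1], [1, 0]], vals, [2, 2])
#     sm = tf.sparse.softmax(sp)
#     loss = tf.reduce_sum(sm.values ** 2.0)
#   d_vals = tape.gradient(loss, vals)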


@ops.RegisterGradient("SparseSparseMaximum")
def _SparseSparseMaximumGrad(unused_op: ops.Operation, unused_grad):
  raise NotImplementedError("Gradient for SparseSparseMaximum is not "
                            "implemented.")


@ops.RegisterGradient("SparseSparseMinimum")
def _SparseSparseMinimumGrad(unused_op: ops.Operation, unused_grad):
  raise NotImplementedError("Gradient for SparseSparseMinimum is not "
                            "implemented.")


@ops.RegisterGradient("SparseFillEmptyRows")
def _SparseFillEmptyRowsGrad(op: ops.Operation, unused_grad_output_indices,
                             output_grad_values,
                             unused_grad_empty_row_indicator,
                             unused_grad_reverse_index_map):
  """Gradients for SparseFillEmptyRows."""
  reverse_index_map = op.outputs[3]

  d_values, d_default_value = gen_sparse_ops.sparse_fill_empty_rows_grad(
      reverse_index_map=reverse_index_map, grad_values=output_grad_values)

  # (d_indices, d_values, d_dense_shape, d_default_value)
  return [None, d_values, None, d_default_value]


@ops.RegisterGradient("SparseToDense")
def _SparseToDenseGrad(op: ops.Operation, grad):
  """Gradients for SparseToDense."""
  sparse_indices, output_shape, _, _ = op.inputs

  sparse_values_grad = array_ops.gather_nd(grad, sparse_indices)
  default_value_grad = math_ops.reduce_sum(grad) - math_ops.reduce_sum(
      sparse_values_grad)
  return [
      array_ops.zeros_like(sparse_indices),
      sparse_values_grad,
      array_ops.zeros_like(output_shape),
      default_value_grad,
  ]