import math

import numpy as np

from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.backend.common.backend_utils import canonicalize_axis
from keras.src.backend.common.backend_utils import to_tuple_or_list


def broadcast_shapes(shape1, shape2):
    """Broadcast input shapes to a unified shape.

    Convert to list for mutability.

    Args:
        shape1: A tuple or list of integers.
        shape2: A tuple or list of integers.

    Returns:
        output_shape (list of integers or `None`): The broadcasted shape.

    Example:
    >>> broadcast_shapes((5, 3), (1, 3))
    [5, 3]
    """
    shape1 = list(shape1)
    shape2 = list(shape2)
    origin_shape1 = shape1
    origin_shape2 = shape2

    if len(shape1) > len(shape2):
        shape2 = [1] * (len(shape1) - len(shape2)) + shape2
    if len(shape1) < len(shape2):
        shape1 = [1] * (len(shape2) - len(shape1)) + shape1
    output_shape = list(shape1)
    for i in range(len(shape1)):
        if shape1[i] == 1:
            output_shape[i] = shape2[i]
        elif shape1[i] is None:
            output_shape[i] = None if shape2[i] == 1 else shape2[i]
        else:
            if shape2[i] == 1 or shape2[i] is None or shape2[i] == shape1[i]:
                output_shape[i] = shape1[i]
            else:
                raise ValueError(
                    "Cannot broadcast shape, the failure dim has value "
                    f"{shape1[i]}, which cannot be broadcasted to "
                    f"{shape2[i]}. Input shapes are: {origin_shape1} and "
                    f"{origin_shape2}."
                )
    return output_shape


def compute_expand_dims_output_shape(input_shape, axis):
    """Compute the output shape for the `expand_dims` operation.

    Args:
        input_shape: Input shape.
        axis: int or sequence of ints for the axis to expand.

    Returns:
        Tuple of ints: The output shape after the `expand_dims` operation.
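
    Example:

    # Expanding a rank-2 shape with a new length-1 axis at position 1
    >>> compute_expand_dims_output_shape((2, 3), 1)
    (2, 1, 3)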
    """
    input_shape = list(input_shape)
    if axis is None:
        axis = len(input_shape)
    axis = to_tuple_or_list(axis)
    out_ndim = len(axis) + len(input_shape)
    axis = [canonicalize_axis(a, out_ndim) for a in axis]
    shape_iter = iter(input_shape)
    new_shape = [
        1 if ax in axis else next(shape_iter) for ax in range(out_ndim)
    ]
    return tuple(new_shape)


def compute_pooling_output_shape(
    input_shape,
    pool_size,
    strides=None,
    padding="valid",
    data_format="channels_last",
):
    """Computes the output shape of pooling operations.

    Args:
        input_shape: Input shape. Must be a tuple of integers.
        pool_size: Size of the pooling operation. Must be a tuple of integers.
        strides: Stride of the pooling operation. Must be a tuple of integers.
            Defaults to `pool_size`.
        padding: Padding method. Available methods are `"valid"` or `"same"`.
            Defaults to `"valid"`.
        data_format: String, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape `(batch, height, width, channels)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch, channels, height, width)`. Defaults to `"channels_last"`.

    Returns:
        Tuple of ints: The output shape of the pooling operation.

    Examples:

    # Basic usage with square pooling on a single image
    >>> compute_pooling_output_shape((1, 4, 4, 1), (2, 2))
    (1, 2, 2, 1)

    # Strided pooling on a single image with strides different from pool_size
    >>> compute_pooling_output_shape((1, 4, 4, 1), (2, 2), strides=(1, 1))
    (1, 3, 3, 1)

    # Pooling on a batch of images
    >>> compute_pooling_output_shape((32, 4, 4, 3), (2, 2))
    (32, 2, 2, 3)
    """
    strides = pool_size if strides is None else strides
    input_shape_origin = list(input_shape)
    input_shape = np.array(input_shape)
    if data_format == "channels_last":
        spatial_shape = input_shape[1:-1]
    else:
        spatial_shape = input_shape[2:]
    none_dims = []
    for i in range(len(spatial_shape)):
        if spatial_shape[i] is None:
            # Set `None` shape to a placeholder so that `np.floor` works.
            spatial_shape[i] = -1
            none_dims.append(i)
    pool_size = np.array(pool_size)
    if padding == "valid":
        output_spatial_shape = (
            np.floor((spatial_shape - pool_size) / strides) + 1
        )
        for i in range(len(output_spatial_shape)):
            if i not in none_dims and output_spatial_shape[i] < 0:
                raise ValueError(
                    "Computed output size would be negative. Received: "
                    f"`inputs.shape={input_shape}` and "
                    f"`pool_size={pool_size}`."
                )
    elif padding == "same":
        output_spatial_shape = np.floor((spatial_shape - 1) / strides) + 1
    else:
        raise ValueError(
            "Argument `padding` must be either 'valid' or 'same'. Received: "
            f"padding={padding}"
        )
    output_spatial_shape = [int(i) for i in output_spatial_shape]
    for i in none_dims:
        output_spatial_shape[i] = None
    output_spatial_shape = tuple(output_spatial_shape)
    if data_format == "channels_last":
        output_shape = (
            (input_shape_origin[0],)
            + output_spatial_shape
            + (input_shape_origin[-1],)
        )
    else:
        output_shape = (
            input_shape_origin[0],
            input_shape_origin[1],
        ) + output_spatial_shape
    return output_shape


def compute_conv_output_shape(
    input_shape,
    filters,
    kernel_size,
    strides=1,
    padding="valid",
    data_format="channels_last",
    dilation_rate=1,
):
    """Compute the output shape of conv ops."""
    if data_format == "channels_last":
        spatial_shape = input_shape[1:-1]
        kernel_shape = kernel_size + (input_shape[-1], filters)
    else:
        spatial_shape = input_shape[2:]
        kernel_shape = kernel_size + (input_shape[1], filters)
    if len(kernel_shape) != len(input_shape):
        raise ValueError(
            "Kernel shape must have the same length as input, but received "
            f"kernel of shape {kernel_shape} and "
            f"input of shape {input_shape}."
        )
    if isinstance(dilation_rate, int):
        dilation_rate = (dilation_rate,) * len(spatial_shape)
    if isinstance(strides, int):
        strides = (strides,) * len(spatial_shape)
    if len(dilation_rate) != len(spatial_shape):
        raise ValueError(
            "Dilation must be None, scalar or tuple/list of length of "
            "inputs' spatial shape, but received "
            f"`dilation_rate={dilation_rate}` and "
            f"input of shape {input_shape}."
        )

    none_dims = []
    spatial_shape = np.array(spatial_shape)
    for i in range(len(spatial_shape)):
        if spatial_shape[i] is None:
            # Set `None` shape to a placeholder so that `np.floor` works.
            spatial_shape[i] = -1
            none_dims.append(i)

    kernel_spatial_shape = np.array(kernel_shape[:-2])
    dilation_rate = np.array(dilation_rate)
    if padding == "valid":
        output_spatial_shape = (
            np.floor(
                (spatial_shape - dilation_rate * (kernel_spatial_shape - 1) - 1)
                / strides
            )
            + 1
        )
        for i in range(len(output_spatial_shape)):
            if i not in none_dims and output_spatial_shape[i] < 0:
                raise ValueError(
                    "Computed output size would be negative. Received "
                    f"`inputs shape={input_shape}`, "
                    f"`kernel shape={kernel_shape}`, "
                    f"`dilation_rate={dilation_rate}`."
                )
    elif padding == "same" or padding == "causal":
        output_spatial_shape = np.floor((spatial_shape - 1) / strides) + 1
    else:
        raise ValueError(
            "`padding` must be either `'valid'` or `'same'`. Received "
            f"{padding}."
        )
    output_spatial_shape = [int(i) for i in output_spatial_shape]
    for i in none_dims:
        output_spatial_shape[i] = None
    output_spatial_shape = tuple(output_spatial_shape)

    if data_format == "channels_last":
        output_shape = (input_shape[0],) + output_spatial_shape + (filters,)
    else:
        output_shape = (input_shape[0], filters) + output_spatial_shape
    return output_shape


def compute_matmul_output_shape(shape1, shape2):
    """Compute the output shape of a `matmul` operation.

    Args:
        shape1: Shape of the left operand.
        shape2: Shape of the right operand.

    Returns:
        Tuple of ints: The output shape for the `matmul` operation.
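
    Example:

    # Batched matmul: the leading dims broadcast, the last two contract
    >>> compute_matmul_output_shape((2, 3, 4), (4, 5))
    (2, 3, 5)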
    """
    if len(shape1) == 1:
        shape1 = (1, shape1[0])
    if len(shape2) == 1:
        shape2 = (shape2[0], 1)
    if (
        shape1[-1] is not None
        and shape2[-2] is not None
        and shape1[-1] != shape2[-2]
    ):
        raise ValueError(
            "Inner dimensions (`x1.shape[-1]` and `x2.shape[-2]`) must be "
            f"equal, but received `x1.shape={shape1}` and "
            f"`x2.shape={shape2}`."
        )

    leading_shape = broadcast_shapes(shape1[:-2], shape2[:-2])
    last_2_dims_shape = [shape1[-2], shape2[-1]]
    output_shape = leading_shape + last_2_dims_shape
    if len(shape1) == 1:
        del output_shape[-2]
    if len(shape2) == 1:
        del output_shape[-1]
    return tuple(output_shape)


def compute_reshape_output_shape(input_shape, newshape, newshape_arg_name):
    """Converts `-1` in `newshape` to either an actual dimension or `None`.

    This utility does not special case the 0th dimension (batch size).
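
    Example:

    # The `-1` is resolved using the known total size of the input
    >>> compute_reshape_output_shape((2, 3, 4), (-1, 4), "newshape")
    (6, 4)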
    """
    unknown_dim_count = newshape.count(-1)
    if unknown_dim_count > 1:
        raise ValueError(
            "There must be at most one unknown dimension (-1) in "
            f"{newshape_arg_name}. Received: {newshape_arg_name}={newshape}."
        )

    # If there is a `None` in `input_shape`, we can't infer what `-1` is.
    if None in input_shape:
        return tuple(dim if dim != -1 else None for dim in newshape)

    input_size = math.prod(input_shape)
    # If `newshape` is fully defined, return it unchanged.
    if unknown_dim_count == 0:
        if input_size != math.prod(newshape):
            raise ValueError(
                "The total size of the tensor must be unchanged. Received: "
                f"input_shape={input_shape}, "
                f"{newshape_arg_name}={newshape}"
            )
        return newshape

    # There is exactly one `-1` in `newshape`; compute its actual value.
    known_output_size = 1
    unknown_dim_index = None
    for index, dim in enumerate(newshape):
        if dim == -1:
            unknown_dim_index = index
        else:
            known_output_size *= dim

    if known_output_size == 0 or input_size % known_output_size != 0:
        raise ValueError(
            "The total size of the tensor must be unchanged, however, the "
            "input size cannot be divided by the specified dimensions in "
            f"{newshape_arg_name}. Received: input_shape={input_shape}, "
            f"{newshape_arg_name}={newshape}"
        )

    output_shape = list(newshape)
    output_shape[unknown_dim_index] = input_size // known_output_size
    return tuple(output_shape)


def compute_transpose_output_shape(input_shape, axes):
    """Compute the output shape for the `transpose` operation.

    Args:
        input_shape: Input shape.
        axes: Permutation of the dimensions for the `transpose` operation.

    Returns:
        Tuple of ints: The output shape after the `transpose` operation.
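
    Example:

    # Permuting the axes of a rank-3 shape
    >>> compute_transpose_output_shape((2, 3, 4), (2, 0, 1))
    (4, 2, 3)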
    """
    input_shape = list(input_shape)
    if axes is None:
        return tuple(input_shape[::-1])
    if len(axes) != len(input_shape):
        raise ValueError(
            "axis must be a list of the same length as the input shape, "
            f"expected {len(input_shape)}, but received {len(axes)}."
        )
    return tuple(input_shape[ax] for ax in axes)


def compute_take_along_axis_output_shape(input_shape, indices_shape, axis):
    input_shape = list(input_shape)
    indices_shape = list(indices_shape)
    if axis is None:
        input_shape = (
            [None] if None in input_shape else [int(np.prod(input_shape))]
        )

    if len(input_shape) != len(indices_shape):
        raise ValueError(
            "`x` and `indices` must have the same number of dimensions, "
            f"but received shape {input_shape} and {indices_shape}."
        )

    input_shape[axis] = indices_shape[axis]
    output_shape = broadcast_shapes(input_shape, indices_shape)
    return output_shape


def reduce_shape(shape, axis=None, keepdims=False):
    shape = list(shape)
    if axis is None:
        if keepdims:
            return tuple([1 for _ in shape])
        else:
            return tuple([])

    if isinstance(axis, int):
        axis = (axis,)

    if keepdims:
        for ax in axis:
            shape[ax] = 1
        return tuple(shape)
    else:
        for ax in sorted(axis, reverse=True):
            del shape[ax]
        return tuple(shape)


@keras_export("keras.utils.get_source_inputs")
def get_source_inputs(tensor):
    """Returns the list of input tensors necessary to compute `tensor`.

    Output will always be a list of tensors
    (potentially with 1 element).

    Args:
        tensor: The tensor to start from.

    Returns:
        List of input tensors.
    """
    if not hasattr(tensor, "_keras_history"):
        return tensor

    operation, node_index, _ = tensor._keras_history
    if not operation or not operation._inbound_nodes:
        return [tensor]
    else:
        node = operation._inbound_nodes[node_index]
        if node.is_input:
            # Reached an input node; stop the recursion.
            return tree.flatten(node.output_tensors)
        else:
            source_tensors = []
            for tensor in node.input_tensors:
                previous_sources = get_source_inputs(tensor)
                # Avoid input redundancy.
                for x in previous_sources:
                    if all(x is not t for t in source_tensors):
                        source_tensors.append(x)
            return source_tensors
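

# Illustrative usage sketch with assumed example shapes: the expected values
# below follow from the "valid"/"same" padding rules and the reduction
# semantics documented in the docstrings above.
if __name__ == "__main__":
    # 2x2 "valid" pooling halves the spatial dims of a (1, 4, 4, 1) input.
    assert compute_pooling_output_shape((1, 4, 4, 1), (2, 2)) == (1, 2, 2, 1)
    # A 3x3 "same" convolution with 8 filters keeps the spatial dims.
    assert compute_conv_output_shape(
        (32, 10, 10, 3), filters=8, kernel_size=(3, 3), padding="same"
    ) == (32, 10, 10, 8)
    # Reducing over axis 1 without `keepdims` drops that dimension.
    assert reduce_shape((4, 5, 6), axis=[1]) == (4, 6)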