from keras.src import activations
from keras.src import backend
from keras.src import constraints
from keras.src import initializers
from keras.src import ops
from keras.src import regularizers
from keras.src.api_export import keras_export
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.layers.rnn.dropout_rnn_cell import DropoutRNNCell
from keras.src.layers.rnn.rnn import RNN


@keras_export("keras.layers.SimpleRNNCell")
class SimpleRNNCell(Layer, DropoutRNNCell):
    """Cell class for SimpleRNN.

    This class processes one step within the whole time sequence input, whereas
    `keras.layers.SimpleRNN` processes the whole sequence.

    Args:
        units: Positive integer, dimensionality of the output space.
        activation: Activation function to use.
            Default: hyperbolic tangent (`tanh`).
            If you pass `None`, no activation is applied
            (i.e. "linear" activation: `a(x) = x`).
        use_bias: Boolean (default `True`), whether the layer
            should use a bias vector.
        kernel_initializer: Initializer for the `kernel` weights matrix,
            used for the linear transformation of the inputs. Default:
            `"glorot_uniform"`.
        recurrent_initializer: Initializer for the `recurrent_kernel`
            weights matrix, used for the linear transformation
            of the recurrent state. Default: `"orthogonal"`.
        bias_initializer: Initializer for the bias vector. Default: `"zeros"`.
        kernel_regularizer: Regularizer function applied to the `kernel` weights
            matrix. Default: `None`.
        recurrent_regularizer: Regularizer function applied to the
            `recurrent_kernel` weights matrix. Default: `None`.
        bias_regularizer: Regularizer function applied to the bias vector.
            Default: `None`.
        kernel_constraint: Constraint function applied to the `kernel` weights
            matrix. Default: `None`.
        recurrent_constraint: Constraint function applied to the
            `recurrent_kernel` weights matrix. Default: `None`.
        bias_constraint: Constraint function applied to the bias vector.
            Default: `None`.
        dropout: Float between 0 and 1. Fraction of the units to drop for the
            linear transformation of the inputs. Default: 0.
        recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
            for the linear transformation of the recurrent state. Default: 0.
        seed: Random seed for dropout.

    Call arguments:
        sequence: A 2D tensor, with shape `(batch, features)`.
        states: A 2D tensor with shape `(batch, units)`, which is the state
            from the previous time step.
        training: Python boolean indicating whether the layer should behave in
            training mode or in inference mode. Only relevant when `dropout` or
            `recurrent_dropout` is used.

    Example:

    ```python
    inputs = np.random.random([32, 10, 8]).astype(np.float32)
    rnn = keras.layers.RNN(keras.layers.SimpleRNNCell(4))
    output = rnn(inputs)  # The output has shape `(32, 4)`.
    rnn = keras.layers.RNN(
        keras.layers.SimpleRNNCell(4),
        return_sequences=True,
        return_state=True
    )
    # whole_sequence_output has shape `(32, 10, 4)`.
    # final_state has shape `(32, 4)`.
    whole_sequence_output, final_state = rnn(inputs)
    ```
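
    The cell can also be applied to a single timestep on its own. A minimal
    sketch (shapes follow the example above):

    ```python
    cell = keras.layers.SimpleRNNCell(4)
    x_t = np.random.random((32, 8)).astype(np.float32)  # one timestep
    h_prev = [np.zeros((32, 4), dtype=np.float32)]      # previous state
    y, h_next = cell(x_t, h_prev)  # y and h_next[0] have shape `(32, 4)`.
    ```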
    tanhTglorot_uniform
orthogonalzerosN        c                    s   |dkrt d| dt jdi | || _tj|| _|| _t	
|| _|| _t
|| _t
|| _t
|| _t
|| _t
|| _t
|	| _t
|
| _t
|| _t
|| _tdtd|| _tdtd|| _| j| _| j| _d S )Nr   zQReceived an invalid value for argument `units`, expected a positive integer, got .g      ?r    )
ValueErrorsuper__init__seedr   randomSeedGeneratorseed_generatorunitsr   get
activationuse_biasr   kernel_initializerrecurrent_initializerbias_initializerr   kernel_regularizerrecurrent_regularizerbias_regularizerr   kernel_constraintrecurrent_constraintbias_constraintminmaxdropoutrecurrent_dropout
state_sizeoutput_size)selfr   r   r   r    r!   r"   r#   r$   r%   r&   r'   r(   r+   r,   r   kwargs	__class__r   Z/var/www/html/chatgem/venv/lib/python3.10/site-packages/keras/src/layers/rnn/simple_rnn.pyr   O   s2   zSimpleRNNCell.__init__c                 C   s~   | j |d | jfd| j| j| jd| _| j | j| jfd| j| j| jd| _	| j
r:| j | jfd| j| j| jd| _d S d | _d S )Nkernel)shapenameinitializerregularizer
constraintrecurrent_kernelbias)
add_weightr   r    r#   r&   r5   r!   r$   r'   r;   r   r"   r%   r(   r<   )r/   input_shaper   r   r3   build   s.   

zSimpleRNNCell.buildFc           
      C   s   t |ttfr|d n|}| |}| |}|r!|d ur!|| }t|| j}| jd ur2|| j7 }|r<|d ur<|| }|t|| j	 }| j
d urO| 
|}t |ttfrY|gn|}	||	fS )Nr   )
isinstancelisttupleget_dropout_maskget_recurrent_dropout_maskr   matmulr5   r<   r;   r   )
r/   sequencestatestrainingprev_outputdp_maskrec_dp_maskhoutput	new_stater   r   r3   call   s   





zSimpleRNNCell.callc                 C   s   t j|| jf| jdgS )N)dtype)r   r   r-   compute_dtype)r/   
batch_sizer   r   r3   get_initial_state   s   zSimpleRNNCell.get_initial_statec                    s   | j t| j| jt| jt| jt| jt	| j
t	| jt	| jt| jt| jt| j| j| j| jd}t  }i ||S )N)r   r   r   r    r!   r"   r#   r$   r%   r&   r'   r(   r+   r,   r   )r   r   	serializer   r   r   r    r!   r"   r   r#   r$   r%   r   r&   r'   r(   r+   r,   r   r   
get_configr/   configbase_configr1   r   r3   rU      s8   





zSimpleRNNCell.get_config)r   Tr   r   r   NNNNNNr   r   N)FN)
__name__
__module____qualname____doc__r   r?   rO   rS   rU   __classcell__r   r   r1   r3   r      s*    B1

r   zkeras.layers.SimpleRNNc                       s  e Zd ZdZ																									d. fd
d	Zd/ fdd	Zedd Zedd Zedd Z	edd Z
edd Zedd Zedd Zedd Zedd Zed d! Zed"d# Zed$d% Zed&d' Zed(d) Z fd*d+Zed,d- Z  ZS )0	SimpleRNNa  Fully-connected RNN where the output is to be fed back as the new input.

    Args:
        units: Positive integer, dimensionality of the output space.
        activation: Activation function to use.
            Default: hyperbolic tangent (`tanh`).
            If you pass `None`, no activation is applied
            (i.e. "linear" activation: `a(x) = x`).
        use_bias: Boolean (default `True`), whether the layer uses
            a bias vector.
        kernel_initializer: Initializer for the `kernel` weights matrix,
            used for the linear transformation of the inputs. Default:
            `"glorot_uniform"`.
        recurrent_initializer: Initializer for the `recurrent_kernel`
            weights matrix, used for the linear transformation of the recurrent
            state.  Default: `"orthogonal"`.
        bias_initializer: Initializer for the bias vector. Default: `"zeros"`.
        kernel_regularizer: Regularizer function applied to the `kernel` weights
            matrix. Default: `None`.
        recurrent_regularizer: Regularizer function applied to the
            `recurrent_kernel` weights matrix. Default: `None`.
        bias_regularizer: Regularizer function applied to the bias vector.
            Default: `None`.
        activity_regularizer: Regularizer function applied to the output of the
            layer (its "activation"). Default: `None`.
        kernel_constraint: Constraint function applied to the `kernel` weights
            matrix. Default: `None`.
        recurrent_constraint: Constraint function applied to the
            `recurrent_kernel` weights matrix.  Default: `None`.
        bias_constraint: Constraint function applied to the bias vector.
            Default: `None`.
        dropout: Float between 0 and 1.
            Fraction of the units to drop for the linear transformation
            of the inputs. Default: 0.
        recurrent_dropout: Float between 0 and 1.
            Fraction of the units to drop for the linear transformation of the
            recurrent state. Default: 0.
        return_sequences: Boolean. Whether to return the last output
            in the output sequence, or the full sequence. Default: `False`.
        return_state: Boolean. Whether to return the last state
            in addition to the output. Default: `False`.
        go_backwards: Boolean (default: `False`).
            If `True`, process the input sequence backwards and return the
            reversed sequence.
        stateful: Boolean (default: `False`). If `True`, the last state
            for each sample at index i in a batch will be used as the
            initial state for the sample of index i in the following batch.
        unroll: Boolean (default: `False`).
            If `True`, the network will be unrolled,
            else a symbolic loop will be used.
            Unrolling can speed up an RNN,
            although it tends to be more memory-intensive.
            Unrolling is only suitable for short sequences.

    Call arguments:
        sequence: A 3D tensor, with shape `[batch, timesteps, feature]`.
        mask: Binary tensor of shape `[batch, timesteps]` indicating whether
            a given timestep should be masked. An individual `True` entry
            indicates that the corresponding timestep should be utilized,
            while a `False` entry indicates that the corresponding timestep
            should be ignored.
        training: Python boolean indicating whether the layer should behave in
            training mode or in inference mode.
            This argument is passed to the cell when calling it.
            This is only relevant if `dropout` or `recurrent_dropout` is used.
        initial_state: List of initial state tensors to be passed to the first
            call of the cell.

    Example:

    ```python
    inputs = np.random.random((32, 10, 8))
    simple_rnn = keras.layers.SimpleRNN(4)
    output = simple_rnn(inputs)  # The output has shape `(32, 4)`.
    simple_rnn = keras.layers.SimpleRNN(
        4, return_sequences=True, return_state=True
    )
    # whole_sequence_output has shape `(32, 10, 4)`.
    # final_state has shape `(32, 4)`.
    whole_sequence_output, final_state = simple_rnn(inputs)
    ```
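
    Masking, as described under "Call arguments", can be sketched as follows
    (shapes follow the example above; the particular `mask` layout is an
    assumption for illustration):

    ```python
    mask = np.ones((32, 10), dtype=bool)  # True = keep the timestep
    mask[:, 5:] = False                   # ignore the trailing timesteps
    output = keras.layers.SimpleRNN(4)(inputs, mask=mask)  # shape `(32, 4)`.
    ```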
    r   Tr   r   r   Nr   Fc                    s   t |fi d|d|d|d|d|d|d|d|	d	|d
|d|d|d|d|d|dd d|dddd}t j|f|||||d| tddg| _d S )Nr   r   r    r!   r"   r#   r$   r%   r&   r'   r(   r+   r,   r   rP   	trainableTr7   simple_rnn_cell)return_sequencesreturn_statego_backwardsstatefulunroll   )ndim)r   r   r   r   r	   
input_spec)r/   r   r   r   r    r!   r"   r#   r$   r%   activity_regularizerr&   r'   r(   r+   r,   rb   rc   rd   re   rf   r   r0   cellr1   r   r3   r   (  sb   	
	zSimpleRNN.__init__c                    s   t  j||||dS )N)maskrH   initial_state)r   rO   )r/   	sequencesrm   rl   rH   r1   r   r3   rO   `  s   zSimpleRNN.callc                 C      | j jS rY   )rk   r   r/   r   r   r3   r   e     zSimpleRNN.unitsc                 C   ro   rY   )rk   r   rp   r   r   r3   r   i  rq   zSimpleRNN.activationc                 C   ro   rY   )rk   r   rp   r   r   r3   r   m  rq   zSimpleRNN.use_biasc                 C   ro   rY   )rk   r    rp   r   r   r3   r    q  rq   zSimpleRNN.kernel_initializerc                 C   ro   rY   )rk   r!   rp   r   r   r3   r!   u  rq   zSimpleRNN.recurrent_initializerc                 C   ro   rY   )rk   r"   rp   r   r   r3   r"   y  rq   zSimpleRNN.bias_initializerc                 C   ro   rY   )rk   r#   rp   r   r   r3   r#   }  rq   zSimpleRNN.kernel_regularizerc                 C   ro   rY   )rk   r$   rp   r   r   r3   r$     rq   zSimpleRNN.recurrent_regularizerc                 C   ro   rY   )rk   r%   rp   r   r   r3   r%     rq   zSimpleRNN.bias_regularizerc                 C   ro   rY   )rk   r&   rp   r   r   r3   r&     rq   zSimpleRNN.kernel_constraintc                 C   ro   rY   )rk   r'   rp   r   r   r3   r'     rq   zSimpleRNN.recurrent_constraintc                 C   ro   rY   )rk   r(   rp   r   r   r3   r(     rq   zSimpleRNN.bias_constraintc                 C   ro   rY   )rk   r+   rp   r   r   r3   r+     rq   zSimpleRNN.dropoutc                 C   ro   rY   )rk   r,   rp   r   r   r3   r,     rq   zSimpleRNN.recurrent_dropoutc                    s   | j t| j| jt| jt| jt| jt	| j
t	| jt	| jt	| jt| jt| jt| j| j| jd}t  }|d= i ||S )N)r   r   r   r    r!   r"   r#   r$   r%   rj   r&   r'   r(   r+   r,   rk   )r   r   rT   r   r   r   r    r!   r"   r   r#   r$   r%   rj   r   r&   r'   r(   r+   r,   r   rU   rV   r1   r   r3   rU     s>   





zSimpleRNN.get_configc                 C   s   | di |S )Nr   r   )clsrW   r   r   r3   from_config  s   zSimpleRNN.from_config)r   Tr   r   r   NNNNNNNr   r   FFFFFN)NNF)rZ   r[   r\   r]   r   rO   propertyr   r   r   r    r!   r"   r#   r$   r%   r&   r'   r(   r+   r,   rU   classmethodrs   r^   r   r   r1   r3   r_      sn    V8













"r_   N)	keras.srcr   r   r   r   r   r   keras.src.api_exportr   keras.src.layers.input_specr	   keras.src.layers.layerr
   %keras.src.layers.rnn.dropout_rnn_cellr   keras.src.layers.rnn.rnnr   r   r_   r   r   r   r3   <module>   s      E
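

# A minimal sanity-check sketch, not part of the library module proper: it
# exercises the recurrence implemented by `SimpleRNNCell.call` above, i.e.
# h_t = tanh(x_t @ kernel + bias + h_{t-1} @ recurrent_kernel), and assumes
# only NumPy plus whichever Keras backend is active.
if __name__ == "__main__":
    import numpy as np

    cell = SimpleRNNCell(4)
    x_t = np.random.random((2, 3)).astype("float32")  # (batch, features)
    h_prev = [np.zeros((2, 4), dtype="float32")]      # (batch, units)
    y, h_next = cell(x_t, h_prev)

    # For this cell, the output doubles as the new hidden state.
    np.testing.assert_allclose(
        ops.convert_to_numpy(y), ops.convert_to_numpy(h_next[0])
    )
    print("single-step output shape:", ops.convert_to_numpy(y).shape)  # (2, 4)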