"""Deprecated sequence preprocessing APIs from Keras 1."""

import json
import random

import numpy as np

from keras.src.api_export import keras_export
from keras.src.trainers.data_adapters.py_dataset_adapter import PyDataset


@keras_export("keras._legacy.preprocessing.sequence.TimeseriesGenerator")
class TimeseriesGenerator(PyDataset):
    """Utility class for generating batches of temporal data.

    DEPRECATED.

    This class takes in a sequence of data-points gathered at
    equal intervals, along with time series parameters such as
    stride, length of history, etc., to produce batches for
    training/validation.

    Arguments:
        data: Indexable generator (such as list or Numpy array)
            containing consecutive data points (timesteps).
            The data should be at least 2D, and axis 0 is expected
            to be the time dimension.
        targets: Targets corresponding to timesteps in `data`.
            It should have same length as `data`.
        length: Length of the output sequences (in number of timesteps).
        sampling_rate: Period between successive individual timesteps
            within sequences. For rate `r`, the timesteps
            `data[i - length]`, `data[i - length + r]`, ..., `data[i - r]`
            are used to create a sample sequence.
        stride: Period between successive output sequences.
            For stride `s`, consecutive output samples would
            be centered around `data[i]`, `data[i+s]`, `data[i+2*s]`, etc.
        start_index: Data points earlier than `start_index` will not be used
            in the output sequences. This is useful to reserve part of the
            data for test or validation.
        end_index: Data points later than `end_index` will not be used
            in the output sequences. This is useful to reserve part of the
            data for test or validation.
        shuffle: Whether to shuffle output samples,
            or instead draw them in chronological order.
        reverse: Boolean: if `true`, timesteps in each output sample will be
            in reverse chronological order.
        batch_size: Number of timeseries samples in each batch
            (except maybe the last one).

    Returns:
        A PyDataset instance.
    """

    def __init__(
        self,
        data,
        targets,
        length,
        sampling_rate=1,
        stride=1,
        start_index=0,
        end_index=None,
        shuffle=False,
        reverse=False,
        batch_size=128,
    ):
        if len(data) != len(targets):
            raise ValueError(
                "Data and targets have to be of same length. "
                f"Data length is {len(data)} "
                f"while target length is {len(targets)}"
            )

        self.data = data
        self.targets = targets
        self.length = length
        self.sampling_rate = sampling_rate
        self.stride = stride
        # The first usable "current step" is offset by `length`: each sample
        # needs `length` timesteps of history before it.
        self.start_index = start_index + length
        if end_index is None:
            end_index = len(data) - 1
        self.end_index = end_index
        self.shuffle = shuffle
        self.reverse = reverse
        self.batch_size = batch_size

        if self.start_index > self.end_index:
            raise ValueError(
                f"`start_index+length={self.start_index} > end_index="
                f"{self.end_index}` is disallowed, as no part of the "
                "sequence would be left to be used as current step."
            )

    def __len__(self):
        # Number of batches: the usable range of current steps divided by
        # the number of current steps consumed per batch.
        return (
            self.end_index - self.start_index + self.batch_size * self.stride
        ) // (self.batch_size * self.stride)
zTimeseriesGenerator.__len__c                    s    j rtjj j jd  jd}n j j j |  }t|t	| j j   jd  j}t
 fdd|D }t
 fdd|D } jrZ|d d d d ddf |fS ||fS )Nr   )sizec                    s$   g | ]} j | j | j qS r   )r	   r   r   .0rowr   r   r   
<listcomp>s   s    z3TimeseriesGenerator.__getitem__.<locals>.<listcomp>c                    s   g | ]} j | qS r   )r
   r   r   r   r   r   x   s    .)r   nprandomrandintr   r   r   r   arangeminarrayr   )r   indexrowsisamplesr
   r   r   r   __getitem__e   s&   
zTimeseriesGenerator.__getitem__c                 C   s   | j }t| j jtjkr| j  }zt|}W n ty, } ztd| |d}~ww | j	}t| j	jtjkr>| j	 }zt|}W n tyY } ztd| |d}~ww ||| j
| j| j| j| j| j| j| jd
S )zReturns the TimeseriesGenerator configuration as Python dictionary.

        Returns:
            A Python dictionary with the TimeseriesGenerator configuration.
        zData not JSON Serializable: NzTargets not JSON Serializable: )
r	   r
   r   r   r   r   r   r   r   r   )r	   type
__module__r   __name__tolistjsondumps	TypeErrorr
   r   r   r   r   r   r   r   r   )r   r	   	json_dataer
   json_targetsr   r   r   
get_config~   s:   

zTimeseriesGenerator.get_configc                 K   s(   |   }| jj|d}tj|fi |S )a  Returns a JSON string containing the generator's configuration.

        Args:
            **kwargs: Additional keyword arguments to be passed
                to `json.dumps()`.

        Returns:
            A JSON string containing the generator configuration.
        """
        config = self.get_config()
        timeseries_generator_config = {
            "class_name": self.__class__.__name__,
            "config": config,
        }
        return json.dumps(timeseries_generator_config, **kwargs)


@keras_export("keras._legacy.preprocessing.sequence.make_sampling_table")
def make_sampling_table(size, sampling_factor=1e-5):
    """Generates a word rank-based probabilistic sampling table.

    DEPRECATED.

    Used for generating the `sampling_table` argument for `skipgrams`.
    `sampling_table[i]` is the probability of sampling
    the i-th most common word in a dataset
    (more common words should be sampled less frequently, for balance).

    The sampling probabilities are generated according
    to the sampling distribution used in word2vec:

    ```
    p(word) = (min(1, sqrt(word_frequency / sampling_factor) /
        (word_frequency / sampling_factor)))
    ```

    We assume that the word frequencies follow Zipf's law (s=1) to derive
    a numerical approximation of frequency(rank):

    `frequency(rank) ~ 1/(rank * (log(rank) + gamma) + 1/2 - 1/(12*rank))`
    where `gamma` is the Euler-Mascheroni constant.

    Args:
        size: Int, number of possible words to sample.
        sampling_factor: The sampling factor in the word2vec formula.

    Returns:
        A 1D Numpy array of length `size` where the ith entry
        is the probability that a word of rank i should be sampled.
    gX9v?r   r   g      ?      ?g      (@)r   r"   logminimumsqrt)r   sampling_factorgammarankinv_fqfr   r   r   make_sampling_table   s   !
"rG   z.keras._legacy.preprocessing.sequence.skipgrams   r>   TFc                    sj  g }g }	t | D ]O\}
}|sq|dur|| t k rqtd|
| }tt| |
| d }t||D ]#}||
krV| | }|s@q3|||g |rQ|	ddg q3|	d q3q|dkrtt|	| }dd |D t | fddt|D 7 }|r|	ddgg| 7 }	n|	dg| 7 }	|r|du rt	dd}t
| t| t
| t|	 ||	fS )a  Generates skipgram word pairs.

    DEPRECATED.

    This function transforms a sequence of word indexes (list of integers)
    into tuples of words of the form:

    - (word, word in the same window), with label 1 (positive samples).
    - (word, random word from the vocabulary), with label 0 (negative samples).

    Read more about Skipgram in this gnomic paper by Mikolov et al.:
    [Efficient Estimation of Word Representations in
    Vector Space](http://arxiv.org/pdf/1301.3781v3.pdf)

    Args:
        sequence: A word sequence (sentence), encoded as a list
            of word indices (integers). If using a `sampling_table`,
            word indices are expected to match the rank
            of the words in a reference dataset (e.g. 10 would encode
            the 10-th most frequently occurring token).
            Note that index 0 is expected to be a non-word and will be skipped.
        vocabulary_size: Int, maximum possible word index + 1
        window_size: Int, size of sampling windows (technically half-window).
            The window of a word `w_i` will be
            `[i - window_size, i + window_size+1]`.
        negative_samples: Float >= 0. 0 for no negative (i.e. random) samples.
            1 for same number as positive samples.
        shuffle: Whether to shuffle the word couples before returning them.
        categorical: Bool. If `False`, labels will be
            integers (e.g. `[0, 1, 1 ..]`),
            if `True`, labels will be categorical, e.g.
            `[[1,0],[0,1],[0,1] .. ]`.
        sampling_table: 1D array of size `vocabulary_size` where the entry i
            encodes the probability to sample a word of rank i.
        seed: Random seed.

    Returns:
        couples, labels: where `couples` are int pairs and
            `labels` are either 0 or 1.

    Note:
        By convention, index 0 in the vocabulary is
        a non-word and will be skipped.
    """
    couples = []
    labels = []
    for i, wi in enumerate(sequence):
        if not wi:
            continue  # skip index 0, the non-word
        if sampling_table is not None:
            if sampling_table[wi] < random.random():
                continue

        window_start = max(0, i - window_size)
        window_end = min(len(sequence), i + window_size + 1)
        for j in range(window_start, window_end):
            if j != i:
                wj = sequence[j]
                if not wj:
                    continue
                couples.append([wi, wj])
                if categorical:
                    labels.append([0, 1])
                else:
                    labels.append(1)

    if negative_samples > 0:
        num_negative_samples = int(len(labels) * negative_samples)
        words = [c[0] for c in couples]
        random.shuffle(words)

        couples += [
            [words[i % len(words)], random.randint(1, vocabulary_size - 1)]
            for i in range(num_negative_samples)
        ]
        if categorical:
            labels += [[1, 0]] * num_negative_samples
        else:
            labels += [0] * num_negative_samples

    if shuffle:
        if seed is None:
            # `random.randint` requires ints; the original constant was 10e6.
            seed = random.randint(0, int(10e6))
        # Shuffle couples and labels identically by reseeding between passes.
        random.seed(seed)
        random.shuffle(couples)
        random.seed(seed)
        random.shuffle(labels)

    return couples, labels
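

# Illustrative sketch (added for documentation; not part of the original
# Keras API). `_demo_skipgrams` is a hypothetical helper; it is never called
# at import time. Positive pairs come from the +/- `window_size`
# neighborhood; negatives pair each center word with a random index drawn
# from `[1, vocabulary_size - 1]`.
def _demo_skipgrams():
    sequence = [1, 2, 3]  # word indices; index 0 is reserved as a non-word
    couples, labels = skipgrams(
        sequence, vocabulary_size=5, window_size=1, shuffle=False
    )
    # window_size=1 yields positives (1,2), (2,1), (2,3), (3,2); the default
    # negative_samples=1.0 adds an equal number of random negative pairs,
    # whose exact contents depend on the RNG state.
    assert len(couples) == len(labels) == 8
    assert labels == [1, 1, 1, 1, 0, 0, 0, 0]
    return couples, labels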