import warnings

from keras.src import backend
from keras.src import layers
from keras.src.api_export import keras_export
from keras.src.applications import imagenet_utils
from keras.src.models import Functional
from keras.src.ops import operation_utils
from keras.src.utils import file_utils

BASE_WEIGHTS_PATH = (
    "https://storage.googleapis.com/tensorflow/keras-applications/nasnet/"
)
NASNET_MOBILE_WEIGHT_PATH = BASE_WEIGHTS_PATH + "NASNet-mobile.h5"
NASNET_MOBILE_WEIGHT_PATH_NO_TOP = (
    BASE_WEIGHTS_PATH + "NASNet-mobile-no-top.h5"
)
NASNET_LARGE_WEIGHT_PATH = BASE_WEIGHTS_PATH + "NASNet-large.h5"
NASNET_LARGE_WEIGHT_PATH_NO_TOP = BASE_WEIGHTS_PATH + "NASNet-large-no-top.h5"


def NASNet(
    input_shape=None,
    penultimate_filters=4032,
    num_blocks=4,
    stem_block_filters=96,
    skip_reduction=True,
    filter_multiplier=2,
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    pooling=None,
    classes=1000,
    default_size=None,
    classifier_activation="softmax",
    name="NASNet",
):
    """Instantiates a NASNet model.

    Reference:
    - [Learning Transferable Architectures for Scalable Image Recognition](
        https://arxiv.org/abs/1707.07012) (CVPR 2018)

    For image classification use cases, see
    [this page for detailed examples](
      https://keras.io/api/applications/#usage-examples-for-image-classification-models).

    For transfer learning use cases, make sure to read the
    [guide to transfer learning & fine-tuning](
      https://keras.io/guides/transfer_learning/).

    Note: each Keras Application expects a specific kind of input preprocessing.
    For NASNet, call `keras.applications.nasnet.preprocess_input`
    on your inputs before passing them to the model.
    `nasnet.preprocess_input` will scale input pixels between -1 and 1.
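
    Example (a minimal preprocessing sketch; the random batch below is just
    a stand-in for real image data):

    ```python
    import numpy as np
    from keras.applications import nasnet

    # One 224x224 RGB image with pixel values in [0, 255].
    batch = np.random.uniform(0.0, 255.0, size=(1, 224, 224, 3))
    batch = nasnet.preprocess_input(batch)  # pixels rescaled to [-1, 1]
    ```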

    Args:
        input_shape: Optional shape tuple, the input shape
            is by default `(331, 331, 3)` for NASNetLarge and
            `(224, 224, 3)` for NASNetMobile.
            It should have exactly 3 input channels,
            and width and height should be no smaller than 32.
            E.g. `(224, 224, 3)` would be one valid value.
        penultimate_filters: Number of filters in the penultimate layer.
            NASNet models use the notation `NASNet (N @ P)`, where:
                -   N is the number of blocks
                -   P is the number of penultimate filters
        num_blocks: Number of repeated blocks of the NASNet model.
            NASNet models use the notation `NASNet (N @ P)`, where:
                -   N is the number of blocks
                -   P is the number of penultimate filters
        stem_block_filters: Number of filters in the initial stem block
        skip_reduction: Whether to skip the reduction step at the tail
            end of the network.
        filter_multiplier: Controls the width of the network.
            - If `filter_multiplier` < 1.0, proportionally decreases the number
                of filters in each layer.
            - If `filter_multiplier` > 1.0, proportionally increases the number
                of filters in each layer.
            - If `filter_multiplier` = 1, default number of filters from the
                paper are used at each layer.
        include_top: Whether to include the fully-connected
            layer at the top of the network.
        weights: `None` (random initialization) or
            `imagenet` (ImageNet weights)
        input_tensor: Optional Keras tensor (i.e. output of
            `layers.Input()`)
            to use as image input for the model.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model
                will be the 4D tensor output of the
                last convolutional block.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional block, and thus
                the output of the model will be a
                2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: Optional number of classes to classify images
            into, only to be specified if `include_top` is `True`, and
            if no `weights` argument is specified.
        default_size: Specifies the default image size of the model
        classifier_activation: A `str` or callable.
            The activation function to use on the "top" layer.
            Ignored unless `include_top=True`.
            Set `classifier_activation=None` to return the logits
            of the "top" layer. When loading pretrained weights,
            `classifier_activation` can only be `None` or `"softmax"`.
        name: The name of the model (string).
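
    Example (how the `NASNet (N @ P)` notation above maps onto these
    arguments; the values mirror the `NASNetMobile` constructor defined
    later in this module):

    ```python
    # NASNet-A (4 @ 1056) with random weights:
    model = NASNet(
        penultimate_filters=1056,
        num_blocks=4,
        stem_block_filters=32,
        skip_reduction=False,
        default_size=224,
        weights=None,
    )
    ```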

    Returns:
        A model instance.
    """
    if backend.image_data_format() == "channels_first":
        raise ValueError(
            "NASNet does not support the `channels_first` image data "
            "format. Switch to `channels_last` by editing your local "
            "config file at ~/.keras/keras.json"
        )

    if not (weights in {"imagenet", None} or file_utils.exists(weights)):
        raise ValueError(
            "The `weights` argument should be either `None` "
            "(random initialization), `imagenet` "
            "(pre-training on ImageNet), "
            "or the path to the weights file to be loaded."
        )

    if weights == "imagenet" and include_top and classes != 1000:
        raise ValueError(
            'If using `weights` as `"imagenet"` with `include_top` '
            "as true, `classes` should be 1000"
        )

    if (
        isinstance(input_shape, tuple)
        and None in input_shape
        and weights == "imagenet"
    ):
        raise ValueError(
            "When specifying the input shape of a NASNet "
            "and loading `ImageNet` weights, the input_shape argument "
            "must be static (no None entries). "
            f"Got: `input_shape={input_shape}`."
        )

    if default_size is None:
        default_size = 331

    # Determine proper input shape and default size.
    input_shape = imagenet_utils.obtain_input_shape(
        input_shape,
        default_size=default_size,
        min_size=32,
        data_format=backend.image_data_format(),
        require_flatten=include_top,
        weights=weights,
    )

    if backend.image_data_format() != "channels_last":
        warnings.warn(
            "The NASNet family of models is only available "
            'for the input data format "channels_last" '
            "(width, height, channels). "
            "However your settings specify the default "
            'data format "channels_first" (channels, width, height).'
            ' You should set `image_data_format="channels_last"` '
            "in your Keras config located at ~/.keras/keras.json. "
            "The model being returned right now will expect inputs "
            'to follow the "channels_last" data format.',
            stacklevel=2,
        )

    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    if penultimate_filters % (24 * (filter_multiplier**2)) != 0:
        raise ValueError(
            "For NASNet-A models, the `penultimate_filters` must be a "
            "multiple of 24 * (`filter_multiplier` ** 2). "
            f"Current value: {penultimate_filters}"
        )

    channel_dim = 1 if backend.image_data_format() == "channels_first" else -1
    filters = penultimate_filters // 24

    x = layers.Conv2D(
        stem_block_filters,
        (3, 3),
        strides=(2, 2),
        padding="valid",
        use_bias=False,
        name="stem_conv1",
        kernel_initializer="he_normal",
    )(img_input)
    x = layers.BatchNormalization(
        axis=channel_dim, momentum=0.9997, epsilon=1e-3, name="stem_bn1"
    )(x)

    p = None
    x, p = _reduction_a_cell(
        x, p, filters // (filter_multiplier**2), block_id="stem_1"
    )
    x, p = _reduction_a_cell(
        x, p, filters // filter_multiplier, block_id="stem_2"
    )

    for i in range(num_blocks):
        x, p = _normal_a_cell(x, p, filters, block_id=f"{i}")

    x, p0 = _reduction_a_cell(
        x, p, filters * filter_multiplier, block_id=f"reduce_{num_blocks}"
    )
    p = p0 if not skip_reduction else p

    for i in range(num_blocks):
        x, p = _normal_a_cell(
            x,
            p,
            filters * filter_multiplier,
            block_id=f"{num_blocks + i + 1}",
        )

    x, p0 = _reduction_a_cell(
        x,
        p,
        filters * filter_multiplier**2,
        block_id=f"reduce_{2 * num_blocks}",
    )
    p = p0 if not skip_reduction else p

    for i in range(num_blocks):
        x, p = _normal_a_cell(
            x,
            p,
            filters * filter_multiplier**2,
            block_id=f"{2 * num_blocks + i + 1}",
        )

    x = layers.Activation("relu")(x)

    if include_top:
        x = layers.GlobalAveragePooling2D()(x)
        imagenet_utils.validate_activation(classifier_activation, weights)
        x = layers.Dense(
            classes, activation=classifier_activation, name="predictions"
        )(x)
    else:
        if pooling == "avg":
            x = layers.GlobalAveragePooling2D()(x)
        elif pooling == "max":
            x = layers.GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account any potential predecessors
    # of `input_tensor`.
    if input_tensor is not None:
        inputs = operation_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input

    model = Functional(inputs, x, name=name)

    # Load weights.
    if weights == "imagenet":
        if default_size == 224:  # mobile version
            if include_top:
                weights_path = file_utils.get_file(
                    "nasnet_mobile.h5",
                    NASNET_MOBILE_WEIGHT_PATH,
                    cache_subdir="models",
                    file_hash="020fb642bf7360b370c678b08e0adf61",
                )
            else:
                weights_path = file_utils.get_file(
                    "nasnet_mobile_no_top.h5",
                    NASNET_MOBILE_WEIGHT_PATH_NO_TOP,
                    cache_subdir="models",
                    file_hash="1ed92395b5b598bdda52abe5c0dbfd63",
                )
            model.load_weights(weights_path)
        elif default_size == 331:  # large version
            if include_top:
                weights_path = file_utils.get_file(
                    "nasnet_large.h5",
                    NASNET_LARGE_WEIGHT_PATH,
                    cache_subdir="models",
                    file_hash="11577c9a518f0070763c2b964a382f17",
                )
            else:
                weights_path = file_utils.get_file(
                    "nasnet_large_no_top.h5",
                    NASNET_LARGE_WEIGHT_PATH_NO_TOP,
                    cache_subdir="models",
                    file_hash="d81d89dc07e6e56530c4e77faddd61b5",
                )
            model.load_weights(weights_path)
        else:
            raise ValueError(
                "ImageNet weights can only be loaded with NASNetLarge "
                "or NASNetMobile"
            )
    elif weights is not None:
        model.load_weights(weights)

    return model


@keras_export(
    [
        "keras.applications.nasnet.NASNetMobile",
        "keras.applications.NASNetMobile",
    ]
)
def NASNetMobile(
    input_shape=None,
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="nasnet_mobile",
):
    """Instantiates a Mobile NASNet model in ImageNet mode.

    Reference:
    - [Learning Transferable Architectures for Scalable Image Recognition](
        https://arxiv.org/abs/1707.07012) (CVPR 2018)

    Optionally loads weights pre-trained on ImageNet.
    Note that the data format convention used by the model is
    the one specified in your Keras config at `~/.keras/keras.json`.

    Note: each Keras Application expects a specific kind of input preprocessing.
    For NASNet, call `keras.applications.nasnet.preprocess_input` on your
    inputs before passing them to the model.
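
    Example (a classification sketch; `elephant.jpg` is a placeholder for
    any local image file):

    ```python
    import numpy as np
    from keras.applications import nasnet
    from keras.utils import img_to_array, load_img

    model = nasnet.NASNetMobile(weights="imagenet")
    img = img_to_array(load_img("elephant.jpg", target_size=(224, 224)))
    batch = nasnet.preprocess_input(np.expand_dims(img, axis=0))
    preds = model.predict(batch)
    print(nasnet.decode_predictions(preds, top=3))
    ```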

    Args:
        input_shape: Optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(224, 224, 3)` for NASNetMobile).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 32.
            E.g. `(224, 224, 3)` would be one valid value.
        include_top: Whether to include the fully-connected
            layer at the top of the network.
        weights: `None` (random initialization) or
            `imagenet` (ImageNet weights). For loading `imagenet` weights,
            `input_shape` should be (224, 224, 3)
        input_tensor: Optional Keras tensor (i.e. output of
            `layers.Input()`)
            to use as image input for the model.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model
                will be the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a
                2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: Optional number of classes to classify images
            into, only to be specified if `include_top` is `True`, and
            if no `weights` argument is specified.
        classifier_activation: A `str` or callable. The activation function to
            use on the "top" layer. Ignored unless `include_top=True`. Set
            `classifier_activation=None` to return the logits of the "top"
            layer.  When loading pretrained weights, `classifier_activation` can
            only be `None` or `"softmax"`.
        name: The name of the model (string).

    Returns:
        A Keras model instance.
    """
    if backend.backend() == "torch":
        raise ValueError(
            "NASNetMobile is not available with the torch backend "
            "at this time due to an outstanding bug. "
            "If interested, please open a PR."
        )
    if not include_top and input_shape is None:
        input_shape = (224, 224, 3)
    return NASNet(
        input_shape,
        penultimate_filters=1056,
        num_blocks=4,
        stem_block_filters=32,
        skip_reduction=False,
        filter_multiplier=2,
        include_top=include_top,
        weights=weights,
        input_tensor=input_tensor,
        pooling=pooling,
        classes=classes,
        default_size=224,
        classifier_activation=classifier_activation,
        name=name,
    )


@keras_export(
    [
        "keras.applications.nasnet.NASNetLarge",
        "keras.applications.NASNetLarge",
    ]
)
def NASNetLarge(
    input_shape=None,
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="nasnet_large",
):
    """Instantiates a NASNet model in ImageNet mode.

    Reference:
    - [Learning Transferable Architectures for Scalable Image Recognition](
        https://arxiv.org/abs/1707.07012) (CVPR 2018)

    Optionally loads weights pre-trained on ImageNet.
    Note that the data format convention used by the model is
    the one specified in your Keras config at `~/.keras/keras.json`.

    Note: each Keras Application expects a specific kind of input preprocessing.
    For NASNet, call `keras.applications.nasnet.preprocess_input` on your
    inputs before passing them to the model.
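
    Example (a feature-extraction sketch; the random batch is a stand-in
    for real 331x331 images):

    ```python
    import numpy as np
    from keras.applications import nasnet

    # Headless model; global average pooling yields 2D feature vectors.
    extractor = nasnet.NASNetLarge(
        include_top=False, pooling="avg", weights="imagenet"
    )
    batch = nasnet.preprocess_input(
        np.random.uniform(0.0, 255.0, size=(1, 331, 331, 3))
    )
    features = extractor.predict(batch)  # shape: (1, 4032)
    ```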

    Args:
        input_shape: Optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(331, 331, 3)` for NASNetLarge).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 32.
            E.g. `(224, 224, 3)` would be one valid value.
        include_top: Whether to include the fully-connected
            layer at the top of the network.
        weights: `None` (random initialization) or
            `imagenet` (ImageNet weights).  For loading `imagenet` weights,
            `input_shape` should be (331, 331, 3)
        input_tensor: Optional Keras tensor (i.e. output of
            `layers.Input()`)
            to use as image input for the model.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model
                will be the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a
                2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: Optional number of classes to classify images
            into, only to be specified if `include_top` is `True`, and
            if no `weights` argument is specified.
        classifier_activation: A `str` or callable. The activation function to
            use on the "top" layer. Ignored unless `include_top=True`. Set
            `classifier_activation=None` to return the logits of the "top"
            layer.  When loading pretrained weights, `classifier_activation`
            can only be `None` or `"softmax"`.
        name: The name of the model (string).

    Returns:
        A Keras model instance.
    """
    return NASNet(
        input_shape,
        penultimate_filters=4032,
        num_blocks=6,
        stem_block_filters=96,
        skip_reduction=True,
        filter_multiplier=2,
        include_top=include_top,
        weights=weights,
        input_tensor=input_tensor,
        pooling=pooling,
        classes=classes,
        default_size=331,
        classifier_activation=classifier_activation,
        name=name,
    )


def _separable_conv_block(
    ip, filters, kernel_size=(3, 3), strides=(1, 1), block_id=None
):
    """Adds 2 blocks of [relu-separable conv-batchnorm].

    Args:
        ip: Input tensor
        filters: Number of output filters per layer
        kernel_size: Kernel size of separable convolutions
        strides: Strided convolution for downsampling
        block_id: String block_id

    Returns:
        A Keras tensor
    """
    channel_dim = 1 if backend.image_data_format() == "channels_first" else -1

    with backend.name_scope(f"separable_conv_block_{block_id}"):
        x = layers.Activation("relu")(ip)
        if strides == (2, 2):
            x = layers.ZeroPadding2D(
                padding=imagenet_utils.correct_pad(x, kernel_size),
                name=f"separable_conv_1_pad_{block_id}",
            )(x)
            conv_pad = "valid"
        else:
            conv_pad = "same"
        x = layers.SeparableConv2D(
            filters,
            kernel_size,
            strides=strides,
            name=f"separable_conv_1_{block_id}",
            padding=conv_pad,
            use_bias=False,
        )(x)
        x = layers.BatchNormalization(
            axis=channel_dim,
            momentum=0.9997,
            epsilon=1e-3,
            name=f"separable_conv_1_bn_{block_id}",
        )(x)
        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(
            filters,
            kernel_size,
            name=f"separable_conv_2_{block_id}",
            padding="same",
            use_bias=False,
        )(x)
        x = layers.BatchNormalization(
            axis=channel_dim,
            momentum=0.9997,
            epsilon=1e-3,
            name=f"separable_conv_2_bn_{block_id}",
        )(x)
    return x


def _adjust_block(p, ip, filters, block_id=None):
    """Adjusts the input `previous path` to match the shape of the `input`.

    Used in situations where the output number of filters needs to be changed.

    Args:
        p: Input tensor which needs to be modified
        ip: Input tensor whose shape needs to be matched
        filters: Number of output filters to be matched
        block_id: String block_id

    Returns:
        Adjusted Keras tensor
    """
    channel_dim = 1 if backend.image_data_format() == "channels_first" else -1
    img_dim = 2 if backend.image_data_format() == "channels_first" else -2

    with backend.name_scope("adjust_block"):
        if p is None:
            p = ip
        elif p.shape[img_dim] != ip.shape[img_dim]:
            with backend.name_scope(f"adjust_reduction_block_{block_id}"):
                p = layers.Activation(
                    "relu", name=f"adjust_relu_1_{block_id}"
                )(p)
                p1 = layers.AveragePooling2D(
                    (1, 1),
                    strides=(2, 2),
                    padding="valid",
                    name=f"adjust_avg_pool_1_{block_id}",
                )(p)
                p1 = layers.Conv2D(
                    filters // 2,
                    (1, 1),
                    padding="same",
                    use_bias=False,
                    name=f"adjust_conv_1_{block_id}",
                    kernel_initializer="he_normal",
                )(p1)

                p2 = layers.ZeroPadding2D(padding=((0, 1), (0, 1)))(p)
                p2 = layers.Cropping2D(cropping=((1, 0), (1, 0)))(p2)
                p2 = layers.AveragePooling2D(
                    (1, 1),
                    strides=(2, 2),
                    padding="valid",
                    name=f"adjust_avg_pool_2_{block_id}",
                )(p2)
                p2 = layers.Conv2D(
                    filters // 2,
                    (1, 1),
                    padding="same",
                    use_bias=False,
                    name=f"adjust_conv_2_{block_id}",
                    kernel_initializer="he_normal",
                )(p2)

                p = layers.concatenate([p1, p2], axis=channel_dim)
                p = layers.BatchNormalization(
                    axis=channel_dim,
                    momentum=0.9997,
                    epsilon=1e-3,
                    name=f"adjust_bn_{block_id}",
                )(p)
        elif p.shape[channel_dim] != filters:
            with backend.name_scope(f"adjust_projection_block_{block_id}"):
                p = layers.Activation("relu")(p)
                p = layers.Conv2D(
                    filters,
                    (1, 1),
                    strides=(1, 1),
                    padding="same",
                    name=f"adjust_conv_projection_{block_id}",
                    use_bias=False,
                    kernel_initializer="he_normal",
                )(p)
                p = layers.BatchNormalization(
                    axis=channel_dim,
                    momentum=0.9997,
                    epsilon=1e-3,
                    name=f"adjust_bn_{block_id}",
                )(p)
    return p


def _normal_a_cell(ip, p, filters, block_id=None):
    """Adds a Normal cell for NASNet-A (Fig. 4 in the paper).

    Args:
        ip: Input tensor `x`
        p: Input tensor `p`
        filters: Number of output filters
        block_id: String block_id

    Returns:
        A Keras tensor
    """
    channel_dim = 1 if backend.image_data_format() == "channels_first" else -1

    with backend.name_scope(f"normal_A_block_{block_id}"):
        p = _adjust_block(p, ip, filters, block_id)

        h = layers.Activation("relu")(ip)
        h = layers.Conv2D(
            filters,
            (1, 1),
            strides=(1, 1),
            padding="same",
            name=f"normal_conv_1_{block_id}",
            use_bias=False,
            kernel_initializer="he_normal",
        )(h)
        h = layers.BatchNormalization(
            axis=channel_dim,
            momentum=0.9997,
            epsilon=1e-3,
            name=f"normal_bn_1_{block_id}",
        )(h)

        with backend.name_scope("block_1"):
            x1_1 = _separable_conv_block(
                h,
                filters,
                kernel_size=(5, 5),
                block_id=f"normal_left1_{block_id}",
            )
            x1_2 = _separable_conv_block(
                p, filters, block_id=f"normal_right1_{block_id}"
            )
            x1 = layers.add([x1_1, x1_2], name=f"normal_add_1_{block_id}")

        with backend.name_scope("block_2"):
            x2_1 = _separable_conv_block(
                p, filters, (5, 5), block_id=f"normal_left2_{block_id}"
            )
            x2_2 = _separable_conv_block(
                p, filters, (3, 3), block_id=f"normal_right2_{block_id}"
            )
            x2 = layers.add([x2_1, x2_2], name=f"normal_add_2_{block_id}")

        with backend.name_scope("block_3"):
            x3 = layers.AveragePooling2D(
                (3, 3),
                strides=(1, 1),
                padding="same",
                name=f"normal_left3_{block_id}",
            )(h)
            x3 = layers.add([x3, p], name=f"normal_add_3_{block_id}")

        with backend.name_scope("block_4"):
            x4_1 = layers.AveragePooling2D(
                (3, 3),
                strides=(1, 1),
                padding="same",
                name=f"normal_left4_{block_id}",
            )(p)
            x4_2 = layers.AveragePooling2D(
                (3, 3),
                strides=(1, 1),
                padding="same",
                name=f"normal_right4_{block_id}",
            )(p)
            x4 = layers.add([x4_1, x4_2], name=f"normal_add_4_{block_id}")

        with backend.name_scope("block_5"):
            x5 = _separable_conv_block(
                h, filters, block_id=f"normal_left5_{block_id}"
            )
            x5 = layers.add([x5, h], name=f"normal_add_5_{block_id}")

        x = layers.concatenate(
            [p, x1, x2, x3, x4, x5],
            axis=channel_dim,
            name=f"normal_concat_{block_id}",
        )
    return x, ip


def _reduction_a_cell(ip, p, filters, block_id=None):
    """Adds a Reduction cell for NASNet-A (Fig. 4 in the paper).

    Args:
      ip: Input tensor `x`
      p: Input tensor `p`
      filters: Number of output filters
      block_id: String block_id

    Returns:
      A Keras tensor
    """
    channel_dim = 1 if backend.image_data_format() == "channels_first" else -1

    with backend.name_scope(f"reduction_A_block_{block_id}"):
        p = _adjust_block(p, ip, filters, block_id)

        h = layers.Activation("relu")(ip)
        h = layers.Conv2D(
            filters,
            (1, 1),
            strides=(1, 1),
            padding="same",
            name=f"reduction_conv_1_{block_id}",
            use_bias=False,
            kernel_initializer="he_normal",
        )(h)
        h = layers.BatchNormalization(
            axis=channel_dim,
            momentum=0.9997,
            epsilon=1e-3,
            name=f"reduction_bn_1_{block_id}",
        )(h)
        h3 = layers.ZeroPadding2D(
            padding=imagenet_utils.correct_pad(h, 3),
            name=f"reduction_pad_1_{block_id}",
        )(h)

        with backend.name_scope("block_1"):
            x1_1 = _separable_conv_block(
                h,
                filters,
                (5, 5),
                strides=(2, 2),
                block_id=f"reduction_left1_{block_id}",
            )
            x1_2 = _separable_conv_block(
                p,
                filters,
                (7, 7),
                strides=(2, 2),
                block_id=f"reduction_right1_{block_id}",
            )
            x1 = layers.add([x1_1, x1_2], name=f"reduction_add_1_{block_id}")

        with backend.name_scope("block_2"):
            x2_1 = layers.MaxPooling2D(
                (3, 3),
                strides=(2, 2),
                padding="valid",
                name=f"reduction_left2_{block_id}",
            )(h3)
            x2_2 = _separable_conv_block(
                p,
                filters,
                (7, 7),
                strides=(2, 2),
                block_id=f"reduction_right2_{block_id}",
            )
            x2 = layers.add([x2_1, x2_2], name=f"reduction_add_2_{block_id}")

        with backend.name_scope("block_3"):
            x3_1 = layers.AveragePooling2D(
                (3, 3),
                strides=(2, 2),
                padding="valid",
                name=f"reduction_left3_{block_id}",
            )(h3)
            x3_2 = _separable_conv_block(
                p,
                filters,
                (5, 5),
                strides=(2, 2),
                block_id=f"reduction_right3_{block_id}",
            )
            x3 = layers.add([x3_1, x3_2], name=f"reduction_add3_{block_id}")

        with backend.name_scope("block_4"):
            x4 = layers.AveragePooling2D(
                (3, 3),
                strides=(1, 1),
                padding="same",
                name=f"reduction_left4_{block_id}",
            )(x1)
            x4 = layers.add([x2, x4])

        with backend.name_scope("block_5"):
            x5_1 = _separable_conv_block(
                x1, filters, (3, 3), block_id=f"reduction_left4_{block_id}"
            )
            x5_2 = layers.MaxPooling2D(
                (3, 3),
                strides=(2, 2),
                padding="valid",
                name=f"reduction_right5_{block_id}",
            )(h3)
            x5 = layers.add([x5_1, x5_2], name=f"reduction_add4_{block_id}")

        x = layers.concatenate(
            [x2, x3, x4, x5],
            axis=channel_dim,
            name=f"reduction_concat_{block_id}",
        )
        return x, ip


@keras_export("keras.applications.nasnet.preprocess_input")
def preprocess_input(x, data_format=None):
    return imagenet_utils.preprocess_input(
        x, data_format=data_format, mode="tf"
    )


@keras_export("keras.applications.nasnet.decode_predictions")
def decode_predictions(preds, top=5):
    return imagenet_utils.decode_predictions(preds, top=top)


preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
    mode="",
    ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
    error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC,
)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__