import warnings

from keras.src import backend
from keras.src import layers
from keras.src.api_export import keras_export
from keras.src.applications import imagenet_utils
from keras.src.models import Functional
from keras.src.ops import operation_utils
from keras.src.utils import file_utils

BASE_WEIGHT_PATH = (
    "https://storage.googleapis.com/tensorflow/keras-applications/"
    "mobilenet_v2/"
)


@keras_export(
    [
        "keras.applications.mobilenet_v2.MobileNetV2",
        "keras.applications.MobileNetV2",
    ]
)
def MobileNetV2(
    input_shape=None,
    alpha=1.0,
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name=None,
):
    """Instantiates the MobileNetV2 architecture.

    MobileNetV2 is very similar to the original MobileNet,
    except that it uses inverted residual blocks with
    bottlenecking features. It has a drastically lower
    parameter count than the original MobileNet.
    MobileNets support any input size greater
    than 32 x 32, with larger image sizes
    offering better performance.

    Reference:
    - [MobileNetV2: Inverted Residuals and Linear Bottlenecks](
        https://arxiv.org/abs/1801.04381) (CVPR 2018)

    This function returns a Keras image classification model,
    optionally loaded with weights pre-trained on ImageNet.

    For image classification use cases, see
    [this page for detailed examples](
      https://keras.io/api/applications/#usage-examples-for-image-classification-models).

    For transfer learning use cases, make sure to read the
    [guide to transfer learning & fine-tuning](
      https://keras.io/guides/transfer_learning/).

    Note: each Keras Application expects a specific kind of input preprocessing.
    For MobileNetV2, call
    `keras.applications.mobilenet_v2.preprocess_input`
    on your inputs before passing them to the model.
    `mobilenet_v2.preprocess_input` will scale input pixels between -1 and 1.
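
    For example, a minimal sketch of the preprocessing step (the random
    batch and the 224x224 size below are illustrative, not required):

    ```python
    import numpy as np
    from keras.applications import mobilenet_v2

    # A dummy batch of two RGB images with pixel values in [0, 255].
    images = np.random.uniform(0, 255, size=(2, 224, 224, 3))
    images = mobilenet_v2.preprocess_input(images)  # rescaled to [-1, 1]
    ```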

    Args:
        input_shape: Optional shape tuple, only to be specified if `include_top`
            is `False` (otherwise the input shape has to be `(224, 224, 3)`
            (with `"channels_last"` data format) or `(3, 224, 224)`
            (with `"channels_first"` data format)).
            It should have exactly 3 input channels, and width and
            height should be no smaller than 32. E.g. `(200, 200, 3)` would
            be one valid value. Defaults to `None`.
            `input_shape` will be ignored if `input_tensor` is provided.
        alpha: Controls the width of the network. This is known as the width
            multiplier in the MobileNet paper.
            - If `alpha < 1.0`, proportionally decreases the number
                of filters in each layer.
            - If `alpha > 1.0`, proportionally increases the number
                of filters in each layer.
            - If `alpha == 1`, the default number of filters from the paper
                is used at each layer. Defaults to `1.0`. See the usage
                example below.
        include_top: Boolean, whether to include the fully-connected layer
            at the top of the network. Defaults to `True`.
        weights: One of `None` (random initialization), `"imagenet"`
            (pre-training on ImageNet), or the path to the weights file
            to be loaded. Defaults to `"imagenet"`.
        input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model. `input_tensor` is useful
            for sharing inputs between multiple different networks.
            Defaults to `None`.
        pooling: Optional pooling mode for feature extraction when `include_top`
            is `False`.
            - `None` (default) means that the output of the model will be
                the 4D tensor output of the last convolutional block.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional block, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will be applied.
        classes: Optional number of classes to classify images into,
            only to be specified if `include_top` is `True`, and if
            no `weights` argument is specified. Defaults to `1000`.
        classifier_activation: A `str` or callable. The activation function
            to use on the "top" layer. Ignored unless `include_top=True`.
            Set `classifier_activation=None` to return the logits of the "top"
            layer. When loading pretrained weights, `classifier_activation`
            can only be `None` or `"softmax"`.
        name: String, the name of the model.

    Returns:
        A model instance.
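
    Example (an illustrative sketch, not the only valid configuration; the
    `(96, 96, 3)` input size, `alpha=0.35`, and the 10-class head are
    assumptions):

    ```python
    from keras import layers, models
    from keras.applications import MobileNetV2

    # Slim, ImageNet-pretrained backbone used as a feature extractor.
    backbone = MobileNetV2(
        input_shape=(96, 96, 3),
        alpha=0.35,
        include_top=False,
        weights="imagenet",
        pooling="avg",
    )

    # Small 10-class classification head on top of the pooled features.
    model = models.Sequential(
        [backbone, layers.Dense(10, activation="softmax")]
    )
    ```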
    """
    if not (weights in {"imagenet", None} or file_utils.exists(weights)):
        raise ValueError(
            "The `weights` argument should be either `None` (random "
            "initialization), `imagenet` (pre-training on ImageNet), or "
            "the path to the weights file to be loaded. Received "
            f"`weights={weights}`"
        )

    if weights == "imagenet" and include_top and classes != 1000:
        raise ValueError(
            'If using `weights="imagenet"` with `include_top` as true, '
            f"`classes` should be 1000. Received `classes={classes}`"
        )

    # If both input_shape and input_tensor are given, they must be
    # consistent with each other.
    if input_shape is not None and input_tensor is not None:
        try:
            is_input_t_tensor = backend.is_keras_tensor(input_tensor)
        except ValueError:
            try:
                is_input_t_tensor = backend.is_keras_tensor(
                    operation_utils.get_source_inputs(input_tensor)
                )
            except ValueError:
                raise ValueError(
                    f"input_tensor: {input_tensor} is not a Keras tensor. "
                    f"Received `type(input_tensor)={type(input_tensor)}`"
                )
        if is_input_t_tensor:
            if backend.image_data_format() == "channels_first":
                if input_tensor.shape[1] != input_shape[1]:
                    raise ValueError(
                        "input_shape[1] must equal shape(input_tensor)[1] "
                        "when `image_data_format` is `channels_first`; "
                        "Received "
                        f"`input_tensor.shape={input_tensor.shape}`, "
                        f"`input_shape={input_shape}`"
                    )
            elif input_tensor.shape[2] != input_shape[1]:
                raise ValueError(
                    "input_tensor.shape[2] must equal input_shape[1]; "
                    f"Received `input_tensor.shape={input_tensor.shape}`, "
                    f"`input_shape={input_shape}`"
                )
        else:
            raise ValueError(
                "input_tensor is not a Keras tensor; "
                f"Received `input_tensor={input_tensor}`"
            )

    # Infer the default input size: prefer one of the sizes for which
    # pretrained weights exist, otherwise fall back to 224.
    if input_shape is None and input_tensor is not None:
        try:
            backend.is_keras_tensor(input_tensor)
        except ValueError:
            raise ValueError(
                "input_tensor must be a valid Keras tensor type; "
                f"Received {input_tensor} of type {type(input_tensor)}"
            )
        if not backend.is_keras_tensor(input_tensor):
            default_size = 224
        else:
            if backend.image_data_format() == "channels_first":
                rows = input_tensor.shape[2]
                cols = input_tensor.shape[3]
            else:
                rows = input_tensor.shape[1]
                cols = input_tensor.shape[2]
            if rows == cols and rows in [96, 128, 160, 192, 224]:
                default_size = rows
            else:
                default_size = 224
    elif input_shape is None:
        default_size = 224
    else:
        if backend.image_data_format() == "channels_first":
            rows = input_shape[1]
            cols = input_shape[2]
        else:
            rows = input_shape[0]
            cols = input_shape[1]
        if rows == cols and rows in [96, 128, 160, 192, 224]:
            default_size = rows
        else:
            default_size = 224

    input_shape = imagenet_utils.obtain_input_shape(
        input_shape,
        default_size=default_size,
        min_size=32,
        data_format=backend.image_data_format(),
        require_flatten=include_top,
        weights=weights,
    )

    if backend.image_data_format() == "channels_last":
        row_axis, col_axis = (0, 1)
    else:
        row_axis, col_axis = (1, 2)
    rows = input_shape[row_axis]
    cols = input_shape[col_axis]

    if weights == "imagenet":
        if alpha not in [0.35, 0.50, 0.75, 1.0, 1.3, 1.4]:
            raise ValueError(
                "If imagenet weights are being loaded, alpha must be one "
                "of `0.35`, `0.50`, `0.75`, `1.0`, `1.3` or `1.4` only; "
                f"Received `alpha={alpha}`"
            )
        if rows != cols or rows not in [96, 128, 160, 192, 224]:
            rows = 224
            warnings.warn(
                "`input_shape` is undefined or non-square, or `rows` is "
                "not in [96, 128, 160, 192, 224]. Weights for input shape "
                "(224, 224) will be loaded as the default.",
                stacklevel=2,
            )

    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    elif not backend.is_keras_tensor(input_tensor):
        img_input = layers.Input(tensor=input_tensor, shape=input_shape)
    else:
        img_input = input_tensor

    channel_axis = (
        1 if backend.image_data_format() == "channels_first" else -1
    )

    # Stem: strided 3x3 convolution, batch norm and ReLU6.
    first_block_filters = _make_divisible(32 * alpha, 8)
    x = layers.Conv2D(
        first_block_filters,
        kernel_size=3,
        strides=(2, 2),
        padding="same",
        use_bias=False,
        name="Conv1",
    )(img_input)
    x = layers.BatchNormalization(
        axis=channel_axis, epsilon=1e-3, momentum=0.999, name="bn_Conv1"
    )(x)
    x = layers.ReLU(6.0, name="Conv1_relu")(x)

    # The 17 inverted residual blocks, as (filters, stride, expansion).
    block_configs = [
        (16, 1, 1),
        (24, 2, 6), (24, 1, 6),
        (32, 2, 6), (32, 1, 6), (32, 1, 6),
        (64, 2, 6), (64, 1, 6), (64, 1, 6), (64, 1, 6),
        (96, 1, 6), (96, 1, 6), (96, 1, 6),
        (160, 2, 6), (160, 1, 6), (160, 1, 6),
        (320, 1, 6),
    ]
    for block_id, (filters, stride, expansion) in enumerate(block_configs):
        x = _inverted_res_block(
            x,
            filters=filters,
            alpha=alpha,
            stride=stride,
            expansion=expansion,
            block_id=block_id,
        )

    # As in the paper, alpha is not applied to the last conv layer unless
    # the width multiplier is greater than 1.
    if alpha > 1.0:
        last_block_filters = _make_divisible(1280 * alpha, 8)
    else:
        last_block_filters = 1280

    x = layers.Conv2D(
        last_block_filters, kernel_size=1, use_bias=False, name="Conv_1"
    )(x)
    x = layers.BatchNormalization(
        axis=channel_axis, epsilon=1e-3, momentum=0.999, name="Conv_1_bn"
    )(x)
    x = layers.ReLU(6.0, name="out_relu")(x)

    if include_top:
        x = layers.GlobalAveragePooling2D()(x)
        imagenet_utils.validate_activation(classifier_activation, weights)
        x = layers.Dense(
            classes, activation=classifier_activation, name="predictions"
        )(x)
    elif pooling == "avg":
        x = layers.GlobalAveragePooling2D()(x)
    elif pooling == "max":
        x = layers.GlobalMaxPooling2D()(x)

    # Ensure the model takes into account any potential predecessors of
    # `input_tensor`.
    if input_tensor is not None:
        inputs = operation_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input

    if name is None:
        name = f"mobilenetv2_{alpha:0.2f}_{rows}"

    # Create the model.
    model = Functional(inputs, x, name=name)

    # Load weights.
    if weights == "imagenet":
        suffix = "" if include_top else "_no_top"
        model_name = (
            "mobilenet_v2_weights_tf_dim_ordering_tf_kernels_"
            f"{float(alpha)}_{rows}{suffix}.h5"
        )
        weights_path = file_utils.get_file(
            model_name, BASE_WEIGHT_PATH + model_name, cache_subdir="models"
        )
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)

    return model
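
# Width-multiplier intuition (illustrative numbers, not part of the original
# module): with `alpha=0.35` the stem's nominal 32 filters become
# `_make_divisible(32 * 0.35, 8)`. Here 32 * 0.35 = 11.2 rounds to the
# nearest multiple of 8 (which is 8), and the "no more than 10% below the
# target" guard in `_make_divisible` bumps the result back up to 16.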


def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id):
    """Inverted ResNet block."""
    channel_axis = (
        1 if backend.image_data_format() == "channels_first" else -1
    )

    in_channels = inputs.shape[channel_axis]
    pointwise_conv_filters = int(filters * alpha)
    # Ensure the number of filters on the last 1x1 convolution is divisible
    # by 8.
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    x = inputs
    prefix = f"block_{block_id}_"

    if block_id:
        # Expand with a pointwise 1x1 convolution.
        x = layers.Conv2D(
            expansion * in_channels,
            kernel_size=1,
            padding="same",
            use_bias=False,
            activation=None,
            name=prefix + "expand",
        )(x)
        x = layers.BatchNormalization(
            axis=channel_axis,
            epsilon=1e-3,
            momentum=0.999,
            name=prefix + "expand_BN",
        )(x)
        x = layers.ReLU(6.0, name=prefix + "expand_relu")(x)
    else:
        prefix = "expanded_conv_"

    # Depthwise 3x3 convolution.
    if stride == 2:
        x = layers.ZeroPadding2D(
            padding=imagenet_utils.correct_pad(x, 3), name=prefix + "pad"
        )(x)
    x = layers.DepthwiseConv2D(
        kernel_size=3,
        strides=stride,
        activation=None,
        use_bias=False,
        padding="same" if stride == 1 else "valid",
        name=prefix + "depthwise",
    )(x)
    x = layers.BatchNormalization(
        axis=channel_axis,
        epsilon=1e-3,
        momentum=0.999,
        name=prefix + "depthwise_BN",
    )(x)
    x = layers.ReLU(6.0, name=prefix + "depthwise_relu")(x)

    # Project back down with a linear (no activation) pointwise 1x1
    # convolution: the "linear bottleneck".
    x = layers.Conv2D(
        pointwise_filters,
        kernel_size=1,
        padding="same",
        use_bias=False,
        activation=None,
        name=prefix + "project",
    )(x)
    x = layers.BatchNormalization(
        axis=channel_axis,
        epsilon=1e-3,
        momentum=0.999,
        name=prefix + "project_BN",
    )(x)

    # Residual connection when the block preserves both resolution and
    # channel count.
    if in_channels == pointwise_filters and stride == 1:
        return layers.Add(name=prefix + "add")([inputs, x])
    return x


def _make_divisible(v, divisor, min_value=None):
    if min_value is None:
        min_value = divisor
    # Round to the nearest multiple of `divisor`, but never below
    # `min_value`.
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # Make sure that rounding down does not go down by more than 10%.
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v


@keras_export("keras.applications.mobilenet_v2.preprocess_input")
def preprocess_input(x, data_format=None):
    return imagenet_utils.preprocess_input(
        x, data_format=data_format, mode="tf"
    )


@keras_export("keras.applications.mobilenet_v2.decode_predictions")
def decode_predictions(preds, top=5):
    return imagenet_utils.decode_predictions(preds, top=top)


preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
    mode="",
    ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
    error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC,
)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__