from keras.src import backend
from keras.src import layers
from keras.src.api_export import keras_export
from keras.src.applications import imagenet_utils
from keras.src.models import Functional
from keras.src.ops import operation_utils
from keras.src.utils import file_utils

WEIGHTS_PATH = (
    "https://storage.googleapis.com/tensorflow/keras-applications/"
    "inception_v3/inception_v3_weights_tf_dim_ordering_tf_kernels.h5"
)
WEIGHTS_PATH_NO_TOP = (
    "https://storage.googleapis.com/tensorflow/keras-applications/"
    "inception_v3/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5"
)


@keras_export(
    [
        "keras.applications.inception_v3.InceptionV3",
        "keras.applications.InceptionV3",
    ]
)
def InceptionV3(
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="inception_v3",
):
    """Instantiates the Inception v3 architecture.

    Reference:
    - [Rethinking the Inception Architecture for Computer Vision](
        http://arxiv.org/abs/1512.00567) (CVPR 2016)

    This function returns a Keras image classification model,
    optionally loaded with weights pre-trained on ImageNet.

    For image classification use cases, see
    [this page for detailed examples](
      https://keras.io/api/applications/#usage-examples-for-image-classification-models).

    For transfer learning use cases, make sure to read the
    [guide to transfer learning & fine-tuning](
      https://keras.io/guides/transfer_learning/).

    Note: each Keras Application expects a specific kind of input preprocessing.
    For `InceptionV3`, call
    `keras.applications.inception_v3.preprocess_input` on your inputs
    before passing them to the model.
    `inception_v3.preprocess_input` will scale input pixels between -1 and 1.

    Args:
        include_top: Boolean, whether to include the fully-connected
            layer at the top, as the last layer of the network.
            Defaults to `True`.
        weights: One of `None` (random initialization),
            `imagenet` (pre-training on ImageNet),
            or the path to the weights file to be loaded.
            Defaults to `"imagenet"`.
        input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model. `input_tensor` is useful for
            sharing inputs between multiple different networks.
            Defaults to `None`.
        input_shape: Optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(299, 299, 3)` (with `channels_last` data format)
            or `(3, 299, 299)` (with `channels_first` data format).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 75.
            E.g. `(150, 150, 3)` would be one valid value.
            `input_shape` will be ignored if the `input_tensor` is provided.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` (default) means that the output of the model will be
                the 4D tensor output of the last convolutional block.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional block, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is `True`, and
            if no `weights` argument is specified. Defaults to 1000.
        classifier_activation: A `str` or callable. The activation function
            to use on the "top" layer. Ignored unless `include_top=True`.
            Set `classifier_activation=None` to return the logits of the "top"
            layer. When loading pretrained weights, `classifier_activation`
            can only be `None` or `"softmax"`.
        name: The name of the model (string).

    Returns:
        A model instance.
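
    Example (an illustrative sketch, not part of the upstream docstring;
    it assumes the ImageNet weights can be downloaded in the current
    environment):

    ```python
    import numpy as np
    import keras

    # Build the classifier and run one image through it.
    # `preprocess_input` scales pixels to [-1, 1], which the pretrained
    # weights expect.
    model = keras.applications.InceptionV3(weights="imagenet")
    images = np.random.uniform(0, 255, size=(1, 299, 299, 3))
    images = keras.applications.inception_v3.preprocess_input(images)
    preds = model.predict(images)
    print(keras.applications.inception_v3.decode_predictions(preds, top=3))
    ```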
    """
    if not (weights in {"imagenet", None} or file_utils.exists(weights)):
        raise ValueError(
            "The `weights` argument should be either "
            "`None` (random initialization), `imagenet` "
            "(pre-training on ImageNet), "
            "or the path to the weights file to be loaded; "
            f"Received: weights={weights}"
        )

    if weights == "imagenet" and include_top and classes != 1000:
        raise ValueError(
            'If using `weights="imagenet"` with `include_top=True`, '
            "`classes` should be 1000. "
            f"Received classes={classes}"
        )

    # Determine the proper input shape.
    input_shape = imagenet_utils.obtain_input_shape(
        input_shape,
        default_size=299,
        min_size=75,
        data_format=backend.image_data_format(),
        require_flatten=include_top,
        weights=weights,
    )

    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    if backend.image_data_format() == "channels_first":
        channel_axis = 1
    else:
        channel_axis = 3

    x = conv2d_bn(img_input, 32, 3, 3, strides=(2, 2), padding="valid")
    x = conv2d_bn(x, 32, 3, 3, padding="valid")
    x = conv2d_bn(x, 64, 3, 3)
    x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv2d_bn(x, 80, 1, 1, padding="valid")
    x = conv2d_bn(x, 192, 3, 3, padding="valid")
    x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)

    # mixed 0: 35 x 35 x 256
    branch1x1 = conv2d_bn(x, 64, 1, 1)

    branch5x5 = conv2d_bn(x, 48, 1, 1)
    branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)

    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)

    branch_pool = layers.AveragePooling2D(
        (3, 3), strides=(1, 1), padding="same"
    )(x)
    branch_pool = conv2d_bn(branch_pool, 32, 1, 1)
    x = layers.concatenate(
        [branch1x1, branch5x5, branch3x3dbl, branch_pool],
        axis=channel_axis,
        name="mixed0",
    )

    # mixed 1: 35 x 35 x 288
    branch1x1 = conv2d_bn(x, 64, 1, 1)

    branch5x5 = conv2d_bn(x, 48, 1, 1)
    branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)

    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)

    branch_pool = layers.AveragePooling2D(
        (3, 3), strides=(1, 1), padding="same"
    )(x)
    branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
    x = layers.concatenate(
        [branch1x1, branch5x5, branch3x3dbl, branch_pool],
        axis=channel_axis,
        name="mixed1",
    )

    # mixed 2: 35 x 35 x 288
    branch1x1 = conv2d_bn(x, 64, 1, 1)

    branch5x5 = conv2d_bn(x, 48, 1, 1)
    branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)

    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)

    branch_pool = layers.AveragePooling2D(
        (3, 3), strides=(1, 1), padding="same"
    )(x)
    branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
    x = layers.concatenate(
        [branch1x1, branch5x5, branch3x3dbl, branch_pool],
        axis=channel_axis,
        name="mixed2",
    )

    # mixed 3: 17 x 17 x 768
    branch3x3 = conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding="valid")

    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(
        branch3x3dbl, 96, 3, 3, strides=(2, 2), padding="valid"
    )

    branch_pool = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
    x = layers.concatenate(
        [branch3x3, branch3x3dbl, branch_pool],
        axis=channel_axis,
        name="mixed3",
    )

    # mixed 4: 17 x 17 x 768
    branch1x1 = conv2d_bn(x, 192, 1, 1)

    branch7x7 = conv2d_bn(x, 128, 1, 1)
    branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)
    branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

    branch7x7dbl = conv2d_bn(x, 128, 1, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

    branch_pool = layers.AveragePooling2D(
        (3, 3), strides=(1, 1), padding="same"
    )(x)
    branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
    x = layers.concatenate(
        [branch1x1, branch7x7, branch7x7dbl, branch_pool],
        axis=channel_axis,
        name="mixed4",
    )

    # mixed 5, 6: 17 x 17 x 768
    for i in range(2):
        branch1x1 = conv2d_bn(x, 192, 1, 1)

        branch7x7 = conv2d_bn(x, 160, 1, 1)
        branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)
        branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

        branch7x7dbl = conv2d_bn(x, 160, 1, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

        branch_pool = layers.AveragePooling2D(
            (3, 3), strides=(1, 1), padding="same"
        )(x)
        branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
        x = layers.concatenate(
            [branch1x1, branch7x7, branch7x7dbl, branch_pool],
            axis=channel_axis,
            name="mixed" + str(5 + i),
        )

    # mixed 7: 17 x 17 x 768
    branch1x1 = conv2d_bn(x, 192, 1, 1)

    branch7x7 = conv2d_bn(x, 192, 1, 1)
    branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)
    branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

    branch7x7dbl = conv2d_bn(x, 192, 1, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

    branch_pool = layers.AveragePooling2D(
        (3, 3), strides=(1, 1), padding="same"
    )(x)
    branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
    x = layers.concatenate(
        [branch1x1, branch7x7, branch7x7dbl, branch_pool],
        axis=channel_axis,
        name="mixed7",
    )

    # mixed 8: 8 x 8 x 1280
    branch3x3 = conv2d_bn(x, 192, 1, 1)
    branch3x3 = conv2d_bn(
        branch3x3, 320, 3, 3, strides=(2, 2), padding="valid"
    )

    branch7x7x3 = conv2d_bn(x, 192, 1, 1)
    branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)
    branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)
    branch7x7x3 = conv2d_bn(
        branch7x7x3, 192, 3, 3, strides=(2, 2), padding="valid"
    )

    branch_pool = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
    x = layers.concatenate(
        [branch3x3, branch7x7x3, branch_pool],
        axis=channel_axis,
        name="mixed8",
    )

    # mixed 9, 10: 8 x 8 x 2048
    for i in range(2):
        branch1x1 = conv2d_bn(x, 320, 1, 1)

        branch3x3 = conv2d_bn(x, 384, 1, 1)
        branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)
        branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)
        branch3x3 = layers.concatenate(
            [branch3x3_1, branch3x3_2],
            axis=channel_axis,
            name="mixed9_" + str(i),
        )

        branch3x3dbl = conv2d_bn(x, 448, 1, 1)
        branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)
        branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)
        branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)
        branch3x3dbl = layers.concatenate(
            [branch3x3dbl_1, branch3x3dbl_2], axis=channel_axis
        )

        branch_pool = layers.AveragePooling2D(
            (3, 3), strides=(1, 1), padding="same"
        )(x)
        branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
        x = layers.concatenate(
            [branch1x1, branch3x3, branch3x3dbl, branch_pool],
            axis=channel_axis,
            name="mixed" + str(9 + i),
        )

    if include_top:
        # Classification block.
        x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
        imagenet_utils.validate_activation(classifier_activation, weights)
        x = layers.Dense(
            classes, activation=classifier_activation, name="predictions"
        )(x)
    else:
        if pooling == "avg":
            x = layers.GlobalAveragePooling2D()(x)
        elif pooling == "max":
            x = layers.GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account any potential predecessors
    # of `input_tensor`.
    if input_tensor is not None:
        inputs = operation_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create the model.
    model = Functional(inputs, x, name=name)

    # Load weights.
    if weights == "imagenet":
        if include_top:
            weights_path = file_utils.get_file(
                "inception_v3_weights_tf_dim_ordering_tf_kernels.h5",
                WEIGHTS_PATH,
                cache_subdir="models",
                file_hash="9a0d58056eeedaa3f26cb7ebd46da564",
            )
        else:
            weights_path = file_utils.get_file(
                "inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5",
                WEIGHTS_PATH_NO_TOP,
                cache_subdir="models",
                file_hash="bcbd6486424b2319ff4ef7d526e38f63",
            )
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)

    return model


def conv2d_bn(
    x, filters, num_row, num_col, padding="same", strides=(1, 1), name=None
):
    """Utility function to apply conv + BN.

    Args:
        x: input tensor.
        filters: filters in `Conv2D`.
        num_row: height of the convolution kernel.
        num_col: width of the convolution kernel.
        padding: padding mode in `Conv2D`.
        strides: strides in `Conv2D`.
        name: name of the ops; will become `name + '_conv'`
            for the convolution and `name + '_bn'` for the
            batch norm layer.

    Returns:
        Output tensor after applying `Conv2D` and `BatchNormalization`.
    """
    if name is not None:
        bn_name = name + "_bn"
        conv_name = name + "_conv"
    else:
        bn_name = None
        conv_name = None
    if backend.image_data_format() == "channels_first":
        bn_axis = 1
    else:
        bn_axis = 3
    x = layers.Conv2D(
        filters,
        (num_row, num_col),
        strides=strides,
        padding=padding,
        use_bias=False,
        name=conv_name,
    )(x)
    x = layers.BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)
    x = layers.Activation("relu", name=name)(x)
    return x


@keras_export("keras.applications.inception_v3.preprocess_input")
def preprocess_input(x, data_format=None):
    return imagenet_utils.preprocess_input(
        x, data_format=data_format, mode="tf"
    )


@keras_export("keras.applications.inception_v3.decode_predictions")
def decode_predictions(preds, top=5):
    return imagenet_utils.decode_predictions(preds, top=top)


preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
    mode="",
    ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
    error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC,
)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__