from keras.src import backend
from keras.src import layers
from keras.src.api_export import keras_export
from keras.src.applications import imagenet_utils
from keras.src.models import Functional
from keras.src.ops import operation_utils
from keras.src.utils import file_utils

WEIGHTS_PATH = (
    "https://storage.googleapis.com/tensorflow/keras-applications/"
    "vgg19/vgg19_weights_tf_dim_ordering_tf_kernels.h5"
)
WEIGHTS_PATH_NO_TOP = (
    "https://storage.googleapis.com/tensorflow/keras-applications/"
    "vgg19/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5"
)


@keras_export(["keras.applications.vgg19.VGG19", "keras.applications.VGG19"])
def VGG19(
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
    name="vgg19",
):
    """Instantiates the VGG19 model.

    Reference:
    - [Very Deep Convolutional Networks for Large-Scale Image Recognition](
    https://arxiv.org/abs/1409.1556) (ICLR 2015)

    For image classification use cases, see
    [this page for detailed examples](
      https://keras.io/api/applications/#usage-examples-for-image-classification-models).

    For transfer learning use cases, make sure to read the
    [guide to transfer learning & fine-tuning](
      https://keras.io/guides/transfer_learning/).

    The default input size for this model is 224x224.

    Note: each Keras Application expects a specific kind of input preprocessing.
    For VGG19, call `keras.applications.vgg19.preprocess_input` on your
    inputs before passing them to the model.
    `vgg19.preprocess_input` will convert the input images from RGB to BGR,
    then will zero-center each color channel with respect to the ImageNet
    dataset, without scaling.
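
    Example (a minimal classification sketch; `"elephant.jpg"` is a
    placeholder path, not a file shipped with Keras):

    ```python
    import numpy as np

    from keras.applications import vgg19
    from keras.utils import img_to_array, load_img

    model = vgg19.VGG19(weights="imagenet")

    # Load a 224x224 RGB image and add a batch dimension.
    image = load_img("elephant.jpg", target_size=(224, 224))
    batch = np.expand_dims(img_to_array(image), axis=0)

    # RGB -> BGR conversion and ImageNet mean subtraction.
    batch = vgg19.preprocess_input(batch)

    preds = model.predict(batch)
    print(vgg19.decode_predictions(preds, top=3))
    ```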

    Args:
        include_top: whether to include the 3 fully-connected
            layers at the top of the network.
        weights: one of `None` (random initialization),
            `"imagenet"` (pre-training on ImageNet),
            or the path to the weights file to be loaded.
        input_tensor: optional Keras tensor
            (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is `False`. Otherwise the input shape
            has to be `(224, 224, 3)`
            (with `"channels_last"` data format) or
            `(3, 224, 224)` (with `"channels_first"` data format).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 32.
            E.g. `(200, 200, 3)` would be one valid value.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`; a feature-extraction example
            follows the `Returns` section below.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional block.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional block, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is `True`, and
            if no `weights` argument is specified.
        classifier_activation: A `str` or callable. The activation function to
            use on the "top" layer. Ignored unless `include_top=True`. Set
            `classifier_activation=None` to return the logits of the "top"
            layer.  When loading pretrained weights, `classifier_activation` can
            only be `None` or `"softmax"`.
        name: The name of the model (string).

    Returns:
        A model instance.
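
    Example (a sketch of feature extraction with `include_top=False`;
    the random batch merely stands in for real, preprocessed images):

    ```python
    import numpy as np

    from keras.applications import vgg19

    # Without the fully-connected top, any spatial size >= 32x32 works
    # (assuming the default `"channels_last"` data format).
    extractor = vgg19.VGG19(
        include_top=False, weights="imagenet", pooling="avg"
    )

    batch = np.random.uniform(0, 255, size=(2, 180, 180, 3))
    features = extractor.predict(vgg19.preprocess_input(batch))
    print(features.shape)  # (2, 512)
    ```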
    """
    if not (weights in {"imagenet", None} or file_utils.exists(weights)):
        raise ValueError(
            "The `weights` argument should be either `None` (random "
            "initialization), `imagenet` (pre-training on ImageNet), or the "
            "path to the weights file to be loaded.  Received: "
            f"weights={weights}"
        )

    if weights == "imagenet" and include_top and classes != 1000:
        raise ValueError(
            "If using `weights='imagenet'` with `include_top=True`, "
            f"`classes` should be 1000.  Received classes={classes}"
        )

    # Determine the proper input shape.
    input_shape = imagenet_utils.obtain_input_shape(
        input_shape,
        default_size=224,
        min_size=32,
        data_format=backend.image_data_format(),
        require_flatten=include_top,
        weights=weights,
    )

    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    elif not backend.is_keras_tensor(input_tensor):
        img_input = layers.Input(tensor=input_tensor, shape=input_shape)
    else:
        img_input = input_tensor

    # Convolutional blocks, following the standard VGG19 layout:
    # 2 convs of 64 filters, 2 of 128, 4 of 256, then two groups of 4 convs
    # of 512, each block closed by a 2x2 max pooling. The layer names
    # (block1_conv1 ... block5_pool) match the pretrained weight files.
    x = img_input
    for block, (num_convs, filters) in enumerate(
        [(2, 64), (2, 128), (4, 256), (4, 512), (4, 512)], start=1
    ):
        for conv in range(1, num_convs + 1):
            x = layers.Conv2D(
                filters,
                (3, 3),
                activation="relu",
                padding="same",
                name=f"block{block}_conv{conv}",
            )(x)
        x = layers.MaxPooling2D(
            (2, 2), strides=(2, 2), name=f"block{block}_pool"
        )(x)

    if include_top:
        # Classification block.
        x = layers.Flatten(name="flatten")(x)
        x = layers.Dense(4096, activation="relu", name="fc1")(x)
        x = layers.Dense(4096, activation="relu", name="fc2")(x)
        imagenet_utils.validate_activation(classifier_activation, weights)
        x = layers.Dense(
            classes, activation=classifier_activation, name="predictions"
        )(x)
    elif pooling == "avg":
        x = layers.GlobalAveragePooling2D()(x)
    elif pooling == "max":
        x = layers.GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account any potential predecessors
    # of `input_tensor`.
    if input_tensor is not None:
        inputs = operation_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create the model.
    model = Functional(inputs, x, name=name)

    # Load weights.
    if weights == "imagenet":
        if include_top:
            weights_path = file_utils.get_file(
                "vgg19_weights_tf_dim_ordering_tf_kernels.h5",
                WEIGHTS_PATH,
                cache_subdir="models",
                file_hash="cbe5617147190e668d6c5d5026f83318",
            )
        else:
            weights_path = file_utils.get_file(
                "vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5",
                WEIGHTS_PATH_NO_TOP,
                cache_subdir="models",
                file_hash="253f8cb515780f3b799900260a226db6",
            )
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)

    return model


@keras_export("keras.applications.vgg19.preprocess_input")
def preprocess_input(x, data_format=None):
    return imagenet_utils.preprocess_input(
        x, data_format=data_format, mode="caffe"
    )


@keras_export("keras.applications.vgg19.decode_predictions")
def decode_predictions(preds, top=5):
    return imagenet_utils.decode_predictions(preds, top=top)


preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
    mode="",
    ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_CAFFE,
    error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC,
)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__