"""Testing."""

import functools

from unittest import mock  # pylint: disable=unused-import

from tensorflow.python.framework import test_util as _test_util
from tensorflow.python.platform import googletest as _googletest

# pylint: disable=unused-import
from tensorflow.python.framework.test_util import assert_equal_graph_def
from tensorflow.python.framework.test_util import create_local_cluster
from tensorflow.python.framework.test_util import TensorFlowTestCase as TestCase
from tensorflow.python.framework.test_util import gpu_device_name
from tensorflow.python.framework.test_util import is_gpu_available

from tensorflow.python.ops.gradient_checker import compute_gradient_error
from tensorflow.python.ops.gradient_checker import compute_gradient
# pylint: enable=unused-import

from tensorflow.python.util.tf_export import tf_export

tf_export(v1=["test.mock"])(mock)

# Import Benchmark class
Benchmark = _googletest.Benchmark  # pylint: disable=invalid-name

# Import StubOutForTesting class
StubOutForTesting = _googletest.StubOutForTesting  # pylint: disable=invalid-name


@tf_export("test.main")
def main(argv=None):
  """Runs all unit tests."""
  _test_util.InstallStackTraceHandler()
  return _googletest.main(argv)


@tf_export(v1=["test.get_temp_dir"])
def get_temp_dir():
  """Returns a temporary directory for use during tests.

  There is no need to delete the directory after the test.

  @compatibility(TF2)
  This function is removed in TF2. Please use `TestCase.get_temp_dir` instead
  in a test case.
  Outside of a unit test, obtain a temporary directory through Python's
  `tempfile` module.
  @end_compatibility

  Returns:
    The temporary directory.
  """
  return _googletest.GetTempDir()


@tf_export(v1=["test.test_src_dir_path"])
def test_src_dir_path(relative_path):
  """Creates an absolute test srcdir path given a relative path.

  Args:
    relative_path: a path relative to tensorflow root.
      e.g. "core/platform".

  Returns:
    An absolute path to the linked in runfiles.
  """
  return _googletest.test_src_dir_path(relative_path)


@tf_export("test.is_built_with_cuda")
def is_built_with_cuda():
  """Returns whether TensorFlow was built with CUDA (GPU) support.

  This method should only be used in tests written with `tf.test.TestCase`. A
  typical usage is to skip tests that should only run with CUDA (GPU).

  >>> class MyTest(tf.test.TestCase):
  ...
  ...   def test_add_on_gpu(self):
  ...     if not tf.test.is_built_with_cuda():
  ...       self.skipTest("test is only applicable on GPU")
  ...
  ...     with tf.device("GPU:0"):
  ...       self.assertEqual(tf.math.add(1.0, 2.0), 3.0)

  TensorFlow official binary is built with CUDA.
  """
  return _test_util.IsGoogleCudaEnabled()


@tf_export("test.is_built_with_rocm")
def is_built_with_rocm():
  """Returns whether TensorFlow was built with ROCm (GPU) support.

  This method should only be used in tests written with `tf.test.TestCase`. A
  typical usage is to skip tests that should only run with ROCm (GPU).

  >>> class MyTest(tf.test.TestCase):
  ...
  ...   def test_add_on_gpu(self):
  ...     if not tf.test.is_built_with_rocm():
  ...       self.skipTest("test is only applicable on GPU")
  ...
  ...     with tf.device("GPU:0"):
  ...       self.assertEqual(tf.math.add(1.0, 2.0), 3.0)

  TensorFlow official binary is NOT built with ROCm.
  """
  return _test_util.IsBuiltWithROCm()


@tf_export("test.disable_with_predicate")
def disable_with_predicate(pred, skip_message):
  """Disables the test if pred is true."""

  def decorator_disable_with_predicate(func):

    @functools.wraps(func)
    def wrapper_disable_with_predicate(self, *args, **kwargs):
      if pred:
        self.skipTest(skip_message)
      else:
        return func(self, *args, **kwargs)

    return wrapper_disable_with_predicate

  return decorator_disable_with_predicate


@tf_export("test.is_built_with_gpu_support")
def is_built_with_gpu_support():
  """Returns whether TensorFlow was built with GPU (CUDA or ROCm) support.

  This method should only be used in tests written with `tf.test.TestCase`. A
  typical usage is to skip tests that should only run with GPU.

  >>> class MyTest(tf.test.TestCase):
  ...
  ...   def test_add_on_gpu(self):
  ...     if not tf.test.is_built_with_gpu_support():
  ...       self.skipTest("test is only applicable on GPU")
  ...
  ...     with tf.device("GPU:0"):
  ...       self.assertEqual(tf.math.add(1.0, 2.0), 3.0)

  TensorFlow official binary is built with CUDA GPU support.
  """
  return is_built_with_cuda() or is_built_with_rocm()


@tf_export("test.is_built_with_xla")
def is_built_with_xla():
  """Returns whether TensorFlow was built with XLA support.

  This method should only be used in tests written with `tf.test.TestCase`. A
  typical usage is to skip tests that should only run with XLA.

  >>> class MyTest(tf.test.TestCase):
  ...
  ...   def test_add_on_xla(self):
  ...     if not tf.test.is_built_with_xla():
  ...       self.skipTest("test is only applicable on XLA")
  ...
  ...     @tf.function(jit_compile=True)
  ...     def add(x, y):
  ...       return tf.math.add(x, y)
  ...
  ...     self.assertEqual(add(tf.ones(()), tf.ones(())), 2.0)

  TensorFlow official binary is built with XLA.
  """
  return _test_util.IsBuiltWithXLA()


@tf_export("test.is_cpu_target_available")
def is_cpu_target_available(target):
  """Indicates whether TensorFlow was built with support for a given CPU target.

  Args:
    target: The name of the CPU target whose support to check for.

  Returns:
    A boolean indicating whether TensorFlow was built with support for the
    given CPU target.

  This method should only be used in tests written with `tf.test.TestCase`. A
  typical usage is to skip tests that should only run with a given target.

  >>> class MyTest(tf.test.TestCase):
  ...
  ...   def test_add_on_aarch64(self):
  ...     if not tf.test.is_cpu_target_available('aarch64'):
  ...       self.skipTest("test is only applicable on AArch64")
  ...
  ...     @tf.function(jit_compile=True)
  ...     def add(x, y):
  ...       return tf.math.add(x, y)
  ...
  ...     self.assertEqual(add(tf.ones(()), tf.ones(())), 2.0)
  """
  return _test_util.IsCPUTargetAvailable(target)
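
# Example usage (illustrative sketch, kept as a comment so it is not executed
# on import): a minimal test file built on the public `tf.test` aliases
# exported above, combining `TestCase`, `disable_with_predicate`, and `main`.
# The class name, test name, and skip message are hypothetical.
#
#   import tensorflow as tf
#
#   class SquareTest(tf.test.TestCase):
#
#     @tf.test.disable_with_predicate(
#         pred=not tf.test.is_built_with_gpu_support(),
#         skip_message="requires a GPU-enabled build of TensorFlow")
#     def test_square_on_gpu(self):
#       with tf.device("GPU:0"):
#         self.assertAllEqual(tf.square([2, 3]), [4, 9])
#
#   if __name__ == "__main__":
#     tf.test.main()
#
# Note that `pred` is evaluated once, when the class body is executed, so the
# skip decision is fixed at import time rather than per test invocation.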