tf-models-nightly 2.11.0.dev20230322__py2.py3-none-any.whl → 2.11.0.dev20230323__py2.py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
--- official/modeling/hyperparams/base_config.py
+++ official/modeling/hyperparams/base_config.py
@@ -95,6 +95,20 @@ class Config(params_dict.ParamsDict):
   def BUILDER(self):
     return self._BUILDER
 
+  @classmethod
+  def _get_annotations(cls):
+    """Returns valid annotations.
+
+    Note: this is similar to dataclasses.__annotations__ except it also includes
+    annotations from its parent classes.
+    """
+    all_annotations = typing.get_type_hints(cls)
+    # Removes Config class annotation from the value, e.g., default_params,
+    # restrictions, etc.
+    for k in Config.__annotations__:
+      del all_annotations[k]
+    return all_annotations
+
   @classmethod
   def _isvalidsequence(cls, v):
     """Check if the input values are valid sequences.
@@ -175,9 +189,10 @@ class Config(params_dict.ParamsDict):
     if not subconfig_type:
       subconfig_type = Config
 
-    if k in cls.__annotations__:
+    annotations = cls._get_annotations()
+    if k in annotations:
       # Directly Config subtype.
-      type_annotation = cls.__annotations__[k]  # pytype: disable=invalid-annotation
+      type_annotation = annotations[k]
       i = 0
       # Loop for striping the Optional annotation.
       traverse_in = True
@@ -326,6 +341,9 @@ class Config(params_dict.ParamsDict):
   @classmethod
   def from_args(cls, *args, **kwargs):
     """Builds a config from the given list of arguments."""
+    # Note we intend to keep `__annotations__` instead of `_get_annotations`.
+    # Assuming a parent class of (a, b) with the sub-class of (c, d), the
+    # sub-class will take (c, d) for args, rather than starting from (a, b).
     attributes = list(cls.__annotations__.keys())
     default_params = {a: p for a, p in zip(attributes, args)}
     default_params.update(kwargs)
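
The comment in this hunk captures a deliberate asymmetry: from_args keeps cls.__annotations__ so positional arguments bind to the fields the subclass itself declares, and switching it to _get_annotations would silently rebind them to inherited fields first. A toy reconstruction (simplified stand-in classes, not the library code):

import typing

class Parent:
  a: int = 0
  b: int = 0

class Sub(Parent):
  c: int = 0
  d: int = 0

# Mirrors `list(cls.__annotations__.keys())` in from_args above:
print(list(Sub.__annotations__.keys()))  # ['c', 'd'], so args bind to c, d
# Had from_args used the MRO-merged view, args would bind to a, b first:
print(list(typing.get_type_hints(Sub)))  # ['a', 'b', 'c', 'd']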
--- official/modeling/hyperparams/base_config_test.py
+++ official/modeling/hyperparams/base_config_test.py
@@ -33,6 +33,7 @@ class DumpConfig2(base_config.Config):
   c: int = 2
   d: str = 'text'
   e: DumpConfig1 = DumpConfig1()
+  optional_e: Optional[DumpConfig1] = None
 
 
 @dataclasses.dataclass
@@ -348,6 +349,34 @@ class BaseConfigTest(parameterized.TestCase, tf.test.TestCase):
         ]),
         "['s', 1, 1.0, True, None, {}, [], (), {8: 9, (2,): (3, [4], {6: 7})}]")
 
+  def test_with_superclass_override(self):
+    config = DumpConfig2()
+    config.override({'optional_e': {'a': 2}})
+    self.assertEqual(
+        config.optional_e.as_dict(),
+        {
+            'a': 2,
+            'b': 'text',
+        },
+    )
+
+    # Previously, the following will fail. See b/274696969 for context.
+    config = DumpConfig3()
+    config.override({'optional_e': {'a': 2}})
+    self.assertEqual(
+        config.optional_e.as_dict(),
+        {
+            'a': 2,
+            'b': 'text',
+        },
+    )
+
+  def test_get_annotations_without_base_config_leak(self):
+    with self.assertRaisesRegex(
+        KeyError, "The key 'restrictions' does not exist"
+    ):
+      DumpConfig3().override({'restrictions': None})
+
   def test_with_restrictions(self):
     restrictions = ['e.a<c']
     config = DumpConfig2(restrictions=restrictions)
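
For context on what the first test guards against: optional_e is declared on DumpConfig2, and DumpConfig3 (a DumpConfig2 subclass defined elsewhere in this test file) previously could not resolve the field's type during override(), because cls.__annotations__ on a subclass does not include inherited annotations; _get_annotations is what fixes that. The second test checks the opposite direction: the bookkeeping fields that _get_annotations strips from the base Config class (such as restrictions) must still be rejected as override keys rather than leak in as regular fields.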
--- official/vision/ops/augment.py
+++ official/vision/ops/augment.py
@@ -389,82 +389,94 @@ def _fill_rectangle_video(image,
   return image
 
 
-def cutout_video(image: tf.Tensor, replace: int = 0) -> tf.Tensor:
+def cutout_video(video: tf.Tensor, replace: int = 0) -> tf.Tensor:
   """Apply cutout (https://arxiv.org/abs/1708.04552) to a video.
 
   This operation applies a random size 3D mask of zeros to a random location
-  within `image`. The mask is padded The pixel values filled in will be of the
+  within `video`. The mask is padded The pixel values filled in will be of the
   value `replace`. The location where the mask will be applied is randomly
-  chosen uniformly over the whole image. The size of the mask is randomly
+  chosen uniformly over the whole video. The size of the mask is randomly
   sampled uniformly from [0.25*height, 0.5*height], [0.25*width, 0.5*width],
   and [1, 0.25*depth], which represent the height, width, and number of frames
   of the input video tensor respectively.
 
   Args:
-    image: A video Tensor of type uint8.
+    video: A video Tensor of shape [T, H, W, C].
     replace: What pixel value to fill in the image in the area that has the
       cutout mask applied to it.
 
   Returns:
-    An video Tensor that is of type uint8.
+    A video Tensor with cutout applied.
   """
-  image_depth = tf.shape(image)[0]
-  image_height = tf.shape(image)[1]
-  image_width = tf.shape(image)[2]
+  tf.debugging.assert_shapes([
+      (video, ('T', 'H', 'W', 'C')),
+  ])
+
+  video_depth = tf.shape(video)[0]
+  video_height = tf.shape(video)[1]
+  video_width = tf.shape(video)[2]
 
   # Sample the center location in the image where the zero mask will be applied.
   cutout_center_height = tf.random.uniform(
-      shape=[], minval=0, maxval=image_height, dtype=tf.int32)
+      shape=[], minval=0, maxval=video_height, dtype=tf.int32
+  )
 
   cutout_center_width = tf.random.uniform(
-      shape=[], minval=0, maxval=image_width, dtype=tf.int32)
+      shape=[], minval=0, maxval=video_width, dtype=tf.int32
+  )
 
   cutout_center_depth = tf.random.uniform(
-      shape=[], minval=0, maxval=image_depth, dtype=tf.int32)
+      shape=[], minval=0, maxval=video_depth, dtype=tf.int32
+  )
 
   pad_size_height = tf.random.uniform(
       shape=[],
-      minval=tf.maximum(1, tf.cast(image_height / 4, tf.int32)),
-      maxval=tf.maximum(2, tf.cast(image_height / 2, tf.int32)),
-      dtype=tf.int32)
+      minval=tf.maximum(1, tf.cast(video_height / 4, tf.int32)),
+      maxval=tf.maximum(2, tf.cast(video_height / 2, tf.int32)),
+      dtype=tf.int32,
+  )
   pad_size_width = tf.random.uniform(
       shape=[],
-      minval=tf.maximum(1, tf.cast(image_width / 4, tf.int32)),
-      maxval=tf.maximum(2, tf.cast(image_width / 2, tf.int32)),
-      dtype=tf.int32)
+      minval=tf.maximum(1, tf.cast(video_width / 4, tf.int32)),
+      maxval=tf.maximum(2, tf.cast(video_width / 2, tf.int32)),
+      dtype=tf.int32,
+  )
   pad_size_depth = tf.random.uniform(
       shape=[],
      minval=1,
-      maxval=tf.maximum(2, tf.cast(image_depth / 4, tf.int32)),
-      dtype=tf.int32)
+      maxval=tf.maximum(2, tf.cast(video_depth / 4, tf.int32)),
+      dtype=tf.int32,
+  )
 
   lower_pad = tf.maximum(0, cutout_center_height - pad_size_height)
   upper_pad = tf.maximum(
-      0, image_height - cutout_center_height - pad_size_height)
+      0, video_height - cutout_center_height - pad_size_height
+  )
   left_pad = tf.maximum(0, cutout_center_width - pad_size_width)
-  right_pad = tf.maximum(0, image_width - cutout_center_width - pad_size_width)
+  right_pad = tf.maximum(0, video_width - cutout_center_width - pad_size_width)
   back_pad = tf.maximum(0, cutout_center_depth - pad_size_depth)
   forward_pad = tf.maximum(
-      0, image_depth - cutout_center_depth - pad_size_depth)
+      0, video_depth - cutout_center_depth - pad_size_depth
+  )
 
   cutout_shape = [
-      image_depth - (back_pad + forward_pad),
-      image_height - (lower_pad + upper_pad),
-      image_width - (left_pad + right_pad),
+      video_depth - (back_pad + forward_pad),
+      video_height - (lower_pad + upper_pad),
+      video_width - (left_pad + right_pad),
   ]
   padding_dims = [[back_pad, forward_pad],
                   [lower_pad, upper_pad],
                   [left_pad, right_pad]]
   mask = tf.pad(
-      tf.zeros(cutout_shape, dtype=image.dtype),
-      padding_dims,
-      constant_values=1)
+      tf.zeros(cutout_shape, dtype=video.dtype), padding_dims, constant_values=1
+  )
   mask = tf.expand_dims(mask, -1)
-  mask = tf.tile(mask, [1, 1, 1, 3])
-  image = tf.where(
-      tf.equal(mask, 0),
-      tf.ones_like(image, dtype=image.dtype) * replace, image)
-  return image
+  num_channels = tf.shape(video)[-1]
+  mask = tf.tile(mask, [1, 1, 1, num_channels])
+  video = tf.where(
+      tf.equal(mask, 0), tf.ones_like(video, dtype=video.dtype) * replace, video
+  )
+  return video
 
 
 def gaussian_noise(
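
The net effect of this hunk: cutout_video now asserts the rank-4 [T, H, W, C] layout up front and tiles the cutout mask to the input's channel count instead of a hard-coded 3, so videos with any number of channels are handled. A short usage sketch (assumes the official.vision.ops.augment module is importable; shapes are illustrative):

import tensorflow as tf
from official.vision.ops import augment

# An 8-frame, 32x32, single-channel video. The previous tiling to exactly
# 3 channels assumed RGB input; the channel count is now read from the tensor.
video = tf.ones((8, 32, 32, 1), dtype=tf.uint8)
cut = augment.cutout_video(video, replace=0)
print(cut.shape)  # (8, 32, 32, 1), with a zeroed 3D block at a random location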
--- official/vision/ops/augment_test.py
+++ official/vision/ops/augment_test.py
@@ -21,6 +21,7 @@ from __future__ import print_function
 import random
 from absl.testing import parameterized
 
+import numpy as np
 import tensorflow as tf
 
 from official.vision.ops import augment
@@ -85,6 +86,14 @@ class TransformsTest(parameterized.TestCase, tf.test.TestCase):
     image = tf.zeros(shape, dtype=dtype)
     self.assertAllEqual(image, augment.rotate(image, degrees))
 
+  def test_cutout_video(self, dtype):
+    for num_channels in (1, 2, 3):
+      video = tf.ones((2, 2, 2, num_channels), dtype=dtype)
+      video = augment.cutout_video(video)
+
+      num_zeros = np.sum(video == 0)
+      self.assertGreater(num_zeros, 0)
+
 
 class AutoaugmentTest(tf.test.TestCase, parameterized.TestCase):
--- official/vision/serving/export_tflite_lib.py
+++ official/vision/serving/export_tflite_lib.py
@@ -85,16 +85,21 @@ def representative_dataset(
     yield [image]
 
 
-def convert_tflite_model(saved_model_dir: str,
-                         quant_type: Optional[str] = None,
-                         params: Optional[cfg.ExperimentConfig] = None,
-                         task: Optional[base_task.Task] = None,
-                         calibration_steps: Optional[int] = 2000,
-                         denylisted_ops: Optional[List[str]] = None) -> 'bytes':
+def convert_tflite_model(
+    saved_model_dir: Optional[str] = None,
+    model: Optional[tf.keras.Model] = None,
+    quant_type: Optional[str] = None,
+    params: Optional[cfg.ExperimentConfig] = None,
+    task: Optional[base_task.Task] = None,
+    calibration_steps: Optional[int] = 2000,
+    denylisted_ops: Optional[List[str]] = None,
+) -> 'bytes':
   """Converts and returns a TFLite model.
 
   Args:
     saved_model_dir: The directory to the SavedModel.
+    model: An optional tf.keras.Model instance. If `saved_model_dir` is not
+      available, convert this model to TFLite.
     quant_type: The post training quantization (PTQ) method. It can be one of
       `default` (dynamic range), `fp16` (float16), `int8` (integer wih float
       fallback), `int8_full` (integer only) and None (no quantization).
@@ -111,9 +116,16 @@ def convert_tflite_model(saved_model_dir: str,
 
   Raises:
     ValueError: If `representative_dataset_path` is not present if integer
-      quantization is requested.
+      quantization is requested, or both `saved_model_dir` or `model` are not
+      provided.
   """
-  converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
+  if saved_model_dir:
+    converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
+  elif model is not None:
+    converter = tf.lite.TFLiteConverter.from_keras_model(model)
+  else:
+    raise ValueError('Either `saved_model_dir` or `model` must be specified.')
+
   if quant_type:
     if quant_type.startswith('int8'):
       converter.optimizations = [tf.lite.Optimize.DEFAULT]
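
With this change, callers holding an in-memory Keras model no longer need to export a SavedModel first. A usage sketch (toy model for illustration; pass exactly one of saved_model_dir or model):

import tensorflow as tf
from official.vision.serving import export_tflite_lib

# Toy stand-in for a real vision model.
model = tf.keras.Sequential([
    tf.keras.layers.InputLayer(input_shape=(32, 32, 3)),
    tf.keras.layers.Conv2D(4, 3),
])

# New path added by this diff: convert directly from the Keras model.
tflite_bytes = export_tflite_lib.convert_tflite_model(model=model)

# Pre-existing path, unchanged: convert from a SavedModel directory.
# tflite_bytes = export_tflite_lib.convert_tflite_model(saved_model_dir='/path/to/saved_model')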
--- tf_models_nightly-2.11.0.dev20230322.dist-info/METADATA
+++ tf_models_nightly-2.11.0.dev20230323.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: tf-models-nightly
-Version: 2.11.0.dev20230322
+Version: 2.11.0.dev20230323
 Summary: TensorFlow Official Models
 Home-page: https://github.com/tensorflow/models
 Author: Google Inc.
--- tf_models_nightly-2.11.0.dev20230322.dist-info/RECORD
+++ tf_models_nightly-2.11.0.dev20230323.dist-info/RECORD
@@ -205,8 +205,8 @@ official/modeling/activations/sigmoid_test.py,sha256=yl_7jCvV__5bPAbkvyQJcCnDGW6
 official/modeling/activations/swish.py,sha256=NWUn8lzdNMdX78oe5kylXYjMEmrIeSwuKWKpkBhXwG4,2291
 official/modeling/activations/swish_test.py,sha256=MHf5CL6mdKMiODt-i_m15fMREHhPshVdsWe9A2v8uS0,1411
 official/modeling/hyperparams/__init__.py,sha256=llk_04oD2rd3nnWRmsQAQ19l6OBKMPhQcjh_7RxN4wM,846
-official/modeling/hyperparams/base_config.py,sha256=VqNUjpf6kgt1vLLzU33_STyV_dZXJHYBvSaP9IYknq0,11838
-official/modeling/hyperparams/base_config_test.py,sha256=4jCujgFhbxQUPvhOOnWa9HLysYuT58VyqRbMeFJn7iY,11814
+official/modeling/hyperparams/base_config.py,sha256=k6LSNEJOpGg0int5gGR-6SrU9ibs1w8iLhsHMNeNpvY,12509
+official/modeling/hyperparams/base_config_test.py,sha256=7IAWD_a1KjRUp6T9P8mDCCPEwHcM9WNeeYPhMrDEP2I,12593
 official/modeling/hyperparams/oneof.py,sha256=N_KhdFtTF1xvciQvsiNkh5q3nVcqE0S0dtqITK2GLug,1870
 official/modeling/hyperparams/oneof_test.py,sha256=1UAEMkGi6CFz_lKal60UbH7tzdz3096PYz123Th_pbg,1882
 official/modeling/hyperparams/params_dict.py,sha256=gyP6R3OYMNQcE9FyNU2GVwqCMLDcGzMiiSBhc_wwYS0,16412
@@ -971,8 +971,8 @@ official/vision/ops/anchor.py,sha256=ETLOiNx3CPbsj1yLqgMXU-YXPi6zSwXF6yP6RbEpQeM
 official/vision/ops/anchor_generator.py,sha256=hf5pZSKYVM2iSI6TlLYIxEOwKeBGvGiy-Xo3MR47hRU,7234
 official/vision/ops/anchor_generator_test.py,sha256=fa8PkMR_weA2TGiK_tVfeIZFV4bSUp9nGXuUFTaGfVg,5286
 official/vision/ops/anchor_test.py,sha256=Q8-7crDckzndyRMNmfeZXz88MTSYmLQpg3ELbhcdkak,7623
-official/vision/ops/augment.py,sha256=-Oevj7iFf--AsoiN03r8kiAILQ_1cjJojSDcNqB_jlA,91189
-official/vision/ops/augment_test.py,sha256=aUGf0ABjm2OSu5v-TZLmsomzSkRQk8ez9x5PQ8PlLvY,18896
+official/vision/ops/augment.py,sha256=pNa5xFsD5Pgv6YeyzCrvNXiEAttvKRkW_4YFP2F6xyg,91331
+official/vision/ops/augment_test.py,sha256=K7OwJSZ8C9iWYQJ7lxnk5qI8rIYTkkZhFI_q3zJw82g,19168
 official/vision/ops/box_matcher.py,sha256=AeDSdM8F6atkD71gniF0my1P0VsV93UcpeMayWVMOA4,9057
 official/vision/ops/box_matcher_test.py,sha256=N96ra6RHzRBgumomNfx0xZuOvEFWqQGeu77vZr7NK9w,2428
 official/vision/ops/box_ops.py,sha256=7oSB7dBJgVLS3Ny9hHAU6tjcExRpPjY-xmtadrKtgxQ,34487
@@ -1004,7 +1004,7 @@ official/vision/serving/export_saved_model_lib_v2.py,sha256=BlPSuA0OOFPIkm_iSRBt
 official/vision/serving/export_tfhub.py,sha256=KPxiKl1oN47fwqFeICwXcLW7rUN8435OjJ3VxNqZtPw,3500
 official/vision/serving/export_tfhub_lib.py,sha256=uU4lKFcZB7NqZjdA_k-AmI0bxqgHHz6kaVBtMHdiAs4,2870
 official/vision/serving/export_tflite.py,sha256=0yF12gNHXOr1oRIkrjFRtB80inviz4MoBIxppl_j-4A,5105
-official/vision/serving/export_tflite_lib.py,sha256=nJ1NxDuVRa-KhG4dneNhqNttyM9ZoKI79LHCIwXdepw,6556
+official/vision/serving/export_tflite_lib.py,sha256=_otzNiP7kjpr2TP3CBnj94s3zYF3DffeEgoCem6axrw,6907
 official/vision/serving/export_tflite_lib_test.py,sha256=ysXdFoMSp46rKJHvNw9wlCMp-heAHhY68BKaMMXIF1A,7185
 official/vision/serving/export_utils.py,sha256=36KfyIgF0-HxPaOJsWT4IH4heKkFWYnDVB-iX_mwm9I,4871
 official/vision/serving/image_classification.py,sha256=V20LDXOIWTSpUlmuUBwBh9Sa-ipaRo8kgwW4qVFiJZk,2858
@@ -1069,9 +1069,9 @@ tensorflow_models/__init__.py,sha256=021FKgqdPz3ds1xxfV67FWL7e5ECQ7WHbo67D37vAQI
 tensorflow_models/tensorflow_models_test.py,sha256=3oRV5seq-V1La0eY0IFpGLD7AKkiemylW8GyvZIRtmo,1385
 tensorflow_models/nlp/__init__.py,sha256=ro-1L0G8Z1wby8D1Jbaa3No-n73tiNEx7C4f8pAUNlk,807
 tensorflow_models/vision/__init__.py,sha256=3qeLW_6HkgH5hEclFog2DIRu1FSzOr3JynyM23zGhu8,833
-tf_models_nightly-2.11.0.dev20230322.dist-info/AUTHORS,sha256=1dG3fXVu9jlo7bul8xuix5F5vOnczMk7_yWn4y70uw0,337
-tf_models_nightly-2.11.0.dev20230322.dist-info/LICENSE,sha256=WxeBS_DejPZQabxtfMOM_xn8qoZNJDQjrT7z2wG1I4U,11512
-tf_models_nightly-2.11.0.dev20230322.dist-info/METADATA,sha256=nDQJeoSeIrhNpJ_zLaHs0TTJh6nTufa15zPzqMjIZUk,1426
-tf_models_nightly-2.11.0.dev20230322.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
-tf_models_nightly-2.11.0.dev20230322.dist-info/top_level.txt,sha256=gum2FfO5R4cvjl2-QtP-S1aNmsvIZaFFT6VFzU0f4-g,33
-tf_models_nightly-2.11.0.dev20230322.dist-info/RECORD,,
+tf_models_nightly-2.11.0.dev20230323.dist-info/AUTHORS,sha256=1dG3fXVu9jlo7bul8xuix5F5vOnczMk7_yWn4y70uw0,337
+tf_models_nightly-2.11.0.dev20230323.dist-info/LICENSE,sha256=WxeBS_DejPZQabxtfMOM_xn8qoZNJDQjrT7z2wG1I4U,11512
+tf_models_nightly-2.11.0.dev20230323.dist-info/METADATA,sha256=rK5fmRrHIaf7Z3A655AJ9uUtsrcNQ9UAhxSzvS7mdE8,1426
+tf_models_nightly-2.11.0.dev20230323.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
+tf_models_nightly-2.11.0.dev20230323.dist-info/top_level.txt,sha256=gum2FfO5R4cvjl2-QtP-S1aNmsvIZaFFT6VFzU0f4-g,33
+tf_models_nightly-2.11.0.dev20230323.dist-info/RECORD,,