tf-models-nightly 2.11.0.dev20230321__py2.py3-none-any.whl → 2.11.0.dev20230323__py2.py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registries. It is provided for informational purposes only.
@@ -389,82 +389,94 @@ def _fill_rectangle_video(image,
   return image
 
 
-def cutout_video(image: tf.Tensor, replace: int = 0) -> tf.Tensor:
+def cutout_video(video: tf.Tensor, replace: int = 0) -> tf.Tensor:
   """Apply cutout (https://arxiv.org/abs/1708.04552) to a video.
 
   This operation applies a random size 3D mask of zeros to a random location
-  within `image`. The mask is padded The pixel values filled in will be of the
+  within `video`. The mask is padded The pixel values filled in will be of the
   value `replace`. The location where the mask will be applied is randomly
-  chosen uniformly over the whole image. The size of the mask is randomly
+  chosen uniformly over the whole video. The size of the mask is randomly
   sampled uniformly from [0.25*height, 0.5*height], [0.25*width, 0.5*width],
   and [1, 0.25*depth], which represent the height, width, and number of frames
   of the input video tensor respectively.
 
   Args:
-    image: A video Tensor of type uint8.
+    video: A video Tensor of shape [T, H, W, C].
     replace: What pixel value to fill in the image in the area that has the
       cutout mask applied to it.
 
   Returns:
-    An video Tensor that is of type uint8.
+    A video Tensor with cutout applied.
   """
-  image_depth = tf.shape(image)[0]
-  image_height = tf.shape(image)[1]
-  image_width = tf.shape(image)[2]
+  tf.debugging.assert_shapes([
+      (video, ('T', 'H', 'W', 'C')),
+  ])
+
+  video_depth = tf.shape(video)[0]
+  video_height = tf.shape(video)[1]
+  video_width = tf.shape(video)[2]
 
   # Sample the center location in the image where the zero mask will be applied.
   cutout_center_height = tf.random.uniform(
-      shape=[], minval=0, maxval=image_height, dtype=tf.int32)
+      shape=[], minval=0, maxval=video_height, dtype=tf.int32
+  )
 
   cutout_center_width = tf.random.uniform(
-      shape=[], minval=0, maxval=image_width, dtype=tf.int32)
+      shape=[], minval=0, maxval=video_width, dtype=tf.int32
+  )
 
   cutout_center_depth = tf.random.uniform(
-      shape=[], minval=0, maxval=image_depth, dtype=tf.int32)
+      shape=[], minval=0, maxval=video_depth, dtype=tf.int32
+  )
 
   pad_size_height = tf.random.uniform(
       shape=[],
-      minval=tf.maximum(1, tf.cast(image_height / 4, tf.int32)),
-      maxval=tf.maximum(2, tf.cast(image_height / 2, tf.int32)),
-      dtype=tf.int32)
+      minval=tf.maximum(1, tf.cast(video_height / 4, tf.int32)),
+      maxval=tf.maximum(2, tf.cast(video_height / 2, tf.int32)),
+      dtype=tf.int32,
+  )
   pad_size_width = tf.random.uniform(
       shape=[],
-      minval=tf.maximum(1, tf.cast(image_width / 4, tf.int32)),
-      maxval=tf.maximum(2, tf.cast(image_width / 2, tf.int32)),
-      dtype=tf.int32)
+      minval=tf.maximum(1, tf.cast(video_width / 4, tf.int32)),
+      maxval=tf.maximum(2, tf.cast(video_width / 2, tf.int32)),
+      dtype=tf.int32,
+  )
   pad_size_depth = tf.random.uniform(
       shape=[],
       minval=1,
-      maxval=tf.maximum(2, tf.cast(image_depth / 4, tf.int32)),
-      dtype=tf.int32)
+      maxval=tf.maximum(2, tf.cast(video_depth / 4, tf.int32)),
+      dtype=tf.int32,
+  )
 
   lower_pad = tf.maximum(0, cutout_center_height - pad_size_height)
   upper_pad = tf.maximum(
-      0, image_height - cutout_center_height - pad_size_height)
+      0, video_height - cutout_center_height - pad_size_height
+  )
   left_pad = tf.maximum(0, cutout_center_width - pad_size_width)
-  right_pad = tf.maximum(0, image_width - cutout_center_width - pad_size_width)
+  right_pad = tf.maximum(0, video_width - cutout_center_width - pad_size_width)
   back_pad = tf.maximum(0, cutout_center_depth - pad_size_depth)
   forward_pad = tf.maximum(
-      0, image_depth - cutout_center_depth - pad_size_depth)
+      0, video_depth - cutout_center_depth - pad_size_depth
+  )
 
   cutout_shape = [
-      image_depth - (back_pad + forward_pad),
-      image_height - (lower_pad + upper_pad),
-      image_width - (left_pad + right_pad),
+      video_depth - (back_pad + forward_pad),
+      video_height - (lower_pad + upper_pad),
+      video_width - (left_pad + right_pad),
   ]
   padding_dims = [[back_pad, forward_pad],
                   [lower_pad, upper_pad],
                   [left_pad, right_pad]]
   mask = tf.pad(
-      tf.zeros(cutout_shape, dtype=image.dtype),
-      padding_dims,
-      constant_values=1)
+      tf.zeros(cutout_shape, dtype=video.dtype), padding_dims, constant_values=1
+  )
   mask = tf.expand_dims(mask, -1)
-  mask = tf.tile(mask, [1, 1, 1, 3])
-  image = tf.where(
-      tf.equal(mask, 0),
-      tf.ones_like(image, dtype=image.dtype) * replace, image)
-  return image
+  num_channels = tf.shape(video)[-1]
+  mask = tf.tile(mask, [1, 1, 1, num_channels])
+  video = tf.where(
+      tf.equal(mask, 0), tf.ones_like(video, dtype=video.dtype) * replace, video
+  )
+  return video
 
 
 def gaussian_noise(
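
Note: the rewrite renames `image` to `video`, validates the [T, H, W, C] rank via tf.debugging.assert_shapes, and tiles the mask to tf.shape(video)[-1] channels instead of a hard-coded 3. A minimal usage sketch, not taken from the diff (assumes eager TF2 with tf-models installed; the 4-channel shape is an arbitrary illustration of the new channel handling):

    import tensorflow as tf
    from official.vision.ops import augment

    # 8 frames, 32x32 pixels, 4 channels; the old version only handled 3 channels.
    video = tf.cast(
        tf.random.uniform((8, 32, 32, 4), maxval=256, dtype=tf.int32), tf.uint8)
    out = augment.cutout_video(video, replace=0)  # same shape, one cuboid zeroed
    print(out.shape)  # (8, 32, 32, 4)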
@@ -21,6 +21,7 @@ from __future__ import print_function
 import random
 from absl.testing import parameterized
 
+import numpy as np
 import tensorflow as tf
 
 from official.vision.ops import augment
@@ -85,6 +86,14 @@ class TransformsTest(parameterized.TestCase, tf.test.TestCase):
     image = tf.zeros(shape, dtype=dtype)
     self.assertAllEqual(image, augment.rotate(image, degrees))
 
+  def test_cutout_video(self, dtype):
+    for num_channels in (1, 2, 3):
+      video = tf.ones((2, 2, 2, num_channels), dtype=dtype)
+      video = augment.cutout_video(video)
+
+      num_zeros = np.sum(video == 0)
+      self.assertGreater(num_zeros, 0)
+
 
 class AutoaugmentTest(tf.test.TestCase, parameterized.TestCase):
 
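
Note: the new test leans on eager tensors implementing the NumPy array protocol: `video == 0` yields a boolean tf.Tensor, which np.sum converts and reduces, hence the added `import numpy as np`. The idiom in isolation (a sketch, assuming TF2 eager mode):

    import numpy as np
    import tensorflow as tf

    t = tf.constant([[0, 1], [0, 2]])
    num_zeros = np.sum(t == 0)  # boolean tensor -> NumPy array -> count
    print(int(num_zeros))  # 2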
@@ -182,7 +182,12 @@ def resize_and_crop_image(image,
   with tf.name_scope('resize_and_crop_image'):
     image_size = tf.cast(tf.shape(image)[0:2], tf.float32)
 
-    random_jittering = (aug_scale_min != 1.0 or aug_scale_max != 1.0)
+    random_jittering = (
+        isinstance(aug_scale_min, tf.Tensor)
+        or isinstance(aug_scale_max, tf.Tensor)
+        or not math.isclose(aug_scale_min, 1.0)
+        or not math.isclose(aug_scale_max, 1.0)
+    )
 
     if random_jittering:
       random_scale = tf.random.uniform(
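
Note: this guard (and the identical one in resize_and_crop_image_v2 in the next hunk) replaces exact float inequality for two reasons: the scale bounds may now arrive as tf.Tensor values, for which Python `!=` does not produce a plain bool, and `!=` on Python floats is brittle under rounding. An illustration of the rounding case (not code from the package):

    import math

    a = sum([0.1] * 10)              # 0.9999999999999999, not exactly 1.0
    print(a != 1.0)                  # True: the old check would force jittering
    print(not math.isclose(a, 1.0))  # False: the new check treats it as 1.0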
@@ -292,7 +297,12 @@ def resize_and_crop_image_v2(image,
                              scaled_size)
     desired_size = scaled_size
 
-    random_jittering = (aug_scale_min != 1.0 or aug_scale_max != 1.0)
+    random_jittering = (
+        isinstance(aug_scale_min, tf.Tensor)
+        or isinstance(aug_scale_max, tf.Tensor)
+        or not math.isclose(aug_scale_min, 1.0)
+        or not math.isclose(aug_scale_max, 1.0)
+    )
 
     if random_jittering:
       random_scale = tf.random.uniform(
@@ -641,10 +651,12 @@ def horizontal_flip_masks(masks):
   return masks[:, :, ::-1]
 
 
-def random_horizontal_flip(image, normalized_boxes=None, masks=None, seed=1):
+def random_horizontal_flip(
+    image, normalized_boxes=None, masks=None, seed=1, prob=0.5
+):
   """Randomly flips input image and bounding boxes horizontally."""
   with tf.name_scope('random_horizontal_flip'):
-    do_flip = tf.greater(tf.random.uniform([], seed=seed), 0.5)
+    do_flip = tf.less(tf.random.uniform([], seed=seed), prob)
 
     image = tf.cond(
         do_flip,
@@ -713,10 +725,12 @@ def random_horizontal_flip_with_roi(
     return image, boxes, masks, roi_boxes
 
 
-def random_vertical_flip(image, normalized_boxes=None, masks=None, seed=1):
+def random_vertical_flip(
+    image, normalized_boxes=None, masks=None, seed=1, prob=0.5
+):
   """Randomly flips input image and bounding boxes vertically."""
   with tf.name_scope('random_vertical_flip'):
-    do_flip = tf.greater(tf.random.uniform([], seed=seed), 0.5)
+    do_flip = tf.less(tf.random.uniform([], seed=seed), prob)
 
     image = tf.cond(
         do_flip,
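
Note: besides exposing `prob`, the switch from tf.greater(u, 0.5) to tf.less(u, prob) gives the expected semantics for u ~ U[0, 1): the flip fires with probability exactly `prob`, and prob=0.0 / prob=1.0 mean never / always. A hypothetical call site (a sketch; the tensors and the unpacked return values are illustrative assumptions):

    import tensorflow as tf
    from official.vision.ops import preprocess_ops

    image = tf.random.uniform((64, 64, 3))
    boxes = tf.constant([[0.1, 0.1, 0.5, 0.5]])  # normalized ymin, xmin, ymax, xmax
    # Assumed to return the (possibly flipped) image, boxes, and masks.
    image, boxes, _ = preprocess_ops.random_horizontal_flip(
        image, normalized_boxes=boxes, prob=0.3)  # flip 30% of the time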
@@ -85,16 +85,21 @@ def representative_dataset(
     yield [image]
 
 
-def convert_tflite_model(saved_model_dir: str,
-                         quant_type: Optional[str] = None,
-                         params: Optional[cfg.ExperimentConfig] = None,
-                         task: Optional[base_task.Task] = None,
-                         calibration_steps: Optional[int] = 2000,
-                         denylisted_ops: Optional[List[str]] = None) -> 'bytes':
+def convert_tflite_model(
+    saved_model_dir: Optional[str] = None,
+    model: Optional[tf.keras.Model] = None,
+    quant_type: Optional[str] = None,
+    params: Optional[cfg.ExperimentConfig] = None,
+    task: Optional[base_task.Task] = None,
+    calibration_steps: Optional[int] = 2000,
+    denylisted_ops: Optional[List[str]] = None,
+) -> 'bytes':
   """Converts and returns a TFLite model.
 
   Args:
     saved_model_dir: The directory to the SavedModel.
+    model: An optional tf.keras.Model instance. If `saved_model_dir` is not
+      available, convert this model to TFLite.
     quant_type: The post training quantization (PTQ) method. It can be one of
       `default` (dynamic range), `fp16` (float16), `int8` (integer wih float
       fallback), `int8_full` (integer only) and None (no quantization).
@@ -111,9 +116,16 @@ def convert_tflite_model(saved_model_dir: str,
 
   Raises:
     ValueError: If `representative_dataset_path` is not present if integer
-      quantization is requested.
+      quantization is requested, or both `saved_model_dir` or `model` are not
+      provided.
   """
-  converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
+  if saved_model_dir:
+    converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
+  elif model is not None:
+    converter = tf.lite.TFLiteConverter.from_keras_model(model)
+  else:
+    raise ValueError('Either `saved_model_dir` or `model` must be specified.')
+
   if quant_type:
     if quant_type.startswith('int8'):
       converter.optimizations = [tf.lite.Optimize.DEFAULT]
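
Note: `convert_tflite_model` now accepts an in-memory Keras model as an alternative to a SavedModel directory, raising ValueError when neither is given. A minimal sketch of the new path (the toy model and output path are made up; `quant_type` stays None, so no post-training quantization runs):

    import tensorflow as tf
    from official.vision.serving import export_tflite_lib

    model = tf.keras.Sequential(
        [tf.keras.layers.Conv2D(8, 3, input_shape=(32, 32, 3))])
    tflite_bytes = export_tflite_lib.convert_tflite_model(model=model)
    with open('/tmp/model.tflite', 'wb') as f:
      f.write(tflite_bytes)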
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: tf-models-nightly
-Version: 2.11.0.dev20230321
+Version: 2.11.0.dev20230323
 Summary: TensorFlow Official Models
 Home-page: https://github.com/tensorflow/models
 Author: Google Inc.
@@ -205,8 +205,8 @@ official/modeling/activations/sigmoid_test.py,sha256=yl_7jCvV__5bPAbkvyQJcCnDGW6
 official/modeling/activations/swish.py,sha256=NWUn8lzdNMdX78oe5kylXYjMEmrIeSwuKWKpkBhXwG4,2291
 official/modeling/activations/swish_test.py,sha256=MHf5CL6mdKMiODt-i_m15fMREHhPshVdsWe9A2v8uS0,1411
 official/modeling/hyperparams/__init__.py,sha256=llk_04oD2rd3nnWRmsQAQ19l6OBKMPhQcjh_7RxN4wM,846
-official/modeling/hyperparams/base_config.py,sha256=VqNUjpf6kgt1vLLzU33_STyV_dZXJHYBvSaP9IYknq0,11838
-official/modeling/hyperparams/base_config_test.py,sha256=4jCujgFhbxQUPvhOOnWa9HLysYuT58VyqRbMeFJn7iY,11814
+official/modeling/hyperparams/base_config.py,sha256=k6LSNEJOpGg0int5gGR-6SrU9ibs1w8iLhsHMNeNpvY,12509
+official/modeling/hyperparams/base_config_test.py,sha256=7IAWD_a1KjRUp6T9P8mDCCPEwHcM9WNeeYPhMrDEP2I,12593
 official/modeling/hyperparams/oneof.py,sha256=N_KhdFtTF1xvciQvsiNkh5q3nVcqE0S0dtqITK2GLug,1870
 official/modeling/hyperparams/oneof_test.py,sha256=1UAEMkGi6CFz_lKal60UbH7tzdz3096PYz123Th_pbg,1882
 official/modeling/hyperparams/params_dict.py,sha256=gyP6R3OYMNQcE9FyNU2GVwqCMLDcGzMiiSBhc_wwYS0,16412
@@ -759,15 +759,15 @@ official/projects/yt8m/__init__.py,sha256=XKqEvUISyqNK_cFqr7umxt6r-vnABJ2OqGEKE3
 official/projects/yt8m/train.py,sha256=NNzjalMAmrRSQlYGZlXNAbnwdXK2OMJXfS6qwv6_cTk,987
 official/projects/yt8m/train_test.py,sha256=TUqDVIJeDrkbmVUlJ1rw5RqOtRBzyYwguO-09OdLmkk,3892
 official/projects/yt8m/configs/__init__.py,sha256=sItZXhE5JuW4RRDBhX9EW4eSrp1HdDxhmEv9420aaD0,692
-official/projects/yt8m/configs/yt8m.py,sha256=PCez9EyP1ne4_wn6S7uO29Vwxzd2wWdb3HdipAfRsZA,8704
+official/projects/yt8m/configs/yt8m.py,sha256=R-wUAo-udQYYEfS7SJ4JzyCcifC8NJqnpUiiSltUVFM,8629
 official/projects/yt8m/configs/yt8m_test.py,sha256=5zyL8EVpP127q6yZkBlAve3Fko2ujDCThsWZ9pUDGBM,1561
 official/projects/yt8m/modeling/__init__.py,sha256=XKqEvUISyqNK_cFqr7umxt6r-vnABJ2OqGEKE395w20,609
-official/projects/yt8m/modeling/nn_layers.py,sha256=qMJyW4ZR7FI1yYOJdbltooBGp0q8eF6k7Sf_yZM8fbc,5490
-official/projects/yt8m/modeling/yt8m_model.py,sha256=dfF2qhS5-rHdQMuq5ZjcSKBdE7stOAjFIY4o3npiDFA,8697
+official/projects/yt8m/modeling/nn_layers.py,sha256=DK3yKXBC3yPMfpGqYW1Se5AhQcNBul3376uOFpFc0QQ,10392
+official/projects/yt8m/modeling/yt8m_model.py,sha256=QkRW1yktCJPqE7x2cGOkcBzF605D0D1Gb5ndEIENF9E,3720
 official/projects/yt8m/modeling/yt8m_model_test.py,sha256=k5zPv4lFCJ9UjD2BSJpo4ggVHX5ZAbZ1m7kFWLe-SIM,2131
-official/projects/yt8m/modeling/yt8m_model_utils.py,sha256=bXoALSMfyMixux4BDUFAcTVadSz6uav9sIA-dtK4x8E,8408
+official/projects/yt8m/modeling/yt8m_model_utils.py,sha256=lrSjCKjfBTTlhr3ledY5mit9Qy5P5dAnO_IgBX9gUSs,8451
 official/projects/yt8m/tasks/__init__.py,sha256=tS1Bb__H9G0UtOQMi7sdvaJv4c4ROqNVwaSSQuwCC0Y,692
-official/projects/yt8m/tasks/yt8m_task.py,sha256=G9PI6NkMFEc_0tViQez7Jiaks1KKu2wkmaii5PUSZqk,14615
+official/projects/yt8m/tasks/yt8m_task.py,sha256=BJhvjVnaRPYNrScjDlXUxtKL09bZOijP_EfwuO3Zdio,14438
 official/recommendation/__init__.py,sha256=XKqEvUISyqNK_cFqr7umxt6r-vnABJ2OqGEKE395w20,609
 official/recommendation/constants.py,sha256=aTDcZc7_1Ir3Wt3NAzM96exWcV41iXDLUAz33kf5z4g,2877
 official/recommendation/create_ncf_data.py,sha256=oP6ueDrWfS7bDKW099TFdy9LiYfdd40jFp4Zv6WlENE,4008
@@ -851,7 +851,7 @@ official/vision/dataloaders/classification_input.py,sha256=nyAzbzuorMIusOQd2h8bm
 official/vision/dataloaders/decoder.py,sha256=XGvZHeqJzGr1cgXY4VpEQzGvapkd80u_FDfKqD4LfRs,1016
 official/vision/dataloaders/input_reader.py,sha256=S7NlBfBcGfdqWrIvQNpL5nM7YDFBQWceZv5PQZRCWME,10410
 official/vision/dataloaders/input_reader_factory.py,sha256=Gc5eZ4kEUfwp7E5U_Bl9JaEN6WRZCgtku69gP7a6zjw,1623
-official/vision/dataloaders/maskrcnn_input.py,sha256=8siryuNYgSNbe_5U5__K2p1LHrD3FjpVHQq6I6ZILWQ,16922
+official/vision/dataloaders/maskrcnn_input.py,sha256=F1BfbUjZpwHzAG7NrKQCPbKYyOvjR1qWQ3DMjTPPqC8,16837
 official/vision/dataloaders/parser.py,sha256=7TJNrl28Ddf7w1Nx7FMMbt_aX0SRDX8h0UZa-upemo4,2315
 official/vision/dataloaders/retinanet_input.py,sha256=osZU-_eh70aK5CUD9dwzfoOv8xDfSMQPYxAn3hlQR78,13594
 official/vision/dataloaders/segmentation_input.py,sha256=xNHHSnX_VTaHE6A1WHyvabGmbPLwpJ3JNOmK8B2-RM0,11828
@@ -859,7 +859,7 @@ official/vision/dataloaders/tf_example_decoder.py,sha256=0J-rQzSkUwgR_xEZ-UR5ToS
 official/vision/dataloaders/tf_example_decoder_test.py,sha256=sPvAtExClFz7C6MYqg0pEgZzZ5-PcZYipHU0bKk2Fn8,12619
 official/vision/dataloaders/tf_example_label_map_decoder.py,sha256=SkNyEohAFvUJCVP1XRqy4lTT7bn7agULtgrExfFeLzE,2588
 official/vision/dataloaders/tf_example_label_map_decoder_test.py,sha256=FAXpKc8BZq2y28XjsbTaxlMRwe9vS53mPLW3r61ShHI,7746
-official/vision/dataloaders/tfds_classification_decoders.py,sha256=sQHsswMcbeCixDX0kvii9m_KbE3xxMv5ovFcYFJh1d4,1242
+official/vision/dataloaders/tfds_classification_decoders.py,sha256=X_WUL3QwgGBtE0K0oguONRD38NJs7DNP7yW_gukqimY,1301
 official/vision/dataloaders/tfds_detection_decoders.py,sha256=-2l3aSCOEoJZO3h0LOrdyUn7TBgFy_iWMzQKE_DuN-Q,2273
 official/vision/dataloaders/tfds_factory.py,sha256=wEZCTW8h6CRHEO8cniSEgxXsrtrFZS_WSjaDiEFvqHc,2568
 official/vision/dataloaders/tfds_factory_test.py,sha256=PmgbXmfkmh8erSSii8GYLXu_coShbIdCEjzTeOepd3w,4010
@@ -873,7 +873,7 @@ official/vision/evaluation/__init__.py,sha256=XKqEvUISyqNK_cFqr7umxt6r-vnABJ2OqG
 official/vision/evaluation/coco_evaluator.py,sha256=LMl4rACazArEGsI5RlgRiQW3q5DeQimhh72NjfXhDLs,15535
 official/vision/evaluation/coco_utils.py,sha256=9-5uF3ofhhNdlGbdPBYM8ySbtxxXc_NorGFnU9kCxjg,17911
 official/vision/evaluation/coco_utils_test.py,sha256=PjqRYETQsOAEtcA5Pod8NPmA1xc-7wm_poYU7W_mOXw,1710
-official/vision/evaluation/instance_metrics.py,sha256=HJzrhyl2ETX86zXnyCNBaWG4Lz_z27oqxvVDUt2pcAI,30175
+official/vision/evaluation/instance_metrics.py,sha256=X2McJng7V57AZdoo6-k3C117SUhpKGnsuLbtEOxIgKE,29104
 official/vision/evaluation/instance_metrics_test.py,sha256=GqtAgSpl4hR-wrnk55Efm8dX2b3hHNbHR9hvvR4sJ6s,10813
 official/vision/evaluation/iou.py,sha256=8VH0AHMquWvmdLtr9Yl0ZWv7efJGhhi1sKtAYUyVhCY,6272
 official/vision/evaluation/iou_test.py,sha256=1aPmTZmvp6_bL6yr2c9Wa2StUZU2xZ3mICN11pJoojg,5380
@@ -971,8 +971,8 @@ official/vision/ops/anchor.py,sha256=ETLOiNx3CPbsj1yLqgMXU-YXPi6zSwXF6yP6RbEpQeM
 official/vision/ops/anchor_generator.py,sha256=hf5pZSKYVM2iSI6TlLYIxEOwKeBGvGiy-Xo3MR47hRU,7234
 official/vision/ops/anchor_generator_test.py,sha256=fa8PkMR_weA2TGiK_tVfeIZFV4bSUp9nGXuUFTaGfVg,5286
 official/vision/ops/anchor_test.py,sha256=Q8-7crDckzndyRMNmfeZXz88MTSYmLQpg3ELbhcdkak,7623
-official/vision/ops/augment.py,sha256=-Oevj7iFf--AsoiN03r8kiAILQ_1cjJojSDcNqB_jlA,91189
-official/vision/ops/augment_test.py,sha256=aUGf0ABjm2OSu5v-TZLmsomzSkRQk8ez9x5PQ8PlLvY,18896
+official/vision/ops/augment.py,sha256=pNa5xFsD5Pgv6YeyzCrvNXiEAttvKRkW_4YFP2F6xyg,91331
+official/vision/ops/augment_test.py,sha256=K7OwJSZ8C9iWYQJ7lxnk5qI8rIYTkkZhFI_q3zJw82g,19168
 official/vision/ops/box_matcher.py,sha256=AeDSdM8F6atkD71gniF0my1P0VsV93UcpeMayWVMOA4,9057
 official/vision/ops/box_matcher_test.py,sha256=N96ra6RHzRBgumomNfx0xZuOvEFWqQGeu77vZr7NK9w,2428
 official/vision/ops/box_ops.py,sha256=7oSB7dBJgVLS3Ny9hHAU6tjcExRpPjY-xmtadrKtgxQ,34487
@@ -981,7 +981,7 @@ official/vision/ops/iou_similarity_test.py,sha256=SMq2wPl98_HenxAKk4p4By_ZoRy6LY
 official/vision/ops/mask_ops.py,sha256=9yR6KWC9croKvzMtSFUU3cK87-ileUD8MzHfJ-IvBJo,10260
 official/vision/ops/mask_ops_test.py,sha256=34mwpK2-GRS0BWLcOz2Dk2h9kaNBu5zBV70pElpAGsI,2825
 official/vision/ops/nms.py,sha256=Q5iGXJ-f_hcLNzyCu7CXKigCK4yHSq40ed7ornqZfVQ,8115
-official/vision/ops/preprocess_ops.py,sha256=xiQgsetqA380Po751xaKPMZYUG_3YAmyWIBA0lyfwPw,39149
+official/vision/ops/preprocess_ops.py,sha256=NEcNSEZP1mvWD-XmS8nBvsGxE7EpY_i5MyIsmjM1yp4,39477
 official/vision/ops/preprocess_ops_3d.py,sha256=aUn1OTLkr1046sAfVefybRxHzhaVAtywlmP0R1UGbIg,15378
 official/vision/ops/preprocess_ops_3d_test.py,sha256=mrs0IVby6WFc2gcD8pIYU__KQFfhmP4m9U000hrpIJQ,7239
 official/vision/ops/preprocess_ops_test.py,sha256=_6ozeiWsDMBxSQslNJ2y5SCqqTLwbGzgLBjU7dohqmE,11710
@@ -1004,7 +1004,7 @@ official/vision/serving/export_saved_model_lib_v2.py,sha256=BlPSuA0OOFPIkm_iSRBt
 official/vision/serving/export_tfhub.py,sha256=KPxiKl1oN47fwqFeICwXcLW7rUN8435OjJ3VxNqZtPw,3500
 official/vision/serving/export_tfhub_lib.py,sha256=uU4lKFcZB7NqZjdA_k-AmI0bxqgHHz6kaVBtMHdiAs4,2870
 official/vision/serving/export_tflite.py,sha256=0yF12gNHXOr1oRIkrjFRtB80inviz4MoBIxppl_j-4A,5105
-official/vision/serving/export_tflite_lib.py,sha256=nJ1NxDuVRa-KhG4dneNhqNttyM9ZoKI79LHCIwXdepw,6556
+official/vision/serving/export_tflite_lib.py,sha256=_otzNiP7kjpr2TP3CBnj94s3zYF3DffeEgoCem6axrw,6907
 official/vision/serving/export_tflite_lib_test.py,sha256=ysXdFoMSp46rKJHvNw9wlCMp-heAHhY68BKaMMXIF1A,7185
 official/vision/serving/export_utils.py,sha256=36KfyIgF0-HxPaOJsWT4IH4heKkFWYnDVB-iX_mwm9I,4871
 official/vision/serving/image_classification.py,sha256=V20LDXOIWTSpUlmuUBwBh9Sa-ipaRo8kgwW4qVFiJZk,2858
@@ -1069,9 +1069,9 @@ tensorflow_models/__init__.py,sha256=021FKgqdPz3ds1xxfV67FWL7e5ECQ7WHbo67D37vAQI
 tensorflow_models/tensorflow_models_test.py,sha256=3oRV5seq-V1La0eY0IFpGLD7AKkiemylW8GyvZIRtmo,1385
 tensorflow_models/nlp/__init__.py,sha256=ro-1L0G8Z1wby8D1Jbaa3No-n73tiNEx7C4f8pAUNlk,807
 tensorflow_models/vision/__init__.py,sha256=3qeLW_6HkgH5hEclFog2DIRu1FSzOr3JynyM23zGhu8,833
-tf_models_nightly-2.11.0.dev20230321.dist-info/AUTHORS,sha256=1dG3fXVu9jlo7bul8xuix5F5vOnczMk7_yWn4y70uw0,337
-tf_models_nightly-2.11.0.dev20230321.dist-info/LICENSE,sha256=WxeBS_DejPZQabxtfMOM_xn8qoZNJDQjrT7z2wG1I4U,11512
-tf_models_nightly-2.11.0.dev20230321.dist-info/METADATA,sha256=ONfrJ2f_hyv4tO-2Dj8AVq-IBYx3v_a9Q8ViV5oJfAQ,1426
-tf_models_nightly-2.11.0.dev20230321.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
-tf_models_nightly-2.11.0.dev20230321.dist-info/top_level.txt,sha256=gum2FfO5R4cvjl2-QtP-S1aNmsvIZaFFT6VFzU0f4-g,33
-tf_models_nightly-2.11.0.dev20230321.dist-info/RECORD,,
+tf_models_nightly-2.11.0.dev20230323.dist-info/AUTHORS,sha256=1dG3fXVu9jlo7bul8xuix5F5vOnczMk7_yWn4y70uw0,337
+tf_models_nightly-2.11.0.dev20230323.dist-info/LICENSE,sha256=WxeBS_DejPZQabxtfMOM_xn8qoZNJDQjrT7z2wG1I4U,11512
+tf_models_nightly-2.11.0.dev20230323.dist-info/METADATA,sha256=rK5fmRrHIaf7Z3A655AJ9uUtsrcNQ9UAhxSzvS7mdE8,1426
+tf_models_nightly-2.11.0.dev20230323.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
+tf_models_nightly-2.11.0.dev20230323.dist-info/top_level.txt,sha256=gum2FfO5R4cvjl2-QtP-S1aNmsvIZaFFT6VFzU0f4-g,33
+tf_models_nightly-2.11.0.dev20230323.dist-info/RECORD,,