tf-models-nightly 2.17.0.dev20240313__py2.py3-none-any.whl → 2.17.0.dev20240314__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
official/vision/configs/semantic_segmentation.py

@@ -91,6 +91,10 @@ class DataConfig(cfg.DataConfig):
   )
   additional_dense_features: List[DenseFeatureConfig] = dataclasses.field(
       default_factory=list)
+  # If `centered_crop` is set to True, then the resized crop
+  # (if smaller than the padded size) is placed in the center of the image.
+  # The default behaviour is to place it at the top-left corner.
+  centered_crop: bool = False
 
 
 @dataclasses.dataclass
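A minimal usage sketch (not part of the diff) of the new field; every value except `centered_crop` is an illustrative placeholder:

# Hypothetical: enable the new behaviour on a segmentation DataConfig.
from official.vision.configs import semantic_segmentation as seg_cfg

eval_data = seg_cfg.DataConfig(
    input_path='/tmp/val-*.tfrecord',  # placeholder path
    is_training=False,
    centered_crop=True,  # center the resized crop inside the padded canvas
)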
official/vision/dataloaders/segmentation_input.py

@@ -25,48 +25,54 @@ from official.vision.ops import preprocess_ops
 class Decoder(decoder.Decoder):
   """A tf.Example decoder for segmentation task."""
 
-  def __init__(self,
-               image_feature=config_lib.DenseFeatureConfig(),
-               additional_dense_features=None):
+  def __init__(
+      self,
+      image_feature=config_lib.DenseFeatureConfig(),
+      additional_dense_features=None,
+  ):
     self._keys_to_features = {
-        'image/encoded':
-            tf.io.FixedLenFeature((), tf.string, default_value=''),
-        'image/height':
-            tf.io.FixedLenFeature((), tf.int64, default_value=0),
-        'image/width':
-            tf.io.FixedLenFeature((), tf.int64, default_value=0),
-        'image/segmentation/class/encoded':
-            tf.io.FixedLenFeature((), tf.string, default_value=''),
-        image_feature.feature_name:
-            tf.io.FixedLenFeature((), tf.string, default_value='')
+        'image/encoded': tf.io.FixedLenFeature((), tf.string, default_value=''),
+        'image/height': tf.io.FixedLenFeature((), tf.int64, default_value=0),
+        'image/width': tf.io.FixedLenFeature((), tf.int64, default_value=0),
+        'image/segmentation/class/encoded': tf.io.FixedLenFeature(
+            (), tf.string, default_value=''
+        ),
+        image_feature.feature_name: tf.io.FixedLenFeature(
+            (), tf.string, default_value=''
+        ),
     }
     if additional_dense_features:
       for feature in additional_dense_features:
         self._keys_to_features[feature.feature_name] = tf.io.FixedLenFeature(
-            (), tf.string, default_value='')
+            (), tf.string, default_value=''
+        )
 
   def decode(self, serialized_example):
-    return tf.io.parse_single_example(serialized_example,
-                                      self._keys_to_features)
+    return tf.io.parse_single_example(
+        serialized_example, self._keys_to_features
+    )
 
 
 class Parser(parser.Parser):
   """Parser to parse an image and its annotations into a dictionary of tensors."""
 
-  def __init__(self,
-               output_size,
-               crop_size=None,
-               resize_eval_groundtruth=True,
-               gt_is_matting_map=False,
-               groundtruth_padded_size=None,
-               ignore_label=255,
-               aug_rand_hflip=False,
-               preserve_aspect_ratio=True,
-               aug_scale_min=1.0,
-               aug_scale_max=1.0,
-               dtype='float32',
-               image_feature=config_lib.DenseFeatureConfig(),
-               additional_dense_features=None):
+  def __init__(
+      self,
+      output_size,
+      crop_size=None,
+      resize_eval_groundtruth=True,
+      gt_is_matting_map=False,
+      groundtruth_padded_size=None,
+      ignore_label=255,
+      aug_rand_hflip=False,
+      preserve_aspect_ratio=True,
+      aug_scale_min=1.0,
+      aug_scale_max=1.0,
+      dtype='float32',
+      image_feature=config_lib.DenseFeatureConfig(),
+      additional_dense_features=None,
+      centered_crop=False,
+  ):
     """Initializes parameters for parsing annotations in the dataset.
 
     Args:
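For reference, an illustrative sketch (not from the diff) that builds a serialized `tf.Example` carrying the keys the reworked `Decoder` declares, then decodes it back; shapes and values are arbitrary:

import tensorflow as tf

def _bytes_feature(value):
  return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

def _int64_feature(value):
  return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

image_png = tf.io.encode_png(tf.zeros((8, 8, 3), tf.uint8)).numpy()
mask_png = tf.io.encode_png(tf.zeros((8, 8, 1), tf.uint8)).numpy()
example = tf.train.Example(features=tf.train.Features(feature={
    'image/encoded': _bytes_feature(image_png),
    'image/height': _int64_feature(8),
    'image/width': _int64_feature(8),
    'image/segmentation/class/encoded': _bytes_feature(mask_png),
}))
# Decoder().decode(example.SerializeToString()) yields the parsed dict,
# assuming the default DenseFeatureConfig maps onto 'image/encoded'.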
@@ -100,13 +106,18 @@ class Parser(parser.Parser):
         dataset mean/stddev.
       additional_dense_features: `list` of DenseFeatureConfig for additional
         dense features.
+      centered_crop: If `centered_crop` is set to True, then the resized crop
+        (if smaller than the padded size) is placed in the center of the
+        image. The default behaviour is to place it at the top-left corner.
     """
     self._output_size = output_size
     self._crop_size = crop_size
     self._resize_eval_groundtruth = resize_eval_groundtruth
     if (not resize_eval_groundtruth) and (groundtruth_padded_size is None):
-      raise ValueError('groundtruth_padded_size ([height, width]) needs to be'
-                       'specified when resize_eval_groundtruth is False.')
+      raise ValueError(
+          'groundtruth_padded_size ([height, width]) needs to be '
+          'specified when resize_eval_groundtruth is False.'
+      )
     self._gt_is_matting_map = gt_is_matting_map
     self._groundtruth_padded_size = groundtruth_padded_size
     self._ignore_label = ignore_label
@@ -122,6 +133,12 @@ class Parser(parser.Parser):
 
     self._image_feature = image_feature
     self._additional_dense_features = additional_dense_features
+    self._centered_crop = centered_crop
+    if self._centered_crop and not self._resize_eval_groundtruth:
+      raise ValueError(
+          'centered_crop is only supported when resize_eval_groundtruth is'
+          ' True.'
+      )
 
   def _prepare_image_and_label(self, data):
     """Prepare normalized image and label."""
@@ -129,21 +146,25 @@ class Parser(parser.Parser):
     width = data['image/width']
 
     label = tf.io.decode_image(
-        data['image/segmentation/class/encoded'], channels=1)
+        data['image/segmentation/class/encoded'], channels=1
+    )
     label = tf.reshape(label, (1, height, width))
     label = tf.cast(label, tf.float32)
 
     image = tf.io.decode_image(
         data[self._image_feature.feature_name],
         channels=self._image_feature.num_channels,
-        dtype=tf.uint8)
+        dtype=tf.uint8,
+    )
     image = tf.reshape(image, (height, width, self._image_feature.num_channels))
     # Normalizes the image feature with mean and std values, which are divided
     # by 255 because a uint8 image is re-scaled automatically. Images of other
     # types will be wrongly normalized.
     image = preprocess_ops.normalize_image(
-        image, [mean / 255.0 for mean in self._image_feature.mean],
-        [stddev / 255.0 for stddev in self._image_feature.stddev])
+        image,
+        [mean / 255.0 for mean in self._image_feature.mean],
+        [stddev / 255.0 for stddev in self._image_feature.stddev],
+    )
 
     if self._additional_dense_features:
       input_list = [image]
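The division by 255 exists because `normalize_image` converts uint8 inputs to floats in [0, 1] before subtracting the statistics, so per-channel stats given on the 0-255 scale must be rescaled the same way. A stand-alone sketch; the mean/stddev values are assumed ImageNet-style statistics, not taken from this diff:

import tensorflow as tf

pixels = tf.constant([[[128, 64, 32]]], tf.uint8)
as_float = tf.image.convert_image_dtype(pixels, tf.float32)  # now in [0, 1]
mean = [m / 255.0 for m in (123.675, 116.28, 103.53)]    # assumed stats
stddev = [s / 255.0 for s in (58.395, 57.12, 57.375)]    # assumed stats
normalized = (as_float - mean) / stddev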
@@ -151,11 +172,14 @@ class Parser(parser.Parser):
         feature = tf.io.decode_image(
             data[feature_cfg.feature_name],
             channels=feature_cfg.num_channels,
-            dtype=tf.uint8)
+            dtype=tf.uint8,
+        )
         feature = tf.reshape(feature, (height, width, feature_cfg.num_channels))
         feature = preprocess_ops.normalize_image(
-            feature, [mean / 255.0 for mean in feature_cfg.mean],
-            [stddev / 255.0 for stddev in feature_cfg.stddev])
+            feature,
+            [mean / 255.0 for mean in feature_cfg.mean],
+            [stddev / 255.0 for stddev in feature_cfg.stddev],
+        )
         input_list.append(feature)
       concat_input = tf.concat(input_list, axis=2)
     else:
@@ -164,7 +188,8 @@ class Parser(parser.Parser):
     if not self._preserve_aspect_ratio:
       label = tf.reshape(label, [data['image/height'], data['image/width'], 1])
       concat_input = tf.image.resize(
-          concat_input, self._output_size, method='bilinear')
+          concat_input, self._output_size, method='bilinear'
+      )
       label = tf.image.resize(label, self._output_size, method='nearest')
       label = tf.reshape(label[:, :, -1], [1] + self._output_size)
 
@@ -195,14 +220,16 @@ class Parser(parser.Parser):
 
       image_mask = tf.concat([image, label], axis=2)
       image_mask_crop = tf.image.random_crop(
-          image_mask, self._crop_size + [tf.shape(image_mask)[-1]])
+          image_mask, self._crop_size + [tf.shape(image_mask)[-1]]
+      )
       image = image_mask_crop[:, :, :-1]
       label = tf.reshape(image_mask_crop[:, :, -1], [1] + self._crop_size)
 
     # Flips image randomly during training.
     if self._aug_rand_hflip:
       image, _, label = preprocess_ops.random_horizontal_flip(
-          image, masks=label)
+          image, masks=label
+      )
 
     train_image_size = self._crop_size if self._crop_size else self._output_size
     # Resizes and crops image.
@@ -211,7 +238,9 @@ class Parser(parser.Parser):
         train_image_size,
         train_image_size,
         aug_scale_min=self._aug_scale_min,
-        aug_scale_max=self._aug_scale_max)
+        aug_scale_max=self._aug_scale_max,
+        centered_crop=self._centered_crop,
+    )
 
     # Resizes and crops boxes.
     image_scale = image_info[2, :]
@@ -221,11 +250,17 @@ class Parser(parser.Parser):
     # The label is first offset by +1 and then padded with 0.
     label += 1
     label = tf.expand_dims(label, axis=3)
-    label = preprocess_ops.resize_and_crop_masks(label, image_scale,
-                                                 train_image_size, offset)
+    label = preprocess_ops.resize_and_crop_masks(
+        label,
+        image_scale,
+        train_image_size,
+        offset,
+        centered_crop=self._centered_crop,
+    )
     label -= 1
     label = tf.where(
-        tf.equal(label, -1), self._ignore_label * tf.ones_like(label), label)
+        tf.equal(label, -1), self._ignore_label * tf.ones_like(label), label
+    )
     label = tf.squeeze(label, axis=0)
     valid_mask = tf.not_equal(label, self._ignore_label)
 
@@ -255,30 +290,58 @@ class Parser(parser.Parser):
 
     # Resizes and crops image.
     image, image_info = preprocess_ops.resize_and_crop_image(
-        image, self._output_size, self._output_size)
+        image,
+        self._output_size,
+        self._output_size,
+        centered_crop=self._centered_crop,
+    )
 
     if self._resize_eval_groundtruth:
       # Resizes eval masks to match input image sizes. In that case, mean IoU
      # is computed on output_size not the original size of the images.
       image_scale = image_info[2, :]
       offset = image_info[3, :]
-      label = preprocess_ops.resize_and_crop_masks(label, image_scale,
-                                                   self._output_size, offset)
+      label = preprocess_ops.resize_and_crop_masks(
+          label,
+          image_scale,
+          self._output_size,
+          offset,
+          centered_crop=self._centered_crop,
+      )
     else:
-      label = tf.image.pad_to_bounding_box(label, 0, 0,
-                                           self._groundtruth_padded_size[0],
-                                           self._groundtruth_padded_size[1])
+      if self._centered_crop:
+        label_size = tf.cast(tf.shape(label)[0:2], tf.int32)
+        label = tf.image.pad_to_bounding_box(
+            label,
+            tf.maximum(
+                (self._groundtruth_padded_size[0] - label_size[0]) // 2, 0
+            ),
+            tf.maximum(
+                (self._groundtruth_padded_size[1] - label_size[1]) // 2, 0
+            ),
+            self._groundtruth_padded_size[0],
+            self._groundtruth_padded_size[1],
+        )
+      else:
+        label = tf.image.pad_to_bounding_box(
+            label,
+            0,
+            0,
+            self._groundtruth_padded_size[0],
+            self._groundtruth_padded_size[1],
+        )
 
     label -= 1
     label = tf.where(
-        tf.equal(label, -1), self._ignore_label * tf.ones_like(label), label)
+        tf.equal(label, -1), self._ignore_label * tf.ones_like(label), label
+    )
     label = tf.squeeze(label, axis=0)
 
     valid_mask = tf.not_equal(label, self._ignore_label)
     labels = {
         'masks': label,
         'valid_masks': valid_mask,
-        'image_info': image_info
+        'image_info': image_info,
     }
 
     # Cast image as self._dtype
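In both the train and eval paths, the +1/-1 shift around the mask padding is what lets zero padding be told apart from a genuine class 0. A stand-alone sketch of the trick, with `tf.pad` standing in for the padding done inside `resize_and_crop_masks` and 255 standing in for `ignore_label`:

import tensorflow as tf

label = tf.constant([[0, 1], [2, 0]])       # class ids; 0 is a real class
shifted = label + 1                         # real classes become 1..N
padded = tf.pad(shifted, [[0, 1], [0, 0]])  # padded pixels are 0
restored = padded - 1                       # padded pixels become -1
restored = tf.where(
    tf.equal(restored, -1), 255 * tf.ones_like(restored), restored
)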
official/vision/ops/preprocess_ops.py

@@ -168,6 +168,7 @@ def resize_and_crop_image(
     seed=1,
     method=tf.image.ResizeMethod.BILINEAR,
     keep_aspect_ratio=True,
+    centered_crop=False,
 ):
   """Resizes the input image to output size (RetinaNet style).
 
@@ -195,6 +196,9 @@ def resize_and_crop_image(
     seed: seed for random scale jittering.
     method: function to resize input image to scaled image.
     keep_aspect_ratio: whether or not to keep the aspect ratio when resizing.
+    centered_crop: If `centered_crop` is set to True, then the resized crop
+      (if smaller than the padded size) is placed in the center of the image.
+      The default behaviour is to place it at the top-left corner.
 
   Returns:
     output_image: `Tensor` of shape [height, width, 3] where [height, width]
@@ -266,9 +270,19 @@ def resize_and_crop_image(
 
   output_image = scaled_image
   if padded_size is not None:
-    output_image = tf.image.pad_to_bounding_box(
-        scaled_image, 0, 0, padded_size[0], padded_size[1]
-    )
+    if centered_crop:
+      scaled_image_size = tf.cast(tf.shape(scaled_image)[0:2], tf.int32)
+      output_image = tf.image.pad_to_bounding_box(
+          scaled_image,
+          tf.maximum((padded_size[0] - scaled_image_size[0]) // 2, 0),
+          tf.maximum((padded_size[1] - scaled_image_size[1]) // 2, 0),
+          padded_size[0],
+          padded_size[1],
+      )
+    else:
+      output_image = tf.image.pad_to_bounding_box(
+          scaled_image, 0, 0, padded_size[0], padded_size[1]
+      )
 
   image_info = tf.stack([
       image_size,
@@ -686,7 +700,9 @@ def resize_and_crop_boxes(boxes, image_scale, output_size, offset):
   return boxes
 
 
-def resize_and_crop_masks(masks, image_scale, output_size, offset):
+def resize_and_crop_masks(
+    masks, image_scale, output_size, offset, centered_crop: bool = False
+):
   """Resizes masks to output size with scale and offset.
 
   Args:
@@ -697,6 +713,9 @@ def resize_and_crop_masks(masks, image_scale, output_size, offset):
       output image size.
     offset: 2D `Tensor` representing top-left corner [y0, x0] to crop scaled
       masks.
+    centered_crop: If `centered_crop` is set to True, then the resized crop
+      (if smaller than the padded size) is placed in the center of the image.
+      The default behaviour is to place it at the top-left corner.
 
   Returns:
     masks: `Tensor` of shape [N, H, W, C] representing the scaled masks.
@@ -719,6 +738,7 @@ def resize_and_crop_masks(masks, image_scale, output_size, offset):
   scaled_masks = tf.image.resize(
       masks, scaled_size, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR
   )
+
   offset = tf.cast(offset, tf.int32)
   scaled_masks = scaled_masks[
       :,
@@ -727,9 +747,20 @@ def resize_and_crop_masks(masks, image_scale, output_size, offset):
       :,
   ]
 
-  output_masks = tf.image.pad_to_bounding_box(
-      scaled_masks, 0, 0, output_size[0], output_size[1]
-  )
+  if centered_crop:
+    scaled_mask_size = tf.cast(tf.shape(scaled_masks)[1:3], tf.int32)
+    output_masks = tf.image.pad_to_bounding_box(
+        scaled_masks,
+        tf.maximum((output_size[0] - scaled_mask_size[0]) // 2, 0),
+        tf.maximum((output_size[1] - scaled_mask_size[1]) // 2, 0),
+        output_size[0],
+        output_size[1],
+    )
+  else:
+    output_masks = tf.image.pad_to_bounding_box(
+        scaled_masks, 0, 0, output_size[0], output_size[1]
+    )
+
   # Remove padding.
   output_masks = output_masks[1::]
   return output_masks
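At eval time the two functions are driven off the same `image_info` so image and mask stay aligned; a sketch under the assumption (inferred from the parser code above, not stated in this file) that row 2 of `image_info` holds the y/x scale and row 3 the crop offset:

import numpy as np
import tensorflow as tf
from official.vision.ops import preprocess_ops

image = tf.convert_to_tensor(np.random.rand(1024, 2048, 3))
masks = tf.ones((1, 1024, 2048, 1), tf.float32)

image, image_info = preprocess_ops.resize_and_crop_image(
    image, [256, 256], [256, 256], centered_crop=True
)
masks = preprocess_ops.resize_and_crop_masks(
    masks, image_info[2, :], [256, 256], image_info[3, :], centered_crop=True
)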
official/vision/ops/preprocess_ops_test.py

@@ -482,6 +482,46 @@ class InputUtilsTest(parameterized.TestCase, tf.test.TestCase):
     self.assertShapeEqual(input_image, aug_image)
     self.assertDTypeEqual(aug_image, np.uint8)
 
+  @parameterized.parameters(0.25, 0.5, 0.75, 1, 1.25, 1.5)
+  def test_resize_and_crop_image_and_masks(self, scale):
+    image = tf.convert_to_tensor(np.random.rand(1024, 2048, 3))
+    label = tf.convert_to_tensor(np.ones((1, 1024, 2048, 1), dtype=np.int32))
+    image, image_info = preprocess_ops.resize_and_crop_image(
+        image, (256, 256), (256, 256), scale, scale, centered_crop=True
+    )
+    image_scale = image_info[2, :]
+    offset = image_info[3, :]
+    label = preprocess_ops.resize_and_crop_masks(
+        label, image_scale, (256, 256), offset, centered_crop=True
+    )
+    self.assertEqual(image.shape[0:2], label.shape[1:3])
+    image_arr = image.numpy()
+    label_arr = np.squeeze(label.numpy())
+
+    scaled_height = round(1024 * 256 * scale / 2048)
+    scaled_width = round(2048 * 256 * scale / 2048)
+    height_offset = max((256 - scaled_height) // 2, 0)
+    width_offset = max((256 - scaled_width) // 2, 0)
+
+    self.assertEqual(
+        label_arr[
+            height_offset : 256 - height_offset,
+            width_offset : 256 - width_offset,
+        ].mean(),
+        1,
+    )
+    self.assertEqual(label_arr[0:height_offset, :].mean(), 0)
+    self.assertEqual(image_arr[0:height_offset, :, :].mean(), 0)
+    self.assertEqual(label_arr[256 - height_offset :, :].mean(), 0)
+    self.assertEqual(image_arr[256 - height_offset :, :, :].mean(), 0)
+    if width_offset > 0:
+      self.assertEqual(label_arr[height_offset, 0:width_offset].mean(), 0)
+      self.assertEqual(label_arr[height_offset, 256 - width_offset :].mean(), 0)
+      self.assertEqual(image_arr[height_offset, 0:width_offset, :].mean(), 0)
+      self.assertEqual(
+          image_arr[height_offset, 256 - width_offset :, :].mean(), 0
+      )
+
 
 if __name__ == '__main__':
   tf.test.main()
official/vision/tasks/semantic_segmentation.py

@@ -114,7 +114,8 @@ class SemanticSegmentationTask(base_task.Task):
         preserve_aspect_ratio=params.preserve_aspect_ratio,
         dtype=params.dtype,
         image_feature=params.image_feature,
-        additional_dense_features=params.additional_dense_features)
+        additional_dense_features=params.additional_dense_features,
+        centered_crop=params.centered_crop)
 
     reader = input_reader_factory.input_reader_generator(
         params,
METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: tf-models-nightly
-Version: 2.17.0.dev20240313
+Version: 2.17.0.dev20240314
 Summary: TensorFlow Official Models
 Home-page: https://github.com/tensorflow/models
 Author: Google Inc.

RECORD

@@ -965,7 +965,7 @@ official/vision/configs/maskrcnn.py,sha256=yL8kggxXaCTIpSkcozAV2UudO7UqVcEh1_-rM
 official/vision/configs/maskrcnn_test.py,sha256=Wfkbz30h2qxPcpuu6CEpQsf8I_2df6y10-4bRLsWlj8,1733
 official/vision/configs/retinanet.py,sha256=oCKinkh4IyPslmI1pakwi6dVziwjkZ2cIcpSoGRjqnM,17806
 official/vision/configs/retinanet_test.py,sha256=ffS3QufQMLF8FZhKNmi7Yr1RDTnIyZ1XKQ9agr2EyW8,1699
-official/vision/configs/semantic_segmentation.py,sha256=wN9ejI_KTOS4SasCHJTRRdV_3DWW0eiqvVWtArevP8o,30616
+official/vision/configs/semantic_segmentation.py,sha256=4ZAyLWKcFYReyrEWBc5b7wld3mMcuH0RcaRe_4J2RrA,30831
 official/vision/configs/semantic_segmentation_test.py,sha256=va-ZG6CtBKcs0NicZe6WmJvHxPxxih7nB0orNtrRiEA,1867
 official/vision/configs/video_classification.py,sha256=tf2XJhD_7c1Ned3eS93Sc0qrQ8U3M_zVJy09KI-9em8,14513
 official/vision/configs/video_classification_test.py,sha256=I1HSamxRQ3-f4-YHIeUChnT5CtHCxFQdiL0zy6RRUXU,1879

@@ -988,7 +988,7 @@ official/vision/dataloaders/input_reader_factory.py,sha256=WpvSA8qyqAo3wkmme4WqX
 official/vision/dataloaders/maskrcnn_input.py,sha256=iCc08yYD-7mvIPojgBjm_nSvoQACXWCIeZNZN8CfXSs,16822
 official/vision/dataloaders/parser.py,sha256=nMXnhigMa_ascSJ2OK88xi4HdE9xvfL3G4oMrHau-t4,2315
 official/vision/dataloaders/retinanet_input.py,sha256=joxJL4hQVPw-FW5iUc7RsxP60N7iYGRuVFpU3gC5flE,18291
-official/vision/dataloaders/segmentation_input.py,sha256=EV5mVYyDzmNefGtHTUw7tecMGpajTnyrfY7nV_zugnY,11838
+official/vision/dataloaders/segmentation_input.py,sha256=Klg5KAChYZDRvqzZfyIzdPy54rTlWYZp2AotolD3WX8,12934
 official/vision/dataloaders/tf_example_decoder.py,sha256=9yCT6uSLMpmw50w7zdaRR_BXy6vIvliLZntrYAgzD18,8647
 official/vision/dataloaders/tf_example_decoder_test.py,sha256=PHxneXHn5-eIMdmk1uI4IPLa178kTCifa4EF53ik2Jo,12629
 official/vision/dataloaders/tf_example_label_map_decoder.py,sha256=EHu6ZQvYxqjUliOlsN_f4okYt9Hdpydv_lM_dQwrklU,2598

@@ -1115,10 +1115,10 @@ official/vision/ops/iou_similarity_test.py,sha256=x5jlcMqMCUYC5cRgdbR0VlAW67AoXo
 official/vision/ops/mask_ops.py,sha256=cZLpIowzEA57bXPDbVXa6mktZVHvGSH-TQ1CxHjpQXw,10270
 official/vision/ops/mask_ops_test.py,sha256=D3xbbbleJd4HkpWOSDSEy6hNihsRBY93BqPF6JP-dJk,2835
 official/vision/ops/nms.py,sha256=bKYDAtyV5j6PG7g-RGF2ZccCI5V1xVvuajNblCy1TGs,8125
-official/vision/ops/preprocess_ops.py,sha256=RAouiPkapg_TJHRh_dj1mwcULTBdEvHf6wHPSL_S-Nk,41306
+official/vision/ops/preprocess_ops.py,sha256=tDW9apUocwSzGnKkgSrKGwqbcZpb6-WQ8nGnW1_ds20,42569
 official/vision/ops/preprocess_ops_3d.py,sha256=K2583ynA9Zt9pOBNoWSD8KtQR1fwRYBoylJ9NusIBtI,16110
 official/vision/ops/preprocess_ops_3d_test.py,sha256=1MmygNfRBnQYGszsrKBGqP_GrPlP4_UGuddCbkYcIms,8364
-official/vision/ops/preprocess_ops_test.py,sha256=ilCDXPyGI7bYWIXWumTcqNJJ9fVEYmGloW-95au0rOo,15483
+official/vision/ops/preprocess_ops_test.py,sha256=FY3EUvQIb82fYqYJPmzkE7pmkhXIQrd7JNLGFPB6SXI,17213
 official/vision/ops/sampling_ops.py,sha256=1jywCA_E4qDUFWsykCLUwZsWtQSR0QREXdJhvP5qCvc,16072
 official/vision/ops/spatial_transform_ops.py,sha256=PVEJGAn0ygtsrid84vD5GgV0jsjyWoNn14RBzreMxM4,38389
 official/vision/ops/target_gather.py,sha256=Ir3X76yXYEVFSYX5h-yfS8SMkY37GYuypBP2C8ykggo,3965

@@ -1150,7 +1150,7 @@ official/vision/tasks/__init__.py,sha256=qfhL5xyDrjZez_zjw613TyciLkqtWm-INFeES7G
 official/vision/tasks/image_classification.py,sha256=Oh4tH-SAD0-MmVfglE3pFXI6-LXVJr2C-WwbudnOdSk,16699
 official/vision/tasks/maskrcnn.py,sha256=iC8-OIFyYcQWpdbBODCXEag2R3YUNdQcZsn_wYAd8f0,25569
 official/vision/tasks/retinanet.py,sha256=EFILc2YPDeLqWcH7QtzN2k5sT5KdKQwioh12NKVOIqg,18261
-official/vision/tasks/semantic_segmentation.py,sha256=jpKzMSrhgXXD1y-HqvUtrcktCOEfL99C35uiaZval3U,14243
+official/vision/tasks/semantic_segmentation.py,sha256=hQBxyT1qZ1SQd4xHCWETiVNcQs96mYxPEXzdTMY8zTI,14287
 official/vision/tasks/video_classification.py,sha256=F4RnG_OvnayPDeWb8khEp8lFyM6CRWi_FlUaBOgsQjk,14318
 official/vision/utils/__init__.py,sha256=7oiypy0N82PDw9aSdcJBLVoGTd_oRSUOdvuJhMv4leQ,609
 official/vision/utils/ops_test.py,sha256=GqsKndrbfbpsfR6Bhs4gnMelXMVnO66CN5eNna-Wj7Y,4394

@@ -1203,9 +1203,9 @@ tensorflow_models/__init__.py,sha256=etxw45SHxuwFCRX5qGxGMP83II0JfJulzNl5GSNJvhw
 tensorflow_models/tensorflow_models_test.py,sha256=AxUYUdiQn416UR7jg0h6rmv688esvlKDfpyDCIQkF18,1395
 tensorflow_models/nlp/__init__.py,sha256=4tA5Pf4qaFwT-fIFOpX7x7FHJpnyJT-5UgOeFYTyMlc,807
 tensorflow_models/vision/__init__.py,sha256=zBorY_v5xva1uI-qxhZO3Qh-Dii-Suq6wEYh6hKHDfc,833
-tf_models_nightly-2.17.0.dev20240313.dist-info/AUTHORS,sha256=1dG3fXVu9jlo7bul8xuix5F5vOnczMk7_yWn4y70uw0,337
-tf_models_nightly-2.17.0.dev20240313.dist-info/LICENSE,sha256=WxeBS_DejPZQabxtfMOM_xn8qoZNJDQjrT7z2wG1I4U,11512
-tf_models_nightly-2.17.0.dev20240313.dist-info/METADATA,sha256=XJi11Uap1W1o_JQQ5OlBn7Ty-1FNcy_jAjkSeu3F6dE,1432
-tf_models_nightly-2.17.0.dev20240313.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
-tf_models_nightly-2.17.0.dev20240313.dist-info/top_level.txt,sha256=gum2FfO5R4cvjl2-QtP-S1aNmsvIZaFFT6VFzU0f4-g,33
-tf_models_nightly-2.17.0.dev20240313.dist-info/RECORD,,
+tf_models_nightly-2.17.0.dev20240314.dist-info/AUTHORS,sha256=1dG3fXVu9jlo7bul8xuix5F5vOnczMk7_yWn4y70uw0,337
+tf_models_nightly-2.17.0.dev20240314.dist-info/LICENSE,sha256=WxeBS_DejPZQabxtfMOM_xn8qoZNJDQjrT7z2wG1I4U,11512
+tf_models_nightly-2.17.0.dev20240314.dist-info/METADATA,sha256=rG9SC_Q5R5o4KDAZzsZhuLMumj4osPggvZYK-8MIPUM,1432
+tf_models_nightly-2.17.0.dev20240314.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
+tf_models_nightly-2.17.0.dev20240314.dist-info/top_level.txt,sha256=gum2FfO5R4cvjl2-QtP-S1aNmsvIZaFFT6VFzU0f4-g,33
+tf_models_nightly-2.17.0.dev20240314.dist-info/RECORD,,