tf-models-nightly 2.19.0.dev20250129__py2.py3-none-any.whl → 2.19.0.dev20250131__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- a/official/common/distribute_utils_test.py
+++ b/official/common/distribute_utils_test.py
@@ -43,14 +43,14 @@ class DistributeUtilsTest(tf.test.TestCase):
 
   def test_one_device_strategy_cpu(self):
     ds = distribute_utils.get_distribution_strategy('one_device', num_gpus=0)
-    self.assertEquals(ds.num_replicas_in_sync, 1)
-    self.assertEquals(len(ds.extended.worker_devices), 1)
+    self.assertEqual(ds.num_replicas_in_sync, 1)
+    self.assertEqual(len(ds.extended.worker_devices), 1)
     self.assertIn('CPU', ds.extended.worker_devices[0])
 
   def test_one_device_strategy_gpu(self):
     ds = distribute_utils.get_distribution_strategy('one_device', num_gpus=1)
-    self.assertEquals(ds.num_replicas_in_sync, 1)
-    self.assertEquals(len(ds.extended.worker_devices), 1)
+    self.assertEqual(ds.num_replicas_in_sync, 1)
+    self.assertEqual(len(ds.extended.worker_devices), 1)
     self.assertIn('GPU', ds.extended.worker_devices[0])
 
   def test_mirrored_strategy(self):
@@ -58,8 +58,8 @@ class DistributeUtilsTest(tf.test.TestCase):
     _ = distribute_utils.get_distribution_strategy(num_gpus=0)
     # 5 GPUs.
     ds = distribute_utils.get_distribution_strategy(num_gpus=5)
-    self.assertEquals(ds.num_replicas_in_sync, 5)
-    self.assertEquals(len(ds.extended.worker_devices), 5)
+    self.assertEqual(ds.num_replicas_in_sync, 5)
+    self.assertEqual(len(ds.extended.worker_devices), 5)
     for device in ds.extended.worker_devices:
       self.assertIn('GPU', device)
 
@@ -105,12 +105,13 @@ class DistributeUtilsTest(tf.test.TestCase):
         ds, tf.distribute.TPUStrategy)
 
   def test_invalid_strategy(self):
-    with self.assertRaisesRegexp(
-        ValueError,
-        'distribution_strategy must be a string but got: False. If'):
+    with self.assertRaisesRegex(
+        ValueError, 'distribution_strategy must be a string but got: False. If'
+    ):
       distribute_utils.get_distribution_strategy(False)
-    with self.assertRaisesRegexp(
-        ValueError, 'distribution_strategy must be a string but got: 1'):
+    with self.assertRaisesRegex(
+        ValueError, 'distribution_strategy must be a string but got: 1'
+    ):
       distribute_utils.get_distribution_strategy(1)
 
   def test_get_strategy_scope(self):
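Context for the renames in this file: `assertEquals` and `assertRaisesRegexp` are deprecated `unittest` aliases of `assertEqual` and `assertRaisesRegex` (deprecated since Python 3.2, removed in Python 3.12), so the canonical names keep these tests running on current interpreters. A minimal standalone sketch of the modern spellings; the `SampleTest` class below is illustrative and not part of the package:

```python
# Illustrative only (not from tf-models-nightly): the canonical unittest
# assertion names that replace the removed aliases assertEquals,
# assertRaisesRegexp, and assertRegexpMatches.
import unittest


class SampleTest(unittest.TestCase):

  def test_modern_assertions(self):
    self.assertEqual(1 + 1, 2)            # was: assertEquals
    self.assertRegex('run_2025', r'\d+')  # was: assertRegexpMatches
    # was: assertRaisesRegexp; the second argument is searched as a regex.
    with self.assertRaisesRegex(ValueError, 'invalid literal'):
      int('not a number')


if __name__ == '__main__':
  unittest.main()
```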
--- a/official/projects/detr/tasks/detection.py
+++ b/official/projects/detr/tasks/detection.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 
 """DETR detection task definition."""
-
 from typing import Optional
 
 from absl import logging
@@ -48,25 +47,21 @@ class DetectionTask(base_task.Task):
   def build_model(self):
     """Build DETR model."""
 
-    input_specs = tf_keras.layers.InputSpec(
-        shape=[None] + self._task_config.model.input_size
-    )
+    input_specs = tf_keras.layers.InputSpec(shape=[None] +
+                                            self._task_config.model.input_size)
 
     backbone = backbones.factory.build_backbone(
         input_specs=input_specs,
         backbone_config=self._task_config.model.backbone,
-        norm_activation_config=self._task_config.model.norm_activation,
-    )
-
-    model = detr.DETR(
-        backbone,
-        self._task_config.model.backbone_endpoint_name,
-        self._task_config.model.num_queries,
-        self._task_config.model.hidden_size,
-        self._task_config.model.num_classes,
-        self._task_config.model.num_encoder_layers,
-        self._task_config.model.num_decoder_layers,
-    )
+        norm_activation_config=self._task_config.model.norm_activation)
+
+    model = detr.DETR(backbone,
+                      self._task_config.model.backbone_endpoint_name,
+                      self._task_config.model.num_queries,
+                      self._task_config.model.hidden_size,
+                      self._task_config.model.num_classes,
+                      self._task_config.model.num_encoder_layers,
+                      self._task_config.model.num_decoder_layers)
     return model
 
   def initialize(self, model: tf_keras.Model):
@@ -89,13 +84,12 @@ class DetectionTask(base_task.Task):
     status = ckpt.restore(ckpt_dir_or_file)
     status.expect_partial().assert_existing_objects_matched()
 
-    logging.info(
-        'Finished loading pretrained checkpoint from %s', ckpt_dir_or_file
-    )
+    logging.info('Finished loading pretrained checkpoint from %s',
+                 ckpt_dir_or_file)
 
-  def build_inputs(
-      self, params, input_context: Optional[tf.distribute.InputContext] = None
-  ):
+  def build_inputs(self,
+                   params,
+                   input_context: Optional[tf.distribute.InputContext] = None):
     """Build input dataset."""
     if isinstance(params, coco.COCODataConfig):
       dataset = coco.COCODataLoader(params).load(input_context)
@@ -106,17 +100,14 @@ class DetectionTask(base_task.Task):
       decoder_cfg = params.decoder.get()
       if params.decoder.type == 'simple_decoder':
         decoder = tf_example_decoder.TfExampleDecoder(
-            regenerate_source_id=decoder_cfg.regenerate_source_id
-        )
+            regenerate_source_id=decoder_cfg.regenerate_source_id)
       elif params.decoder.type == 'label_map_decoder':
         decoder = tf_example_label_map_decoder.TfExampleDecoderLabelMap(
             label_map=decoder_cfg.label_map,
-            regenerate_source_id=decoder_cfg.regenerate_source_id,
-        )
+            regenerate_source_id=decoder_cfg.regenerate_source_id)
       else:
-        raise ValueError(
-            'Unknown decoder type: {}!'.format(params.decoder.type)
-        )
+        raise ValueError('Unknown decoder type: {}!'.format(
+            params.decoder.type))
 
       parser = detr_input.Parser(
           class_offset=self._task_config.losses.class_offset,
@@ -127,8 +118,7 @@ class DetectionTask(base_task.Task):
         params,
         dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
         decoder_fn=decoder.decode,
-        parser_fn=parser.parse_fn(params.is_training),
-    )
+        parser_fn=parser.parse_fn(params.is_training))
     dataset = reader.read(input_context=input_context)
 
     return dataset
@@ -187,8 +177,7 @@ class DetectionTask(base_task.Task):
     box_targets = labels['boxes']
 
     cost = self._compute_cost(
-        cls_outputs, box_outputs, cls_targets, box_targets
-    )
+        cls_outputs, box_outputs, cls_targets, box_targets)
 
     _, indices = matchers.hungarian_matching(cost)
     indices = tf.stop_gradient(indices)
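For context on the matching step above: DETR assigns each predicted query to at most one ground-truth box by solving a minimum-cost bipartite assignment over the cost matrix from `_compute_cost`, and `tf.stop_gradient` keeps the discrete assignment itself out of backpropagation. A rough standalone illustration of the same assignment problem using SciPy's `linear_sum_assignment` (the cost values are invented; the package's own `matchers.hungarian_matching` op is TF-native and batched):

```python
# Sketch only: solve the same kind of assignment problem that
# matchers.hungarian_matching solves, using SciPy. Cost values are made up.
import numpy as np
from scipy.optimize import linear_sum_assignment

# Rows = predicted queries, columns = ground-truth boxes; entry (i, j) is
# the matching cost between prediction i and ground truth j.
cost = np.array([[0.9, 0.1, 0.5],
                 [0.4, 0.8, 0.2],
                 [0.3, 0.6, 0.7]])

row_ind, col_ind = linear_sum_assignment(cost)  # minimum-cost matching
print(list(zip(row_ind, col_ind)))   # one-to-one pairs: [(0, 1), (1, 2), (2, 0)]
print(cost[row_ind, col_ind].sum())  # total assignment cost
```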
@@ -199,41 +188,31 @@ class DetectionTask(base_task.Task):
 
     background = tf.equal(cls_targets, 0)
     num_boxes = tf.reduce_sum(
-        tf.cast(tf.logical_not(background), tf.float32), axis=-1
-    )
+        tf.cast(tf.logical_not(background), tf.float32), axis=-1)
 
     # Down-weight background to account for class imbalance.
     xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
-        labels=cls_targets, logits=cls_assigned
-    )
+        labels=cls_targets, logits=cls_assigned)
     cls_loss = self._task_config.losses.lambda_cls * tf.where(
-        background,
-        self._task_config.losses.background_cls_weight * xentropy,
-        xentropy,
-    )
+        background, self._task_config.losses.background_cls_weight * xentropy,
+        xentropy)
     cls_weights = tf.where(
         background,
         self._task_config.losses.background_cls_weight * tf.ones_like(cls_loss),
-        tf.ones_like(cls_loss),
-    )
+        tf.ones_like(cls_loss))
 
     # Box loss is only calculated on non-background class.
     l_1 = tf.reduce_sum(tf.abs(box_assigned - box_targets), axis=-1)
     box_loss = self._task_config.losses.lambda_box * tf.where(
-        background, tf.zeros_like(l_1), l_1
-    )
+        background, tf.zeros_like(l_1), l_1)
 
     # Giou loss is only calculated on non-background class.
-    giou = tf.linalg.diag_part(
-        1.0
-        - box_ops.bbox_generalized_overlap(
-            box_ops.cycxhw_to_yxyx(box_assigned),
-            box_ops.cycxhw_to_yxyx(box_targets),
-        )
-    )
+    giou = tf.linalg.diag_part(1.0 - box_ops.bbox_generalized_overlap(
+        box_ops.cycxhw_to_yxyx(box_assigned),
+        box_ops.cycxhw_to_yxyx(box_targets)
+    ))
     giou_loss = self._task_config.losses.lambda_giou * tf.where(
-        background, tf.zeros_like(giou), giou
-    )
+        background, tf.zeros_like(giou), giou)
 
     # Consider doing all reduce once in train_step to speed up.
     num_boxes_per_replica = tf.reduce_sum(num_boxes)
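The loss block above uses `tf.where` over a `background` mask (class 0) to down-weight classification loss on unmatched queries and to zero the L1 and GIoU box terms for background. A small self-contained sketch of that masking pattern on toy tensors (the shapes and the 0.1 background weight are arbitrary stand-ins for the task config values):

```python
# Toy demonstration of the background masking used in build_losses above.
# Shapes mimic (batch, num_queries); values are arbitrary.
import tensorflow as tf

cls_targets = tf.constant([[0, 3, 0, 1]])  # class 0 is background
logits = tf.random.normal([1, 4, 5])       # 5 classes incl. background
background = tf.equal(cls_targets, 0)

xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=cls_targets, logits=logits)

# Down-weight background classification terms (0.1 stands in for
# losses.background_cls_weight).
cls_loss = tf.where(background, 0.1 * xentropy, xentropy)

# Box-style terms contribute nothing on background queries.
l1 = tf.random.uniform([1, 4])
box_loss = tf.where(background, tf.zeros_like(l1), l1)

# Foreground box count, later used as the normalizer.
num_boxes = tf.reduce_sum(tf.cast(tf.logical_not(background), tf.float32))
print(num_boxes.numpy())  # 2.0: two non-background targets
```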
@@ -241,11 +220,13 @@ class DetectionTask(base_task.Task):
     replica_context = tf.distribute.get_replica_context()
     num_boxes_sum, cls_weights_sum = replica_context.all_reduce(
         tf.distribute.ReduceOp.SUM,
-        [num_boxes_per_replica, cls_weights_per_replica],
-    )
-    cls_loss = tf.math.divide_no_nan(tf.reduce_sum(cls_loss), cls_weights_sum)
-    box_loss = tf.math.divide_no_nan(tf.reduce_sum(box_loss), num_boxes_sum)
-    giou_loss = tf.math.divide_no_nan(tf.reduce_sum(giou_loss), num_boxes_sum)
+        [num_boxes_per_replica, cls_weights_per_replica])
+    cls_loss = tf.math.divide_no_nan(
+        tf.reduce_sum(cls_loss), cls_weights_sum)
+    box_loss = tf.math.divide_no_nan(
+        tf.reduce_sum(box_loss), num_boxes_sum)
+    giou_loss = tf.math.divide_no_nan(
+        tf.reduce_sum(giou_loss), num_boxes_sum)
 
     aux_losses = tf.add_n(aux_losses) if aux_losses else 0.0
 
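On the normalization above: the per-replica loss sums and box counts are all-reduced first so every replica divides by the same global denominators, and `tf.math.divide_no_nan` returns 0 instead of NaN or Inf when a denominator is 0 (for example, a batch containing only background). A quick illustration:

```python
import tensorflow as tf

loss_sum = tf.constant(7.5)

# A batch with no foreground boxes yields a zero denominator; plain division
# would produce inf, while divide_no_nan falls back to 0.
print(tf.math.divide_no_nan(loss_sum, tf.constant(0.0)).numpy())  # 0.0
print(tf.math.divide_no_nan(loss_sum, tf.constant(3.0)).numpy())  # 2.5
```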
@@ -264,8 +245,7 @@ class DetectionTask(base_task.Task):
         annotation_file=self._task_config.annotation_file,
         include_mask=False,
         need_rescale_bboxes=True,
-        per_category_metrics=self._task_config.per_category_metrics,
-    )
+        per_category_metrics=self._task_config.per_category_metrics)
     return metrics
 
   def train_step(self, inputs, model, optimizer, metrics=None):
@@ -355,8 +335,7 @@ class DetectionTask(base_task.Task):
 
     outputs = model(features, training=False)[-1]
     loss, cls_loss, box_loss, giou_loss = self.build_losses(
-        outputs=outputs, labels=labels, aux_losses=model.losses
-    )
+        outputs=outputs, labels=labels, aux_losses=model.losses)
 
     # Multiply for logging.
     # Since we expect the gradient replica sum to happen in the optimizer,
@@ -374,33 +353,25 @@ class DetectionTask(base_task.Task):
     # This is for backward compatibility.
     if 'detection_boxes' not in outputs:
       detection_boxes = box_ops.cycxhw_to_yxyx(
-          outputs['box_outputs']
-      ) * tf.expand_dims(
-          tf.concat(
-              [
-                  labels['image_info'][:, 1:2, 0],
-                  labels['image_info'][:, 1:2, 1],
-                  labels['image_info'][:, 1:2, 0],
-                  labels['image_info'][:, 1:2, 1],
+          outputs['box_outputs']) * tf.expand_dims(
+              tf.concat([
+                  labels['image_info'][:, 1:2, 0], labels['image_info'][:, 1:2,
+                                                                        1],
+                  labels['image_info'][:, 1:2, 0], labels['image_info'][:, 1:2,
+                                                                        1]
               ],
-              axis=1,
-          ),
-          axis=1,
-      )
+                        axis=1),
+              axis=1)
     else:
       detection_boxes = outputs['detection_boxes']
 
-    if 'detection_scores' not in outputs:
-      detection_scores = tf.math.reduce_max(
-          tf.nn.softmax(outputs['cls_outputs'])[:, :, 1:], axis=-1
-      )
-    else:
-      detection_scores = outputs['detection_scores']
+    detection_scores = tf.math.reduce_max(
+        tf.nn.softmax(outputs['cls_outputs'])[:, :, 1:], axis=-1
+    ) if 'detection_scores' not in outputs else outputs['detection_scores']
 
     if 'detection_classes' not in outputs:
-      detection_classes = (
-          tf.math.argmax(outputs['cls_outputs'][:, :, 1:], axis=-1) + 1
-      )
+      detection_classes = tf.math.argmax(
+          outputs['cls_outputs'][:, :, 1:], axis=-1) + 1
     else:
       detection_classes = outputs['detection_classes']
 
@@ -408,12 +379,9 @@ class DetectionTask(base_task.Task):
       num_detections = tf.reduce_sum(
           tf.cast(
               tf.math.greater(
-                  tf.math.reduce_max(outputs['cls_outputs'], axis=-1), 0
-              ),
-              tf.int32,
-          ),
-          axis=-1,
-      )
+                  tf.math.reduce_max(outputs['cls_outputs'], axis=-1), 0),
+              tf.int32),
+          axis=-1)
     else:
       num_detections = outputs['num_detections']
 
@@ -423,7 +391,7 @@ class DetectionTask(base_task.Task):
         'detection_classes': detection_classes,
         'num_detections': num_detections,
         'source_id': labels['id'],
-        'image_info': labels['image_info'],
+        'image_info': labels['image_info']
     }
 
     ground_truths = {
@@ -431,13 +399,13 @@ class DetectionTask(base_task.Task):
         'height': labels['image_info'][:, 0:1, 0],
         'width': labels['image_info'][:, 0:1, 1],
         'num_detections': tf.reduce_sum(
-            tf.cast(tf.math.greater(labels['classes'], 0), tf.int32), axis=-1
-        ),
+            tf.cast(tf.math.greater(labels['classes'], 0), tf.int32), axis=-1),
         'boxes': labels['gt_boxes'],
         'classes': labels['classes'],
-        'is_crowds': labels['is_crowd'],
+        'is_crowds': labels['is_crowd']
     }
-    logs.update({'predictions': predictions, 'ground_truths': ground_truths})
+    logs.update({'predictions': predictions,
+                 'ground_truths': ground_truths})
 
     all_losses = {
         'cls_loss': cls_loss,
@@ -457,8 +425,8 @@ class DetectionTask(base_task.Task):
       state = self.coco_metric
 
     state.update_state(
-        step_outputs['ground_truths'], step_outputs['predictions']
-    )
+        step_outputs['ground_truths'],
+        step_outputs['predictions'])
     return state
 
   def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
--- a/official/recommendation/data_test.py
+++ b/official/recommendation/data_test.py
@@ -190,7 +190,7 @@ class BaseTest(tf.test.TestCase):
         train_examples[l].add((u_raw, i_raw))
         counts[(u_raw, i_raw)] += 1
 
-    self.assertRegexpMatches(md5.hexdigest(), END_TO_END_TRAIN_MD5)
+    self.assertRegex(md5.hexdigest(), END_TO_END_TRAIN_MD5)
 
     num_positives_seen = len(train_examples[True])
     self.assertEqual(producer._train_pos_users.shape[0], num_positives_seen)
@@ -254,7 +254,7 @@ class BaseTest(tf.test.TestCase):
           # from the negatives.
          assert (u_raw, i_raw) not in self.seen_pairs
 
-    self.assertRegexpMatches(md5.hexdigest(), END_TO_END_EVAL_MD5)
+    self.assertRegex(md5.hexdigest(), END_TO_END_EVAL_MD5)
 
   def _test_fresh_randomness(self, constructor_type):
     train_epochs = 5
@@ -300,7 +300,7 @@ class BaseTest(tf.test.TestCase):
         else:
          negative_counts[(u, i)] += 1
 
-    self.assertRegexpMatches(md5.hexdigest(), FRESH_RANDOMNESS_MD5)
+    self.assertRegex(md5.hexdigest(), FRESH_RANDOMNESS_MD5)
 
     # The positive examples should appear exactly once each epoch
     self.assertAllEqual(
--- a/official/utils/misc/model_helpers_test.py
+++ b/official/utils/misc/model_helpers_test.py
@@ -82,7 +82,7 @@ class SyntheticDataTest(tf.test.TestCase):
       for n in range(5):
         inp, lab = sess.run((input_element, label_element))
         self.assertAllClose(inp, [123., 123., 123., 123., 123.])
-        self.assertEquals(lab, 456)
+        self.assertEqual(lab, 456)
 
   def test_generate_only_input_data(self):
     d = model_helpers.generate_synthetic_data(
@@ -111,7 +111,7 @@ class SyntheticDataTest(tf.test.TestCase):
     element = tf.compat.v1.data.make_one_shot_iterator(d).get_next()
     self.assertIn('a', element)
     self.assertIn('b', element)
-    self.assertEquals(len(element['b']), 2)
+    self.assertEqual(len(element['b']), 2)
     self.assertIn('c', element['b'])
     self.assertIn('d', element['b'])
     self.assertNotIn('c', element)
--- a/tf_models_nightly-2.19.0.dev20250129.dist-info/METADATA
+++ b/tf_models_nightly-2.19.0.dev20250131.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: tf-models-nightly
-Version: 2.19.0.dev20250129
+Version: 2.19.0.dev20250131
 Summary: TensorFlow Official Models
 Home-page: https://github.com/tensorflow/models
 Author: Google Inc.
--- a/tf_models_nightly-2.19.0.dev20250129.dist-info/RECORD
+++ b/tf_models_nightly-2.19.0.dev20250131.dist-info/RECORD
@@ -2,7 +2,7 @@ official/__init__.py,sha256=7oiypy0N82PDw9aSdcJBLVoGTd_oRSUOdvuJhMv4leQ,609
 official/common/__init__.py,sha256=Yqe5TjLrAR68lQ5G781dwlbbT6mELRByDipCtr4jQY4,610
 official/common/dataset_fn.py,sha256=MSRNhNGvMnZtjkuxFIZ90vGJgKlr2JeGaEYpz82Y3Ew,1858
 official/common/distribute_utils.py,sha256=dEciUuyBsxfv6T9rvPk_5zcvLyDFv0lPkhkFvyOGe_0,9575
-official/common/distribute_utils_test.py,sha256=ltR8wrIbfoX1miJElPQzZN2aioKX6Hx8-mfzoo-_WGc,4914
+official/common/distribute_utils_test.py,sha256=KSJ8zhiWrbmXzgoNC86gbwXGjnjoxtxou4S3_VFyilw,4908
 official/common/flags.py,sha256=rtCyljUH5M0p5a1CvBnj6tdubZe4XkwzqiOz1ML6F7I,5347
 official/common/registry_imports.py,sha256=ez-j0sOvVyYBHHGM5hNjTF0jqHUbHI0zv-4SHe_AZ4s,843
 official/common/streamz_counters.py,sha256=OtdlnI5aosH_DWP8jx2MGEpjs5AhyMztr5E5qhrC4nE,1057
@@ -555,7 +555,7 @@ official/projects/detr/serving/export_module.py,sha256=3m9LXFG8vVjHv5QZVf4lyrR7L
 official/projects/detr/serving/export_module_test.py,sha256=Y_1tGLg7rKQN7FgmE681m7QUbu2L3pa9pnNkqjQqaMQ,3497
 official/projects/detr/serving/export_saved_model.py,sha256=AuYQCrnWd7n3nupIgAYwtzLtWw0w4QmRZV8GtEOKxGk,3995
 official/projects/detr/tasks/__init__.py,sha256=7oiypy0N82PDw9aSdcJBLVoGTd_oRSUOdvuJhMv4leQ,609
-official/projects/detr/tasks/detection.py,sha256=UHBhtIQktgvg4MqRGHD3PEdQr7om7cjPdAbKQDrl2K8,16107
+official/projects/detr/tasks/detection.py,sha256=6eUHzRYKAsQUvKRoVTkXVk5nJr7_0mpIOfgagYu1qR8,16127
 official/projects/detr/tasks/detection_test.py,sha256=Ao5Taen9-QWMIa1ruiRDXY-ieAeluS8uXjMVF5lg3Lo,6864
 official/projects/maskconver/__init__.py,sha256=7oiypy0N82PDw9aSdcJBLVoGTd_oRSUOdvuJhMv4leQ,609
 official/projects/maskconver/train.py,sha256=XDW0ArwYLcJpT137J3QgBEaoZHb-GE3rGbnKNsSVynI,1397
@@ -902,7 +902,7 @@ official/recommendation/constants.py,sha256=gCMNNJHwX0fUEabCiTJa7tV7KHoXD5wc-jMI
 official/recommendation/create_ncf_data.py,sha256=_UEOulLVbZLY1MN8FukpD0hEN5n030CpeWpUu46gK5I,3999
 official/recommendation/data_pipeline.py,sha256=lE9dc5zLgwXN8wohNTGTClUvFytQSqbmU-Tv0jQ3PMU,37216
 official/recommendation/data_preprocessing.py,sha256=haYd4hoEgTLEfgTDzS037Mbk-IluQzr6d-4RrdyDsNQ,10318
-official/recommendation/data_test.py,sha256=N5w17RumG1YYoZm0obsQUDNIsF0ZfPWT-kfTgv_BgY0,12831
+official/recommendation/data_test.py,sha256=fkPAn16YK3z3UWQRDrKOlLSvVXn2sWKSCM7-R_cMm_s,12807
 official/recommendation/movielens.py,sha256=vET5x3zc8vW9ruZatMO3gRFUdPJjeDFJtHXfWr0bOAI,9725
 official/recommendation/ncf_common.py,sha256=r9P3HH4JQl4Tu3ae3RnNrdSa2ZcH7N7ZXyxCMhJwqY0,12288
 official/recommendation/ncf_input_pipeline.py,sha256=y2DCc5IVBK3LBk9yUVWfwb2dz-DCt2T0suASZI02UxU,6974
@@ -988,7 +988,7 @@ official/utils/flags/flags_test.py,sha256=UFGiGEjyR1wCvbqjdCxhzjGWm0xLrc4kn-zkaE
 official/utils/misc/__init__.py,sha256=7oiypy0N82PDw9aSdcJBLVoGTd_oRSUOdvuJhMv4leQ,609
 official/utils/misc/keras_utils.py,sha256=AxsatfCV3Desrp48_WGxoQsfMO0uA9bcEXFXa7djRqs,7793
 official/utils/misc/model_helpers.py,sha256=E7AHQ-3ppDrtmKUFKAvaEnBAZI2TW3p8Hg8-I4Rvp3w,3370
-official/utils/misc/model_helpers_test.py,sha256=JyH9vycF4nX6PGYuK-IZe53ApvAc6MkVj7p7m8F3ckw,4559
+official/utils/misc/model_helpers_test.py,sha256=40paIH7IzeFa1OCSXUSmfMDAJYTYj3C2BDAR1t1b9fU,4557
 official/utils/testing/__init__.py,sha256=7oiypy0N82PDw9aSdcJBLVoGTd_oRSUOdvuJhMv4leQ,609
 official/utils/testing/integration.py,sha256=qo00sSG1fC65wI5nVZ6qm2Iek8CBShpMODgt2IdjPsE,2220
 official/utils/testing/mock_task.py,sha256=Lag4UZpJlhHu0nZy2HG-7Rcnkrg0Xv6mW5zj18_a7Ek,3312
@@ -1248,9 +1248,9 @@ tensorflow_models/tensorflow_models_test.py,sha256=nc6A9K53OGqF25xN5St8EiWvdVbda
 tensorflow_models/nlp/__init__.py,sha256=4tA5Pf4qaFwT-fIFOpX7x7FHJpnyJT-5UgOeFYTyMlc,807
 tensorflow_models/uplift/__init__.py,sha256=mqfa55gweOdpKoaQyid4A_4u7xw__FcQeSIF0k_pYmI,999
 tensorflow_models/vision/__init__.py,sha256=zBorY_v5xva1uI-qxhZO3Qh-Dii-Suq6wEYh6hKHDfc,833
-tf_models_nightly-2.19.0.dev20250129.dist-info/AUTHORS,sha256=1dG3fXVu9jlo7bul8xuix5F5vOnczMk7_yWn4y70uw0,337
-tf_models_nightly-2.19.0.dev20250129.dist-info/LICENSE,sha256=WxeBS_DejPZQabxtfMOM_xn8qoZNJDQjrT7z2wG1I4U,11512
-tf_models_nightly-2.19.0.dev20250129.dist-info/METADATA,sha256=95ZUPaAhvpDnBFQr2f7G5M6ehijusBpEd-EThxNmCIE,1432
-tf_models_nightly-2.19.0.dev20250129.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
-tf_models_nightly-2.19.0.dev20250129.dist-info/top_level.txt,sha256=gum2FfO5R4cvjl2-QtP-S1aNmsvIZaFFT6VFzU0f4-g,33
-tf_models_nightly-2.19.0.dev20250129.dist-info/RECORD,,
+tf_models_nightly-2.19.0.dev20250131.dist-info/AUTHORS,sha256=1dG3fXVu9jlo7bul8xuix5F5vOnczMk7_yWn4y70uw0,337
+tf_models_nightly-2.19.0.dev20250131.dist-info/LICENSE,sha256=WxeBS_DejPZQabxtfMOM_xn8qoZNJDQjrT7z2wG1I4U,11512
+tf_models_nightly-2.19.0.dev20250131.dist-info/METADATA,sha256=8DTSuiOFEreTUsiFrAK1BugRzgjHpDPVUHLYrgK8UWA,1432
+tf_models_nightly-2.19.0.dev20250131.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
+tf_models_nightly-2.19.0.dev20250131.dist-info/top_level.txt,sha256=gum2FfO5R4cvjl2-QtP-S1aNmsvIZaFFT6VFzU0f4-g,33
+tf_models_nightly-2.19.0.dev20250131.dist-info/RECORD,,