tf-models-nightly 2.17.0.dev20240707__py2.py3-none-any.whl → 2.17.0.dev20240709__py2.py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
official/recommendation/ranking/configs/config.py

@@ -112,6 +112,11 @@ class ModelConfig(hyperparams.Config):
  module
  dcn_use_bias: Flag to determine whether to use bias for the dcn interaction
  module
+ use_partial_tpu_embedding: Flag to determine whether to use partial tpu
+ embedding layer or not.
+ max_ids_per_chip_per_sample: Maximum number of ids per chip per sample.
+ max_ids_per_table: Maximum number of ids per table.
+ max_unique_ids_per_table: Maximum number of unique ids per table.
  """
  num_dense_features: int = 13
  vocab_sizes: List[int] = dataclasses.field(default_factory=list)
@@ -128,6 +133,10 @@ class ModelConfig(hyperparams.Config):
  dcn_kernel_initializer: str = 'truncated_normal'
  dcn_bias_initializer: str = 'zeros'
  dcn_use_bias: bool = True
+ use_partial_tpu_embedding: bool = True
+ max_ids_per_chip_per_sample: int | None = None
+ max_ids_per_table: Union[int, List[int]] | None = None
+ max_unique_ids_per_table: Union[int, List[int]] | None = None


  @dataclasses.dataclass
@@ -424,6 +433,7 @@ def dlrm_dcn_v2_criteo_tb_config() -> Config:
  dcn_use_bias=True,
  concat_dense=False,
  use_multi_hot=True,
+ use_partial_tpu_embedding=False,
  multi_hot_sizes=multi_hot_sizes,
  ),
  loss=Loss(label_smoothing=0.0),
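
For context, the new ModelConfig fields shown above can be set through the same params-override mechanism the test file further below uses. A minimal sketch, assuming four embedding tables; the vocabulary sizes and limit values are hypothetical placeholders, not tuned recommendations:

    # Hypothetical override; the field names come from the ModelConfig change above.
    params_override = {
        'task': {
            'model': {
                'vocab_sizes': [1000, 500, 250, 100],
                'embedding_dim': 8,
                'use_partial_tpu_embedding': False,  # take the plain TPUEmbedding path
                'max_ids_per_chip_per_sample': 64,
                'max_ids_per_table': [256, 128, 128, 64],       # one limit per table
                'max_unique_ids_per_table': [128, 64, 64, 32],  # one unique-id limit per table
            },
        },
    }
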
official/recommendation/ranking/task.py

@@ -15,7 +15,7 @@
  """Task for the Ranking model."""

  import math
- from typing import Dict, List, Optional, Union
+ from typing import Dict, List, Optional, Union, Tuple

  import tensorflow as tf, tf_keras
  import tensorflow_recommenders as tfrs
@@ -35,8 +35,14 @@ def _get_tpu_embedding_feature_config(
  vocab_sizes: List[int],
  embedding_dim: Union[int, List[int]],
  table_name_prefix: str = 'embedding_table',
- batch_size: Optional[int] = None
- ) -> Dict[str, tf.tpu.experimental.embedding.FeatureConfig]:
+ batch_size: Optional[int] = None,
+ max_ids_per_chip_per_sample: Optional[int] = None,
+ max_ids_per_table: Optional[Union[int, List[int]]] = None,
+ max_unique_ids_per_table: Optional[Union[int, List[int]]] = None,
+ ) -> Tuple[
+ Dict[str, tf.tpu.experimental.embedding.FeatureConfig],
+ Optional[tf.tpu.experimental.embedding.SparseCoreEmbeddingConfig],
+ ]:
  """Returns TPU embedding feature config.

  i'th table config will have vocab size of vocab_sizes[i] and embedding
@@ -47,6 +53,11 @@ def _get_tpu_embedding_feature_config(
  embedding_dim: An integer or a list of embedding table dimensions.
  table_name_prefix: a prefix for embedding tables.
  batch_size: Per-replica batch size.
+ max_ids_per_chip_per_sample: Maximum number of embedding ids per chip per
+ sample.
+ max_ids_per_table: Maximum number of embedding ids per table.
+ max_unique_ids_per_table: Maximum number of unique embedding ids per table.
+
  Returns:
  A dictionary of feature_name, FeatureConfig pairs.
  """
@@ -54,14 +65,49 @@ def _get_tpu_embedding_feature_config(
  if len(vocab_sizes) != len(embedding_dim):
  raise ValueError(
  f'length of vocab_sizes: {len(vocab_sizes)} is not equal to the '
- f'length of embedding_dim: {len(embedding_dim)}')
+ f'length of embedding_dim: {len(embedding_dim)}'
+ )
  elif isinstance(embedding_dim, int):
  embedding_dim = [embedding_dim] * len(vocab_sizes)
  else:
- raise ValueError('embedding_dim is not either a list or an int, got '
- f'{type(embedding_dim)}')
+ raise ValueError(
+ 'embedding_dim is not either a list or an int, got '
+ f'{type(embedding_dim)}'
+ )
+
+ if isinstance(max_ids_per_table, List):
+ if len(vocab_sizes) != len(max_ids_per_table):
+ raise ValueError(
+ f'length of vocab_sizes: {len(vocab_sizes)} is not equal to the '
+ f'length of max_ids_per_table: {len(max_ids_per_table)}'
+ )
+ elif isinstance(max_ids_per_table, int):
+ max_ids_per_table = [max_ids_per_table] * len(vocab_sizes)
+ elif max_ids_per_table is not None:
+ raise ValueError(
+ 'max_ids_per_table is not either a list or an int or None, got '
+ f'{type(max_ids_per_table)}'
+ )
+
+ if isinstance(max_unique_ids_per_table, List):
+ if len(vocab_sizes) != len(max_unique_ids_per_table):
+ raise ValueError(
+ f'length of vocab_sizes: {len(vocab_sizes)} is not equal to the '
+ 'length of max_unique_ids_per_table: '
+ f'{len(max_unique_ids_per_table)}'
+ )
+ elif isinstance(max_unique_ids_per_table, int):
+ max_unique_ids_per_table = [max_unique_ids_per_table] * len(vocab_sizes)
+ elif max_unique_ids_per_table is not None:
+ raise ValueError(
+ 'max_unique_ids_per_table is not either a list or an int or None, '
+ f'got {type(max_unique_ids_per_table)}'
+ )

  feature_config = {}
+ sparsecore_config = None
+ max_ids_per_table_dict = {}
+ max_unique_ids_per_table_dict = {}

  for i, vocab_size in enumerate(vocab_sizes):
  table_config = tf.tpu.experimental.embedding.TableConfig(
@@ -69,15 +115,35 @@ def _get_tpu_embedding_feature_config(
  dim=embedding_dim[i],
  combiner='mean',
  initializer=tf.initializers.TruncatedNormal(
- mean=0.0, stddev=1 / math.sqrt(embedding_dim[i])),
- name=table_name_prefix + '_%02d' % i)
+ mean=0.0, stddev=1 / math.sqrt(embedding_dim[i])
+ ),
+ name=table_name_prefix + '_%02d' % i,
+ )
  feature_config[str(i)] = tf.tpu.experimental.embedding.FeatureConfig(
  name=str(i),
  table=table_config,
  output_shape=[batch_size] if batch_size else None,
  )
+ if max_ids_per_table:
+ max_ids_per_table_dict[str(table_name_prefix + '_%02d' % i)] = (
+ max_ids_per_table[i]
+ )
+ if max_unique_ids_per_table:
+ max_unique_ids_per_table_dict[str(table_name_prefix + '_%02d' % i)] = (
+ max_unique_ids_per_table[i]
+ )

- return feature_config
+ if all((max_ids_per_chip_per_sample, max_ids_per_table,
+ max_unique_ids_per_table)):
+ sparsecore_config = tf.tpu.experimental.embedding.SparseCoreEmbeddingConfig(
+ disable_table_stacking=False,
+ max_ids_per_chip_per_sample=max_ids_per_chip_per_sample,
+ max_ids_per_table=max_ids_per_table_dict,
+ max_unique_ids_per_table=max_unique_ids_per_table_dict,
+ allow_id_dropping=False,
+ )
+
+ return feature_config, sparsecore_config


  class RankingTask(base_task.Task):
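
As the hunks above show, the helper now returns a pair: the FeatureConfig dict as before, plus a SparseCoreEmbeddingConfig that is only built when all three new limits are supplied (otherwise it is None). A minimal illustrative call with placeholder values; `_get_tpu_embedding_feature_config` is module-private, so this mirrors how RankingTask invokes it rather than a public API:

    feature_config, sparsecore_config = _get_tpu_embedding_feature_config(
        vocab_sizes=[1000, 500],
        embedding_dim=8,
        batch_size=128,
        max_ids_per_chip_per_sample=64,
        max_ids_per_table=[256, 128],
        max_unique_ids_per_table=[128, 64],
    )
    # The per-table limits are keyed by table name, e.g.
    # sparsecore_config.max_ids_per_table == {'embedding_table_00': 256,
    #                                         'embedding_table_01': 128}.
    # If any of the three limits is left unset, sparsecore_config is None.
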
@@ -173,25 +239,33 @@ class RankingTask(base_task.Task):
  decay_start_steps=dense_lr_config.decay_start_steps)
  dense_optimizer.learning_rate = dense_lr_callable

- feature_config = _get_tpu_embedding_feature_config(
- embedding_dim=self.task_config.model.embedding_dim,
- vocab_sizes=self.task_config.model.vocab_sizes,
- batch_size=self.task_config.train_data.global_batch_size
- // tf.distribute.get_strategy().num_replicas_in_sync,
+ feature_config, sparse_core_embedding_config = (
+ _get_tpu_embedding_feature_config(
+ embedding_dim=self.task_config.model.embedding_dim,
+ vocab_sizes=self.task_config.model.vocab_sizes,
+ batch_size=self.task_config.train_data.global_batch_size
+ // tf.distribute.get_strategy().num_replicas_in_sync,
+ max_ids_per_chip_per_sample=self.task_config.model.max_ids_per_chip_per_sample,
+ max_ids_per_table=self.task_config.model.max_ids_per_table,
+ max_unique_ids_per_table=self.task_config.model.max_unique_ids_per_table,
+ )
  )

- if self.task_config.model.use_multi_hot:
- embedding_layer = tfrs.layers.embedding.tpu_embedding_layer.TPUEmbedding(
+ # to work around PartialTPUEmbedding issue in v5p and to enable multi hot
+ # features
+ if self.task_config.model.use_partial_tpu_embedding:
+ embedding_layer = tfrs.experimental.layers.embedding.PartialTPUEmbedding(
  feature_config=feature_config,
  optimizer=embedding_optimizer,
  pipeline_execution_with_tensor_core=self.trainer_config.pipeline_sparse_and_dense_execution,
+ size_threshold=self.task_config.model.size_threshold,
  )
  else:
- embedding_layer = tfrs.experimental.layers.embedding.PartialTPUEmbedding(
+ embedding_layer = tfrs.layers.embedding.tpu_embedding_layer.TPUEmbedding(
  feature_config=feature_config,
  optimizer=embedding_optimizer,
  pipeline_execution_with_tensor_core=self.trainer_config.pipeline_sparse_and_dense_execution,
- size_threshold=self.task_config.model.size_threshold,
+ sparse_core_embedding_config=sparse_core_embedding_config,
  )

  if self.task_config.model.interaction == 'dot':
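
The layer choice is now driven by `use_partial_tpu_embedding` rather than `use_multi_hot`: the partial-embedding path keeps the `size_threshold` split between a regular Keras embedding and the TPU embedding, while the plain TPUEmbedding path forwards the SparseCore limits built above. A condensed paraphrase of the branch, not additional API; pipeline arguments are elided and `model_cfg` stands in for `self.task_config.model`:

    if model_cfg.use_partial_tpu_embedding:
        # Tables are partitioned at size_threshold between a normal Keras
        # embedding and the TPU embedding, as before.
        embedding_layer = tfrs.experimental.layers.embedding.PartialTPUEmbedding(
            feature_config=feature_config,
            optimizer=embedding_optimizer,
            size_threshold=model_cfg.size_threshold,
        )
    else:
        # Single TPUEmbedding layer; the per-table limits travel via the
        # SparseCoreEmbeddingConfig returned by the helper.
        embedding_layer = tfrs.layers.embedding.tpu_embedding_layer.TPUEmbedding(
            feature_config=feature_config,
            optimizer=embedding_optimizer,
            sparse_core_embedding_config=sparse_core_embedding_config,
        )
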
official/recommendation/ranking/train_test.py

@@ -35,6 +35,7 @@ def _get_params_override(vocab_sizes,
  concat_dense=True,
  dcn_num_layers=3,
  dcn_low_rank_dim=64,
+ use_partial_tpu_embedding=True,
  ):
  # Update `data_dir` if `synthetic_data=False`.
  data_dir = ''
@@ -53,6 +54,7 @@ def _get_params_override(vocab_sizes,
  'dcn_num_layers': dcn_num_layers,
  'dcn_low_rank_dim': dcn_low_rank_dim,
  'use_multi_hot': use_multi_hot,
+ 'use_partial_tpu_embedding': use_partial_tpu_embedding,
  'multi_hot_sizes': multi_hot_sizes,
  },
  'train_data': {
@@ -90,10 +92,20 @@ class TrainTest(parameterized.TestCase, tf.test.TestCase):
  super().tearDown()

  @parameterized.named_parameters(
- ('DlrmOneDeviceCTL', 'one_device', 'dot', True, True, 3, False),
- ('DlrmOneDevice', 'one_device', 'dot', False, True, 3, False),
- ('DcnOneDeviceCTL', 'one_device', 'cross', True, True, 3, False),
- ('DcnOneDevice', 'one_device', 'cross', False, True, 3, False),
+ ('DlrmOneDeviceCTL', 'one_device', 'dot', True, True, 3, 64, False, True),
+ ('DlrmOneDevice', 'one_device', 'dot', False, True, 3, 64, False, True),
+ (
+ 'DcnOneDeviceCTL',
+ 'one_device',
+ 'cross',
+ True,
+ True,
+ 3,
+ 64,
+ False,
+ True,
+ ),
+ ('DcnOneDevice', 'one_device', 'cross', False, True, 3, 64, False, True),
  (
  'DlrmDcnV2OneDeviceCTL',
  'one_device',
@@ -102,7 +114,8 @@ class TrainTest(parameterized.TestCase, tf.test.TestCase):
  False,
  3,
  64,
- True
+ True,
+ False,
  ),
  (
  'DlrmDcnV2OneDevice',
@@ -112,12 +125,13 @@ class TrainTest(parameterized.TestCase, tf.test.TestCase):
  False,
  3,
  64,
- True
+ True,
+ False,
  ),
- ('DlrmTPUCTL', 'tpu', 'dot', True, True, 3, 64, False),
- ('DlrmTPU', 'tpu', 'dot', False, True, 3, 64, False),
- ('DcnTPUCTL', 'tpu', 'cross', True, True, 3, 64, False),
- ('DcnTPU', 'tpu', 'cross', False, True, 3, 64, False),
+ ('DlrmTPUCTL', 'tpu', 'dot', True, True, 3, 64, False, True),
+ ('DlrmTPU', 'tpu', 'dot', False, True, 3, 64, False, True),
+ ('DcnTPUCTL', 'tpu', 'cross', True, True, 3, 64, False, True),
+ ('DcnTPU', 'tpu', 'cross', False, True, 3, 64, False, True),
  (
  'DlrmDcnV2TPUCTL',
  'tpu',
@@ -126,7 +140,8 @@ class TrainTest(parameterized.TestCase, tf.test.TestCase):
  False,
  3,
  64,
- True
+ True,
+ False,
  ),
  (
  'DlrmDcnV2TPU',
@@ -136,12 +151,13 @@ class TrainTest(parameterized.TestCase, tf.test.TestCase):
  False,
  3,
  64,
- True
+ True,
+ False,
  ),
- ('DlrmMirroredCTL', 'Mirrored', 'dot', True, True, 3, 64, False),
- ('DlrmMirrored', 'Mirrored', 'dot', False, True, 3, 64, False),
- ('DcnMirroredCTL', 'Mirrored', 'cross', True, True, 3, 64, False),
- ('DcnMirrored', 'Mirrored', 'cross', False, True, 3, 64, False),
+ ('DlrmMirroredCTL', 'Mirrored', 'dot', True, True, 3, 64, False, True),
+ ('DlrmMirrored', 'Mirrored', 'dot', False, True, 3, 64, False, True),
+ ('DcnMirroredCTL', 'Mirrored', 'cross', True, True, 3, 64, False, True),
+ ('DcnMirrored', 'Mirrored', 'cross', False, True, 3, 64, False, True),
  (
  'DlrmDcnV2MirroredCTL',
  'Mirrored',
@@ -150,7 +166,8 @@ class TrainTest(parameterized.TestCase, tf.test.TestCase):
  False,
  3,
  64,
- True
+ True,
+ False,
  ),
  (
  'DlrmDcnV2Mirrored',
@@ -160,7 +177,8 @@ class TrainTest(parameterized.TestCase, tf.test.TestCase):
  False,
  3,
  64,
- True
+ True,
+ False,
  ),
  )
  def testTrainEval(
@@ -172,6 +190,7 @@ class TrainTest(parameterized.TestCase, tf.test.TestCase):
  dcn_num_layers=3,
  dcn_low_rank_dim=64,
  use_multi_hot=False,
+ use_partial_tpu_embedding=True,
  ):
  # Set up simple trainer with synthetic data.
  # By default the mode must be `train_and_eval`.
@@ -190,23 +209,45 @@ class TrainTest(parameterized.TestCase, tf.test.TestCase):
  concat_dense=concat_dense,
  dcn_num_layers=dcn_num_layers,
  dcn_low_rank_dim=dcn_low_rank_dim,
+ use_partial_tpu_embedding=use_partial_tpu_embedding,
  )
+
  train.main('unused_args')
  self.assertNotEmpty(
  tf.io.gfile.glob(os.path.join(self._model_dir, 'params.yaml'))
  )

  @parameterized.named_parameters(
- ('DlrmTPUCTL', 'tpu', 'dot', True, True, 3, 64, False),
- ('DlrmTPU', 'tpu', 'dot', False, True, 3, 64, False),
- ('DcnTPUCTL', 'tpu', 'cross', True, True, 3, 64, False),
- ('DcnTPU', 'tpu', 'cross', False, True, 3, 64, False),
- ('DlrmDcnV2TPUCTL', 'tpu', 'multi_layer_dcn', True, False, 3, 64, True),
- ('DlrmDcnV2TPU', 'tpu', 'multi_layer_dcn', False, False, 3, 64, True),
- ('DlrmMirroredCTL', 'Mirrored', 'dot', True, True, 3, 64, False),
- ('DlrmMirrored', 'Mirrored', 'dot', False, True, 3, 64, False),
- ('DcnMirroredCTL', 'Mirrored', 'cross', True, True, 3, 64, False),
- ('DcnMirrored', 'Mirrored', 'cross', False, True, 3, 64, False),
+ ('DlrmTPUCTL', 'tpu', 'dot', True, True, 3, 64, False, True),
+ ('DlrmTPU', 'tpu', 'dot', False, True, 3, 64, False, True),
+ ('DcnTPUCTL', 'tpu', 'cross', True, True, 3, 64, False, True),
+ ('DcnTPU', 'tpu', 'cross', False, True, 3, 64, False, True),
+ (
+ 'DlrmDcnV2TPUCTL',
+ 'tpu',
+ 'multi_layer_dcn',
+ True,
+ False,
+ 3,
+ 64,
+ True,
+ False,
+ ),
+ (
+ 'DlrmDcnV2TPU',
+ 'tpu',
+ 'multi_layer_dcn',
+ False,
+ False,
+ 3,
+ 64,
+ True,
+ False,
+ ),
+ ('DlrmMirroredCTL', 'Mirrored', 'dot', True, True, 3, 64, False, True),
+ ('DlrmMirrored', 'Mirrored', 'dot', False, True, 3, 64, False, True),
+ ('DcnMirroredCTL', 'Mirrored', 'cross', True, True, 3, 64, False, True),
+ ('DcnMirrored', 'Mirrored', 'cross', False, True, 3, 64, False, True),
  (
  'DlrmDcnV2MirroredCTL',
  'Mirrored',
@@ -215,7 +256,8 @@ class TrainTest(parameterized.TestCase, tf.test.TestCase):
  False,
  3,
  64,
- True
+ True,
+ False,
  ),
  (
  'DlrmDcnV2Mirrored',
@@ -225,7 +267,8 @@ class TrainTest(parameterized.TestCase, tf.test.TestCase):
  False,
  3,
  64,
- True
+ True,
+ False,
  ),
  )
  def testTrainThenEval(
@@ -237,6 +280,7 @@ class TrainTest(parameterized.TestCase, tf.test.TestCase):
  dcn_num_layers=3,
  dcn_low_rank_dim=64,
  use_multi_hot=False,
+ use_partial_tpu_embedding=True,
  ):
  # Set up simple trainer with synthetic data.
  vocab_sizes = [40, 12, 11, 13]
@@ -252,6 +296,7 @@ class TrainTest(parameterized.TestCase, tf.test.TestCase):
  dcn_num_layers=dcn_num_layers,
  dcn_low_rank_dim=dcn_low_rank_dim,
  use_multi_hot=use_multi_hot,
+ use_partial_tpu_embedding=use_partial_tpu_embedding,
  )

  default_mode = FLAGS.mode
METADATA

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: tf-models-nightly
- Version: 2.17.0.dev20240707
+ Version: 2.17.0.dev20240709
  Summary: TensorFlow Official Models
  Home-page: https://github.com/tensorflow/models
  Author: Google Inc.
RECORD

@@ -877,12 +877,12 @@ official/recommendation/popen_helper.py,sha256=TMWMwsW1DF15YCJ0RG9pE3wsL6njLu8Ed
  official/recommendation/stat_utils.py,sha256=BjWRO2jzmAJyqeRUw0hMHhQqlyDNrdNvQJKAmbDJ4Rc,3076
  official/recommendation/ranking/__init__.py,sha256=7oiypy0N82PDw9aSdcJBLVoGTd_oRSUOdvuJhMv4leQ,609
  official/recommendation/ranking/common.py,sha256=fUpBln1auJWwvX5BJIYJYxRQDzXPqCzbS6NYAcn0KaQ,3998
- official/recommendation/ranking/task.py,sha256=ovrPMW7uaLNGjUO7AFXjXpNUHN1MZQyLCq3M5bEXc2k,10850
+ official/recommendation/ranking/task.py,sha256=NLoU4ZrCzrxfqg3h-xk_S8kVN1ZedZ-PKjdULmwusQU,13908
  official/recommendation/ranking/task_test.py,sha256=vPN_5oq1tWF3r7GuRTuAppKSF_mP3qoGFzGRoX09ylw,2891
  official/recommendation/ranking/train.py,sha256=_7zC2SVsOOOBN--If0XlWw4gtdaEeTT04PbCQRSzieo,6604
- official/recommendation/ranking/train_test.py,sha256=VlB74V7nPl88TOjLMVqw1w_i-HBTNvvO3X7R8N9y5CA,7839
+ official/recommendation/ranking/train_test.py,sha256=n96JN-gI5wj4cbZNJCwQ9XLViKt0ZAg9f9dtaTpAdV8,8756
  official/recommendation/ranking/configs/__init__.py,sha256=7oiypy0N82PDw9aSdcJBLVoGTd_oRSUOdvuJhMv4leQ,609
- official/recommendation/ranking/configs/config.py,sha256=C6LHjDsH7cEGj--dHB3l8TBSPfmqmsspVnck4X1ogJ8,14278
+ official/recommendation/ranking/configs/config.py,sha256=03KupVFJ9QeslF81Ec7n4VyPn-bVvyZaEd10GQWomVA,14844
  official/recommendation/ranking/configs/config_test.py,sha256=4_W0YUwJ2q0D8HdPdIvpJcAsaySjb_pDMWj26G7T1Ws,1474
  official/recommendation/ranking/data/__init__.py,sha256=7oiypy0N82PDw9aSdcJBLVoGTd_oRSUOdvuJhMv4leQ,609
  official/recommendation/ranking/data/data_pipeline.py,sha256=5GSF1naOl6GdqOVin7_Yx_hprmzaq0WAImHqDxysAoI,7329
@@ -1212,9 +1212,9 @@ tensorflow_models/tensorflow_models_test.py,sha256=nc6A9K53OGqF25xN5St8EiWvdVbda
  tensorflow_models/nlp/__init__.py,sha256=4tA5Pf4qaFwT-fIFOpX7x7FHJpnyJT-5UgOeFYTyMlc,807
  tensorflow_models/uplift/__init__.py,sha256=mqfa55gweOdpKoaQyid4A_4u7xw__FcQeSIF0k_pYmI,999
  tensorflow_models/vision/__init__.py,sha256=zBorY_v5xva1uI-qxhZO3Qh-Dii-Suq6wEYh6hKHDfc,833
- tf_models_nightly-2.17.0.dev20240707.dist-info/AUTHORS,sha256=1dG3fXVu9jlo7bul8xuix5F5vOnczMk7_yWn4y70uw0,337
- tf_models_nightly-2.17.0.dev20240707.dist-info/LICENSE,sha256=WxeBS_DejPZQabxtfMOM_xn8qoZNJDQjrT7z2wG1I4U,11512
- tf_models_nightly-2.17.0.dev20240707.dist-info/METADATA,sha256=VdAhQ49dDpv8r-ycZKtDDrbnnqKvxb3o1jVMmma2JUw,1432
- tf_models_nightly-2.17.0.dev20240707.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
- tf_models_nightly-2.17.0.dev20240707.dist-info/top_level.txt,sha256=gum2FfO5R4cvjl2-QtP-S1aNmsvIZaFFT6VFzU0f4-g,33
- tf_models_nightly-2.17.0.dev20240707.dist-info/RECORD,,
+ tf_models_nightly-2.17.0.dev20240709.dist-info/AUTHORS,sha256=1dG3fXVu9jlo7bul8xuix5F5vOnczMk7_yWn4y70uw0,337
+ tf_models_nightly-2.17.0.dev20240709.dist-info/LICENSE,sha256=WxeBS_DejPZQabxtfMOM_xn8qoZNJDQjrT7z2wG1I4U,11512
+ tf_models_nightly-2.17.0.dev20240709.dist-info/METADATA,sha256=8vfe5XTduG-zExj5iEl3JbJxCIa5_Qo0J7Fuc0TAD6U,1432
+ tf_models_nightly-2.17.0.dev20240709.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
+ tf_models_nightly-2.17.0.dev20240709.dist-info/top_level.txt,sha256=gum2FfO5R4cvjl2-QtP-S1aNmsvIZaFFT6VFzU0f4-g,33
+ tf_models_nightly-2.17.0.dev20240709.dist-info/RECORD,,