tf-models-nightly 2.16.0.dev20240223__py2.py3-none-any.whl → 2.16.0.dev20240225__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- official/vision/ops/augment.py +2 -1
- official/vision/ops/augment_test.py +8 -4
- official/vision/serving/export_base.py +28 -44
- official/vision/serving/image_classification.py +35 -0
- official/vision/serving/image_classification_test.py +80 -20
- {tf_models_nightly-2.16.0.dev20240223.dist-info → tf_models_nightly-2.16.0.dev20240225.dist-info}/METADATA +1 -1
- {tf_models_nightly-2.16.0.dev20240223.dist-info → tf_models_nightly-2.16.0.dev20240225.dist-info}/RECORD +11 -11
- {tf_models_nightly-2.16.0.dev20240223.dist-info → tf_models_nightly-2.16.0.dev20240225.dist-info}/AUTHORS +0 -0
- {tf_models_nightly-2.16.0.dev20240223.dist-info → tf_models_nightly-2.16.0.dev20240225.dist-info}/LICENSE +0 -0
- {tf_models_nightly-2.16.0.dev20240223.dist-info → tf_models_nightly-2.16.0.dev20240225.dist-info}/WHEEL +0 -0
- {tf_models_nightly-2.16.0.dev20240223.dist-info → tf_models_nightly-2.16.0.dev20240225.dist-info}/top_level.txt +0 -0
official/vision/ops/augment.py CHANGED
@@ -665,6 +665,7 @@ def _fill_rectangle_video(image,
   image_time = tf.shape(image)[0]
   image_height = tf.shape(image)[1]
   image_width = tf.shape(image)[2]
+  image_channels = tf.shape(image)[3]
 
   lower_pad = tf.maximum(0, center_height - half_height)
   upper_pad = tf.maximum(0, image_height - center_height - half_height)
@@ -681,7 +682,7 @@ def _fill_rectangle_video(image,
       padding_dims,
       constant_values=1)
   mask = tf.expand_dims(mask, -1)
-  mask = tf.tile(mask, [1, 1, 1, 3])
+  mask = tf.tile(mask, [1, 1, 1, image_channels])
 
   if replace is None:
     fill = tf.random.normal(tf.shape(image), dtype=image.dtype)
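The fix reads the channel count from the video tensor instead of hard-coding it in the `tf.tile` multiples, so the cutout mask lines up with inputs that have more than three channels. A minimal standalone sketch of the tiling behavior (the tensor shapes here are illustrative, not taken from the package):

import tensorflow as tf

# A [time, height, width, channels] video; the last dimension no longer
# has to be 3.
video = tf.random.normal([8, 32, 32, 4])
image_channels = tf.shape(video)[3]              # read from the input
mask = tf.ones([8, 32, 32])                      # 1 = keep, 0 = cut out
mask = tf.expand_dims(mask, -1)                  # -> [8, 32, 32, 1]
mask = tf.tile(mask, [1, 1, 1, image_channels])  # -> [8, 32, 32, 4]
masked = video * mask                            # shapes agree elementwise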
official/vision/ops/augment_test.py CHANGED
@@ -479,12 +479,14 @@ class MixupAndCutmixTest(tf.test.TestCase, parameterized.TestCase):
     self.assertAllGreaterEqual(aug_labels, label_smoothing / num_classes -
                                1e4)  # With tolerance
 
-  def test_mixup_changes_video(self):
+  @parameterized.product(num_channels=[3, 4])
+  def test_mixup_changes_video(self, num_channels: int):
     batch_size = 12
     num_classes = 1000
     label_smoothing = 0.1
 
-    images = tf.random.normal((batch_size, 8, 224, 224, 3), dtype=tf.float32)
+    images = tf.random.normal(
+        (batch_size, 8, 224, 224, num_channels), dtype=tf.float32)
     labels = tf.range(batch_size)
     augmenter = augment.MixupAndCutmix(
         mixup_alpha=1., cutmix_alpha=0., num_classes=num_classes)
@@ -500,12 +502,14 @@ class MixupAndCutmixTest(tf.test.TestCase, parameterized.TestCase):
                                1e4)  # With tolerance
     self.assertFalse(tf.math.reduce_all(images == aug_images))
 
-  def test_cutmix_changes_video(self):
+  @parameterized.product(num_channels=[3, 4])
+  def test_cutmix_changes_video(self, num_channels: int):
     batch_size = 12
     num_classes = 1000
     label_smoothing = 0.1
 
-    images = tf.random.normal((batch_size, 8, 224, 224, 3), dtype=tf.float32)
+    images = tf.random.normal(
+        (batch_size, 8, 224, 224, num_channels), dtype=tf.float32)
     labels = tf.range(batch_size)
     augmenter = augment.MixupAndCutmix(
         mixup_alpha=0., cutmix_alpha=1., num_classes=num_classes)
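`parameterized.product`, added above both video tests, expands the cross-product of its keyword arguments into separate test cases, so each test now runs once with 3 channels and once with 4. A minimal sketch of the mechanism, assuming absl's `parameterized` as imported in the test file:

from absl.testing import parameterized
import tensorflow as tf


class ChannelsTest(tf.test.TestCase, parameterized.TestCase):

  @parameterized.product(num_channels=[3, 4])
  def test_last_dim(self, num_channels: int):
    # Runs twice: once with num_channels=3 and once with num_channels=4.
    video = tf.zeros((2, 8, 16, 16, num_channels))
    self.assertEqual(video.shape[-1], num_channels)


if __name__ == '__main__':
  tf.test.main()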
official/vision/serving/export_base.py CHANGED
@@ -125,28 +125,20 @@ class ExportModule(export_base.ExportModule, metaclass=abc.ABCMeta):
   @tf.function
   def inference_from_image_bytes(self, inputs: tf.Tensor):
     with tf.device('cpu:0'):
-      [14 lines not recoverable from the extracted diff]
-        images = tf.stack(images)
-      else:
-        images = []
-        # Need to use for loop instead of enumerate as enumerate is not well
-        # supported in the exported savedmodel.
-        for i in range(len(inputs)):  # pylint: disable=range-len-with-index-access
-          images.append(tf.cast(self._decode_image(inputs[i]), tf.uint8))
-        images = tf.ragged.stack(images)
+      images = tf.nest.map_structure(
+          tf.identity,
+          tf.map_fn(
+              self._decode_image,
+              elems=inputs,
+              fn_output_signature=tf.TensorSpec(
+                  shape=[None] * len(self._input_image_size)
+                  + [self._num_channels],
+                  dtype=tf.uint8,
+              ),
+              parallel_iterations=32,
+          ),
+      )
+      images = tf.stack(images)
     return self.serve(images)
 
   @tf.function
@@ -154,28 +146,20 @@ class ExportModule(export_base.ExportModule, metaclass=abc.ABCMeta):
       self, inputs: tf.Tensor
   ) -> Mapping[str, tf.Tensor]:
     with tf.device('cpu:0'):
-      [14 lines not recoverable from the extracted diff]
-        images = tf.stack(images)
-      else:
-        images = []
-        # Need to use for loop instead of enumerate as enumerate is not well
-        # supported in the exported savedmodel.
-        for i in range(len(inputs)):  # pylint: disable=range-len-with-index-access
-          images.append(tf.cast(self._decode_tf_example(inputs[i]), tf.uint8))
-        images = tf.ragged.stack(images)
+      images = tf.nest.map_structure(
+          tf.identity,
+          tf.map_fn(
+              self._decode_tf_example,
+              elems=inputs,
+              fn_output_signature=tf.TensorSpec(
+                  shape=[None] * len(self._input_image_size)
+                  + [self._num_channels],
+                  dtype=tf.uint8,
+              ),
+              parallel_iterations=32,
+          ),
+      )
+      images = tf.stack(images)
     return self.serve(images)
 
   def get_inference_signatures(self, function_keys: Dict[Text, Text]):
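Both signatures now decode with a single `tf.map_fn` instead of a Python loop that was unrolled at trace time (and fell back to `tf.ragged.stack` when the batch size was unknown); `fn_output_signature` declares the per-element shape and dtype up front. A simplified sketch of the pattern, where `decode_image` is a stand-in for the module's `_decode_image` (not the real method) and is assumed to always yield a fixed-size image:

import tensorflow as tf


def decode_image(encoded: tf.Tensor) -> tf.Tensor:
  # Stand-in decoder: decode bytes, then resize so every element comes out
  # with the same static shape.
  image = tf.io.decode_image(encoded, channels=3, expand_animations=False)
  image.set_shape([None, None, 3])
  image = tf.image.resize(tf.cast(image, tf.float32), [224, 224])
  return tf.cast(image, tf.uint8)


@tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
def decode_batch(inputs: tf.Tensor) -> tf.Tensor:
  # The mapped function is traced once and applied per element, so the
  # exported graph stays the same size for any batch size.
  return tf.map_fn(
      decode_image,
      elems=inputs,
      fn_output_signature=tf.TensorSpec([224, 224, 3], tf.uint8),
      parallel_iterations=32,
  )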
official/vision/serving/image_classification.py CHANGED
@@ -53,6 +53,41 @@ class ClassificationModule(export_base.ExportModule):
         image, offset=preprocess_ops.MEAN_RGB, scale=preprocess_ops.STDDEV_RGB)
     return image
 
+  def _decode_image(self, encoded_image_bytes: str) -> tf.Tensor:
+    """Decodes an image bytes to an image tensor.
+
+    Use `tf.image.decode_image` to decode an image if input is expected to be 2D
+    image; otherwise use `tf.io.decode_raw` to convert the raw bytes to tensor
+    and reshape it to desire shape.
+
+    Args:
+      encoded_image_bytes: An encoded image string to be decoded.
+
+    Returns:
+      A decoded image tensor.
+    """
+    if len(self._input_image_size) == 2:
+      # Decode an image if 2D input is expected.
+      image_tensor = tf.image.decode_image(
+          encoded_image_bytes, channels=self._num_channels
+      )
+      image_tensor.set_shape((None, None, self._num_channels))
+      # Resize image to input_size to support varible image resolutions in a
+      # batch for tf_example and image_bytes input type.
+      image_tensor = tf.image.resize(
+          tf.cast(image_tensor, tf.float32),
+          self._input_image_size,
+          method=tf.image.ResizeMethod.BILINEAR,
+      )
+      image_tensor = tf.cast(image_tensor, tf.uint8)
+    else:
+      # Convert raw bytes into a tensor and reshape it, if not 2D input.
+      image_tensor = tf.io.decode_raw(encoded_image_bytes, out_type=tf.uint8)
+      image_tensor = tf.reshape(
+          image_tensor, self._input_image_size + [self._num_channels]
+      )
+    return image_tensor
+
   def serve(self, images):
     """Cast image to float and run inference.
 
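The new `_decode_image` override takes one of two paths: for a 2D input size it decodes the bytes and bilinearly resizes them to the configured resolution, which is what lets byte inputs of different resolutions share one batch, while for higher-rank inputs it reinterprets the payload as raw uint8 and reshapes it. A small illustrative sketch of both branches, with the sizes hard-coded rather than read from the module:

import numpy as np
import tensorflow as tf

# 2D branch: decode arbitrary-resolution PNG/JPEG bytes, then resize to the
# fixed input size so mixed resolutions can be stacked into one batch.
png = tf.io.encode_png(tf.zeros([196, 196, 3], dtype=tf.uint8))
image = tf.image.decode_image(png, channels=3)
image.set_shape((None, None, 3))
image = tf.image.resize(tf.cast(image, tf.float32), [224, 224])
image = tf.cast(image, tf.uint8)               # -> [224, 224, 3]

# Non-2D branch: treat the payload as raw uint8 and reshape; the byte count
# must equal the product of the target shape.
raw = np.zeros((8, 224, 224, 3), dtype=np.uint8).tobytes()
video = tf.io.decode_raw(raw, out_type=tf.uint8)
video = tf.reshape(video, [8, 224, 224, 3])    # -> [8, 224, 224, 3]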
official/vision/serving/image_classification_test.py CHANGED
@@ -29,46 +29,50 @@ from official.vision.serving import image_classification
 
 class ImageClassificationExportTest(tf.test.TestCase, parameterized.TestCase):
 
-  def _get_classification_module(self, input_type):
+  def _get_classification_module(self, input_type, batch_size=1):
     params = exp_factory.get_exp_config('resnet_imagenet')
     params.task.model.backbone.resnet.model_id = 18
     classification_module = image_classification.ClassificationModule(
         params,
-        batch_size=1,
+        batch_size=batch_size,
         input_image_size=[224, 224],
-        input_type=input_type)
+        input_type=input_type,
+    )
     return classification_module
 
   def _export_from_module(self, module, input_type, save_directory):
     signatures = module.get_inference_signatures(
-        {input_type: 'serving_default'})
-    tf.saved_model.save(module,
-                        save_directory,
-                        signatures=signatures)
+        {input_type: 'serving_default'}
+    )
+    tf.saved_model.save(module, save_directory, signatures=signatures)
 
-  def _get_dummy_input(self, input_type):
+  def _get_dummy_input(self, input_type, image_size=224):
     """Get dummy input for the given input type."""
 
     if input_type == 'image_tensor':
-      return tf.zeros((1, 224, 224, 3), dtype=np.uint8)
+      return tf.zeros((1, image_size, image_size, 3), dtype=np.uint8)
     elif input_type == 'image_bytes':
-      image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
+      image = Image.fromarray(
+          np.zeros((image_size, image_size, 3), dtype=np.uint8)
+      )
       byte_io = io.BytesIO()
       image.save(byte_io, 'PNG')
       return [byte_io.getvalue()]
     elif input_type == 'tf_example':
-      image_tensor = tf.zeros((224, 224, 3), dtype=tf.uint8)
+      image_tensor = tf.zeros((image_size, image_size, 3), dtype=tf.uint8)
       encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).numpy()
       example = tf.train.Example(
           features=tf.train.Features(
               feature={
-                  'image/encoded':
-                      tf.train.Feature(
-                          bytes_list=tf.train.BytesList(value=[encoded_jpeg]))
-              })).SerializeToString()
+                  'image/encoded': tf.train.Feature(
+                      bytes_list=tf.train.BytesList(value=[encoded_jpeg])
+                  ),
+              }
+          )
+      ).SerializeToString()
       return [example]
     elif input_type == 'tflite':
-      return tf.zeros((1, 224, 224, 3), dtype=np.float32)
+      return tf.zeros((1, image_size, image_size, 3), dtype=np.float32)
 
   @parameterized.parameters(
       {'input_type': 'image_tensor'},
@@ -86,10 +90,14 @@ class ImageClassificationExportTest(tf.test.TestCase, parameterized.TestCase):
     self._export_from_module(module, input_type, tmp_dir)
 
     self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'saved_model.pb')))
-    self.assertTrue(
-        os.path.join(tmp_dir, 'variables', 'variables.index'))
-    self.assertTrue(
-        os.path.join(tmp_dir, 'variables', 'variables.data-00000-of-00001'))
+    self.assertTrue(
+        os.path.exists(os.path.join(tmp_dir, 'variables', 'variables.index'))
+    )
+    self.assertTrue(
+        os.path.exists(
+            os.path.join(tmp_dir, 'variables', 'variables.data-00000-of-00001')
+        )
+    )
 
     imported = tf.saved_model.load(tmp_dir)
     classification_fn = imported.signatures['serving_default']
@@ -101,6 +109,58 @@ class ImageClassificationExportTest(tf.test.TestCase, parameterized.TestCase):
           tf.map_fn(
               module._build_inputs,
               elems=tf.zeros((1, 224, 224, 3), dtype=tf.uint8),
+              fn_output_signature=tf.TensorSpec(
+                  shape=[224, 224, 3], dtype=tf.float32
+              ),
+          ),
+      )
+    else:
+      processed_images = images
+    expected_logits = module.model(processed_images, training=False)
+    expected_prob = tf.nn.softmax(expected_logits)
+    out = classification_fn(tf.constant(images))
+
+    # The imported model should contain any trackable attrs that the original
+    # model had.
+    self.assertTrue(hasattr(imported.model, 'test_trackable'))
+    self.assertAllClose(out['logits'].numpy(), expected_logits.numpy())
+    self.assertAllClose(out['probs'].numpy(), expected_prob.numpy())
+
+  @parameterized.parameters(
+      {'input_type': 'image_bytes'},
+      {'input_type': 'tf_example'},
+  )
+  def test_multi_size_images_inference(self, input_type='image_tensor'):
+    tmp_dir = self.get_temp_dir()
+    module = self._get_classification_module(input_type, batch_size=None)
+    # Test that the model restores any attrs that are trackable objects
+    # (eg: tables, resource variables, keras models/layers, tf.hub modules).
+    module.model.test_trackable = tf_keras.layers.InputLayer(input_shape=(4,))
+
+    self._export_from_module(module, input_type, tmp_dir)
+
+    self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'saved_model.pb')))
+    self.assertTrue(
+        os.path.exists(os.path.join(tmp_dir, 'variables', 'variables.index'))
+    )
+    self.assertTrue(
+        os.path.exists(
+            os.path.join(tmp_dir, 'variables', 'variables.data-00000-of-00001')
+        )
+    )
+
+    imported = tf.saved_model.load(tmp_dir)
+    classification_fn = imported.signatures['serving_default']
+
+    images = self._get_dummy_input(input_type, image_size=224)
+    images += self._get_dummy_input(input_type, image_size=196)
+
+    if input_type != 'tflite':
+      processed_images = tf.nest.map_structure(
+          tf.stop_gradient,
+          tf.map_fn(
+              module._build_inputs,
+              elems=tf.zeros((2, 224, 224, 3), dtype=tf.uint8),
               fn_output_signature=tf.TensorSpec(
                   shape=[224, 224, 3], dtype=tf.float32)))
     else:
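The new test only works because the module is built with `batch_size=None` and, per the `_decode_image` change above, every decoded image is resized to 224x224 before stacking. A hypothetical client-side call against such an export (the path is a placeholder; the output keys mirror the test's `logits` and `probs`):

import io

import numpy as np
import tensorflow as tf
from PIL import Image


def png_bytes(size: int) -> bytes:
  # Encode an all-zeros RGB image of the given resolution as PNG.
  image = Image.fromarray(np.zeros((size, size, 3), dtype=np.uint8))
  buffer = io.BytesIO()
  image.save(buffer, 'PNG')
  return buffer.getvalue()


# Assumes a model exported with input_type='image_bytes' and batch_size=None,
# as in test_multi_size_images_inference above.
imported = tf.saved_model.load('/tmp/classification_export')
classification_fn = imported.signatures['serving_default']
outputs = classification_fn(tf.constant([png_bytes(224), png_bytes(196)]))
print(outputs['probs'].shape)  # (2, num_classes)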
{tf_models_nightly-2.16.0.dev20240223.dist-info → tf_models_nightly-2.16.0.dev20240225.dist-info}/RECORD RENAMED
@@ -1060,8 +1060,8 @@ official/vision/ops/anchor.py,sha256=ruGKslsQid6A0yfiJuG6sf_JEbJuHHpWL77D9s3SNx4
 official/vision/ops/anchor_generator.py,sha256=293RH42qBk8zMtfiJovtBJ6Hn9yi4Ln-Az-sGXNAOR0,7274
 official/vision/ops/anchor_generator_test.py,sha256=grVbHuxlu1W7xbjMErV3q0ARxCesHN6q-7LvLcCi-_4,5296
 official/vision/ops/anchor_test.py,sha256=OQPiWHNUkQi3KaMWzOYd9HuqJa9vQ6Za4_6tf79X0qY,7633
-official/vision/ops/augment.py,sha256=
-official/vision/ops/augment_test.py,sha256=
+official/vision/ops/augment.py,sha256=20K-OWV_GLbpzv8sY5rBe6x3H5MIRbTlDWOPTfNlGeY,108290
+official/vision/ops/augment_test.py,sha256=9QISeqJ79JgkI2-Hn4pMxPRRin3mbOd7mSJ4v7S8W6E,25881
 official/vision/ops/box_matcher.py,sha256=AvZd7CUUZnT4FwETLyVz3Uxb3gO-o94OwlZrvm7CtX0,9067
 official/vision/ops/box_matcher_test.py,sha256=YdxaTITaUyKBA27Pss5MZKF2ibBfSu879222c7lKngU,2438
 official/vision/ops/box_ops.py,sha256=FRKLYzVYd3zFqEYX6Ql4Pr39csvR2Vc-OQv-WxhegFM,34618
@@ -1081,7 +1081,7 @@ official/vision/ops/target_gather_test.py,sha256=yiTZg7w4HYh19pw9yIDppvenscp8MSB
 official/vision/serving/__init__.py,sha256=YlRnCfAvBWlB7gn-Dz32dxVIDBCzxH8PhdM14kHxPgs,702
 official/vision/serving/detection.py,sha256=uZY4m3_KqkW5dLtFroN3TQuHPUQUXhP4iY9CfpW7UwY,10765
 official/vision/serving/detection_test.py,sha256=s3ueCplPII8Am7tPSWcUv9KUcAqh1AWb6X91_B6qZIM,9165
-official/vision/serving/export_base.py,sha256=
+official/vision/serving/export_base.py,sha256=ah8Cuh_cxpptCpRAjptvA00a-yWgd-KokLk3UBLdVt4,7363
 official/vision/serving/export_base_v2.py,sha256=GYIqt-xaOv4UztAKjx-acD-9i2pjftMw46DWRMy7Bsk,2741
 official/vision/serving/export_base_v2_test.py,sha256=khuY14W9Oi8LGlv_CvMwFiNnFbgpPVzvvD_hugf1_lk,2880
 official/vision/serving/export_module_factory.py,sha256=TRnqqLkGPVhfDFzsUC6dQIfjGA3Ctv7vSLiHkyOc6fg,3549
@@ -1095,8 +1095,8 @@ official/vision/serving/export_tfhub_lib.py,sha256=BZC4XSbly0DLDSlWnphjWaD0Q6SOt
 official/vision/serving/export_tflite.py,sha256=OhIVHrcDvB83p4WroheWcTmeYArMV9TJcgUVJWytWIc,5115
 official/vision/serving/export_tflite_lib.py,sha256=2AWkyEsLvMBE19m2WAa0IpyFsHV8sIR4Gvcv5ZQWbmg,7272
 official/vision/serving/export_utils.py,sha256=8mJb1MF_6kk3lbqZOZq2Lwu4A2L1KWxjnWnV_ZpYlVI,4881
-official/vision/serving/image_classification.py,sha256=
-official/vision/serving/image_classification_test.py,sha256=
+official/vision/serving/image_classification.py,sha256=Pl2TG7Kjq3-DXh-MywjXQ1MvUmQQLWIer6NgA66iAMM,4300
+official/vision/serving/image_classification_test.py,sha256=qZmuiQewptSQdY2iQEkx8rHjULybgADuXsQ84SjLCok,6759
 official/vision/serving/semantic_segmentation.py,sha256=1RmEOpYz-HjB-VKa3k9p3gZj3h_3ob5d-3RO9_cXfH0,4160
 official/vision/serving/semantic_segmentation_test.py,sha256=QVx12ciMA6T20d3wuKu8pe6ekEyPj8aNbKbRKxOgRik,5638
 official/vision/serving/video_classification.py,sha256=s37SdFoASmX0b3MubTpMdebPOvegx0Nj7yFogu5rYXE,6884
@@ -1158,9 +1158,9 @@ tensorflow_models/__init__.py,sha256=etxw45SHxuwFCRX5qGxGMP83II0JfJulzNl5GSNJvhw
 tensorflow_models/tensorflow_models_test.py,sha256=AxUYUdiQn416UR7jg0h6rmv688esvlKDfpyDCIQkF18,1395
 tensorflow_models/nlp/__init__.py,sha256=4tA5Pf4qaFwT-fIFOpX7x7FHJpnyJT-5UgOeFYTyMlc,807
 tensorflow_models/vision/__init__.py,sha256=zBorY_v5xva1uI-qxhZO3Qh-Dii-Suq6wEYh6hKHDfc,833
-tf_models_nightly-2.16.0.dev20240223.dist-info/AUTHORS,sha256=1dG3fXVu9jlo7bul8xuix5F5vOnczMk7_yWn4y70uw0,337
-tf_models_nightly-2.16.0.dev20240223.dist-info/LICENSE,sha256=WxeBS_DejPZQabxtfMOM_xn8qoZNJDQjrT7z2wG1I4U,11512
-tf_models_nightly-2.16.0.dev20240223.dist-info/METADATA,sha256=
-tf_models_nightly-2.16.0.dev20240223.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
-tf_models_nightly-2.16.0.dev20240223.dist-info/top_level.txt,sha256=gum2FfO5R4cvjl2-QtP-S1aNmsvIZaFFT6VFzU0f4-g,33
-tf_models_nightly-2.16.0.dev20240223.dist-info/RECORD,,
+tf_models_nightly-2.16.0.dev20240225.dist-info/AUTHORS,sha256=1dG3fXVu9jlo7bul8xuix5F5vOnczMk7_yWn4y70uw0,337
+tf_models_nightly-2.16.0.dev20240225.dist-info/LICENSE,sha256=WxeBS_DejPZQabxtfMOM_xn8qoZNJDQjrT7z2wG1I4U,11512
+tf_models_nightly-2.16.0.dev20240225.dist-info/METADATA,sha256=G4ySKJnIDr-Te1AAJO0ABym3d7WfOagwR4n0y-GMAJI,1432
+tf_models_nightly-2.16.0.dev20240225.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
+tf_models_nightly-2.16.0.dev20240225.dist-info/top_level.txt,sha256=gum2FfO5R4cvjl2-QtP-S1aNmsvIZaFFT6VFzU0f4-g,33
+tf_models_nightly-2.16.0.dev20240225.dist-info/RECORD,,
{tf_models_nightly-2.16.0.dev20240223.dist-info → tf_models_nightly-2.16.0.dev20240225.dist-info}/AUTHORS RENAMED
File without changes
{tf_models_nightly-2.16.0.dev20240223.dist-info → tf_models_nightly-2.16.0.dev20240225.dist-info}/LICENSE RENAMED
File without changes
{tf_models_nightly-2.16.0.dev20240223.dist-info → tf_models_nightly-2.16.0.dev20240225.dist-info}/WHEEL RENAMED
File without changes
{tf_models_nightly-2.16.0.dev20240223.dist-info → tf_models_nightly-2.16.0.dev20240225.dist-info}/top_level.txt RENAMED
File without changes