tf-models-nightly 2.18.0.dev20240905__py2.py3-none-any.whl → 2.18.0.dev20240906__py2.py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- official/nlp/modeling/networks/mobile_bert_encoder.py +0 -30
- official/vision/modeling/backbones/mobilenet.py +72 -0
- official/vision/modeling/backbones/mobilenet_test.py +11 -0
- official/vision/serving/semantic_segmentation.py +5 -2
- official/vision/serving/semantic_segmentation_test.py +70 -6
- {tf_models_nightly-2.18.0.dev20240905.dist-info → tf_models_nightly-2.18.0.dev20240906.dist-info}/METADATA +1 -1
- {tf_models_nightly-2.18.0.dev20240905.dist-info → tf_models_nightly-2.18.0.dev20240906.dist-info}/RECORD +11 -11
- {tf_models_nightly-2.18.0.dev20240905.dist-info → tf_models_nightly-2.18.0.dev20240906.dist-info}/AUTHORS +0 -0
- {tf_models_nightly-2.18.0.dev20240905.dist-info → tf_models_nightly-2.18.0.dev20240906.dist-info}/LICENSE +0 -0
- {tf_models_nightly-2.18.0.dev20240905.dist-info → tf_models_nightly-2.18.0.dev20240906.dist-info}/WHEEL +0 -0
- {tf_models_nightly-2.18.0.dev20240905.dist-info → tf_models_nightly-2.18.0.dev20240906.dist-info}/top_level.txt +0 -0
official/nlp/modeling/networks/mobile_bert_encoder.py

@@ -164,36 +164,6 @@ class MobileBERTEncoder(tf_keras.Model):
         attention_scores=all_attention_scores)
     super().__init__(
         inputs=self.inputs, outputs=outputs, **kwargs)
-    self._config = dict(
-        name=self.name,
-        word_vocab_size=word_vocab_size,
-        word_embed_size=word_embed_size,
-        type_vocab_size=type_vocab_size,
-        max_sequence_length=max_sequence_length,
-        num_blocks=num_blocks,
-        hidden_size=hidden_size,
-        num_attention_heads=num_attention_heads,
-        intermediate_size=intermediate_size,
-        intermediate_act_fn=intermediate_act_fn,
-        hidden_dropout_prob=hidden_dropout_prob,
-        attention_probs_dropout_prob=attention_probs_dropout_prob,
-        intra_bottleneck_size=intra_bottleneck_size,
-        initializer_range=initializer_range,
-        use_bottleneck_attention=use_bottleneck_attention,
-        key_query_shared_bottleneck=key_query_shared_bottleneck,
-        num_feedforward_networks=num_feedforward_networks,
-        normalization_type=normalization_type,
-        classifier_activation=classifier_activation,
-        input_mask_dtype=input_mask_dtype,
-        **kwargs,
-    )
-
-  def get_config(self):
-    return dict(self._config)
-
-  @classmethod
-  def from_config(cls, config):
-    return cls(**config)

   def get_embedding_table(self):
     return self.embedding_layer.word_embedding.embeddings
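The deleted block hand-rolled Keras serialization: the constructor cached its arguments in `self._config`, `get_config` returned a copy, and `from_config` re-invoked the constructor. A minimal sketch of the round-trip those methods provided (pre-change behavior only; the small `word_vocab_size` is an arbitrary illustrative value, and this diff alone does not show what replaces the methods):

from official.nlp.modeling.networks import mobile_bert_encoder

# Round-trip that the removed methods implemented by hand (pre-change).
encoder = mobile_bert_encoder.MobileBERTEncoder(word_vocab_size=100)
config = encoder.get_config()  # was: return dict(self._config)
restored = mobile_bert_encoder.MobileBERTEncoder.from_config(config)  # was: return cls(**config)
assert isinstance(restored, mobile_bert_encoder.MobileBERTEncoder)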
official/vision/modeling/backbones/mobilenet.py

@@ -766,6 +766,77 @@ def _mnv4_conv_medium_block_specs():
 }


+def _mnv4_conv_medium_seg_block_specs():
+  """Tailored MobileNetV4ConvMedium for dense prediction, e.g. segmentation."""
+
+  def convbn(kernel_size, strides, filters, output=False):
+    return BlockSpec(
+        block_fn='convbn',
+        kernel_size=kernel_size,
+        filters=filters,
+        strides=strides,
+        is_output=output,
+    )
+
+  def fused_ib(kernel_size, strides, filters, output=False):
+    return BlockSpec(
+        block_fn='fused_ib',
+        kernel_size=kernel_size,
+        filters=filters,
+        strides=strides,
+        expand_ratio=4.0,
+        is_output=output,
+    )
+
+  def uib(
+      start_dw_ks, middle_dw_ks, strides, filters, expand_ratio, output=False
+  ):
+    return BlockSpec(
+        block_fn='uib',
+        start_dw_kernel_size=start_dw_ks,
+        middle_dw_kernel_size=middle_dw_ks,
+        filters=filters,
+        strides=strides,
+        expand_ratio=expand_ratio,
+        use_layer_scale=False,
+        is_output=output,
+    )
+
+  blocks = [
+      convbn(3, 2, 32),
+      fused_ib(3, 2, 48, output=True),
+      # 3rd stage
+      uib(3, 5, 2, 80, 4.0),
+      uib(3, 3, 1, 80, 2.0, output=True),
+      # 4th stage
+      uib(3, 5, 2, 160, 6.0),
+      uib(3, 3, 1, 160, 4.0),
+      uib(3, 3, 1, 160, 4.0),
+      uib(3, 5, 1, 160, 4.0),
+      uib(3, 3, 1, 160, 4.0),
+      uib(3, 0, 1, 160, 4.0),
+      uib(3, 0, 1, 160, 4.0, output=True),
+      # 5th stage
+      uib(5, 5, 2, 256, 6.0),
+      uib(5, 5, 1, 128, 4.0),
+      uib(3, 5, 1, 128, 4.0),
+      uib(3, 5, 1, 128, 4.0),
+      uib(3, 0, 1, 128, 4.0),
+      uib(3, 5, 1, 128, 2.0),
+      uib(5, 5, 1, 128, 4.0),
+      uib(5, 0, 1, 128, 2.0, output=False),
+      # FC layers
+      convbn(1, 1, 448, output=True),
+      BlockSpec(block_fn='gpooling', is_output=False),
+      convbn(1, 1, 1280),
+  ]
+  return {
+      'spec_name': 'MobileNetV4ConvMediumSeg',
+      'block_spec_schema': block_spec_field_list(),
+      'block_specs': block_spec_values_to_list(blocks),
+  }
+
+
 MNV4ConvLarge_BLOCK_SPECS = {
     'spec_name': 'MobileNetV4ConvLarge',
     'block_spec_schema': [

@@ -1077,6 +1148,7 @@ SUPPORTED_SPECS_MAP = {
     'MobileNetV4ConvLarge': MNV4ConvLarge_BLOCK_SPECS,
     'MobileNetV4HybridMedium': _mnv4_hybrid_medium_block_specs(),
     'MobileNetV4HybridLarge': _mnv4_hybrid_large_block_specs(),
+    'MobileNetV4ConvMediumSeg': _mnv4_conv_medium_seg_block_specs(),
 }


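Taken together, the new `_mnv4_conv_medium_seg_block_specs()` and its `SUPPORTED_SPECS_MAP` entry make the variant buildable by name. A minimal sketch mirroring the updated tests (the input size and the `tf_keras` import are assumptions, not part of this diff):

import tf_keras
from official.vision.modeling.backbones import mobilenet

# Build the segmentation-tailored backbone through its registered spec name.
network = mobilenet.MobileNet(model_id='MobileNetV4ConvMediumSeg')
inputs = tf_keras.Input(shape=(224, 224, 3), batch_size=1)  # assumed input size
endpoints = network(inputs)  # per the updated tests, endpoint channels are [48, 80, 160, 448]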
official/vision/modeling/backbones/mobilenet_test.py

@@ -43,6 +43,7 @@ class MobileNetTest(parameterized.TestCase, tf.test.TestCase):
       'MobileNetV4ConvLarge',
       'MobileNetV4HybridMedium',
       'MobileNetV4HybridLarge',
+      'MobileNetV4ConvMediumSeg',
   )
   def test_serialize_deserialize(self, model_id):
     # Create a network object that sets all of its config options.

@@ -96,6 +97,7 @@ class MobileNetTest(parameterized.TestCase, tf.test.TestCase):
           'MobileNetV4ConvLarge',
           'MobileNetV4HybridMedium',
           'MobileNetV4HybridLarge',
+          'MobileNetV4ConvMediumSeg',
       ],
   )
   )

@@ -126,6 +128,7 @@ class MobileNetTest(parameterized.TestCase, tf.test.TestCase):
           'MobileNetV4ConvLarge',
           'MobileNetV4HybridMedium',
           'MobileNetV4HybridLarge',
+          'MobileNetV4ConvMediumSeg',
       ],
       [32, 224],
   )

@@ -153,6 +156,7 @@ class MobileNetTest(parameterized.TestCase, tf.test.TestCase):
         'MobileNetV4ConvLarge': [48, 96, 192, 512],
         'MobileNetV4HybridMedium': [48, 80, 160, 256],
         'MobileNetV4HybridLarge': [48, 96, 192, 512],
+        'MobileNetV4ConvMediumSeg': [48, 80, 160, 448],
     }

     network = mobilenet.MobileNet(model_id=model_id,

@@ -184,6 +188,7 @@ class MobileNetTest(parameterized.TestCase, tf.test.TestCase):
           'MobileNetV4ConvLarge',
           'MobileNetV4HybridMedium',
           'MobileNetV4HybridLarge',
+          'MobileNetV4ConvMediumSeg',
       ],
       [32, 224],
   )

@@ -211,6 +216,7 @@ class MobileNetTest(parameterized.TestCase, tf.test.TestCase):
         'MobileNetV4ConvLarge': [None, None, None, None],
         'MobileNetV4HybridMedium': [None, None, None, None],
         'MobileNetV4HybridLarge': [None, None, None, None],
+        'MobileNetV4ConvMediumSeg': [None, None, None, None],
     }
     network = mobilenet.MobileNet(model_id=model_id,
                                   filter_size_scale=1.0,

@@ -247,6 +253,7 @@ class MobileNetTest(parameterized.TestCase, tf.test.TestCase):
           'MobileNetV4ConvLarge',
           'MobileNetV4HybridMedium',
           'MobileNetV4HybridLarge',
+          'MobileNetV4ConvMediumSeg',
       ],
       [1.0, 0.75],
   )

@@ -285,6 +292,8 @@ class MobileNetTest(parameterized.TestCase, tf.test.TestCase):
         ('MobileNetV4HybridMedium', 0.75): 6072584,
         ('MobileNetV4HybridLarge', 1.0): 36648024,
         ('MobileNetV4HybridLarge', 0.75): 21598064,
+        ('MobileNetV4ConvMediumSeg', 1.0): 3787024,
+        ('MobileNetV4ConvMediumSeg', 0.75): 2302536,
     }

     input_size = 224

@@ -314,6 +323,7 @@ class MobileNetTest(parameterized.TestCase, tf.test.TestCase):
           'MobileNetV4ConvLarge',
           'MobileNetV4HybridMedium',
           'MobileNetV4HybridLarge',
+          'MobileNetV4ConvMediumSeg',
       ],
       [8, 16, 32],
   )

@@ -340,6 +350,7 @@ class MobileNetTest(parameterized.TestCase, tf.test.TestCase):
         'MobileNetV4ConvLarge': 512,
         'MobileNetV4HybridMedium': 256,
         'MobileNetV4HybridLarge': 512,
+        'MobileNetV4ConvMediumSeg': 448,
     }

     network = mobilenet.MobileNet(
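The two new parameter-count entries pin the variant's size at both width multipliers. A sketch of the corresponding check (assuming the test uses the standard Keras `count_params`, which this diff does not show):

# Inside the parameterized test, for ('MobileNetV4ConvMediumSeg', 1.0):
network = mobilenet.MobileNet(
    model_id='MobileNetV4ConvMediumSeg', filter_size_scale=1.0)
assert network.count_params() == 3787024  # 2302536 at filter_size_scale=0.75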
official/vision/serving/semantic_segmentation.py

@@ -26,7 +26,8 @@ class SegmentationModule(export_base.ExportModule):

   def _build_model(self):
     input_specs = tf_keras.layers.InputSpec(
-        shape=[self._batch_size] + self._input_image_size + [3])
+        shape=[self._batch_size] + self._input_image_size + [self._num_channels]
+    )

     return factory.build_segmentation_model(
         input_specs=input_specs,

@@ -72,7 +73,9 @@ class SegmentationModule(export_base.ExportModule):
     if self._input_type != 'tflite':
       with tf.device('cpu:0'):
         images_spec = tf.TensorSpec(
-            shape=self._input_image_size + [3], dtype=tf.float32)
+            shape=self._input_image_size + [self._num_channels],
+            dtype=tf.float32,
+        )
       image_info_spec = tf.TensorSpec(shape=[4, 2], dtype=tf.float32)

       images, image_info = tf.nest.map_structure(
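With `num_channels` now threaded through `_build_model` and the serving signature, the exporter accepts non-RGB inputs. A minimal sketch of building such a module (values copied from the new test below; the `exp_factory` import follows the pattern used elsewhere in the repo's tests):

from official.core import exp_factory
from official.vision.serving import semantic_segmentation

# Six-channel export config, as exercised by the new test below.
num_channels = 6
params = exp_factory.get_exp_config('mnv2_deeplabv3_pascal')
params.task.model.input_size = [112, 112, num_channels]
params.task.train_data.image_feature.num_channels = num_channels
module = semantic_segmentation.SegmentationModule(
    params,
    batch_size=1,
    input_image_size=[112, 112],
    input_type='image_tensor',
    num_channels=num_channels,
)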
official/vision/serving/semantic_segmentation_test.py

@@ -49,20 +49,22 @@ class SemanticSegmentationExportTest(tf.test.TestCase, parameterized.TestCase):
                                  {input_type: 'serving_default'})
     tf.saved_model.save(module, save_directory, signatures=signatures)

-  def _get_dummy_input(self, input_type, input_image_size):
+  def _get_dummy_input(self, input_type, input_image_size, num_channels):
     """Get dummy input for the given input type."""

     height = input_image_size[0]
     width = input_image_size[1]
     if input_type == 'image_tensor':
-      return tf.zeros((1, height, width, 3), dtype=np.uint8)
+      return tf.zeros((1, height, width, num_channels), dtype=np.uint8)
     elif input_type == 'image_bytes':
-      image = Image.fromarray(np.zeros((height, width, 3), dtype=np.uint8))
+      image = Image.fromarray(
+          np.zeros((height, width, num_channels), dtype=np.uint8)
+      )
       byte_io = io.BytesIO()
       image.save(byte_io, 'PNG')
       return [byte_io.getvalue()]
     elif input_type == 'tf_example':
-      image_tensor = tf.zeros((height, width, 3), dtype=tf.uint8)
+      image_tensor = tf.zeros((height, width, num_channels), dtype=tf.uint8)
       encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).numpy()
       example = tf.train.Example(
           features=tf.train.Features(

@@ -73,7 +75,7 @@ class SemanticSegmentationExportTest(tf.test.TestCase, parameterized.TestCase):
                 })).SerializeToString()
       return [example]
     elif input_type == 'tflite':
-      return tf.zeros((1, height, width, 3), dtype=np.float32)
+      return tf.zeros((1, height, width, num_channels), dtype=np.float32)

   @parameterized.parameters(
       ('image_tensor', False, [112, 112], False),

@@ -105,7 +107,7 @@ class SemanticSegmentationExportTest(tf.test.TestCase, parameterized.TestCase):
     imported = tf.saved_model.load(tmp_dir)
     segmentation_fn = imported.signatures['serving_default']

-    images = self._get_dummy_input(input_type, input_image_size)
+    images = self._get_dummy_input(input_type, input_image_size, num_channels=3)
     if input_type != 'tflite':
       processed_images, _ = tf.nest.map_structure(
           tf.stop_gradient,

@@ -128,6 +130,68 @@ class SemanticSegmentationExportTest(tf.test.TestCase, parameterized.TestCase):
     out = segmentation_fn(tf.constant(images))
     self.assertAllClose(out['logits'].numpy(), expected_output.numpy())

+  @parameterized.parameters(
+      ('image_tensor',),
+      ('tflite',),
+  )
+  def test_export_with_extra_input_channels(self, input_type):
+    tmp_dir = self.get_temp_dir()
+    num_channels = 6
+    params = exp_factory.get_exp_config('mnv2_deeplabv3_pascal')
+    params.task.init_checkpoint = None
+    params.task.model.input_size = [112, 112, num_channels]
+    params.task.export_config.rescale_output = False
+    params.task.train_data.preserve_aspect_ratio = False
+    params.task.train_data.image_feature.mean = [0.5] * num_channels
+    params.task.train_data.image_feature.stddev = [0.5] * num_channels
+    params.task.train_data.image_feature.num_channels = num_channels
+    module = semantic_segmentation.SegmentationModule(
+        params,
+        batch_size=1,
+        input_image_size=[112, 112],
+        input_type=input_type,
+        num_channels=num_channels,
+    )
+
+    self._export_from_module(module, input_type, tmp_dir)
+
+    self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'saved_model.pb')))
+    self.assertTrue(
+        os.path.exists(os.path.join(tmp_dir, 'variables', 'variables.index'))
+    )
+    self.assertTrue(
+        os.path.exists(
+            os.path.join(tmp_dir, 'variables', 'variables.data-00000-of-00001')
+        )
+    )
+
+    imported = tf.saved_model.load(tmp_dir)
+    segmentation_fn = imported.signatures['serving_default']
+
+    images = self._get_dummy_input(input_type, [112, 112], num_channels)
+
+    if input_type != 'tflite':
+      processed_images, _ = tf.nest.map_structure(
+          tf.stop_gradient,
+          tf.map_fn(
+              module._build_inputs,
+              elems=tf.zeros((1, 112, 112, num_channels), dtype=tf.uint8),
+              fn_output_signature=(
+                  tf.TensorSpec(
+                      shape=[112, 112, num_channels], dtype=tf.float32
+                  ),
+                  tf.TensorSpec(shape=[4, 2], dtype=tf.float32),
+              ),
+          ),
+      )
+    else:
+      processed_images = images
+
+    logits = module.model(processed_images, training=False)['logits']
+    expected_output = tf.image.resize(logits, [112, 112], method='bilinear')
+    out = segmentation_fn(tf.constant(images))
+    self.assertAllClose(out['logits'].numpy(), expected_output.numpy())
+
   def test_export_invalid_batch_size(self):
     batch_size = 3
     tmp_dir = self.get_temp_dir()
@@ -407,7 +407,7 @@ official/nlp/modeling/networks/fnet.py,sha256=DAIQCixcqDoZjGSEo1apm3pAFlm9Qlsn1I
|
|
407
407
|
official/nlp/modeling/networks/fnet_test.py,sha256=mCQc67QeaPbiResoAYIUl9V3mAhp7XR4pQ5we1KcuL8,4553
|
408
408
|
official/nlp/modeling/networks/funnel_transformer.py,sha256=5_dcVgOgJoXZ-wZT2X5w-yaNt1OTqj-whWjg73bKkSg,24127
|
409
409
|
official/nlp/modeling/networks/funnel_transformer_test.py,sha256=XEVGmOa-sy7CTe46NQkQ18MZGdWTEB-5qYlujOndCl4,17562
|
410
|
-
official/nlp/modeling/networks/mobile_bert_encoder.py,sha256=
|
410
|
+
official/nlp/modeling/networks/mobile_bert_encoder.py,sha256=IuAw-nsQFvh4uVgroemaVM7DoclMfeL_BvfKek1qjc4,7542
|
411
411
|
official/nlp/modeling/networks/mobile_bert_encoder_test.py,sha256=u9WqDiIuKlQuA2cEUg_InpgaXXQASGHGhxoCseyHOZI,7115
|
412
412
|
official/nlp/modeling/networks/packed_sequence_embedding.py,sha256=Hu1hcTtfBA96dboCNcAPwe2D7x0QYWHxY4BpO5nUDUw,12810
|
413
413
|
official/nlp/modeling/networks/packed_sequence_embedding_test.py,sha256=atjMoYpt78Uz16NXdp7l7IrJQtgcfxvkzcKDvhl5bac,5076
|
@@ -1063,8 +1063,8 @@ official/vision/modeling/backbones/factory.py,sha256=coJKJpPMhgM9gAc2Q7I5_CuzAaH
|
|
1063
1063
|
official/vision/modeling/backbones/factory_test.py,sha256=7ZJRDSQ_cqJFyfqLK375V_wEqgrQpqibzNDZzNbhthU,8635
|
1064
1064
|
official/vision/modeling/backbones/mobiledet.py,sha256=iEC_KbqYqUBBBwZUfRCVtqllQwK6N4T1jmiDl29B-Ys,24896
|
1065
1065
|
official/vision/modeling/backbones/mobiledet_test.py,sha256=O2yfL7MSCGtKsnXr0IVUtjicrhZGGkwTXWCLtqdsL0Y,3804
|
1066
|
-
official/vision/modeling/backbones/mobilenet.py,sha256=
|
1067
|
-
official/vision/modeling/backbones/mobilenet_test.py,sha256=
|
1066
|
+
official/vision/modeling/backbones/mobilenet.py,sha256=WWQGODkzYlQgCAiLVsUJ2OFpM0lL6-XLa6xjyV8VFxw,63321
|
1067
|
+
official/vision/modeling/backbones/mobilenet_test.py,sha256=DALtiz7dTtDWutjdFCzEKtlLkXehJ_8AiizyetaVgIc,13565
|
1068
1068
|
official/vision/modeling/backbones/resnet.py,sha256=dnYkdlYUzChGLOrQnUbwb9YJ7BDiFwgnLptks7kFb7k,16384
|
1069
1069
|
official/vision/modeling/backbones/resnet_3d.py,sha256=Cq1lrlRqIg9ss_ud1iM_axW9lsTVtGYe3iA4DL9Orzk,18657
|
1070
1070
|
official/vision/modeling/backbones/resnet_3d_test.py,sha256=hhCkW28UXc2peKHGgFl0MnYexFV8qTwEUkMPZ26a_MA,3799
|
@@ -1160,8 +1160,8 @@ official/vision/serving/export_tflite_lib.py,sha256=2AWkyEsLvMBE19m2WAa0IpyFsHV8
|
|
1160
1160
|
official/vision/serving/export_utils.py,sha256=8mJb1MF_6kk3lbqZOZq2Lwu4A2L1KWxjnWnV_ZpYlVI,4881
|
1161
1161
|
official/vision/serving/image_classification.py,sha256=wEthg6y-geVsRkAuQ1SKv-fnECMFXYuE1qR1H0yCVBA,4562
|
1162
1162
|
official/vision/serving/image_classification_test.py,sha256=qZmuiQewptSQdY2iQEkx8rHjULybgADuXsQ84SjLCok,6759
|
1163
|
-
official/vision/serving/semantic_segmentation.py,sha256=
|
1164
|
-
official/vision/serving/semantic_segmentation_test.py,sha256=
|
1163
|
+
official/vision/serving/semantic_segmentation.py,sha256=dhrJEznThcJghAO_VtAbWoR4B5x1FQZ2r5nmVHLyLdg,4221
|
1164
|
+
official/vision/serving/semantic_segmentation_test.py,sha256=XiI1YUeR1XxFQjfDFBjE-pqgHNR42Ifuz6DLQbFKmg4,7990
|
1165
1165
|
official/vision/serving/video_classification.py,sha256=s37SdFoASmX0b3MubTpMdebPOvegx0Nj7yFogu5rYXE,6884
|
1166
1166
|
official/vision/serving/video_classification_test.py,sha256=vx-o4y_mkgLnCrLfw-uWm6S5nrZKIcDcv_1wMEicXrE,4232
|
1167
1167
|
official/vision/tasks/__init__.py,sha256=qfhL5xyDrjZez_zjw613TyciLkqtWm-INFeES7GwOPQ,995
|
@@ -1222,9 +1222,9 @@ tensorflow_models/tensorflow_models_test.py,sha256=nc6A9K53OGqF25xN5St8EiWvdVbda
|
|
1222
1222
|
tensorflow_models/nlp/__init__.py,sha256=4tA5Pf4qaFwT-fIFOpX7x7FHJpnyJT-5UgOeFYTyMlc,807
|
1223
1223
|
tensorflow_models/uplift/__init__.py,sha256=mqfa55gweOdpKoaQyid4A_4u7xw__FcQeSIF0k_pYmI,999
|
1224
1224
|
tensorflow_models/vision/__init__.py,sha256=zBorY_v5xva1uI-qxhZO3Qh-Dii-Suq6wEYh6hKHDfc,833
|
1225
|
-
tf_models_nightly-2.18.0.
|
1226
|
-
tf_models_nightly-2.18.0.
|
1227
|
-
tf_models_nightly-2.18.0.
|
1228
|
-
tf_models_nightly-2.18.0.
|
1229
|
-
tf_models_nightly-2.18.0.
|
1230
|
-
tf_models_nightly-2.18.0.
|
1225
|
+
tf_models_nightly-2.18.0.dev20240906.dist-info/AUTHORS,sha256=1dG3fXVu9jlo7bul8xuix5F5vOnczMk7_yWn4y70uw0,337
|
1226
|
+
tf_models_nightly-2.18.0.dev20240906.dist-info/LICENSE,sha256=WxeBS_DejPZQabxtfMOM_xn8qoZNJDQjrT7z2wG1I4U,11512
|
1227
|
+
tf_models_nightly-2.18.0.dev20240906.dist-info/METADATA,sha256=GB6Gi0zUN0SQPy3AlZv5aCOqKhVlhkpZ3aL-3xN_iWk,1432
|
1228
|
+
tf_models_nightly-2.18.0.dev20240906.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
|
1229
|
+
tf_models_nightly-2.18.0.dev20240906.dist-info/top_level.txt,sha256=gum2FfO5R4cvjl2-QtP-S1aNmsvIZaFFT6VFzU0f4-g,33
|
1230
|
+
tf_models_nightly-2.18.0.dev20240906.dist-info/RECORD,,
|
{tf_models_nightly-2.18.0.dev20240905.dist-info → tf_models_nightly-2.18.0.dev20240906.dist-info}/AUTHORS: File without changes
{tf_models_nightly-2.18.0.dev20240905.dist-info → tf_models_nightly-2.18.0.dev20240906.dist-info}/LICENSE: File without changes
{tf_models_nightly-2.18.0.dev20240905.dist-info → tf_models_nightly-2.18.0.dev20240906.dist-info}/WHEEL: File without changes
{tf_models_nightly-2.18.0.dev20240905.dist-info → tf_models_nightly-2.18.0.dev20240906.dist-info}/top_level.txt: File without changes