tf-models-nightly 2.16.0.dev20240216__py2.py3-none-any.whl → 2.16.0.dev20240218__py2.py3-none-any.whl
- official/vision/serving/detection.py +5 -2
- official/vision/serving/export_base.py +101 -62
- official/vision/serving/image_classification.py +3 -2
- official/vision/serving/semantic_segmentation.py +3 -1
- {tf_models_nightly-2.16.0.dev20240216.dist-info → tf_models_nightly-2.16.0.dev20240218.dist-info}/METADATA +1 -1
- {tf_models_nightly-2.16.0.dev20240216.dist-info → tf_models_nightly-2.16.0.dev20240218.dist-info}/RECORD +10 -10
- {tf_models_nightly-2.16.0.dev20240216.dist-info → tf_models_nightly-2.16.0.dev20240218.dist-info}/AUTHORS +0 -0
- {tf_models_nightly-2.16.0.dev20240216.dist-info → tf_models_nightly-2.16.0.dev20240218.dist-info}/LICENSE +0 -0
- {tf_models_nightly-2.16.0.dev20240216.dist-info → tf_models_nightly-2.16.0.dev20240218.dist-info}/WHEEL +0 -0
- {tf_models_nightly-2.16.0.dev20240216.dist-info → tf_models_nightly-2.16.0.dev20240218.dist-info}/top_level.txt +0 -0
official/vision/serving/detection.py

```diff
@@ -78,6 +78,11 @@ class DetectionModule(export_base.ExportModule):
 
   def _build_inputs(self, image):
     """Builds detection model inputs for serving."""
+
+    if isinstance(image, tf.RaggedTensor):
+      image = image.to_tensor()
+    image = tf.cast(image, dtype=tf.float32)
+
     # Normalizes image with mean and std pixel values.
     image = preprocess_ops.normalize_image(
         image, offset=preprocess_ops.MEAN_RGB, scale=preprocess_ops.STDDEV_RGB)
```
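
The hunk above lets the detection input builder accept elements of a ragged batch (images of different sizes stacked with `tf.ragged.stack`). Below is a minimal, self-contained sketch of the densify-then-cast pattern; the toy shapes and the `ragged_batch` name are illustrative, not code from the package:

```python
import tensorflow as tf

# Two images with different spatial sizes, batched as one RaggedTensor.
ragged_batch = tf.ragged.stack([
    tf.zeros([2, 2, 3], dtype=tf.uint8),
    tf.zeros([3, 4, 3], dtype=tf.uint8),
])

image = ragged_batch[1]  # still a RaggedTensor: inner dims are ragged
if isinstance(image, tf.RaggedTensor):
  image = image.to_tensor()  # densify; ragged dims are zero-padded
image = tf.cast(image, dtype=tf.float32)
print(image.shape)  # (3, 4, 3)
```

Because `to_tensor()` produces a dense, zero-padded `tf.Tensor`, the subsequent `preprocess_ops.normalize_image` call sees the input type it expects.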

```diff
@@ -134,8 +139,6 @@ class DetectionModule(export_base.ExportModule):
     """
     model_params = self.params.task.model
     with tf.device('cpu:0'):
-      images = tf.cast(images, dtype=tf.float32)
-
       # Tensor Specs for map_fn outputs (images, anchor_boxes, and image_info).
       images_spec = tf.TensorSpec(shape=self._padded_size + [3],
                                   dtype=tf.float32)
```
official/vision/serving/export_base.py

```diff
@@ -18,6 +18,7 @@ import abc
 from typing import Dict, List, Mapping, Optional, Text
 
 import tensorflow as tf, tf_keras
+
 from official.core import config_definitions as cfg
 from official.core import export_base
 
```

```diff
@@ -25,15 +26,17 @@ from official.core import export_base
 class ExportModule(export_base.ExportModule, metaclass=abc.ABCMeta):
   """Base Export Module."""
 
-  def __init__(self,
-               params: cfg.ExperimentConfig,
-               *,
-               batch_size: int,
-               input_image_size: List[int],
-               input_type: str = 'image_tensor',
-               num_channels: int = 3,
-               model: Optional[tf_keras.Model] = None,
-               input_name: Optional[str] = None):
+  def __init__(
+      self,
+      params: cfg.ExperimentConfig,
+      *,
+      batch_size: int,
+      input_image_size: List[int],
+      input_type: str = 'image_tensor',
+      num_channels: int = 3,
+      model: Optional[tf_keras.Model] = None,
+      input_name: Optional[str] = None
+  ):
     """Initializes a module for export.
 
     Args:
```

```diff
@@ -72,17 +75,20 @@ class ExportModule(export_base.ExportModule, metaclass=abc.ABCMeta):
     if len(self._input_image_size) == 2:
       # Decode an image if 2D input is expected.
       image_tensor = tf.image.decode_image(
-          encoded_image_bytes, channels=self._num_channels)
+          encoded_image_bytes, channels=self._num_channels
+      )
       image_tensor.set_shape((None, None, self._num_channels))
     else:
       # Convert raw bytes into a tensor and reshape it, if not 2D input.
       image_tensor = tf.io.decode_raw(encoded_image_bytes, out_type=tf.uint8)
-      image_tensor = tf.reshape(
-          image_tensor, self._input_image_size + [self._num_channels])
+      image_tensor = tf.reshape(
+          image_tensor, self._input_image_size + [self._num_channels]
+      )
     return image_tensor
 
   def _decode_tf_example(
-      self, tf_example_string_tensor: tf.train.Example) -> tf.Tensor:
+      self, tf_example_string_tensor: tf.train.Example
+  ) -> tf.Tensor:
     """Decodes a TF Example to an image tensor.
 
     Args:
```

```diff
@@ -94,7 +100,8 @@ class ExportModule(export_base.ExportModule, metaclass=abc.ABCMeta):
     """
     keys_to_features = {'image/encoded': tf.io.FixedLenFeature((), tf.string)}
     parsed_tensors = tf.io.parse_single_example(
-        serialized=tf_example_string_tensor, features=keys_to_features)
+        serialized=tf_example_string_tensor, features=keys_to_features
+    )
     image_tensor = self._decode_image(parsed_tensors['image/encoded'])
     image_tensor.set_shape(
         [None] * len(self._input_image_size) + [self._num_channels]
```

```diff
@@ -107,7 +114,8 @@ class ExportModule(export_base.ExportModule, metaclass=abc.ABCMeta):
 
   @tf.function
   def inference_from_image_tensors(
-      self, inputs: tf.Tensor) -> Mapping[str, tf.Tensor]:
+      self, inputs: tf.Tensor
+  ) -> Mapping[str, tf.Tensor]:
     return self.serve(inputs)
 
   @tf.function
```

```diff
@@ -117,39 +125,57 @@ class ExportModule(export_base.ExportModule, metaclass=abc.ABCMeta):
   @tf.function
   def inference_from_image_bytes(self, inputs: tf.Tensor):
     with tf.device('cpu:0'):
-      images = tf.nest.map_structure(
-          tf.identity,
-          tf.map_fn(
-              self._decode_image,
-              elems=inputs,
-              fn_output_signature=tf.TensorSpec(
-                  shape=[None] * len(self._input_image_size) +
-                  [self._num_channels],
-                  dtype=tf.uint8),
-              parallel_iterations=32))
-      images = tf.stack(images)
+      if len(inputs) == 1:
+        images = tf.nest.map_structure(
+            tf.identity,
+            tf.map_fn(
+                self._decode_image,
+                elems=inputs,
+                fn_output_signature=tf.TensorSpec(
+                    shape=[None] * len(self._input_image_size)
+                    + [self._num_channels],
+                    dtype=tf.uint8,
+                ),
+                parallel_iterations=32,
+            ),
+        )
+        images = tf.stack(images)
+      else:
+        images = []
+        # Need to use for loop instead of enumerate as enumerate is not well
+        # supported in the exported savedmodel.
+        for i in range(len(inputs)):  # pylint: disable=range-len-with-index-access
+          images.append(tf.cast(self._decode_image(inputs[i]), tf.uint8))
+        images = tf.ragged.stack(images)
     return self.serve(images)
 
   @tf.function
-  def inference_from_tf_example(
-      self, inputs: tf.Tensor) -> Mapping[str, tf.Tensor]:
+  def inference_from_tf_example(
+      self, inputs: tf.Tensor
+  ) -> Mapping[str, tf.Tensor]:
     with tf.device('cpu:0'):
-      images = tf.nest.map_structure(
-          tf.identity,
-          tf.map_fn(
-              self._decode_tf_example,
-              elems=inputs,
-              fn_output_signature=tf.TensorSpec(
-                  shape=[None] * len(self._input_image_size) +
-                  [self._num_channels],
-                  dtype=tf.uint8),
-              parallel_iterations=32))
-      images = tf.stack(images)
+      if len(inputs) == 1:
+        images = tf.nest.map_structure(
+            tf.identity,
+            tf.map_fn(
+                self._decode_tf_example,
+                elems=inputs,
+                fn_output_signature=tf.TensorSpec(
+                    shape=[None] * len(self._input_image_size)
+                    + [self._num_channels],
+                    dtype=tf.uint8,
+                ),
+                parallel_iterations=32,
+            ),
+        )
+        images = tf.stack(images)
+      else:
+        images = []
+        # Need to use for loop instead of enumerate as enumerate is not well
+        # supported in the exported savedmodel.
+        for i in range(len(inputs)):  # pylint: disable=range-len-with-index-access
+          images.append(tf.cast(self._decode_tf_example(inputs[i]), tf.uint8))
+        images = tf.ragged.stack(images)
     return self.serve(images)
 
   def get_inference_signatures(self, function_keys: Dict[Text, Text]):
```
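
Both inference entry points now branch on the static batch size: a single-element batch keeps the earlier `tf.map_fn` path, while larger batches decode each element separately and collect the possibly differently sized results with `tf.ragged.stack`. A sketch of that else-branch in isolation; the `decode` helper and the fixed batch size of 2 are assumptions for illustration, not the module's code:

```python
import tensorflow as tf


def decode(encoded: tf.Tensor) -> tf.Tensor:
  # Stand-in for ExportModule._decode_image: bytes -> HxWx3 uint8 image.
  return tf.io.decode_image(encoded, channels=3, expand_animations=False)


@tf.function(input_signature=[tf.TensorSpec([2], tf.string)])
def decode_batch(inputs: tf.Tensor) -> tf.RaggedTensor:
  images = []
  # A plain range() loop (rather than enumerate) survives SavedModel
  # export, which is what the comment in the hunk above is about.
  for i in range(2):
    images.append(tf.cast(decode(inputs[i]), tf.uint8))
  return tf.ragged.stack(images)  # shape: [2, None, None, 3]
```

Feeding two serialized images of different sizes yields one ragged batch; padding to a common shape is deferred to `serve()` via the `_build_inputs` changes above.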

```diff
@@ -167,33 +193,46 @@ class ExportModule(export_base.ExportModule, metaclass=abc.ABCMeta):
     for key, def_name in function_keys.items():
       if key == 'image_tensor':
         input_signature = tf.TensorSpec(
-            shape=[self._batch_size] + [None] * len(self._input_image_size) +
-            [self._num_channels],
+            shape=[self._batch_size]
+            + [None] * len(self._input_image_size)
+            + [self._num_channels],
             dtype=tf.uint8,
-            name=self._input_name)
-        signatures[
-            def_name] = self.inference_from_image_tensors.get_concrete_function(
-                input_signature)
+            name=self._input_name,
+        )
+        signatures[def_name] = (
+            self.inference_from_image_tensors.get_concrete_function(
+                input_signature
+            )
+        )
       elif key == 'image_bytes':
         input_signature = tf.TensorSpec(
-            shape=[self._batch_size], dtype=tf.string, name=self._input_name)
-        signatures[
-            def_name] = self.inference_from_image_bytes.get_concrete_function(
-                input_signature)
+            shape=[self._batch_size], dtype=tf.string, name=self._input_name
+        )
+        signatures[def_name] = (
+            self.inference_from_image_bytes.get_concrete_function(
+                input_signature
+            )
+        )
       elif key == 'serve_examples' or key == 'tf_example':
         input_signature = tf.TensorSpec(
-            shape=[self._batch_size], dtype=tf.string, name=self._input_name)
-        signatures[
-            def_name] = self.inference_from_tf_example.get_concrete_function(
-                input_signature)
+            shape=[self._batch_size], dtype=tf.string, name=self._input_name
+        )
+        signatures[def_name] = (
+            self.inference_from_tf_example.get_concrete_function(
+                input_signature
+            )
+        )
       elif key == 'tflite':
         input_signature = tf.TensorSpec(
-            shape=[self._batch_size] + self._input_image_size +
-            [self._num_channels],
+            shape=[self._batch_size]
+            + self._input_image_size
+            + [self._num_channels],
             dtype=tf.float32,
-            name=self._input_name)
+            name=self._input_name,
+        )
         signatures[def_name] = self.inference_for_tflite.get_concrete_function(
-            input_signature)
+            input_signature
+        )
       else:
         raise ValueError('Unrecognized `input_type`')
     return signatures
```
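
For orientation, a usage sketch of how these signatures typically reach a SavedModel; `module` stands for an already constructed `ExportModule` subclass and the paths are placeholders (this is not code from the diff):

```python
import tensorflow as tf

# Assumption: `module` is an already-built ExportModule subclass, e.g.
# DetectionModule(params, batch_size=1, input_image_size=[640, 640]).
signatures = module.get_inference_signatures(
    {'image_bytes': 'serving_default'}
)
tf.saved_model.save(module, '/tmp/exported_model', signatures=signatures)

# Clients then look the signature up by the chosen name:
loaded = tf.saved_model.load('/tmp/exported_model')
serving_fn = loaded.signatures['serving_default']
```

The `function_keys` mapping pairs an input type (`image_tensor`, `image_bytes`, `tf_example`/`serve_examples`, or `tflite`) with the signature name under which the corresponding concrete function is exported.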
official/vision/serving/image_classification.py

```diff
@@ -36,6 +36,9 @@ class ClassificationModule(export_base.ExportModule):
   def _build_inputs(self, image):
     """Builds classification model inputs for serving."""
     # Center crops and resizes image.
+    if isinstance(image, tf.RaggedTensor):
+      image = image.to_tensor()
+    image = tf.cast(image, dtype=tf.float32)
     if self.params.task.train_data.aug_crop:
       image = preprocess_ops.center_crop_image(image)
 
```

```diff
@@ -62,8 +65,6 @@ class ClassificationModule(export_base.ExportModule):
     # with TFLite quantization.
     if self._input_type != 'tflite':
       with tf.device('cpu:0'):
-        images = tf.cast(images, dtype=tf.float32)
-
         images = tf.nest.map_structure(
             tf.identity,
             tf.map_fn(
```
official/vision/serving/semantic_segmentation.py

```diff
@@ -35,6 +35,9 @@ class SegmentationModule(export_base.ExportModule):
 
   def _build_inputs(self, image):
     """Builds classification model inputs for serving."""
+    if isinstance(image, tf.RaggedTensor):
+      image = image.to_tensor()
+    image = tf.cast(image, dtype=tf.float32)
 
     # Normalizes image with mean and std pixel values.
     image_feature = self.params.task.train_data.image_feature
```

```diff
@@ -68,7 +71,6 @@ class SegmentationModule(export_base.ExportModule):
     image_info = None
     if self._input_type != 'tflite':
       with tf.device('cpu:0'):
-        images = tf.cast(images, dtype=tf.float32)
         images_spec = tf.TensorSpec(
             shape=self._input_image_size + [3], dtype=tf.float32)
         image_info_spec = tf.TensorSpec(shape=[4, 2], dtype=tf.float32)
```
{tf_models_nightly-2.16.0.dev20240216.dist-info → tf_models_nightly-2.16.0.dev20240218.dist-info}/RECORD

```diff
@@ -1079,9 +1079,9 @@ official/vision/ops/spatial_transform_ops.py,sha256=PVEJGAn0ygtsrid84vD5GgV0jsjy
 official/vision/ops/target_gather.py,sha256=Ir3X76yXYEVFSYX5h-yfS8SMkY37GYuypBP2C8ykggo,3965
 official/vision/ops/target_gather_test.py,sha256=yiTZg7w4HYh19pw9yIDppvenscp8MSBGlhDko180l08,2561
 official/vision/serving/__init__.py,sha256=YlRnCfAvBWlB7gn-Dz32dxVIDBCzxH8PhdM14kHxPgs,702
-official/vision/serving/detection.py,sha256=
+official/vision/serving/detection.py,sha256=uZY4m3_KqkW5dLtFroN3TQuHPUQUXhP4iY9CfpW7UwY,10765
 official/vision/serving/detection_test.py,sha256=s3ueCplPII8Am7tPSWcUv9KUcAqh1AWb6X91_B6qZIM,9165
-official/vision/serving/export_base.py,sha256=
+official/vision/serving/export_base.py,sha256=6HS9Tj1iHhYvkmg7qqOk_l0oGD3uot9hyI4EFJDklbo,8190
 official/vision/serving/export_base_v2.py,sha256=GYIqt-xaOv4UztAKjx-acD-9i2pjftMw46DWRMy7Bsk,2741
 official/vision/serving/export_base_v2_test.py,sha256=khuY14W9Oi8LGlv_CvMwFiNnFbgpPVzvvD_hugf1_lk,2880
 official/vision/serving/export_module_factory.py,sha256=TRnqqLkGPVhfDFzsUC6dQIfjGA3Ctv7vSLiHkyOc6fg,3549
```

```diff
@@ -1095,9 +1095,9 @@ official/vision/serving/export_tfhub_lib.py,sha256=BZC4XSbly0DLDSlWnphjWaD0Q6SOt
 official/vision/serving/export_tflite.py,sha256=OhIVHrcDvB83p4WroheWcTmeYArMV9TJcgUVJWytWIc,5115
 official/vision/serving/export_tflite_lib.py,sha256=2AWkyEsLvMBE19m2WAa0IpyFsHV8sIR4Gvcv5ZQWbmg,7272
 official/vision/serving/export_utils.py,sha256=8mJb1MF_6kk3lbqZOZq2Lwu4A2L1KWxjnWnV_ZpYlVI,4881
-official/vision/serving/image_classification.py,sha256=
+official/vision/serving/image_classification.py,sha256=ikvRyxMwGnFj7ptk6TT9H2YEJmxoZUcAHAihwghXgUk,2936
 official/vision/serving/image_classification_test.py,sha256=y1Mv8UDxdCfs2rRIUBgnLFE9f-4V6uOn592ueNMOc4s,4622
-official/vision/serving/semantic_segmentation.py,sha256=
+official/vision/serving/semantic_segmentation.py,sha256=1RmEOpYz-HjB-VKa3k9p3gZj3h_3ob5d-3RO9_cXfH0,4160
 official/vision/serving/semantic_segmentation_test.py,sha256=QVx12ciMA6T20d3wuKu8pe6ekEyPj8aNbKbRKxOgRik,5638
 official/vision/serving/video_classification.py,sha256=s37SdFoASmX0b3MubTpMdebPOvegx0Nj7yFogu5rYXE,6884
 official/vision/serving/video_classification_test.py,sha256=vx-o4y_mkgLnCrLfw-uWm6S5nrZKIcDcv_1wMEicXrE,4232
```

```diff
@@ -1158,9 +1158,9 @@ tensorflow_models/__init__.py,sha256=etxw45SHxuwFCRX5qGxGMP83II0JfJulzNl5GSNJvhw
 tensorflow_models/tensorflow_models_test.py,sha256=AxUYUdiQn416UR7jg0h6rmv688esvlKDfpyDCIQkF18,1395
 tensorflow_models/nlp/__init__.py,sha256=4tA5Pf4qaFwT-fIFOpX7x7FHJpnyJT-5UgOeFYTyMlc,807
 tensorflow_models/vision/__init__.py,sha256=zBorY_v5xva1uI-qxhZO3Qh-Dii-Suq6wEYh6hKHDfc,833
-tf_models_nightly-2.16.0.dev20240216.dist-info/AUTHORS,sha256=1dG3fXVu9jlo7bul8xuix5F5vOnczMk7_yWn4y70uw0,337
-tf_models_nightly-2.16.0.dev20240216.dist-info/LICENSE,sha256=WxeBS_DejPZQabxtfMOM_xn8qoZNJDQjrT7z2wG1I4U,11512
-tf_models_nightly-2.16.0.dev20240216.dist-info/METADATA,sha256=
-tf_models_nightly-2.16.0.dev20240216.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
-tf_models_nightly-2.16.0.dev20240216.dist-info/top_level.txt,sha256=gum2FfO5R4cvjl2-QtP-S1aNmsvIZaFFT6VFzU0f4-g,33
-tf_models_nightly-2.16.0.dev20240216.dist-info/RECORD,,
+tf_models_nightly-2.16.0.dev20240218.dist-info/AUTHORS,sha256=1dG3fXVu9jlo7bul8xuix5F5vOnczMk7_yWn4y70uw0,337
+tf_models_nightly-2.16.0.dev20240218.dist-info/LICENSE,sha256=WxeBS_DejPZQabxtfMOM_xn8qoZNJDQjrT7z2wG1I4U,11512
+tf_models_nightly-2.16.0.dev20240218.dist-info/METADATA,sha256=B9tEBf5OIvvaK4sZY0DGKxIh_U47mLKz1EplgQqNOv0,1432
+tf_models_nightly-2.16.0.dev20240218.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
+tf_models_nightly-2.16.0.dev20240218.dist-info/top_level.txt,sha256=gum2FfO5R4cvjl2-QtP-S1aNmsvIZaFFT6VFzU0f4-g,33
+tf_models_nightly-2.16.0.dev20240218.dist-info/RECORD,,
```

AUTHORS, LICENSE, WHEEL, and top_level.txt were renamed along with the dist-info directory; their contents are unchanged.