tf-models-nightly 2.17.0.dev20240530__py2.py3-none-any.whl → 2.17.0.dev20240602__py2.py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
official/common/flags.py CHANGED
@@ -112,3 +112,12 @@ def define_flags():
 
   flags.DEFINE_string(
       'tpu_platform', default=None, help='TPU platform type.')
+
+  flags.DEFINE_string(
+      'tfhub_handle',
+      None,
+      'TFHub handle for publishing the model to TFHub. The model '
+      'is published to TFHub if this flag is set. Please use a '
+      'TFHubPusher (go/tflex/standard_components/pusher) component if '
+      'running in TFleX.',
+  )
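Note: the new flag follows the standard absl pattern, defined once at startup and read from FLAGS after parsing. A minimal sketch of how a driver script might consume it (the print stands in for the actual publishing step, which is out of scope here):

# Minimal sketch of consuming the new flag; the publishing logic itself
# is represented by a print statement.
from absl import app
from absl import flags

FLAGS = flags.FLAGS
flags.DEFINE_string('tfhub_handle', None, 'TFHub handle for publishing.')


def main(_):
  # The model is only published when the flag is explicitly set.
  if FLAGS.tfhub_handle:
    print(f'Would publish model to TFHub handle: {FLAGS.tfhub_handle}')


if __name__ == '__main__':
  app.run(main)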
official/vision/serving/detection.py CHANGED
@@ -41,27 +41,37 @@ class DetectionModule(export_base.ExportModule):
     return self._input_image_size
 
   def _build_model(self):
-
     nms_versions_supporting_dynamic_batch_size = {'batched', 'v2', 'v3'}
     nms_version = self.params.task.model.detection_generator.nms_version
-    if (self._batch_size is None and
-        nms_version not in nms_versions_supporting_dynamic_batch_size):
-      logging.info('nms_version is set to `batched` because `%s` '
-                   'does not support with dynamic batch size.', nms_version)
+    if (
+        self._batch_size is None
+        and nms_version not in nms_versions_supporting_dynamic_batch_size
+    ):
+      logging.info(
+          'nms_version is set to `batched` because `%s` '
+          'does not support with dynamic batch size.',
+          nms_version,
+      )
       self.params.task.model.detection_generator.nms_version = 'batched'
 
-    input_specs = tf_keras.layers.InputSpec(shape=[
-        self._batch_size, *self._padded_size, 3])
+    input_specs = tf_keras.layers.InputSpec(
+        shape=[self._batch_size, *self._padded_size, 3]
+    )
 
     if isinstance(self.params.task.model, configs.maskrcnn.MaskRCNN):
       model = factory.build_maskrcnn(
-          input_specs=input_specs, model_config=self.params.task.model)
+          input_specs=input_specs, model_config=self.params.task.model
+      )
     elif isinstance(self.params.task.model, configs.retinanet.RetinaNet):
       model = factory.build_retinanet(
-          input_specs=input_specs, model_config=self.params.task.model)
+          input_specs=input_specs, model_config=self.params.task.model
+      )
     else:
-      raise ValueError('Detection module not implemented for {} model.'.format(
-          type(self.params.task.model)))
+      raise ValueError(
+          'Detection module not implemented for {} model.'.format(
+              type(self.params.task.model)
+          )
+      )
 
     return model
 
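Note: the `_build_model` changes are a pure formatting pass (Black-style line wrapping); behavior is unchanged. The logic being rewrapped, falling back to the `batched` NMS generator whenever the batch size is dynamic, is easy to state in isolation. A standalone sketch, with names mirroring the diff but not the package's API:

# Standalone sketch of the fallback rewrapped above; illustrative only.
NMS_VERSIONS_SUPPORTING_DYNAMIC_BATCH_SIZE = {'batched', 'v2', 'v3'}


def resolve_nms_version(nms_version: str, batch_size) -> str:
  """Returns an NMS version that is safe for the given batch size."""
  if (
      batch_size is None  # None means the batch dimension is dynamic.
      and nms_version not in NMS_VERSIONS_SUPPORTING_DYNAMIC_BATCH_SIZE
  ):
    return 'batched'
  return nms_version


assert resolve_nms_version('v1', batch_size=None) == 'batched'
assert resolve_nms_version('v2', batch_size=None) == 'v2'
assert resolve_nms_version('v1', batch_size=8) == 'v1'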
@@ -73,7 +83,8 @@ class DetectionModule(export_base.ExportModule):
         max_level=model_params.max_level,
         num_scales=model_params.anchor.num_scales,
         aspect_ratios=model_params.anchor.aspect_ratios,
-        anchor_size=model_params.anchor.anchor_size)
+        anchor_size=model_params.anchor.anchor_size,
+    )
     return input_anchor(image_size=self._padded_size)
 
   def _build_inputs(self, image):
@@ -85,7 +96,8 @@ class DetectionModule(export_base.ExportModule):
 
     # Normalizes image with mean and std pixel values.
     image = preprocess_ops.normalize_image(
-        image, offset=preprocess_ops.MEAN_RGB, scale=preprocess_ops.STDDEV_RGB)
+        image, offset=preprocess_ops.MEAN_RGB, scale=preprocess_ops.STDDEV_RGB
+    )
 
     image, image_info = preprocess_ops.resize_and_crop_image(
         image,
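Note: another formatting-only change. For context, `preprocess_ops.normalize_image` applies the usual per-channel `(image - mean) / std` transform. A self-contained sketch with illustrative constants (the package's actual values live in `preprocess_ops.MEAN_RGB` and `preprocess_ops.STDDEV_RGB`):

import tensorflow as tf

# Illustrative per-channel constants, not the package's actual values.
MEAN_RGB = tf.constant([123.68, 116.78, 103.94])
STDDEV_RGB = tf.constant([58.40, 57.12, 57.38])


def normalize_image(image: tf.Tensor) -> tf.Tensor:
  """Applies (image - mean) / std per channel."""
  image = tf.cast(image, tf.float32)
  return (image - MEAN_RGB) / STDDEV_RGB


print(normalize_image(tf.zeros([4, 4, 3], tf.uint8)).shape)  # (4, 4, 3)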
@@ -131,20 +143,24 @@ class DetectionModule(export_base.ExportModule):
 
     Args:
       images: The images tensor.
+
     Returns:
       images: The images tensor cast to float.
       anchor_boxes: Dict mapping anchor levels to anchor boxes.
       image_info: Tensor containing the details of the image resizing.
-
     """
     model_params = self.params.task.model
     with tf.device('cpu:0'):
       # Tensor Specs for map_fn outputs (images, anchor_boxes, and image_info).
-      images_spec = tf.TensorSpec(shape=self._padded_size + [3],
-                                  dtype=tf.float32)
+      images_spec = tf.TensorSpec(
+          shape=self._padded_size + [3], dtype=tf.float32
+      )
 
-      num_anchors = model_params.anchor.num_scales * len(
-          model_params.anchor.aspect_ratios) * 4
+      num_anchors = (
+          model_params.anchor.num_scales
+          * len(model_params.anchor.aspect_ratios)
+          * 4
+      )
       anchor_shapes = []
       for level in range(model_params.min_level, model_params.max_level + 1):
         anchor_level_spec = tf.TensorSpec(
@@ -153,7 +169,8 @@ class DetectionModule(export_base.ExportModule):
                 math.ceil(self._padded_size[1] / 2**level),
                 num_anchors,
             ],
-            dtype=tf.float32)
+            dtype=tf.float32,
+        )
         anchor_shapes.append((str(level), anchor_level_spec))
 
       image_info_spec = tf.TensorSpec(shape=[4, 2], dtype=tf.float32)
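Note: as a sanity check on the rewrapped `num_anchors` expression, each spatial location carries `num_scales * len(aspect_ratios)` anchors and each anchor contributes 4 box coordinates, so with, say, 3 scales and aspect ratios [0.5, 1.0, 2.0] the per-level anchor tensors have 3 * 3 * 4 = 36 channels in their last dimension.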
@@ -163,9 +180,14 @@ class DetectionModule(export_base.ExportModule):
           tf.map_fn(
               self._build_inputs,
               elems=images,
-              fn_output_signature=(images_spec, dict(anchor_shapes),
-                                   image_info_spec),
-              parallel_iterations=32))
+              fn_output_signature=(
+                  images_spec,
+                  dict(anchor_shapes),
+                  image_info_spec,
+              ),
+              parallel_iterations=32,
+          ),
+      )
 
     return images, anchor_boxes, image_info
 
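Note: the rewrap makes the `fn_output_signature` tuple readable at a glance. That argument tells `tf.map_fn` the structure and dtypes of the mapped function's outputs when they differ from its inputs, as they do here (one image in; an image, an anchor dict, and image info out). A self-contained sketch of the same pattern:

import tensorflow as tf


def per_image(image):
  # One uint8 image in; a (float image, dict) pair out. Because the output
  # structure differs from the input, map_fn needs fn_output_signature.
  image = tf.cast(image, tf.float32)
  return image, {'mean': tf.reduce_mean(image)}


images = tf.zeros([2, 4, 4, 3], dtype=tf.uint8)
floats, stats = tf.map_fn(
    per_image,
    elems=images,
    fn_output_signature=(
        tf.TensorSpec([4, 4, 3], tf.float32),
        {'mean': tf.TensorSpec([], tf.float32)},
    ),
    parallel_iterations=32,
)
print(floats.shape, stats['mean'].shape)  # (2, 4, 4, 3) (2,)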
@@ -174,6 +196,7 @@ class DetectionModule(export_base.ExportModule):
 
     Args:
       images: uint8 Tensor of shape [batch_size, None, None, 3]
+
     Returns:
       Tensor holding detection output logits.
     """
@@ -190,10 +213,15 @@ class DetectionModule(export_base.ExportModule):
       # [desired_height, desired_width], [y_scale, x_scale],
       # [y_offset, x_offset]]. When input_type is tflite, input image is
       # supposed to be preprocessed already.
-      image_info = tf.convert_to_tensor([[
-          self._input_image_size, self._input_image_size, [1.0, 1.0], [0, 0]
-      ]],
-                                        dtype=tf.float32)
+      image_info = tf.convert_to_tensor(
+          [[
+              self._input_image_size,
+              self._input_image_size,
+              [1.0, 1.0],
+              [0, 0],
+          ]],
+          dtype=tf.float32,
+      )
       input_image_shape = image_info[:, 1, :]
 
       # To overcome keras.Model extra limitation to save a model with layers that
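Note: per the comment in the hunk, `image_info` packs four (height, width)-shaped pairs per image, so `image_info[:, 1, :]` pulls out the desired (post-resize) image size across the batch. A quick worked sketch:

import tensorflow as tf

# Rows per the comment above: [original_size, desired_size, scale, offset].
# One 640x640 image with an identity resize.
image_info = tf.constant(
    [[[640, 640], [640, 640], [1.0, 1.0], [0, 0]]], tf.float32
)
print(image_info[:, 1, :])  # [[640. 640.]] (the desired-size row)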
@@ -226,20 +254,23 @@ class DetectionModule(export_base.ExportModule):
       # point outputs.
       if export_config.cast_num_detections_to_float:
         detections['num_detections'] = tf.cast(
-            detections['num_detections'], dtype=tf.float32)
+            detections['num_detections'], dtype=tf.float32
+        )
       if export_config.cast_detection_classes_to_float:
         detections['detection_classes'] = tf.cast(
-            detections['detection_classes'], dtype=tf.float32)
+            detections['detection_classes'], dtype=tf.float32
+        )
 
       final_outputs = {
           'detection_boxes': detections['detection_boxes'],
           'detection_scores': detections['detection_scores'],
           'detection_classes': detections['detection_classes'],
-          'num_detections': detections['num_detections']
+          'num_detections': detections['num_detections'],
      }
       if 'detection_outer_boxes' in detections:
-        final_outputs['detection_outer_boxes'] = (
-            detections['detection_outer_boxes'])
+        final_outputs['detection_outer_boxes'] = detections[
+            'detection_outer_boxes'
+        ]
     else:
       # For RetinaNet model, apply export_config.
       if isinstance(self.params.task.model, configs.retinanet.RetinaNet):
@@ -250,7 +281,7 @@ class DetectionModule(export_base.ExportModule):
       detections = self._normalize_coordinates(detections, keys, image_info)
       final_outputs = {
           'decoded_boxes': detections['decoded_boxes'],
-          'decoded_box_scores': detections['decoded_box_scores']
+          'decoded_box_scores': detections['decoded_box_scores'],
       }
 
     if 'detection_masks' in detections.keys():
official/vision/serving/export_saved_model_lib.py CHANGED
@@ -15,7 +15,7 @@
 r"""Vision models export utility function for serving/inference."""
 
 import os
-from typing import Optional, List, Union, Text, Dict
+from typing import Dict, List, Optional, Union
 
 from absl import logging
 import tensorflow as tf, tf_keras
@@ -45,7 +45,7 @@ def export_inference_graph(
     log_model_flops_and_params: bool = False,
     checkpoint: Optional[tf.train.Checkpoint] = None,
     input_name: Optional[str] = None,
-    function_keys: Optional[Union[List[Text], Dict[Text, Text]]] = None,
+    function_keys: Optional[Union[List[str], Dict[str, str]]] = None,
     add_tpu_function_alias: Optional[bool] = False,
 ):
   """Exports inference graph for the model specified in the exp config.
@@ -83,57 +83,68 @@ def export_inference_graph(
 
   if export_checkpoint_subdir:
     output_checkpoint_directory = os.path.join(
-        export_dir, export_checkpoint_subdir)
+        export_dir, export_checkpoint_subdir
+    )
   else:
     output_checkpoint_directory = None
 
   if export_saved_model_subdir:
     output_saved_model_directory = os.path.join(
-        export_dir, export_saved_model_subdir)
+        export_dir, export_saved_model_subdir
+    )
   else:
     output_saved_model_directory = export_dir
 
   # TODO(arashwan): Offers a direct path to use ExportModule with Task objects.
   if not export_module:
-    if isinstance(params.task,
-                  configs.image_classification.ImageClassificationTask):
+    if isinstance(
+        params.task, configs.image_classification.ImageClassificationTask
+    ):
       export_module = image_classification.ClassificationModule(
           params=params,
          batch_size=batch_size,
          input_image_size=input_image_size,
          input_type=input_type,
          num_channels=num_channels,
-          input_name=input_name)
+          input_name=input_name,
+      )
    elif isinstance(params.task, configs.retinanet.RetinaNetTask) or isinstance(
-        params.task, configs.maskrcnn.MaskRCNNTask):
+        params.task, configs.maskrcnn.MaskRCNNTask
+    ):
      export_module = detection.DetectionModule(
          params=params,
          batch_size=batch_size,
          input_image_size=input_image_size,
          input_type=input_type,
          num_channels=num_channels,
-          input_name=input_name)
-    elif isinstance(params.task,
-                    configs.semantic_segmentation.SemanticSegmentationTask):
+          input_name=input_name,
+      )
+    elif isinstance(
+        params.task, configs.semantic_segmentation.SemanticSegmentationTask
+    ):
      export_module = semantic_segmentation.SegmentationModule(
          params=params,
          batch_size=batch_size,
          input_image_size=input_image_size,
          input_type=input_type,
          num_channels=num_channels,
-          input_name=input_name)
-    elif isinstance(params.task,
-                    configs.video_classification.VideoClassificationTask):
+          input_name=input_name,
+      )
+    elif isinstance(
+        params.task, configs.video_classification.VideoClassificationTask
+    ):
      export_module = video_classification.VideoClassificationModule(
          params=params,
          batch_size=batch_size,
          input_image_size=input_image_size,
          input_type=input_type,
          num_channels=num_channels,
-          input_name=input_name)
+          input_name=input_name,
+      )
    else:
-      raise ValueError('Export module not implemented for {} task.'.format(
-          type(params.task)))
+      raise ValueError(
+          'Export module not implemented for {} task.'.format(type(params.task))
+      )
 
  if add_tpu_function_alias:
    if input_type == 'image_tensor':
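Note: the block above dispatches on the task config type to pick an export module. For the end-to-end picture, a hedged usage sketch; the experiment name and paths are illustrative, and `exp_factory.get_exp_config` is the usual way to obtain `params` in this codebase:

# Hedged usage sketch; experiment name and paths are illustrative.
from official.common import registry_imports  # registers experiment configs
from official.core import exp_factory
from official.vision.serving import export_saved_model_lib

params = exp_factory.get_exp_config('retinanet_resnetfpn_coco')
export_saved_model_lib.export_inference_graph(
    input_type='image_tensor',
    batch_size=1,
    input_image_size=[640, 640],
    params=params,
    checkpoint_path='/tmp/ckpt',        # illustrative path
    export_dir='/tmp/exported_model',   # illustrative path
)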
@@ -160,7 +171,8 @@ def export_inference_graph(
       checkpoint=checkpoint,
       checkpoint_path=checkpoint_path,
       timestamped=False,
-      save_options=save_options)
+      save_options=save_options,
+  )
 
   if output_checkpoint_directory:
     ckpt = tf.train.Checkpoint(model=export_module.model)
@@ -171,16 +183,16 @@ def export_inference_graph(
     inputs_kwargs = None
     if isinstance(
         params.task,
-        (configs.retinanet.RetinaNetTask, configs.maskrcnn.MaskRCNNTask)):
+        (configs.retinanet.RetinaNetTask, configs.maskrcnn.MaskRCNNTask),
+    ):
       # We need to create inputs_kwargs argument to specify the input shapes for
       # subclass model that overrides model.call to take multiple inputs,
       # e.g., RetinaNet model.
       inputs_kwargs = {
-          'images':
-              tf.TensorSpec([1] + input_image_size + [num_channels],
-                            tf.float32),
-          'image_shape':
-              tf.TensorSpec([1, 2], tf.float32)
+          'images': tf.TensorSpec(
+              [1] + input_image_size + [num_channels], tf.float32
+          ),
+          'image_shape': tf.TensorSpec([1, 2], tf.float32),
       }
       dummy_inputs = {
           k: tf.ones(v.shape.as_list(), tf.float32)
@@ -191,9 +203,14 @@ def export_inference_graph(
     else:
       logging.info(
           'Logging model flops and params not implemented for %s task.',
-          type(params.task))
+          type(params.task),
+      )
       return
-    train_utils.try_count_flops(export_module.model, inputs_kwargs,
-                                os.path.join(export_dir, 'model_flops.txt'))
-    train_utils.write_model_params(export_module.model,
-                                   os.path.join(export_dir, 'model_params.txt'))
+    train_utils.try_count_flops(
+        export_module.model,
+        inputs_kwargs,
+        os.path.join(export_dir, 'model_flops.txt'),
+    )
+    train_utils.write_model_params(
+        export_module.model, os.path.join(export_dir, 'model_params.txt')
+    )
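Note: once exported, the SavedModel can be driven through its serving signature. A hedged sketch of inspecting and calling it; the input key name depends on the `input_name` passed at export time, so it is looked up rather than assumed:

import tensorflow as tf

# Illustrative path; 'serving_default' is the standard SavedModel signature key.
loaded = tf.saved_model.load('/tmp/exported_model')
infer = loaded.signatures['serving_default']

# Look up the input key rather than assuming it.
input_key = list(infer.structured_input_signature[1].keys())[0]
images = tf.zeros([1, 640, 640, 3], dtype=tf.uint8)
outputs = infer(**{input_key: images})
print(sorted(outputs.keys()))  # e.g. detection_boxes, detection_scores, ...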
{tf_models_nightly-2.17.0.dev20240530.dist-info → tf_models_nightly-2.17.0.dev20240602.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: tf-models-nightly
-Version: 2.17.0.dev20240530
+Version: 2.17.0.dev20240602
 Summary: TensorFlow Official Models
 Home-page: https://github.com/tensorflow/models
 Author: Google Inc.
{tf_models_nightly-2.17.0.dev20240530.dist-info → tf_models_nightly-2.17.0.dev20240602.dist-info}/RECORD RENAMED
@@ -3,7 +3,7 @@ official/common/__init__.py,sha256=Yqe5TjLrAR68lQ5G781dwlbbT6mELRByDipCtr4jQY4,6
 official/common/dataset_fn.py,sha256=MSRNhNGvMnZtjkuxFIZ90vGJgKlr2JeGaEYpz82Y3Ew,1858
 official/common/distribute_utils.py,sha256=FC4YgXohuUhp_-ZG5kNoerHdyOtGbRjCVdgKCKh5zyY,8572
 official/common/distribute_utils_test.py,sha256=ltR8wrIbfoX1miJElPQzZN2aioKX6Hx8-mfzoo-_WGc,4914
-official/common/flags.py,sha256=PT3F8-MMG0byEfu-PI5qqBUzl-Y4e2GAPDGLmYKTM-s,4286
+official/common/flags.py,sha256=weXgDeMK7tZ8eY8d5LgMLRQDJYuu2hrJF-XfpOqOP34,4579
 official/common/registry_imports.py,sha256=ez-j0sOvVyYBHHGM5hNjTF0jqHUbHI0zv-4SHe_AZ4s,843
 official/common/streamz_counters.py,sha256=OtdlnI5aosH_DWP8jx2MGEpjs5AhyMztr5E5qhrC4nE,1057
 official/core/__init__.py,sha256=84W8TGHlKgmzJjruHEJ0bGA4E4R_03tEeIkUcIGOP60,1265
@@ -1128,7 +1128,7 @@ official/vision/ops/spatial_transform_ops.py,sha256=PVEJGAn0ygtsrid84vD5GgV0jsjy
 official/vision/ops/target_gather.py,sha256=Ir3X76yXYEVFSYX5h-yfS8SMkY37GYuypBP2C8ykggo,3965
 official/vision/ops/target_gather_test.py,sha256=yiTZg7w4HYh19pw9yIDppvenscp8MSBGlhDko180l08,2561
 official/vision/serving/__init__.py,sha256=YlRnCfAvBWlB7gn-Dz32dxVIDBCzxH8PhdM14kHxPgs,702
-official/vision/serving/detection.py,sha256=uZY4m3_KqkW5dLtFroN3TQuHPUQUXhP4iY9CfpW7UwY,10765
+official/vision/serving/detection.py,sha256=33aMP4oYpWAro0YyD_PGx3i0CmztQ84PmSQTa3HFynU,11022
 official/vision/serving/detection_test.py,sha256=s3ueCplPII8Am7tPSWcUv9KUcAqh1AWb6X91_B6qZIM,9165
 official/vision/serving/export_base.py,sha256=ah8Cuh_cxpptCpRAjptvA00a-yWgd-KokLk3UBLdVt4,7363
 official/vision/serving/export_base_v2.py,sha256=GYIqt-xaOv4UztAKjx-acD-9i2pjftMw46DWRMy7Bsk,2741
@@ -1136,7 +1136,7 @@ official/vision/serving/export_base_v2_test.py,sha256=khuY14W9Oi8LGlv_CvMwFiNnFb
 official/vision/serving/export_module_factory.py,sha256=TRnqqLkGPVhfDFzsUC6dQIfjGA3Ctv7vSLiHkyOc6fg,3549
 official/vision/serving/export_module_factory_test.py,sha256=kDTOmPhdvFXKC57UNo4RR5asLPP5D3hW83gULt4aWlU,4605
 official/vision/serving/export_saved_model.py,sha256=oeP99VFTOTUcxs2H8wFZtJy2sxiIBCn6vVcEEPjAIgQ,5758
-official/vision/serving/export_saved_model_lib.py,sha256=kuDsoWRvxI2sg3b0NgxJKNrMRN_PlpB4go7a6XXSYO8,8123
+official/vision/serving/export_saved_model_lib.py,sha256=KtaQc26BMHAyf6OllnyFFy-mmNcxjne8KVQqjSoMdH8,8119
 official/vision/serving/export_saved_model_lib_test.py,sha256=ggDN65ndXlab7cF0HefDCENDnAhJgs5C3ZCx0boZeeg,2409
 official/vision/serving/export_saved_model_lib_v2.py,sha256=8GQ0IrEB2r2OxsAW5tW3l076HvPSRQTwomSRhiwYddA,3710
 official/vision/serving/export_tfhub.py,sha256=2-LzrgbnxvdawL2v0s2RgClc6R6YidKK2vnbEaBwRSY,3500
@@ -1208,9 +1208,9 @@ tensorflow_models/tensorflow_models_test.py,sha256=nc6A9K53OGqF25xN5St8EiWvdVbda
 tensorflow_models/nlp/__init__.py,sha256=4tA5Pf4qaFwT-fIFOpX7x7FHJpnyJT-5UgOeFYTyMlc,807
 tensorflow_models/uplift/__init__.py,sha256=mqfa55gweOdpKoaQyid4A_4u7xw__FcQeSIF0k_pYmI,999
 tensorflow_models/vision/__init__.py,sha256=zBorY_v5xva1uI-qxhZO3Qh-Dii-Suq6wEYh6hKHDfc,833
-tf_models_nightly-2.17.0.dev20240530.dist-info/AUTHORS,sha256=1dG3fXVu9jlo7bul8xuix5F5vOnczMk7_yWn4y70uw0,337
-tf_models_nightly-2.17.0.dev20240530.dist-info/LICENSE,sha256=WxeBS_DejPZQabxtfMOM_xn8qoZNJDQjrT7z2wG1I4U,11512
-tf_models_nightly-2.17.0.dev20240530.dist-info/METADATA,sha256=_wjDgDF_kcyFxGJsy9ZsDCkVP-9a6ScSgVC109WtmVI,1432
-tf_models_nightly-2.17.0.dev20240530.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
-tf_models_nightly-2.17.0.dev20240530.dist-info/top_level.txt,sha256=gum2FfO5R4cvjl2-QtP-S1aNmsvIZaFFT6VFzU0f4-g,33
-tf_models_nightly-2.17.0.dev20240530.dist-info/RECORD,,
+tf_models_nightly-2.17.0.dev20240602.dist-info/AUTHORS,sha256=1dG3fXVu9jlo7bul8xuix5F5vOnczMk7_yWn4y70uw0,337
+tf_models_nightly-2.17.0.dev20240602.dist-info/LICENSE,sha256=WxeBS_DejPZQabxtfMOM_xn8qoZNJDQjrT7z2wG1I4U,11512
+tf_models_nightly-2.17.0.dev20240602.dist-info/METADATA,sha256=YuCYy7aRmeIqaJgGHKDDFARblNeYHi4D4hUfmN_Tl90,1432
+tf_models_nightly-2.17.0.dev20240602.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
+tf_models_nightly-2.17.0.dev20240602.dist-info/top_level.txt,sha256=gum2FfO5R4cvjl2-QtP-S1aNmsvIZaFFT6VFzU0f4-g,33
+tf_models_nightly-2.17.0.dev20240602.dist-info/RECORD,,