tf-models-nightly 2.19.0.dev20250112__py2.py3-none-any.whl → 2.19.0.dev20250114__py2.py3-none-any.whl

official/vision/configs/retinanet.py
@@ -137,6 +137,7 @@ class RetinaNetHead(hyperparams.Config):
 @dataclasses.dataclass
 class DetectionGenerator(hyperparams.Config):
   apply_nms: bool = True
+  decode_boxes: bool = True
   pre_nms_top_k: int = 5000
   pre_nms_score_threshold: float = 0.05
   nms_iou_threshold: float = 0.5
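This hunk adds a `decode_boxes` flag next to `apply_nms` in the RetinaNet detection generator config. A minimal usage sketch (direct dataclass construction shown purely for illustration; in practice the flag is usually set through experiment config overrides, as the updated test further below does):

# Illustrative only: construct the config dataclass with the new flag.
from official.vision.configs import retinanet as retinanet_cfg

generator = retinanet_cfg.DetectionGenerator(
    apply_nms=False,     # also skip in-graph NMS, as the new export test does
    decode_boxes=False,  # new flag: emit raw regression outputs, not decoded boxes
)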
official/vision/dataloaders/segmentation_input.py
@@ -157,9 +157,14 @@ class Parser(parser.Parser):
         dtype=tf.uint8,
     )
     image = tf.reshape(image, (height, width, self._image_feature.num_channels))
-    # Normalizes the image feature with mean and std values, which are divided
-    # by 255 because an uint8 image are re-scaled automatically. Images other
-    # than uint8 type will be wrongly normalized.
+    # Normalizes the image feature.
+    # The mean and stddev values are divided by 255 to ensure correct
+    # normalization, as the input `uint8` image is automatically converted to
+    # `float32` and rescaled to values in the range [0, 1] before the
+    # normalization happens (as a pre-processing step). So, we re-scale the
+    # mean and stddev values to the range [0, 1] beforehand.
+    # See `preprocess_ops.normalize_image` for details on the expected ranges
+    # for the image mean (`offset`) and stddev (`scale`).
     image = preprocess_ops.normalize_image(
         image,
         [mean / 255.0 for mean in self._image_feature.mean],
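The rewritten comment spells out why the statistics are pre-divided by 255. A small self-contained check of that algebra (values are illustrative, not the dataloader's real statistics):

# Once a uint8 image is rescaled to [0, 1], normalizing with mean/255 and
# stddev/255 is identical to normalizing the raw [0, 255] pixels directly,
# since (x/255 - m/255) / (s/255) == (x - m) / s.
import numpy as np

x = np.array([0.0, 64.0, 128.0, 255.0])  # raw pixel values
m, s = 127.5, 63.75                      # illustrative mean/stddev in [0, 255]
assert np.allclose((x - m) / s, (x / 255.0 - m / 255.0) / (s / 255.0))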
official/vision/ops/preprocess_ops.py
@@ -82,13 +82,28 @@ def normalize_image(
 ) -> tf.Tensor:
   """Normalizes the image to zero mean and unit variance.
 
-  If the input image dtype is float, it is expected to either have values in
-  [0, 1) and offset is MEAN_NORM, or have values in [0, 255] and offset is
-  MEAN_RGB.
+  This function normalizes the input image by subtracting the `offset`
+  and dividing by the `scale`.
+
+  **Important Note about Input Types and Normalization:**
+
+  * **Integer Images:** If the input `image` is an integer type (e.g., `uint8`),
+    the provided `offset` and `scale` values should be already **normalized**
+    to the range [0, 1]. This is because the function converts integer images to
+    float32 with values in the range [0, 1] before the normalization happens.
+
+  * **Float Images:** If the input `image` is a float type (e.g., `float32`),
+    the `offset` and `scale` values should be in the **same range** as the
+    image data.
+    - If the image has values in [0, 1], the `offset` and `scale` should
+      also be in [0, 1].
+    - If the image has values in [0, 255], the `offset` and `scale` should
+      also be in [0, 255].
 
   Args:
-    image: A tf.Tensor in either (1) float dtype with values in range [0, 1) or
-      [0, 255], or (2) int type with values in range [0, 255].
+    image: A `tf.Tensor` in either:
+      (1) float dtype with values in range [0, 1) or [0, 255], or
+      (2) int type with values in range [0, 255].
     offset: A tuple of mean values to be subtracted from the image.
     scale: A tuple of normalization factors.
 
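A short usage sketch of the documented contract (the offset/scale values are illustrative placeholders, not the module's actual constants):

import tensorflow as tf
from official.vision.ops import preprocess_ops

# Integer input: offset/scale must already be normalized to [0, 1].
uint8_image = tf.zeros([640, 640, 3], dtype=tf.uint8)
out = preprocess_ops.normalize_image(
    uint8_image, offset=[0.485, 0.456, 0.406], scale=[0.229, 0.224, 0.225])

# Float input with values in [0, 255]: offset/scale must be in [0, 255] too.
float_image = tf.cast(uint8_image, tf.float32)
out = preprocess_ops.normalize_image(
    float_image, offset=[123.68, 116.78, 103.94], scale=[58.40, 57.12, 57.38])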
official/vision/serving/detection.py
@@ -136,6 +136,14 @@ class DetectionModule(export_base.ExportModule):
 
     return detections_dict
 
+  def _flatten_output(self, feature_map, feature_size=4):
+    flatten_outputs = []
+    for level_output in feature_map.values():
+      flatten_outputs.append(
+          tf.reshape(level_output, (self._batch_size, -1, feature_size))
+      )
+    return tf.concat(flatten_outputs, axis=1)
+
   def preprocess(
       self, images: tf.Tensor
   ) -> Tuple[tf.Tensor, Mapping[str, tf.Tensor], tf.Tensor]:
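The new `_flatten_output` helper collapses a per-level feature-map dict into one `[batch, num_anchors_total, feature_size]` tensor. A standalone sketch of the same reshape-and-concat logic (the level keys, spatial sizes, and 9-anchors-per-location figure are illustrative):

import tensorflow as tf

batch_size, anchors_per_location = 1, 9
feature_map = {  # per-level raw box outputs, channels = 4 * anchors_per_location
    '3': tf.zeros([batch_size, 80, 80, 4 * anchors_per_location]),
    '4': tf.zeros([batch_size, 40, 40, 4 * anchors_per_location]),
}
flattened = tf.concat(
    [tf.reshape(t, (batch_size, -1, 4)) for t in feature_map.values()],
    axis=1,
)
print(flattened.shape)  # (1, 80*80*9 + 40*40*9, 4) == (1, 72000, 4)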
official/vision/serving/detection.py
@@ -271,6 +279,18 @@ class DetectionModule(export_base.ExportModule):
       final_outputs['detection_outer_boxes'] = detections[
           'detection_outer_boxes'
       ]
+    elif (
+        isinstance(self.params.task.model, configs.retinanet.RetinaNet)
+        and not self.params.task.model.detection_generator.decode_boxes
+    ):
+      final_outputs = {
+          'raw_boxes': self._flatten_output(detections['box_outputs'], 4),
+          'raw_scores': tf.sigmoid(
+              self._flatten_output(
+                  detections['cls_outputs'], self.params.task.model.num_classes
+              )
+          ),
+      }
     else:
       # For RetinaNet model, apply export_config.
       if isinstance(self.params.task.model, configs.retinanet.RetinaNet):
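With `apply_nms=False` and `decode_boxes=False`, the export now returns the flattened raw head outputs, leaving anchor-based box decoding and NMS to the caller. A hedged sketch of consuming such an export (the export path, input dtype, and shapes are assumptions; it mirrors the new test added below):

import tensorflow as tf

imported = tf.saved_model.load('/tmp/retinanet_export')  # hypothetical export dir
serving_fn = imported.signatures['serving_default']
images = tf.zeros([1, 640, 640, 3], dtype=tf.float32)  # dummy input, dtype assumed
outputs = serving_fn(images)

raw_boxes = outputs['raw_boxes']    # [batch, num_anchors, 4], undecoded regressions
raw_scores = outputs['raw_scores']  # [batch, num_anchors, num_classes], post-sigmoid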
official/vision/serving/detection_test.py
@@ -38,6 +38,7 @@ class DetectionExportTest(tf.test.TestCase, parameterized.TestCase):
       normalized_coordinates=False,
       nms_version='batched',
       output_intermediate_features=False,
+      decode_boxes=True,
   ):
     params = exp_factory.get_exp_config(experiment_name)
     params.task.model.outer_boxes_scale = outer_boxes_scale
@@ -48,6 +49,8 @@ class DetectionExportTest(tf.test.TestCase, parameterized.TestCase):
     params.task.model.detection_generator.nms_version = nms_version
     if output_intermediate_features:
       params.task.export_config.output_intermediate_features = True
+    if not decode_boxes:
+      params.task.model.detection_generator.decode_boxes = False
     detection_module = detection.DetectionModule(
         params,
         batch_size=1,
@@ -232,6 +235,41 @@ class DetectionExportTest(tf.test.TestCase, parameterized.TestCase):
         max_values.numpy(), tf.ones_like(max_values).numpy()
     )
 
+  @parameterized.parameters(
+      'retinanet_mobile_coco',
+      'retinanet_spinenet_coco',
+  )
+  def test_export_without_decoding_boxes(
+      self,
+      experiment_name,
+  ):
+    input_type = 'tflite'
+    tmp_dir = self.get_temp_dir()
+    module = self._get_detection_module(
+        experiment_name,
+        input_type=input_type,
+        apply_nms=False,
+        decode_boxes=False,
+    )
+
+    self._export_from_module(module, input_type, tmp_dir)
+
+    imported = tf.saved_model.load(tmp_dir)
+    detection_fn = imported.signatures['serving_default']
+
+    images = self._get_dummy_input(
+        input_type, batch_size=1, image_size=(640, 640)
+    )
+    outputs = detection_fn(tf.constant(images))
+
+    self.assertContainsSubset(
+        {
+            'raw_boxes',
+            'raw_scores',
+        },
+        outputs.keys(),
+    )
+
 
 if __name__ == '__main__':
   tf.test.main()
tf_models_nightly-2.19.0.dev20250112.dist-info/METADATA → tf_models_nightly-2.19.0.dev20250114.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: tf-models-nightly
-Version: 2.19.0.dev20250112
+Version: 2.19.0.dev20250114
 Summary: TensorFlow Official Models
 Home-page: https://github.com/tensorflow/models
 Author: Google Inc.
tf_models_nightly-2.19.0.dev20250112.dist-info/RECORD → tf_models_nightly-2.19.0.dev20250114.dist-info/RECORD
@@ -1005,7 +1005,7 @@ official/vision/configs/image_classification.py,sha256=JiJUYKI82NtlNXEjATF0JzSmh
 official/vision/configs/image_classification_test.py,sha256=cdlkY5fPaqNZUc4-A2OOEUS1v3T-ekxqUc0Vm4IknBI,1867
 official/vision/configs/maskrcnn.py,sha256=yL8kggxXaCTIpSkcozAV2UudO7UqVcEh1_-rMd7oGDs,24236
 official/vision/configs/maskrcnn_test.py,sha256=Wfkbz30h2qxPcpuu6CEpQsf8I_2df6y10-4bRLsWlj8,1733
-official/vision/configs/retinanet.py,sha256=oCKinkh4IyPslmI1pakwi6dVziwjkZ2cIcpSoGRjqnM,17806
+official/vision/configs/retinanet.py,sha256=68GPsklbUThOlA4IeRM2gRBeoRMzNVX8wWYmTSDfz34,17834
 official/vision/configs/retinanet_test.py,sha256=ffS3QufQMLF8FZhKNmi7Yr1RDTnIyZ1XKQ9agr2EyW8,1699
 official/vision/configs/semantic_segmentation.py,sha256=xJbCWl46hTlUkw_S5UeKctdjuAZPTqE6UtSqQaLezYc,30808
 official/vision/configs/semantic_segmentation_test.py,sha256=va-ZG6CtBKcs0NicZe6WmJvHxPxxih7nB0orNtrRiEA,1867
@@ -1030,7 +1030,7 @@ official/vision/dataloaders/input_reader_factory.py,sha256=WpvSA8qyqAo3wkmme4WqX
 official/vision/dataloaders/maskrcnn_input.py,sha256=6QhSnKv7Q9qlKrQ6VnWsOxKsOq55MxBs9Rr47ZStUks,16803
 official/vision/dataloaders/parser.py,sha256=nMXnhigMa_ascSJ2OK88xi4HdE9xvfL3G4oMrHau-t4,2315
 official/vision/dataloaders/retinanet_input.py,sha256=0UgHSuhjxii0IpAODqc1eW3CIfMio6IUg5c5vCthFqg,18811
-official/vision/dataloaders/segmentation_input.py,sha256=Klg5KAChYZDRvqzZfyIzdPy54rTlWYZp2AotolD3WX8,12934
+official/vision/dataloaders/segmentation_input.py,sha256=PaoBn6QoONuIp-t5YChY2n_gRje-tEl5rSnfGI0b8Sc,13254
 official/vision/dataloaders/tf_example_decoder.py,sha256=9yCT6uSLMpmw50w7zdaRR_BXy6vIvliLZntrYAgzD18,8647
 official/vision/dataloaders/tf_example_decoder_test.py,sha256=iRPq780PvET64Ooa-1m5ahMp8lAObpC3l8ez8aATr1A,12610
 official/vision/dataloaders/tf_example_label_map_decoder.py,sha256=zquhN__lZv-S4rEo595Sveno64c-lNUxlf6-z1w7J3c,2579
@@ -1159,7 +1159,7 @@ official/vision/ops/iou_similarity_test.py,sha256=x5jlcMqMCUYC5cRgdbR0VlAW67AoXo
 official/vision/ops/mask_ops.py,sha256=X0hHlL0vUKl1Jt2LWbZDDulpS1CSJPSg-2VeqXb0oRM,10250
 official/vision/ops/mask_ops_test.py,sha256=leC0GTvdNeT0XyDnRmZwS1JSJ6NDMOpJcqNlUDpMwQI,2816
 official/vision/ops/nms.py,sha256=zUzJLuL-vk5feTK1MEOI49qmK8VxGVGl8GIMzN702yk,8106
-official/vision/ops/preprocess_ops.py,sha256=1NcE_iFPyzREdql0CRKGobFieCA6niSJcXV3Uqw635k,42588
+official/vision/ops/preprocess_ops.py,sha256=n6sgzbXJppln8z0q-uqdIMLNMP44VAEeyP9w8xDRUy0,43301
 official/vision/ops/preprocess_ops_3d.py,sha256=o0ivTBK2BwaiPp_NhVG_yBc10VUySxfE7eKQkL7RNaU,16762
 official/vision/ops/preprocess_ops_3d_test.py,sha256=LA3-Ue4FTjdsr5Kl_BnpAMNcfikWylMisD2GrBTOLzA,9309
 official/vision/ops/preprocess_ops_test.py,sha256=nuNRdVL9PETUDHic-2pm7PTTtr4KN79CoDw6_M8Zw9w,17638
@@ -1168,8 +1168,8 @@ official/vision/ops/spatial_transform_ops.py,sha256=PVEJGAn0ygtsrid84vD5GgV0jsjy
 official/vision/ops/target_gather.py,sha256=Ir3X76yXYEVFSYX5h-yfS8SMkY37GYuypBP2C8ykggo,3965
 official/vision/ops/target_gather_test.py,sha256=yiTZg7w4HYh19pw9yIDppvenscp8MSBGlhDko180l08,2561
 official/vision/serving/__init__.py,sha256=YlRnCfAvBWlB7gn-Dz32dxVIDBCzxH8PhdM14kHxPgs,702
-official/vision/serving/detection.py,sha256=33aMP4oYpWAro0YyD_PGx3i0CmztQ84PmSQTa3HFynU,11022
-official/vision/serving/detection_test.py,sha256=s3ueCplPII8Am7tPSWcUv9KUcAqh1AWb6X91_B6qZIM,9165
+official/vision/serving/detection.py,sha256=VvX3s6CqLlkveGla7EoWWnR7iDn1edJLTbtxhnuSUOs,11759
+official/vision/serving/detection_test.py,sha256=TZTVfJusX8Z4ycQSmp72e0yiOVhdRGVHMW2Lm2v-mYc,10119
 official/vision/serving/export_base.py,sha256=ah8Cuh_cxpptCpRAjptvA00a-yWgd-KokLk3UBLdVt4,7363
 official/vision/serving/export_base_v2.py,sha256=GYIqt-xaOv4UztAKjx-acD-9i2pjftMw46DWRMy7Bsk,2741
 official/vision/serving/export_base_v2_test.py,sha256=khuY14W9Oi8LGlv_CvMwFiNnFbgpPVzvvD_hugf1_lk,2880
@@ -1248,9 +1248,9 @@ tensorflow_models/tensorflow_models_test.py,sha256=nc6A9K53OGqF25xN5St8EiWvdVbda
 tensorflow_models/nlp/__init__.py,sha256=4tA5Pf4qaFwT-fIFOpX7x7FHJpnyJT-5UgOeFYTyMlc,807
 tensorflow_models/uplift/__init__.py,sha256=mqfa55gweOdpKoaQyid4A_4u7xw__FcQeSIF0k_pYmI,999
 tensorflow_models/vision/__init__.py,sha256=zBorY_v5xva1uI-qxhZO3Qh-Dii-Suq6wEYh6hKHDfc,833
-tf_models_nightly-2.19.0.dev20250112.dist-info/AUTHORS,sha256=1dG3fXVu9jlo7bul8xuix5F5vOnczMk7_yWn4y70uw0,337
-tf_models_nightly-2.19.0.dev20250112.dist-info/LICENSE,sha256=WxeBS_DejPZQabxtfMOM_xn8qoZNJDQjrT7z2wG1I4U,11512
-tf_models_nightly-2.19.0.dev20250112.dist-info/METADATA,sha256=Fl1ZTj_Bd4PBnUvs1el2XDdtdVhDsSuCrlbqqyEsvSk,1432
-tf_models_nightly-2.19.0.dev20250112.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
-tf_models_nightly-2.19.0.dev20250112.dist-info/top_level.txt,sha256=gum2FfO5R4cvjl2-QtP-S1aNmsvIZaFFT6VFzU0f4-g,33
-tf_models_nightly-2.19.0.dev20250112.dist-info/RECORD,,
+tf_models_nightly-2.19.0.dev20250114.dist-info/AUTHORS,sha256=1dG3fXVu9jlo7bul8xuix5F5vOnczMk7_yWn4y70uw0,337
+tf_models_nightly-2.19.0.dev20250114.dist-info/LICENSE,sha256=WxeBS_DejPZQabxtfMOM_xn8qoZNJDQjrT7z2wG1I4U,11512
+tf_models_nightly-2.19.0.dev20250114.dist-info/METADATA,sha256=wqYpNROU39Q3ZstxpK1sjbBxTm_7WHFDftL4C-opllo,1432
+tf_models_nightly-2.19.0.dev20250114.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
+tf_models_nightly-2.19.0.dev20250114.dist-info/top_level.txt,sha256=gum2FfO5R4cvjl2-QtP-S1aNmsvIZaFFT6VFzU0f4-g,33
+tf_models_nightly-2.19.0.dev20250114.dist-info/RECORD,,