tf-models-nightly 2.12.0.dev20230522__py2.py3-none-any.whl → 2.12.0.dev20230524__py2.py3-none-any.whl

This diff compares publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their public registries.
--- official/projects/qat/vision/modeling/heads/dense_prediction_heads.py
+++ official/projects/qat/vision/modeling/heads/dense_prediction_heads.py
@@ -13,10 +13,7 @@
 # limitations under the License.
 
 """Contains definitions of dense prediction heads."""
-from __future__ import annotations
-
-import copy
-from typing import Any, Dict, List, Mapping, Optional, Union, Type
+from typing import List, Mapping, Union, Optional, Any, Dict
 
 # Import libraries
 
@@ -29,80 +26,6 @@ from official.projects.qat.vision.quantization import configs
 from official.projects.qat.vision.quantization import helper
 
 
-class SeparableConv2DQuantized(tf.keras.layers.Layer):
-  """Quantized SeperableConv2D."""
-
-  def __init__(self,
-               name: Optional[str] = None,
-               last_quantize: bool = False,
-               **conv_kwargs):
-    """Initializes a SeparableConv2DQuantized.
-
-    Args:
-      name: The name of the layer.
-      last_quantize: A `bool` indicates whether add quantization for the output.
-      **conv_kwargs: A keyword arguments to be used for conv and dwconv.
-    """
-
-    super().__init__(name=name)
-    self._conv_kwargs = copy.deepcopy(conv_kwargs)
-    self._name = name
-    self._last_quantize = last_quantize
-
-  def build(self, input_shape: Union[tf.TensorShape, List[tf.TensorShape]]):
-    """Creates the child layers of the layer."""
-    depthwise_conv2d_quantized = helper.quantize_wrapped_layer(
-        tf.keras.layers.DepthwiseConv2D,
-        configs.Default8BitConvQuantizeConfig(
-            ['depthwise_kernel'], [], True))
-    conv2d_quantized = helper.quantize_wrapped_layer(
-        tf.keras.layers.Conv2D,
-        configs.Default8BitConvQuantizeConfig(
-            ['kernel'], [], self._last_quantize))
-
-    dwconv_kwargs = self._conv_kwargs.copy()
-    # Depthwise conv input filters is always equal to output filters.
-    # This filters argument only needed for the point-wise conv2d op.
-    del dwconv_kwargs['filters']
-    dwconv_kwargs.update({
-        'activation': None,
-        'use_bias': False,
-    })
-    self.dw_conv = depthwise_conv2d_quantized(name='dw', **dwconv_kwargs)
-
-    conv_kwargs = self._conv_kwargs.copy()
-    conv_kwargs.update({
-        'kernel_size': (1, 1),
-        'strides': (1, 1),
-        'padding': 'valid',
-        'groups': 1,
-    })
-
-    self.conv = conv2d_quantized(name='pw', **conv_kwargs)
-
-  def call(self, inputs: tf.Tensor) -> tf.Tensor:
-    """Call the separable conv layer."""
-    x = self.dw_conv(inputs)
-    outputs = self.conv(x)
-    return outputs
-
-  def get_config(self) -> Dict[str, Any]:
-    """Returns the config of the layer."""
-    config = self._conv_kwargs.copy()
-    config.update({
-        'name': self._name,
-        'last_quantize': self._last_quantize,
-    })
-    return config
-
-  @classmethod
-  def from_config(
-      cls: Type[SeparableConv2DQuantized],
-      config: Dict[str, Any]) -> SeparableConv2DQuantized:
-    """Creates a layer from its config."""
-    return cls(**config)
-
-
 @tf.keras.utils.register_keras_serializable(package='Vision')
 class RetinaNetHeadQuantized(tf.keras.layers.Layer):
   """Creates a RetinaNet quantized head."""
@@ -201,7 +124,7 @@ class RetinaNetHeadQuantized(tf.keras.layers.Layer):
   def build(self, input_shape: Union[tf.TensorShape, List[tf.TensorShape]]):
     """Creates the variables of the head."""
     if self._config_dict['use_separable_conv']:
-      conv_op = SeparableConv2DQuantized
+      conv_op = helper.SeparableConv2DQuantized
     else:
       conv_op = helper.quantize_wrapped_layer(
           tf.keras.layers.Conv2D,
--- official/projects/qat/vision/quantization/helper.py
+++ official/projects/qat/vision/quantization/helper.py
@@ -13,7 +13,11 @@
 # limitations under the License.
 
 """Quantization helpers."""
-from typing import Any, Dict
+
+from __future__ import annotations
+
+import copy
+from typing import Any, Dict, List, Optional, Type, Union
 
 import tensorflow as tf
 
@@ -22,20 +26,42 @@ from official.projects.qat.vision.quantization import configs
 
 
 _QUANTIZATION_WEIGHT_NAMES = [
-    'output_max', 'output_min', 'optimizer_step', 'kernel_min', 'kernel_max',
-    'add_three_min', 'add_three_max', 'divide_six_min', 'divide_six_max',
-    'depthwise_kernel_min', 'depthwise_kernel_max',
-    'reduce_mean_quantizer_vars_min', 'reduce_mean_quantizer_vars_max',
-    'quantize_layer_min', 'quantize_layer_max',
-    'quantize_layer_1_min', 'quantize_layer_1_max',
-    'quantize_layer_2_min', 'quantize_layer_2_max',
-    'quantize_layer_3_min', 'quantize_layer_3_max',
-    'post_activation_min', 'post_activation_max',
+    'output_max',
+    'output_min',
+    'optimizer_step',
+    'kernel_min',
+    'kernel_max',
+    'add_three_min',
+    'add_three_max',
+    'divide_six_min',
+    'divide_six_max',
+    'depthwise_kernel_min',
+    'depthwise_kernel_max',
+    'pointwise_kernel_min',
+    'pointwise_kernel_max',
+    'reduce_mean_quantizer_vars_min',
+    'reduce_mean_quantizer_vars_max',
+    'quantize_layer_min',
+    'quantize_layer_max',
+    'quantize_layer_1_min',
+    'quantize_layer_1_max',
+    'quantize_layer_2_min',
+    'quantize_layer_2_max',
+    'quantize_layer_3_min',
+    'quantize_layer_3_max',
+    'post_activation_min',
+    'post_activation_max',
 ]
 
 _ORIGINAL_WEIGHT_NAME = [
-    'kernel', 'depthwise_kernel', 'gamma', 'beta', 'moving_mean',
-    'moving_variance', 'bias'
+    'kernel',
+    'depthwise_kernel',
+    'pointwise_kernel',
+    'gamma',
+    'beta',
+    'moving_mean',
+    'moving_variance',
+    'bias',
 ]
 
 
@@ -141,6 +167,84 @@ def norm_by_activation(activation, norm_quantized, norm_no_quantized):
     return norm_no_quantized
 
 
+class SeparableConv2DQuantized(tf.keras.layers.Layer):
+  """Quantized SeperableConv2D."""
+
+  def __init__(
+      self,
+      name: Optional[str] = None,
+      last_quantize: bool = False,
+      **conv_kwargs,
+  ):
+    """Initializes a SeparableConv2DQuantized.
+
+    Args:
+      name: The name of the layer.
+      last_quantize: A `bool` indicates whether add quantization for the output.
+      **conv_kwargs: A keyword arguments to be used for conv and dwconv.
+    """
+
+    super().__init__(name=name)
+    self._conv_kwargs = copy.deepcopy(conv_kwargs)
+    self._name = name
+    self._last_quantize = last_quantize
+
+  def build(self, input_shape: Union[tf.TensorShape, List[tf.TensorShape]]):
+    """Creates the child layers of the layer."""
+    depthwise_conv2d_quantized = quantize_wrapped_layer(
+        tf.keras.layers.DepthwiseConv2D,
+        configs.Default8BitConvQuantizeConfig(['depthwise_kernel'], [], True),
+    )
+    conv2d_quantized = quantize_wrapped_layer(
+        tf.keras.layers.Conv2D,
+        configs.Default8BitConvQuantizeConfig(
+            ['kernel'], [], self._last_quantize
+        ),
+    )
+
+    dwconv_kwargs = self._conv_kwargs.copy()
+    # Depthwise conv input filters is always equal to output filters.
+    # This filters argument only needed for the point-wise conv2d op.
+    del dwconv_kwargs['filters']
+    dwconv_kwargs.update({
+        'activation': None,
+        'use_bias': False,
+    })
+    self.dw_conv = depthwise_conv2d_quantized(name='dw', **dwconv_kwargs)
+
+    conv_kwargs = self._conv_kwargs.copy()
+    conv_kwargs.update({
+        'kernel_size': (1, 1),
+        'strides': (1, 1),
+        'padding': 'valid',
+        'groups': 1,
+    })
+
+    self.conv = conv2d_quantized(name='pw', **conv_kwargs)
+
+  def call(self, inputs: tf.Tensor) -> tf.Tensor:
+    """Call the separable conv layer."""
+    x = self.dw_conv(inputs)
+    outputs = self.conv(x)
+    return outputs
+
+  def get_config(self) -> Dict[str, Any]:
+    """Returns the config of the layer."""
+    config = self._conv_kwargs.copy()
+    config.update({
+        'name': self._name,
+        'last_quantize': self._last_quantize,
+    })
+    return config
+
+  @classmethod
+  def from_config(
+      cls: Type[SeparableConv2DQuantized], config: Dict[str, Any]
+  ) -> SeparableConv2DQuantized:
+    """Creates a layer from its config."""
+    return cls(**config)
+
+
 Conv2DQuantized = quantize_wrapped_layer(
     tf.keras.layers.Conv2D,
     configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'], False))
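
The hunk above moves SeparableConv2DQuantized into the quantization helper module, where it builds a quantize-wrapped DepthwiseConv2D ('dw') followed by a quantize-wrapped 1x1 Conv2D ('pw') from the same constructor kwargs. The sketch below is a minimal usage example, not part of the diff; it assumes TensorFlow and the tensorflow-model-optimization dependency used by the QAT project are installed, and the layer name, kwargs, and shapes are illustrative.

# Minimal usage sketch (not part of the diff). The layer name, kwargs, and
# shapes below are illustrative assumptions.
import tensorflow as tf

from official.projects.qat.vision.quantization import helper

# A 3x3 quantized depthwise conv followed by a quantized 1x1 pointwise conv
# with 64 output filters. 'filters' must be passed: build() drops it for the
# depthwise step and keeps it for the pointwise conv.
sep_conv = helper.SeparableConv2DQuantized(
    name='example_sep_conv',
    last_quantize=True,
    filters=64,
    kernel_size=3,
    strides=1,
    padding='same',
    use_bias=True,
)

features = tf.random.uniform([1, 32, 32, 64])  # NHWC feature map
outputs = sep_conv(features)  # same spatial size, 64 output channels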
--- official/projects/yolo/modeling/backbones/darknet_test.py
+++ official/projects/yolo/modeling/backbones/darknet_test.py
@@ -82,7 +82,10 @@ class DarknetTest(parameterized.TestCase, tf.test.TestCase):
 
     with strategy.scope():
       network = darknet.Darknet(
-          model_id='darknet53', min_size=3, max_size=5, use_sync_bn=use_sync_bn
+          model_id='darknet53',
+          min_level=3,
+          max_level=5,
+          use_sync_bn=use_sync_bn,
       )
       _ = network(inputs)
 
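The darknet_test.py fix above replaces the min_size/max_size keywords with the min_level/max_level arguments accepted by the Darknet backbone. A minimal sketch of the corrected call outside the test harness follows; the input shape and the endpoint comment are illustrative assumptions, not taken from the diff.

# Illustrative sketch of the corrected constructor call (not part of the diff).
import tensorflow as tf

from official.projects.yolo.modeling.backbones import darknet

inputs = tf.keras.Input(shape=(256, 256, 3))
network = darknet.Darknet(
    model_id='darknet53',
    min_level=3,  # shallowest feature level to expose
    max_level=5,  # deepest feature level to expose
)
endpoints = network(inputs)  # multi-scale feature maps keyed by level
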
--- tf_models_nightly-2.12.0.dev20230522.dist-info/METADATA
+++ tf_models_nightly-2.12.0.dev20230524.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: tf-models-nightly
-Version: 2.12.0.dev20230522
+Version: 2.12.0.dev20230524
 Summary: TensorFlow Official Models
 Home-page: https://github.com/tensorflow/models
 Author: Google Inc.
--- tf_models_nightly-2.12.0.dev20230522.dist-info/RECORD
+++ tf_models_nightly-2.12.0.dev20230524.dist-info/RECORD
@@ -637,7 +637,7 @@ official/projects/qat/vision/modeling/factory.py,sha256=R3pujxDYJXKN7FGOP2ow5X94
 official/projects/qat/vision/modeling/factory_test.py,sha256=inWex80tdR56kNQtsEpPEIUTEYH7xElcH6Rsr2xyGrI,9737
 official/projects/qat/vision/modeling/segmentation_model.py,sha256=00zq2AWwjzTJ_9wp8TGc8PY-0tHFwYTK4s9SPUsgHRY,3077
 official/projects/qat/vision/modeling/heads/__init__.py,sha256=tjLYHwSnUDvWNhlwGa5XXF8jzKY0_5R5DxVgEDbzz4U,743
-official/projects/qat/vision/modeling/heads/dense_prediction_heads.py,sha256=zKZ7DGyKSyxaTxzZ1ofuPqrBobwAGh4HX_5Us1d0_dU,18206
+official/projects/qat/vision/modeling/heads/dense_prediction_heads.py,sha256=cYAGkcG3q6IqrC8gFIaNIQ8LrxhOGs8dHOsxd5XkE-U,15779
 official/projects/qat/vision/modeling/heads/dense_prediction_heads_test.py,sha256=W2EUV9h46Rtx72w6lWzmtsUM_thgsUkAOWO0SX4L3F4,3980
 official/projects/qat/vision/modeling/layers/__init__.py,sha256=Xgv0erxZbpq2F_NSo4xwAhM4v6XEzzO522CAsP9VIRw,991
 official/projects/qat/vision/modeling/layers/nn_blocks.py,sha256=Gznf3bUDunLUmGD_9dXl-ylvwbVyOA01sBaFT1oBhYc,30118
@@ -654,7 +654,7 @@ official/projects/qat/vision/n_bit/schemes.py,sha256=-879p8-PBmBdBuSegkfqGnNmVhn
 official/projects/qat/vision/quantization/__init__.py,sha256=mXv8p6ih9kX5K-JXs9M30OtFK7oYcZ-pfC5OKg6nitY,643
 official/projects/qat/vision/quantization/configs.py,sha256=TJlF55rQ8j3WOIXK1qds0nZZPuG7fSwJA9319IQi4Ug,11684
 official/projects/qat/vision/quantization/configs_test.py,sha256=BybuVWfy1hHPWl6HYKLz6U_m_h40Z82A0HBAOq1sKAk,6797
-official/projects/qat/vision/quantization/helper.py,sha256=xQFwIV3bLRTXZFT-h8kSwJhsRvhi0eY50sHWQzT8sII,7199
+official/projects/qat/vision/quantization/helper.py,sha256=nvq6EVDEgwBoeYe6p5mXy_lrccpzOsS0uKXYHgposaA,9789
 official/projects/qat/vision/quantization/helper_test.py,sha256=0308fCkPFyGdt1FK2bEvngptzWCdb2g5pQCf0sg15LM,1928
 official/projects/qat/vision/quantization/layer_transforms.py,sha256=vr9C73HDPMEh6gmpPaBopGFKu_cDW3QUEg6mzgf4VYY,5057
 official/projects/qat/vision/quantization/schemes.py,sha256=v8NyDpCUKelzKvBjjyHjlUbzLZfHkA2LJPufXb3UXNM,3089
@@ -743,7 +743,7 @@ official/projects/yolo/modeling/yolo_model.py,sha256=qZsrAaH3F9o2d94Jdbldou-XF_0
 official/projects/yolo/modeling/yolov7_model.py,sha256=4uxBffJZ7J29dsUUCsP2_xhLZTVfsJV48j5Hc63H1Po,3205
 official/projects/yolo/modeling/backbones/__init__.py,sha256=1ToRMjre4mErL4Ek4_dMVxMjXNPossNXggV8fqbISao,609
 official/projects/yolo/modeling/backbones/darknet.py,sha256=uj7iQkEsVM1lgYYTdsh0s8HXRIYUcCARzzPqC5-s-fo,22015
-official/projects/yolo/modeling/backbones/darknet_test.py,sha256=AoaAofPCyT2uDHvACTwsNuaZv_0t2x6SI3k-08zZNlg,4725
+official/projects/yolo/modeling/backbones/darknet_test.py,sha256=dCynaTj8OE0D191EKnA4qEk3Qvu5MroPtcK-iZgwymk,4758
 official/projects/yolo/modeling/backbones/yolov7.py,sha256=PCuJAeLachDFfewKBb0kmh9Fy9QwzJAykYUK0zSmnhs,12875
 official/projects/yolo/modeling/backbones/yolov7_test.py,sha256=kutqiGaxf_prQglxSaihhdYDer8ZzQRHMl7w7z76znc,3049
 official/projects/yolo/modeling/decoders/__init__.py,sha256=1ToRMjre4mErL4Ek4_dMVxMjXNPossNXggV8fqbISao,609
@@ -1102,9 +1102,9 @@ tensorflow_models/__init__.py,sha256=Ciz_YBke6teb6y42QyQTUBDdXJAiV7Qdu1zOoZvYiKw
 tensorflow_models/tensorflow_models_test.py,sha256=Kz2y4V-rtBhZFFfKD2soCq52hviSfJVV1L2ztqS-9oM,1385
 tensorflow_models/nlp/__init__.py,sha256=3dULDpUBpDi9vljpXadq6oJrWH4y6z42Bz2d3hopYZw,807
 tensorflow_models/vision/__init__.py,sha256=4y77XkHaH8qLls3-6ta4tMp3Xj8CLbB0ihH91HsQ9z4,833
-tf_models_nightly-2.12.0.dev20230522.dist-info/AUTHORS,sha256=1dG3fXVu9jlo7bul8xuix5F5vOnczMk7_yWn4y70uw0,337
-tf_models_nightly-2.12.0.dev20230522.dist-info/LICENSE,sha256=WxeBS_DejPZQabxtfMOM_xn8qoZNJDQjrT7z2wG1I4U,11512
-tf_models_nightly-2.12.0.dev20230522.dist-info/METADATA,sha256=E1Nu7QMA-rikmDz9e1a7FeacwUGNGfGQ4B27Z_aqhaU,1393
-tf_models_nightly-2.12.0.dev20230522.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
-tf_models_nightly-2.12.0.dev20230522.dist-info/top_level.txt,sha256=gum2FfO5R4cvjl2-QtP-S1aNmsvIZaFFT6VFzU0f4-g,33
-tf_models_nightly-2.12.0.dev20230522.dist-info/RECORD,,
+tf_models_nightly-2.12.0.dev20230524.dist-info/AUTHORS,sha256=1dG3fXVu9jlo7bul8xuix5F5vOnczMk7_yWn4y70uw0,337
+tf_models_nightly-2.12.0.dev20230524.dist-info/LICENSE,sha256=WxeBS_DejPZQabxtfMOM_xn8qoZNJDQjrT7z2wG1I4U,11512
+tf_models_nightly-2.12.0.dev20230524.dist-info/METADATA,sha256=-S1EbFFT1XhMFrxG6OG7NJJ3KVsCEqH1A0uu-crodxc,1393
+tf_models_nightly-2.12.0.dev20230524.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
+tf_models_nightly-2.12.0.dev20230524.dist-info/top_level.txt,sha256=gum2FfO5R4cvjl2-QtP-S1aNmsvIZaFFT6VFzU0f4-g,33
+tf_models_nightly-2.12.0.dev20230524.dist-info/RECORD,,