onnx2tf 1.29.15__py3-none-any.whl → 1.29.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
onnx2tf/ops/QLinearMatMul.py CHANGED
@@ -11,6 +11,47 @@ from onnx2tf.utils.common_functions import (
     make_tf_node_info,
 )
 
+def _get_qmin_qmax(dtype: tf.dtypes.DType):
+    if dtype == tf.uint8:
+        return 0.0, 255.0
+    if dtype == tf.int8:
+        return -128.0, 127.0
+    if dtype == tf.uint16:
+        return 0.0, 65535.0
+    if dtype == tf.int16:
+        return -32768.0, 32767.0
+    return None, None
+
+
+def _reshape_for_axis(
+    *,
+    value,
+    input_tensor,
+    axis: int,
+):
+    value_rank = len(value.shape)
+    input_rank = len(input_tensor.shape)
+    if value_rank == 1 and input_rank is not None:
+        shape = [1] * input_rank
+        shape[axis] = -1
+        return tf.reshape(value, shape)
+    return value
+
+
+def _reshape_for_output(
+    *,
+    value,
+    output_tensor,
+):
+    value_rank = len(value.shape)
+    output_rank = len(output_tensor.shape)
+    if value_rank == 1 and output_rank is not None and output_rank >= 2:
+        if output_tensor.shape[-2] == value.shape[0]:
+            shape = [1] * output_rank
+            shape[-2] = -1
+            return tf.reshape(value, shape)
+    return value
+
 
 @print_node_info
 @inverted_operation_enable_disable
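The two reshape helpers replace the hard-coded `[n, 1]` reshapes used previously: a 1-D per-channel scale or zero point is lifted to the operand's rank with `-1` on the target axis so it broadcasts in the arithmetic that follows. A minimal standalone sketch of that broadcasting rule (re-stated here for illustration; the shapes are invented):

    import tensorflow as tf

    def reshape_for_axis(value, input_tensor, axis):
        # Lift a 1-D per-axis scale to the operand's rank with -1 on `axis`,
        # mirroring the _reshape_for_axis helper above.
        if len(value.shape) == 1:
            shape = [1] * len(input_tensor.shape)
            shape[axis] = -1
            return tf.reshape(value, shape)
        return value

    a = tf.zeros([2, 3, 4])                 # matmul rows live on axis -2
    a_scale = tf.constant([0.1, 0.2, 0.3])  # one scale per row
    print(reshape_for_axis(a_scale, a, -2).shape)  # (1, 3, 1)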
@@ -76,12 +117,18 @@ def make_node(
 
     a = tf_layers_dict[graph_node_input_1.name]['tf_node'] \
         if isinstance(graph_node_input_1, gs.Variable) else graph_node_input_1
+    a_is_dequantized = False
+    if isinstance(graph_node_input_1, gs.Variable):
+        a_is_dequantized = tf_layers_dict.get(graph_node_input_1.name, {}).get('is_dequantized', False)
     a_scale = tf_layers_dict[graph_node_input_2.name]['tf_node'] \
         if isinstance(graph_node_input_2, gs.Variable) else graph_node_input_2
     a_zero_point = tf_layers_dict[graph_node_input_3.name]['tf_node'] \
         if isinstance(graph_node_input_3, gs.Variable) else graph_node_input_3
     b = tf_layers_dict[graph_node_input_4.name]['tf_node'] \
         if isinstance(graph_node_input_4, gs.Variable) else graph_node_input_4
+    b_is_dequantized = False
+    if isinstance(graph_node_input_4, gs.Variable):
+        b_is_dequantized = tf_layers_dict.get(graph_node_input_4.name, {}).get('is_dequantized', False)
     b_scale = tf_layers_dict[graph_node_input_5.name]['tf_node'] \
         if isinstance(graph_node_input_5, gs.Variable) else graph_node_input_5
     b_zero_point = tf_layers_dict[graph_node_input_6.name]['tf_node'] \
@@ -90,50 +137,60 @@ def make_node(
         if isinstance(graph_node_input_7, gs.Variable) else graph_node_input_7
     y_zero_point = tf_layers_dict[graph_node_input_8.name]['tf_node'] \
         if isinstance(graph_node_input_8, gs.Variable) else graph_node_input_8
-    y_dtype = y_zero_point.dtype if y_zero_point.dtype not in [tf.int8, tf.uint8] else tf.float32
+    y_dtype = y_zero_point.dtype
 
     # Preserving Graph Structure (Dict)
     tf_layers_dict[graph_node_output.name] = {
         'optype': graph_node.op,
         'shape': shape,
         'dtype': dtype,
+        'is_dequantized': True,
     }
 
     # Generation of TF OP
 
-    # reshape 1-D a_scale, a_zero_point, y_scale and
-    # y_zero_point so it can broadcast in arithmetic
-    # operations later
-    a_scale_shape = a_scale.shape
-    if a_scale_shape and a_scale_shape[0] > 1:
-        a_scale = tf.reshape(a_scale, [a_scale_shape[0], 1])
-        a_zero_point = tf.reshape(a_zero_point, [a_scale_shape[0], 1])
-    y_scale_shape = y_scale.shape
-    if y_scale_shape and y_scale_shape[0] > 1:
-        y_scale = tf.reshape(y_scale, [y_scale_shape[0], 1])
-        y_zero_point = tf.reshape(y_zero_point, [y_scale_shape[0], 1])
+    # reshape a_scale and a_zero_point to broadcast on row axis (second last)
+    a_scale = _reshape_for_axis(value=a_scale, input_tensor=a, axis=-2)
+    a_zero_point = _reshape_for_axis(value=a_zero_point, input_tensor=a, axis=-2)
+    # reshape b_scale and b_zero_point to broadcast on column axis (last)
+    b_scale = _reshape_for_axis(value=b_scale, input_tensor=b, axis=-1)
+    b_zero_point = _reshape_for_axis(value=b_zero_point, input_tensor=b, axis=-1)
 
     # cast all inputs to float32
     a = tf.cast(a, tf.float32)
+    a_scale = tf.cast(a_scale, tf.float32)
     a_zero_point = tf.cast(a_zero_point, tf.float32)
     b = tf.cast(b, tf.float32)
+    b_scale = tf.cast(b_scale, tf.float32)
     b_zero_point = tf.cast(b_zero_point, tf.float32)
+    y_scale = tf.cast(y_scale, tf.float32)
     y_zero_point = tf.cast(y_zero_point, tf.float32)
 
     # dequantize a and b
-    dequantized_a = tf.subtract(a, a_zero_point)
-    dequantized_a = tf.multiply(dequantized_a, a_scale)
-    dequantized_b = tf.subtract(b, b_zero_point)
-    dequantized_b = tf.multiply(dequantized_b, b_scale)
+    if a_is_dequantized:
+        dequantized_a = tf.cast(a, tf.float32)
+    else:
+        dequantized_a = tf.multiply(tf.subtract(a, a_zero_point), a_scale)
+
+    if b_is_dequantized:
+        dequantized_b = tf.cast(b, tf.float32)
+    else:
+        dequantized_b = tf.multiply(tf.subtract(b, b_zero_point), b_scale)
 
     # matmul
     x = tf.matmul(dequantized_a, dequantized_b)
 
-    # quantize x
-    y = tf.divide(x, y_scale)
-    y = tf.round(y)
+    # broadcast output scale/zero_point if needed
+    y_scale = _reshape_for_output(value=y_scale, output_tensor=x)
+    y_zero_point = _reshape_for_output(value=y_zero_point, output_tensor=x)
+
+    # quantize then dequantize to float32
+    y = tf.round(tf.divide(x, y_scale))
     y = tf.add(y, y_zero_point)
-    y = tf.saturate_cast(y, y_dtype)
+    qmin, qmax = _get_qmin_qmax(y_dtype)
+    if qmin is not None and qmax is not None:
+        y = tf.clip_by_value(y, qmin, qmax)
+    y = tf.multiply(tf.subtract(y, y_zero_point), y_scale)
 
     tf_layers_dict[graph_node_output.name]['tf_node'] = y
 
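The net effect of the hunks above: QLinearMatMul now returns a dequantized float32 result (quantize, clip to the integer range, dequantize) instead of a `saturate_cast` to int8/uint8, and marks its output `is_dequantized` so downstream ops can skip a redundant dequantize. A scalar sketch of that fake-quant round trip, assuming illustrative values y_scale=0.5, y_zero_point=10 and a uint8 range of [0, 255]:

    import tensorflow as tf

    x = tf.constant([-10.0, 0.4, 7.9])         # float32 matmul result
    y_scale, y_zero_point = 0.5, 10.0
    q = tf.round(x / y_scale) + y_zero_point   # quantize
    q = tf.clip_by_value(q, 0.0, 255.0)        # saturate to the uint8 range
    y = (q - y_zero_point) * y_scale           # dequantize back to float32
    print(y.numpy())                           # [-5.   0.5  8. ] (first value clipped)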
onnx2tf/ops/QuantizeLinear.py CHANGED
@@ -11,6 +11,49 @@ from onnx2tf.utils.common_functions import (
     make_tf_node_info,
     convert_axis,
 )
+from onnx2tf.utils.enums import ONNX_DTYPES_TO_TF_DTYPES
+
+
+def _get_qmin_qmax(dtype: tf.dtypes.DType):
+    if dtype == tf.uint8:
+        return 0.0, 255.0
+    if dtype == tf.int8:
+        return -128.0, 127.0
+    if dtype == tf.uint16:
+        return 0.0, 65535.0
+    if dtype == tf.int16:
+        return -32768.0, 32767.0
+    return None, None
+
+
+def _expand_scale_or_zero_point(
+    *,
+    value,
+    input_tensor,
+    axis: int,
+    block_size: int,
+):
+    value_rank = len(value.shape)
+    input_rank = len(input_tensor.shape)
+
+    if value_rank == 0:
+        return value
+
+    if block_size > 0 and value_rank == input_rank:
+        if value.shape[axis] is None \
+            or input_tensor.shape[axis] is None \
+            or value.shape[axis] != input_tensor.shape[axis]:
+            expanded = tf.repeat(value, repeats=block_size, axis=axis)
+            expanded = tf.slice(expanded, [0] * input_rank, tf.shape(input_tensor))
+            return expanded
+        return value
+
+    if value_rank == 1 and input_rank is not None:
+        shape = [1] * input_rank
+        shape[axis] = -1
+        return tf.reshape(value, shape)
+
+    return value
 
 
 @print_node_info
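`_expand_scale_or_zero_point` adds handling for blockwise quantization (the `block_size` attribute, introduced in ONNX opset 21): when the scale has the input's rank but fewer entries along `axis`, each entry is repeated `block_size` times and the result is sliced back to the input's shape to absorb a ragged final block. A minimal sketch of that repeat-and-slice step, with invented shapes:

    import tensorflow as tf

    # 6 elements on the quantized axis, block_size=4, so ceil(6/4)=2 scale
    # entries cover blocks [0:4] and [4:6].
    x = tf.zeros([2, 6])
    scale = tf.constant([[0.1, 0.2], [0.3, 0.4]])       # shape (2, 2), axis=1
    expanded = tf.repeat(scale, repeats=4, axis=1)      # shape (2, 8)
    expanded = tf.slice(expanded, [0, 0], tf.shape(x))  # shape (2, 6)
    print(expanded.numpy())
    # [[0.1 0.1 0.1 0.1 0.2 0.2]
    #  [0.3 0.3 0.3 0.3 0.4 0.4]]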
@@ -60,12 +103,12 @@ def make_node(
 
     input_tensor = tf_layers_dict[graph_node_input_1.name]['tf_node'] \
         if isinstance(graph_node_input_1, gs.Variable) else graph_node_input_1
-    input_tensor_shape = input_tensor.shape
-    input_tensor_rank = len(input_tensor_shape)
+    input_nhwc = False
+    if isinstance(graph_node_input_1, gs.Variable):
+        input_nhwc = tf_layers_dict.get(graph_node_input_1.name, {}).get('nhwc', False)
+    input_tensor_rank = len(input_tensor.shape)
     y_scale = tf_layers_dict[graph_node_input_2.name]['tf_node'] \
         if isinstance(graph_node_input_2, gs.Variable) else graph_node_input_2
-    y_scale_shape = y_scale.shape
-    y_scale_rank = len(y_scale_shape)
     y_zero_point = tf_layers_dict[graph_node_input_3.name]['tf_node'] \
         if isinstance(graph_node_input_3, gs.Variable) else graph_node_input_3
 
@@ -81,6 +124,8 @@ def make_node(
         'optype': graph_node.op,
         'shape': shape,
         'dtype': dtype,
+        'is_dequantized': True,
+        'nhwc': input_nhwc,
     }
 
     # Generation of TF OP
@@ -88,51 +133,79 @@ def make_node(
         x=input_tensor,
         dtype=tf.float32,
     )
-    x_shape = input_tensor_shape
-    x_rank = input_tensor_rank
-    y_scale_shape = y_scale_shape
-
-    # Reshape process is needed for per-axis quantization
-    # when scale is a 1-D tensor
-    if y_scale_rank == 1:
-        shape_broadcast = list(
-            [1 for _ in range(axis)] \
-            + [x_shape[axis]] \
-            + [1 for _ in range(axis + 1, x_rank)]
-        )
-        y_scale = tf.reshape(
-            tensor=y_scale,
-            shape=shape_broadcast,
-        )
-    y = tf.divide(
-        x=input_tensor,
-        y=y_scale,
+
+    # If QuantizeLinear is immediately followed by Cast -> DequantizeLinear
+    # or DequantizeLinear only, bypass fake-quant to avoid generating
+    # Mul/Round/Min/Relu/Mul chains in TF/TFLite.
+    bypass_fake_quant = False
+    if graph_node.outputs and len(graph_node.outputs) > 0:
+        consumers = graph_node.outputs[0].outputs
+        if consumers:
+            bypass_fake_quant = True
+            for consumer in consumers:
+                if consumer.op == 'DequantizeLinear':
+                    continue
+                if consumer.op == 'Cast':
+                    cast_outs = consumer.outputs[0].outputs if consumer.outputs else []
+                    if not cast_outs or any(grand.op != 'DequantizeLinear' for grand in cast_outs):
+                        bypass_fake_quant = False
+                        break
+                else:
+                    bypass_fake_quant = False
+                    break
+
+    if bypass_fake_quant:
+        tf_layers_dict[graph_node_output.name]['tf_node'] = input_tensor
+        tf_layers_dict[graph_node_output.name]['tf_node_info'] = \
+            make_tf_node_info(
+                node_info={
+                    'tf_op_type': 'QuantizeLinear',
+                    'tf_inputs': {
+                        'x': input_tensor,
+                    },
+                    'tf_outputs': {
+                        'output': tf_layers_dict[graph_node_output.name]['tf_node'],
+                    },
+                }
+            )
+        return
+    y_scale = tf.cast(y_scale, tf.float32)
+
+    block_size = int(graph_node.attrs.get('block_size', 0))
+    y_scale = _expand_scale_or_zero_point(
+        value=y_scale,
+        input_tensor=input_tensor,
+        axis=axis,
+        block_size=block_size,
     )
-    y = tf.round(y)
 
-    if y_zero_point is not None:
-        y_dtype = y_zero_point.dtype if y_zero_point.dtype not in [tf.int8, tf.uint8] else tf.float32
-        y_zero_point = tf.cast(
-            x=y_zero_point,
-            dtype=tf.float32,
-        )
-        y_zero_point = tf.reshape(
-            tensor=y_zero_point,
-            shape=shape_broadcast,
-        ) if y_scale_rank == 1 else y_zero_point
-        y = tf.add(
-            x=y,
-            y=y_zero_point,
+    output_dtype_attr = int(graph_node.attrs.get('output_dtype', 0))
+    if y_zero_point is None:
+        output_dtype = ONNX_DTYPES_TO_TF_DTYPES.get(output_dtype_attr, tf.uint8) \
+            if output_dtype_attr != 0 else tf.uint8
+        y_zero_point = tf.zeros_like(y_scale)
+    else:
+        output_dtype = y_zero_point.dtype
+        y_zero_point = tf.cast(y_zero_point, tf.float32)
+    y_zero_point = _expand_scale_or_zero_point(
+        value=y_zero_point,
+        input_tensor=input_tensor,
+        axis=axis,
+        block_size=block_size,
     )
-    else: # y_zero_point default dtype = uint8
-        y_dtype = tf.uint8
 
-    # Generation of TF OP
+    y = tf.round(tf.divide(input_tensor, y_scale))
+    y = tf.add(y, y_zero_point)
+
+    qmin, qmax = _get_qmin_qmax(output_dtype)
+    if qmin is not None and qmax is not None:
+        y = tf.clip_by_value(y, qmin, qmax)
+
+    # dequantize to float32 output
     tf_layers_dict[graph_node_output.name]['tf_node'] = \
-        tf.saturate_cast(
-            value=y,
-            dtype=y_dtype,
-            name=graph_node.name,
+        tf.multiply(
+            x=tf.subtract(y, y_zero_point),
+            y=y_scale,
         )
 
     # Generation of Debug Info
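The new bypass branch recognizes the standard QDQ pattern, where QuantizeLinear feeds only DequantizeLinear consumers (possibly through a Cast), and passes the float input straight through so no Mul/Round/Min/Relu chain reaches the TF/TFLite graph. A self-contained restatement of that consumer check, using hypothetical stand-in node objects (just an op name and a consumer list; the real code walks onnx_graphsurgeon outputs the same way):

    class Node:
        def __init__(self, op, consumers=()):
            self.op = op
            self.consumers = list(consumers)

    def can_bypass_fake_quant(consumers):
        # True only if every consumer is DequantizeLinear, or a Cast whose
        # own consumers are all DequantizeLinear.
        if not consumers:
            return False
        for c in consumers:
            if c.op == 'DequantizeLinear':
                continue
            if c.op == 'Cast' and c.consumers \
                    and all(g.op == 'DequantizeLinear' for g in c.consumers):
                continue
            return False
        return True

    dq = Node('DequantizeLinear')
    print(can_bypass_fake_quant([dq]))                               # True
    print(can_bypass_fake_quant([Node('Cast', [dq])]))               # True
    print(can_bypass_fake_quant([Node('Cast', [dq]), Node('Add')]))  # False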
onnx2tf/ops/Split.py CHANGED
@@ -124,6 +124,32 @@ def make_node(
         **kwargs,
     )
 
+    def _infer_split_axis_runtime(input_tensor, sum_split, fallback_axis):
+        if sum_split is None:
+            return tf.cast(fallback_axis, tf.int32)
+        shape = tf.shape(input_tensor)
+        eq = tf.equal(shape, tf.cast(sum_split, tf.int32))
+        mask = tf.cast(eq, tf.int32)
+        count = tf.reduce_sum(mask)
+        axis_from = tf.argmax(mask, axis=0, output_type=tf.int32)
+        fallback_axis_tensor = tf.cast(fallback_axis, tf.int32)
+        is_single = tf.cast(tf.equal(count, 1), tf.int32)
+        return axis_from * is_single + fallback_axis_tensor * (1 - is_single)
+
+    axis_for_split = axis
+    sum_split = None
+    split_list = None
+    if isinstance(split, np.ndarray):
+        split_list = list(split)
+    elif isinstance(split, (list, tuple)):
+        split_list = list(split)
+    if split_list is not None and len(split_list) > 1:
+        if len(split_list) == sum([1 for dim in split_list if isinstance(dim, (np.int64, int))]):
+            sum_split = int(np.sum(split_list))
+        axis_dim = input_tensor_shape[axis] if axis < len(input_tensor_shape) else None
+        if axis_dim is None or (isinstance(axis_dim, int) and axis_dim != sum_split):
+            axis_for_split = _infer_split_axis_runtime(input_tensor, sum_split, axis)
+
     # Generation of TF OP
     splited_tensors = None
     if (
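`_infer_split_axis_runtime` repairs the split axis at run time: it flags every dimension whose runtime size equals `sum(split)` and selects that dimension only when exactly one matches, otherwise keeping the ONNX-declared axis. The select-by-mask arithmetic keeps everything a plain tensor expression, with no `tf.cond`. A standalone numeric check of the same logic:

    import tensorflow as tf

    def infer_split_axis(input_tensor, sum_split, fallback_axis):
        shape = tf.shape(input_tensor)
        mask = tf.cast(tf.equal(shape, tf.cast(sum_split, tf.int32)), tf.int32)
        count = tf.reduce_sum(mask)                 # dims matching sum(split)
        axis_from = tf.argmax(mask, axis=0, output_type=tf.int32)
        is_single = tf.cast(tf.equal(count, 1), tf.int32)
        return axis_from * is_single + tf.cast(fallback_axis, tf.int32) * (1 - is_single)

    x = tf.zeros([4, 10, 3])
    print(infer_split_axis(x, 10, 0).numpy())   # 1: only axis 1 has size 10
    print(infer_split_axis(x, 7, 0).numpy())    # 0: no dimension matches, fall back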
@@ -225,18 +251,17 @@ def make_node(
             num=None,
             name=graph_node.name,
         )
-    elif isinstance(split, np.ndarray) \
+    elif isinstance(split, (list, tuple, np.ndarray)) \
         and len(list(split)) > 1 \
-        and np.prod(split) != 1 \
-        and isinstance(input_tensor_shape[axis], int) \
-        and len(split) == sum([1 for dim in split if isinstance(dim, np.int64) or isinstance(dim, int)]) \
-        and len(split) != sum([1 for dim in split if split[0] == dim]) \
-        and np.sum(split) == input_tensor_shape[axis]:
+        and (np.prod(split) != 1 if isinstance(split, np.ndarray) else True) \
+        and len(list(split)) == sum([1 for dim in list(split) if isinstance(dim, (np.int64, int))]) \
+        and len(list(split)) != sum([1 for dim in list(split) if list(split)[0] == dim]) \
+        and (not isinstance(input_tensor_shape[axis], int) or np.sum(list(split)) == input_tensor_shape[axis]):
         # Suppression of FlexSplitV generation
         # SplitV -> Strided_Slice
         splited_tensors = []
         begin_stock = []
-        for split_idx, split_dim in enumerate(split):
+        for split_idx, split_dim in enumerate(list(split)):
             begin_ = []
             end_ = []
             begin_mask_ = 0
@@ -269,7 +294,7 @@ def make_node(
             tf.split(
                 value=input_tensor,
                 num_or_size_splits=split,
-                axis=axis,
+                axis=axis_for_split,
                 num=num_outputs,
                 name=graph_node.name,
             )
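Because `axis_for_split` can now be a scalar int32 tensor rather than a Python int, it matters that `tf.split` documents `axis` as an integer or scalar int32 Tensor. A quick sanity check of that calling convention:

    import tensorflow as tf

    x = tf.reshape(tf.range(24), [2, 12])
    axis = tf.constant(1, dtype=tf.int32)        # axis supplied as a scalar tensor
    parts = tf.split(x, num_or_size_splits=[4, 8], axis=axis)
    print([p.shape.as_list() for p in parts])    # [[2, 4], [2, 8]]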
onnx2tf-1.29.15.dist-info/METADATA → onnx2tf-1.29.16.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: onnx2tf
-Version: 1.29.15
+Version: 1.29.16
 Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
 Keywords: onnx,tensorflow,tflite,keras,deep-learning,machine-learning
 Author: Katsuya Hyodo
@@ -364,7 +364,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
 docker run --rm -it \
 -v `pwd`:/workdir \
 -w /workdir \
-ghcr.io/pinto0309/onnx2tf:1.29.15
+ghcr.io/pinto0309/onnx2tf:1.29.16
 
 or
 
@@ -372,7 +372,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
 docker run --rm -it \
 -v `pwd`:/workdir \
 -w /workdir \
-docker.io/pinto0309/onnx2tf:1.29.15
+docker.io/pinto0309/onnx2tf:1.29.16
 
 or
 
onnx2tf-1.29.15.dist-info/RECORD → onnx2tf-1.29.16.dist-info/RECORD RENAMED
@@ -1,6 +1,6 @@
-onnx2tf/__init__.py,sha256=OazM0la-RBJsi-HQv3oIVGCRDNETzahP6Tlad2NZV5k,67
+onnx2tf/__init__.py,sha256=eUTDHQ-QolqzjE7OAU5EnMP1epQf_1sc4vD2dRt8Bj4,67
 onnx2tf/__main__.py,sha256=2RSCQ7d4lc6CwD-rlGn9UicPFg-P5du7ZD_yh-kuBEU,57
-onnx2tf/onnx2tf.py,sha256=O3B_ME8omswggw4xtjxxnC8_uaPHH3Ly8dwSv7w75no,157060
+onnx2tf/onnx2tf.py,sha256=y8FewjpNYAFnUs0cjq6JzdYkiXQSm1o_sZ3PXLJzK64,161921
 onnx2tf/ops/Abs.py,sha256=V7btmCG_ZvK_qJovUsguq0ZMJ349mhNQ4FHSgzP_Yuo,4029
 onnx2tf/ops/Acos.py,sha256=Fo8YkFKuWq8Fi2xUrBdKcAH1yJ8r5pjSD0wgLttTNdk,4003
 onnx2tf/ops/Acosh.py,sha256=ATQj2cT5JS_mTfXi0kXqJ1yzSZu5J0zHA5VjV3j7uKY,3588
@@ -29,7 +29,7 @@ onnx2tf/ops/Celu.py,sha256=9g7WNKo4G_jMtUXcoOfpNdLYqEsuyXLPkkyQZxDuL4U,3853
 onnx2tf/ops/Clip.py,sha256=K3Pgt9BXl5_rzg6s-kPFmwElL5COsvolRY1BUTo7UWw,8753
 onnx2tf/ops/Col2Im.py,sha256=MDqck00gURrbsroJAgUDkxmdsGyE9v2ez4NKdvdq5IY,7514
 onnx2tf/ops/Compress.py,sha256=NvDGr9gCNl-8YG41xDBfe3UvhRP03K-ktdtY_MoytBc,3667
-onnx2tf/ops/Concat.py,sha256=CKfJbiAwP7h9sFFVyueHJCbwMkUo3NXqkTuRc8v7Tw8,31215
+onnx2tf/ops/Concat.py,sha256=M0KSwymMIYR-YOIlylFe708TDdOvWNMxDE6V4UKE1MI,32570
 onnx2tf/ops/ConcatFromSequence.py,sha256=z8pNmGQRGq9cxWORW330NZS_0zsmhFudLswMyPn8AXU,3086
 onnx2tf/ops/Constant.py,sha256=BNZLzNI4rK9kXgVWwD-2RFsDsH7mMy7AY2JSgTNXIWk,10696
 onnx2tf/ops/ConstantOfShape.py,sha256=6eYm-niow-6fHVEyNyi81BdrVe3IbcdazCp2nySWExA,2331
@@ -41,11 +41,11 @@ onnx2tf/ops/Cosh.py,sha256=-L3QkQtiVBJIv1sSxbXtetVIwgI_2T4WC1O4t2aJ8Gc,3585
 onnx2tf/ops/CumProd.py,sha256=k4hTEQrkwS7vk7pEy2Btvy2y0o70NlWj1MgsNomfOPg,3957
 onnx2tf/ops/CumSum.py,sha256=SYKmD5r9Cm9gsCkJPNFoHigvvBO1PmRYRrVmn1HE78o,3954
 onnx2tf/ops/DepthToSpace.py,sha256=BiyBZ88dmXQAkZ5Jc-Ddo-5Kn8dRYCnoik_XnOFzqXc,14449
-onnx2tf/ops/DequantizeLinear.py,sha256=cNbGw4ITg_BsrXYkSb7fD05XEkQgz7v__-StQtvIvB4,5220
+onnx2tf/ops/DequantizeLinear.py,sha256=1v43E1hUqO3g7N-PL1fy_cGj4oUgbphh7vXIGhUAyGc,6463
 onnx2tf/ops/Det.py,sha256=kxuHkpv_KNHkof0uBv2RLtr3G1uA76MFHyCiCYCBXkw,3590
 onnx2tf/ops/Div.py,sha256=NyAsvCxI41hyBX_kiCEILHY6QQkas_o4wRY8zkDUiwk,16248
 onnx2tf/ops/Dropout.py,sha256=KZKVqlnbq875awsNvJaQRvkO3XgqxeAmjbikXymRCtA,5860
-onnx2tf/ops/DynamicQuantizeLinear.py,sha256=UGmN2nXBBQHXcNlorEQfnKDnnoOadt4TNzXox-Xki2U,4759
+onnx2tf/ops/DynamicQuantizeLinear.py,sha256=oLsj1cfzawPXo5TokfJOx-rR-Ls7OXDLLk_TLYtFgiA,4962
 onnx2tf/ops/Einsum.py,sha256=YBw0JmSglOVVje80RqmqIjgsc7V5SnYS6s1Ysa2NUPA,12369
 onnx2tf/ops/Elu.py,sha256=VDd5cKc1h-8nd0bVwWR_CkgfomrBl4NMbjRtAvkoNks,4025
 onnx2tf/ops/Equal.py,sha256=ni0gf7nJex8S-oG61bnHc_xn8LuMits3gM6IzGNT65w,4579
@@ -118,14 +118,14 @@ onnx2tf/ops/PRelu.py,sha256=pHbsffhb2rLZPPb9NdKUT4f5-lC0TXmbZVafookXo90,6314
 onnx2tf/ops/Pad.py,sha256=xZOkZK-53sXU-d0nADAjR1wOpKqfzHeJjTmzwon6G4A,11883
 onnx2tf/ops/Pow.py,sha256=DZjrWQSyLw_BPXrKyoTqT9KJIxPfNxnYVcoTDBagDgM,7056
 onnx2tf/ops/QLinearAdd.py,sha256=OssQI0pd8KXdnCC8urCPKP8bpcvSX0D76bS7q4-xMSY,5027
-onnx2tf/ops/QLinearConcat.py,sha256=ZAde6h_OZ35gROmenr__pjrrni310TmOqjYUEu_gXIo,4530
-onnx2tf/ops/QLinearConv.py,sha256=hgtvQdhy1Xsrdx-ThCT4L6v7zElWvgkt0-gJqdko8LY,12146
+onnx2tf/ops/QLinearConcat.py,sha256=TWYqxADiAq51kcGq2EA1K9FQnx5RgGFxPWHxMxbUz4M,13296
+onnx2tf/ops/QLinearConv.py,sha256=rRQGCf1pqC-hvsXRaN6BVZwzARb5JJNdu9R6Hb4nptU,11771
 onnx2tf/ops/QLinearLeakyRelu.py,sha256=8Egl-ACtsnBrdcrm8LHzyWJEhcBi1dMp4lEBsqEGWA4,4521
-onnx2tf/ops/QLinearMatMul.py,sha256=7OLXnwUoLAtpQRsK2w_RP_YXVwR0GQDyuLPGjVTnXOQ,5636
+onnx2tf/ops/QLinearMatMul.py,sha256=DCClPZKLyyef10ZogbZLbrByQuocUnAWDHVevAd_MmM,7477
 onnx2tf/ops/QLinearMul.py,sha256=QUqevMwVcDlSqAWlQ9ZTpNcvRlDXO1j3wWzEQZGEdq8,5056
 onnx2tf/ops/QLinearSigmoid.py,sha256=pV18RrqC64ADQQMaxJIO1iwrjbf2hpUVcvBQfntiBJ0,3931
 onnx2tf/ops/QLinearSoftmax.py,sha256=GtfT2gVH-V2j4NRqBbDFFfZWygp7TIjP662vo8k6dbU,4256
-onnx2tf/ops/QuantizeLinear.py,sha256=tYKnsWZ_LBSMteVOTi8UV3F_dyTiE9tK8IeOmZjitVM,4563
+onnx2tf/ops/QuantizeLinear.py,sha256=g_kZy7Ei4Ey_rGQWiSKDPaY9TOONegLxV1Jyt_gTP0k,7255
 onnx2tf/ops/RNN.py,sha256=55G5muM0BmJU9xIUU7hWsxhz5npisTfLJipR1w83ZDk,28143
 onnx2tf/ops/RandomNormal.py,sha256=g1HvpScrHBOffqPT6yhSV1y2fNx7klruD6Vkolfl0to,2013
 onnx2tf/ops/RandomNormalLike.py,sha256=BKguRxj48JhJ68Hce6xO8eE0OE-mTwnpymxBlV81ofw,2772
@@ -173,7 +173,7 @@ onnx2tf/ops/Softmax.py,sha256=CEnHcSm25v1QC4QVDg4fz1NooYY1v-Uq4GORd8dnnr8,14773
 onnx2tf/ops/Softplus.py,sha256=R44YMo8G2Ig15jBO6T2VOI6RhpUmjD70qvSCXFylU-Q,3605
 onnx2tf/ops/Softsign.py,sha256=2ZdKH3KVHZXDzyO7S8f-O_aqRugurbRxd1i2g_fwCos,3600
 onnx2tf/ops/SpaceToDepth.py,sha256=rWtPQNm2rErYs20gQyz-tFYsImAIUBGtdvfMVkJg5bo,2809
-onnx2tf/ops/Split.py,sha256=ukm7QZmSwYwUwGLbVGsOiCEB3YfrFMl0cozn1kwgCv0,10728
+onnx2tf/ops/Split.py,sha256=Z2UwbEBnG8nY3fED__ijgD9KikTuqPBv5ZjHEeoNURU,12103
 onnx2tf/ops/SplitToSequence.py,sha256=BS_JEd7DC7vuPfs5oRRW774mtlK--kqf9DJUalv-Agk,5062
 onnx2tf/ops/Sqrt.py,sha256=-xE8Tk_6unSR56k9g3R46lML4Nht5kQwqJT0JYkn5ko,3585
 onnx2tf/ops/Squeeze.py,sha256=FLIt2qjWh1IJyti1c4YHuepH2Fkxt40rnEKszzmwsnE,7980
@@ -199,7 +199,7 @@ onnx2tf/utils/enums.py,sha256=7c5TqetqB07VjyHoxJHfLgtqBqk9ZRyUF33fPOJR1IM,1649
 onnx2tf/utils/iterative_json_optimizer.py,sha256=qqeIxWGxrhcCYk8-ebWnblnOkzDCwi-nseipHzHR_bk,10436
 onnx2tf/utils/json_auto_generator.py,sha256=OC-SfKtUg7zUxaXTAg6kT0ShzIc3ByjDa3FNp173DtA,60302
 onnx2tf/utils/logging.py,sha256=yUCmPuJ_XiUItM3sZMcaMO24JErkQy7zZwVTYWAuiKg,1982
-onnx2tf-1.29.15.dist-info/WHEEL,sha256=e_m4S054HL0hyR3CpOk-b7Q7fDX6BuFkgL5OjAExXas,80
-onnx2tf-1.29.15.dist-info/entry_points.txt,sha256=GuhvLu7ZlYECumbmoiFlKX0mFPtFi_Ti9L-E5yuQqKs,42
-onnx2tf-1.29.15.dist-info/METADATA,sha256=pvIhTXrSp_ldpl6zX8fXfwfvCnOw7_W4K6FZhK7YeAI,154244
-onnx2tf-1.29.15.dist-info/RECORD,,
+onnx2tf-1.29.16.dist-info/WHEEL,sha256=e_m4S054HL0hyR3CpOk-b7Q7fDX6BuFkgL5OjAExXas,80
+onnx2tf-1.29.16.dist-info/entry_points.txt,sha256=GuhvLu7ZlYECumbmoiFlKX0mFPtFi_Ti9L-E5yuQqKs,42
+onnx2tf-1.29.16.dist-info/METADATA,sha256=2h83lAHOcFRpBz9nj2MaYmQZlNG74IdwlHkah1pZpeI,154244
+onnx2tf-1.29.16.dist-info/RECORD,,