onnx2tf 1.29.22__py3-none-any.whl → 1.29.24__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- onnx2tf/__init__.py +1 -1
- onnx2tf/ops/CenterCropPad.py +192 -0
- onnx2tf/ops/GroupNormalization.py +234 -0
- onnx2tf/ops/Optional.py +127 -0
- onnx2tf/ops/OptionalGetElement.py +3 -13
- onnx2tf/ops/OptionalHasElement.py +3 -13
- onnx2tf/ops/PRelu.py +44 -11
- onnx2tf/ops/TfIdfVectorizer.py +431 -0
- {onnx2tf-1.29.22.dist-info → onnx2tf-1.29.24.dist-info}/METADATA +16 -9
- {onnx2tf-1.29.22.dist-info → onnx2tf-1.29.24.dist-info}/RECORD +12 -8
- {onnx2tf-1.29.22.dist-info → onnx2tf-1.29.24.dist-info}/WHEEL +1 -1
- {onnx2tf-1.29.22.dist-info → onnx2tf-1.29.24.dist-info}/entry_points.txt +0 -0
onnx2tf/__init__.py
CHANGED
(+1 -1: version string bumped from 1.29.22 to 1.29.24; the hunk body was not preserved in this view)

onnx2tf/ops/CenterCropPad.py
ADDED
import random
random.seed(0)
import numpy as np
np.random.seed(0)
import tensorflow as tf
import tf_keras
import onnx_graphsurgeon as gs
from onnx2tf.utils.common_functions import (
    get_constant_or_variable,
    print_node_info,
    inverted_operation_enable_disable,
    make_tf_node_info,
    get_replacement_parameter,
    pre_process_transpose,
    post_process_transpose,
    convert_axis,
)


@print_node_info
@inverted_operation_enable_disable
@get_replacement_parameter
def make_node(
    *,
    graph_node: gs.Node,
    tf_layers_dict: dict,
    **kwargs: dict,
):
    """CenterCropPad

    Parameters
    ----------
    graph_node: gs.Node
        graph_surgeon Node

    tf_layers_dict: dict
        optype, shape, dtype, tensorflow graph
    """
    before_op_output_shape_trans_1 = \
        tf_layers_dict.get(graph_node.inputs[0].name, {}).get('before_op_output_shape_trans', True)
    before_op_output_shape_trans = \
        before_op_output_shape_trans_1

    graph_node_input = get_constant_or_variable(
        graph_node.inputs[0],
        before_op_output_shape_trans,
    )
    graph_node_shape = get_constant_or_variable(
        graph_node.inputs[1],
        False,
    )
    graph_node_output: gs.Variable = graph_node.outputs[0]
    shape = graph_node_output.shape
    dtype = graph_node_output.dtype

    input_tensor = tf_layers_dict[graph_node_input.name]['tf_node'] \
        if isinstance(graph_node_input, gs.Variable) else graph_node_input
    target_shape = tf_layers_dict[graph_node_shape.name]['tf_node'] \
        if isinstance(graph_node_shape, gs.Variable) else graph_node_shape

    # Preserving Graph Structure (Dict)
    tf_layers_dict[graph_node_output.name] = {
        'optype': graph_node.op,
        'shape': shape,
        'dtype': dtype,
        'nhwc': tf_layers_dict[graph_node_input.name]['nhwc'] \
            if isinstance(graph_node_input, gs.Variable) \
                and 'nhwc' in tf_layers_dict[graph_node_input.name].keys() else False
    }

    # Pre-process transpose
    input_tensor = pre_process_transpose(
        value_before_transpose=input_tensor,
        param_target='inputs',
        param_name=graph_node.inputs[0].name,
        **kwargs,
    )

    input_rank = input_tensor.shape.rank
    if input_rank is None:
        input_rank = tf.rank(input_tensor)

    axes = graph_node.attrs.get('axes', None)
    if isinstance(axes, np.ndarray):
        axes = axes.tolist()

    if axes is None:
        if isinstance(input_rank, int):
            axes_list = list(range(input_rank))
            if before_op_output_shape_trans:
                axes_list = [
                    convert_axis(
                        axis=axis,
                        tensor_rank=input_rank,
                        before_op_output_shape_trans=before_op_output_shape_trans,
                    ) for axis in axes_list
                ]
            axes_tensor = tf.constant(axes_list, dtype=tf.int32)
        else:
            rank_t = tf.cast(input_rank, tf.int32)
            axes_tensor = tf.range(rank_t)
            if before_op_output_shape_trans:
                axes_tensor = tf.where(
                    tf.equal(axes_tensor, 0),
                    0,
                    tf.where(tf.equal(axes_tensor, 1), rank_t - 1, axes_tensor - 1),
                )
    else:
        if not isinstance(axes, list):
            axes = [axes]
        if isinstance(input_rank, int):
            axes_conv = [
                convert_axis(
                    axis=axis,
                    tensor_rank=input_rank,
                    before_op_output_shape_trans=before_op_output_shape_trans,
                ) for axis in axes
            ]
            axes_tensor = tf.constant(axes_conv, dtype=tf.int32)
        else:
            axes_tensor = tf.convert_to_tensor(axes, dtype=tf.int32)
            if before_op_output_shape_trans:
                rank_t = tf.cast(input_rank, tf.int32)
                axes_tensor = tf.where(axes_tensor < 0, axes_tensor + rank_t, axes_tensor)
                axes_tensor = tf.where(
                    tf.equal(axes_tensor, 0),
                    0,
                    tf.where(tf.equal(axes_tensor, 1), rank_t - 1, axes_tensor - 1),
                )

    if isinstance(target_shape, list):
        target_shape = tf.constant(np.asarray(target_shape, dtype=np.int32))
    elif isinstance(target_shape, np.ndarray):
        target_shape = tf.convert_to_tensor(target_shape.astype(np.int32))
    else:
        target_shape = tf.cast(target_shape, tf.int32)

    input_shape = tf.shape(input_tensor, out_type=tf.int32)
    target_shape_full = tf.tensor_scatter_nd_update(
        input_shape,
        tf.expand_dims(axes_tensor, axis=1),
        target_shape,
    )

    diff = target_shape_full - input_shape

    pad_before = tf.where(diff > 0, tf.math.floordiv(diff, 2), 0)
    pad_after = tf.where(diff > 0, diff - tf.math.floordiv(diff, 2), 0)
    crop_before = tf.where(diff < 0, tf.math.floordiv(-diff, 2), 0)
    crop_after = tf.where(diff < 0, (-diff) - tf.math.floordiv(-diff, 2), 0)

    begin = crop_before
    size = input_shape - crop_before - crop_after
    cropped = tf.slice(input_tensor, begin, size)

    paddings = tf.stack([pad_before, pad_after], axis=1)
    if input_tensor.dtype == tf.string:
        pad_value = tf.constant('', dtype=tf.string)
    else:
        pad_value = tf.cast(0, input_tensor.dtype)

    tf_layers_dict[graph_node_output.name]['tf_node'] = \
        tf.pad(
            tensor=cropped,
            paddings=paddings,
            constant_values=pad_value,
            name=graph_node.name,
        )

    # Post-process transpose
    tf_layers_dict[graph_node_output.name]['tf_node'] = post_process_transpose(
        value_before_transpose=tf_layers_dict[graph_node_output.name]['tf_node'],
        param_target='outputs',
        param_name=graph_node.outputs[0].name,
        **kwargs,
    )

    # Generation of Debug Info
    tf_layers_dict[graph_node_output.name]['tf_node_info'] = \
        make_tf_node_info(
            node_info={
                'tf_op_type': 'CenterCropPad',
                'tf_inputs': {
                    'input': input_tensor,
                    'shape': target_shape,
                    'axes': axes,
                },
                'tf_outputs': {
                    'output': tf_layers_dict[graph_node_output.name]['tf_node'],
                },
            }
        )
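The crop/pad split above (floor half before, remainder after, per axis) can be sanity-checked in isolation. A toy sketch of the same arithmetic; shapes and values are illustrative, not from the package:

import tensorflow as tf

# Input [4, 5] -> target [2, 7]: axis 0 is cropped, axis 1 is padded.
x = tf.reshape(tf.range(20), [4, 5])
target = tf.constant([2, 7], dtype=tf.int32)

input_shape = tf.shape(x, out_type=tf.int32)
diff = target - input_shape                      # [-2, 2]

# Same split as the converter: floor half goes before, remainder after.
pad_before = tf.where(diff > 0, diff // 2, 0)
pad_after = tf.where(diff > 0, diff - diff // 2, 0)
crop_before = tf.where(diff < 0, (-diff) // 2, 0)
crop_after = tf.where(diff < 0, (-diff) - (-diff) // 2, 0)

cropped = tf.slice(x, crop_before, input_shape - crop_before - crop_after)
out = tf.pad(cropped, tf.stack([pad_before, pad_after], axis=1))
print(out.shape)   # (2, 7)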
onnx2tf/ops/GroupNormalization.py
ADDED
import random
random.seed(0)
import numpy as np
np.random.seed(0)
import tensorflow as tf
import tf_keras
import onnx_graphsurgeon as gs
from onnx2tf.utils.common_functions import (
    get_constant_or_variable,
    print_node_info,
    inverted_operation_enable_disable,
    make_tf_node_info,
    get_replacement_parameter,
    pre_process_transpose,
    post_process_transpose,
    transpose_with_flexing_deterrence,
)
from onnx2tf.utils.enums import NUMPY_DTYPES_TO_TF_DTYPES


@print_node_info
@inverted_operation_enable_disable
@get_replacement_parameter
def make_node(
    *,
    graph_node: gs.Node,
    tf_layers_dict: dict,
    **kwargs: dict,
):
    """GroupNormalization

    Parameters
    ----------
    graph_node: gs.Node
        graph_surgeon Node

    tf_layers_dict: dict
        optype, shape, dtype, tensorflow graph
    """
    before_op_output_shape_trans_1 = \
        tf_layers_dict.get(graph_node.inputs[0].name, {}).get('before_op_output_shape_trans', True)
    before_op_output_shape_trans_2 = \
        tf_layers_dict.get(graph_node.inputs[1].name, {}).get('before_op_output_shape_trans', True)
    before_op_output_shape_trans_3 = \
        tf_layers_dict.get(graph_node.inputs[2].name, {}).get('before_op_output_shape_trans', True)
    before_op_output_shape_trans = \
        before_op_output_shape_trans_1 \
        and before_op_output_shape_trans_2 \
        and before_op_output_shape_trans_3

    graph_node_input = get_constant_or_variable(
        graph_node.inputs[0],
        before_op_output_shape_trans,
    )
    input_tensor = tf_layers_dict[graph_node_input.name]['tf_node'] \
        if isinstance(graph_node_input, gs.Variable) else graph_node_input

    # Pre-process transpose
    input_tensor = pre_process_transpose(
        value_before_transpose=input_tensor,
        param_target='inputs',
        param_name=graph_node.inputs[0].name,
        **kwargs,
    )

    scale = get_constant_or_variable(
        graph_node.inputs[1],
        before_op_output_shape_trans \
            if graph_node.inputs[1].shape is not None and len(graph_node.inputs[1].shape) != 1 else False,
        is_bias=True,
    )
    scale_dtype = NUMPY_DTYPES_TO_TF_DTYPES[scale.dtype] \
        if isinstance(scale.dtype, np.dtype) else scale.dtype
    scale = tf.convert_to_tensor(scale, dtype=scale_dtype) \
        if isinstance(scale, np.ndarray) else scale

    bias = get_constant_or_variable(
        graph_node.inputs[2],
        before_op_output_shape_trans \
            if graph_node.inputs[2].shape is not None and len(graph_node.inputs[2].shape) != 1 else False,
        is_bias=True,
    )
    bias_dtype = NUMPY_DTYPES_TO_TF_DTYPES[bias.dtype] \
        if isinstance(bias.dtype, np.dtype) else bias.dtype
    bias = tf.convert_to_tensor(bias, dtype=bias_dtype) \
        if isinstance(bias, np.ndarray) else bias

    graph_node_output: gs.Variable = graph_node.outputs[0]
    shape = graph_node_output.shape
    dtype = graph_node_output.dtype

    epsilon = graph_node.attrs.get('epsilon', 1e-05)
    num_groups = int(graph_node.attrs.get('num_groups', 1))
    stash_type = int(graph_node.attrs.get('stash_type', 1))
    opset = kwargs.get('opset', None)

    # Preserving Graph Structure (Dict)
    tf_layers_dict[graph_node_output.name] = {
        'optype': graph_node.op,
        'shape': shape,
        'dtype': dtype,
        'nhwc': tf_layers_dict[graph_node_input.name]['nhwc'] \
            if isinstance(graph_node_input, gs.Variable) \
                and 'nhwc' in tf_layers_dict[graph_node_input.name].keys() else False
    }

    input_rank = input_tensor.shape.rank
    if input_rank is None:
        input_rank = tf.rank(input_tensor)

    channel_axis = -1 if before_op_output_shape_trans else 1
    channel_axis_idx = channel_axis
    if isinstance(input_rank, int):
        channel_axis_idx = channel_axis if channel_axis >= 0 else input_rank + channel_axis

    internal_perm = None
    internal_inverse_perm = None
    if isinstance(input_rank, int) and channel_axis_idx != (input_rank - 1):
        perm = [i for i in range(input_rank) if i != channel_axis_idx] + [channel_axis_idx]
        internal_perm = perm
        internal_inverse_perm = [0] * input_rank
        for i, p in enumerate(perm):
            internal_inverse_perm[p] = i
    elif not isinstance(input_rank, int) and channel_axis != -1:
        rank_t = tf.cast(input_rank, tf.int32)
        perm = tf.concat([
            tf.range(channel_axis),
            tf.range(channel_axis + 1, rank_t),
            [channel_axis],
        ], axis=0)
        internal_perm = perm
        internal_inverse_perm = tf.argsort(perm)

    x = input_tensor
    if internal_perm is not None:
        x = transpose_with_flexing_deterrence(
            input_tensor=x,
            perm=internal_perm,
            **kwargs,
        )

    input_dtype = x.dtype
    calc_dtype = tf.float32 if stash_type == 1 else input_dtype
    x = tf.cast(x, calc_dtype)

    x_shape = tf.shape(x, out_type=tf.int32)
    channels = x_shape[-1]
    group_size = tf.math.floordiv(channels, num_groups)

    group_shape = tf.stack([num_groups, group_size], axis=0)
    new_shape = tf.concat([x_shape[:-1], group_shape], axis=0)
    x_grouped = tf.reshape(x, new_shape)

    rank_with_group = tf.rank(x_grouped)
    spatial_axes = tf.range(1, rank_with_group - 2)
    reduce_axes = tf.concat(
        [spatial_axes, tf.expand_dims(rank_with_group - 1, axis=0)],
        axis=0,
    )

    mean, variance = tf.nn.moments(x_grouped, axes=reduce_axes, keepdims=True)
    x_norm = (x_grouped - mean) * tf.math.rsqrt(variance + tf.cast(epsilon, calc_dtype))
    x_norm = tf.cast(x_norm, input_dtype)

    if opset is not None and opset < 21:
        rank_with_group = x_grouped.shape.rank
        if rank_with_group is not None:
            scale_shape = [1] * (rank_with_group - 2) + [num_groups, 1]
            scale_group = tf.reshape(scale, scale_shape)
            bias_group = tf.reshape(bias, scale_shape)
        else:
            rank_with_group = tf.rank(x_grouped)
            prefix_ones = tf.fill([rank_with_group - 2], 1)
            scale_shape = tf.concat(
                [prefix_ones, tf.constant([num_groups, 1], dtype=tf.int32)],
                axis=0,
            )
            scale_group = tf.reshape(scale, scale_shape)
            bias_group = tf.reshape(bias, scale_shape)
        x_norm = x_norm * tf.cast(scale_group, input_dtype) + tf.cast(bias_group, input_dtype)

    x_norm = tf.reshape(x_norm, x_shape)

    if opset is None or opset >= 21:
        rank_out = x_norm.shape.rank
        if rank_out is not None:
            scale_reshape = tf.reshape(scale, [1] * (rank_out - 1) + [-1])
            bias_reshape = tf.reshape(bias, [1] * (rank_out - 1) + [-1])
        else:
            rank_out = tf.rank(x_norm)
            prefix_ones = tf.fill([rank_out - 1], 1)
            scale_shape = tf.concat(
                [prefix_ones, tf.constant([-1], dtype=tf.int32)],
                axis=0,
            )
            scale_reshape = tf.reshape(scale, scale_shape)
            bias_reshape = tf.reshape(bias, scale_shape)
        x_norm = x_norm * tf.cast(scale_reshape, input_dtype) + tf.cast(bias_reshape, input_dtype)

    if internal_inverse_perm is not None:
        x_norm = transpose_with_flexing_deterrence(
            input_tensor=x_norm,
            perm=internal_inverse_perm,
            **kwargs,
        )

    tf_layers_dict[graph_node_output.name]['tf_node'] = x_norm

    # Post-process transpose
    tf_layers_dict[graph_node_output.name]['tf_node'] = post_process_transpose(
        value_before_transpose=tf_layers_dict[graph_node_output.name]['tf_node'],
        param_target='outputs',
        param_name=graph_node.outputs[0].name,
        **kwargs,
    )

    # Generation of Debug Info
    tf_layers_dict[graph_node_output.name]['tf_node_info'] = \
        make_tf_node_info(
            node_info={
                'tf_op_type': 'GroupNormalization',
                'tf_inputs': {
                    'x': input_tensor,
                    'scale': scale,
                    'bias': bias,
                    'num_groups': num_groups,
                    'epsilon': epsilon,
                    'stash_type': stash_type,
                },
                'tf_outputs': {
                    'output': tf_layers_dict[graph_node_output.name]['tf_node'],
                },
            }
        )
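The core of the conversion is the channels-last reshape to [..., num_groups, group_size] followed by a tf.nn.moments reduction over the spatial axes plus the group-size axis. A toy cross-check against a direct per-group computation; the NumPy reference here is illustrative, not part of the package:

import numpy as np
import tensorflow as tf

np.random.seed(0)
N, H, W, C, G = 2, 4, 4, 6, 3      # channels-last input, 3 groups of 2 channels
x = np.random.randn(N, H, W, C).astype(np.float32)
eps = 1e-5

# Grouped reshape + moments, mirroring the converter above
xg = tf.reshape(x, [N, H, W, G, C // G])
mean, var = tf.nn.moments(xg, axes=[1, 2, 4], keepdims=True)
y = tf.reshape((xg - mean) * tf.math.rsqrt(var + eps), [N, H, W, C]).numpy()

# Direct reference: normalize each (batch, group) slice independently
ref = np.empty_like(x)
for n in range(N):
    for g in range(G):
        sl = x[n, :, :, g * 2:(g + 1) * 2]
        ref[n, :, :, g * 2:(g + 1) * 2] = (sl - sl.mean()) / np.sqrt(sl.var() + eps)

print(np.abs(y - ref).max())  # ~1e-6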
onnx2tf/ops/Optional.py
ADDED
import random
random.seed(0)
import numpy as np
np.random.seed(0)
import tensorflow as tf
import tf_keras
import onnx_graphsurgeon as gs
from onnx2tf.utils.common_functions import (
    get_constant_or_variable,
    print_node_info,
    inverted_operation_enable_disable,
    make_tf_node_info,
    get_replacement_parameter,
)
from onnx2tf.utils.enums import ONNX_DTYPES_TO_TF_DTYPES


def _type_proto_to_spec(type_proto):
    if type_proto is None:
        return None

    if hasattr(type_proto, 'optional_type') and type_proto.HasField('optional_type'):
        return _type_proto_to_spec(type_proto.optional_type.elem_type)

    if hasattr(type_proto, 'tensor_type') and type_proto.HasField('tensor_type'):
        elem_type = type_proto.tensor_type.elem_type
        tf_dtype = ONNX_DTYPES_TO_TF_DTYPES.get(elem_type, tf.float32)
        dims = []
        for dim in type_proto.tensor_type.shape.dim:
            if dim.HasField('dim_value'):
                dims.append(dim.dim_value)
            else:
                dims.append(None)
        return tf.TensorSpec(shape=dims, dtype=tf_dtype)

    if hasattr(type_proto, 'sequence_type') and type_proto.HasField('sequence_type'):
        elem_spec = _type_proto_to_spec(type_proto.sequence_type.elem_type)
        if isinstance(elem_spec, tf.TensorSpec):
            elem_shape = list(elem_spec.shape)
            return tf.RaggedTensorSpec(
                shape=[None] + elem_shape,
                dtype=elem_spec.dtype,
                ragged_rank=1,
            )
    return None


@print_node_info
@inverted_operation_enable_disable
@get_replacement_parameter
def make_node(
    *,
    graph_node: gs.Node,
    tf_layers_dict: dict,
    **kwargs: dict,
):
    """Optional

    Parameters
    ----------
    graph_node: gs.Node
        graph_surgeon Node

    tf_layers_dict: dict
        optype, shape, dtype, tensorflow graph
    """
    graph_node_input = None
    input_tensor = None
    if len(graph_node.inputs) >= 1 and graph_node.inputs[0].name != '':
        graph_node_input = get_constant_or_variable(
            graph_node.inputs[0],
            before_op_output_shape_trans=False,
        )
        input_tensor = tf_layers_dict[graph_node_input.name]['tf_node'] \
            if isinstance(graph_node_input, gs.Variable) else graph_node_input

    graph_node_output: gs.Variable = graph_node.outputs[0]
    shape = graph_node_output.shape
    dtype = graph_node_output.dtype

    # Preserving Graph Structure (Dict)
    tf_layers_dict[graph_node_output.name] = {
        'optype': graph_node.op,
        'shape': shape,
        'dtype': dtype,
        'nhwc': tf_layers_dict[graph_node_input.name]['nhwc'] \
            if isinstance(graph_node_input, gs.Variable) \
                and 'nhwc' in tf_layers_dict[graph_node_input.name].keys() else False
    }

    # Generation of TF OP
    if input_tensor is None:
        type_proto = graph_node.attrs.get('type', None)
        if type_proto is None and hasattr(dtype, 'HasField'):
            type_proto = dtype
        spec = _type_proto_to_spec(type_proto)
        if spec is None:
            spec = tf.TensorSpec(shape=None, dtype=tf.float32)
        tf_layers_dict[graph_node_output.name]['tf_node'] = \
            tf.experimental.Optional.empty(
                element_spec=spec,
            )
    elif isinstance(input_tensor, tf.experimental.Optional):
        tf_layers_dict[graph_node_output.name]['tf_node'] = input_tensor
    else:
        value = input_tensor
        if isinstance(input_tensor, np.ndarray):
            value = tf.convert_to_tensor(input_tensor)
        tf_layers_dict[graph_node_output.name]['tf_node'] = \
            tf.experimental.Optional.from_value(
                value=value,
                name=graph_node.name,
            )

    # Generation of Debug Info
    tf_layers_dict[graph_node_output.name]['tf_node_info'] = \
        make_tf_node_info(
            node_info={
                'tf_op_type': tf.experimental.Optional,
                'tf_inputs': {
                    'input': input_tensor,
                },
                'tf_outputs': {
                    'output': tf_layers_dict[graph_node_output.name]['tf_node'],
                },
            }
        )
onnx2tf/ops/OptionalGetElement.py
CHANGED
@@ -63,24 +63,14 @@ def make_node(
     )
 
     # Generation of TF OP
-    if isinstance(input_tensor,
+    if isinstance(input_tensor, tf.experimental.Optional):
+        optional = input_tensor
+    else:
         optional = \
             tf.experimental.Optional.from_value(
                 value=tf.convert_to_tensor(input_tensor),
                 name=graph_node.name,
             )
-    else:
-        converted_tenosr = tf.convert_to_tensor(input_tensor)
-        spec = None
-        if tf_keras.backend.is_keras_tensor(converted_tenosr):
-            spec = converted_tenosr.type_spec
-        else:
-            spec = tf.TensorSpec.from_tensor(converted_tenosr)
-
-        optional = \
-            tf.experimental.Optional.empty(
-                element_spec=spec,
-            )
     tf_layers_dict[graph_node_output.name]['tf_node'] = optional.get_value()
 
     # Post-process transpose
onnx2tf/ops/OptionalHasElement.py
CHANGED
@@ -63,24 +63,14 @@ def make_node(
     )
 
     # Generation of TF OP
-    if isinstance(input_tensor,
+    if isinstance(input_tensor, tf.experimental.Optional):
+        optional = input_tensor
+    else:
         optional = \
             tf.experimental.Optional.from_value(
                 value=tf.convert_to_tensor(input_tensor),
                 name=graph_node.name,
             )
-    else:
-        converted_tenosr = tf.convert_to_tensor(input_tensor)
-        spec = None
-        if tf_keras.backend.is_keras_tensor(converted_tenosr):
-            spec = converted_tenosr.type_spec
-        else:
-            spec = tf.TensorSpec.from_tensor(converted_tenosr)
-
-        optional = \
-            tf.experimental.Optional.empty(
-                element_spec=spec,
-            )
     tf_layers_dict[graph_node_output.name]['tf_node'] = optional.has_value()
 
     # Post-process transpose
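All three Optional handlers now route through tf.experimental.Optional. A minimal round-trip of that TF API, with illustrative values:

import tensorflow as tf

# Optional carrying a value: has_value() -> True, get_value() unwraps it
opt = tf.experimental.Optional.from_value(tf.constant([1.0, 2.0]))
print(opt.has_value().numpy())   # True
print(opt.get_value().numpy())   # [1. 2.]

# Empty Optional built from an element spec: has_value() -> False
empty = tf.experimental.Optional.empty(
    element_spec=tf.TensorSpec(shape=[2], dtype=tf.float32))
print(empty.has_value().numpy())  # False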
onnx2tf/ops/PRelu.py
CHANGED
@@ -124,22 +124,55 @@ def make_node(
         tf_layers_dict[graph_node_output.name].pop('nhwc')
 
     # Generation of TF OP
-
-
-
-
-
+    shared_axes = None
+    input_shape = input_tensor.shape
+    slope_shape = slope.shape if hasattr(slope, 'shape') else None
+    if input_shape is not None and slope_shape is not None:
+        input_rank = len(input_shape)
+        if len(slope_shape) == input_rank - 1:
+            shared_axes = [
+                i + 1 for i, dim in enumerate(slope_shape)
+                if dim is not None and dim == 1
+            ]
+        elif len(slope_shape) == 1 and input_rank >= 3:
+            slope_dim = slope_shape[0]
+            channel_axis = None
+            if isinstance(slope_dim, int):
+                if input_shape[1] == slope_dim:
+                    channel_axis = 1
+                elif input_shape[-1] == slope_dim:
+                    channel_axis = input_rank - 1
+            if channel_axis is not None:
+                shared_axes = [ax for ax in range(1, input_rank) if ax != channel_axis]
+
+    if shared_axes is None:
         if slope.shape is not None \
             and len(slope.shape) > 0 \
             and sum([1 if dim is not None and dim == 1 else 0 for dim in slope.shape]) == len(slope.shape):
             shared_axes = [val + 1 for val in range(len(input_tensor.shape) - 1)]
         else:
-
-
-
-
-            shared_axes=
-
+            input_nhwc = tf_layers_dict.get(graph_node_output.name, {}).get('nhwc', False)
+            if input_nhwc:
+                shared_axes = [val + 1 for val in range(len(input_tensor.shape) - 2)]
+            else:
+                shared_axes = [val + 2 for val in range(len(input_tensor.shape) - 2)]
+
+    use_native_prelu = not replace_prelu_to_pseudo_prelu
+    if not use_native_prelu:
+        pos = tf.nn.relu(input_tensor)
+        neg = (input_tensor - abs(input_tensor)) * (slope * 0.5)
+        tf_layers_dict[graph_node_output.name]['tf_node'] = pos + neg
+    else:
+        try:
+            tf_layers_dict[graph_node_output.name]['tf_node'] = \
+                PReLU(
+                    weights=slope,
+                    shared_axes=shared_axes,
+                )(input_tensor)
+        except Exception:
+            pos = tf.nn.relu(input_tensor)
+            neg = (input_tensor - abs(input_tensor)) * (slope * 0.5)
+            tf_layers_dict[graph_node_output.name]['tf_node'] = pos + neg
 
     # Post-process transpose
     before_trans_shape = tf_layers_dict[graph_node_output.name]['tf_node'].shape
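The pseudo-PReLU fallback relies on the identity (x - |x|) * (slope * 0.5) = slope * min(x, 0), so pos + neg reproduces PReLU(x) = max(x, 0) + slope * min(x, 0). A toy check with illustrative values:

import numpy as np
import tensorflow as tf

x = tf.constant([[-2.0, -0.5, 0.0, 1.5]])
slope = 0.25

# Pseudo-PReLU used as the fallback above
pos = tf.nn.relu(x)
neg = (x - abs(x)) * (slope * 0.5)    # == slope * min(x, 0)
pseudo = pos + neg

# Reference PReLU
ref = tf.where(x >= 0, x, slope * x)
print(np.allclose(pseudo.numpy(), ref.numpy()))  # True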
onnx2tf/ops/TfIdfVectorizer.py
ADDED
import random
random.seed(0)
import collections
from enum import IntEnum
import numpy as np
np.random.seed(0)
import tensorflow as tf
import tf_keras
import onnx_graphsurgeon as gs
from onnx2tf.utils.common_functions import (
    get_constant_or_variable,
    print_node_info,
    inverted_operation_enable_disable,
    make_tf_node_info,
    get_replacement_parameter,
    pre_process_transpose,
    post_process_transpose,
)


class IntMap(collections.UserDict):
    def __init__(self):
        super().__init__()
        self.added_keys = []

    def emplace(self, key, value):
        if not isinstance(key, (int, str, bytes, np.bytes_)):
            raise TypeError(f"key must be a int or str not {type(key)}.")
        if not isinstance(value, NgramPart):
            raise TypeError(f"value must be a NGramPart not {type(value)}.")
        if key not in self:
            self.added_keys.append(key)
            self.data[key] = value
        return self.data[key]

    @property
    def first_key(self):
        if len(self) == 0:
            raise ValueError("IntMap is empty.")
        return self.added_keys[0]


class NgramPart:
    def __init__(self, nid: int):
        self.id_ = nid  # 0 - means no entry, search for a bigger N
        self._leafs_ = None

    def init(self):
        self._leafs_ = IntMap()

    def empty(self):
        return self._leafs_ is None

    def has_leaves(self):
        return self._leafs_ is not None and len(self._leafs_) > 0

    @property
    def leafs_(self):
        if self._leafs_ is None:
            raise RuntimeError("NgramPart was not initialized.")
        return self._leafs_

    def find(self, key):
        if not self.has_leaves():
            return None
        if key in self._leafs_:
            return key
        return None

    def emplace(self, key, value):
        return self.leafs_.emplace(key, value)

    def __getitem__(self, key):
        return self._leafs_[key]


class WeightingCriteria(IntEnum):
    NONE = 0
    TF = 1
    IDF = 2
    TFIDF = 3


def populate_grams(
    els,
    els_index,
    n_ngrams: int,
    ngram_size: int,
    ngram_id: int,
    c,
):
    for _ngrams in range(n_ngrams, 0, -1):
        n = 1
        m = c
        while els_index < len(els):
            p = m.emplace(els[els_index], NgramPart(0))
            if n == ngram_size:
                p.id_ = ngram_id
                ngram_id += 1
                els_index += 1
                break
            if p.empty():
                p.init()
            m = p.leafs_
            n += 1
            els_index += 1
    return ngram_id


class _TfIdfVectorizerImpl:
    def __init__(
        self,
        *,
        max_gram_length,
        max_skip_count,
        min_gram_length,
        mode,
        ngram_counts,
        ngram_indexes,
        pool_int64s,
        pool_strings,
        weights,
    ):
        if mode == "TF":
            self.weighting_criteria_ = WeightingCriteria.TF
        elif mode == "IDF":
            self.weighting_criteria_ = WeightingCriteria.IDF
        elif mode == "TFIDF":
            self.weighting_criteria_ = WeightingCriteria.TFIDF
        else:
            self.weighting_criteria_ = WeightingCriteria.NONE

        self.min_gram_length_ = int(min_gram_length)
        self.max_gram_length_ = int(max_gram_length)
        self.max_skip_count_ = int(max_skip_count)
        self.ngram_counts_ = list(ngram_counts)
        self.ngram_indexes_ = list(ngram_indexes)
        self.output_size_ = max(self.ngram_indexes_) + 1 if len(self.ngram_indexes_) > 0 else 0
        self.weights_ = list(weights) if weights is not None else []
        self.pool_int64s_ = list(pool_int64s) if pool_int64s is not None else []
        self.pool_strings_ = list(pool_strings) if pool_strings is not None else []

        self.int64_map_ = NgramPart(-10)
        self.int64_map_.init()

        total_items = len(self.pool_int64s_ or self.pool_strings_)
        ngram_id = 1  # start with 1, 0 - means no n-gram
        ngram_size = 1
        for i in range(len(self.ngram_counts_)):
            start_idx = self.ngram_counts_[i]
            end_idx = (
                self.ngram_counts_[i + 1]
                if (i + 1) < len(self.ngram_counts_)
                else total_items
            )
            items = end_idx - start_idx
            if items > 0:
                ngrams = items // ngram_size
                if (
                    ngram_size >= self.min_gram_length_
                    and ngram_size <= self.max_gram_length_
                ):
                    ngram_id = populate_grams(
                        self.pool_int64s_ or self.pool_strings_,
                        start_idx,
                        ngrams,
                        ngram_size,
                        ngram_id,
                        self.int64_map_,
                    )
                else:
                    ngram_id += ngrams
            ngram_size += 1

    def increment_count(self, ngram_id: int, row_num: int, frequencies: np.ndarray) -> None:
        ngram_id -= 1
        output_idx = row_num * self.output_size_ + self.ngram_indexes_[ngram_id]
        frequencies[output_idx] += 1

    def output_result(self, B: int, frequencies: np.ndarray) -> np.ndarray:
        if B == 0:
            output_dims = (self.output_size_,)
            B = 1
        else:
            output_dims = (B, self.output_size_)

        row_size = self.output_size_
        total_dims = int(np.prod(output_dims))
        Y = np.empty((total_dims,), dtype=np.float32)

        w = self.weights_
        if self.weighting_criteria_ == WeightingCriteria.TF:
            for i, f in enumerate(frequencies):
                Y[i] = f
        elif self.weighting_criteria_ == WeightingCriteria.IDF:
            if len(w) > 0:
                p = 0
                for _batch in range(B):
                    for i in range(row_size):
                        Y[p] = w[i] if frequencies[p] > 0 else 0
                        p += 1
            else:
                p = 0
                for f in frequencies:
                    Y[p] = 1 if f > 0 else 0
                    p += 1
        elif self.weighting_criteria_ == WeightingCriteria.TFIDF:
            if len(w) > 0:
                p = 0
                for _batch in range(B):
                    for i in range(row_size):
                        Y[p] = w[i] * frequencies[p]
                        p += 1
            else:
                p = 0
                for f in frequencies:
                    Y[p] = f
                    p += 1
        else:
            raise RuntimeError("Unexpected weighting_criteria.")

        return Y.reshape(output_dims)

    def compute_impl(self, X: np.ndarray, row_num: int, row_size: int, frequencies: np.ndarray) -> None:
        X_flat = X[row_num] if len(X.shape) > 1 else X
        row_begin = 0
        row_end = row_begin + row_size

        max_skip_distance = self.max_skip_count_ + 1
        start_ngram_size = self.min_gram_length_

        for skip_distance in range(1, max_skip_distance + 1):
            ngram_start = row_begin
            ngram_row_end = row_end

            while ngram_start < ngram_row_end:
                at_least_this = ngram_start + skip_distance * (start_ngram_size - 1)
                if at_least_this >= ngram_row_end:
                    break

                ngram_item = ngram_start
                int_map = self.int64_map_
                ngram_size = 1
                while (
                    int_map.has_leaves()
                    and ngram_size <= self.max_gram_length_
                    and ngram_item < ngram_row_end
                ):
                    val = X_flat[ngram_item]
                    hit = int_map.find(val)
                    if hit is None:
                        break
                    hit = int_map[val].id_
                    if ngram_size >= start_ngram_size and hit != 0:
                        self.increment_count(hit, row_num, frequencies)
                    int_map = int_map[val]
                    ngram_size += 1
                    ngram_item += skip_distance

                ngram_start += 1

            if start_ngram_size == 1:
                start_ngram_size += 1
                if start_ngram_size > self.max_gram_length_:
                    break

    def run(self, X: np.ndarray) -> np.ndarray:
        total_items = int(np.prod(X.shape))

        num_rows = 0
        B = 0
        C = 0
        input_dims = X.shape
        if len(input_dims) == 0:
            num_rows = 1
            C = 1
            if total_items != 1:
                raise ValueError(f"Unexpected total of items {total_items}.")
        elif len(input_dims) == 1:
            num_rows = 1
            C = input_dims[0]
        elif len(input_dims) == 2:
            B = input_dims[0]
            C = input_dims[1]
            num_rows = B
            if B < 1:
                raise ValueError(
                    f"Input shape must have either [C] or [B,C] dimensions with B > 0, B={B}, C={C}."
                )
        else:
            raise ValueError(
                f"Input shape must have either [C] or [B,C] dimensions with B > 0, B={B}, C={C}."
            )

        if num_rows * C != total_items:
            raise ValueError(
                f"Unexpected total of items, num_rows * C = {num_rows * C} != total_items = {total_items}."
            )

        frequencies = np.zeros((num_rows * self.output_size_,), dtype=np.int64)

        if total_items == 0 or self.int64_map_.empty():
            return self.output_result(B, frequencies)

        for i in range(num_rows):
            self.compute_impl(X, i, C, frequencies)

        return self.output_result(B, frequencies)


@print_node_info
@inverted_operation_enable_disable
@get_replacement_parameter
def make_node(
    *,
    graph_node: gs.Node,
    tf_layers_dict: dict,
    **kwargs: dict,
):
    """TfIdfVectorizer

    Parameters
    ----------
    graph_node: gs.Node
        graph_surgeon Node

    tf_layers_dict: dict
        optype, shape, dtype, tensorflow graph
    """
    before_op_output_shape_trans_1 = \
        tf_layers_dict.get(graph_node.inputs[0].name, {}).get('before_op_output_shape_trans', True)
    before_op_output_shape_trans = \
        before_op_output_shape_trans_1

    graph_node_input = get_constant_or_variable(
        graph_node.inputs[0],
        before_op_output_shape_trans,
    )
    input_tensor = tf_layers_dict[graph_node_input.name]['tf_node'] \
        if isinstance(graph_node_input, gs.Variable) else graph_node_input

    graph_node_output: gs.Variable = graph_node.outputs[0]
    shape = graph_node_output.shape
    dtype = graph_node_output.dtype

    # Preserving Graph Structure (Dict)
    tf_layers_dict[graph_node_output.name] = {
        'optype': graph_node.op,
        'shape': shape,
        'dtype': dtype,
        'nhwc': tf_layers_dict[graph_node_input.name]['nhwc'] \
            if isinstance(graph_node_input, gs.Variable) \
                and 'nhwc' in tf_layers_dict[graph_node_input.name].keys() else False
    }

    # Pre-process transpose
    input_tensor = pre_process_transpose(
        value_before_transpose=input_tensor,
        param_target='inputs',
        param_name=graph_node.inputs[0].name,
        **kwargs,
    )

    max_gram_length = int(graph_node.attrs.get('max_gram_length', 1))
    max_skip_count = int(graph_node.attrs.get('max_skip_count', 0))
    min_gram_length = int(graph_node.attrs.get('min_gram_length', 1))
    mode = graph_node.attrs.get('mode', 'TF')
    ngram_counts = graph_node.attrs.get('ngram_counts', [])
    ngram_indexes = graph_node.attrs.get('ngram_indexes', [])
    pool_int64s = graph_node.attrs.get('pool_int64s', None)
    pool_strings = graph_node.attrs.get('pool_strings', None)
    weights = graph_node.attrs.get('weights', None)

    impl = _TfIdfVectorizerImpl(
        max_gram_length=max_gram_length,
        max_skip_count=max_skip_count,
        min_gram_length=min_gram_length,
        mode=mode,
        ngram_counts=ngram_counts,
        ngram_indexes=ngram_indexes,
        pool_int64s=pool_int64s,
        pool_strings=pool_strings,
        weights=weights,
    )

    def _tfidf_numpy(x):
        return impl.run(x)

    tf_layers_dict[graph_node_output.name]['tf_node'] = \
        tf.numpy_function(
            func=_tfidf_numpy,
            inp=[input_tensor],
            Tout=tf.float32,
            name=graph_node.name,
        )
    if shape is not None:
        tf_layers_dict[graph_node_output.name]['tf_node'] = \
            tf.ensure_shape(
                tf_layers_dict[graph_node_output.name]['tf_node'],
                shape,
            )

    # Post-process transpose
    tf_layers_dict[graph_node_output.name]['tf_node'] = post_process_transpose(
        value_before_transpose=tf_layers_dict[graph_node_output.name]['tf_node'],
        param_target='outputs',
        param_name=graph_node.outputs[0].name,
        **kwargs,
    )

    # Generation of Debug Info
    tf_layers_dict[graph_node_output.name]['tf_node_info'] = \
        make_tf_node_info(
            node_info={
                'tf_op_type': 'TfIdfVectorizer',
                'tf_inputs': {
                    'max_gram_length': max_gram_length,
                    'max_skip_count': max_skip_count,
                    'min_gram_length': min_gram_length,
                    'mode': mode,
                    'ngram_counts': ngram_counts,
                    'ngram_indexes': ngram_indexes,
                    'pool_int64s': pool_int64s,
                    'pool_strings': pool_strings,
                    'weights': weights,
                },
                'tf_outputs': {
                    'output': tf_layers_dict[graph_node_output.name]['tf_node'],
                },
            }
        )
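Because the op executes as a NumPy callback under tf.numpy_function, the vectorizer can be exercised directly. A toy unigram TF count, assuming the reconstructed _TfIdfVectorizerImpl above; the attribute values are illustrative:

import numpy as np

# Unigram TF counting over the vocabulary {2: slot 0, 3: slot 1, 5: slot 2}
impl = _TfIdfVectorizerImpl(
    max_gram_length=1,
    max_skip_count=0,
    min_gram_length=1,
    mode="TF",
    ngram_counts=[0],         # 1-grams start at pool offset 0
    ngram_indexes=[0, 1, 2],  # output slot for each pooled n-gram
    pool_int64s=[2, 3, 5],
    pool_strings=None,
    weights=None,
)
print(impl.run(np.array([[2, 3, 5, 2]], dtype=np.int64)))
# [[2. 1. 1.]]  -- token 2 occurs twice, tokens 3 and 5 once each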
{onnx2tf-1.29.22.dist-info → onnx2tf-1.29.24.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: onnx2tf
-Version: 1.29.22
+Version: 1.29.24
 Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
 Keywords: onnx,tensorflow,tflite,keras,deep-learning,machine-learning
 Author: Katsuya Hyodo
@@ -122,7 +122,7 @@ https://github.com/PINTO0309/onnx2tf/wiki/model_status
 |Cast|:heavy_check_mark:|
 |Ceil|:heavy_check_mark:|
 |Celu|:heavy_check_mark:|
-|CenterCropPad
+|CenterCropPad|:heavy_check_mark:|
 |Clip|:heavy_check_mark:|
 |Col2Im|:white_check_mark:|
 |Compress|:heavy_check_mark:|
@@ -166,7 +166,7 @@ https://github.com/PINTO0309/onnx2tf/wiki/model_status
 |GreaterOrEqual|:heavy_check_mark:|
 |Greater|:heavy_check_mark:|
 |GridSample|:white_check_mark:|
-|GroupNormalization
+|GroupNormalization|:heavy_check_mark:|
 |GRU|:heavy_check_mark:|
 |HammingWindow|:white_check_mark:|
 |HannWindow|:white_check_mark:|
@@ -210,7 +210,7 @@ https://github.com/PINTO0309/onnx2tf/wiki/model_status
 |NegativeLogLikelihoodLoss|:heavy_check_mark:|
 |NonMaxSuppression|:heavy_check_mark:|
 |NonZero|:heavy_check_mark:|
-|Optional
+|Optional|:heavy_check_mark:|
 |OptionalGetElement|:heavy_check_mark:|
 |OptionalHasElement|:heavy_check_mark:|
 |Not|:heavy_check_mark:|
@@ -291,7 +291,7 @@ https://github.com/PINTO0309/onnx2tf/wiki/model_status
 |Tan|:heavy_check_mark:|
 |Tanh|:heavy_check_mark:|
 |TensorScatter|:heavy_check_mark:|
-|TfIdfVectorizer
+|TfIdfVectorizer|:white_check_mark:|
 |ThresholdedRelu|:heavy_check_mark:|
 |Tile|:heavy_check_mark:|
 |TopK|:heavy_check_mark:|
@@ -365,7 +365,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
 docker run --rm -it \
 -v `pwd`:/workdir \
 -w /workdir \
-ghcr.io/pinto0309/onnx2tf:1.29.22
+ghcr.io/pinto0309/onnx2tf:1.29.24
 
 or
 
@@ -373,7 +373,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
 docker run --rm -it \
 -v `pwd`:/workdir \
 -w /workdir \
-docker.io/pinto0309/onnx2tf:1.29.22
+docker.io/pinto0309/onnx2tf:1.29.24
 
 or
 
@@ -493,13 +493,20 @@ onnx2tf -i resnet18-v1-7.onnx -v info
 # without input OP name.
 # Note that if there are multiple input OPs, the zero dimension of all input OPs is
 # forced to be rewritten.
-# The `-
-# 
+# The `-sh/--shape-hints` option provides shape hints for input tensors with undefined
+# dimensions, significantly improving the conversion success rate for models with dynamic
+# input shapes. Specifying this option in combination with the `-b` option will further
+# improve the success rate of model conversion. The `-sh` option does not change ONNX
+# input OPs to static shapes.
+# The `-ois/--overwrite_input_shape` option allows undefined dimensions in all dimensions,
+# including the zero dimensionality, to be overwritten to a static shape, but requires
 # the input OP name to be specified.
 # e.g. -ois data1:1,3,224,224 data2:1,255 data3:1,224,6
 wget https://github.com/PINTO0309/onnx2tf/releases/download/0.0.2/resnet18-v1-7.onnx
 onnx2tf -i resnet18-v1-7.onnx -b 1
 or
+onnx2tf -i resnet18-v1-7.onnx -sh data:1,3,224,224 -b 1
+or
 onnx2tf -i resnet18-v1-7.onnx -ois data:1,3,224,224
 
 # Suppress automatic transposition of input OPs from NCW, NCHW, NCDHW to NWC, NHWC, NDHWC.
{onnx2tf-1.29.22.dist-info → onnx2tf-1.29.24.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-onnx2tf/__init__.py,sha256=
+onnx2tf/__init__.py,sha256=W5COczfCiRCsNJlBLly_Dss2ma8U9Wk7EMVTrBRezXY,67
 onnx2tf/__main__.py,sha256=2RSCQ7d4lc6CwD-rlGn9UicPFg-P5du7ZD_yh-kuBEU,57
 onnx2tf/onnx2tf.py,sha256=BC-BFMf8QUG7PtOvpwglhe1sc4FhTO8AMrdlxKUN5jc,208204
 onnx2tf/ops/Abs.py,sha256=V7btmCG_ZvK_qJovUsguq0ZMJ349mhNQ4FHSgzP_Yuo,4029
@@ -26,6 +26,7 @@ onnx2tf/ops/BlackmanWindow.py,sha256=o_wLhYAmMearuJNlSdUfDeQm7D6g_y_H21uG-foctbA
 onnx2tf/ops/Cast.py,sha256=M0LRClHPgZ_8NubwME6ipKrAqcY9aKC5ihQXCkTkNkM,4601
 onnx2tf/ops/Ceil.py,sha256=0-jaueltpQSwpOIDUmy9DdTy98qN-XimYu5cHVPnUIs,3586
 onnx2tf/ops/Celu.py,sha256=9g7WNKo4G_jMtUXcoOfpNdLYqEsuyXLPkkyQZxDuL4U,3853
+onnx2tf/ops/CenterCropPad.py,sha256=fQH_z4nSUCPPA1ar-VWzR2PHMYXLPvPKvN3EexzTOHg,6459
 onnx2tf/ops/Clip.py,sha256=K3Pgt9BXl5_rzg6s-kPFmwElL5COsvolRY1BUTo7UWw,8753
 onnx2tf/ops/Col2Im.py,sha256=8n66z3O59VJvJRlcrj93a5TLJ_qh-aSdR_-8SAQIlRo,7658
 onnx2tf/ops/Compress.py,sha256=NvDGr9gCNl-8YG41xDBfe3UvhRP03K-ktdtY_MoytBc,3667
@@ -71,6 +72,7 @@ onnx2tf/ops/Greater.py,sha256=fhMFF0fGt2c1W_rHCy0yKAXUYThLgBVnoFmCYLPD12Q,4585
 onnx2tf/ops/GreaterOrEqual.py,sha256=sfNBveEyoU2oIlFILKlZ3jopeCnnPH2ij4J08QtIX8I,4604
 onnx2tf/ops/GridSample.py,sha256=3THBiJcB9J5eFoobiwwqqQ-BJ0pr7xK9JyTCxXixLs0,31360
 onnx2tf/ops/GroupNorm.py,sha256=zMjgkTDhb8OySDa4ZBg-45rWQQ5dy3wmqAY-Aj7izac,12026
+onnx2tf/ops/GroupNormalization.py,sha256=-ZQUoZKJkEatVQ6qdHpzUpyCeJQWTqmwGjZgkYf_PDg,8453
 onnx2tf/ops/HammingWindow.py,sha256=PY6NVvzutmFKB8UyJYl2LcwqzZGhRMg0jot96m0isCc,2891
 onnx2tf/ops/HannWindow.py,sha256=vMvtn3JwjxUqPXTXdNzk3QjH87JFAEStwwEnIl_5jKY,2882
 onnx2tf/ops/HardSigmoid.py,sha256=KDP_t-Z70sDsHMOYxyJ7ZNH31zqkrViOKYCcRG5NJHc,3662
@@ -115,10 +117,11 @@ onnx2tf/ops/NonMaxSuppression.py,sha256=nHeiX5eMGQAq_51KoljNZGlZddJ89Oe7Yfe33xLh
 onnx2tf/ops/NonZero.py,sha256=2EYZFMNIejeqR2azHw0CT2mthiKuRPQepUafzeVE8Nk,2788
 onnx2tf/ops/Not.py,sha256=wn3nThGf4gtpQdHjP7OX2xlhyaNQGeHifjZ18O5shhg,3599
 onnx2tf/ops/OneHot.py,sha256=OThLm1MF1X75zx7gep_qdnRHsTRZX_tqZxjt6pAVi7E,6489
-onnx2tf/ops/
-onnx2tf/ops/
+onnx2tf/ops/Optional.py,sha256=x8tVVrFyX9i8u04XPF-v7XK6cH5Ney6QKCMRcuOuGxU,4252
+onnx2tf/ops/OptionalGetElement.py,sha256=YTSx7cAc74aQXarG9ChAtfXOvZaxNedLH62DZe0c_uQ,2694
+onnx2tf/ops/OptionalHasElement.py,sha256=1QD94dVcoonou5u9H4Wp8gy93R7htY3P1u_XXhI5iWI,2694
 onnx2tf/ops/Or.py,sha256=7gyUSgbEVVQBp2t3G93pZlHNn0ejJfZ3rbSDOnFgUi0,4586
-onnx2tf/ops/PRelu.py,sha256=
+onnx2tf/ops/PRelu.py,sha256=DxLBUgxQdO5IL1--xA5lbGq7dI-kr8UKn2Mf-4j8L5Q,7769
 onnx2tf/ops/Pad.py,sha256=xZOkZK-53sXU-d0nADAjR1wOpKqfzHeJjTmzwon6G4A,11883
 onnx2tf/ops/Pow.py,sha256=DZjrWQSyLw_BPXrKyoTqT9KJIxPfNxnYVcoTDBagDgM,7056
 onnx2tf/ops/QLinearAdd.py,sha256=OssQI0pd8KXdnCC8urCPKP8bpcvSX0D76bS7q4-xMSY,5027
@@ -194,6 +197,7 @@ onnx2tf/ops/Sum.py,sha256=wtI0SbGuNFxkLskBk68ZhOAg3XyrIx-9xGYy1GZCVSo,3073
 onnx2tf/ops/Tan.py,sha256=Ncig8clGvY7GWshqxRDRdcxjcbf_HTKGdpDw5ValrKI,3582
 onnx2tf/ops/Tanh.py,sha256=PIQUvxS_AIDufblC2vc573nse2UCRA9z5yWd7kB-51s,3585
 onnx2tf/ops/TensorScatter.py,sha256=9M1L8ys2FodscRZXdjme5NQYrCFX_nZH7wm8vx-PXcc,8176
+onnx2tf/ops/TfIdfVectorizer.py,sha256=q574eLsJYj1hOx4Pnh0X5TdNHBSLOwAok1liI-r2TAY,13868
 onnx2tf/ops/ThresholdedRelu.py,sha256=ArF3uRH7jN8kdYYDNcivJgv9UTFl5aqqSH2Qu79j4sY,3769
 onnx2tf/ops/Tile.py,sha256=xkprg6yTaykivcHFJ644opzVPctaeplu-Ed-OpS98Gg,12720
 onnx2tf/ops/TopK.py,sha256=f6OG-DcMWneXwSjIkmY935SPyOMD5tMteHnlQHoJwQo,6348
@@ -211,7 +215,7 @@ onnx2tf/utils/enums.py,sha256=7c5TqetqB07VjyHoxJHfLgtqBqk9ZRyUF33fPOJR1IM,1649
 onnx2tf/utils/iterative_json_optimizer.py,sha256=qqeIxWGxrhcCYk8-ebWnblnOkzDCwi-nseipHzHR_bk,10436
 onnx2tf/utils/json_auto_generator.py,sha256=OC-SfKtUg7zUxaXTAg6kT0ShzIc3ByjDa3FNp173DtA,60302
 onnx2tf/utils/logging.py,sha256=yUCmPuJ_XiUItM3sZMcaMO24JErkQy7zZwVTYWAuiKg,1982
-onnx2tf-1.29.
-onnx2tf-1.29.
-onnx2tf-1.29.
-onnx2tf-1.29.
+onnx2tf-1.29.24.dist-info/WHEEL,sha256=5DEXXimM34_d4Gx1AuF9ysMr1_maoEtGKjaILM3s4w4,80
+onnx2tf-1.29.24.dist-info/entry_points.txt,sha256=GuhvLu7ZlYECumbmoiFlKX0mFPtFi_Ti9L-E5yuQqKs,42
+onnx2tf-1.29.24.dist-info/METADATA,sha256=rGGwnT0qMDRf78t__34bQ73PElC_VPkuFYJATdo76zk,156543
+onnx2tf-1.29.24.dist-info/RECORD,,
{onnx2tf-1.29.22.dist-info → onnx2tf-1.29.24.dist-info}/WHEEL
CHANGED
(+1 -1; hunk body not preserved in this view)

{onnx2tf-1.29.22.dist-info → onnx2tf-1.29.24.dist-info}/entry_points.txt
File without changes