onnx2tf 1.29.19__py3-none-any.whl → 1.29.20__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
@@ -57,9 +57,10 @@ def make_node(
         graph_node.inputs[0],
         before_op_output_shape_trans,
     )
+    # Indices must not be layout-transposed.
     graph_node_input_2 = get_constant_or_variable(
         graph_node.inputs[1],
-        before_op_output_shape_trans,
+        False,
     )
     graph_node_output: gs.Variable = graph_node.outputs[0]
     shape = graph_node_output.shape
@@ -77,12 +78,29 @@ def make_node(
         param_name=graph_node.inputs[0].name,
         **kwargs,
     )
-    indices_tensor = pre_process_transpose(
-        value_before_transpose=indices_tensor,
-        param_target='inputs',
-        param_name=graph_node.inputs[1].name,
-        **kwargs,
-    )
+    # If input is transposed by replacement params, align indices tensor shape.
+    op_rep_params = kwargs.get('op_rep_params', [])
+    params_perm = None
+    indices_perm = None
+    for op_rep_param in op_rep_params:
+        if op_rep_param['param_target'] == 'inputs' \
+            and op_rep_param['param_name'] == graph_node.inputs[0].name:
+            params_perm = op_rep_param.get('pre_process_transpose_perm', None)
+        if op_rep_param['param_target'] == 'inputs' \
+            and op_rep_param['param_name'] == graph_node.inputs[1].name:
+            indices_perm = op_rep_param.get('pre_process_transpose_perm', None)
+    target_perm = indices_perm if indices_perm is not None else params_perm
+    if target_perm is not None:
+        try:
+            rank = len(indices_tensor.shape) if hasattr(indices_tensor, "shape") else None
+            if rank is None or rank == len(target_perm):
+                indices_tensor = transpose_with_flexing_deterrence(
+                    input_tensor=indices_tensor,
+                    perm=target_perm,
+                    **kwargs,
+                )
+        except Exception:
+            pass

     tensor_rank = len(input_tensor.shape)

onnx2tf/ops/GatherND.py CHANGED
@@ -51,9 +51,10 @@ def make_node(
         graph_node.inputs[0],
         before_op_output_shape_trans,
     )
+    # Indices must not be layout-transposed.
     graph_node_input_2 = get_constant_or_variable(
         graph_node.inputs[1],
-        before_op_output_shape_trans,
+        False,
     )
     graph_node_output: gs.Variable = graph_node.outputs[0]
     shape = graph_node_output.shape
@@ -89,6 +90,32 @@ def make_node(

     replace_gathernd_to_pseudo_gathernd = "gathernd" in kwargs['replace_to_pseudo_operators']

+    # If params is transposed, adjust indices to match the transposed layout.
+    op_rep_params = kwargs.get('op_rep_params', [])
+    params_perm = None
+    indices_perm_specified = False
+    for op_rep_param in op_rep_params:
+        if op_rep_param['param_target'] == 'inputs' and op_rep_param['param_name'] == graph_node.inputs[0].name:
+            params_perm = op_rep_param.get('pre_process_transpose_perm', None)
+        if op_rep_param['param_target'] == 'inputs' and op_rep_param['param_name'] == graph_node.inputs[1].name:
+            if op_rep_param.get('pre_process_transpose_perm', None) is not None:
+                indices_perm_specified = True
+    if params_perm is not None and not indices_perm_specified:
+        # Only handle standard layout swaps that keep batch dims at the front.
+        if batch_dims <= len(params_perm) \
+            and list(params_perm[:batch_dims]) == list(range(batch_dims)):
+            perm_tail = [p - batch_dims for p in params_perm if p >= batch_dims]
+            try:
+                if isinstance(indices_tensor, np.ndarray):
+                    if indices_tensor.shape and indices_tensor.shape[-1] == len(perm_tail):
+                        indices_tensor = indices_tensor[..., perm_tail]
+                else:
+                    idx_last = indices_tensor.shape[-1] if indices_tensor.shape is not None else None
+                    if idx_last is None or idx_last == len(perm_tail):
+                        indices_tensor = tf.gather(indices_tensor, perm_tail, axis=-1)
+            except Exception:
+                pass
+
     # Preserving Graph Structure (Dict)
     tf_layers_dict[graph_node_output.name] = {
         'optype': graph_node.op,
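The GatherND change above relies on an equivalence: transposing params with a perm that keeps the batch dims in front gives the same gather as leaving params alone and instead permuting the coordinate columns in the last dimension of indices. A minimal sketch of that equivalence (hypothetical shapes and perm, not onnx2tf code):

    import numpy as np
    import tensorflow as tf

    params = np.arange(2 * 3 * 4).reshape(2, 3, 4).astype(np.float32)
    indices = np.array([[0, 1, 2], [1, 2, 3]], dtype=np.int64)  # full coordinates into (2, 3, 4)

    perm = [0, 2, 1]                       # hypothetical layout swap, batch_dims == 0
    params_t = np.transpose(params, perm)  # shape (2, 4, 3)
    indices_t = indices[..., perm]         # reorder the coordinate columns the same way

    assert np.array_equal(
        tf.gather_nd(params, indices).numpy(),
        tf.gather_nd(params_t, indices_t).numpy(),
    )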
@@ -55,9 +55,10 @@ def make_node(
         graph_node.inputs[0],
         before_op_output_shape_trans,
     )
+    # Indices must not be layout-transposed.
     graph_node_input_2 = get_constant_or_variable(
         graph_node.inputs[1],
-        before_op_output_shape_trans,
+        False,
     )
     graph_node_input_3 = get_constant_or_variable(
         graph_node.inputs[2],
@@ -81,12 +82,29 @@ def make_node(
     indices_tensor = tf_layers_dict[graph_node_input_2.name]['tf_node'] \
         if isinstance(graph_node_input_2, gs.Variable) else graph_node_input_2
     # Pre-process transpose
-    indices_tensor = pre_process_transpose(
-        value_before_transpose=indices_tensor,
-        param_target='inputs',
-        param_name=graph_node.inputs[1].name,
-        **kwargs,
-    )
+    # If input is transposed by replacement params, align indices tensor shape.
+    op_rep_params = kwargs.get('op_rep_params', [])
+    params_perm = None
+    indices_perm = None
+    for op_rep_param in op_rep_params:
+        if op_rep_param['param_target'] == 'inputs' \
+            and op_rep_param['param_name'] == graph_node.inputs[0].name:
+            params_perm = op_rep_param.get('pre_process_transpose_perm', None)
+        if op_rep_param['param_target'] == 'inputs' \
+            and op_rep_param['param_name'] == graph_node.inputs[1].name:
+            indices_perm = op_rep_param.get('pre_process_transpose_perm', None)
+    target_perm = indices_perm if indices_perm is not None else params_perm
+    if target_perm is not None:
+        try:
+            rank = len(indices_tensor.shape) if hasattr(indices_tensor, "shape") else None
+            if rank is None or rank == len(target_perm):
+                indices_tensor = transpose_with_flexing_deterrence(
+                    input_tensor=indices_tensor,
+                    perm=target_perm,
+                    **kwargs,
+                )
+        except Exception:
+            pass
     updates_tensor = tf_layers_dict[graph_node_input_3.name]['tf_node'] \
         if isinstance(graph_node_input_3, gs.Variable) else graph_node_input_3
     # Pre-process transpose
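The same replacement-parameter lookup recurs in each gather/scatter handler above: scan the op's op_rep_params entries, pick up an explicit pre_process_transpose_perm for the data input or the indices input, and prefer the indices entry when both exist. A minimal standalone sketch (the param_name values are hypothetical):

    op_rep_params = [
        {'param_target': 'inputs', 'param_name': 'data0', 'pre_process_transpose_perm': [0, 2, 3, 1]},
        {'param_target': 'inputs', 'param_name': 'indices0'},  # no explicit perm for indices
    ]
    params_perm = None
    indices_perm = None
    for p in op_rep_params:
        if p['param_target'] == 'inputs' and p['param_name'] == 'data0':
            params_perm = p.get('pre_process_transpose_perm', None)
        if p['param_target'] == 'inputs' and p['param_name'] == 'indices0':
            indices_perm = p.get('pre_process_transpose_perm', None)
    # Prefer an explicit indices perm; otherwise fall back to the data perm.
    target_perm = indices_perm if indices_perm is not None else params_perm
    print(target_perm)  # [0, 2, 3, 1]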
onnx2tf/ops/ScatterND.py CHANGED
@@ -13,6 +13,7 @@ from onnx2tf.utils.common_functions import (
     get_replacement_parameter,
     pre_process_transpose,
     post_process_transpose,
+    transpose_with_flexing_deterrence,
 )


@@ -79,6 +80,32 @@ def make_node(
             and 'nhwc' in tf_layers_dict[graph_node_input_1.name].keys() else False
     }

+    op_rep_params = kwargs.get('op_rep_params', [])
+    params_perm = None
+    indices_perm = None
+    for op_rep_param in op_rep_params:
+        if op_rep_param['param_target'] == 'inputs' \
+            and op_rep_param['param_name'] == graph_node.inputs[0].name:
+            params_perm = op_rep_param.get('pre_process_transpose_perm', None)
+        if op_rep_param['param_target'] == 'inputs' \
+            and op_rep_param['param_name'] == graph_node.inputs[1].name:
+            indices_perm = op_rep_param.get('pre_process_transpose_perm', None)
+
+    def reorder_indices_last_dim(target_indices, perm):
+        if perm is None:
+            return target_indices
+        try:
+            if isinstance(target_indices, np.ndarray):
+                if target_indices.shape and target_indices.shape[-1] == len(perm):
+                    return target_indices[..., perm]
+            else:
+                idx_last = target_indices.shape[-1] if target_indices.shape is not None else None
+                if idx_last is None or idx_last == len(perm):
+                    return tf.gather(target_indices, perm, axis=-1)
+        except Exception:
+            pass
+        return target_indices
+
     # Pre-process transpose
     input_tensor = pre_process_transpose(
         value_before_transpose=input_tensor,
@@ -86,18 +113,26 @@ def make_node(
         param_name=graph_node.inputs[0].name,
         **kwargs,
     )
-    indices_tensor = pre_process_transpose(
-        value_before_transpose=indices_tensor,
-        param_target='inputs',
-        param_name=graph_node.inputs[1].name,
-        **kwargs,
-    )
+    # Indices must not be layout-transposed; apply explicit perm only if specified.
+    if indices_perm is not None:
+        try:
+            rank = len(indices_tensor.shape) if hasattr(indices_tensor, "shape") else None
+            if rank is None or rank == len(indices_perm):
+                indices_tensor = transpose_with_flexing_deterrence(
+                    input_tensor=indices_tensor,
+                    perm=indices_perm,
+                    **kwargs,
+                )
+        except Exception:
+            pass
     updates_tensor = pre_process_transpose(
         value_before_transpose=updates_tensor,
         param_target='inputs',
         param_name=graph_node.inputs[2].name,
         **kwargs,
     )
+    if params_perm is not None and indices_perm is None:
+        indices_tensor = reorder_indices_last_dim(indices_tensor, params_perm)

     # When NHWC is fixed, return to NCHW format before processing.
     data_nhwc = tf_layers_dict[graph_node_input_1.name]['nhwc'] \
@@ -119,6 +154,8 @@ def make_node(
         and len(input_tensor.shape) >= 3:
         perm = [0, len(input_tensor.shape)-1] + [i for i in range(1, len(input_tensor.shape)-1)]
         input_tensor = tf.transpose(a=input_tensor, perm=perm)
+        if indices_perm is None:
+            indices_tensor = reorder_indices_last_dim(indices_tensor, perm)
         nchw = True
     elif not data_nhwc \
         and len(input_tensor.shape) >= 3 \
@@ -126,6 +163,8 @@ def make_node(
         and input_tensor.shape != graph_node.inputs[0].shape:
         perm = [0, len(input_tensor.shape)-1] + [i for i in range(1, len(input_tensor.shape)-1)]
         input_tensor = tf.transpose(a=input_tensor, perm=perm)
+        if indices_perm is None:
+            indices_tensor = reorder_indices_last_dim(indices_tensor, perm)
         nchw = True
     ## indices
     if indices_nhwc \
@@ -14,6 +14,7 @@ from onnx2tf.utils.common_functions import (
     get_replacement_parameter,
     pre_process_transpose,
     post_process_transpose,
+    transpose_with_flexing_deterrence,
 )
 from onnx2tf.utils.enums import NUMPY_DTYPES_TO_TF_DTYPES
 from onnx2tf.utils.logging import *
@@ -112,12 +113,25 @@ def make_node(
         **kwargs,
     )
     if write_indices is not None:
-        write_indices = pre_process_transpose(
-            value_before_transpose=write_indices,
-            param_target='inputs',
-            param_name=graph_node.inputs[2].name,
-            **kwargs,
-        )
+        # Indices must not be layout-transposed; apply explicit perm only if specified.
+        op_rep_params = kwargs.get('op_rep_params', [])
+        indices_perm = None
+        for op_rep_param in op_rep_params:
+            if op_rep_param['param_target'] == 'inputs' \
+                and op_rep_param['param_name'] == graph_node.inputs[2].name:
+                indices_perm = op_rep_param.get('pre_process_transpose_perm', None)
+                break
+        if indices_perm is not None:
+            try:
+                rank = len(write_indices.shape) if hasattr(write_indices, "shape") else None
+                if rank is None or rank == len(indices_perm):
+                    write_indices = transpose_with_flexing_deterrence(
+                        input_tensor=write_indices,
+                        perm=indices_perm,
+                        **kwargs,
+                    )
+            except Exception:
+                pass

     # Generation of TF OP
     past_cache = _as_tensor(past_cache)
@@ -26,6 +26,7 @@ from tensorflow.python.keras.layers import Lambda
 from tensorflow.python.keras.utils import conv_utils
 import onnx
 from onnx.serialization import ProtoSerializer
+from onnx.external_data_helper import uses_external_data
 import onnx_graphsurgeon as gs
 try:
     import onnxruntime as ort
@@ -45,6 +46,8 @@ INF_INDEX_VALUE: int = 4294967296
 ONNX_INF_INDEX_VALUE = sys.maxsize # 9223372036854775807


+
+
 def get_replacement_parameter(func):
     @wraps(func)
     def get_replacement_parameter_wrapper_func(*args, **kwargs):
@@ -4046,6 +4049,9 @@ def dummy_onnx_inference(
             input_sizes[i] = updated_shape

     input_dtypes: List[Any] = [inp.dtype for inp in onnx_inputs]
+    input_size_map = {
+        name: tuple(size) for name, size in zip(input_names, input_sizes)
+    }
     input_datas = {}

     # -cid
@@ -4059,7 +4065,16 @@ def dummy_onnx_inference(
             if input_op_info is not None:
                 ncw_nchw_ncdhw_perm: List = input_op_info.get('ncw_nchw_ncdhw_perm', None)
                 if ncw_nchw_ncdhw_perm is not None:
-                    custom_input_data = custom_input_data.transpose(ncw_nchw_ncdhw_perm)
+                    expected_shape = input_size_map.get(
+                        input_op_name,
+                        tuple(custom_input_data.shape),
+                    )
+                    if tuple(custom_input_data.shape) != expected_shape:
+                        permuted_shape = tuple(
+                            custom_input_data.shape[i] for i in ncw_nchw_ncdhw_perm
+                        )
+                        if permuted_shape == expected_shape:
+                            custom_input_data = custom_input_data.transpose(ncw_nchw_ncdhw_perm)
                 onnx_batch_size = input_op_info['shape'][0]
                 cdata_batch_size = custom_input_data.shape[0]
                 if isinstance(onnx_batch_size, int) and onnx_batch_size != cdata_batch_size and cdata_batch_size > 1:
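The dummy_onnx_inference change above makes the -cid transpose conditional: custom input data is only permuted when its current shape disagrees with the expected ONNX input shape and the permutation would reconcile the two. A minimal sketch of that guard (plain numpy, hypothetical shapes, not the onnx2tf implementation):

    import numpy as np

    def maybe_transpose(data: np.ndarray, perm, expected_shape) -> np.ndarray:
        if tuple(data.shape) == tuple(expected_shape):
            return data                              # already matches; leave untouched
        permuted_shape = tuple(data.shape[i] for i in perm)
        if permuted_shape == tuple(expected_shape):
            return data.transpose(perm)              # the perm reconciles the layouts
        return data                                  # neither matches; do not guess

    x = np.zeros((1, 224, 224, 3), dtype=np.float32)  # NHWC custom data
    print(maybe_transpose(x, [0, 3, 1, 2], (1, 3, 224, 224)).shape)  # (1, 3, 224, 224)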
@@ -4231,6 +4246,7 @@ def dummy_tf_inference(
     custom_input_op_name_np_data_path: Optional[str] = None,
     shape_hints: Optional[List[str]] = None,
     input_datas_for_validation: Optional[Dict[str, np.ndarray]] = None,
+    prefilled_input_datas: Optional[Dict[str, np.ndarray]] = None,
     keep_shape_absolutely_input_names: Optional[List[str]] = None,
     keep_ncw_or_nchw_or_ncdhw_input_names: Optional[List[str]] = None,
     keep_nwc_or_nhwc_or_ndhwc_input_names: Optional[List[str]] = None,
@@ -4264,6 +4280,8 @@ def dummy_tf_inference(
     """
     input_names: List[str] = [inp.name for inp in inputs]
     input_sizes: List[int] = [inp.shape for inp in inputs]
+    input_size_map = {name: size for name, size in zip(input_names, input_sizes)}
+    input_index_map = {name: i for i, name in enumerate(input_names)}

     if shape_hints is None:
         new_input_sizes = []
@@ -4338,10 +4356,20 @@ def dummy_tf_inference(

     # -cid
     if custom_input_op_name_np_data_path:
-        for idx, param in enumerate(custom_input_op_name_np_data_path):
+        for param in custom_input_op_name_np_data_path:
+            if len(param) < 2:
+                continue
+            input_name = str(param[0])
             numpy_file_path = str(param[1])
+            if input_name not in input_index_map:
+                continue
+            idx = input_index_map[input_name]
+            tf_input_name = input_names[idx]
+            if prefilled_input_datas and tf_input_name in prefilled_input_datas:
+                continue
             custom_input_data = np.load(numpy_file_path)
             input_size = input_sizes[idx]
+            input_dtype = input_dtypes[idx] if idx < len(input_dtypes) else np.float32

             tf_batch_size = input_size[0]
             cdata_batch_size = custom_input_data.shape[0]
@@ -4351,6 +4379,24 @@ def dummy_tf_inference(
                 custom_input_data = custom_input_data[0:1, ...]

             if list(custom_input_data.shape) != input_size:
+                auto_split_input = (
+                    'onnx2tf_split_' in numpy_file_path
+                    or os.path.basename(numpy_file_path).startswith('part_')
+                )
+                if auto_split_input:
+                    warn(
+                        'Auto-split custom input shape does not match TF input shape. '
+                        f'input_name={input_name} '
+                        f'tf_shape={input_size} '
+                        f'numpy_shape={list(custom_input_data.shape)} '
+                        f'path={numpy_file_path} '
+                        'Fallback to dummy input for this tensor.'
+                    )
+                    input_datas[input_names[idx]] = np.ones(
+                        input_size,
+                        dtype=TF_DTYPES_TO_NUMPY_DTYPES[input_dtype],
+                    )
+                    continue
                 error_msg = f'' + \
                     Color.RED(f'ERROR:') + ' ' + \
                     f"The format of custom input data is different from Tensorflow's format. " + \
@@ -4397,6 +4443,33 @@ def dummy_tf_inference(
     if input_datas_for_validation is not None:
         input_datas_for_validation.update(input_datas)

+    if prefilled_input_datas:
+        for input_name, input_data in prefilled_input_datas.items():
+            expected = None
+            if input_name in input_datas:
+                expected = input_datas[input_name].shape
+            elif input_name in input_size_map:
+                expected = input_size_map[input_name]
+            else:
+                continue
+            data = input_data
+            try:
+                if expected is not None and tuple(data.shape) != tuple(expected):
+                    if data.size == np.prod(expected):
+                        data = data.reshape(expected)
+                    else:
+                        continue
+                target_dtype = None
+                if input_name in input_datas:
+                    target_dtype = input_datas[input_name].dtype
+                elif input_name in input_index_map:
+                    target_dtype = input_dtypes[input_index_map[input_name]]
+                if target_dtype is not None and data.dtype != target_dtype:
+                    data = data.astype(target_dtype)
+                input_datas[input_name] = data
+            except Exception:
+                continue
+
     outputs = model(
         inputs={
             input.name: input_datas[input.name] for input in inputs
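The new prefilled_input_datas argument lets dummy_tf_inference reuse cached input tensors; each cached array is accepted only if it can be reshaped to the expected input size and cast to the expected dtype, otherwise it is skipped. A minimal sketch of that reconciliation (plain numpy, hypothetical values, not the onnx2tf implementation):

    import numpy as np

    def reconcile(cached: np.ndarray, expected_shape, expected_dtype):
        if tuple(cached.shape) != tuple(expected_shape):
            if cached.size != np.prod(expected_shape):
                return None                          # element count mismatch: reject
            cached = cached.reshape(expected_shape)  # same elements, expected layout
        if cached.dtype != expected_dtype:
            cached = cached.astype(expected_dtype)   # align dtype with the TF input
        return cached

    print(reconcile(np.arange(6), (2, 3), np.float32))  # reshaped (2, 3) float32 array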
@@ -6057,6 +6130,8 @@ def acquisition_of_validation_data(
         kwargs['test_data_nhwc']
     custom_input_op_name_np_data_path: str = \
         kwargs['custom_input_op_name_np_data_path']
+    tf_input_cache: Optional[Dict[str, np.ndarray]] = \
+        kwargs.get('tf_input_cache', None)

     # Get the output tensor of one previous OP of TensorFlow only once
     tf_model_inputs = get_tf_model_inputs(
@@ -6108,6 +6183,7 @@ def acquisition_of_validation_data(
             inputs=tf_model_inputs,
             test_data_nhwc=test_data_nhwc,
             custom_input_op_name_np_data_path=custom_input_op_name_np_data_path,
+            prefilled_input_datas=tf_input_cache,
         )
     except Exception as ex:
         pass
@@ -6526,3 +6602,24 @@ def define_reduceXXX(
         keepdims=target_keepdims,
     )
     return reduced_tensor
+
+def check_has_external_data(input_onnx_file_path: str) -> bool:
+    model = onnx.load(input_onnx_file_path, load_external_data=False)
+    def iter_tensors_in_graph(g):
+        for t in g.initializer:
+            yield t
+        for t in g.sparse_initializer:
+            yield t
+        for n in g.node:
+            for a in n.attribute:
+                if a.type == onnx.AttributeProto.TENSOR:
+                    yield a.t
+                elif a.type == onnx.AttributeProto.TENSORS:
+                    for t in a.tensors:
+                        yield t
+                elif a.type == onnx.AttributeProto.GRAPH:
+                    yield from iter_tensors_in_graph(a.g)
+                elif a.type == onnx.AttributeProto.GRAPHS:
+                    for sg in a.graphs:
+                        yield from iter_tensors_in_graph(sg)
+    return any(uses_external_data(t) for t in iter_tensors_in_graph(model.graph))
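check_has_external_data walks every TensorProto reachable from the graph (initializers, sparse initializers, tensor-valued node attributes, and nested subgraphs) and reports whether any of them stores its raw data outside the .onnx file. A minimal usage sketch of the underlying onnx helpers ('model.onnx' is a placeholder path):

    import onnx
    from onnx.external_data_helper import uses_external_data

    # Keep external blobs on disk; only the proto structure is needed for the check.
    model = onnx.load('model.onnx', load_external_data=False)
    print(any(uses_external_data(t) for t in model.graph.initializer))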
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: onnx2tf
-Version: 1.29.19
+Version: 1.29.20
 Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
 Keywords: onnx,tensorflow,tflite,keras,deep-learning,machine-learning
 Author: Katsuya Hyodo
@@ -365,7 +365,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
 docker run --rm -it \
 -v `pwd`:/workdir \
 -w /workdir \
-ghcr.io/pinto0309/onnx2tf:1.29.19
+ghcr.io/pinto0309/onnx2tf:1.29.20

 or

@@ -373,7 +373,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
 docker run --rm -it \
 -v `pwd`:/workdir \
 -w /workdir \
-docker.io/pinto0309/onnx2tf:1.29.19
+docker.io/pinto0309/onnx2tf:1.29.20

 or

@@ -1887,6 +1887,15 @@ optional arguments:
     model partitioned into subgraphs.
     e.g. --output_names_to_interrupt_model_conversion "output0" "output1" "output2"

+  -easm, --enable_auto_split_model
+    Force auto split regardless of the ONNX file size.
+    Uses --auto_split_max_size_mb as the target partition size.
+
+  -asmsm AUTO_SPLIT_MAX_SIZE_MB, --auto_split_max_size_mb AUTO_SPLIT_MAX_SIZE_MB
+    Target maximum size per partition in MB based on ONNX initializer sizes.
+    Used when auto-split is triggered or forced.
+    Default: 1024
+
   -dgc, --disable_group_convolution
     Disable GroupConvolution and replace it with SeparableConvolution for
     output to saved_model format.
@@ -2156,6 +2165,8 @@ convert(
   keep_shape_absolutely_input_names: Optional[List[str]] = None,
   input_names_to_interrupt_model_conversion: Union[List[str], NoneType] = None,
   output_names_to_interrupt_model_conversion: Union[List[str], NoneType] = None,
+  enable_auto_split_model: Optional[bool] = False,
+  auto_split_max_size_mb: Optional[int] = 1024,
   disable_group_convolution: Union[bool, NoneType] = False,
   enable_batchmatmul_unfold: Optional[bool] = False,
   enable_rnn_unroll: Optional[bool] = False,
@@ -2424,6 +2435,17 @@ convert(
   e.g.
   output_names_to_interrupt_model_conversion=['output0','output1','output2']

+  enable_auto_split_model: Optional[bool]
+    Force auto split regardless of the ONNX file size.
+    Uses auto_split_max_size_mb as the target partition size.
+    Short option: -easm
+    Default: False
+
+  auto_split_max_size_mb: Optional[int]
+    Target maximum size per partition in MB based on ONNX initializer sizes.
+    Used when auto-split is triggered or forced.
+    Default: 1024
+
   disable_group_convolution: Optional[bool]
     Disable GroupConvolution and replace it with SeparableConvolution for
     output to saved_model format.
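The two new options are also exposed on the Python API. A minimal sketch of forcing auto split from convert() (the model path and the 512 MB target are placeholder values):

    from onnx2tf import convert

    convert(
        input_onnx_file_path='model.onnx',
        output_folder_path='saved_model',
        enable_auto_split_model=True,   # force auto split regardless of file size
        auto_split_max_size_mb=512,     # target partition size in MB
    )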
@@ -1,6 +1,6 @@
-onnx2tf/__init__.py,sha256=j0g0sP9V7WMAY5PXs_oOHLBgUSJ1dcjLILPUq39xOnU,67
+onnx2tf/__init__.py,sha256=eeSEsd3Wo8LJdK15-iysWnXuxIbvnuVUXyPA9PbSr1o,67
 onnx2tf/__main__.py,sha256=2RSCQ7d4lc6CwD-rlGn9UicPFg-P5du7ZD_yh-kuBEU,57
-onnx2tf/onnx2tf.py,sha256=y8FewjpNYAFnUs0cjq6JzdYkiXQSm1o_sZ3PXLJzK64,161921
+onnx2tf/onnx2tf.py,sha256=pBrhrEzDqFKOgvXEr75vMJ61P_m3_f5ogUegD_vDEnc,207077
 onnx2tf/ops/Abs.py,sha256=V7btmCG_ZvK_qJovUsguq0ZMJ349mhNQ4FHSgzP_Yuo,4029
 onnx2tf/ops/Acos.py,sha256=Fo8YkFKuWq8Fi2xUrBdKcAH1yJ8r5pjSD0wgLttTNdk,4003
 onnx2tf/ops/Acosh.py,sha256=ATQj2cT5JS_mTfXi0kXqJ1yzSZu5J0zHA5VjV3j7uKY,3588
@@ -60,8 +60,8 @@ onnx2tf/ops/Floor.py,sha256=8izJrNmw8wNmjF_YabIpLs4jm82J-gKcyAQbwV7Yqpc,3589
 onnx2tf/ops/FusedConv.py,sha256=gslI50V3yvt4l0mmodnyHFAu0cORx1J_ZL5cE0rZ8qs,4523
 onnx2tf/ops/GRU.py,sha256=kBHiZlhlPIV2DQCoFYFHxCTwOATeguJy1MSfj2kxqDM,30732
 onnx2tf/ops/Gather.py,sha256=ezsUTN8nWau4-kB696xjonlVWU6XQ6BjtyjSebt1EXg,15216
-onnx2tf/ops/GatherElements.py,sha256=pR9EuOkYRBKPntmnj9DYpoBESc35EGv3RHfl0HCSmao,15026
-onnx2tf/ops/GatherND.py,sha256=sdHaBeY2ycN9gRc_ahaZo2QI9XbV8PBthefm-JPiPnE,7642
+onnx2tf/ops/GatherElements.py,sha256=qF8milhgXOOc_G3W80U2rK7Q2SsHlPNHrs4VX20ddDY,16002
+onnx2tf/ops/GatherND.py,sha256=2PwSyXHwPP9_xADPasjxj-IXAvLNqKuGm3P5K3GOiwE,9239
 onnx2tf/ops/Gelu.py,sha256=ms9oHnESOuiIPxl_8YU2WEnQo_BVKRPKo5UJsvsWyEA,4321
 onnx2tf/ops/Gemm.py,sha256=8vGtXwx_V59JIDh3EBPuFVQSbIVql45zEHUlVGV3coU,7587
 onnx2tf/ops/GlobalAveragePool.py,sha256=GrDDOywtO6peW79mBPmBJX9MrEU2PXso94xazAzx_xk,5704
@@ -160,8 +160,8 @@ onnx2tf/ops/STFT.py,sha256=LDKN309_dBu4v9AYpz70uMJbNjRFiOte9O3wUL4bIJw,4463
 onnx2tf/ops/ScaleAndTranslate.py,sha256=VQDDhSs9TyMLQy0mF7n8pZ2TuvoKY-Lhlzd7Inf4UdI,11989
 onnx2tf/ops/Scan.py,sha256=hfN-DX6Gp-dG5158WMoHRrDWZAra3VSbsjsiphNqRIQ,16293
 onnx2tf/ops/Scatter.py,sha256=5_rTM60FPCq8unyNPDO-BZXcuz6w9Uyl2Xqx-zJTpgg,746
-onnx2tf/ops/ScatterElements.py,sha256=7u9-_pjS_x3JQsBCVnQyu6sPfuGx2o9qAW_RSZszOTs,7585
-onnx2tf/ops/ScatterND.py,sha256=Y949fYKSAvkPW1s-58P7suafnna9hDLoTg0UA8cs2Ag,9087
+onnx2tf/ops/ScatterElements.py,sha256=mp-TmswDTA9Nv0B3G3b-khOCPCKHnhCI97jDRofoEM0,8561
+onnx2tf/ops/ScatterND.py,sha256=-mVbxXjQor2T6HVHSJy5e0FHQmEfaHknaKPuSc3Oz4o,11005
 onnx2tf/ops/Selu.py,sha256=CD0SqQlTTe0chO7lebkrdfDFSk6Cg9zLhvrKomsSH4Y,3799
 onnx2tf/ops/SequenceAt.py,sha256=jpjl9gVJFagtg223YY26I0pUUEgEFjJGvSZWwbo2-mQ,3278
 onnx2tf/ops/SequenceConstruct.py,sha256=KKbnpnitdAky23WF_DS49ot7ZxVoqBEU2ChgYEcXshY,2639
@@ -193,7 +193,7 @@ onnx2tf/ops/Sub.py,sha256=JCUWNmRLrwJEB8_0MPRTzmZ4KAV_HLXNivUd_jNqPQI,11012
 onnx2tf/ops/Sum.py,sha256=wtI0SbGuNFxkLskBk68ZhOAg3XyrIx-9xGYy1GZCVSo,3073
 onnx2tf/ops/Tan.py,sha256=Ncig8clGvY7GWshqxRDRdcxjcbf_HTKGdpDw5ValrKI,3582
 onnx2tf/ops/Tanh.py,sha256=PIQUvxS_AIDufblC2vc573nse2UCRA9z5yWd7kB-51s,3585
-onnx2tf/ops/TensorScatter.py,sha256=xOB1HVeHXFUUTmKJfZuUBEyPSLpJYjzUf0cAMqblsnc,7413
+onnx2tf/ops/TensorScatter.py,sha256=9M1L8ys2FodscRZXdjme5NQYrCFX_nZH7wm8vx-PXcc,8176
 onnx2tf/ops/ThresholdedRelu.py,sha256=ArF3uRH7jN8kdYYDNcivJgv9UTFl5aqqSH2Qu79j4sY,3769
 onnx2tf/ops/Tile.py,sha256=xkprg6yTaykivcHFJ644opzVPctaeplu-Ed-OpS98Gg,12720
 onnx2tf/ops/TopK.py,sha256=f6OG-DcMWneXwSjIkmY935SPyOMD5tMteHnlQHoJwQo,6348
@@ -206,12 +206,12 @@ onnx2tf/ops/Where.py,sha256=MaCcY9g4mKZQqCgh4xtoylicP-xVu9f4boKiu_q9Ow8,7711
 onnx2tf/ops/Xor.py,sha256=2ceqxHSI1Wtez_CIh8gFfvcu45Xboqfyp1iy3v2vuIs,4590
 onnx2tf/ops/__init__.py,sha256=jnmUWWa-3dHzBZV9bmPzXu6eoz2dumJTzO7i8JdcgSM,25
 onnx2tf/utils/__init__.py,sha256=E9FM9He68VIASDnYp-OrxvHFVn55GzWqw2OEkCqn1zg,27
-onnx2tf/utils/common_functions.py,sha256=j8bRC3RK5NlNAV9vwxj38DwDaaCLR2iprRdDjBgv_RA,260619
+onnx2tf/utils/common_functions.py,sha256=NPsgAhZTgLZABrVK1x_mWs5_nXDC9tyWFAfvW9qqNLI,265023
 onnx2tf/utils/enums.py,sha256=7c5TqetqB07VjyHoxJHfLgtqBqk9ZRyUF33fPOJR1IM,1649
 onnx2tf/utils/iterative_json_optimizer.py,sha256=qqeIxWGxrhcCYk8-ebWnblnOkzDCwi-nseipHzHR_bk,10436
 onnx2tf/utils/json_auto_generator.py,sha256=OC-SfKtUg7zUxaXTAg6kT0ShzIc3ByjDa3FNp173DtA,60302
 onnx2tf/utils/logging.py,sha256=yUCmPuJ_XiUItM3sZMcaMO24JErkQy7zZwVTYWAuiKg,1982
-onnx2tf-1.29.19.dist-info/WHEEL,sha256=fAguSjoiATBe7TNBkJwOjyL1Tt4wwiaQGtNtjRPNMQA,80
-onnx2tf-1.29.19.dist-info/entry_points.txt,sha256=GuhvLu7ZlYECumbmoiFlKX0mFPtFi_Ti9L-E5yuQqKs,42
-onnx2tf-1.29.19.dist-info/METADATA,sha256=rSyPbOdWaW3QovkZCVvFg5zn_INzTCm8KN_rjZBah0Q,154312
-onnx2tf-1.29.19.dist-info/RECORD,,
+onnx2tf-1.29.20.dist-info/WHEEL,sha256=fAguSjoiATBe7TNBkJwOjyL1Tt4wwiaQGtNtjRPNMQA,80
+onnx2tf-1.29.20.dist-info/entry_points.txt,sha256=GuhvLu7ZlYECumbmoiFlKX0mFPtFi_Ti9L-E5yuQqKs,42
+onnx2tf-1.29.20.dist-info/METADATA,sha256=4DMYV8XRObEn9G6cqsBYyWC-Vcj42dbDs8IAxPKeg0Q,155198
+onnx2tf-1.29.20.dist-info/RECORD,,