onnx-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl → onnx-1.16.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl

This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of onnx might be problematic.

@@ -202,7 +202,7 @@ class DequantizeLinear(Base):
         # scalar zero point and scale
         x = make_tensor("x", TensorProto.UINT4, [5], [0, 1, 7, 10, 15])
         x_scale = np.float32(2)
-        x_zero_point = make_tensor("zero_point", TensorProto.UINT4, (1,), [1])
+        x_zero_point = make_tensor("x_zero_point", TensorProto.UINT4, (1,), [1])
         y = np.array([-2, 0, 12, 18, 28], dtype=np.float32)

         expect(
@@ -224,7 +224,7 @@ class DequantizeLinear(Base):
         # scalar zero point and scale
         x = make_tensor("x", TensorProto.INT4, [5], [0, 1, 7, -4, -8])
         x_scale = np.float32(2)
-        x_zero_point = make_tensor("zero_point", TensorProto.INT4, (1,), [1])
+        x_zero_point = make_tensor("x_zero_point", TensorProto.INT4, (1,), [1])
         y = np.array([-2, 0, 12, -10, -18], dtype=np.float32)

         expect(
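
The two hunks above only rename the zero-point initializer from the generic "zero_point" to the operator's formal input name "x_zero_point"; the expected values are unchanged. As a sanity check, a minimal NumPy sketch of the DequantizeLinear formula, y = (x - x_zero_point) * x_scale, reproduces the outputs in both test cases (the 4-bit packing is ignored here; plain integers stand in for the UINT4/INT4 values):

```python
import numpy as np

def dequantize_linear(x, x_scale, x_zero_point):
    # DequantizeLinear: y = (x - x_zero_point) * x_scale
    return (np.asarray(x, dtype=np.float32) - x_zero_point) * x_scale

# UINT4 case: scale=2, zero_point=1
print(dequantize_linear([0, 1, 7, 10, 15], 2.0, 1))  # [-2.  0. 12. 18. 28.]
# INT4 case: scale=2, zero_point=1
print(dequantize_linear([0, 1, 7, -4, -8], 2.0, 1))  # [ -2.   0.  12. -10. -18.]
```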
@@ -73,7 +73,7 @@ class QuantizeLinear(Base):

         x = np.array([0.0, 1.0, 2.0, 100000.0, 200.0]).astype(np.float32)
         y_scale = np.float32(2)
-        y_zero_point = make_tensor("zero_point", TensorProto.FLOAT8E4M3FN, [1], [0])
+        y_zero_point = make_tensor("y_zero_point", TensorProto.FLOAT8E4M3FN, [1], [0])
         y = make_tensor("y", TensorProto.FLOAT8E4M3FN, [5], [0, 0.5, 1, 448, 96])

         expect(
@@ -93,7 +93,7 @@ class QuantizeLinear(Base):

         x = np.array([0.0, 1.0, 2.0, 100000.0, 200.0]).astype(np.float32)
         y_scale = np.float32(2)
-        y_zero_point = make_tensor("zero_point", TensorProto.FLOAT8E5M2, [1], [0.0])
+        y_zero_point = make_tensor("y_zero_point", TensorProto.FLOAT8E5M2, [1], [0.0])
         y = make_tensor("y", TensorProto.FLOAT8E5M2, [5], [0, 0.5, 1, 49152, 96])

         expect(
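
For the FLOAT8 test cases the expected outputs combine division by the scale with saturation and round-to-nearest into the 8-bit float format. A rough NumPy sketch of just the arithmetic; the actual float8 rounding is annotated in comments rather than computed, since it needs a float8 implementation such as the ml_dtypes package:

```python
import numpy as np

x = np.array([0.0, 1.0, 2.0, 100000.0, 200.0], dtype=np.float32)
y_scale = np.float32(2)

scaled = x / y_scale  # [0.0, 0.5, 1.0, 50000.0, 100.0]
# FLOAT8E4M3FN: the finite maximum is 448, so 50000 saturates to 448,
# and 100 rounds to 96, the nearest representable E4M3 value.
# FLOAT8E5M2: the finite maximum is 57344; 50000 rounds to 49152,
# and 100 again rounds to 96.
print(scaled)
```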
@@ -230,7 +230,7 @@ class QuantizeLinear(Base):

         y_scale = np.asarray([2.0, 3.0, 4.0], dtype=np.float32)
         y_zero_point = make_tensor(
-            "zero_point", TensorProto.UINT4, y_scale.shape, np.ones_like(y_scale)
+            "y_zero_point", TensorProto.UINT4, y_scale.shape, np.ones_like(y_scale)
         )
         y = make_tensor(
             "y", TensorProto.UINT4, x.shape, [1, 2, 3, 5, -1, -1, 3, 4, 4, 5, 5, 11]
@@ -262,7 +262,7 @@ class QuantizeLinear(Base):

         y_scale = np.asarray([2.0, 3.0, 4.0], dtype=np.float32)
         y_zero_point = make_tensor(
-            "zero_point", TensorProto.INT4, y_scale.shape, np.ones_like(y_scale)
+            "y_zero_point", TensorProto.INT4, y_scale.shape, np.ones_like(y_scale)
         )
         y = make_tensor(
             "y", TensorProto.INT4, x.shape, [1, 2, 3, 5, -8, -6, 3, 4, 4, 5, 5, 7]
@@ -1,2 +1 @@
-*B
-zero_point
+*B x_zero_point
@@ -1,2 +1 @@
-*B
-zero_point
+*B x_zero_point
@@ -1,2 +1 @@
-*B
-zero_point
+*B y_zero_point
@@ -1,2 +1 @@
-*B
-zero_point
+*B y_zero_point
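
The four single-line hunks above are the serialized test-data files that accompany these renames: each input .pb stores the tensor name in the TensorProto `name` field, so renaming "zero_point" changes the raw bytes shown here. A sketch of how one of these files can be inspected (the path is illustrative):

```python
import onnx

tensor = onnx.TensorProto()
with open("test_data_set_0/input_2.pb", "rb") as f:  # illustrative path
    tensor.ParseFromString(f.read())
print(tensor.name)  # "x_zero_point" (or "y_zero_point") as of 1.16.1
```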
onnx/common/version.h CHANGED
@@ -9,6 +9,6 @@
 namespace ONNX_NAMESPACE {

 // Represents the most recent release version. Updated with every release.
-constexpr const char* LAST_RELEASE_VERSION = "1.16.0";
+constexpr const char* LAST_RELEASE_VERSION = "1.16.1";

 } // namespace ONNX_NAMESPACE
@@ -200,6 +200,9 @@ ONNX_OPERATOR_SET_SCHEMA(
         .SetDoc(DequantizeLinear_ver21_doc)
         .TypeAndShapeInferenceFunction([](ONNX_NAMESPACE::InferenceContext& ctx) {
           propagateElemTypeFromInputToOutput(ctx, 1, 0);
+          if (!hasInputShape(ctx, 0)) {
+            return;
+          }
           auto& input_shape = getInputShape(ctx, 0);
           updateOutputShape(ctx, 0, input_shape);
         }));
@@ -130,6 +130,9 @@ ONNX_OPERATOR_SET_SCHEMA(
         .SetDoc(DequantizeLinear_ver19_doc)
         .TypeAndShapeInferenceFunction([](ONNX_NAMESPACE::InferenceContext& ctx) {
           propagateElemTypeFromInputToOutput(ctx, 1, 0);
+          if (!hasInputShape(ctx, 0)) {
+            return;
+          }
           auto& input_shape = getInputShape(ctx, 0);
           updateOutputShape(ctx, 0, input_shape);
         }));
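
Both DequantizeLinear inference functions now bail out early when the input's shape is unknown instead of calling getInputShape unconditionally, mirroring the guard QuantizeLinear already had. A minimal repro sketch of what the guard tolerates, using the public Python API (the model contents are illustrative; under 1.16.0 strict shape inference could fail on such a model):

```python
import onnx
from onnx import TensorProto, helper

# Input deliberately declared without any shape information.
x = helper.make_tensor_value_info("x", TensorProto.INT8, None)
y = helper.make_tensor_value_info("y", TensorProto.FLOAT, None)
scale = helper.make_tensor("x_scale", TensorProto.FLOAT, [], [2.0])

node = helper.make_node("DequantizeLinear", ["x", "x_scale"], ["y"])
graph = helper.make_graph([node], "repro", [x], [y], initializer=[scale])
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 21)])

# With the guard, inference skips shape propagation for this node and returns.
inferred = onnx.shape_inference.infer_shapes(model, strict_mode=True)
print(inferred.graph.output[0].type.tensor_type.elem_type)  # 1 == FLOAT
```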
@@ -181,7 +184,6 @@ ONNX_OPERATOR_SET_SCHEMA(
           if (!hasInputShape(ctx, 0)) {
             return;
           }
-
           auto& input_shape = getInputShape(ctx, 0);
           updateOutputShape(ctx, 0, input_shape);
         }));
@@ -488,29 +488,29 @@ class ShapeInferenceImplBase {
           ProcessCall(n, *(iter->second), ctx);
         } else {
           has_unsupported_op = true;
+          return;
         }
       } else {
         has_unsupported_op = true;
+        return;
       }
-      if (!has_unsupported_op) {
-        for (int i = 0; i < n.output_size(); ++i) {
-          // skip type and shape propagation for missing optional outputs.
-          if (!n.output(i).empty())
-            UpdateType(n.output(i), ctx.getOutputType(i));
-        }
-        // Constant values are tracked to improve inference/checking for subsequent nodes.
-        ProcessConstant(n);
-        // If data-propagation is enabled, partial-evaluation (aka data-propagation) is performed
-        // to improve inference/checking for subsequent nodes.
-        if (options.enable_data_propagation && schema && schema->has_data_propagation_function()) {
-          if (generated_shape_data_by_name == nullptr) {
-            fail_shape_inference(
-                "Container for generated shape data cannot be nullptr when enable_data_propagation option is set.");
-          }
-          DataPropagationContextImpl data_propagation_ctx(
-              n, value_types_by_name, input_data_by_name, *generated_shape_data_by_name);
-          schema->GetDataPropagationFunction()(data_propagation_ctx);
+      for (int i = 0; i < n.output_size(); ++i) {
+        // skip type and shape propagation for missing optional outputs.
+        if (!n.output(i).empty())
+          UpdateType(n.output(i), ctx.getOutputType(i));
+      }
+      // Constant values are tracked to improve inference/checking for subsequent nodes.
+      ProcessConstant(n);
+      // If data-propagation is enabled, partial-evaluation (aka data-propagation) is performed
+      // to improve inference/checking for subsequent nodes.
+      if (options.enable_data_propagation && schema && schema->has_data_propagation_function()) {
+        if (generated_shape_data_by_name == nullptr) {
+          fail_shape_inference(
+              "Container for generated shape data cannot be nullptr when enable_data_propagation option is set.");
         }
+        DataPropagationContextImpl data_propagation_ctx(
+            n, value_types_by_name, input_data_by_name, *generated_shape_data_by_name);
+        schema->GetDataPropagationFunction()(data_propagation_ctx);
       }
     }
     ONNX_CATCH(const ONNX_NAMESPACE::InferenceError& ex) {
onnx/version.py CHANGED
@@ -1,5 +1,5 @@
 # This file is generated by setup.py. DO NOT EDIT!


-version = "1.16.0"
+version = "1.16.1"
 git_version = ""
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: onnx
-Version: 1.16.0
+Version: 1.16.1
 Summary: Open Neural Network Exchange
 Author-email: ONNX Contributors <onnx-technical-discuss@lists.lfaidata.foundation>
 License: Apache License v2.0