onnx 1.16.0__cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl → 1.16.2__cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of onnx might be problematic.

@@ -202,7 +202,7 @@ class DequantizeLinear(Base):
     # scalar zero point and scale
     x = make_tensor("x", TensorProto.UINT4, [5], [0, 1, 7, 10, 15])
     x_scale = np.float32(2)
-    x_zero_point = make_tensor("zero_point", TensorProto.UINT4, (1,), [1])
+    x_zero_point = make_tensor("x_zero_point", TensorProto.UINT4, (1,), [1])
     y = np.array([-2, 0, 12, 18, 28], dtype=np.float32)

     expect(
@@ -224,7 +224,7 @@ class DequantizeLinear(Base):
     # scalar zero point and scale
     x = make_tensor("x", TensorProto.INT4, [5], [0, 1, 7, -4, -8])
     x_scale = np.float32(2)
-    x_zero_point = make_tensor("zero_point", TensorProto.INT4, (1,), [1])
+    x_zero_point = make_tensor("x_zero_point", TensorProto.INT4, (1,), [1])
     y = np.array([-2, 0, 12, -10, -18], dtype=np.float32)

     expect(
@@ -73,7 +73,7 @@ class QuantizeLinear(Base):

     x = np.array([0.0, 1.0, 2.0, 100000.0, 200.0]).astype(np.float32)
     y_scale = np.float32(2)
-    y_zero_point = make_tensor("zero_point", TensorProto.FLOAT8E4M3FN, [1], [0])
+    y_zero_point = make_tensor("y_zero_point", TensorProto.FLOAT8E4M3FN, [1], [0])
     y = make_tensor("y", TensorProto.FLOAT8E4M3FN, [5], [0, 0.5, 1, 448, 96])

     expect(
@@ -93,7 +93,7 @@ class QuantizeLinear(Base):

     x = np.array([0.0, 1.0, 2.0, 100000.0, 200.0]).astype(np.float32)
     y_scale = np.float32(2)
-    y_zero_point = make_tensor("zero_point", TensorProto.FLOAT8E5M2, [1], [0.0])
+    y_zero_point = make_tensor("y_zero_point", TensorProto.FLOAT8E5M2, [1], [0.0])
     y = make_tensor("y", TensorProto.FLOAT8E5M2, [5], [0, 0.5, 1, 49152, 96])

     expect(
@@ -230,7 +230,7 @@ class QuantizeLinear(Base):

     y_scale = np.asarray([2.0, 3.0, 4.0], dtype=np.float32)
     y_zero_point = make_tensor(
-        "zero_point", TensorProto.UINT4, y_scale.shape, np.ones_like(y_scale)
+        "y_zero_point", TensorProto.UINT4, y_scale.shape, np.ones_like(y_scale)
     )
     y = make_tensor(
         "y", TensorProto.UINT4, x.shape, [1, 2, 3, 5, -1, -1, 3, 4, 4, 5, 5, 11]
@@ -262,7 +262,7 @@ class QuantizeLinear(Base):

     y_scale = np.asarray([2.0, 3.0, 4.0], dtype=np.float32)
     y_zero_point = make_tensor(
-        "zero_point", TensorProto.INT4, y_scale.shape, np.ones_like(y_scale)
+        "y_zero_point", TensorProto.INT4, y_scale.shape, np.ones_like(y_scale)
     )
     y = make_tensor(
         "y", TensorProto.INT4, x.shape, [1, 2, 3, 5, -8, -6, 3, 4, 4, 5, 5, 7]
@@ -1,2 +1 @@
- *B
- zero_point
+ *B x_zero_point
@@ -1,2 +1 @@
- *B
- zero_point
+ *B x_zero_point
@@ -1,2 +1 @@
- *B
- zero_point
+ *B y_zero_point
@@ -1,2 +1 @@
- *B
- zero_point
+ *B y_zero_point
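
The four hunks above appear to be the serialized test-data tensors for these cases; only the stored tensor name changes, not the payload, and the quantization arithmetic is untouched by the rename. As a quick sanity check, the UINT4 DequantizeLinear expectation from the first hunk can be reproduced with plain numpy (a minimal sketch, not part of the diff):

import numpy as np

# DequantizeLinear computes y = (x - x_zero_point) * x_scale.
# Values from the UINT4 test above: scale 2, zero point 1.
x = np.array([0, 1, 7, 10, 15], dtype=np.int32)
x_scale = np.float32(2)
x_zero_point = 1
y = ((x - x_zero_point) * x_scale).astype(np.float32)
print(y)  # [-2.  0. 12. 18. 28.], matching the expected output above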
@@ -10,7 +10,6 @@ import os
 import re
 import shutil
 import sys
-import tarfile
 import tempfile
 import time
 import unittest
@@ -238,8 +237,7 @@ class Runner:
     )
     urlretrieve(model_test.url, download_file.name)
     print("Done")
-    with tarfile.open(download_file.name) as t:
-        t.extractall(models_dir)
+    onnx.utils._extract_model_safe(download_file.name, models_dir)
 except Exception as e:
     print(f"Failed to prepare data for model {model_test.model_name}: {e}")
     raise
onnx/common/version.h CHANGED
@@ -9,6 +9,6 @@
 namespace ONNX_NAMESPACE {

 // Represents the most recent release version. Updated with every release.
-constexpr const char* LAST_RELEASE_VERSION = "1.16.0";
+constexpr const char* LAST_RELEASE_VERSION = "1.16.2";

 } // namespace ONNX_NAMESPACE
onnx/defs/math/old.cc CHANGED
@@ -2322,10 +2322,15 @@ ONNX_OPERATOR_SET_SCHEMA(
   auto transBAttr = ctx.getAttribute("transB");
   bool transB = transBAttr ? static_cast<int>(transBAttr->i()) != 0 : false;

+  checkInputRank(ctx, 0, 2);
+  checkInputRank(ctx, 1, 2);
+
+  auto& first_input_shape = getInputShape(ctx, 0);
+  auto& second_input_shape = getInputShape(ctx, 1);
   *ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape()->add_dim() =
-      ctx.getInputType(0)->tensor_type().shape().dim(transA ? 1 : 0);
+      first_input_shape.dim(transA ? 1 : 0);
   *ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape()->add_dim() =
-      ctx.getInputType(1)->tensor_type().shape().dim(transB ? 0 : 1);
+      second_input_shape.dim(transB ? 0 : 1);
 } else if (
     hasInputShape(ctx, 2) &&
     (!ctx.getAttribute("broadcast") || static_cast<int>(ctx.getAttribute("broadcast")->i()) == 0)) {
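
The added checkInputRank calls make the legacy Gemm shape inference reject inputs that are not rank-2 matrices up front, instead of indexing into a dimension that may not exist. A minimal sketch of the observable effect (hypothetical model; shown with the default opset's Gemm, which enforces the same rank requirement):

import onnx
from onnx import TensorProto, helper

# "A" is deliberately rank 1, which is invalid for Gemm.
a = helper.make_tensor_value_info("A", TensorProto.FLOAT, [6])
b = helper.make_tensor_value_info("B", TensorProto.FLOAT, [3, 4])
y = helper.make_tensor_value_info("Y", TensorProto.FLOAT, None)
graph = helper.make_graph(
    [helper.make_node("Gemm", ["A", "B"], ["Y"])], "g", [a, b], [y]
)
model = helper.make_model(graph)
try:
    onnx.shape_inference.infer_shapes(model, strict_mode=True)
except onnx.shape_inference.InferenceError as exc:
    print("rejected:", exc)  # rank mismatch reported as an inference error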
@@ -200,6 +200,9 @@ ONNX_OPERATOR_SET_SCHEMA(
     .SetDoc(DequantizeLinear_ver21_doc)
     .TypeAndShapeInferenceFunction([](ONNX_NAMESPACE::InferenceContext& ctx) {
       propagateElemTypeFromInputToOutput(ctx, 1, 0);
+      if (!hasInputShape(ctx, 0)) {
+        return;
+      }
       auto& input_shape = getInputShape(ctx, 0);
       updateOutputShape(ctx, 0, input_shape);
     }));
@@ -130,6 +130,9 @@ ONNX_OPERATOR_SET_SCHEMA(
     .SetDoc(DequantizeLinear_ver19_doc)
     .TypeAndShapeInferenceFunction([](ONNX_NAMESPACE::InferenceContext& ctx) {
       propagateElemTypeFromInputToOutput(ctx, 1, 0);
+      if (!hasInputShape(ctx, 0)) {
+        return;
+      }
       auto& input_shape = getInputShape(ctx, 0);
       updateOutputShape(ctx, 0, input_shape);
     }));
@@ -181,7 +184,6 @@ ONNX_OPERATOR_SET_SCHEMA(
       if (!hasInputShape(ctx, 0)) {
         return;
       }
-
       auto& input_shape = getInputShape(ctx, 0);
       updateOutputShape(ctx, 0, input_shape);
     }));
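
With the new hasInputShape guard, both DequantizeLinear inference functions still propagate the element type but now leave the output shape unknown when the input carries no shape, rather than failing inside getInputShape. A minimal sketch (hypothetical model):

import onnx
from onnx import TensorProto, helper

# "x" intentionally has no shape information.
x = helper.make_tensor_value_info("x", TensorProto.UINT8, None)
s = helper.make_tensor_value_info("s", TensorProto.FLOAT, [])
y = helper.make_tensor_value_info("y", TensorProto.FLOAT, None)
graph = helper.make_graph(
    [helper.make_node("DequantizeLinear", ["x", "s"], ["y"])], "g", [x, s], [y]
)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 19)])
# y's element type is inferred; its shape is simply left unknown.
onnx.shape_inference.infer_shapes(model, strict_mode=True)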
onnx/defs/tensor/old.cc CHANGED
@@ -1380,7 +1380,7 @@ ONNX_OPERATOR_SET_SCHEMA(

 static const char* Slice_ver11_doc = R"DOC(
 Produces a slice of the input tensor along multiple axes. Similar to numpy:
-https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
+https://numpy.org/doc/stable/reference/routines.indexing.html
 Slices uses `starts`, `ends`, `axes` and `steps` inputs to specify the start and end
 dimension and step for each axis in the list of axes, it uses this information to
 slice the input `data` tensor. If a negative value is passed for any of the
@@ -4443,7 +4443,7 @@ ONNX_OPERATOR_SET_SCHEMA(

 static const char* Slice_ver1_doc = R"DOC(
 Produces a slice of the input tensor along multiple axes. Similar to numpy:
-https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
+https://numpy.org/doc/stable/reference/routines.indexing.html
 Slices uses `axes`, `starts` and `ends` attributes to specify the start and end
 dimension for each axis in the list of axes, it uses this information to
 slice the input `data` tensor. If a negative value is passed for any of the
@@ -4559,7 +4559,7 @@ ONNX_OPERATOR_SET_SCHEMA(

 static const char* Slice_ver10_doc = R"DOC(
 Produces a slice of the input tensor along multiple axes. Similar to numpy:
-https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
+https://numpy.org/doc/stable/reference/routines.indexing.html
 Slices uses `starts`, `ends`, `axes` and `steps` inputs to specify the start and end
 dimension and step for each axis in the list of axes, it uses this information to
 slice the input `data` tensor. If a negative value is passed for any of the
onnx/hub.py CHANGED
@@ -9,7 +9,6 @@ import hashlib
 import json
 import os
 import sys
-import tarfile
 from io import BytesIO
 from os.path import join
 from typing import IO, Any, Dict, List, Optional, Set, Tuple, cast
@@ -296,6 +295,7 @@ def download_model_with_test_data(
     silent: bool = False,
 ) -> Optional[str]:
     """Downloads a model along with test data by name from the onnx model hub and returns the directory to which the files have been extracted.
+    Users are responsible for making sure the model comes from a trusted source, and the data is safe to be extracted.

     Args:
         model: The name of the onnx model in the manifest. This field is
@@ -361,12 +361,14 @@ def download_model_with_test_data(
         "download the model from the model hub."
     )

-    with tarfile.open(local_model_with_data_path) as model_with_data_zipped:
-        # FIXME: Avoid index manipulation with magic numbers
-        local_model_with_data_dir_path = local_model_with_data_path[
-            0 : len(local_model_with_data_path) - 7
-        ]
-        model_with_data_zipped.extractall(local_model_with_data_dir_path)
+    # FIXME: Avoid index manipulation with magic numbers,
+    # remove ".tar.gz"
+    local_model_with_data_dir_path = local_model_with_data_path[
+        0 : len(local_model_with_data_path) - 7
+    ]
+    onnx.utils._extract_model_safe(
+        local_model_with_data_path, local_model_with_data_dir_path
+    )
     model_with_data_path = (
         local_model_with_data_dir_path
         + "/"
@@ -488,29 +488,29 @@ class ShapeInferenceImplBase {
       ProcessCall(n, *(iter->second), ctx);
     } else {
       has_unsupported_op = true;
+      return;
     }
   } else {
     has_unsupported_op = true;
+    return;
   }
-  if (!has_unsupported_op) {
-    for (int i = 0; i < n.output_size(); ++i) {
-      // skip type and shape propagation for missing optional outputs.
-      if (!n.output(i).empty())
-        UpdateType(n.output(i), ctx.getOutputType(i));
-    }
-    // Constant values are tracked to improve inference/checking for subsequent nodes.
-    ProcessConstant(n);
-    // If data-propagation is enabled, partial-evaluation (aka data-propagation) is performed
-    // to improve inference/checking for subsequent nodes.
-    if (options.enable_data_propagation && schema && schema->has_data_propagation_function()) {
-      if (generated_shape_data_by_name == nullptr) {
-        fail_shape_inference(
-            "Container for generated shape data cannot be nullptr when enable_data_propagation option is set.");
-      }
-      DataPropagationContextImpl data_propagation_ctx(
-          n, value_types_by_name, input_data_by_name, *generated_shape_data_by_name);
-      schema->GetDataPropagationFunction()(data_propagation_ctx);
+  for (int i = 0; i < n.output_size(); ++i) {
+    // skip type and shape propagation for missing optional outputs.
+    if (!n.output(i).empty())
+      UpdateType(n.output(i), ctx.getOutputType(i));
+  }
+  // Constant values are tracked to improve inference/checking for subsequent nodes.
+  ProcessConstant(n);
+  // If data-propagation is enabled, partial-evaluation (aka data-propagation) is performed
+  // to improve inference/checking for subsequent nodes.
+  if (options.enable_data_propagation && schema && schema->has_data_propagation_function()) {
+    if (generated_shape_data_by_name == nullptr) {
+      fail_shape_inference(
+          "Container for generated shape data cannot be nullptr when enable_data_propagation option is set.");
     }
+    DataPropagationContextImpl data_propagation_ctx(
+        n, value_types_by_name, input_data_by_name, *generated_shape_data_by_name);
+    schema->GetDataPropagationFunction()(data_propagation_ctx);
   }
 }
 ONNX_CATCH(const ONNX_NAMESPACE::InferenceError& ex) {
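
The early returns mean a node with no usable schema or function body now skips output-type updates, constant tracking, and data propagation in a single step; the re-indented block below the returns is otherwise unchanged. At the Python level the default, non-strict behaviour looks like this (a sketch with a hypothetical custom op and domain):

import onnx
from onnx import TensorProto, helper

x = helper.make_tensor_value_info("x", TensorProto.FLOAT, [2, 3])
y = helper.make_tensor_value_info("y", TensorProto.FLOAT, None)
node = helper.make_node("MyCustomOp", ["x"], ["y"], domain="com.example")
graph = helper.make_graph([node], "g", [x], [y])
model = helper.make_model(
    graph,
    opset_imports=[helper.make_opsetid("", 21), helper.make_opsetid("com.example", 1)],
)
# No schema is registered for MyCustomOp, so inference returns early for
# that node and leaves y's type and shape untouched.
inferred = onnx.shape_inference.infer_shapes(model)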
onnx/tools/net_drawer.py CHANGED
@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: Apache-2.0
 # A library and utility for drawing ONNX nets. Most of this implementation has
 # been borrowed from the caffe2 implementation
-# https://github.com/pytorch/pytorch/blob/master/caffe2/python/net_drawer.py
+# https://github.com/pytorch/pytorch/blob/v2.3.1/caffe2/python/net_drawer.py
 #
 # The script takes two required arguments:
 # -input: a path to a serialized ModelProto .pb file.
onnx/utils.py CHANGED
@@ -4,6 +4,7 @@
 from __future__ import annotations

 import os
+import tarfile

 import onnx.checker
 import onnx.helper
@@ -212,3 +213,65 @@ def extract_model(
     onnx.save(extracted, output_path)
     if check_model:
         onnx.checker.check_model(output_path)
+
+
+def _tar_members_filter(
+    tar: tarfile.TarFile, base: str | os.PathLike
+) -> list[tarfile.TarInfo]:
+    """Check that the content of ``tar`` will be extracted safely
+
+    Args:
+        tar: The tarball file
+        base: The directory where the tarball will be extracted
+
+    Returns:
+        list of tarball members
+    """
+    result = []
+    for member in tar:
+        member_path = os.path.join(base, member.name)
+        abs_base = os.path.abspath(base)
+        abs_member = os.path.abspath(member_path)
+        if not abs_member.startswith(abs_base):
+            raise RuntimeError(
+                f"The tarball member {member_path} in downloading model contains "
+                f"directory traversal sequence which may contain harmful payload."
+            )
+        elif member.issym() or member.islnk():
+            raise RuntimeError(
+                f"The tarball member {member_path} in downloading model contains "
+                f"symbolic links which may contain harmful payload."
+            )
+        result.append(member)
+    return result
+
+
+def _extract_model_safe(
+    model_tar_path: str | os.PathLike, local_model_with_data_dir_path: str | os.PathLike
+) -> None:
+    """Safely extracts a tar file to a specified directory.
+
+    This function ensures that the extraction process mitigates against
+    directory traversal vulnerabilities by validating or sanitizing paths
+    within the tar file. It also provides compatibility for different versions
+    of the tarfile module by checking for the availability of certain attributes
+    or methods before invoking them.
+
+    Args:
+        model_tar_path: The path to the tar file to be extracted.
+        local_model_with_data_dir_path: The directory path where the tar file
+            contents will be extracted to.
+    """
+    with tarfile.open(model_tar_path) as model_with_data_zipped:
+        # Mitigate tarball directory traversal risks
+        if hasattr(tarfile, "data_filter"):
+            model_with_data_zipped.extractall(
+                path=local_model_with_data_dir_path, filter="data"
+            )
+        else:
+            model_with_data_zipped.extractall(
+                path=local_model_with_data_dir_path,
+                members=_tar_members_filter(
+                    model_with_data_zipped, local_model_with_data_dir_path
+                ),
+            )
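
Where the running Python exposes PEP 706's tarfile data filter, extraction delegates to filter="data"; otherwise _tar_members_filter walks the archive and rejects members that escape the target directory or are symbolic/hard links. A hypothetical usage sketch (_extract_model_safe is a private helper and the paths are placeholders):

import onnx.utils

# Extracts model.tar.gz into model_dir/, refusing path-traversal members
# and link members on Pythons without tarfile.data_filter.
onnx.utils._extract_model_safe("model.tar.gz", "model_dir")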
onnx/version.py CHANGED
@@ -1,5 +1,5 @@
 # This file is generated by setup.py. DO NOT EDIT!


-version = "1.16.0"
+version = "1.16.2"
 git_version = ""
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: onnx
-Version: 1.16.0
+Version: 1.16.2
 Summary: Open Neural Network Exchange
 Author-email: ONNX Contributors <onnx-technical-discuss@lists.lfaidata.foundation>
 License: Apache License v2.0
@@ -14,7 +14,7 @@ Requires-Dist: numpy >=1.20
 Requires-Dist: protobuf >=3.20.2
 Provides-Extra: reference
 Requires-Dist: google-re2 ; extra == 'reference'
-Requires-Dist: Pillow ; extra == 'reference'
+Requires-Dist: pillow ; extra == 'reference'

 <!--
 Copyright (c) ONNX Project Contributors