onnx 1.14.0__cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl → 1.14.1__cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.

Potentially problematic release: this version of onnx might be problematic.

onnx/checker.cc CHANGED
@@ -151,8 +151,8 @@ void check_tensor(const TensorProto& tensor, const CheckerContext& ctx) {
  "' points outside the directory");
  }
  std::wstring data_path = path_join(utf8str_to_wstring(ctx.get_model_dir()), relative_path);
- struct _stat buff;
- if (_wstat(data_path.c_str(), &buff) != 0) {
+ struct _stat64 buff;
+ if (_wstat64(data_path.c_str(), &buff) != 0) {
  fail_check(
  "Data of TensorProto ( tensor name: ",
  tensor.name(),
@@ -183,9 +183,14 @@ void check_tensor(const TensorProto& tensor, const CheckerContext& ctx) {
  "' points outside the directory");
  }
  std::string data_path = path_join(ctx.get_model_dir(), relative_path);
- // use stat to check whether the file exists
- struct stat buffer;
+ // use stat64 to check whether the file exists
+ #if defined(__APPLE__) || defined(__wasm__)
+ struct stat buffer; // APPLE does not have stat64
  if (stat((data_path).c_str(), &buffer) != 0) {
+ #else
+ struct stat64 buffer; // All POSIX except APPLE have stat64
+ if (stat64((data_path).c_str(), &buffer) != 0) {
+ #endif
  fail_check(
  "Data of TensorProto ( tensor name: ",
  tensor.name(),
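
The checker change above swaps `stat`/`_wstat` for their 64-bit variants; with a 32-bit `off_t` (or Windows `_stat`), checking an external tensor data file larger than 2 GiB can otherwise fail even though the file exists. A minimal POSIX-only sketch of the same pattern, not the ONNX implementation (the file name and the `_LARGEFILE64_SOURCE` define are assumptions that may vary by libc):

```cpp
// Minimal POSIX-only sketch of the existence check the checker performs for
// external tensor data, using the 64-bit stat variants so files > 2 GiB work
// even when off_t is 32-bit. Not the ONNX implementation; the path is made up.
#define _LARGEFILE64_SOURCE 1  // may be required for stat64 on some libcs
#include <sys/stat.h>

#include <cstdio>

bool external_data_exists(const char* data_path, long long* size_out) {
#if defined(__APPLE__) || defined(__wasm__)
  struct stat buffer;  // macOS/wasm: plain stat is already 64-bit
  if (stat(data_path, &buffer) != 0)
    return false;
#else
  struct stat64 buffer;  // other POSIX: use the explicit 64-bit variant
  if (stat64(data_path, &buffer) != 0)
    return false;
#endif
  *size_out = static_cast<long long>(buffer.st_size);
  return true;
}

int main() {
  long long size = 0;
  if (external_data_exists("weights.bin", &size))
    std::printf("found, %lld bytes\n", size);
  else
    std::printf("missing or stat failed\n");
  return 0;
}
```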
onnx/common/version.h CHANGED
@@ -9,6 +9,6 @@
  namespace ONNX_NAMESPACE {
 
  // Represents the most recent release version. Updated with every release.
- constexpr const char* LAST_RELEASE_VERSION = "1.14.0";
+ constexpr const char* LAST_RELEASE_VERSION = "1.14.1";
 
  } // namespace ONNX_NAMESPACE
@@ -12,6 +12,116 @@ This operator produces a constant tensor. Exactly one of the provided attributes
  or value_* must be specified.
  )DOC";
 
+ void ConstantInference(InferenceContext& ctx) {
+ auto* value = ctx.getAttribute("value");
+ auto* sparse_value = ctx.getAttribute("sparse_value");
+ auto* value_int = ctx.getAttribute("value_int");
+ auto* value_ints = ctx.getAttribute("value_ints");
+ auto* value_float = ctx.getAttribute("value_float");
+ auto* value_floats = ctx.getAttribute("value_floats");
+ auto* value_string = ctx.getAttribute("value_string");
+ auto* value_strings = ctx.getAttribute("value_strings");
+
+ std::vector<bool> non_null_attr = {
+ (nullptr != value),
+ (nullptr != sparse_value),
+ (nullptr != value_int),
+ (nullptr != value_ints),
+ (nullptr != value_float),
+ (nullptr != value_floats),
+ (nullptr != value_string),
+ (nullptr != value_strings)};
+ if (std::count(non_null_attr.begin(), non_null_attr.end(), true) != 1) {
+ fail_shape_inference(
+ "One and only one of the attributes 'value', 'value_*' or 'sparse_value' must be specified for a Constant node.");
+ }
+
+ if (nullptr != value) {
+ // OpSchema::Verify check ensures that the attribute value has_t():
+ const TensorProto& tensor_proto = value->t();
+ updateOutputElemType(ctx, 0, tensor_proto.data_type());
+ updateOutputShape(ctx, 0, tensor_proto);
+ return;
+ }
+
+ if (nullptr != value_int) {
+ // OpSchema::Verify check ensures that the attribute value has_i():
+ if (!value_int->has_i()) {
+ fail_shape_inference("Attribute 'value_int' expect an integer.")
+ }
+ updateOutputElemType(ctx, 0, TensorProto::INT64);
+ updateOutputShape(ctx, 0, TensorShapeProto());
+ return;
+ }
+
+ if (nullptr != value_ints) {
+ // OpSchema::Verify check ensures that the attribute value has ints.
+ if (value_ints->ints_size() < 1) {
+ fail_shape_inference("Attribute 'value_ints' expect a list of integers.");
+ }
+ updateOutputElemType(ctx, 0, TensorProto::INT64);
+ appendDim(getOutputShape(ctx, 0), value_ints->ints_size());
+ return;
+ }
+
+ if (nullptr != value_float) {
+ // OpSchema::Verify check ensures that the attribute value has_i():
+ if (!value_float->has_f()) {
+ fail_shape_inference("Attribute 'value_float' expect a float.");
+ }
+ updateOutputElemType(ctx, 0, TensorProto::FLOAT);
+ updateOutputShape(ctx, 0, TensorShapeProto());
+ return;
+ }
+
+ if (nullptr != value_floats) {
+ // OpSchema::Verify check ensures that the attribute value has ints.
+ if (value_floats->floats_size() < 1) {
+ fail_shape_inference("Attribute 'value_floats' expect a list of floats.");
+ }
+ updateOutputElemType(ctx, 0, TensorProto::FLOAT);
+ appendDim(getOutputShape(ctx, 0), value_floats->floats_size());
+ return;
+ }
+
+ if (nullptr != value_string) {
+ // OpSchema::Verify check ensures that the attribute value has_i():
+ if (!value_string->has_s()) {
+ fail_shape_inference("Attribute 'value_string' expect a string.");
+ }
+ updateOutputElemType(ctx, 0, TensorProto::STRING);
+ updateOutputShape(ctx, 0, TensorShapeProto());
+ return;
+ }
+
+ if (nullptr != value_strings) {
+ // OpSchema::Verify check ensures that the attribute value has ints.
+ if (value_strings->strings_size() < 1) {
+ fail_shape_inference("Attribute 'value_strings' expect a list of strings.");
+ }
+ updateOutputElemType(ctx, 0, TensorProto::STRING);
+ appendDim(getOutputShape(ctx, 0), value_strings->strings_size());
+ return;
+ }
+
+ if (nullptr != sparse_value) {
+ // OpSchema::Verify check ensures that the attribute value
+ // has_sparse_tensor():
+ const SparseTensorProto& sparse = sparse_value->sparse_tensor();
+ // checker.cc::check_sparse_tensor checks that the sparse-value is
+ // well-formed
+ updateOutputElemType(ctx, 0, sparse.values().data_type());
+ auto* output_shape = getOutputShape(ctx, 0);
+ for (int i = 0; i < sparse.dims_size(); ++i)
+ appendDim(output_shape, sparse.dims(i));
+ return;
+ }
+
+ fail_shape_inference(
+ "TypeAndShapeInferenceFunction implementation incomplete: "
+ "this line should never be reached.");
+ }
+
  ONNX_OPERATOR_SET_SCHEMA(
  Constant,
  13,
@@ -55,115 +165,7 @@ ONNX_OPERATOR_SET_SCHEMA(
  false)
  .Output(0, "output", "Output tensor containing the same value of the provided tensor.", "T")
  .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
- .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
- auto* value = ctx.getAttribute("value");
- auto* sparse_value = ctx.getAttribute("sparse_value");
- auto* value_int = ctx.getAttribute("value_int");
- auto* value_ints = ctx.getAttribute("value_ints");
- auto* value_float = ctx.getAttribute("value_float");
- auto* value_floats = ctx.getAttribute("value_floats");
- auto* value_string = ctx.getAttribute("value_string");
- auto* value_strings = ctx.getAttribute("value_strings");
-
- std::vector<bool> non_null_attr = {
- (nullptr != value),
- (nullptr != sparse_value),
- (nullptr != value_int),
- (nullptr != value_ints),
- (nullptr != value_float),
- (nullptr != value_floats),
- (nullptr != value_string),
- (nullptr != value_strings)};
- if (std::count(non_null_attr.begin(), non_null_attr.end(), true) != 1) {
- fail_shape_inference(
- "One and only one of the attributes 'value', 'value_*' or 'sparse_value' must be specified for a Constant node.");
- }
-
- if (nullptr != value) {
- // OpSchema::Verify check ensures that the attribute value has_t():
- const TensorProto& tensor_proto = value->t();
- updateOutputElemType(ctx, 0, tensor_proto.data_type());
- updateOutputShape(ctx, 0, tensor_proto);
- return;
- }
-
- if (nullptr != value_int) {
- // OpSchema::Verify check ensures that the attribute value has_i():
- if (!value_int->has_i()) {
- fail_shape_inference("Attribute 'value_int' expect an integer.")
- }
- updateOutputElemType(ctx, 0, TensorProto::INT64);
- updateOutputShape(ctx, 0, TensorShapeProto());
- return;
- }
-
- if (nullptr != value_ints) {
- // OpSchema::Verify check ensures that the attribute value has ints.
- if (value_ints->ints_size() < 1) {
- fail_shape_inference("Attribute 'value_ints' expect a list of integers.");
- }
- updateOutputElemType(ctx, 0, TensorProto::INT64);
- appendDim(getOutputShape(ctx, 0), value_ints->ints_size());
- return;
- }
-
- if (nullptr != value_float) {
- // OpSchema::Verify check ensures that the attribute value has_i():
- if (!value_float->has_f()) {
- fail_shape_inference("Attribute 'value_float' expect a float.");
- }
- updateOutputElemType(ctx, 0, TensorProto::FLOAT);
- updateOutputShape(ctx, 0, TensorShapeProto());
- return;
- }
-
- if (nullptr != value_floats) {
- // OpSchema::Verify check ensures that the attribute value has ints.
- if (value_floats->floats_size() < 1) {
- fail_shape_inference("Attribute 'value_floats' expect a list of floats.");
- }
- updateOutputElemType(ctx, 0, TensorProto::FLOAT);
- appendDim(getOutputShape(ctx, 0), value_floats->floats_size());
- return;
- }
-
- if (nullptr != value_string) {
- // OpSchema::Verify check ensures that the attribute value has_i():
- if (!value_string->has_s()) {
- fail_shape_inference("Attribute 'value_string' expect a string.");
- }
- updateOutputElemType(ctx, 0, TensorProto::STRING);
- updateOutputShape(ctx, 0, TensorShapeProto());
- return;
- }
-
- if (nullptr != value_strings) {
- // OpSchema::Verify check ensures that the attribute value has ints.
- if (value_strings->strings_size() < 1) {
- fail_shape_inference("Attribute 'value_strings' expect a list of strings.");
- }
- updateOutputElemType(ctx, 0, TensorProto::STRING);
- appendDim(getOutputShape(ctx, 0), value_strings->strings_size());
- return;
- }
-
- if (nullptr != sparse_value) {
- // OpSchema::Verify check ensures that the attribute value
- // has_sparse_tensor():
- const SparseTensorProto& sparse = sparse_value->sparse_tensor();
- // checker.cc::check_sparse_tensor checks that the sparse-value is
- // well-formed
- updateOutputElemType(ctx, 0, sparse.values().data_type());
- auto* output_shape = getOutputShape(ctx, 0);
- for (int i = 0; i < sparse.dims_size(); ++i)
- appendDim(output_shape, sparse.dims(i));
- return;
- }
-
- fail_shape_inference(
- "TypeAndShapeInferenceFunction implementation incomplete: "
- "this line should never be reached.");
- }));
+ .TypeAndShapeInferenceFunction(ConstantInference));
 
  static const char* Constant_ver12_doc = R"DOC(
  This operator produces a constant tensor. Exactly one of the provided attributes, either value, sparse_value,
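
The change above only extracts the inline lambda into the named `ConstantInference` helper; its behavior is unchanged. It maps each attribute to an output element type and shape: `value_int`/`value_float`/`value_string` give a scalar of INT64/FLOAT/STRING, the `value_*s` list forms give a rank-1 tensor whose length is the list size, and `value`/`sparse_value` take both type and dims from the embedded tensor. A minimal sketch of that mapping with a simplified result struct, using hypothetical names rather than the ONNX API:

```cpp
// Sketch of the type/shape mapping ConstantInference applies, using a
// simplified result struct instead of the ONNX InferenceContext API.
// InferredOutput and the infer_from_* functions are illustration-only names.
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

struct InferredOutput {
  std::string elem_type;      // "INT64", "FLOAT", "STRING", ...
  std::vector<int64_t> dims;  // empty => scalar
};

// value_ints produces a rank-1 INT64 tensor whose single dimension is the list size.
InferredOutput infer_from_value_ints(const std::vector<int64_t>& value_ints) {
  return {"INT64", {static_cast<int64_t>(value_ints.size())}};
}

// value_float produces a scalar FLOAT (empty shape).
InferredOutput infer_from_value_float() {
  return {"FLOAT", {}};
}

int main() {
  InferredOutput a = infer_from_value_ints({1, 2, 3});
  std::printf("value_ints  -> %s, shape [%lld]\n", a.elem_type.c_str(),
              static_cast<long long>(a.dims[0]));
  InferredOutput b = infer_from_value_float();
  std::printf("value_float -> %s, scalar (rank %zu)\n", b.elem_type.c_str(), b.dims.size());
  return 0;
}
```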
onnx/defs/tensor/defs.cc CHANGED
@@ -520,7 +520,7 @@ ONNX_OPERATOR_SET_SCHEMA(
  output_length->set_dim_value((end - start) < 0 ? 0 : (end - start));
  })
  .PartialDataPropagationFunction([](DataPropagationContext& ctx) {
- if (ctx.getInputType(0)->tensor_type().has_shape()) {
+ if (hasInputShape(ctx, 0)) {
  auto& input_shape = ctx.getInputType(0)->tensor_type().shape();
  int64_t rank = static_cast<int64_t>(input_shape.dim_size());
  int64_t start = getAttribute(ctx, "start", 0);
onnx/defs/tensor/old.cc CHANGED
@@ -5160,7 +5160,7 @@ ONNX_OPERATOR_SET_SCHEMA(
  output_length->set_dim_value((end - start) < 0 ? 0 : (end - start));
  })
  .PartialDataPropagationFunction([](DataPropagationContext& ctx) {
- if (ctx.getInputType(0)->tensor_type().has_shape()) {
+ if (hasInputShape(ctx, 0)) {
  auto& input_shape = ctx.getInputType(0)->tensor_type().shape();
  int64_t rank = static_cast<int64_t>(input_shape.dim_size());
  int64_t start = getAttribute(ctx, "start", 0);
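
Both data-propagation functions above now go through `hasInputShape(ctx, 0)` instead of dereferencing `ctx.getInputType(0)` directly, which avoids a null-pointer dereference when the input has no recorded type. A simplified stand-in illustrating the guard (the `Fake*` types and `has_input_shape` are hypothetical, not ONNX's actual helper):

```cpp
// Simplified stand-in showing the null-safe guard that hasInputShape provides;
// the Fake* types are hypothetical, not ONNX's actual classes.
#include <cstdio>

struct FakeShape { int rank; };
struct FakeType { bool has_shape; FakeShape shape; };

struct FakeContext {
  const FakeType* input_type;  // may be null for a missing or untyped input
  const FakeType* getInputType(int) const { return input_type; }
};

// Checks the pointer before touching the shape, unlike a bare
// ctx.getInputType(0)->tensor_type().has_shape() chain.
bool has_input_shape(const FakeContext& ctx, int i) {
  const FakeType* t = ctx.getInputType(i);
  return t != nullptr && t->has_shape;
}

int main() {
  FakeContext missing{nullptr};
  std::printf("missing input -> %d\n", static_cast<int>(has_input_shape(missing, 0)));  // 0, no crash

  FakeType typed{true, {3}};
  FakeContext present{&typed};
  std::printf("typed input   -> %d\n", static_cast<int>(has_input_shape(present, 0)));  // 1
  return 0;
}
```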
onnx/helper.py CHANGED
@@ -72,6 +72,7 @@ VERSION_TABLE: VersionTableType = [
  ("1.13.0", 8, 18, 3, 1),
  ("1.13.1", 8, 18, 3, 1),
  ("1.14.0", 9, 19, 3, 1),
+ ("1.14.1", 9, 19, 3, 1),
  ]
 
  VersionMapType = Dict[Tuple[str, int], int]
@@ -3,6 +3,7 @@
  // SPDX-License-Identifier: Apache-2.0
 
  #include "onnx/shape_inference/implementation.h"
+ #include <algorithm>
  #include <fstream>
  #include <list>
  #include "onnx/checker.h"
@@ -340,6 +341,56 @@ class ShapeInferenceImplBase {
  }
  }
 
+ // Initialize a DataValueMap for a called function from the DataValueMap of the caller
+ void bindValuesOnCall(
+ const DataValueMap& caller_map,
+ const NodeProto& caller,
+ DataValueMap& callee_map,
+ const FunctionProto& callee) {
+ auto num_inputs = (std::min)(caller.input_size(), callee.input_size());
+ for (int i = 0; i < num_inputs; ++i) {
+ const std::string& actual = caller.input(i);
+ const std::string& formal = callee.input(i);
+ if (!actual.empty()) {
+ auto it = caller_map.find(actual);
+ if (it != caller_map.end()) {
+ callee_map[formal] = it->second;
+ }
+ }
+ }
+ }
+
+ // Update a DataValueMap for a calling function from the DataValueMap of the callee
+ void bindValuesOnReturn(
+ const DataValueMap& callee_map,
+ const FunctionProto& callee,
+ DataValueMap& caller_map,
+ const NodeProto& caller) {
+ auto num_outputs = (std::min)(caller.output_size(), callee.output_size());
+ for (int i = 0; i < num_outputs; ++i) {
+ const std::string& actual = caller.output(i);
+ const std::string& formal = callee.output(i);
+ if (!actual.empty()) {
+ auto it = callee_map.find(formal);
+ if (it != callee_map.end()) {
+ caller_map[actual] = it->second;
+ }
+ }
+ }
+ }
+
+ void processCall(const NodeProto& caller, const FunctionProto& callee, InferenceContext& ctx) {
+ DataValueMap callee_value_map;
+ if (generated_shape_data_by_name != nullptr) {
+ bindValuesOnCall(*generated_shape_data_by_name, caller, callee_value_map, callee);
+ }
+ InferShapeForFunctionNode(
+ callee, schema_registry, ctx, options, model_local_functions_map, symbol_table, &callee_value_map);
+ if (generated_shape_data_by_name != nullptr) {
+ bindValuesOnReturn(callee_value_map, callee, *generated_shape_data_by_name, caller);
+ }
+ }
+
  void process(NodeProto& n) {
  // Resolve domain for node
  auto dit = opset_imports.find(n.domain());
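
The new `bindValuesOnCall`/`bindValuesOnReturn` helpers copy statically known values across the call boundary by position: the i-th actual input (or output) name on the calling node is matched with the i-th formal parameter name of the function body. A minimal sketch of the call-side binding with plain strings standing in for `TensorShapeProto` values (the names here are hypothetical, not the ONNX types):

```cpp
// Sketch of the positional actual->formal binding done by bindValuesOnCall,
// with std::string values standing in for TensorShapeProto; names are hypothetical.
#include <algorithm>
#include <cstdio>
#include <string>
#include <unordered_map>
#include <vector>

using ValueMap = std::unordered_map<std::string, std::string>;

void bind_on_call(const ValueMap& caller_map,
                  const std::vector<std::string>& actual_inputs,
                  const std::vector<std::string>& formal_inputs,
                  ValueMap& callee_map) {
  size_t n = std::min(actual_inputs.size(), formal_inputs.size());
  for (size_t i = 0; i < n; ++i) {
    const std::string& actual = actual_inputs[i];
    if (actual.empty())
      continue;  // an omitted optional input has an empty name
    auto it = caller_map.find(actual);
    if (it != caller_map.end())
      callee_map[formal_inputs[i]] = it->second;  // formal name receives the caller's known value
  }
}

int main() {
  ValueMap caller = {{"shape_of_x", "[2, 3, 4]"}};
  ValueMap callee;
  bind_on_call(caller, {"shape_of_x", ""}, {"target_shape", "axes"}, callee);
  std::printf("target_shape -> %s\n", callee["target_shape"].c_str());  // [2, 3, 4]
  std::printf("axes bound?  -> %zu\n", callee.count("axes"));           // 0
  return 0;
}
```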
@@ -373,14 +424,7 @@ class ShapeInferenceImplBase {
  if (schema->has_type_and_shape_inference_function()) {
  schema->GetTypeAndShapeInferenceFunction()(ctx);
  } else if (schema->HasFunction()) {
- InferShapeForFunctionNode(
- *(schema->GetFunction()),
- schema_registry,
- ctx,
- options,
- model_local_functions_map,
- symbol_table,
- generated_shape_data_by_name);
+ processCall(n, *(schema->GetFunction()), ctx);
  } else {
  // Continue with inference for remaining nodes
  return;
@@ -388,14 +432,7 @@ class ShapeInferenceImplBase {
  } else if (model_local_functions_map.size() > 0) {
  auto iter = model_local_functions_map.find(GetModelLocalFunctionsMapIdentifier(n.domain(), n.op_type()));
  if (iter != model_local_functions_map.end()) {
- InferShapeForFunctionNode(
- *(iter->second),
- schema_registry,
- ctx,
- options,
- model_local_functions_map,
- symbol_table,
- generated_shape_data_by_name);
+ processCall(n, *(iter->second), ctx);
  } else {
  has_unsupported_op = true;
  return;
@@ -588,10 +625,12 @@ class ShapeInferenceImplBase {
  // Create a temporary initializer value map
  for (int i = 0; i < num_actual_inputs && i < num_func_inputs; ++i) {
  const TypeProto* type = ctx.getInputType(i);
- if (type->value_case() == TypeProto::kTensorType && ctx.getInputData(i) != nullptr) {
- input_data_by_name[func_proto.input().Get(i)] = ctx.getInputData(i);
- } else if (type->value_case() == TypeProto::kSparseTensorType && ctx.getInputSparseData(i) != nullptr) {
- input_sparse_data_by_name[func_proto.input().Get(i)] = ctx.getInputSparseData(i);
+ if (type != nullptr) {
+ if (type->value_case() == TypeProto::kTensorType && ctx.getInputData(i) != nullptr) {
+ input_data_by_name[func_proto.input().Get(i)] = ctx.getInputData(i);
+ } else if (type->value_case() == TypeProto::kSparseTensorType && ctx.getInputSparseData(i) != nullptr) {
+ input_sparse_data_by_name[func_proto.input().Get(i)] = ctx.getInputSparseData(i);
+ }
  }
  }
 
@@ -602,6 +641,12 @@ class ShapeInferenceImplBase {
  }
  }
 
+ for (auto& default_value : func_proto.attribute_proto()) {
+ const std::string& name = default_value.name();
+ const AttributeProto* value = ctx.getAttribute(name);
+ attr_map[name] = (value != nullptr) ? value : &default_value;
+ }
+
  for (auto& n : func_proto.node()) {
  process(n, attr_map);
  }
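
The added loop above makes attribute defaults declared on the `FunctionProto` visible while the function body is processed: an attribute supplied on the calling node takes precedence, otherwise the declared default is used. A small sketch of that resolution with a simplified `Attr` struct standing in for `AttributeProto` (hypothetical names, not the ONNX implementation):

```cpp
// Sketch of the attribute-default resolution: a value supplied on the calling
// node wins, otherwise the FunctionProto's declared default is used.
// Attr and resolve_attrs are simplified, hypothetical stand-ins.
#include <cstdio>
#include <string>
#include <unordered_map>
#include <vector>

struct Attr {
  std::string name;
  int value;
};

std::unordered_map<std::string, const Attr*> resolve_attrs(
    const std::vector<Attr>& function_defaults,
    const std::unordered_map<std::string, const Attr*>& node_attrs) {
  std::unordered_map<std::string, const Attr*> attr_map;
  for (const Attr& default_value : function_defaults) {
    auto it = node_attrs.find(default_value.name);
    // Node-supplied attribute overrides the function's declared default.
    attr_map[default_value.name] = (it != node_attrs.end()) ? it->second : &default_value;
  }
  return attr_map;
}

int main() {
  std::vector<Attr> defaults = {{"alpha", 1}, {"axis", -1}};
  Attr supplied{"alpha", 7};
  std::unordered_map<std::string, const Attr*> node_attrs = {{"alpha", &supplied}};

  auto resolved = resolve_attrs(defaults, node_attrs);
  std::printf("alpha = %d (node override)\n", resolved["alpha"]->value);    // 7
  std::printf("axis  = %d (function default)\n", resolved["axis"]->value);  // -1
  return 0;
}
```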
@@ -630,7 +675,7 @@ class ShapeInferenceImplBase {
  SymbolTable* symbol_table_in,
  const ModelLocalFunctionsMap& model_local_functions_map_in,
  const ISchemaRegistry* schema_registry_in = OpSchemaRegistry::Instance(),
- std::unordered_map<std::string, TensorShapeProto>* generated_shape_data_by_name_in = nullptr,
+ DataValueMap* generated_shape_data_by_name_in = nullptr,
  const int ir_version_in = IR_VERSION // default the latest one
  )
  : g(*g_in),
@@ -669,7 +714,7 @@ class ShapeInferenceImplBase {
  SymbolTable* symbol_table;
  const ModelLocalFunctionsMap& model_local_functions_map;
  const ISchemaRegistry* schema_registry;
- std::unordered_map<std::string, TensorShapeProto>* generated_shape_data_by_name;
+ DataValueMap* generated_shape_data_by_name;
  int ir_version;
  GraphInferenceContext graph_inference_context;
 
@@ -698,10 +743,10 @@ static void InferShapesImpl(
  SymbolTable* symbol_table,
  const ModelLocalFunctionsMap& model_local_functions_map,
  const ISchemaRegistry* schema_registry = OpSchemaRegistry::Instance(),
- std::unordered_map<std::string, TensorShapeProto>* generated_shape_data_by_name = nullptr,
+ DataValueMap* generated_shape_data_by_name = nullptr,
  const int ir_version = IR_VERSION // default the latest one
  ) {
- std::unordered_map<std::string, TensorShapeProto> empty;
+ DataValueMap empty;
  if (generated_shape_data_by_name == nullptr) {
  generated_shape_data_by_name = &empty;
  }
@@ -749,7 +794,7 @@ void InferShapes(
  ModelProto& m,
  const ISchemaRegistry* schema_registry,
  const ShapeInferenceOptions& options,
- std::unordered_map<std::string, TensorShapeProto>* generated_shape_data_by_name) {
+ DataValueMap* generated_shape_data_by_name) {
  auto opset_imports = GetOpsetImportsFromProto(m);
  SymbolTableImpl symbol_table;
  ModelLocalFunctionsMap model_local_functions_by_id;
@@ -774,7 +819,7 @@ void InferShapes(
  const std::string& save_path,
  const ISchemaRegistry* schema_registry,
  const ShapeInferenceOptions& options,
- std::unordered_map<std::string, TensorShapeProto>* generated_shape_data_by_name) {
+ DataValueMap* generated_shape_data_by_name) {
  ModelProto model;
  LoadProtoFromPath(model_path, model);
  InferShapes(model, schema_registry, options, generated_shape_data_by_name);
@@ -800,7 +845,7 @@ void InferShapeForFunctionNode(
  const ShapeInferenceOptions& options,
  const std::unordered_map<std::string, const FunctionProto*>& model_local_functions_map,
  SymbolTable* symbol_table,
- std::unordered_map<std::string, TensorShapeProto>* generated_shape_data_by_name) {
+ DataValueMap* generated_shape_data_by_name) {
  GraphProto g;
  ShapeInferenceImplBase base(
  &g,
@@ -821,7 +866,7 @@ void InferShapeForFunctionNode(
  const ShapeInferenceOptions& options,
  const std::unordered_map<std::string, const FunctionProto*>& model_local_functions_map,
  SymbolTable* symbol_table,
- std::unordered_map<std::string, TensorShapeProto>* generated_shape_data_by_name) {
+ DataValueMap* generated_shape_data_by_name) {
  auto opset_imports = GetOpsetImportsFromProto(function_proto);
  InferShapeForFunctionNode(
  function_proto,
@@ -866,7 +911,14 @@ struct FunctionInferenceContext : public InferenceContext {
  }
 
  const TypeProto* getInputType(size_t index) const override {
- return (index < input_types_.size()) ? &input_types_[index] : nullptr;
+ // We should return nullptr for missing optional parameters.
+ // An uninitialized TypeProto() is used for missing optional parameters, and
+ // is mapped to a nullptr here.
+ if (index >= input_types_.size())
+ return nullptr;
+ if (input_types_[index].value_case() == TypeProto::ValueCase::VALUE_NOT_SET)
+ return nullptr;
+ return &input_types_[index];
  }
 
  TypeProto* getOutputType(size_t index) override {
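
`FunctionInferenceContext::getInputType` now treats an input whose `TypeProto` has no value set as absent and returns `nullptr`, so the rest of the inference code can use its usual null checks for omitted optional inputs. A simplified stand-in for that mapping (the `FakeTypeProto` enum is hypothetical and replaces the real `TypeProto::value_case()`):

```cpp
// Simplified stand-in for the new getInputType behavior: an input whose type
// value is not set is treated as a missing optional input and reported as nullptr.
// FakeTypeProto is hypothetical, not the real TypeProto.
#include <cstdio>
#include <vector>

struct FakeTypeProto {
  enum ValueCase { VALUE_NOT_SET = 0, kTensorType = 1 };
  ValueCase value_case() const { return value_case_; }
  ValueCase value_case_ = VALUE_NOT_SET;
};

const FakeTypeProto* get_input_type(const std::vector<FakeTypeProto>& input_types, size_t index) {
  if (index >= input_types.size())
    return nullptr;  // no such input at all
  if (input_types[index].value_case() == FakeTypeProto::VALUE_NOT_SET)
    return nullptr;  // placeholder for an omitted optional input
  return &input_types[index];
}

int main() {
  std::vector<FakeTypeProto> inputs(2);
  inputs[0].value_case_ = FakeTypeProto::kTensorType;  // a real, typed input
  // inputs[1] stays VALUE_NOT_SET: an omitted optional input.

  std::printf("input 0: %s\n", get_input_type(inputs, 0) ? "typed" : "nullptr");
  std::printf("input 1: %s\n", get_input_type(inputs, 1) ? "typed" : "nullptr");
  std::printf("input 5: %s\n", get_input_type(inputs, 5) ? "typed" : "nullptr");
  return 0;
}
```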
@@ -14,6 +14,13 @@ namespace shape_inference {
 
  using ModelLocalFunctionsMap = std::unordered_map<std::string, const FunctionProto*>;
 
+ // We reuse TensorShapeProto to propagate statically known (partial) information about
+ // the values of tensors. It is intended for tensors used to store shape information
+ // (the return values of ops like Shape and input values of ops like Reshape/Expand).
+
+ // A DataValueMap is used to store the statically known (partial) values of variables.
+ using DataValueMap = std::unordered_map<std::string, TensorShapeProto>;
+
  class SymbolTableImpl : public SymbolTable {
  public:
  SymbolTableImpl() : index_(0) {}
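
As the new comment explains, a `DataValueMap` entry reuses `TensorShapeProto` to record the statically known, possibly partial, integer contents of a shape-carrying tensor (for example the output of `Shape` feeding a `Reshape`). A sketch of what one entry conceptually holds, using a small stand-in struct rather than the real proto (all names here are hypothetical):

```cpp
// Sketch of what one DataValueMap entry represents: the statically known,
// possibly partial, integer contents of a shape-carrying tensor. Dim is a
// hypothetical stand-in for TensorShapeProto's dim_value/dim_param distinction.
#include <cstdint>
#include <cstdio>
#include <string>
#include <unordered_map>
#include <vector>

struct Dim {
  bool known;          // true => value holds a concrete integer
  int64_t value;       // valid only when known
  std::string symbol;  // symbolic name when not known, e.g. "batch"
};

using SketchDataValueMap = std::unordered_map<std::string, std::vector<Dim>>;

int main() {
  SketchDataValueMap values;
  // Result of Shape(x) where x has shape [batch, 3, 224, 224]: the first
  // element is only known symbolically, the rest are concrete integers.
  values["shape_of_x"] = {{false, 0, "batch"}, {true, 3, ""}, {true, 224, ""}, {true, 224, ""}};

  for (const Dim& d : values["shape_of_x"]) {
    if (d.known)
      std::printf("%lld ", static_cast<long long>(d.value));
    else
      std::printf("%s ", d.symbol.c_str());
  }
  std::printf("\n");  // prints: batch 3 224 224
  return 0;
}
```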
@@ -87,7 +94,7 @@ struct GraphInferenceContext {
  SymbolTable* symbol_table_in = nullptr,
  const ModelLocalFunctionsMap& model_local_functions_in = {},
  const ISchemaRegistry* schema_registry_in = OpSchemaRegistry::Instance(),
- std::unordered_map<std::string, TensorShapeProto>* generated_shape_data_by_name_in = nullptr,
+ DataValueMap* generated_shape_data_by_name_in = nullptr,
  const int ir_version_in = IR_VERSION)
  : outer_scope_value_types_by_name{&outer_scope_value_types_by_name_in},
  opset_imports{opset_imports_in},
@@ -102,7 +109,7 @@ struct GraphInferenceContext {
  SymbolTable* symbol_table;
  const ModelLocalFunctionsMap& model_local_functions;
  const ISchemaRegistry* schema_registry;
- std::unordered_map<std::string, TensorShapeProto>* generated_shape_data_by_name;
+ DataValueMap* generated_shape_data_by_name;
  const int ir_version;
  };
 
@@ -125,7 +132,7 @@ struct InferenceContextImpl : public InferenceContext {
  const std::unordered_map<std::string, TypeProto*>& valueTypesByName,
  const std::unordered_map<std::string, const TensorProto*>& inputDataByName,
  const std::unordered_map<std::string, const SparseTensorProto*>& inputSparseDataByName,
- std::unordered_map<std::string, TensorShapeProto>* generatedShapeData = nullptr,
+ DataValueMap* generatedShapeData = nullptr,
  GraphInferenceContext* graphInferenceContext = nullptr)
  : graphInferenceContext_{graphInferenceContext} {
  for (auto& attr : *n.mutable_attribute()) {
@@ -276,7 +283,7 @@ struct DataPropagationContextImpl : public DataPropagationContext {
  NodeProto& n,
  const std::unordered_map<std::string, TypeProto*>& valueTypesByName,
  const std::unordered_map<std::string, const TensorProto*>& inputDataByName,
- std::unordered_map<std::string, TensorShapeProto>& generatedShapeData)
+ DataValueMap& generatedShapeData)
  : generatedShapeData_(generatedShapeData) {
  size_t input_idx = 0;
 
@@ -400,7 +407,7 @@ struct DataPropagationContextImpl : public DataPropagationContext {
  std::unordered_map<size_t, std::string> outputIndexToNameMap_;
  std::vector<const TypeProto*> allInputTypes_;
  std::vector<TypeProto> allOutputTypes_;
- std::unordered_map<std::string, TensorShapeProto>& generatedShapeData_;
+ DataValueMap& generatedShapeData_;
  std::unordered_map<std::string, const AttributeProto*> attributesByName_;
  };
 
@@ -436,14 +443,14 @@ void InferShapes(
  ModelProto& m,
  const ISchemaRegistry* schema_registry = OpSchemaRegistry::Instance(),
  const ShapeInferenceOptions& options = {},
- std::unordered_map<std::string, TensorShapeProto>* generated_shape_data_by_name = nullptr);
+ DataValueMap* generated_shape_data_by_name = nullptr);
 
  void InferShapes(
  const std::string& model_path,
  const std::string& save_path = "",
  const ISchemaRegistry* schema_registry = OpSchemaRegistry::Instance(),
  const ShapeInferenceOptions& options = {},
- std::unordered_map<std::string, TensorShapeProto>* generated_shape_data_by_name = nullptr);
+ DataValueMap* generated_shape_data_by_name = nullptr);
 
  ///
  /// ModelLocalFunctionsMap is a map of function id -> model local function proto
@@ -456,7 +463,7 @@ void InferShapeForFunctionNode(
  const ShapeInferenceOptions& options = {},
  const ModelLocalFunctionsMap& model_local_functions_map = {},
  SymbolTable* symbolTable = nullptr,
- std::unordered_map<std::string, TensorShapeProto>* generated_shape_data_by_name = nullptr);
+ DataValueMap* generated_shape_data_by_name = nullptr);
 
  ///
  /// ModelLocalFunctionsMap is a map of function id -> model local function proto
@@ -470,13 +477,15 @@ void InferShapeForFunctionNode(
  const ShapeInferenceOptions& options = {},
  const ModelLocalFunctionsMap& model_local_functions_map = {},
  SymbolTable* symbolTable = nullptr,
- std::unordered_map<std::string, TensorShapeProto>* generated_shape_data_by_name = nullptr);
+ DataValueMap* generated_shape_data_by_name = nullptr);
 
  ///
  /// Apply type-and-shape-inference based checks to a Function body.
  /// Returns the inferred types of the outputs of the function.
  /// Inference depends on the types of the inputs of the function as well as
  /// the attribute values supplied.
+ /// A TypeProto with value_case() == TypeProto::ValueCase::VALUE_NOT_SET is used
+ /// for missing optional parameters.
  ///
  std::vector<TypeProto> InferFunctionOutputTypes(
  const FunctionProto& func_proto,
onnx/version.py CHANGED
@@ -1,5 +1,5 @@
  # This file is generated by setup.py. DO NOT EDIT!
 
 
- version = "1.14.0"
+ version = "1.14.1"
  git_version = "None"
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: onnx
- Version: 1.14.0
+ Version: 1.14.1
  Summary: Open Neural Network Exchange
  Home-page: https://github.com/onnx/onnx
  Author: ONNX
@@ -10,11 +10,11 @@ Classifier: Programming Language :: Python :: 3
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: numpy
- Requires-Dist: protobuf (>=3.20.2)
- Requires-Dist: typing-extensions (>=3.6.2.1)
+ Requires-Dist: protobuf >=3.20.2
+ Requires-Dist: typing-extensions >=3.6.2.1
  Provides-Extra: lint
- Requires-Dist: lintrunner (>=0.10.0) ; extra == 'lint'
- Requires-Dist: lintrunner-adapters (>=0.3) ; extra == 'lint'
+ Requires-Dist: lintrunner >=0.10.0 ; extra == 'lint'
+ Requires-Dist: lintrunner-adapters >=0.3 ; extra == 'lint'
 
  <!--
  Copyright (c) ONNX Project Contributors