JSTprove 1.2.0__py3-none-macosx_11_0_arm64.whl → 1.3.0__py3-none-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
This version of JSTprove has been flagged as potentially problematic.
- {jstprove-1.2.0.dist-info → jstprove-1.3.0.dist-info}/METADATA +1 -1
- {jstprove-1.2.0.dist-info → jstprove-1.3.0.dist-info}/RECORD +30 -24
- python/core/binaries/onnx_generic_circuit_1-3-0 +0 -0
- python/core/circuits/base.py +29 -12
- python/core/circuits/errors.py +1 -2
- python/core/model_processing/converters/base.py +3 -3
- python/core/model_processing/onnx_custom_ops/__init__.py +5 -4
- python/core/model_processing/onnx_quantizer/exceptions.py +2 -2
- python/core/model_processing/onnx_quantizer/layers/base.py +34 -0
- python/core/model_processing/onnx_quantizer/layers/clip.py +92 -0
- python/core/model_processing/onnx_quantizer/layers/max.py +49 -0
- python/core/model_processing/onnx_quantizer/layers/min.py +54 -0
- python/core/model_processing/onnx_quantizer/onnx_op_quantizer.py +6 -0
- python/core/model_templates/circuit_template.py +48 -38
- python/core/utils/errors.py +1 -1
- python/core/utils/scratch_tests.py +29 -23
- python/tests/circuit_e2e_tests/circuit_model_developer_test.py +18 -14
- python/tests/circuit_e2e_tests/helper_fns_for_tests.py +11 -13
- python/tests/circuit_parent_classes/test_ort_custom_layers.py +35 -53
- python/tests/onnx_quantizer_tests/layers/base.py +1 -3
- python/tests/onnx_quantizer_tests/layers/clip_config.py +127 -0
- python/tests/onnx_quantizer_tests/layers/max_config.py +100 -0
- python/tests/onnx_quantizer_tests/layers/min_config.py +94 -0
- python/tests/onnx_quantizer_tests/layers_tests/test_integration.py +6 -5
- python/tests/onnx_quantizer_tests/layers_tests/test_quantize.py +6 -1
- python/tests/onnx_quantizer_tests/test_registered_quantizers.py +17 -8
- python/core/binaries/onnx_generic_circuit_1-2-0 +0 -0
- {jstprove-1.2.0.dist-info → jstprove-1.3.0.dist-info}/WHEEL +0 -0
- {jstprove-1.2.0.dist-info → jstprove-1.3.0.dist-info}/entry_points.txt +0 -0
- {jstprove-1.2.0.dist-info → jstprove-1.3.0.dist-info}/licenses/LICENSE +0 -0
- {jstprove-1.2.0.dist-info → jstprove-1.3.0.dist-info}/top_level.txt +0 -0
python/tests/onnx_quantizer_tests/layers/clip_config.py (new file)
@@ -0,0 +1,127 @@
+from __future__ import annotations
+
+import numpy as np
+
+from python.tests.onnx_quantizer_tests import TEST_RNG_SEED
+from python.tests.onnx_quantizer_tests.layers.base import (
+    e2e_test,
+    edge_case_test,
+    valid_test,
+)
+from python.tests.onnx_quantizer_tests.layers.factory import (
+    BaseLayerConfigProvider,
+    LayerTestConfig,
+)
+
+
+class ClipConfigProvider(BaseLayerConfigProvider):
+    """Test configuration provider for elementwise Clip."""
+
+    @property
+    def layer_name(self) -> str:
+        return "Clip"
+
+    def get_config(self) -> LayerTestConfig:
+        # Treat min / max as optional extra inputs, scalar-shaped by default.
+        # Scalars are encoded as shape [1] for the test harness; ONNX/ORT
+        # will still broadcast them over A.
+        return LayerTestConfig(
+            op_type="Clip",
+            valid_inputs=["A", "min", "max"],
+            valid_attributes={},  # no Clip-specific attrs
+            required_initializers={},  # by default, all three can be dynamic inputs
+            input_shapes={
+                "A": [1, 3, 4, 4],
+                "min": [1],  # scalar-ish bound
+                "max": [1],  # scalar-ish bound
+            },
+            output_shapes={
+                "clip_output": [1, 3, 4, 4],
+            },
+        )
+
+    def get_test_specs(self) -> list:
+        rng = np.random.default_rng(TEST_RNG_SEED)
+
+        return [
+            # --- VALID TESTS ---
+            # Basic Clip with scalar min/max as dynamic inputs.
+            valid_test("basic_scalar_bounds")
+            .description("Clip with A, min, max all as inputs; min/max are scalars.")
+            .override_input_shapes(A=[1, 3, 4, 4], min=[1], max=[1])
+            .override_output_shapes(clip_output=[1, 3, 4, 4])
+            .tags("basic", "elementwise", "clip")
+            .build(),
+            # This keeps the name used by the integration tests:
+            # Clip_broadcast_bounds
+            # Broadcasting here is just scalar → full tensor broadcast.
+            valid_test("broadcast_bounds")
+            .description(
+                "Clip with scalar bounds broadcast over all elements of A "
+                "(mirrors Max/Min broadcast tests but respects ORT's scalar bound "
+                "rules).",
+            )
+            .override_input_shapes(A=[1, 3, 2, 4], min=[1], max=[1])
+            .override_output_shapes(clip_output=[1, 3, 2, 4])
+            .tags("broadcast", "elementwise", "clip", "onnxruntime")
+            .build(),
+            # This keeps the name used by the integration tests:
+            # Clip_initializer_bounds
+            valid_test("initializer_bounds")
+            .description(
+                "Clip where min/max are scalar initializers instead of inputs.",
+            )
+            .override_input_shapes(A=[1, 3, 4, 4])  # only A is a true input
+            # Scalar numpy values → ONNX initializers with shape ()
+            .override_initializer(
+                "min",
+                np.array(rng.uniform(-1.0, 0.0), dtype=np.float64),
+            )
+            .override_initializer(
+                "max",
+                np.array(rng.uniform(0.0, 2.0), dtype=np.float64),
+            )
+            .override_output_shapes(clip_output=[1, 3, 4, 4])
+            .tags("initializer", "elementwise", "clip", "onnxruntime")
+            .build(),
+            # --- E2E TESTS ---
+            e2e_test("e2e_small").description(
+                "End-to-end Clip with small random tensor and scalar bounds.",
+            )
+            # All three are treated as runtime inputs here;
+            # min/max are scalar-shaped [1].
+            .override_input_shapes(
+                A=[1, 3, 4, 4],
+                min=[1],
+                max=[1],
+            )
+            .override_output_shapes(clip_output=[1, 3, 4, 4])
+            .tags("e2e", "clip")
+            .build(),
+            e2e_test("e2e_initializer_bounds").description(
+                "End-to-end Clip where min/max are scalar initializers "
+                "instead of inputs.",
+            )
+            # Only A is a true runtime input; min/max are scalar initializers.
+            .override_input_shapes(
+                A=[1, 3, 4, 4],
+            )
+            .override_initializer(
+                "min",
+                np.array(rng.uniform(-1.0, 0.0), dtype=np.float64),
+            )
+            .override_initializer(
+                "max",
+                np.array(rng.uniform(0.0, 2.0), dtype=np.float64),
+            )
+            .override_output_shapes(clip_output=[1, 3, 4, 4])
+            .tags("e2e", "initializer", "clip")
+            .build(),
+            # --- EDGE / STRESS ---
+            edge_case_test("empty_tensor")
+            .description("Clip with empty tensor input and scalar bounds.")
+            .override_input_shapes(A=[0], min=[1], max=[1])
+            .override_output_shapes(clip_output=[0])
+            .tags("edge", "empty", "clip")
+            .build(),
+        ]
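For reference, the initializer_bounds specs encode min/max as rank-0 tensors rather than graph inputs. The following is a small hand-written sketch of the ONNX graph such a spec describes; it is not taken from the package, and the bound values, dtype, and opset are illustrative only.

# Minimal, illustrative sketch (not code from this package): the graph that the
# "initializer_bounds" spec describes -- Clip(A) with min/max as rank-0 initializers.
import numpy as np
import onnx
from onnx import helper, numpy_helper

A = helper.make_tensor_value_info("A", onnx.TensorProto.FLOAT, [1, 3, 4, 4])
Y = helper.make_tensor_value_info("clip_output", onnx.TensorProto.FLOAT, [1, 3, 4, 4])

# Rank-0 (shape ()) initializers, mirroring the np.array(scalar) overrides in the spec;
# the -0.5 / 1.5 bounds are arbitrary example values.
min_init = numpy_helper.from_array(np.array(-0.5, dtype=np.float32), name="min")
max_init = numpy_helper.from_array(np.array(1.5, dtype=np.float32), name="max")

node = helper.make_node("Clip", inputs=["A", "min", "max"], outputs=["clip_output"])
graph = helper.make_graph(
    [node], "clip_initializer_bounds", [A], [Y], initializer=[min_init, max_init]
)
# Any opset >= 11 takes min/max as inputs; 13 is used here purely for portability.
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 13)])
onnx.checker.check_model(model)

# Reference semantics for these bounds:
x = np.random.default_rng(0).normal(size=(1, 3, 4, 4)).astype(np.float32)
ref = np.clip(x, -0.5, 1.5)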
python/tests/onnx_quantizer_tests/layers/max_config.py (new file)
@@ -0,0 +1,100 @@
+from __future__ import annotations
+
+import numpy as np
+
+from python.tests.onnx_quantizer_tests import TEST_RNG_SEED
+from python.tests.onnx_quantizer_tests.layers.base import (
+    e2e_test,
+    edge_case_test,
+    valid_test,
+)
+from python.tests.onnx_quantizer_tests.layers.factory import (
+    BaseLayerConfigProvider,
+    LayerTestConfig,
+)
+
+
+class MaxConfigProvider(BaseLayerConfigProvider):
+    """Test configuration provider for elementwise Max"""
+
+    @property
+    def layer_name(self) -> str:
+        return "Max"
+
+    def get_config(self) -> LayerTestConfig:
+        return LayerTestConfig(
+            op_type="Max",
+            valid_inputs=["A", "B"],
+            valid_attributes={},  # Max has no layer-specific attributes
+            required_initializers={},  # default: both A and B are dynamic inputs
+            input_shapes={
+                "A": [1, 3, 4, 4],
+                "B": [1, 3, 4, 4],
+            },
+            output_shapes={
+                "max_output": [1, 3, 4, 4],
+            },
+        )
+
+    def get_test_specs(self) -> list:
+        rng = np.random.default_rng(TEST_RNG_SEED)
+        return [
+            # --- VALID TESTS ---
+            valid_test("basic")
+            .description("Basic elementwise Max of two same-shaped tensors")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 4, 4])
+            .tags("basic", "elementwise", "max")
+            .build(),
+            valid_test("broadcast_max")
+            .description("Max with Numpy-style broadcasting along spatial dimensions")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 1, 1])
+            .tags("broadcast", "elementwise", "max", "onnx14")
+            .build(),
+            valid_test("initializer_max")
+            .description("Max where B is an initializer instead of an input")
+            .override_input_shapes(A=[1, 3, 4, 4])
+            .override_initializer(
+                "B",
+                rng.normal(0, 1, (1, 3, 4, 4)).astype(np.float32),
+            )
+            .tags("initializer", "elementwise", "max", "onnxruntime")
+            .build(),
+            # --- E2E TESTS ---
+            e2e_test("e2e_max")
+            .description("End-to-end Max test with random inputs")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 4, 4])
+            .override_output_shapes(max_output=[1, 3, 4, 4])
+            .tags("e2e", "max", "2d")
+            .build(),
+            e2e_test("e2e_broadcast_max")
+            .description(
+                "End-to-end Max with Numpy-style broadcasting along spatial dimensions",
+            )
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 1, 1])
+            .override_output_shapes(max_output=[1, 3, 4, 4])
+            .tags("e2e", "broadcast", "elementwise", "max", "onnx14")
+            .build(),
+            e2e_test("e2e_initializer_max")
+            .description("End-to-end Max where B is an initializer")
+            .override_input_shapes(A=[1, 3, 4, 4])
+            .override_initializer(
+                "B",
+                rng.normal(0, 1, (1, 3, 4, 4)).astype(np.float32),
+            )
+            .override_output_shapes(max_output=[1, 3, 4, 4])
+            .tags("e2e", "initializer", "elementwise", "max", "onnxruntime")
+            .build(),
+            # --- EDGE / STRESS ---
+            edge_case_test("empty_tensor")
+            .description("Max with empty tensor input (zero elements)")
+            .override_input_shapes(A=[0], B=[0])
+            .override_output_shapes(max_output=[0])
+            .tags("edge", "empty", "max")
+            .build(),
+            valid_test("large_tensor")
+            .description("Large tensor max performance/stress test")
+            .override_input_shapes(A=[1, 64, 256, 256], B=[1, 64, 256, 256])
+            .tags("large", "performance", "max")
+            .skip("Performance test, skipped by default")
+            .build(),
+        ]
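The broadcast_max specs assume ONNX Max's multidirectional (numpy-style) broadcasting. A quick standalone check of the shape behaviour they expect, written here only for illustration and not taken from the suite:

# A (1, 3, 1, 1) per-channel tensor broadcast against a (1, 3, 4, 4) activation.
import numpy as np

rng = np.random.default_rng(0)  # any seed; the suite itself uses TEST_RNG_SEED
A = rng.normal(size=(1, 3, 4, 4)).astype(np.float32)
B = rng.normal(size=(1, 3, 1, 1)).astype(np.float32)

ref = np.maximum(A, B)  # numpy broadcasting matches ONNX Max's broadcasting rules
assert ref.shape == (1, 3, 4, 4)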
python/tests/onnx_quantizer_tests/layers/min_config.py (new file)
@@ -0,0 +1,94 @@
+from __future__ import annotations
+
+import numpy as np
+
+from python.tests.onnx_quantizer_tests import TEST_RNG_SEED
+from python.tests.onnx_quantizer_tests.layers.base import (
+    e2e_test,
+    edge_case_test,
+    valid_test,
+)
+from python.tests.onnx_quantizer_tests.layers.factory import (
+    BaseLayerConfigProvider,
+    LayerTestConfig,
+)
+
+
+class MinConfigProvider(BaseLayerConfigProvider):
+    """Test configuration provider for elementwise Min"""
+
+    @property
+    def layer_name(self) -> str:
+        return "Min"
+
+    def get_config(self) -> LayerTestConfig:
+        return LayerTestConfig(
+            op_type="Min",
+            valid_inputs=["A", "B"],
+            valid_attributes={},  # Min has no layer-specific attributes
+            required_initializers={},  # default: both A and B are dynamic inputs
+            input_shapes={
+                "A": [1, 3, 4, 4],
+                "B": [1, 3, 4, 4],
+            },
+            output_shapes={
+                "min_output": [1, 3, 4, 4],
+            },
+        )
+
+    def get_test_specs(self) -> list:
+        rng = np.random.default_rng(TEST_RNG_SEED)
+        return [
+            # --- VALID TESTS ---
+            valid_test("basic")
+            .description("Basic elementwise Min of two same-shaped tensors")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 4, 4])
+            .tags("basic", "elementwise", "min")
+            .build(),
+            valid_test("broadcast_min")
+            .description("Min with Numpy-style broadcasting along spatial dimensions")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 1, 1])
+            .tags("broadcast", "elementwise", "min", "onnx14")
+            .build(),
+            valid_test("initializer_min")
+            .description("Min where B is an initializer instead of an input")
+            .override_input_shapes(A=[1, 3, 4, 4])
+            .override_initializer("B", rng.normal(0, 1, (1, 3, 4, 4)))
+            .tags("initializer", "elementwise", "min", "onnxruntime")
+            .build(),
+            # --- E2E TESTS ---
+            e2e_test("e2e_min")
+            .description("End-to-end Min test with random inputs")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 4, 4])
+            .override_output_shapes(min_output=[1, 3, 4, 4])
+            .tags("e2e", "min", "2d")
+            .build(),
+            e2e_test("e2e_broadcast_min")
+            .description(
+                "End-to-end Min with Numpy-style broadcasting along spatial dimensions",
+            )
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 1, 1])
+            .override_output_shapes(min_output=[1, 3, 4, 4])
+            .tags("e2e", "broadcast", "elementwise", "min", "onnx14")
+            .build(),
+            e2e_test("e2e_initializer_min")
+            .description("End-to-end Min where B is an initializer")
+            .override_input_shapes(A=[1, 3, 4, 4])
+            .override_initializer("B", rng.normal(0, 1, (1, 3, 4, 4)))
+            .override_output_shapes(min_output=[1, 3, 4, 4])
+            .tags("e2e", "initializer", "elementwise", "min", "onnxruntime")
+            .build(),
+            # --- EDGE / STRESS ---
+            edge_case_test("empty_tensor")
+            .description("Min with empty tensor input (zero elements)")
+            .override_input_shapes(A=[0], B=[0])
+            .override_output_shapes(min_output=[0])
+            .tags("edge", "empty", "min")
+            .build(),
+            valid_test("large_tensor")
+            .description("Large tensor min performance/stress test")
+            .override_input_shapes(A=[1, 64, 256, 256], B=[1, 64, 256, 256])
+            .tags("large", "performance", "min")
+            .skip("Performance test, skipped by default")
+            .build(),
+        ]
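As with Max, the empty_tensor spec only pins down shape propagation for zero-element inputs; the numpy behaviour it mirrors is simply this (illustrative check, not part of the suite):

import numpy as np

A = np.empty((0,), dtype=np.float32)
B = np.empty((0,), dtype=np.float32)

out = np.minimum(A, B)  # elementwise Min over zero elements yields another empty tensor
assert out.shape == (0,)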
python/tests/onnx_quantizer_tests/layers_tests/test_integration.py
@@ -159,21 +159,22 @@ class TestIntegration(BaseQuantizerTest):
         quantized_input_names = [inp.name for inp in quantized_session.get_inputs()]
         quantized_output_name = quantized_session.get_outputs()[0].name
 
-        # For quantized model,
-
+        # For the quantized model, cast inputs to float64 for ORT
+        quantized_inputs = {}
         for name in quantized_input_names:
             if name in dummy_inputs:
-
+                quantized_inputs[name] = dummy_inputs[name].astype(np.float64)
             else:
-                # If quantized model has different inputs, skip
+                # If quantized model has different inputs, skip this case
                 pytest.skip(
                     f"Quantized model input mismatch for {layer_name}.{test_spec.name}",
                 )
 
         quantized_output = quantized_session.run(
             [quantized_output_name],
-
+            quantized_inputs,
         )[0]
+
         quantized_output = quantized_output / (scale_base ** (scale_exponent))
 
         ratio = np.mean(quantized_output / (original_output + 1e-12))
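The added lines cast the dummy inputs to float64 for the quantized ORT session and then divide its output by scale_base ** scale_exponent before comparing against the float model. A self-contained sketch of that rescale-and-compare check, with made-up values and the same variable names:

# Made-up numbers; only the structure of the check mirrors the test above.
import numpy as np

scale_base, scale_exponent = 10, 3
original_output = np.array([0.5, -1.25, 2.0])
quantized_output = np.round(original_output * scale_base**scale_exponent)  # fixed-point stand-in

quantized_output = quantized_output / (scale_base ** (scale_exponent))
ratio = np.mean(quantized_output / (original_output + 1e-12))
assert abs(ratio - 1.0) < 1e-3  # dequantized output should track the float reference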
python/tests/onnx_quantizer_tests/layers_tests/test_quantize.py
@@ -37,7 +37,10 @@ class TestQuantize(BaseQuantizerTest):
         scale_base: int = 10,
         *,
         rescale: bool = True,
-    ) -> tuple[
+    ) -> tuple[
+        list[onnx.NodeProto],
+        tuple[str, LayerTestConfig, LayerTestSpec, NodeProto],
+    ]:
         """Common setup for quantization tests"""
         layer_name, config, test_spec = test_case_data
 
@@ -65,6 +68,8 @@ class TestQuantize(BaseQuantizerTest):
             initializer_map=initializer_map,
         )
 
+        if not isinstance(result, list):
+            result = [result]
         return result, (layer_name, config, test_spec, node)
 
     @pytest.mark.unit
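The new isinstance guard lets the rest of the test treat every quantizer result as a list of nodes, since some layer quantizers emit a single NodeProto and others emit several. A minimal standalone illustration of the same normalization, using plain ONNX nodes rather than the project's quantizers:

# Plain ONNX nodes stand in for quantizer output here; the normalization is the point.
import onnx
from onnx import helper

single = helper.make_node("Relu", ["x"], ["y"])
several = [
    helper.make_node("Mul", ["x", "s"], ["t"]),
    helper.make_node("Relu", ["t"], ["y"]),
]

for result in (single, several):
    nodes = result if isinstance(result, list) else [result]
    assert all(isinstance(n, onnx.NodeProto) for n in nodes)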
python/tests/onnx_quantizer_tests/test_registered_quantizers.py
@@ -49,11 +49,10 @@ def validate_quantized_node(node_result: onnx.NodeProto, op_type: str) -> None:
     assert node_result.output, f"Missing outputs for {op_type}"
 
     try:
-        # Create a minimal
+        # Create a minimal graph with dummy IOs to satisfy ONNX requirements
         temp_graph = onnx.GraphProto()
         temp_graph.name = "temp_graph"
 
-        # Add dummy inputs/outputs to satisfy graph requirements
         for inp in node_result.input:
             if not any(vi.name == inp for vi in temp_graph.input):
                 temp_graph.input.append(
@@ -63,6 +62,7 @@ def validate_quantized_node(node_result: onnx.NodeProto, op_type: str) -> None:
                         [1],
                     ),
                 )
+
         for out in node_result.output:
             if not any(vi.name == out for vi in temp_graph.output):
                 temp_graph.output.append(
@@ -74,12 +74,16 @@ def validate_quantized_node(node_result: onnx.NodeProto, op_type: str) -> None:
                 )
 
         temp_graph.node.append(node_result)
-
-
-
-
+
+        # Explicit opset imports for default and contrib domains
+        temp_model = onnx.helper.make_model(
+            temp_graph,
+            opset_imports=[
+                onnx.helper.make_opsetid("", 22),
+                onnx.helper.make_opsetid("ai.onnx.contrib", 1),
+            ],
         )
-
+
         onnx.checker.check_model(temp_model)
     except onnx.checker.ValidationError as e:
         pytest.fail(f"ONNX node validation failed for {op_type}: {e}")
@@ -117,5 +121,10 @@ def test_registered_quantizer_quantize(
         for node_result in result:
            validate_quantized_node(node_result, op_type)
     else:
-
+        if inputs:
+            # Only assert if this op actually requires inputs
+            assert (
+                result.input
+            ), f"Missing inputs for {op_type}; required_inputs={inputs}"
+
         validate_quantized_node(result, op_type)
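validate_quantized_node now builds its throwaway model with explicit opset imports so that onnx.checker accepts nodes from the custom ai.onnx.contrib domain. The wrap-a-single-node-and-check pattern itself looks like the sketch below, shown with a plain Relu and only the default domain (the opset version here is illustrative; the suite pins "" to 22 and ai.onnx.contrib to 1):

# Standalone sketch of the single-node validation pattern; not code from the package.
import onnx
from onnx import helper

node = helper.make_node("Relu", ["x"], ["y"])
graph = helper.make_graph(
    [node],
    "temp_graph",
    [helper.make_tensor_value_info("x", onnx.TensorProto.FLOAT, [1])],
    [helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, [1])],
)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 13)])
onnx.checker.check_model(model)  # raises ValidationError if the node is malformed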
python/core/binaries/onnx_generic_circuit_1-2-0: Binary file
{jstprove-1.2.0.dist-info → jstprove-1.3.0.dist-info}/WHEEL: File without changes
{jstprove-1.2.0.dist-info → jstprove-1.3.0.dist-info}/entry_points.txt: File without changes
{jstprove-1.2.0.dist-info → jstprove-1.3.0.dist-info}/licenses/LICENSE: File without changes
{jstprove-1.2.0.dist-info → jstprove-1.3.0.dist-info}/top_level.txt: File without changes