JSTprove 1.0.0-py3-none-macosx_11_0_arm64.whl → 1.2.0-py3-none-macosx_11_0_arm64.whl
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Potentially problematic release.
This version of JSTprove might be problematic.
- {jstprove-1.0.0.dist-info → jstprove-1.2.0.dist-info}/METADATA +3 -3
- {jstprove-1.0.0.dist-info → jstprove-1.2.0.dist-info}/RECORD +60 -25
- python/core/binaries/onnx_generic_circuit_1-2-0 +0 -0
- python/core/circuit_models/generic_onnx.py +43 -9
- python/core/circuits/base.py +231 -71
- python/core/model_processing/converters/onnx_converter.py +114 -59
- python/core/model_processing/onnx_custom_ops/batchnorm.py +64 -0
- python/core/model_processing/onnx_custom_ops/maxpool.py +1 -1
- python/core/model_processing/onnx_custom_ops/mul.py +66 -0
- python/core/model_processing/onnx_custom_ops/relu.py +1 -1
- python/core/model_processing/onnx_quantizer/layers/add.py +54 -0
- python/core/model_processing/onnx_quantizer/layers/base.py +188 -1
- python/core/model_processing/onnx_quantizer/layers/batchnorm.py +224 -0
- python/core/model_processing/onnx_quantizer/layers/constant.py +1 -1
- python/core/model_processing/onnx_quantizer/layers/conv.py +20 -68
- python/core/model_processing/onnx_quantizer/layers/gemm.py +20 -66
- python/core/model_processing/onnx_quantizer/layers/maxpool.py +53 -43
- python/core/model_processing/onnx_quantizer/layers/mul.py +53 -0
- python/core/model_processing/onnx_quantizer/layers/relu.py +20 -35
- python/core/model_processing/onnx_quantizer/layers/sub.py +54 -0
- python/core/model_processing/onnx_quantizer/onnx_op_quantizer.py +43 -1
- python/core/utils/general_layer_functions.py +17 -12
- python/core/utils/model_registry.py +6 -3
- python/scripts/gen_and_bench.py +2 -2
- python/tests/circuit_e2e_tests/other_e2e_test.py +202 -9
- python/tests/circuit_parent_classes/test_circuit.py +561 -38
- python/tests/circuit_parent_classes/test_onnx_converter.py +22 -13
- python/tests/onnx_quantizer_tests/__init__.py +1 -0
- python/tests/onnx_quantizer_tests/layers/__init__.py +13 -0
- python/tests/onnx_quantizer_tests/layers/add_config.py +102 -0
- python/tests/onnx_quantizer_tests/layers/base.py +279 -0
- python/tests/onnx_quantizer_tests/layers/batchnorm_config.py +190 -0
- python/tests/onnx_quantizer_tests/layers/constant_config.py +39 -0
- python/tests/onnx_quantizer_tests/layers/conv_config.py +154 -0
- python/tests/onnx_quantizer_tests/layers/factory.py +142 -0
- python/tests/onnx_quantizer_tests/layers/flatten_config.py +61 -0
- python/tests/onnx_quantizer_tests/layers/gemm_config.py +160 -0
- python/tests/onnx_quantizer_tests/layers/maxpool_config.py +82 -0
- python/tests/onnx_quantizer_tests/layers/mul_config.py +102 -0
- python/tests/onnx_quantizer_tests/layers/relu_config.py +61 -0
- python/tests/onnx_quantizer_tests/layers/reshape_config.py +61 -0
- python/tests/onnx_quantizer_tests/layers/sub_config.py +102 -0
- python/tests/onnx_quantizer_tests/layers_tests/__init__.py +0 -0
- python/tests/onnx_quantizer_tests/layers_tests/base_test.py +94 -0
- python/tests/onnx_quantizer_tests/layers_tests/test_check_model.py +115 -0
- python/tests/onnx_quantizer_tests/layers_tests/test_e2e.py +196 -0
- python/tests/onnx_quantizer_tests/layers_tests/test_error_cases.py +59 -0
- python/tests/onnx_quantizer_tests/layers_tests/test_integration.py +198 -0
- python/tests/onnx_quantizer_tests/layers_tests/test_quantize.py +267 -0
- python/tests/onnx_quantizer_tests/layers_tests/test_scalability.py +109 -0
- python/tests/onnx_quantizer_tests/layers_tests/test_validation.py +45 -0
- python/tests/onnx_quantizer_tests/test_base_layer.py +228 -0
- python/tests/onnx_quantizer_tests/test_exceptions.py +99 -0
- python/tests/onnx_quantizer_tests/test_onnx_op_quantizer.py +246 -0
- python/tests/onnx_quantizer_tests/test_registered_quantizers.py +121 -0
- python/tests/onnx_quantizer_tests/testing_helper_functions.py +17 -0
- python/core/binaries/onnx_generic_circuit_1-0-0 +0 -0
- {jstprove-1.0.0.dist-info → jstprove-1.2.0.dist-info}/WHEEL +0 -0
- {jstprove-1.0.0.dist-info → jstprove-1.2.0.dist-info}/entry_points.txt +0 -0
- {jstprove-1.0.0.dist-info → jstprove-1.2.0.dist-info}/licenses/LICENSE +0 -0
- {jstprove-1.0.0.dist-info → jstprove-1.2.0.dist-info}/top_level.txt +0 -0
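The bulk of the new surface is a provider/factory test harness for the ONNX quantizer: each supported op ships a *ConfigProvider that returns a baseline LayerTestConfig plus a list of LayerTestSpec objects assembled through a fluent builder (valid_test / error_test / e2e_test / edge_case_test). The hunks below show the per-layer configs. As a rough, hypothetical sketch of how such a builder chain can work (the real implementation lives in python/tests/onnx_quantizer_tests/layers/base.py and may differ):

# Hypothetical sketch only; it illustrates the chaining pattern used in the
# configs below, not the actual JSTprove implementation.
from dataclasses import dataclass, field


@dataclass
class LayerTestSpec:
    name: str
    kind: str  # e.g. "valid", "error", "e2e", "edge_case"
    description: str = ""
    attr_overrides: dict = field(default_factory=dict)
    tags: tuple = ()


class _SpecBuilder:
    def __init__(self, name: str, kind: str) -> None:
        self._spec = LayerTestSpec(name=name, kind=kind)

    def description(self, text: str) -> "_SpecBuilder":
        self._spec.description = text
        return self  # returning self is what enables the chaining

    def override_attrs(self, **attrs) -> "_SpecBuilder":
        self._spec.attr_overrides.update(attrs)
        return self

    def tags(self, *tags: str) -> "_SpecBuilder":
        self._spec.tags = tags
        return self

    def build(self) -> LayerTestSpec:
        return self._spec


def valid_test(name: str) -> _SpecBuilder:
    return _SpecBuilder(name, "valid")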
--- /dev/null
+++ python/tests/onnx_quantizer_tests/layers/gemm_config.py
@@ -0,0 +1,160 @@
+import numpy as np
+
+from python.core.model_processing.onnx_quantizer.exceptions import InvalidParamError
+from python.tests.onnx_quantizer_tests import TEST_RNG_SEED
+from python.tests.onnx_quantizer_tests.layers.base import (
+    LayerTestSpec,
+    e2e_test,
+    error_test,
+    valid_test,
+)
+from python.tests.onnx_quantizer_tests.layers.factory import (
+    BaseLayerConfigProvider,
+    LayerTestConfig,
+)
+
+
+class GemmConfigProvider(BaseLayerConfigProvider):
+    """Test configuration provider for Gemm layers"""
+
+    @property
+    def layer_name(self) -> str:
+        return "Gemm"
+
+    def get_config(self) -> LayerTestConfig:
+        rng = np.random.default_rng(TEST_RNG_SEED)
+        return LayerTestConfig(
+            op_type="Gemm",
+            valid_inputs=["input", "gemm_weight", "gemm_bias"],
+            valid_attributes={"alpha": 1.0, "beta": 1.0, "transA": 0, "transB": 0},
+            required_initializers={
+                "gemm_weight": rng.normal(0, 1, (128, 256)),
+                "gemm_bias": rng.normal(0, 1, (1, 256)),
+            },
+            input_shapes={"input": [1, 128]},  # Match weight input dimension K=128
+            output_shapes={
+                "gemm_output": [1, 256],
+            },  # Match weight output dimension N=256
+        )
+
+    def get_test_specs(self) -> list[LayerTestSpec]:
+        """Return test specifications for Gemm layers"""
+        rng = np.random.default_rng(TEST_RNG_SEED)
+        return [
+            # --- VALID TESTS ---
+            valid_test("basic")
+            .description("Basic Gemm operation (no transposes, alpha=1, beta=1)")
+            .tags("basic")
+            .build(),
+            valid_test("transposed_weights")
+            .description("Gemm with transposed weight matrix (transB=1)")
+            .override_attrs(transB=1)
+            .override_initializer(
+                "gemm_weight",
+                rng.normal(0, 1, (256, 128)),
+            )  # Transposed shape
+            .tags("transpose", "transB")
+            .build(),
+            valid_test("transposed_input")
+            .description("Gemm with transposed input (transA=1)")
+            .override_attrs(transA=1)
+            .override_input_shapes(input=[128, 1])  # Aᵀ shape → (K, M)
+            .override_output_shapes(gemm_output=[1, 256])
+            .tags("transpose", "transA")
+            .build(),
+            valid_test("double_transpose")
+            .description("Gemm with transA=1 and transB=1")
+            .override_attrs(transA=1, transB=1)
+            .override_input_shapes(input=[128, 1])
+            .override_initializer("gemm_weight", rng.normal(0, 1, (256, 128)))
+            .override_output_shapes(gemm_output=[1, 256])
+            .tags("transpose", "transA", "transB")
+            .build(),
+            e2e_test("e2e_basic")
+            .description("End-to-end test for basic Gemm layer")
+            .override_attrs(alpha=1.0, beta=1.0, transA=0, transB=0)
+            .override_input_shapes(input=[1, 4])
+            .override_output_shapes(gemm_output=[1, 8])
+            .override_initializer("gemm_weight", rng.normal(0, 1, (4, 8)))
+            .override_initializer("gemm_bias", rng.normal(0, 1, (1, 8)))
+            .tags("e2e", "basic")
+            .build(),
+            e2e_test("e2e_transA_small")
+            .description("Small end-to-end Gemm test with transposed input (transA=1)")
+            .override_attrs(transA=1, transB=0, alpha=1.0, beta=1.0)
+            .override_input_shapes(input=[4, 1])  # A^T shape → (K, M)
+            .override_output_shapes(gemm_output=[1, 6])
+            .override_initializer("gemm_weight", rng.normal(0, 1, (4, 6)))
+            .override_initializer("gemm_bias", rng.normal(0, 1, (1, 6)))
+            .tags("e2e", "transpose", "transA", "small")
+            .build(),
+            e2e_test("e2e_transB_small")
+            .description(
+                "Small end-to-end Gemm test with transposed weights (transB=1)",
+            )
+            .override_attrs(transA=0, transB=1, alpha=1.0, beta=1.0)
+            .override_input_shapes(input=[1, 4])  # A shape
+            .override_output_shapes(gemm_output=[1, 6])
+            .override_initializer("gemm_weight", rng.normal(0, 1, (6, 4)))  # B^T shape
+            .override_initializer("gemm_bias", rng.normal(0, 1, (1, 6)))
+            .tags("e2e", "transpose", "transB", "small")
+            .build(),
+            e2e_test("e2e_transA_transB_small")
+            .description("Small end-to-end Gemm test with both matrices transposed")
+            .override_attrs(transA=1, transB=1, alpha=1.0, beta=1.0)
+            .override_input_shapes(input=[4, 1])  # A^T shape
+            .override_output_shapes(gemm_output=[1, 6])
+            .override_initializer("gemm_weight", rng.normal(0, 1, (6, 4)))  # B^T shape
+            .override_initializer("gemm_bias", rng.normal(0, 1, (1, 6)))
+            .tags("e2e", "transpose", "transA", "transB", "small")
+            .build(),
+            # --- ERROR TESTS ---
+            # Add check on weights matrix in check_supported
+            error_test("invalid_alpha_type")
+            .description("Invalid alpha type (should be numeric)")
+            .override_attrs(alpha=-1.0)
+            .expects_error(
+                InvalidParamError,
+                "alpha value of -1.0 not supported [Attribute: alpha] [Expected: 1.0]",
+            )
+            .tags("invalid_param", "alpha")
+            .build(),
+            error_test("no_bias")
+            .description("Gemm without bias term (beta=0 should ignore bias)")
+            .override_inputs("input", "gemm_weight")
+            .override_attrs(beta=0.0)
+            .expects_error(InvalidParamError, match="3 inputs")
+            .tags("no_bias")
+            .build(),
+            error_test("different_alpha_beta")
+            .description("Gemm with different alpha and beta scaling factors")
+            .override_attrs(alpha=0.5, beta=2.0)
+            .expects_error(
+                InvalidParamError,
+                "alpha value of 0.5 not supported [Attribute: alpha] [Expected: 1.0]",
+            )
+            .tags("scaling", "alpha_beta")
+            .build(),
+            error_test("invalid_transA_value")
+            .description("transA must be 0 or 1")
+            .override_attrs(transA=2)
+            .expects_error(InvalidParamError, "transA value of 2 not supported")
+            .tags("transpose", "invalid_attr")
+            .build(),
+            error_test("invalid_transB_value")
+            .description("transB must be 0 or 1")
+            .override_attrs(transB=-1)
+            .expects_error(InvalidParamError, "transB value of -1 not supported")
+            .tags("transpose", "invalid_attr")
+            .build(),
+            # --- EDGE CASE / SKIPPED TESTS ---
+            valid_test("large_matrix")
+            .description("Large matrix multiplication performance test")
+            .override_initializer("gemm_weight", rng.normal(0, 1, (1024, 2048)))
+            .override_initializer("gemm_bias", rng.normal(0, 1, (1, 2048)))
+            .override_input_shapes(input=[1, 1024])
+            .override_output_shapes(gemm_output=[1, 2048])
+            .tags("large", "performance")
+            .skip("Performance test, not run by default")
+            .build(),
+        ]
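For reference, ONNX Gemm computes Y = alpha * A' * B' + beta * C, where A' = transpose(A) when transA=1 (and likewise for B). A quick numpy check of the shapes exercised by e2e_transB_small above (illustrative only, not part of the diff):

import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(0, 1, (1, 4))   # matches override_input_shapes(input=[1, 4])
B = rng.normal(0, 1, (6, 4))   # gemm_weight stored in B^T layout for transB=1
C = rng.normal(0, 1, (1, 6))   # gemm_bias

Y = 1.0 * A @ B.T + 1.0 * C    # Gemm with alpha=1, beta=1, transB=1
assert Y.shape == (1, 6)       # matches override_output_shapes(gemm_output=[1, 6])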
--- /dev/null
+++ python/tests/onnx_quantizer_tests/layers/maxpool_config.py
@@ -0,0 +1,82 @@
+from python.core.model_processing.onnx_quantizer.exceptions import InvalidParamError
+from python.tests.onnx_quantizer_tests.layers.base import (
+    e2e_test,
+    error_test,
+    valid_test,
+)
+from python.tests.onnx_quantizer_tests.layers.factory import (
+    BaseLayerConfigProvider,
+    LayerTestConfig,
+)
+
+
+class MaxPoolConfigProvider(BaseLayerConfigProvider):
+    """Test configuration provider for MaxPool layers"""
+
+    @property
+    def layer_name(self) -> str:
+        return "MaxPool"
+
+    def get_config(self) -> LayerTestConfig:
+        return LayerTestConfig(
+            op_type="MaxPool",
+            valid_inputs=["input"],
+            valid_attributes={
+                "kernel_shape": [2, 2],
+                "strides": [2, 2],
+                "dilations": [1, 1],
+                "pads": [0, 0, 0, 0],
+            },
+            required_initializers={},
+        )
+
+    def get_test_specs(self) -> list:
+        return [
+            # --- VALID TESTS ---
+            valid_test("basic")
+            .description("Basic MaxPool with 2x2 kernel and stride 2")
+            .tags("basic", "pool", "2d")
+            .build(),
+            valid_test("larger_kernel")
+            .description("MaxPool with 3x3 kernel and stride 1")
+            .override_attrs(kernel_shape=[3, 3], strides=[1, 1])
+            .tags("kernel_3x3", "stride_1", "pool")
+            .build(),
+            valid_test("dilated_pool")
+            .description("MaxPool with dilation > 1")
+            .override_attrs(dilations=[2, 2])
+            .tags("dilation", "pool")
+            .build(),
+            valid_test("stride_one")
+            .description("MaxPool with stride 1 (overlapping windows)")
+            .override_attrs(strides=[1, 1])
+            .tags("stride_1", "pool", "overlap")
+            .build(),
+            e2e_test("e2e_basic")
+            .description("End-to-end test for 2D MaxPool")
+            .override_input_shapes(input=[1, 3, 4, 4])
+            .override_output_shapes(maxpool_output=[1, 3, 2, 2])
+            .tags("e2e", "pool", "2d")
+            .build(),
+            # # --- ERROR TESTS ---
+            error_test("asymmetric_padding")
+            .description("MaxPool with asymmetric padding")
+            .override_attrs(pads=[1, 0, 2, 1])
+            .expects_error(InvalidParamError, "pads[2]=2 >= kernel[0]=2")
+            .tags("padding", "asymmetric", "pool")
+            .build(),
+            error_test("invalid_kernel_shape")
+            .description("Invalid kernel shape length (3D instead of 2D)")
+            .override_attrs(kernel_shape=[2, 2, 2])
+            .expects_error(InvalidParamError, "Currently only MaxPool2D is supported")
+            .tags("invalid_attr_length", "kernel_shape")
+            .build(),
+            # --- EDGE CASE / SKIPPED TEST ---
+            valid_test("large_input")
+            .description("Large MaxPool input (performance/stress test)")
+            .override_input_shapes(input=[1, 3, 64, 64])
+            .override_attrs(kernel_shape=[3, 3], strides=[2, 2])
+            .tags("large", "performance", "pool")
+            .skip("Performance test, skipped by default")
+            .build(),
+        ]
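The expected e2e_basic output shape follows from the standard ONNX pooling formula, out = floor((in + pad_begin + pad_end - ((kernel - 1) * dilation + 1)) / stride) + 1, applied per spatial axis. A small check (illustrative only, not part of the diff):

def maxpool_out_dim(size: int, kernel: int, stride: int,
                    dilation: int = 1, pads: tuple = (0, 0)) -> int:
    # Standard ONNX MaxPool output-size formula for one spatial axis.
    return (size + pads[0] + pads[1] - ((kernel - 1) * dilation + 1)) // stride + 1


# e2e_basic: input [1, 3, 4, 4], 2x2 kernel, stride 2, no padding -> [1, 3, 2, 2]
assert maxpool_out_dim(4, kernel=2, stride=2) == 2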
--- /dev/null
+++ python/tests/onnx_quantizer_tests/layers/mul_config.py
@@ -0,0 +1,102 @@
+import numpy as np
+
+from python.tests.onnx_quantizer_tests import TEST_RNG_SEED
+from python.tests.onnx_quantizer_tests.layers.base import (
+    BaseLayerConfigProvider,
+    LayerTestConfig,
+    LayerTestSpec,
+    e2e_test,
+    edge_case_test,
+    valid_test,
+)
+
+
+class MulConfigProvider(BaseLayerConfigProvider):
+    """Test configuration provider for Mul layer"""
+
+    @property
+    def layer_name(self) -> str:
+        return "Mul"
+
+    def get_config(self) -> LayerTestConfig:
+        return LayerTestConfig(
+            op_type="Mul",
+            valid_inputs=["A", "B"],
+            valid_attributes={},  # Mul has no layer-specific attributes
+            required_initializers={},
+            input_shapes={
+                "A": [1, 3, 4, 4],
+                "B": [1, 3, 4, 4],
+            },
+            output_shapes={
+                "mul_output": [1, 3, 4, 4],
+            },
+        )
+
+    def get_test_specs(self) -> list[LayerTestSpec]:
+        rng = np.random.default_rng(TEST_RNG_SEED)
+        return [
+            # --- VALID TESTS ---
+            valid_test("basic")
+            .description("Basic elementwise Mul of two same-shaped tensors")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 4, 4])
+            .tags("basic", "elementwise", "Mul")
+            .build(),
+            valid_test("broadcast_mul")
+            .description("mul with Numpy-style broadcasting along spatial dimensions")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 1, 1])
+            .tags("broadcast", "elementwise", "mul", "onnx14")
+            .build(),
+            valid_test("initializer_mul")
+            .description(
+                "mul where second input (B) is a tensor initializer instead of input",
+            )
+            .override_input_shapes(A=[1, 3, 4, 4])
+            .override_initializer("B", rng.normal(0, 1, (1, 3, 4, 4)))
+            .tags("initializer", "elementwise", "mul", "onnxruntime")
+            .build(),
+            valid_test("scalar_mul")
+            .description("mul scalar (initializer) to tensor")
+            .override_input_shapes(A=[1, 3, 4, 4])
+            .override_initializer("B", np.array([2.0], dtype=np.float32))
+            .tags("scalar", "elementwise", "mul")
+            .build(),
+            # # --- E2E TESTS ---
+            e2e_test("e2e_mul")
+            .description("End-to-end mul test with random inputs")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 4, 4])
+            .override_output_shapes(mul_output=[1, 3, 4, 4])
+            .tags("e2e", "mul", "2d")
+            .build(),
+            e2e_test("e2e_initializer_mul")
+            .description(
+                "mul where second input (B) is a tensor initializer instead of input",
+            )
+            .override_input_shapes(A=[1, 3, 4, 4])
+            .override_initializer("B", rng.normal(0, 1, (1, 3, 4, 4)))
+            .tags("initializer", "elementwise", "mul", "onnxruntime")
+            .build(),
+            e2e_test("e2e_broadcast_mul")
+            .description("mul with Numpy-style broadcasting along spatial dimensions")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 1, 1])
+            .tags("broadcast", "elementwise", "mul", "onnx14")
+            .build(),
+            e2e_test("e2e_scalar_mul")
+            .description("mul scalar (initializer) to tensor")
+            .override_input_shapes(A=[1, 3, 4, 4])
+            .override_initializer("B", np.array([2.0], dtype=np.float32))
+            .tags("scalar", "elementwise", "mul")
+            .build(),
+            # # --- EDGE CASES ---
+            edge_case_test("empty_tensor")
+            .description("mul with empty tensor input (zero elements)")
+            .override_input_shapes(A=[0], B=[0])
+            .tags("edge", "empty", "mul")
+            .build(),
+            edge_case_test("large_tensor")
+            .description("Large tensor mul performance/stress test")
+            .override_input_shapes(A=[1, 64, 256, 256], B=[1, 64, 256, 256])
+            .tags("large", "performance", "mul")
+            .skip("Performance test, skipped by default")
+            .build(),
+        ]
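broadcast_mul and e2e_broadcast_mul rely on numpy-style broadcasting (ONNX opset 7+): the [1, 3, 1, 1] operand acts as a per-channel scale stretched across the spatial axes (illustrative only, not part of the diff):

import numpy as np

A = np.ones((1, 3, 4, 4), dtype=np.float32)
B = np.arange(3, dtype=np.float32).reshape(1, 3, 1, 1)  # per-channel factors

out = A * B                       # B broadcasts over the two spatial axes
assert out.shape == (1, 3, 4, 4)
assert float(out[0, 2, 0, 0]) == 2.0  # channel 2 scaled by 2.0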
--- /dev/null
+++ python/tests/onnx_quantizer_tests/layers/relu_config.py
@@ -0,0 +1,61 @@
+from python.tests.onnx_quantizer_tests.layers.base import (
+    e2e_test,
+    valid_test,
+)
+from python.tests.onnx_quantizer_tests.layers.factory import (
+    BaseLayerConfigProvider,
+    LayerTestConfig,
+)
+
+
+class ReluConfigProvider(BaseLayerConfigProvider):
+    """Test configuration provider for Relu layers"""
+
+    @property
+    def layer_name(self) -> str:
+        return "Relu"
+
+    def get_config(self) -> LayerTestConfig:
+        return LayerTestConfig(
+            op_type="Relu",
+            valid_inputs=["input"],
+            valid_attributes={},
+            required_initializers={},
+        )
+
+    def get_test_specs(self) -> list:
+        return [
+            # --- VALID TESTS ---
+            valid_test("basic")
+            .description("Basic ReLU activation")
+            .tags("basic", "activation")
+            .build(),
+            valid_test("negative_inputs")
+            .description("ReLU should zero out negative input values")
+            .override_input_shapes(input=[1, 3, 4, 4])
+            .tags("activation", "negative_values")
+            .build(),
+            valid_test("high_dimension_input")
+            .description("ReLU applied to a 5D input tensor (NCHWT layout)")
+            .override_input_shapes(input=[1, 3, 4, 4, 2])
+            .tags("activation", "high_dim", "5d")
+            .build(),
+            valid_test("scalar_input")
+            .description("ReLU with scalar input (edge case)")
+            .override_input_shapes(input=[1])
+            .tags("activation", "scalar")
+            .build(),
+            e2e_test("e2e_basic")
+            .description("End-to-end test for ReLU activation")
+            .override_input_shapes(input=[1, 3, 4, 4])
+            .override_output_shapes(relu_output=[1, 3, 4, 4])
+            .tags("e2e", "activation")
+            .build(),
+            # --- EDGE CASE / SKIPPED TEST ---
+            valid_test("large_input")
+            .description("Large input tensor for ReLU (performance/stress test)")
+            .override_input_shapes(input=[1, 3, 512, 512])
+            .tags("large", "performance", "activation")
+            .skip("Performance test, skipped by default")
+            .build(),
+        ]
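The negative_inputs spec exercises ReLU's defining property, y = max(x, 0); in plain numpy terms (illustrative only, not part of the diff):

import numpy as np

x = np.array([-2.0, -0.5, 0.0, 1.5], dtype=np.float32)
y = np.maximum(x, 0.0)  # ReLU zeroes the negative entries, keeps the rest
assert y.tolist() == [0.0, 0.0, 0.0, 1.5]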
--- /dev/null
+++ python/tests/onnx_quantizer_tests/layers/reshape_config.py
@@ -0,0 +1,61 @@
+import numpy as np
+
+from python.tests.onnx_quantizer_tests.layers.base import (
+    e2e_test,
+    valid_test,
+)
+from python.tests.onnx_quantizer_tests.layers.factory import (
+    BaseLayerConfigProvider,
+    LayerTestConfig,
+)
+
+
+class ReshapeConfigProvider(BaseLayerConfigProvider):
+    """Test configuration provider for Reshape layers"""
+
+    @property
+    def layer_name(self) -> str:
+        return "Reshape"
+
+    def get_config(self) -> LayerTestConfig:
+        return LayerTestConfig(
+            op_type="Reshape",
+            valid_inputs=["input", "shape"],
+            valid_attributes={},
+            required_initializers={"shape": np.array([1, -1])},
+        )
+
+    def get_test_specs(self) -> list:
+        return [
+            # --- VALID TESTS ---
+            valid_test("basic")
+            .description("Basic Reshape from (1,2,3,4) to (1,24)")
+            .tags("basic", "reshape")
+            .build(),
+            valid_test("reshape_expand_dims")
+            .description("Reshape expanding dimensions (1,24) → (1,3,8)")
+            .override_input_shapes(input=[1, 24])
+            .tags("reshape", "expand")
+            .build(),
+            valid_test("reshape_flatten")
+            .description("Reshape to flatten spatial dimensions (1,3,4,4) → (1,48)")
+            .override_input_shapes(input=[1, 24])
+            .override_initializer("shape", np.array([1, 3, -1]))
+            .tags("reshape", "flatten")
+            .build(),
+            e2e_test("e2e_basic")
+            .description("End-to-end test for Reshape layer")
+            .override_input_shapes(input=[1, 2, 3, 4])
+            .override_output_shapes(reshape_output=[1, 24])
+            .override_initializer("shape", np.array([1, -1]))
+            .tags("e2e", "reshape")
+            .build(),
+            # --- EDGE CASE / SKIPPED TEST ---
+            valid_test("large_input")
+            .description("Large reshape performance test")
+            .override_input_shapes(input=[1, 3, 256, 256])
+            .override_initializer("shape", np.array([1, -1]))
+            .tags("large", "performance", "reshape")
+            # .skip("Performance test, skipped by default")
+            .build(),
+        ]
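The shape initializer [1, -1] uses the inferred-dimension convention shared by ONNX Reshape and numpy: at most one entry may be -1, and it is solved from the total element count (illustrative only, not part of the diff):

import numpy as np

x = np.zeros((1, 2, 3, 4), dtype=np.float32)
y = x.reshape(1, -1)       # -1 is inferred as 24 = 2 * 3 * 4
assert y.shape == (1, 24)  # matches e2e_basic's reshape_output=[1, 24]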
--- /dev/null
+++ python/tests/onnx_quantizer_tests/layers/sub_config.py
@@ -0,0 +1,102 @@
+import numpy as np
+
+from python.tests.onnx_quantizer_tests import TEST_RNG_SEED
+from python.tests.onnx_quantizer_tests.layers.base import (
+    BaseLayerConfigProvider,
+    LayerTestConfig,
+    LayerTestSpec,
+    e2e_test,
+    edge_case_test,
+    valid_test,
+)
+
+
+class SubConfigProvider(BaseLayerConfigProvider):
+    """Test configuration provider for Sub layer"""
+
+    @property
+    def layer_name(self) -> str:
+        return "Sub"
+
+    def get_config(self) -> LayerTestConfig:
+        return LayerTestConfig(
+            op_type="Sub",
+            valid_inputs=["A", "B"],
+            valid_attributes={},  # Sub has no layer-specific attributes
+            required_initializers={},
+            input_shapes={
+                "A": [1, 3, 4, 4],
+                "B": [1, 3, 4, 4],
+            },
+            output_shapes={
+                "sub_output": [1, 3, 4, 4],
+            },
+        )
+
+    def get_test_specs(self) -> list[LayerTestSpec]:
+        rng = np.random.default_rng(TEST_RNG_SEED)
+        return [
+            # --- VALID TESTS ---
+            valid_test("basic")
+            .description("Basic elementwise Sub of two same-shaped tensors")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 4, 4])
+            .tags("basic", "elementwise", "Sub")
+            .build(),
+            valid_test("broadcast_Sub")
+            .description("Sub with Numpy-style broadcasting along spatial dimensions")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 1, 1])
+            .tags("broadcast", "elementwise", "Sub", "onnx14")
+            .build(),
+            valid_test("initializer_Sub")
+            .description(
+                "Sub where second input (B) is a tensor initializer instead of input",
+            )
+            .override_input_shapes(A=[1, 3, 4, 4])
+            .override_initializer("B", rng.normal(0, 1, (1, 3, 4, 4)))
+            .tags("initializer", "elementwise", "Sub", "onnxruntime")
+            .build(),
+            valid_test("scalar_Sub")
+            .description("Sub scalar (initializer) to tensor")
+            .override_input_shapes(A=[1, 3, 4, 4])
+            .override_initializer("B", np.array([2.0], dtype=np.float32))
+            .tags("scalar", "elementwise", "Sub")
+            .build(),
+            # --- E2E TESTS ---
+            e2e_test("e2e_Sub")
+            .description("End-to-end Sub test with random inputs")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 4, 4])
+            .override_output_shapes(sub_output=[1, 3, 4, 4])
+            .tags("e2e", "Sub", "2d")
+            .build(),
+            e2e_test("e2e_initializer_Sub")
+            .description(
+                "Sub where second input (B) is a tensor initializer instead of input",
+            )
+            .override_input_shapes(A=[1, 3, 4, 4])
+            .override_initializer("B", rng.normal(0, 1, (1, 3, 4, 4)))
+            .tags("initializer", "elementwise", "Sub", "onnxruntime")
+            .build(),
+            e2e_test("e2e_broadcast_Sub")
+            .description("Sub with Numpy-style broadcasting along spatial dimensions")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 1, 1])
+            .tags("broadcast", "elementwise", "Sub", "onnx14")
+            .build(),
+            e2e_test("e2e_scalar_Sub")
+            .description("Sub scalar (initializer) to tensor")
+            .override_input_shapes(A=[1, 3, 4, 4])
+            .override_initializer("B", np.array([2.0], dtype=np.float32))
+            .tags("scalar", "elementwise", "Sub")
+            .build(),
+            # # --- EDGE CASES ---
+            edge_case_test("empty_tensor")
+            .description("Sub with empty tensor input (zero elements)")
+            .override_input_shapes(A=[0], B=[0])
+            .tags("edge", "empty", "Sub")
+            .build(),
+            edge_case_test("large_tensor")
+            .description("Large tensor Sub performance/stress test")
+            .override_input_shapes(A=[1, 64, 256, 256], B=[1, 64, 256, 256])
+            .tags("large", "performance", "Sub")
+            .skip("Performance test, skipped by default")
+            .build(),
+        ]
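sub_config.py mirrors mul_config.py case for case; its empty_tensor edge case checks that zero-element inputs pass through cleanly, which in plain numpy terms is simply (illustrative only, not part of the diff):

import numpy as np

A = np.zeros((0,), dtype=np.float32)
B = np.zeros((0,), dtype=np.float32)

out = A - B               # elementwise Sub over zero elements
assert out.shape == (0,)  # shape is preserved; there is nothing to compute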
File without changes
--- /dev/null
+++ python/tests/onnx_quantizer_tests/layers_tests/base_test.py
@@ -0,0 +1,94 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, ClassVar
+
+import pytest
+from onnx import TensorProto, helper
+
+if TYPE_CHECKING:
+    from onnx import ModelProto
+
+    from python.tests.onnx_quantizer_tests.layers.base import (
+        LayerTestConfig,
+        LayerTestSpec,
+    )
+from python.core.model_processing.onnx_quantizer.onnx_op_quantizer import (
+    ONNXOpQuantizer,
+)
+from python.tests.onnx_quantizer_tests.layers.factory import TestLayerFactory
+
+
+class BaseQuantizerTest:
+    """Base test utilities for ONNX quantizer tests."""
+
+    __test__ = False  # Prevent pytest from collecting this class directly
+
+    _validation_failed_cases: ClassVar[set[str]] = set()
+
+    @pytest.fixture
+    def quantizer(self) -> ONNXOpQuantizer:
+        return ONNXOpQuantizer()
+
+    @pytest.fixture
+    def layer_configs(self) -> dict[str, LayerTestConfig]:
+        return TestLayerFactory.get_layer_configs()
+
+    @staticmethod
+    def _generate_test_id(
+        test_case_tuple: tuple[str, LayerTestConfig, LayerTestSpec],
+    ) -> str:
+        try:
+            layer_name, _, test_spec = test_case_tuple
+        except Exception:
+            return str(test_case_tuple)
+        else:
+            return f"{layer_name}_{test_spec.name}"
+
+    @classmethod
+    def _check_validation_dependency(
+        cls: BaseQuantizerTest,
+        test_case_data: tuple[str, LayerTestConfig, LayerTestSpec],
+    ) -> None:
+        layer_name, _, test_spec = test_case_data
+        test_case_id = f"{layer_name}_{test_spec.name}"
+        if test_case_id in cls._validation_failed_cases:
+            pytest.skip(f"Skipping because ONNX validation failed for {test_case_id}")
+
+    @staticmethod
+    def create_model_with_layers(
+        layer_types: list[str],
+        layer_configs: dict[str, LayerTestConfig],
+    ) -> ModelProto:
+        """Create a model composed of several layers."""
+        nodes, all_initializers = [], {}
+
+        for i, layer_type in enumerate(layer_types):
+            config = layer_configs[layer_type]
+            node = config.create_node(name_suffix=f"_{i}")
+            if i > 0:
+                prev_output = f"{layer_types[i-1].lower()}_output_{i-1}"
+                if node.input:
+                    node.input[0] = prev_output
+            nodes.append(node)
+            all_initializers.update(config.create_initializers())
+
+        graph = helper.make_graph(
+            nodes,
+            "test_graph",
+            [
+                helper.make_tensor_value_info(
+                    "input",
+                    TensorProto.FLOAT,
+                    [1, 16, 224, 224],
+                ),
+            ],
+            [
+                helper.make_tensor_value_info(
+                    f"{layer_types[-1].lower()}_output_{len(layer_types)-1}",
+                    TensorProto.FLOAT,
+                    [1, 10],
+                ),
+            ],
+            initializer=list(all_initializers.values()),
+        )
+        return helper.make_model(graph)
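A sketch of how the helper above might be driven, assuming TestLayerFactory.get_layer_configs() returns the providers' LayerTestConfig objects keyed by op type (illustrative only; the wiring is inferred from the code above, not verified against the package):

# Illustrative only: assumes the config dict is keyed by op type, as the
# layer_configs fixture and create_model_with_layers usage above suggest.
configs = TestLayerFactory.get_layer_configs()
model = BaseQuantizerTest.create_model_with_layers(["Relu", "MaxPool"], configs)
# Node outputs are chained via the "<op>_output_<i>" naming convention,
# so the Relu node's output feeds the MaxPool node's first input.
print([n.op_type for n in model.graph.node])  # expected: ['Relu', 'MaxPool']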