JSTprove: jstprove-1.1.0-py3-none-macosx_11_0_arm64.whl → jstprove-1.3.0-py3-none-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of JSTprove has been flagged as potentially problematic.

Files changed (41)
  1. {jstprove-1.1.0.dist-info → jstprove-1.3.0.dist-info}/METADATA +3 -3
  2. {jstprove-1.1.0.dist-info → jstprove-1.3.0.dist-info}/RECORD +40 -26
  3. python/core/binaries/onnx_generic_circuit_1-3-0 +0 -0
  4. python/core/circuits/base.py +29 -12
  5. python/core/circuits/errors.py +1 -2
  6. python/core/model_processing/converters/base.py +3 -3
  7. python/core/model_processing/converters/onnx_converter.py +28 -27
  8. python/core/model_processing/onnx_custom_ops/__init__.py +5 -4
  9. python/core/model_processing/onnx_custom_ops/batchnorm.py +64 -0
  10. python/core/model_processing/onnx_custom_ops/mul.py +66 -0
  11. python/core/model_processing/onnx_quantizer/exceptions.py +2 -2
  12. python/core/model_processing/onnx_quantizer/layers/base.py +101 -0
  13. python/core/model_processing/onnx_quantizer/layers/batchnorm.py +224 -0
  14. python/core/model_processing/onnx_quantizer/layers/clip.py +92 -0
  15. python/core/model_processing/onnx_quantizer/layers/max.py +49 -0
  16. python/core/model_processing/onnx_quantizer/layers/min.py +54 -0
  17. python/core/model_processing/onnx_quantizer/layers/mul.py +53 -0
  18. python/core/model_processing/onnx_quantizer/layers/sub.py +54 -0
  19. python/core/model_processing/onnx_quantizer/onnx_op_quantizer.py +43 -0
  20. python/core/model_templates/circuit_template.py +48 -38
  21. python/core/utils/errors.py +1 -1
  22. python/core/utils/scratch_tests.py +29 -23
  23. python/scripts/gen_and_bench.py +2 -2
  24. python/tests/circuit_e2e_tests/circuit_model_developer_test.py +18 -14
  25. python/tests/circuit_e2e_tests/helper_fns_for_tests.py +11 -13
  26. python/tests/circuit_parent_classes/test_ort_custom_layers.py +35 -53
  27. python/tests/onnx_quantizer_tests/layers/base.py +1 -3
  28. python/tests/onnx_quantizer_tests/layers/batchnorm_config.py +190 -0
  29. python/tests/onnx_quantizer_tests/layers/clip_config.py +127 -0
  30. python/tests/onnx_quantizer_tests/layers/max_config.py +100 -0
  31. python/tests/onnx_quantizer_tests/layers/min_config.py +94 -0
  32. python/tests/onnx_quantizer_tests/layers/mul_config.py +102 -0
  33. python/tests/onnx_quantizer_tests/layers/sub_config.py +102 -0
  34. python/tests/onnx_quantizer_tests/layers_tests/test_integration.py +6 -5
  35. python/tests/onnx_quantizer_tests/layers_tests/test_quantize.py +8 -1
  36. python/tests/onnx_quantizer_tests/test_registered_quantizers.py +17 -8
  37. python/core/binaries/onnx_generic_circuit_1-1-0 +0 -0
  38. {jstprove-1.1.0.dist-info → jstprove-1.3.0.dist-info}/WHEEL +0 -0
  39. {jstprove-1.1.0.dist-info → jstprove-1.3.0.dist-info}/entry_points.txt +0 -0
  40. {jstprove-1.1.0.dist-info → jstprove-1.3.0.dist-info}/licenses/LICENSE +0 -0
  41. {jstprove-1.1.0.dist-info → jstprove-1.3.0.dist-info}/top_level.txt +0 -0
python/tests/onnx_quantizer_tests/layers/batchnorm_config.py
@@ -0,0 +1,190 @@
+import numpy as np
+
+from python.tests.onnx_quantizer_tests import TEST_RNG_SEED
+from python.tests.onnx_quantizer_tests.layers.base import (
+    BaseLayerConfigProvider,
+    LayerTestConfig,
+    LayerTestSpec,
+    e2e_test,
+    valid_test,
+)
+
+
+class BatchNormConfigProvider(BaseLayerConfigProvider):
+    """Test configuration provider for BatchNorm (ONNX BatchNormalization op)"""
+
+    @property
+    def layer_name(self) -> str:
+        return "BatchNormalization"
+
+    def get_config(self) -> LayerTestConfig:
+        rng = np.random.default_rng(TEST_RNG_SEED)
+
+        # Default shapes: N x C x H x W
+        default_input_shape = [1, 3, 4, 4]
+        c = default_input_shape[1]
+
+        # The required initializers (scale, bias, mean, var) are length C.
+        return LayerTestConfig(
+            op_type="BatchNormalization",
+            valid_inputs=["X", "scale", "B", "input_mean", "input_var"],
+            valid_attributes={
+                "epsilon": 1e-5,
+                "momentum": 0.9,
+                "training_mode": 0,
+            },
+            required_initializers={
+                # Defaults are stored as numpy arrays with shape (C,)
+                "scale": rng.normal(1.0, 0.5, c).astype(np.float32),
+                "B": rng.normal(0.0, 0.5, c).astype(np.float32),
+                "input_mean": rng.normal(0.0, 1.0, c).astype(np.float32),
+                "input_var": np.abs(rng.normal(1.0, 0.5, c)).astype(np.float32),
+            },
+            input_shapes={"X": default_input_shape},
+            output_shapes={"batchnormalization_output": default_input_shape},
+        )
+
+    def get_test_specs(self) -> list[LayerTestSpec]:
+        rng = np.random.default_rng(TEST_RNG_SEED)
+        c = 3
+
+        return [
+            # Basic valid tests
+            valid_test("basic_inference")
+            .description("Basic BatchNormalization inference: standard shapes")
+            .tags("basic", "inference", "batchnorm")
+            .build(),
+            valid_test("different_input_shape")
+            .description("Inference with different spatial dims")
+            .override_input_shapes(X=[2, c, 8, 8])
+            .override_output_shapes(batchnormalization_output=[2, c, 8, 8])
+            .tags("inference", "spatial")
+            .build(),
+            valid_test("epsilon_variation")
+            .description("Inference with larger epsilon for numerical stability")
+            .override_attrs(epsilon=1e-3)
+            .tags("epsilon")
+            .build(),
+            valid_test("momentum_variation")
+            .description(
+                "Inference with non-default momentum (has no effect in inference mode)",
+            )
+            .override_attrs(momentum=0.5)
+            .tags("momentum")
+            .build(),
+            valid_test("zero_mean_input")
+            .description("Input with zero mean")
+            .override_initializer("input_mean", np.zeros((c,), dtype=np.float32))
+            .tags("edge", "zero_mean")
+            .build(),
+            # Scalar / broadcast style tests
+            valid_test("per_channel_zero_variance")
+            .description(
+                "Edge case: very small variance values (clamped by epsilon), inference",
+            )
+            .override_initializer("input_var", np.full((c,), 1e-8, dtype=np.float32))
+            .override_attrs(epsilon=1e-5)
+            .tags("edge", "small_variance")
+            .build(),
+            # E2E tests that set explicit initializer values
+            e2e_test("e2e_inference")
+            .description("E2E inference test with explicit initializers")
+            .override_input_shapes(X=[1, c, 2, 2])
+            .override_output_shapes(batchnormalization_output=[1, c, 2, 2])
+            .override_initializer("scale", rng.normal(1.0, 0.1, c).astype(np.float32))
+            .override_initializer("B", rng.normal(0.0, 0.1, c).astype(np.float32))
+            .override_initializer(
+                "input_mean",
+                rng.normal(0.0, 0.1, c).astype(np.float32),
+            )
+            .override_initializer(
+                "input_var",
+                np.abs(rng.normal(0.5, 0.2, c)).astype(np.float32),
+            )
+            .tags("e2e", "inference")
+            .build(),
+            e2e_test("e2e_inference_small_2x2")
+            .description("E2E inference with small 2x2 spatial input")
+            .override_input_shapes(X=[1, 3, 2, 2])
+            .override_output_shapes(batchnormalization_output=[1, 3, 2, 2])
+            .override_initializer("scale", np.array([1.0, 0.9, 1.1], dtype=np.float32))
+            .override_initializer("B", np.array([0.0, 0.1, -0.1], dtype=np.float32))
+            .override_initializer(
+                "input_mean",
+                np.array([0.5, -0.5, 0.0], dtype=np.float32),
+            )
+            .override_initializer(
+                "input_var",
+                np.array([0.25, 0.5, 0.1], dtype=np.float32),
+            )
+            .tags("e2e", "small", "2x2")
+            .build(),
+            e2e_test("e2e_inference_wide_input")
+            .description("E2E inference with wider input shape (C=4, H=2, W=8)")
+            .override_input_shapes(X=[2, 4, 2, 8])
+            .override_output_shapes(batchnormalization_output=[2, 4, 2, 8])
+            .override_initializer(
+                "scale",
+                np.array([1.0, 0.8, 1.2, 0.9], dtype=np.float32),
+            )
+            .override_initializer(
+                "B",
+                np.array([0.0, 0.1, -0.1, 0.05], dtype=np.float32),
+            )
+            .override_initializer(
+                "input_mean",
+                np.array([0.0, 0.5, -0.5, 0.2], dtype=np.float32),
+            )
+            .override_initializer(
+                "input_var",
+                np.array([1.0, 0.5, 0.25, 0.1], dtype=np.float32),
+            )
+            .tags("e2e", "wide", "C4")
+            .build(),
+            e2e_test("e2e_inference_batch2_channels3")
+            .description("E2E inference with batch size 2 and 3 channels")
+            .override_input_shapes(X=[2, 3, 4, 4])
+            .override_output_shapes(batchnormalization_output=[2, 3, 4, 4])
+            .override_initializer("scale", np.array([0.5, 1.0, 1.5], dtype=np.float32))
+            .override_initializer("B", np.array([0.0, 0.0, 0.0], dtype=np.float32))
+            .override_initializer(
+                "input_mean",
+                np.array([-0.5, 0.0, 0.5], dtype=np.float32),
+            )
+            .override_initializer(
+                "input_var",
+                np.array([0.2, 0.5, 0.8], dtype=np.float32),
+            )
+            .tags("e2e", "batch2", "C3")
+            .build(),
+            e2e_test("e2e_inference_high_epsilon")
+            .description("E2E inference with high epsilon for numerical stability")
+            .override_input_shapes(X=[1, 2, 4, 4])
+            .override_output_shapes(batchnormalization_output=[1, 2, 4, 4])
+            .override_initializer("scale", np.array([1.0, 1.0], dtype=np.float32))
+            .override_initializer("B", np.array([0.1, -0.1], dtype=np.float32))
+            .override_initializer("input_mean", np.array([0.0, 0.5], dtype=np.float32))
+            .override_initializer(
+                "input_var",
+                np.array([0.0, 0.0], dtype=np.float32),
+            )  # zero variance, clamped by epsilon
+            .override_attrs(epsilon=1e-2)
+            .tags("e2e", "high_epsilon", "numerical_stability")
+            .build(),
+            e2e_test("e2e_inference_non_square")
+            .description("E2E inference with non-square spatial dimensions")
+            .override_input_shapes(X=[1, 3, 2, 5])
+            .override_output_shapes(batchnormalization_output=[1, 3, 2, 5])
+            .override_initializer("scale", np.array([1.0, 0.9, 1.1], dtype=np.float32))
+            .override_initializer("B", np.array([0.0, 0.1, -0.1], dtype=np.float32))
+            .override_initializer(
+                "input_mean",
+                np.array([0.1, -0.1, 0.0], dtype=np.float32),
+            )
+            .override_initializer(
+                "input_var",
+                np.array([0.5, 0.25, 0.75], dtype=np.float32),
+            )
+            .tags("e2e", "non_square", "C3")
+            .build(),
+        ]
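For reference, these specs exercise the op's inference-mode formula, Y = (X - input_mean) / sqrt(input_var + epsilon) * scale + B, applied per channel. Below is a minimal numpy sketch of that spec-defined behavior using the e2e_inference_small_2x2 values above; it illustrates the ONNX op's semantics only, not JSTprove's quantized circuit, and the helper name batchnorm_reference is ours:

import numpy as np

def batchnorm_reference(x, scale, b, mean, var, epsilon=1e-5):
    # Per-channel parameters broadcast over the C axis of an NCHW tensor.
    cs = (1, -1, 1, 1)
    x_hat = (x - mean.reshape(cs)) / np.sqrt(var.reshape(cs) + epsilon)
    return scale.reshape(cs) * x_hat + b.reshape(cs)

rng = np.random.default_rng(0)  # any seed; the real tests use TEST_RNG_SEED
x = rng.normal(size=(1, 3, 2, 2)).astype(np.float32)
y = batchnorm_reference(
    x,
    scale=np.array([1.0, 0.9, 1.1], dtype=np.float32),
    b=np.array([0.0, 0.1, -0.1], dtype=np.float32),
    mean=np.array([0.5, -0.5, 0.0], dtype=np.float32),
    var=np.array([0.25, 0.5, 0.1], dtype=np.float32),
)
assert y.shape == x.shape  # BatchNormalization is shape-preserving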
python/tests/onnx_quantizer_tests/layers/clip_config.py
@@ -0,0 +1,127 @@
+from __future__ import annotations
+
+import numpy as np
+
+from python.tests.onnx_quantizer_tests import TEST_RNG_SEED
+from python.tests.onnx_quantizer_tests.layers.base import (
+    e2e_test,
+    edge_case_test,
+    valid_test,
+)
+from python.tests.onnx_quantizer_tests.layers.factory import (
+    BaseLayerConfigProvider,
+    LayerTestConfig,
+)
+
+
+class ClipConfigProvider(BaseLayerConfigProvider):
+    """Test configuration provider for elementwise Clip."""
+
+    @property
+    def layer_name(self) -> str:
+        return "Clip"
+
+    def get_config(self) -> LayerTestConfig:
+        # Treat min / max as optional extra inputs, scalar-shaped by default.
+        # Scalars are encoded as shape [1] for the test harness; ONNX/ORT
+        # will still broadcast them over A.
+        return LayerTestConfig(
+            op_type="Clip",
+            valid_inputs=["A", "min", "max"],
+            valid_attributes={},  # no Clip-specific attrs
+            required_initializers={},  # by default, all three can be dynamic inputs
+            input_shapes={
+                "A": [1, 3, 4, 4],
+                "min": [1],  # scalar-ish bound
+                "max": [1],  # scalar-ish bound
+            },
+            output_shapes={
+                "clip_output": [1, 3, 4, 4],
+            },
+        )
+
+    def get_test_specs(self) -> list:
+        rng = np.random.default_rng(TEST_RNG_SEED)
+
+        return [
+            # --- VALID TESTS ---
+            # Basic Clip with scalar min/max as dynamic inputs.
+            valid_test("basic_scalar_bounds")
+            .description("Clip with A, min, max all as inputs; min/max are scalars.")
+            .override_input_shapes(A=[1, 3, 4, 4], min=[1], max=[1])
+            .override_output_shapes(clip_output=[1, 3, 4, 4])
+            .tags("basic", "elementwise", "clip")
+            .build(),
+            # This keeps the name used by the integration tests:
+            #   Clip_broadcast_bounds
+            # Broadcasting here is just scalar → full tensor broadcast.
+            valid_test("broadcast_bounds")
+            .description(
+                "Clip with scalar bounds broadcast over all elements of A "
+                "(mirrors Max/Min broadcast tests but respects ORT's scalar bound "
+                "rules).",
+            )
+            .override_input_shapes(A=[1, 3, 2, 4], min=[1], max=[1])
+            .override_output_shapes(clip_output=[1, 3, 2, 4])
+            .tags("broadcast", "elementwise", "clip", "onnxruntime")
+            .build(),
+            # This keeps the name used by the integration tests:
+            #   Clip_initializer_bounds
+            valid_test("initializer_bounds")
+            .description(
+                "Clip where min/max are scalar initializers instead of inputs.",
+            )
+            .override_input_shapes(A=[1, 3, 4, 4])  # only A is a true input
+            # Scalar numpy values → ONNX initializers with shape ()
+            .override_initializer(
+                "min",
+                np.array(rng.uniform(-1.0, 0.0), dtype=np.float64),
+            )
+            .override_initializer(
+                "max",
+                np.array(rng.uniform(0.0, 2.0), dtype=np.float64),
+            )
+            .override_output_shapes(clip_output=[1, 3, 4, 4])
+            .tags("initializer", "elementwise", "clip", "onnxruntime")
+            .build(),
+            # --- E2E TESTS ---
+            e2e_test("e2e_small").description(
+                "End-to-end Clip with small random tensor and scalar bounds.",
+            )
+            # All three are treated as runtime inputs here;
+            # min/max are scalar-shaped [1].
+            .override_input_shapes(
+                A=[1, 3, 4, 4],
+                min=[1],
+                max=[1],
+            )
+            .override_output_shapes(clip_output=[1, 3, 4, 4])
+            .tags("e2e", "clip")
+            .build(),
+            e2e_test("e2e_initializer_bounds").description(
+                "End-to-end Clip where min/max are scalar initializers "
+                "instead of inputs.",
+            )
+            # Only A is a true runtime input; min/max are scalar initializers.
+            .override_input_shapes(
+                A=[1, 3, 4, 4],
+            )
+            .override_initializer(
+                "min",
+                np.array(rng.uniform(-1.0, 0.0), dtype=np.float64),
+            )
+            .override_initializer(
+                "max",
+                np.array(rng.uniform(0.0, 2.0), dtype=np.float64),
+            )
+            .override_output_shapes(clip_output=[1, 3, 4, 4])
+            .tags("e2e", "initializer", "clip")
+            .build(),
+            # --- EDGE / STRESS ---
+            edge_case_test("empty_tensor")
+            .description("Clip with empty tensor input and scalar bounds.")
+            .override_input_shapes(A=[0], min=[1], max=[1])
+            .override_output_shapes(clip_output=[0])
+            .tags("edge", "empty", "clip")
+            .build(),
+        ]
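The comments above distinguish two scalar encodings: min/max as shape-[1] runtime inputs versus shape-() initializers. Either way, the bound broadcasts over every element of A, which np.clip (a faithful float reference for ONNX Clip) makes easy to verify. A small sketch of ours, not part of the test harness:

import numpy as np

a = np.linspace(-2.0, 2.0, num=48, dtype=np.float32).reshape(1, 3, 4, 4)
lo_as_input = np.array([-1.0], dtype=np.float32)  # shape (1,): scalar-ish input
lo_as_init = np.array(-1.0, dtype=np.float32)     # shape (): scalar initializer
hi = np.array([1.5], dtype=np.float32)

out_input = np.clip(a, lo_as_input, hi)
out_init = np.clip(a, lo_as_init, hi)
assert np.array_equal(out_input, out_init)  # both encodings broadcast identically
assert out_input.min() >= -1.0 and out_input.max() <= 1.5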
python/tests/onnx_quantizer_tests/layers/max_config.py
@@ -0,0 +1,100 @@
+from __future__ import annotations
+
+import numpy as np
+
+from python.tests.onnx_quantizer_tests import TEST_RNG_SEED
+from python.tests.onnx_quantizer_tests.layers.base import (
+    e2e_test,
+    edge_case_test,
+    valid_test,
+)
+from python.tests.onnx_quantizer_tests.layers.factory import (
+    BaseLayerConfigProvider,
+    LayerTestConfig,
+)
+
+
+class MaxConfigProvider(BaseLayerConfigProvider):
+    """Test configuration provider for elementwise Max"""
+
+    @property
+    def layer_name(self) -> str:
+        return "Max"
+
+    def get_config(self) -> LayerTestConfig:
+        return LayerTestConfig(
+            op_type="Max",
+            valid_inputs=["A", "B"],
+            valid_attributes={},  # Max has no layer-specific attributes
+            required_initializers={},  # default: both A and B are dynamic inputs
+            input_shapes={
+                "A": [1, 3, 4, 4],
+                "B": [1, 3, 4, 4],
+            },
+            output_shapes={
+                "max_output": [1, 3, 4, 4],
+            },
+        )
+
+    def get_test_specs(self) -> list:
+        rng = np.random.default_rng(TEST_RNG_SEED)
+        return [
+            # --- VALID TESTS ---
+            valid_test("basic")
+            .description("Basic elementwise Max of two same-shaped tensors")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 4, 4])
+            .tags("basic", "elementwise", "max")
+            .build(),
+            valid_test("broadcast_max")
+            .description("Max with Numpy-style broadcasting along spatial dimensions")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 1, 1])
+            .tags("broadcast", "elementwise", "max", "onnx14")
+            .build(),
+            valid_test("initializer_max")
+            .description("Max where B is an initializer instead of an input")
+            .override_input_shapes(A=[1, 3, 4, 4])
+            .override_initializer(
+                "B",
+                rng.normal(0, 1, (1, 3, 4, 4)).astype(np.float32),
+            )
+            .tags("initializer", "elementwise", "max", "onnxruntime")
+            .build(),
+            # --- E2E TESTS ---
+            e2e_test("e2e_max")
+            .description("End-to-end Max test with random inputs")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 4, 4])
+            .override_output_shapes(max_output=[1, 3, 4, 4])
+            .tags("e2e", "max", "2d")
+            .build(),
+            e2e_test("e2e_broadcast_max")
+            .description(
+                "End-to-end Max with Numpy-style broadcasting along spatial dimensions",
+            )
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 1, 1])
+            .override_output_shapes(max_output=[1, 3, 4, 4])
+            .tags("e2e", "broadcast", "elementwise", "max", "onnx14")
+            .build(),
+            e2e_test("e2e_initializer_max")
+            .description("End-to-end Max where B is an initializer")
+            .override_input_shapes(A=[1, 3, 4, 4])
+            .override_initializer(
+                "B",
+                rng.normal(0, 1, (1, 3, 4, 4)).astype(np.float32),
+            )
+            .override_output_shapes(max_output=[1, 3, 4, 4])
+            .tags("e2e", "initializer", "elementwise", "max", "onnxruntime")
+            .build(),
+            # --- EDGE / STRESS ---
+            edge_case_test("empty_tensor")
+            .description("Max with empty tensor input (zero elements)")
+            .override_input_shapes(A=[0], B=[0])
+            .override_output_shapes(max_output=[0])
+            .tags("edge", "empty", "max")
+            .build(),
+            valid_test("large_tensor")
+            .description("Large tensor max performance/stress test")
+            .override_input_shapes(A=[1, 64, 256, 256], B=[1, 64, 256, 256])
+            .tags("large", "performance", "max")
+            .skip("Performance test, skipped by default")
+            .build(),
+        ]
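The broadcast_max and e2e_broadcast_max specs rely on the NumPy-style broadcasting that ONNX Max defines: with B shaped [1, 3, 1, 1], each channel's single value acts as a per-channel floor over the whole spatial extent. A short numpy sketch of that semantics (illustrative seed, not TEST_RNG_SEED):

import numpy as np

rng = np.random.default_rng(42)
a = rng.normal(size=(1, 3, 4, 4)).astype(np.float32)
b = rng.normal(size=(1, 3, 1, 1)).astype(np.float32)  # one floor value per channel

out = np.maximum(a, b)  # b broadcasts over the H and W axes
assert out.shape == (1, 3, 4, 4)
assert np.all(out >= a) and np.all(out >= b)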
python/tests/onnx_quantizer_tests/layers/min_config.py
@@ -0,0 +1,94 @@
+from __future__ import annotations
+
+import numpy as np
+
+from python.tests.onnx_quantizer_tests import TEST_RNG_SEED
+from python.tests.onnx_quantizer_tests.layers.base import (
+    e2e_test,
+    edge_case_test,
+    valid_test,
+)
+from python.tests.onnx_quantizer_tests.layers.factory import (
+    BaseLayerConfigProvider,
+    LayerTestConfig,
+)
+
+
+class MinConfigProvider(BaseLayerConfigProvider):
+    """Test configuration provider for elementwise Min"""
+
+    @property
+    def layer_name(self) -> str:
+        return "Min"
+
+    def get_config(self) -> LayerTestConfig:
+        return LayerTestConfig(
+            op_type="Min",
+            valid_inputs=["A", "B"],
+            valid_attributes={},  # Min has no layer-specific attributes
+            required_initializers={},  # default: both A and B are dynamic inputs
+            input_shapes={
+                "A": [1, 3, 4, 4],
+                "B": [1, 3, 4, 4],
+            },
+            output_shapes={
+                "min_output": [1, 3, 4, 4],
+            },
+        )
+
+    def get_test_specs(self) -> list:
+        rng = np.random.default_rng(TEST_RNG_SEED)
+        return [
+            # --- VALID TESTS ---
+            valid_test("basic")
+            .description("Basic elementwise Min of two same-shaped tensors")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 4, 4])
+            .tags("basic", "elementwise", "min")
+            .build(),
+            valid_test("broadcast_min")
+            .description("Min with Numpy-style broadcasting along spatial dimensions")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 1, 1])
+            .tags("broadcast", "elementwise", "min", "onnx14")
+            .build(),
+            valid_test("initializer_min")
+            .description("Min where B is an initializer instead of an input")
+            .override_input_shapes(A=[1, 3, 4, 4])
+            .override_initializer("B", rng.normal(0, 1, (1, 3, 4, 4)))
+            .tags("initializer", "elementwise", "min", "onnxruntime")
+            .build(),
+            # --- E2E TESTS ---
+            e2e_test("e2e_min")
+            .description("End-to-end Min test with random inputs")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 4, 4])
+            .override_output_shapes(min_output=[1, 3, 4, 4])
+            .tags("e2e", "min", "2d")
+            .build(),
+            e2e_test("e2e_broadcast_min")
+            .description(
+                "End-to-end Min with Numpy-style broadcasting along spatial dimensions",
+            )
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 1, 1])
+            .override_output_shapes(min_output=[1, 3, 4, 4])
+            .tags("e2e", "broadcast", "elementwise", "min", "onnx14")
+            .build(),
+            e2e_test("e2e_initializer_min")
+            .description("End-to-end Min where B is an initializer")
+            .override_input_shapes(A=[1, 3, 4, 4])
+            .override_initializer("B", rng.normal(0, 1, (1, 3, 4, 4)))
+            .override_output_shapes(min_output=[1, 3, 4, 4])
+            .tags("e2e", "initializer", "elementwise", "min", "onnxruntime")
+            .build(),
+            # --- EDGE / STRESS ---
+            edge_case_test("empty_tensor")
+            .description("Min with empty tensor input (zero elements)")
+            .override_input_shapes(A=[0], B=[0])
+            .override_output_shapes(min_output=[0])
+            .tags("edge", "empty", "min")
+            .build(),
+            valid_test("large_tensor")
+            .description("Large tensor min performance/stress test")
+            .override_input_shapes(A=[1, 64, 256, 256], B=[1, 64, 256, 256])
+            .tags("large", "performance", "min")
+            .skip("Performance test, skipped by default")
+            .build(),
+        ]
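MinConfigProvider mirrors MaxConfigProvider line for line, with np.minimum as the analogous float reference. For completeness, a sketch of driving one of these providers by hand; the class and its methods come from the diff above, but treating LayerTestConfig fields as plain attributes is our assumption about the harness types:

from python.tests.onnx_quantizer_tests.layers.min_config import MinConfigProvider

provider = MinConfigProvider()
print(provider.layer_name)  # "Min"

config = provider.get_config()
# Assumes LayerTestConfig keeps its constructor kwargs as attributes.
print(config.op_type, config.input_shapes)

for spec in provider.get_test_specs():
    # Specs built by the valid_test / e2e_test / edge_case_test builders above;
    # their internal fields are not shown in this diff, so only repr is printed.
    print(spec)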
python/tests/onnx_quantizer_tests/layers/mul_config.py
@@ -0,0 +1,102 @@
+import numpy as np
+
+from python.tests.onnx_quantizer_tests import TEST_RNG_SEED
+from python.tests.onnx_quantizer_tests.layers.base import (
+    BaseLayerConfigProvider,
+    LayerTestConfig,
+    LayerTestSpec,
+    e2e_test,
+    edge_case_test,
+    valid_test,
+)
+
+
+class MulConfigProvider(BaseLayerConfigProvider):
+    """Test configuration provider for Mul layer"""
+
+    @property
+    def layer_name(self) -> str:
+        return "Mul"
+
+    def get_config(self) -> LayerTestConfig:
+        return LayerTestConfig(
+            op_type="Mul",
+            valid_inputs=["A", "B"],
+            valid_attributes={},  # Mul has no layer-specific attributes
+            required_initializers={},
+            input_shapes={
+                "A": [1, 3, 4, 4],
+                "B": [1, 3, 4, 4],
+            },
+            output_shapes={
+                "mul_output": [1, 3, 4, 4],
+            },
+        )
+
+    def get_test_specs(self) -> list[LayerTestSpec]:
+        rng = np.random.default_rng(TEST_RNG_SEED)
+        return [
+            # --- VALID TESTS ---
+            valid_test("basic")
+            .description("Basic elementwise Mul of two same-shaped tensors")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 4, 4])
+            .tags("basic", "elementwise", "mul")
+            .build(),
+            valid_test("broadcast_mul")
+            .description("Mul with Numpy-style broadcasting along spatial dimensions")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 1, 1])
+            .tags("broadcast", "elementwise", "mul", "onnx14")
+            .build(),
+            valid_test("initializer_mul")
+            .description(
+                "Mul where the second input (B) is a tensor initializer instead of an input",
+            )
+            .override_input_shapes(A=[1, 3, 4, 4])
+            .override_initializer("B", rng.normal(0, 1, (1, 3, 4, 4)))
+            .tags("initializer", "elementwise", "mul", "onnxruntime")
+            .build(),
+            valid_test("scalar_mul")
+            .description("Mul of a tensor by a scalar initializer")
+            .override_input_shapes(A=[1, 3, 4, 4])
+            .override_initializer("B", np.array([2.0], dtype=np.float32))
+            .tags("scalar", "elementwise", "mul")
+            .build(),
+            # --- E2E TESTS ---
+            e2e_test("e2e_mul")
+            .description("End-to-end Mul test with random inputs")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 4, 4])
+            .override_output_shapes(mul_output=[1, 3, 4, 4])
+            .tags("e2e", "mul", "2d")
+            .build(),
+            e2e_test("e2e_initializer_mul")
+            .description(
+                "Mul where the second input (B) is a tensor initializer instead of an input",
+            )
+            .override_input_shapes(A=[1, 3, 4, 4])
+            .override_initializer("B", rng.normal(0, 1, (1, 3, 4, 4)))
+            .tags("initializer", "elementwise", "mul", "onnxruntime")
+            .build(),
+            e2e_test("e2e_broadcast_mul")
+            .description("Mul with Numpy-style broadcasting along spatial dimensions")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 1, 1])
+            .tags("broadcast", "elementwise", "mul", "onnx14")
+            .build(),
+            e2e_test("e2e_scalar_mul")
+            .description("Mul of a tensor by a scalar initializer")
+            .override_input_shapes(A=[1, 3, 4, 4])
+            .override_initializer("B", np.array([2.0], dtype=np.float32))
+            .tags("scalar", "elementwise", "mul")
+            .build(),
+            # --- EDGE CASES ---
+            edge_case_test("empty_tensor")
+            .description("Mul with empty tensor input (zero elements)")
+            .override_input_shapes(A=[0], B=[0])
+            .tags("edge", "empty", "mul")
+            .build(),
+            edge_case_test("large_tensor")
+            .description("Large tensor mul performance/stress test")
+            .override_input_shapes(A=[1, 64, 256, 256], B=[1, 64, 256, 256])
+            .tags("large", "performance", "mul")
+            .skip("Performance test, skipped by default")
+            .build(),
+        ]
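Alongside this config, the file list shows a dedicated quantizer layer (python/core/model_processing/onnx_quantizer/layers/mul.py) and a custom op (python/core/model_processing/onnx_custom_ops/mul.py) for Mul. The usual reason elementwise multiply needs its own quantizer handling: the product of two fixed-point values carries the scale factor twice, so it must be rescaled once. The sketch below is a generic fixed-point illustration of that effect, with an invented SCALE_BITS constant; it is not JSTprove's implementation:

import numpy as np

SCALE_BITS = 8          # illustrative constant, not taken from JSTprove
SCALE = 1 << SCALE_BITS

def quantize(x: float) -> np.int64:
    return np.int64(round(x * SCALE))

def dequantize(q: np.int64) -> float:
    return float(q) / SCALE

a, b = 1.5, -2.25
qa, qb = quantize(a), quantize(b)

# The raw integer product carries SCALE twice (SCALE**2), so one shift
# brings it back to a single SCALE; this is the rescale an elementwise
# Mul quantizer has to insert somewhere.
q_prod = (qa * qb) >> SCALE_BITS
assert abs(dequantize(q_prod) - a * b) < 1.0 / SCALE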