JSTprove: jstprove-1.2.0-py3-none-macosx_11_0_arm64.whl → jstprove-1.4.0-py3-none-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. {jstprove-1.2.0.dist-info → jstprove-1.4.0.dist-info}/METADATA +1 -1
  2. {jstprove-1.2.0.dist-info → jstprove-1.4.0.dist-info}/RECORD +32 -26
  3. python/core/binaries/onnx_generic_circuit_1-4-0 +0 -0
  4. python/core/circuits/base.py +29 -12
  5. python/core/circuits/errors.py +1 -2
  6. python/core/model_processing/converters/base.py +3 -3
  7. python/core/model_processing/onnx_custom_ops/__init__.py +5 -4
  8. python/core/model_processing/onnx_quantizer/exceptions.py +2 -2
  9. python/core/model_processing/onnx_quantizer/layers/base.py +79 -2
  10. python/core/model_processing/onnx_quantizer/layers/clip.py +92 -0
  11. python/core/model_processing/onnx_quantizer/layers/max.py +49 -0
  12. python/core/model_processing/onnx_quantizer/layers/maxpool.py +79 -4
  13. python/core/model_processing/onnx_quantizer/layers/min.py +54 -0
  14. python/core/model_processing/onnx_quantizer/onnx_op_quantizer.py +6 -0
  15. python/core/model_templates/circuit_template.py +48 -38
  16. python/core/utils/errors.py +1 -1
  17. python/core/utils/scratch_tests.py +29 -23
  18. python/tests/circuit_e2e_tests/circuit_model_developer_test.py +18 -14
  19. python/tests/circuit_e2e_tests/helper_fns_for_tests.py +11 -13
  20. python/tests/circuit_parent_classes/test_ort_custom_layers.py +35 -53
  21. python/tests/onnx_quantizer_tests/layers/base.py +1 -3
  22. python/tests/onnx_quantizer_tests/layers/clip_config.py +127 -0
  23. python/tests/onnx_quantizer_tests/layers/max_config.py +100 -0
  24. python/tests/onnx_quantizer_tests/layers/maxpool_config.py +106 -0
  25. python/tests/onnx_quantizer_tests/layers/min_config.py +94 -0
  26. python/tests/onnx_quantizer_tests/layers_tests/test_integration.py +6 -5
  27. python/tests/onnx_quantizer_tests/layers_tests/test_quantize.py +6 -1
  28. python/tests/onnx_quantizer_tests/test_registered_quantizers.py +17 -8
  29. python/core/binaries/onnx_generic_circuit_1-2-0 +0 -0
  30. {jstprove-1.2.0.dist-info → jstprove-1.4.0.dist-info}/WHEEL +0 -0
  31. {jstprove-1.2.0.dist-info → jstprove-1.4.0.dist-info}/entry_points.txt +0 -0
  32. {jstprove-1.2.0.dist-info → jstprove-1.4.0.dist-info}/licenses/LICENSE +0 -0
  33. {jstprove-1.2.0.dist-info → jstprove-1.4.0.dist-info}/top_level.txt +0 -0
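The substantive changes in this release are new quantizer layer handlers for Clip, Max, and Min, a much larger MaxPool handler, matching test configs for all four ops, and a rebuilt circuit binary (onnx_generic_circuit_1-4-0 replacing onnx_generic_circuit_1-2-0). As an aside on why these particular ops slot cleanly into a scaled-integer pipeline, here is a plain NumPy illustration (not JSTprove code): all three commute with a positive scale factor, so applying them to scaled tensors gives the same result as scaling afterwards.

import numpy as np

# Illustrative only: max/min/clip commute with a positive scale factor.
# The 2**21 mirrors the scale used in the tests further down; JSTprove's
# internal representation may differ.
scale = 2.0**21
rng = np.random.default_rng(0)
x = rng.normal(size=(1, 3, 4, 4))
y = rng.normal(size=(1, 3, 4, 4))
lo, hi = -0.5, 0.5

assert np.allclose(np.maximum(x * scale, y * scale), np.maximum(x, y) * scale)
assert np.allclose(np.minimum(x * scale, y * scale), np.minimum(x, y) * scale)
# clip commutes too, provided the bounds are scaled along with the data
assert np.allclose(
    np.clip(x * scale, lo * scale, hi * scale),
    np.clip(x, lo, hi) * scale,
)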
python/tests/circuit_parent_classes/test_ort_custom_layers.py
@@ -1,115 +1,97 @@
-import pytest
+from pathlib import Path
+
 import numpy as np
-import torch
 import onnx
-
-from onnx import TensorProto, shape_inference, helper, numpy_helper
+import pytest
+import torch
+from onnx import TensorProto, helper, shape_inference
 
 from python.core.model_processing.converters.onnx_converter import ONNXConverter
-from python.core.model_processing.onnx_custom_ops.onnx_helpers import extract_shape_dict
-from python.core.model_processing.onnx_quantizer.onnx_op_quantizer import ONNXOpQuantizer
-
-from onnxruntime import InferenceSession, SessionOptions
-from onnxruntime_extensions import get_library_path, OrtPyFunction
-from python.core.model_processing.onnx_custom_ops import conv
-
-from python.core.model_processing.onnx_custom_ops.conv import int64_conv
-from python.core.model_processing.onnx_custom_ops.gemm import int64_gemm7
 
 
 @pytest.fixture
-def tiny_conv_model_path(tmp_path):
+def tiny_conv_model_path(tmp_path: Path) -> Path:
     # Create input and output tensor info
-    input_tensor = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 4, 4])
-    output_tensor = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 1, 2, 2])
+    input_tensor = helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 1, 4, 4])
+    output_tensor = helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 1, 2, 2])
 
     # Kernel weights (3x3 ones)
-    W_init = helper.make_tensor(
-        name='W',
+    w_init = helper.make_tensor(
+        name="W",
         data_type=TensorProto.FLOAT,
         dims=[1, 1, 3, 3],
-        vals=np.ones((1 * 1 * 3 * 3), dtype=np.float32).tolist()
+        vals=np.ones((1 * 1 * 3 * 3), dtype=np.float32).tolist(),
     )
-    Z_init = helper.make_tensor(
-        name='Z',
+    z_init = helper.make_tensor(
+        name="Z",
         data_type=TensorProto.FLOAT,
         dims=[1],
-        vals=np.ones(( 1), dtype=np.float32).tolist()
+        vals=np.ones((1), dtype=np.float32).tolist(),
    )
 
     # Conv node with no padding, stride 1
     conv_node = helper.make_node(
-        'Conv',
-        inputs=['X', 'W', 'Z'],
-        outputs=['Y'],
+        "Conv",
+        inputs=["X", "W", "Z"],
+        outputs=["Y"],
         kernel_shape=[3, 3],
         pads=[0, 0, 0, 0],
         strides=[1, 1],
-        dilations = [1,1]
+        dilations=[1, 1],
     )
 
     # Build graph and model
     graph = helper.make_graph(
         nodes=[conv_node],
-        name='TinyConvGraph',
+        name="TinyConvGraph",
         inputs=[input_tensor],
         outputs=[output_tensor],
-        initializer=[W_init, Z_init]
+        initializer=[w_init, z_init],
     )
 
-    model = helper.make_model(graph, producer_name='tiny-conv-example')
+    model = helper.make_model(graph, producer_name="tiny-conv-example")
 
     # Save to a temporary file
     model_path = tmp_path / "tiny_conv.onnx"
    onnx.save(model, str(model_path))
 
-    return str(model_path)
+    return model_path
+
 
 @pytest.mark.integration
-def test_tiny_conv(tiny_conv_model_path):
+def test_tiny_conv(tiny_conv_model_path: Path, tmp_path: Path) -> None:
     path = tiny_conv_model_path
 
     converter = ONNXConverter()
 
-    X_input = np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4)
-    id_count = 0
+    # Load and validate original model
     model = onnx.load(path)
-    # Fix, can remove this next line
-    onnx.checker.check_model(model)
-
-    # Check the model and print Y"s shape information
     onnx.checker.check_model(model)
-    print(f"Before shape inference, the shape info of Y is:\n{model.graph.value_info}")
 
-    # Apply shape inference on the model
+    # Apply shape inference and validate
     inferred_model = shape_inference.infer_shapes(model)
-
-    # Check the model and print Y"s shape information
     onnx.checker.check_model(inferred_model)
-    # print(f"After shape inference, the shape info of Y is:\n{inferred_model.graph.value_info}")
-
-
-    domain_to_version = {opset.domain: opset.version for opset in model.opset_import}
-
-    inferred_model = shape_inference.infer_shapes(model)
-    output_name_to_shape = extract_shape_dict(inferred_model)
-    id_count = 0
 
+    # Quantize and add custom domain
     new_model = converter.quantize_model(model, 2, 21)
     custom_domain = onnx.helper.make_operatorsetid(domain="ai.onnx.contrib", version=1)
     new_model.opset_import.append(custom_domain)
     onnx.checker.check_model(new_model)
 
-    with open("model.onnx", "wb") as f:
+    # Save quantized model
+    out_path = tmp_path / "model_quant.onnx"
+    with out_path.open("wb") as f:
         f.write(new_model.SerializeToString())
 
-    model = onnx.load("model.onnx")
-    onnx.checker.check_model(model)  # This throws a descriptive error
+    # Reload quantized model to ensure it is valid
+    model_quant = onnx.load(str(out_path))
+    onnx.checker.check_model(model_quant)
 
+    # Prepare inputs and compare outputs
     inputs = np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4)
     outputs_true = converter.run_model_onnx_runtime(path, inputs)
+    outputs_quant = converter.run_model_onnx_runtime(out_path, inputs)
 
-    outputs_quant = converter.run_model_onnx_runtime("model.onnx", inputs)
     true = torch.tensor(np.array(outputs_true), dtype=torch.float32)
     quant = torch.tensor(np.array(outputs_quant), dtype=torch.float32) / (2**21)
 
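For readers skimming the test above: quantize_model(model, 2, 21) puts tensors on a fixed-point scale of 2**21, which is why the quantized outputs are divided by 2**21 before being compared with the float outputs. A minimal round-trip sketch of that convention follows (an assumed encoding; the converter's exact rounding may differ).

import numpy as np

SCALE = 2**21  # scale_base=2, scale_exponent=21, as in quantize_model(model, 2, 21)

def to_fixed(x: np.ndarray) -> np.ndarray:
    # Encode floats as scaled integers (round to nearest).
    return np.rint(x * SCALE).astype(np.int64)

def from_fixed(q: np.ndarray) -> np.ndarray:
    # Decode scaled integers back to floats.
    return q.astype(np.float64) / SCALE

x = np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4)
# Round-tripping loses at most half a quantization step per element.
assert np.allclose(from_fixed(to_fixed(x)), x, atol=1 / SCALE)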
python/tests/onnx_quantizer_tests/layers/base.py
@@ -113,9 +113,7 @@ class LayerTestConfig:
         # respect that; otherwise use original valid_inputs.
         inputs = test_spec.input_overrides or self.valid_inputs
 
-        # Prepare attributes
-        attrs = {**self.valid_attributes, **test_spec.attr_overrides}
-        # Remove omitted attributes if specified
+        # Prepare attributes and remove omitted attributes if specified
         attrs = {**self.valid_attributes, **test_spec.attr_overrides}
         for key in getattr(test_spec, "omit_attrs", []):
             attrs.pop(key, None)
python/tests/onnx_quantizer_tests/layers/clip_config.py
@@ -0,0 +1,127 @@
+from __future__ import annotations
+
+import numpy as np
+
+from python.tests.onnx_quantizer_tests import TEST_RNG_SEED
+from python.tests.onnx_quantizer_tests.layers.base import (
+    e2e_test,
+    edge_case_test,
+    valid_test,
+)
+from python.tests.onnx_quantizer_tests.layers.factory import (
+    BaseLayerConfigProvider,
+    LayerTestConfig,
+)
+
+
+class ClipConfigProvider(BaseLayerConfigProvider):
+    """Test configuration provider for elementwise Clip."""
+
+    @property
+    def layer_name(self) -> str:
+        return "Clip"
+
+    def get_config(self) -> LayerTestConfig:
+        # Treat min / max as optional extra inputs, scalar-shaped by default.
+        # Scalars are encoded as shape [1] for the test harness; ONNX/ORT
+        # will still broadcast them over A.
+        return LayerTestConfig(
+            op_type="Clip",
+            valid_inputs=["A", "min", "max"],
+            valid_attributes={},  # no Clip-specific attrs
+            required_initializers={},  # by default, all three can be dynamic inputs
+            input_shapes={
+                "A": [1, 3, 4, 4],
+                "min": [1],  # scalar-ish bound
+                "max": [1],  # scalar-ish bound
+            },
+            output_shapes={
+                "clip_output": [1, 3, 4, 4],
+            },
+        )
+
+    def get_test_specs(self) -> list:
+        rng = np.random.default_rng(TEST_RNG_SEED)
+
+        return [
+            # --- VALID TESTS ---
+            # Basic Clip with scalar min/max as dynamic inputs.
+            valid_test("basic_scalar_bounds")
+            .description("Clip with A, min, max all as inputs; min/max are scalars.")
+            .override_input_shapes(A=[1, 3, 4, 4], min=[1], max=[1])
+            .override_output_shapes(clip_output=[1, 3, 4, 4])
+            .tags("basic", "elementwise", "clip")
+            .build(),
+            # This keeps the name used by the integration tests:
+            # Clip_broadcast_bounds
+            # Broadcasting here is just scalar → full tensor broadcast.
+            valid_test("broadcast_bounds")
+            .description(
+                "Clip with scalar bounds broadcast over all elements of A "
+                "(mirrors Max/Min broadcast tests but respects ORT's scalar bound "
+                "rules).",
+            )
+            .override_input_shapes(A=[1, 3, 2, 4], min=[1], max=[1])
+            .override_output_shapes(clip_output=[1, 3, 2, 4])
+            .tags("broadcast", "elementwise", "clip", "onnxruntime")
+            .build(),
+            # This keeps the name used by the integration tests:
+            # Clip_initializer_bounds
+            valid_test("initializer_bounds")
+            .description(
+                "Clip where min/max are scalar initializers instead of inputs.",
+            )
+            .override_input_shapes(A=[1, 3, 4, 4])  # only A is a true input
+            # Scalar numpy values → ONNX initializers with shape ()
+            .override_initializer(
+                "min",
+                np.array(rng.uniform(-1.0, 0.0), dtype=np.float64),
+            )
+            .override_initializer(
+                "max",
+                np.array(rng.uniform(0.0, 2.0), dtype=np.float64),
+            )
+            .override_output_shapes(clip_output=[1, 3, 4, 4])
+            .tags("initializer", "elementwise", "clip", "onnxruntime")
+            .build(),
+            # --- E2E TESTS ---
+            e2e_test("e2e_small").description(
+                "End-to-end Clip with small random tensor and scalar bounds.",
+            )
+            # All three are treated as runtime inputs here;
+            # min/max are scalar-shaped [1].
+            .override_input_shapes(
+                A=[1, 3, 4, 4],
+                min=[1],
+                max=[1],
+            )
+            .override_output_shapes(clip_output=[1, 3, 4, 4])
+            .tags("e2e", "clip")
+            .build(),
+            e2e_test("e2e_initializer_bounds").description(
+                "End-to-end Clip where min/max are scalar initializers "
+                "instead of inputs.",
+            )
+            # Only A is a true runtime input; min/max are scalar initializers.
+            .override_input_shapes(
+                A=[1, 3, 4, 4],
+            )
+            .override_initializer(
+                "min",
+                np.array(rng.uniform(-1.0, 0.0), dtype=np.float64),
+            )
+            .override_initializer(
+                "max",
+                np.array(rng.uniform(0.0, 2.0), dtype=np.float64),
+            )
+            .override_output_shapes(clip_output=[1, 3, 4, 4])
+            .tags("e2e", "initializer", "clip")
+            .build(),
+            # --- EDGE / STRESS ---
+            edge_case_test("empty_tensor")
+            .description("Clip with empty tensor input and scalar bounds.")
+            .override_input_shapes(A=[0], min=[1], max=[1])
+            .override_output_shapes(clip_output=[0])
+            .tags("edge", "empty", "clip")
+            .build(),
+        ]
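Context for the config above: since ONNX opset 11, Clip takes min and max as optional inputs rather than attributes, and the spec describes them as scalars, which is why these specs use scalar-shaped bounds and shape-() initializers. A self-contained sketch of exactly that node form, assuming onnx >= 1.13 for ReferenceEvaluator (this is not part of the test suite):

import numpy as np
import onnx
from onnx import TensorProto, helper
from onnx.reference import ReferenceEvaluator

# Clip with min/max as scalar (shape ()) inputs, per the opset-11+ signature.
node = helper.make_node("Clip", inputs=["A", "min", "max"], outputs=["clip_output"])
graph = helper.make_graph(
    nodes=[node],
    name="ClipExample",
    inputs=[
        helper.make_tensor_value_info("A", TensorProto.FLOAT, [1, 3, 4, 4]),
        helper.make_tensor_value_info("min", TensorProto.FLOAT, []),
        helper.make_tensor_value_info("max", TensorProto.FLOAT, []),
    ],
    outputs=[
        helper.make_tensor_value_info("clip_output", TensorProto.FLOAT, [1, 3, 4, 4]),
    ],
)
model = helper.make_model(graph)
onnx.checker.check_model(model)

a = np.random.default_rng(0).normal(size=(1, 3, 4, 4)).astype(np.float32)
lo = np.array(-0.5, dtype=np.float32)
hi = np.array(0.5, dtype=np.float32)
(out,) = ReferenceEvaluator(model).run(None, {"A": a, "min": lo, "max": hi})
assert np.allclose(out, np.clip(a, -0.5, 0.5))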
python/tests/onnx_quantizer_tests/layers/max_config.py
@@ -0,0 +1,100 @@
+from __future__ import annotations
+
+import numpy as np
+
+from python.tests.onnx_quantizer_tests import TEST_RNG_SEED
+from python.tests.onnx_quantizer_tests.layers.base import (
+    e2e_test,
+    edge_case_test,
+    valid_test,
+)
+from python.tests.onnx_quantizer_tests.layers.factory import (
+    BaseLayerConfigProvider,
+    LayerTestConfig,
+)
+
+
+class MaxConfigProvider(BaseLayerConfigProvider):
+    """Test configuration provider for elementwise Max"""
+
+    @property
+    def layer_name(self) -> str:
+        return "Max"
+
+    def get_config(self) -> LayerTestConfig:
+        return LayerTestConfig(
+            op_type="Max",
+            valid_inputs=["A", "B"],
+            valid_attributes={},  # Max has no layer-specific attributes
+            required_initializers={},  # default: both A and B are dynamic inputs
+            input_shapes={
+                "A": [1, 3, 4, 4],
+                "B": [1, 3, 4, 4],
+            },
+            output_shapes={
+                "max_output": [1, 3, 4, 4],
+            },
+        )
+
+    def get_test_specs(self) -> list:
+        rng = np.random.default_rng(TEST_RNG_SEED)
+        return [
+            # --- VALID TESTS ---
+            valid_test("basic")
+            .description("Basic elementwise Max of two same-shaped tensors")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 4, 4])
+            .tags("basic", "elementwise", "max")
+            .build(),
+            valid_test("broadcast_max")
+            .description("Max with Numpy-style broadcasting along spatial dimensions")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 1, 1])
+            .tags("broadcast", "elementwise", "max", "onnx14")
+            .build(),
+            valid_test("initializer_max")
+            .description("Max where B is an initializer instead of an input")
+            .override_input_shapes(A=[1, 3, 4, 4])
+            .override_initializer(
+                "B",
+                rng.normal(0, 1, (1, 3, 4, 4)).astype(np.float32),
+            )
+            .tags("initializer", "elementwise", "max", "onnxruntime")
+            .build(),
+            # --- E2E TESTS ---
+            e2e_test("e2e_max")
+            .description("End-to-end Max test with random inputs")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 4, 4])
+            .override_output_shapes(max_output=[1, 3, 4, 4])
+            .tags("e2e", "max", "2d")
+            .build(),
+            e2e_test("e2e_broadcast_max")
+            .description(
+                "End-to-end Max with Numpy-style broadcasting along spatial dimensions",
+            )
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 1, 1])
+            .override_output_shapes(max_output=[1, 3, 4, 4])
+            .tags("e2e", "broadcast", "elementwise", "max", "onnx14")
+            .build(),
+            e2e_test("e2e_initializer_max")
+            .description("End-to-end Max where B is an initializer")
+            .override_input_shapes(A=[1, 3, 4, 4])
+            .override_initializer(
+                "B",
+                rng.normal(0, 1, (1, 3, 4, 4)).astype(np.float32),
+            )
+            .override_output_shapes(max_output=[1, 3, 4, 4])
+            .tags("e2e", "initializer", "elementwise", "max", "onnxruntime")
+            .build(),
+            # --- EDGE / STRESS ---
+            edge_case_test("empty_tensor")
+            .description("Max with empty tensor input (zero elements)")
+            .override_input_shapes(A=[0], B=[0])
+            .override_output_shapes(max_output=[0])
+            .tags("edge", "empty", "max")
+            .build(),
+            valid_test("large_tensor")
+            .description("Large tensor max performance/stress test")
+            .override_input_shapes(A=[1, 64, 256, 256], B=[1, 64, 256, 256])
+            .tags("large", "performance", "max")
+            .skip("Performance test, skipped by default")
+            .build(),
+        ]
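The broadcast_max and e2e_broadcast_max specs rely on Max's numpy-style (multidirectional) broadcasting. A quick NumPy check of the exact shape pairing used above, illustrative only:

import numpy as np

rng = np.random.default_rng(0)
a = rng.normal(size=(1, 3, 4, 4))
b = rng.normal(size=(1, 3, 1, 1))  # one value per channel

out = np.maximum(a, b)  # broadcasts b across the spatial dims, like ONNX Max
assert out.shape == (1, 3, 4, 4)
assert (out >= b).all()  # every element is at least its channel's bound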
python/tests/onnx_quantizer_tests/layers/maxpool_config.py
@@ -28,6 +28,8 @@ class MaxPoolConfigProvider(BaseLayerConfigProvider):
                 "pads": [0, 0, 0, 0],
             },
             required_initializers={},
+            input_shapes={"input": [1, 3, 4, 4]},
+            output_shapes={"maxpool_output": [1, 3, 2, 2]},
         )
 
     def get_test_specs(self) -> list:
@@ -52,12 +54,97 @@ class MaxPoolConfigProvider(BaseLayerConfigProvider):
             .override_attrs(strides=[1, 1])
             .tags("stride_1", "pool", "overlap")
             .build(),
+            valid_test("missing_dilations_attr")
+            .description("MaxPool without dilations attribute should default to [1, 1]")
+            .override_attrs(dilations=None)
+            .tags("default_attr", "dilations", "pool")
+            .build(),
+            valid_test("non_default_dilations")
+            .description("MaxPool with explicit non-default dilations")
+            .override_attrs(dilations=[2, 2])
+            .tags("dilations", "non_default", "pool")
+            .override_output_shapes(maxpool_output=[1, 3, 1, 1])
+            .build(),
+            valid_test("missing_optional_attrs")
+            .description(
+                "MaxPool without pads/strides/dilations should use default values",
+            )
+            .override_attrs(
+                pads=None,
+                strides=None,
+                dilations=None,
+            )
+            .tags("defaults", "optional_attrs", "pool")
+            .build(),
+            valid_test("non_default_pads")
+            .description("MaxPool with explicit non-default pads")
+            .override_attrs(pads=[1, 1, 1, 1])
+            .override_output_shapes(maxpool_output=[1, 3, 3, 3])
+            .tags("pads", "non_default", "pool")
+            .build(),
+            valid_test("non_default_strides")
+            .description("MaxPool with explicit non-default strides [3, 3]")
+            .override_attrs(strides=[3, 3])
+            .override_output_shapes(maxpool_output=[1, 3, 1, 1])
+            .tags("strides", "non_default", "pool")
+            .build(),
+            valid_test("rectangular_strides")
+            .description("MaxPool with non-square strides [2, 1]")
+            .override_attrs(strides=[2, 1])
+            .override_output_shapes(maxpool_output=[1, 3, 2, 3])
+            .tags("strides", "non_square", "pool")
+            .build(),
+            # --- E2E TESTS ---
             e2e_test("e2e_basic")
             .description("End-to-end test for 2D MaxPool")
             .override_input_shapes(input=[1, 3, 4, 4])
             .override_output_shapes(maxpool_output=[1, 3, 2, 2])
             .tags("e2e", "pool", "2d")
             .build(),
+            e2e_test("missing_dilations_attr")
+            .description("MaxPool without dilations attribute should default to [1, 1]")
+            .override_attrs(dilations=None)
+            .tags("default_attr", "dilations", "pool")
+            .build(),
+            e2e_test("non_default_dilations")
+            .description("MaxPool with explicit non-default dilations")
+            .override_attrs(dilations=[2, 2])
+            .override_output_shapes(maxpool_output=[1, 3, 1, 1])
+            .tags("dilations", "non_default", "pool")
+            .build(),
+            e2e_test("e2e_defaults_applied")
+            .description("E2E MaxPool with default pads/strides/dilations applied")
+            .override_attrs(
+                pads=None,
+                strides=None,
+                dilations=None,
+            )
+            .override_input_shapes(input=[1, 1, 4, 4])
+            .override_output_shapes(maxpool_output=[1, 1, 3, 3])
+            .tags("e2e", "defaults", "pool")
+            .build(),
+            e2e_test("non_default_pads")
+            .description("MaxPool with explicit non-default pads")
+            .override_attrs(pads=[1, 1, 1, 1])
+            .override_output_shapes(maxpool_output=[1, 3, 3, 3])
+            .tags("pads", "non_default", "pool")
+            .build(),
+            e2e_test("non_default_strides")
+            .description("E2E MaxPool with explicit non-default strides")
+            .override_attrs(strides=[3, 3])
+            .override_input_shapes(input=[1, 3, 4, 4])
+            .override_output_shapes(maxpool_output=[1, 3, 1, 1])
+            .tags("e2e", "strides", "non_default", "pool")
+            .build(),
+            e2e_test("missing_strides_attr_defaults_applied")
+            .description(
+                "E2E MaxPool without strides attribute should default to [1, 1]",
+            )
+            .override_attrs(strides=None)
+            .override_input_shapes(input=[1, 1, 4, 4])
+            .override_output_shapes(maxpool_output=[1, 1, 3, 3])
+            .tags("e2e", "defaults", "strides", "pool")
+            .build(),
             # # --- ERROR TESTS ---
             error_test("asymmetric_padding")
             .description("MaxPool with asymmetric padding")
@@ -71,6 +158,25 @@ class MaxPoolConfigProvider(BaseLayerConfigProvider):
             .expects_error(InvalidParamError, "Currently only MaxPool2D is supported")
             .tags("invalid_attr_length", "kernel_shape")
             .build(),
+            error_test("auto_pad_not_supported")
+            .description("MaxPool with auto_pad should be rejected")
+            .override_attrs(auto_pad="SAME_UPPER")
+            .expects_error(InvalidParamError, "auto_pad must be NOTSET")
+            .tags("invalid", "auto_pad", "pool")
+            .build(),
+            error_test("ceil_mode_not_supported")
+            .description("MaxPool with ceil_mode != 0 should be rejected")
+            .override_attrs(ceil_mode=1)
+            .expects_error(InvalidParamError, "ceil_mode must be 0")
+            .tags("invalid", "ceil_mode", "pool")
+            .build(),
+            error_test("storage_order_not_supported")
+            .description("MaxPool with storage_order != 0 should be rejected")
+            .override_attrs(storage_order=1)
+            .expects_error(InvalidParamError, "storage_order must be 0")
+            .tags("invalid", "storage_order", "pool")
+            .build(),
+            # This can be removed if we start to support explicit None strides
             # --- EDGE CASE / SKIPPED TEST ---
             valid_test("large_input")
             .description("Large MaxPool input (performance/stress test)")
python/tests/onnx_quantizer_tests/layers/min_config.py
@@ -0,0 +1,94 @@
+from __future__ import annotations
+
+import numpy as np
+
+from python.tests.onnx_quantizer_tests import TEST_RNG_SEED
+from python.tests.onnx_quantizer_tests.layers.base import (
+    e2e_test,
+    edge_case_test,
+    valid_test,
+)
+from python.tests.onnx_quantizer_tests.layers.factory import (
+    BaseLayerConfigProvider,
+    LayerTestConfig,
+)
+
+
+class MinConfigProvider(BaseLayerConfigProvider):
+    """Test configuration provider for elementwise Min"""
+
+    @property
+    def layer_name(self) -> str:
+        return "Min"
+
+    def get_config(self) -> LayerTestConfig:
+        return LayerTestConfig(
+            op_type="Min",
+            valid_inputs=["A", "B"],
+            valid_attributes={},  # Min has no layer-specific attributes
+            required_initializers={},  # default: both A and B are dynamic inputs
+            input_shapes={
+                "A": [1, 3, 4, 4],
+                "B": [1, 3, 4, 4],
+            },
+            output_shapes={
+                "min_output": [1, 3, 4, 4],
+            },
+        )
+
+    def get_test_specs(self) -> list:
+        rng = np.random.default_rng(TEST_RNG_SEED)
+        return [
+            # --- VALID TESTS ---
+            valid_test("basic")
+            .description("Basic elementwise Min of two same-shaped tensors")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 4, 4])
+            .tags("basic", "elementwise", "min")
+            .build(),
+            valid_test("broadcast_min")
+            .description("Min with Numpy-style broadcasting along spatial dimensions")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 1, 1])
+            .tags("broadcast", "elementwise", "min", "onnx14")
+            .build(),
+            valid_test("initializer_min")
+            .description("Min where B is an initializer instead of an input")
+            .override_input_shapes(A=[1, 3, 4, 4])
+            .override_initializer("B", rng.normal(0, 1, (1, 3, 4, 4)))
+            .tags("initializer", "elementwise", "min", "onnxruntime")
+            .build(),
+            # --- E2E TESTS ---
+            e2e_test("e2e_min")
+            .description("End-to-end Min test with random inputs")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 4, 4])
+            .override_output_shapes(min_output=[1, 3, 4, 4])
+            .tags("e2e", "min", "2d")
+            .build(),
+            e2e_test("e2e_broadcast_min")
+            .description(
+                "End-to-end Min with Numpy-style broadcasting along spatial dimensions",
+            )
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 1, 1])
+            .override_output_shapes(min_output=[1, 3, 4, 4])
+            .tags("e2e", "broadcast", "elementwise", "min", "onnx14")
+            .build(),
+            e2e_test("e2e_initializer_min")
+            .description("End-to-end Min where B is an initializer")
+            .override_input_shapes(A=[1, 3, 4, 4])
+            .override_initializer("B", rng.normal(0, 1, (1, 3, 4, 4)))
+            .override_output_shapes(min_output=[1, 3, 4, 4])
+            .tags("e2e", "initializer", "elementwise", "min", "onnxruntime")
+            .build(),
+            # --- EDGE / STRESS ---
+            edge_case_test("empty_tensor")
+            .description("Min with empty tensor input (zero elements)")
+            .override_input_shapes(A=[0], B=[0])
+            .override_output_shapes(min_output=[0])
+            .tags("edge", "empty", "min")
+            .build(),
+            valid_test("large_tensor")
+            .description("Large tensor min performance/stress test")
+            .override_input_shapes(A=[1, 64, 256, 256], B=[1, 64, 256, 256])
+            .tags("large", "performance", "min")
+            .skip("Performance test, skipped by default")
+            .build(),
+        ]
python/tests/onnx_quantizer_tests/layers_tests/test_integration.py
@@ -159,21 +159,22 @@ class TestIntegration(BaseQuantizerTest):
         quantized_input_names = [inp.name for inp in quantized_session.get_inputs()]
         quantized_output_name = quantized_session.get_outputs()[0].name
 
-        # For quantized model, scale the inputs
-        scaled_inputs = {}
+        # For the quantized model, cast inputs to float64 for ORT
+        quantized_inputs = {}
         for name in quantized_input_names:
             if name in dummy_inputs:
-                scaled_inputs[name] = (dummy_inputs[name]).astype(np.float64)
+                quantized_inputs[name] = dummy_inputs[name].astype(np.float64)
             else:
-                # If quantized model has different inputs, skip or handle
+                # If quantized model has different inputs, skip this case
                 pytest.skip(
                     f"Quantized model input mismatch for {layer_name}.{test_spec.name}",
                 )
 
         quantized_output = quantized_session.run(
             [quantized_output_name],
-            scaled_inputs,
+            quantized_inputs,
         )[0]
+
         quantized_output = quantized_output / (scale_base ** (scale_exponent))
 
         ratio = np.mean(quantized_output / (original_output + 1e-12))
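To make the dequantize-and-compare step above concrete, here is an illustrative recap (not the test code itself): the quantized run comes back at fixed-point scale scale_base**scale_exponent, is divided back down, and the test then takes a mean elementwise ratio against the float output, with the 1e-12 guarding against division by zero.

import numpy as np

scale_base, scale_exponent = 2, 21  # example values; the suite parametrizes these
original_output = np.array([1.0, 2.0, 4.0])
# Pretend the quantized graph ran perfectly at fixed-point scale:
quantized_output = original_output * scale_base**scale_exponent

quantized_output = quantized_output / (scale_base ** (scale_exponent))
ratio = np.mean(quantized_output / (original_output + 1e-12))
assert abs(ratio - 1.0) < 1e-6  # a well-behaved quantizer keeps the ratio near 1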
python/tests/onnx_quantizer_tests/layers_tests/test_quantize.py
@@ -37,7 +37,10 @@ class TestQuantize(BaseQuantizerTest):
         scale_base: int = 10,
         *,
         rescale: bool = True,
-    ) -> tuple[onnx.NodeProto, tuple[str, LayerTestConfig, LayerTestSpec, NodeProto]]:
+    ) -> tuple[
+        list[onnx.NodeProto],
+        tuple[str, LayerTestConfig, LayerTestSpec, NodeProto],
+    ]:
         """Common setup for quantization tests"""
         layer_name, config, test_spec = test_case_data
 
@@ -65,6 +68,8 @@ class TestQuantize(BaseQuantizerTest):
             initializer_map=initializer_map,
         )
 
+        if not isinstance(result, list):
+            result = [result]
         return result, (layer_name, config, test_spec, node)
 
     @pytest.mark.unit
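The new isinstance check above, together with the widened list[onnx.NodeProto] return annotation and the +79-line change to python/core/model_processing/onnx_quantizer/layers/base.py, suggests that a layer quantizer may now emit several nodes rather than exactly one. A hedged sketch of the normalization idiom (normalize_nodes is a hypothetical helper, not package code):

from __future__ import annotations

from onnx import NodeProto, helper

def normalize_nodes(result: NodeProto | list[NodeProto]) -> list[NodeProto]:
    # Wrap a bare NodeProto so callers can always iterate over nodes.
    return result if isinstance(result, list) else [result]

single = helper.make_node("Relu", inputs=["x"], outputs=["y"])
assert len(normalize_nodes(single)) == 1
assert len(normalize_nodes([single, single])) == 2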