JSTprove 1.1.0-py3-none-macosx_11_0_arm64.whl → 1.3.0-py3-none-macosx_11_0_arm64.whl

Files changed (41)
  1. {jstprove-1.1.0.dist-info → jstprove-1.3.0.dist-info}/METADATA +3 -3
  2. {jstprove-1.1.0.dist-info → jstprove-1.3.0.dist-info}/RECORD +40 -26
  3. python/core/binaries/onnx_generic_circuit_1-3-0 +0 -0
  4. python/core/circuits/base.py +29 -12
  5. python/core/circuits/errors.py +1 -2
  6. python/core/model_processing/converters/base.py +3 -3
  7. python/core/model_processing/converters/onnx_converter.py +28 -27
  8. python/core/model_processing/onnx_custom_ops/__init__.py +5 -4
  9. python/core/model_processing/onnx_custom_ops/batchnorm.py +64 -0
  10. python/core/model_processing/onnx_custom_ops/mul.py +66 -0
  11. python/core/model_processing/onnx_quantizer/exceptions.py +2 -2
  12. python/core/model_processing/onnx_quantizer/layers/base.py +101 -0
  13. python/core/model_processing/onnx_quantizer/layers/batchnorm.py +224 -0
  14. python/core/model_processing/onnx_quantizer/layers/clip.py +92 -0
  15. python/core/model_processing/onnx_quantizer/layers/max.py +49 -0
  16. python/core/model_processing/onnx_quantizer/layers/min.py +54 -0
  17. python/core/model_processing/onnx_quantizer/layers/mul.py +53 -0
  18. python/core/model_processing/onnx_quantizer/layers/sub.py +54 -0
  19. python/core/model_processing/onnx_quantizer/onnx_op_quantizer.py +43 -0
  20. python/core/model_templates/circuit_template.py +48 -38
  21. python/core/utils/errors.py +1 -1
  22. python/core/utils/scratch_tests.py +29 -23
  23. python/scripts/gen_and_bench.py +2 -2
  24. python/tests/circuit_e2e_tests/circuit_model_developer_test.py +18 -14
  25. python/tests/circuit_e2e_tests/helper_fns_for_tests.py +11 -13
  26. python/tests/circuit_parent_classes/test_ort_custom_layers.py +35 -53
  27. python/tests/onnx_quantizer_tests/layers/base.py +1 -3
  28. python/tests/onnx_quantizer_tests/layers/batchnorm_config.py +190 -0
  29. python/tests/onnx_quantizer_tests/layers/clip_config.py +127 -0
  30. python/tests/onnx_quantizer_tests/layers/max_config.py +100 -0
  31. python/tests/onnx_quantizer_tests/layers/min_config.py +94 -0
  32. python/tests/onnx_quantizer_tests/layers/mul_config.py +102 -0
  33. python/tests/onnx_quantizer_tests/layers/sub_config.py +102 -0
  34. python/tests/onnx_quantizer_tests/layers_tests/test_integration.py +6 -5
  35. python/tests/onnx_quantizer_tests/layers_tests/test_quantize.py +8 -1
  36. python/tests/onnx_quantizer_tests/test_registered_quantizers.py +17 -8
  37. python/core/binaries/onnx_generic_circuit_1-1-0 +0 -0
  38. {jstprove-1.1.0.dist-info → jstprove-1.3.0.dist-info}/WHEEL +0 -0
  39. {jstprove-1.1.0.dist-info → jstprove-1.3.0.dist-info}/entry_points.txt +0 -0
  40. {jstprove-1.1.0.dist-info → jstprove-1.3.0.dist-info}/licenses/LICENSE +0 -0
  41. {jstprove-1.1.0.dist-info → jstprove-1.3.0.dist-info}/top_level.txt +0 -0

python/tests/onnx_quantizer_tests/layers/sub_config.py

@@ -0,0 +1,102 @@
+import numpy as np
+
+from python.tests.onnx_quantizer_tests import TEST_RNG_SEED
+from python.tests.onnx_quantizer_tests.layers.base import (
+    BaseLayerConfigProvider,
+    LayerTestConfig,
+    LayerTestSpec,
+    e2e_test,
+    edge_case_test,
+    valid_test,
+)
+
+
+class SubConfigProvider(BaseLayerConfigProvider):
+    """Test configuration provider for Sub layer"""
+
+    @property
+    def layer_name(self) -> str:
+        return "Sub"
+
+    def get_config(self) -> LayerTestConfig:
+        return LayerTestConfig(
+            op_type="Sub",
+            valid_inputs=["A", "B"],
+            valid_attributes={},  # Sub has no layer-specific attributes
+            required_initializers={},
+            input_shapes={
+                "A": [1, 3, 4, 4],
+                "B": [1, 3, 4, 4],
+            },
+            output_shapes={
+                "sub_output": [1, 3, 4, 4],
+            },
+        )
+
+    def get_test_specs(self) -> list[LayerTestSpec]:
+        rng = np.random.default_rng(TEST_RNG_SEED)
+        return [
+            # --- VALID TESTS ---
+            valid_test("basic")
+            .description("Basic elementwise Sub of two same-shaped tensors")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 4, 4])
+            .tags("basic", "elementwise", "Sub")
+            .build(),
+            valid_test("broadcast_Sub")
+            .description("Sub with NumPy-style broadcasting along spatial dimensions")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 1, 1])
+            .tags("broadcast", "elementwise", "Sub", "onnx14")
+            .build(),
+            valid_test("initializer_Sub")
+            .description(
+                "Sub where second input (B) is a tensor initializer instead of input",
+            )
+            .override_input_shapes(A=[1, 3, 4, 4])
+            .override_initializer("B", rng.normal(0, 1, (1, 3, 4, 4)))
+            .tags("initializer", "elementwise", "Sub", "onnxruntime")
+            .build(),
+            valid_test("scalar_Sub")
+            .description("Subtract scalar (initializer) from tensor")
+            .override_input_shapes(A=[1, 3, 4, 4])
+            .override_initializer("B", np.array([2.0], dtype=np.float32))
+            .tags("scalar", "elementwise", "Sub")
+            .build(),
+            # --- E2E TESTS ---
+            e2e_test("e2e_Sub")
+            .description("End-to-end Sub test with random inputs")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 4, 4])
+            .override_output_shapes(sub_output=[1, 3, 4, 4])
+            .tags("e2e", "Sub", "2d")
+            .build(),
+            e2e_test("e2e_initializer_Sub")
+            .description(
+                "Sub where second input (B) is a tensor initializer instead of input",
+            )
+            .override_input_shapes(A=[1, 3, 4, 4])
+            .override_initializer("B", rng.normal(0, 1, (1, 3, 4, 4)))
+            .tags("initializer", "elementwise", "Sub", "onnxruntime")
+            .build(),
+            e2e_test("e2e_broadcast_Sub")
+            .description("Sub with NumPy-style broadcasting along spatial dimensions")
+            .override_input_shapes(A=[1, 3, 4, 4], B=[1, 3, 1, 1])
+            .tags("broadcast", "elementwise", "Sub", "onnx14")
+            .build(),
+            e2e_test("e2e_scalar_Sub")
+            .description("Subtract scalar (initializer) from tensor")
+            .override_input_shapes(A=[1, 3, 4, 4])
+            .override_initializer("B", np.array([2.0], dtype=np.float32))
+            .tags("scalar", "elementwise", "Sub")
+            .build(),
+            # --- EDGE CASES ---
+            edge_case_test("empty_tensor")
+            .description("Sub with empty tensor input (zero elements)")
+            .override_input_shapes(A=[0], B=[0])
+            .tags("edge", "empty", "Sub")
+            .build(),
+            edge_case_test("large_tensor")
+            .description("Large tensor Sub performance/stress test")
+            .override_input_shapes(A=[1, 64, 256, 256], B=[1, 64, 256, 256])
+            .tags("large", "performance", "Sub")
+            .skip("Performance test, skipped by default")
+            .build(),
+        ]
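
The broadcast_Sub and scalar_Sub specs rely on standard NumPy/ONNX broadcasting semantics. A minimal sketch of the shapes they exercise, in plain NumPy and independent of the test harness above:

import numpy as np

rng = np.random.default_rng(0)  # any seed works here; TEST_RNG_SEED matters only in the harness

A = rng.normal(0, 1, (1, 3, 4, 4))          # override_input_shapes(A=[1, 3, 4, 4])
B = rng.normal(0, 1, (1, 3, 1, 1))          # broadcast_Sub: B broadcasts over the spatial dims
scalar = np.array([2.0], dtype=np.float32)  # scalar_Sub: the [2.0] initializer

assert (A - B).shape == (1, 3, 4, 4)        # per-channel value subtracted at every position
assert (A - scalar).shape == (1, 3, 4, 4)   # a one-element tensor broadcasts over everything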

python/tests/onnx_quantizer_tests/layers_tests/test_integration.py

@@ -159,21 +159,22 @@ class TestIntegration(BaseQuantizerTest):
         quantized_input_names = [inp.name for inp in quantized_session.get_inputs()]
         quantized_output_name = quantized_session.get_outputs()[0].name
 
-        # For quantized model, scale the inputs
-        scaled_inputs = {}
+        # For the quantized model, cast inputs to float64 for ORT
+        quantized_inputs = {}
         for name in quantized_input_names:
             if name in dummy_inputs:
-                scaled_inputs[name] = (dummy_inputs[name]).astype(np.float64)
+                quantized_inputs[name] = dummy_inputs[name].astype(np.float64)
             else:
-                # If quantized model has different inputs, skip or handle
+                # If quantized model has different inputs, skip this case
                 pytest.skip(
                     f"Quantized model input mismatch for {layer_name}.{test_spec.name}",
                 )
 
         quantized_output = quantized_session.run(
             [quantized_output_name],
-            scaled_inputs,
+            quantized_inputs,
         )[0]
+
         quantized_output = quantized_output / (scale_base ** (scale_exponent))
 
         ratio = np.mean(quantized_output / (original_output + 1e-12))
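
The division by scale_base ** scale_exponent undoes the quantizer's fixed-point scaling, so the quantized/original ratio should sit near 1.0 up to rounding error. A minimal sketch of that round trip (the scale values here are illustrative, not the harness defaults):

import numpy as np

scale_base, scale_exponent = 10, 4  # illustrative values only
factor = scale_base ** scale_exponent

x = np.array([0.1234567, -2.5, 3.14159])
quantized = np.round(x * factor)  # fixed-point encode, as a quantizer might
recovered = quantized / factor    # mirrors quantized_output / (scale_base ** scale_exponent)

ratio = np.mean(recovered / (x + 1e-12))  # same health check as the test
assert abs(ratio - 1.0) < 1e-3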

python/tests/onnx_quantizer_tests/layers_tests/test_quantize.py

@@ -37,7 +37,10 @@ class TestQuantize(BaseQuantizerTest):
         scale_base: int = 10,
         *,
         rescale: bool = True,
-    ) -> tuple[onnx.NodeProto, tuple[str, LayerTestConfig, LayerTestSpec, NodeProto]]:
+    ) -> tuple[
+        list[onnx.NodeProto],
+        tuple[str, LayerTestConfig, LayerTestSpec, NodeProto],
+    ]:
         """Common setup for quantization tests"""
         layer_name, config, test_spec = test_case_data
 
@@ -65,6 +68,8 @@ class TestQuantize(BaseQuantizerTest):
             initializer_map=initializer_map,
         )
 
+        if not isinstance(result, list):
+            result = [result]
         return result, (layer_name, config, test_spec, node)
 
     @pytest.mark.unit
@@ -139,6 +144,8 @@ class TestQuantize(BaseQuantizerTest):
         node: NodeProto,
         result_node: NodeProto,
     ) -> bool:
+        if node.op_type == "BatchNormalization":
+            pytest.skip(f"{node.op_type} alters the node structure by design")
         if node.op_type in result_node.op_type:
             # Assert there are no fewer attributes in the new node
             assert len(node.attribute) <= len(result_node.attribute)
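
The isinstance check reflects that a registered quantizer may return either a single NodeProto or a list of nodes, e.g. when BatchNormalization is decomposed, which is also why the structure comparison skips it. A sketch of the same normalization pattern; quantize_stub is a hypothetical stand-in, not the project's API:

import onnx

def quantize_stub(node: onnx.NodeProto) -> onnx.NodeProto | list[onnx.NodeProto]:
    # Hypothetical stand-in for a registered quantizer: most ops map 1:1,
    # but some (e.g. BatchNormalization above) expand into several nodes.
    if node.op_type == "BatchNormalization":
        return [node]  # placeholder for a multi-node decomposition
    return node

node = onnx.helper.make_node("Sub", inputs=["A", "B"], outputs=["sub_output"])
result = quantize_stub(node)
if not isinstance(result, list):  # same normalization as in the setup helper above
    result = [result]
assert all(isinstance(n, onnx.NodeProto) for n in result)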

python/tests/onnx_quantizer_tests/test_registered_quantizers.py

@@ -49,11 +49,10 @@ def validate_quantized_node(node_result: onnx.NodeProto, op_type: str) -> None:
     assert node_result.output, f"Missing outputs for {op_type}"
 
     try:
-        # Create a minimal model with custom opset for validation
+        # Create a minimal graph with dummy IOs to satisfy ONNX requirements
         temp_graph = onnx.GraphProto()
         temp_graph.name = "temp_graph"
 
-        # Add dummy inputs/outputs to satisfy graph requirements
         for inp in node_result.input:
             if not any(vi.name == inp for vi in temp_graph.input):
                 temp_graph.input.append(
@@ -63,6 +62,7 @@ def validate_quantized_node(node_result: onnx.NodeProto, op_type: str) -> None:
                         [1],
                     ),
                 )
+
         for out in node_result.output:
             if not any(vi.name == out for vi in temp_graph.output):
                 temp_graph.output.append(
@@ -74,12 +74,16 @@ def validate_quantized_node(node_result: onnx.NodeProto, op_type: str) -> None:
                 )
 
         temp_graph.node.append(node_result)
-        temp_model = onnx.helper.make_model(temp_graph)
-        custom_domain = onnx.helper.make_operatorsetid(
-            domain="ai.onnx.contrib",
-            version=1,
+
+        # Explicit opset imports for default and contrib domains
+        temp_model = onnx.helper.make_model(
+            temp_graph,
+            opset_imports=[
+                onnx.helper.make_opsetid("", 22),
+                onnx.helper.make_opsetid("ai.onnx.contrib", 1),
+            ],
         )
-        temp_model.opset_import.append(custom_domain)
+
         onnx.checker.check_model(temp_model)
     except onnx.checker.ValidationError as e:
         pytest.fail(f"ONNX node validation failed for {op_type}: {e}")
@@ -117,5 +121,10 @@ def test_registered_quantizer_quantize(
         for node_result in result:
             validate_quantized_node(node_result, op_type)
     else:
-        assert result.input, f"Missing inputs for {op_type}"
+        if inputs:
+            # Only assert if this op actually requires inputs
+            assert (
+                result.input
+            ), f"Missing inputs for {op_type}; required_inputs={inputs}"
+
         validate_quantized_node(result, op_type)
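
The new `if inputs:` guard matters for ops that legitimately take no inputs, where an empty result.input is correct rather than an error. ONNX's Constant op is the canonical case, as a quick sketch shows:

import onnx

const_node = onnx.helper.make_node(
    "Constant",
    inputs=[],  # Constant takes no inputs by design
    outputs=["const_out"],
    value=onnx.helper.make_tensor("value", onnx.TensorProto.FLOAT, [1], [2.0]),
)
assert not const_node.input  # empty input list is valid, so an unconditional
                             # "assert result.input" would wrongly fail here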