JSTprove 1.2.0__py3-none-macosx_11_0_arm64.whl → 1.3.0__py3-none-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of JSTprove has been flagged as potentially problematic.
- {jstprove-1.2.0.dist-info → jstprove-1.3.0.dist-info}/METADATA +1 -1
- {jstprove-1.2.0.dist-info → jstprove-1.3.0.dist-info}/RECORD +30 -24
- python/core/binaries/onnx_generic_circuit_1-3-0 +0 -0
- python/core/circuits/base.py +29 -12
- python/core/circuits/errors.py +1 -2
- python/core/model_processing/converters/base.py +3 -3
- python/core/model_processing/onnx_custom_ops/__init__.py +5 -4
- python/core/model_processing/onnx_quantizer/exceptions.py +2 -2
- python/core/model_processing/onnx_quantizer/layers/base.py +34 -0
- python/core/model_processing/onnx_quantizer/layers/clip.py +92 -0
- python/core/model_processing/onnx_quantizer/layers/max.py +49 -0
- python/core/model_processing/onnx_quantizer/layers/min.py +54 -0
- python/core/model_processing/onnx_quantizer/onnx_op_quantizer.py +6 -0
- python/core/model_templates/circuit_template.py +48 -38
- python/core/utils/errors.py +1 -1
- python/core/utils/scratch_tests.py +29 -23
- python/tests/circuit_e2e_tests/circuit_model_developer_test.py +18 -14
- python/tests/circuit_e2e_tests/helper_fns_for_tests.py +11 -13
- python/tests/circuit_parent_classes/test_ort_custom_layers.py +35 -53
- python/tests/onnx_quantizer_tests/layers/base.py +1 -3
- python/tests/onnx_quantizer_tests/layers/clip_config.py +127 -0
- python/tests/onnx_quantizer_tests/layers/max_config.py +100 -0
- python/tests/onnx_quantizer_tests/layers/min_config.py +94 -0
- python/tests/onnx_quantizer_tests/layers_tests/test_integration.py +6 -5
- python/tests/onnx_quantizer_tests/layers_tests/test_quantize.py +6 -1
- python/tests/onnx_quantizer_tests/test_registered_quantizers.py +17 -8
- python/core/binaries/onnx_generic_circuit_1-2-0 +0 -0
- {jstprove-1.2.0.dist-info → jstprove-1.3.0.dist-info}/WHEEL +0 -0
- {jstprove-1.2.0.dist-info → jstprove-1.3.0.dist-info}/entry_points.txt +0 -0
- {jstprove-1.2.0.dist-info → jstprove-1.3.0.dist-info}/licenses/LICENSE +0 -0
- {jstprove-1.2.0.dist-info → jstprove-1.3.0.dist-info}/top_level.txt +0 -0
{jstprove-1.2.0.dist-info → jstprove-1.3.0.dist-info}/RECORD
CHANGED

@@ -1,22 +1,22 @@
-jstprove-1.
+jstprove-1.3.0.dist-info/licenses/LICENSE,sha256=UXQRcYRUH-PfN27n3P-FMaZFY6jr9jFPKcwT7CWbljw,1160
 python/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 python/core/__init__.py,sha256=RlfbqGAaUulKl44QGMCkkGJBQZ8R_AgC5bU5zS7BjnA,97
 python/core/binaries/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 python/core/binaries/expander-exec,sha256=C_1JcezdfLp9sFOQ2z3wp2gcq1k8zjIR09CxJKGGIuM,7095168
-python/core/binaries/onnx_generic_circuit_1-
+python/core/binaries/onnx_generic_circuit_1-3-0,sha256=qbHC9SV_NuNv-vZs5MwXV0NxXkqTTZlmLH59WYCVrC8,3221088
 python/core/circuit_models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 python/core/circuit_models/generic_onnx.py,sha256=P65UZkfVBTE6YhaQ951S6QoTHPuU5ntDt8QL5pXghvw,8787
 python/core/circuit_models/simple_circuit.py,sha256=igQrZtQyreyHc26iAgCyDb0TuD2bJAoumYhc1pYPDzQ,4682
 python/core/circuits/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-python/core/circuits/base.py,sha256=
-python/core/circuits/errors.py,sha256=
+python/core/circuits/base.py,sha256=_XSs2LFyBMZEkBSvRp53zc-XaGOopNxYg2xHQz9sqt0,41991
+python/core/circuits/errors.py,sha256=JDNa23wMwNQDTFY0IpDpHDMZ9gOdjDdQmB4GBhL_DCg,5913
 python/core/circuits/zk_model_base.py,sha256=5ggOaJjs2_MJvn-PO1cPN3i7U-XR4L-0zJGYuLVKOLc,820
 python/core/model_processing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 python/core/model_processing/errors.py,sha256=uh2YFjuuU5JM3anMtSTLAH-zjlNAKStmLDZqRUgBWS8,4611
 python/core/model_processing/converters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-python/core/model_processing/converters/base.py,sha256=
+python/core/model_processing/converters/base.py,sha256=o6bNwmqD9sOM9taqMb0ed6804RugQiU3va0rY_EA5SE,4265
 python/core/model_processing/converters/onnx_converter.py,sha256=-eXdF6tfluFRxGgnQtJQ8R2309aYX-8z8HzMxk_Qv8I,44340
-python/core/model_processing/onnx_custom_ops/__init__.py,sha256=
+python/core/model_processing/onnx_custom_ops/__init__.py,sha256=ZKUC4ToRxgEEMHcTyERATVEN0KSDs-9cM1T-tTw3I1g,525
 python/core/model_processing/onnx_custom_ops/batchnorm.py,sha256=8kg4iGGdt6B_fIJkpt4v5eNFpoHa4bjTB0NnCSmKFvE,1693
 python/core/model_processing/onnx_custom_ops/conv.py,sha256=6jJm3fcGWzcU4RjVgf179mPFCqsl4C3AR7bqQTffDgA,3464
 python/core/model_processing/onnx_custom_ops/custom_helpers.py,sha256=2WdnHw9NAoN_6wjIBoAQDyL6wEIlZOqo6ysCZp5DpZs,1844
@@ -26,29 +26,32 @@ python/core/model_processing/onnx_custom_ops/mul.py,sha256=w6X1sl1HnzoUJx2Mm_Lao
 python/core/model_processing/onnx_custom_ops/onnx_helpers.py,sha256=utnJuc5sgb_z1LgxuY9y2cQbMpdEJ8xOOrcP8DhfDCM,5686
 python/core/model_processing/onnx_custom_ops/relu.py,sha256=pZsPXC_r0FPggURKDphh8P1IRXY0w4hH7ExBmYTlWjE,1202
 python/core/model_processing/onnx_quantizer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-python/core/model_processing/onnx_quantizer/exceptions.py,sha256=
-python/core/model_processing/onnx_quantizer/onnx_op_quantizer.py,sha256=
+python/core/model_processing/onnx_quantizer/exceptions.py,sha256=vzxBRbpvk4ZZbgacDISnqmQQKj7Ls46V08ilHnhaJy0,5645
+python/core/model_processing/onnx_quantizer/onnx_op_quantizer.py,sha256=5I67frJn4j2T1LTvODHixQK4VaqazJFJ0T1BCvqLPgg,9655
 python/core/model_processing/onnx_quantizer/layers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 python/core/model_processing/onnx_quantizer/layers/add.py,sha256=AGxzqMa0jABIEKOIgPqEAA7EpZtynQtnD9nxI2NHc0s,1409
-python/core/model_processing/onnx_quantizer/layers/base.py,sha256=
+python/core/model_processing/onnx_quantizer/layers/base.py,sha256=zUUAZpXCtbxbhHzuYczTNZPe-xWr6TxpmoAIDe4kCo4,21176
 python/core/model_processing/onnx_quantizer/layers/batchnorm.py,sha256=KSBDPHd52f5Qyf-cnIDFPmfzssaJgMPiTmpIWEdM41U,7718
+python/core/model_processing/onnx_quantizer/layers/clip.py,sha256=HrhiLtqC3cIAvU0wRCqp8_8ZSFH8a3F1Jf_qkXlY44s,3043
 python/core/model_processing/onnx_quantizer/layers/constant.py,sha256=l1IvgvXkmFMiaBsym8wchPF-y1ZH-c5PmFUy92IXWok,3694
 python/core/model_processing/onnx_quantizer/layers/conv.py,sha256=TlUpCRO6PPqH7MPkIrEiEcVfzuiN1WMYEiNIjhYXtWM,4451
 python/core/model_processing/onnx_quantizer/layers/gemm.py,sha256=7fCUMv8OLVZ45a2lYjA2XNvcW3By7lSbX7zeForNK-0,3950
+python/core/model_processing/onnx_quantizer/layers/max.py,sha256=3gUxrdXwcVAtgR-_j4xQ0085Wj0oEBLT897TImxF2d4,1343
 python/core/model_processing/onnx_quantizer/layers/maxpool.py,sha256=PJ8hZPPBpfWV_RZdySl50-BU8TATjcg8Tg_mrAVS1Ic,4916
+python/core/model_processing/onnx_quantizer/layers/min.py,sha256=cQbXzGOApR6HUJZMARXy87W8IbUC562jnAQm8J8ynQI,1709
 python/core/model_processing/onnx_quantizer/layers/mul.py,sha256=qHsmnYPH-c5uiFeDCvV6e1xSgmIXJ64Sjvh0LYDYEqQ,1396
 python/core/model_processing/onnx_quantizer/layers/relu.py,sha256=d-5fyeKNLTgKKnqCwURpxkjl7QdbJQpuovtCFBM03FA,1685
 python/core/model_processing/onnx_quantizer/layers/sub.py,sha256=M7D98TZBNP9-2R9MX6mcpYlrWFxTiX9JCs3XNcg1U-Q,1409
 python/core/model_templates/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-python/core/model_templates/circuit_template.py,sha256=
+python/core/model_templates/circuit_template.py,sha256=OAqMRshi9OiJYoqpjkg5tUfNf18MfZmhsxxD6SANm_4,2106
 python/core/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 python/core/utils/benchmarking_helpers.py,sha256=0nT38SCrjP_BlvJODsc9twF9ZmIFg_1sAvSyeNfv4mQ,5235
 python/core/utils/constants.py,sha256=Qu5_6OUe1XIsL-IY5_4923eN7x1-SPv6ohQonztAobA,102
-python/core/utils/errors.py,sha256=
+python/core/utils/errors.py,sha256=Uf57cRKpot_u5Yr8HRmjLmInkdd_x0x5YpTGBncZgl4,3722
 python/core/utils/general_layer_functions.py,sha256=tg2WWhmR-4TlKn8OeCu1qNbLf8qdKVP3jl9mhZn_sTg,9781
 python/core/utils/helper_functions.py,sha256=3JwJa4wHoUBteukDw4bAetqMsQLeJ0_sJ0qIdKy7GCY,37097
 python/core/utils/model_registry.py,sha256=aZg_9LEqsBXK84oxQ8A3NGZl-9aGnLgfR-kgxkOwV50,4895
-python/core/utils/scratch_tests.py,sha256=
+python/core/utils/scratch_tests.py,sha256=o2VDTk8QBKA3UHHE-h7Ghtoge6kGG7G-8qwvesuTFFc,2281
 python/core/utils/witness_utils.py,sha256=ukvbF6EaHMPzRQVZad9wQ9gISRwBGQ1hEAHzc5TpGuw,9488
 python/frontend/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 python/frontend/cli.py,sha256=lkvhzQC6bv0AgWUypg_cH-JT574r89qgTIsgHDT9GRg,3106
@@ -71,29 +74,32 @@ python/scripts/gen_and_bench.py,sha256=V36x7djYmHlveAJgYzMlXwnmF0gAGO3-1mg9PWOmp
 python/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 python/tests/test_cli.py,sha256=OiAyG3aBpukk0i5FFWbiKaF42wf-7By-UWDHNjwtsqo,27042
 python/tests/circuit_e2e_tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-python/tests/circuit_e2e_tests/circuit_model_developer_test.py,sha256=
-python/tests/circuit_e2e_tests/helper_fns_for_tests.py,sha256=
+python/tests/circuit_e2e_tests/circuit_model_developer_test.py,sha256=8hl8SKw7obXplo0jsiKoKIZLxlu1_HhXvGDeSBDBars,39456
+python/tests/circuit_e2e_tests/helper_fns_for_tests.py,sha256=uEThqTsRdNJivHwAv-aJIUtSPlmVHdhMZqZSH1OqhDE,5177
 python/tests/circuit_e2e_tests/other_e2e_test.py,sha256=amWRa1tIBHdQpd9-XS7vBXG0tkdV_9K9fH-FT5LFh7E,11301
 python/tests/circuit_parent_classes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 python/tests/circuit_parent_classes/test_circuit.py,sha256=5vgcZHD2wY_pIRFNAhEZuBJD4uw2QyTck75Z9CJaACE,45968
 python/tests/circuit_parent_classes/test_onnx_converter.py,sha256=sJ0o8sducNUtmYKmsqfx7WEsIEd6oNbnWk71rXS_nIU,6575
-python/tests/circuit_parent_classes/test_ort_custom_layers.py,sha256=
+python/tests/circuit_parent_classes/test_ort_custom_layers.py,sha256=PBKjt6Mu3lRco4ijD2BLwAHPWRFic-OUwWPVsvBoEpU,3042
 python/tests/onnx_quantizer_tests/__init__.py,sha256=IZPGWHgjoay3gM1p2WJNh5cnZ79EP2VP-bcKy8AfJjY,18
 python/tests/onnx_quantizer_tests/test_base_layer.py,sha256=Ro7k-eUbGCyfIZ-OVNjLlCIz3mb02uHFWboFuWOdXKs,6526
 python/tests/onnx_quantizer_tests/test_exceptions.py,sha256=pwhARalEXx7REkcnIVZPi-4J1wgzgZN4xG-wLsx4rTs,3473
 python/tests/onnx_quantizer_tests/test_onnx_op_quantizer.py,sha256=m6mNe1KDRFIE2P0YURTIAim9-Di0BoPPAaaOOlorDIk,7367
-python/tests/onnx_quantizer_tests/test_registered_quantizers.py,sha256=
+python/tests/onnx_quantizer_tests/test_registered_quantizers.py,sha256=lw_jYSbQ9ZM9P-jSt_LFSuve9vQ22cLtSui0W3zGqpo,4209
 python/tests/onnx_quantizer_tests/testing_helper_functions.py,sha256=N0fQv2pYzUCVZ7wkcR8gEKs5zTXT1hWrK-HKSTQYvYU,534
 python/tests/onnx_quantizer_tests/layers/__init__.py,sha256=xP-RmW6LfIANgK1s9Q0KZet2yvNr-3c6YIVLAAQqGUY,404
 python/tests/onnx_quantizer_tests/layers/add_config.py,sha256=T3tGddupDtrvLck2SL2yETDblNtv0aU7Tl7fNyZUhO4,4133
-python/tests/onnx_quantizer_tests/layers/base.py,sha256=
+python/tests/onnx_quantizer_tests/layers/base.py,sha256=3nqmU2PgOdK_mPkz-YHg3idgr-PXYbu5kCIY-Uic5yo,9317
 python/tests/onnx_quantizer_tests/layers/batchnorm_config.py,sha256=P-sZuHAdEfNczcgTeLjqJnEbpqN3dKTsbqvY4-SBqiQ,8231
+python/tests/onnx_quantizer_tests/layers/clip_config.py,sha256=-OuhnUgz6xY4iW1jUR7W-J__Ie9lXI9vplmzp8qXqRc,4973
 python/tests/onnx_quantizer_tests/layers/constant_config.py,sha256=RdrKNMNZjI3Sk5o8WLNqmBUyYVJRWgtFbQ6oFWMwyQk,1193
 python/tests/onnx_quantizer_tests/layers/conv_config.py,sha256=H0ioW4H3ei5IK4tKhrA0ffThxJ4K5oO9jIs9A0T0VaM,6005
 python/tests/onnx_quantizer_tests/layers/factory.py,sha256=WLLEP9ECmSpTliSjhtdWOHcX1xOi6HM10S9Y4re1A74,4844
 python/tests/onnx_quantizer_tests/layers/flatten_config.py,sha256=Xln5Hh6gyeM5gGRCjLGvIL-u08NEs1tXSF32urCqPfE,2110
 python/tests/onnx_quantizer_tests/layers/gemm_config.py,sha256=t7nJY-Wnj6YUD821-jaWzgrQVPa6ytwER3hFMsvyY6Y,7294
+python/tests/onnx_quantizer_tests/layers/max_config.py,sha256=vzR8-2wbPGcH0GMmAJ_sXSEdMtZOjVNGufU__N3Jfyw,3906
 python/tests/onnx_quantizer_tests/layers/maxpool_config.py,sha256=XfTPk_ZQXEzaCjHHymSLVv2HS-PKH1rS9IuyyoEtM78,3176
+python/tests/onnx_quantizer_tests/layers/min_config.py,sha256=izKtCaMXoQHiAfmcGlJRQdKMQz3Su8n0p2mEn0y56Do,3774
 python/tests/onnx_quantizer_tests/layers/mul_config.py,sha256=_Oy4b97ORxFlF3w0BmJ94hNA968HQx2AvwYiASrGPxw,4135
 python/tests/onnx_quantizer_tests/layers/relu_config.py,sha256=_aHuddDApLUBOa0FiR9h4fNfmMSnH5r4JzOMLW0KaTk,2197
 python/tests/onnx_quantizer_tests/layers/reshape_config.py,sha256=fZchSqIAy76m7j97wVC_UI6slSpv8nbwukhkbGR2sRE,2203
@@ -103,14 +109,14 @@ python/tests/onnx_quantizer_tests/layers_tests/base_test.py,sha256=UgbcT97tgcuTt
 python/tests/onnx_quantizer_tests/layers_tests/test_check_model.py,sha256=Vxn4LEWHZeGa_vS1-7ptFqSSBb0D-3BG-ETocP4pvsI,3651
 python/tests/onnx_quantizer_tests/layers_tests/test_e2e.py,sha256=40779aaHgdryVwLlIO18F1d7uSLSXdJUG5Uj_5-xD4U,6712
 python/tests/onnx_quantizer_tests/layers_tests/test_error_cases.py,sha256=t5c_zqO4Ex3HIFWcykX4PTftdKN7UWnEOF5blShL0Ik,1881
-python/tests/onnx_quantizer_tests/layers_tests/test_integration.py,sha256=
-python/tests/onnx_quantizer_tests/layers_tests/test_quantize.py,sha256=
+python/tests/onnx_quantizer_tests/layers_tests/test_integration.py,sha256=xNt2STeXB33NcpteDThwGTSW1Hm15POf8a4aPBSVrvI,7254
+python/tests/onnx_quantizer_tests/layers_tests/test_quantize.py,sha256=DatmgvibQazP100B4NHDu7u-O2-f90juPKvPOXuPnXo,9491
 python/tests/onnx_quantizer_tests/layers_tests/test_scalability.py,sha256=RfnIIiYbgPbU3620H6MPvSxE3MNR2G1yPELwdWV3mK4,4107
 python/tests/onnx_quantizer_tests/layers_tests/test_validation.py,sha256=jz-WtIEP-jjUklOOAnznwPUXbf07U2PAMGrhzMWP0JU,1371
 python/tests/utils_testing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 python/tests/utils_testing/test_helper_functions.py,sha256=xmeGQieh4LE9U-CDKBlHhSWqH0cAmmDU3qXNbDkkvms,27192
-jstprove-1.
-jstprove-1.
-jstprove-1.
-jstprove-1.
-jstprove-1.
+jstprove-1.3.0.dist-info/METADATA,sha256=CqGuzrQWy_MUYtOcy-0i8mB_2eAKUvQ1R3tjEX-3N4o,14100
+jstprove-1.3.0.dist-info/WHEEL,sha256=jc2C2uw104ioj1TL9cE0YO67_kdAwX4W8JgYPomxr5M,105
+jstprove-1.3.0.dist-info/entry_points.txt,sha256=nGcTSO-4q08gPl1IoWdrPaiY7IbO7XvmXKkd34dYHc8,49
+jstprove-1.3.0.dist-info/top_level.txt,sha256=J-z0poNcsv31IHB413--iOY8LoHBKiTHeybHX3abokI,7
+jstprove-1.3.0.dist-info/RECORD,,

python/core/binaries/onnx_generic_circuit_1-3-0
Binary file
python/core/circuits/base.py
CHANGED

@@ -4,13 +4,12 @@ import logging
 from pathlib import Path
 from typing import TYPE_CHECKING, Any
 
-
+import numpy as np
 
 from python.core.utils.errors import ShapeMismatchError
 from python.core.utils.witness_utils import compare_witness_to_io, load_witness
 
 if TYPE_CHECKING:
-    import numpy as np
     import torch
 
     from python.core.circuits.errors import (
@@ -775,18 +774,18 @@ class Circuit:
     def reshape_inputs_for_inference(
         self: Circuit,
         inputs: dict[str],
-    ) -> ndarray | dict[str, ndarray]:
+    ) -> np.ndarray | dict[str, np.ndarray]:
         """
         Reshape input tensors to match the model's expected input shape.
 
         Parameters
         ----------
-        inputs : dict[str] or ndarray
+        inputs : dict[str] or np.ndarray
             Input tensors or a dictionary of tensors.
 
         Returns
         -------
-        ndarray or dict[str, ndarray]
+        np.ndarray or dict[str, np.ndarray]
             Reshaped input(s) ready for inference.
         """
 
@@ -801,15 +800,33 @@ class Circuit:
         if isinstance(inputs, dict):
             if len(inputs) == 1:
                 only_key = next(iter(inputs))
-
+                value = np.asarray(inputs[only_key])
+
+                # If shape is a dict, extract the shape for this key
+                if isinstance(shape, dict):
+                    key_shape = shape.get(only_key, None)
+                    if key_shape is None:
+                        raise CircuitConfigurationError(
+                            missing_attributes=[f"input_shape[{only_key!r}]"],
+                        )
+                    shape = key_shape
+
+                # From here on, treat it as a regular reshape
+                inputs = value
             else:
                 return self._reshape_dict_inputs(inputs, shape)
 
         # --- Regular reshape ---
+        if not isinstance(shape, (list, tuple)):
+            msg = (
+                f"Expected list or tuple shape for reshape, got {type(shape).__name__}"
+            )
+            raise CircuitInputError(msg)
+
         try:
-            return asarray(inputs).reshape(shape)
+            return np.asarray(inputs).reshape(shape)
         except Exception as e:
-            raise ShapeMismatchError(shape, list(asarray(inputs).shape)) from e
+            raise ShapeMismatchError(shape, list(np.asarray(inputs).shape)) from e
 
     def _reshape_dict_inputs(
         self: Circuit,
@@ -824,7 +841,7 @@ class Circuit:
             )
             raise CircuitInputError(msg, parameter="shape", expected="dict")
         for key, value in inputs.items():
-            tensor = asarray(value)
+            tensor = np.asarray(value)
             try:
                 inputs[key] = tensor.reshape(shape[key])
             except Exception as e:
@@ -867,16 +884,16 @@ class Circuit:
             value = inputs[key]
 
             # --- handle unsupported input types BEFORE entering try ---
-            if not isinstance(value, (ndarray, list, tuple)):
+            if not isinstance(value, (np.ndarray, list, tuple)):
                 msg = f"Unsupported input type for key '{key}': {type(value).__name__}"
                 raise CircuitProcessingError(message=msg)
 
             try:
                 # Convert to tensor, flatten, and back to list
-                if isinstance(value, ndarray):
+                if isinstance(value, np.ndarray):
                     flattened = value.flatten().tolist()
                 else:
-                    flattened = asarray(value).flatten().tolist()
+                    flattened = np.asarray(value).flatten().tolist()
             except Exception as e:
                 msg = f"Failed to flatten input '{key}' (type {type(value).__name__})"
                 raise CircuitProcessingError(message=msg) from e
python/core/circuits/errors.py
CHANGED

@@ -1,4 +1,3 @@
-# python/core/utils/exceptions.py
 from __future__ import annotations
 
 from python.core.utils.helper_functions import RunType
@@ -68,7 +67,7 @@ class CircuitInputError(CircuitError):
         actual (any): Actual value encountered (optional).
     """
 
-    def __init__(
+    def __init__(
         self: CircuitInputError,
         message: str | None = None,
         parameter: str | None = None,
python/core/model_processing/converters/base.py
CHANGED

@@ -2,7 +2,7 @@ from __future__ import annotations
 
 from abc import ABC, abstractmethod
 from enum import Enum
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING
 
 if TYPE_CHECKING:
     import numpy as np
@@ -16,10 +16,10 @@ class ModelType(Enum):
 
 ONNXLayerDict = dict[
     str,
-
+    int | str | list[str] | dict[str, list[int]] | list | None | dict,
 ]
 
-CircuitParamsDict = dict[str,
+CircuitParamsDict = dict[str, int | dict[str, bool]]
 
 
 class ModelConverter(ABC):
python/core/model_processing/onnx_custom_ops/__init__.py
CHANGED

@@ -1,16 +1,17 @@
 import importlib
 import pkgutil
-import
+from pathlib import Path
 
 # Get the package name of the current module
 package_name = __name__
 
 # Dynamically import all .py files in this package directory (except __init__.py)
-package_dir =
+package_dir = Path(__file__).parent.as_posix()
 
-
+
+__all__: list[str] = []
 
 for _, module_name, is_pkg in pkgutil.iter_modules([package_dir]):
     if not is_pkg and (module_name != "custom_helpers"):
         importlib.import_module(f"{package_name}.{module_name}")
-        __all__.append(module_name)
+        __all__.append(module_name)  # noqa: PYI056
python/core/model_processing/onnx_quantizer/exceptions.py
CHANGED

@@ -31,7 +31,7 @@ class InvalidParamError(QuantizationError):
     quantization the quantization process.
     """
 
-    def __init__(
+    def __init__(
         self: QuantizationError,
         node_name: str,
         op_type: str,
@@ -151,7 +151,7 @@ class InvalidConfigError(QuantizationError):
     def __init__(
         self: QuantizationError,
         key: str,
-        value: str | float | bool | None,
+        value: str | float | bool | None,  # noqa: FBT001
         expected: str | None = None,
     ) -> None:
         """Initialize InvalidConfigError with context about the bad config.
python/core/model_processing/onnx_quantizer/layers/base.py
CHANGED

@@ -418,6 +418,40 @@ class BaseOpQuantizer:
 
 
 class QuantizerBase:
+    """
+    Shared mixin implementing the generic INT64 quantization pipeline.
+
+    IMPORTANT:
+        QuantizerBase is *not* a standalone quantizer. It must always be
+        combined with BaseOpQuantizer via multiple inheritance:
+
+            class FooQuantizer(BaseOpQuantizer, QuantizeFoo):
+                ...
+
+        BaseOpQuantizer supplies required methods and attributes that
+        QuantizerBase relies on:
+          - add_scaled_initializer_inputs
+          - insert_scale_node
+          - get_scaling
+          - new_initializers (initializer buffer shared with converter)
+
+        If a subclass inherits QuantizerBase without BaseOpQuantizer,
+        QuantizerBase.quantize() will raise attribute errors at runtime.
+
+    This mixin centralizes:
+      - attribute extraction/merging
+      - optional initializer scaling (USE_WB + SCALE_PLAN)
+      - optional rescaling of outputs (USE_SCALING)
+      - creation of the final quantized NodeProto
+
+    The Quantize<Op> mixins should define:
+      - OP_TYPE
+      - DOMAIN
+      - USE_WB (bool)
+      - USE_SCALING (bool)
+      - SCALE_PLAN (dict[int,int]) if initializer scaling is enabled
+    """
+
     OP_TYPE = None
     DOMAIN = "ai.onnx.contrib"
     DEFAULT_ATTRS: ClassVar = {}
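
A minimal sketch of the inheritance pattern this docstring prescribes, using a hypothetical "Foo" op (attribute values are illustrative; the real instances of the pattern are the new clip.py, max.py, and min.py files below):

from typing import ClassVar

from python.core.model_processing.onnx_quantizer.layers.base import (
    BaseOpQuantizer,
    QuantizerBase,
)


class QuantizeFoo(QuantizerBase):      # hypothetical trait mixin
    OP_TYPE = "Foo"
    DOMAIN = ""                        # standard ONNX domain
    USE_WB = True                      # rescale initializer inputs
    USE_SCALING = False                # no extra output rescaling
    SCALE_PLAN: ClassVar = {1: 1}      # scale input slot 1 once by the global factor


class FooQuantizer(BaseOpQuantizer, QuantizeFoo):
    # BaseOpQuantizer supplies the helpers that QuantizerBase.quantize() relies on.
    def __init__(self, new_initializers=None) -> None:
        super().__init__()
        if new_initializers is not None:
            self.new_initializers = new_initializers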
python/core/model_processing/onnx_quantizer/layers/clip.py
ADDED

@@ -0,0 +1,92 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, ClassVar
+
+if TYPE_CHECKING:
+    import onnx
+
+from python.core.model_processing.onnx_quantizer.layers.base import (
+    BaseOpQuantizer,
+    QuantizerBase,
+    ScaleConfig,
+)
+
+
+class QuantizeClip(QuantizerBase):
+    """
+    Quantization traits for ONNX Clip.
+
+    Semantics:
+      - X is already scaled/cast to INT64 at the graph boundary by the converter.
+      - Clip is elementwise + broadcasting.
+      - The bound inputs (min, max) should live in the *same* fixed-point scale
+        as X so that Clip(alpha*x; alpha*a, alpha*b) matches the original Clip(x; a, b).
+
+    Implementation:
+      - Treat inputs 1 and 2 (min, max) like "WB-style" slots: we let the
+        QuantizerBase machinery rescale / cast those inputs using the same
+        global scale factor.
+      - No extra internal scaling input is added (USE_SCALING = False).
+    """
+
+    OP_TYPE = "Clip"
+    DOMAIN = ""  # standard ONNX domain
+
+    # We DO want WB-style handling so that min/max initializers get quantized:
+    USE_WB = True
+
+    # Clip does not introduce its own scale input; it just runs in the
+    # existing fixed-point scale.
+    USE_SCALING = False
+
+    # Scale-plan for WB-style slots:
+    #   - Input index 1: min
+    #   - Input index 2: max
+    # Each should be scaled once by the global alpha (same as activations).
+    SCALE_PLAN: ClassVar = {1: 1, 2: 1}
+
+
+class ClipQuantizer(BaseOpQuantizer, QuantizeClip):
+    """
+    Quantizer for ONNX Clip.
+
+    - Keeps the node op_type as "Clip".
+    - Ensures that any bound inputs (min, max), whether they are dynamic
+      inputs or initializers, are converted to the same INT64 fixed-point
+      representation as A.
+    """
+
+    def __init__(
+        self,
+        new_initializers: dict[str, onnx.TensorProto] | None = None,
+    ) -> None:
+        # Match Max/Min/Add: we simply share the new_initializers dict
+        # with the converter so any constants we add are collected.
+        self.new_initializers = new_initializers
+
+    def quantize(
+        self,
+        node: onnx.NodeProto,
+        graph: onnx.GraphProto,
+        scale_config: ScaleConfig,
+        initializer_map: dict[str, onnx.TensorProto],
+    ) -> list[onnx.NodeProto]:
+        # Delegate to the shared QuantizerBase logic, which will:
+        #   - keep X as-is (already scaled/cast by the converter),
+        #   - rescale / cast min/max according to SCALE_PLAN,
+        #   - update initializers as needed.
+        return QuantizeClip.quantize(self, node, graph, scale_config, initializer_map)
+
+    def check_supported(
+        self,
+        node: onnx.NodeProto,
+        initializer_map: dict[str, onnx.TensorProto] | None = None,
+    ) -> None:
+        """
+        Minimal support check for Clip:
+
+        - Clip is variadic elementwise with optional min/max as inputs or attrs.
+        - We accept both forms; if attrs are present, ORT enforces semantics.
+        - Broadcasting is ONNX-standard; we don't restrict further here.
+        """
+        _ = node, initializer_map
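
The scale-compatibility property the Clip docstring relies on can be checked numerically: clipping commutes with a positive uniform scale factor, which is why only the min/max bounds need to be moved into X's fixed-point scale (standalone sketch, not project code):

import numpy as np

alpha = 2**16                          # illustrative fixed-point scale factor
x = np.array([-1.5, 0.2, 3.7])
a, b = -1.0, 1.0                       # Clip bounds

float_clip = np.clip(x, a, b)
fixed_clip = np.clip(np.round(alpha * x), np.round(alpha * a), np.round(alpha * b))

# Clip(alpha*x; alpha*a, alpha*b) ≈ alpha * Clip(x; a, b), up to rounding error.
print(np.allclose(fixed_clip / alpha, float_clip, atol=1.0 / alpha))   # True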
python/core/model_processing/onnx_quantizer/layers/max.py
ADDED

@@ -0,0 +1,49 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, ClassVar
+
+if TYPE_CHECKING:
+    import onnx
+
+from python.core.model_processing.onnx_quantizer.layers.base import (
+    BaseOpQuantizer,
+    QuantizerBase,
+    ScaleConfig,
+)
+
+
+class QuantizeMax(QuantizerBase):
+    OP_TYPE = "Max"
+    DOMAIN = ""
+    USE_WB = True
+    USE_SCALING = False
+    SCALE_PLAN: ClassVar = {1: 1}
+
+
+class MaxQuantizer(BaseOpQuantizer, QuantizeMax):
+    def __init__(
+        self,
+        new_initializers: list[onnx.TensorProto] | None = None,
+    ) -> None:
+        super().__init__()
+        if new_initializers is not None:
+            # Share the caller-provided buffer instead of the default list.
+            self.new_initializers = new_initializers
+
+    def quantize(
+        self,
+        node: onnx.NodeProto,
+        graph: onnx.GraphProto,
+        scale_config: ScaleConfig,
+        initializer_map: dict[str, onnx.TensorProto],
+    ) -> list[onnx.NodeProto]:
+        # Delegate to the shared QuantizerBase logic
+        return QuantizeMax.quantize(self, node, graph, scale_config, initializer_map)
+
+    def check_supported(
+        self,
+        node: onnx.NodeProto,
+        initializer_map: dict[str, onnx.TensorProto] | None = None,
+    ) -> None:
+        # If later we want to enforce/relax broadcasting, add it here.
+        pass
python/core/model_processing/onnx_quantizer/layers/min.py
ADDED

@@ -0,0 +1,54 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, ClassVar
+
+if TYPE_CHECKING:
+    import onnx
+
+from python.core.model_processing.onnx_quantizer.layers.base import (
+    BaseOpQuantizer,
+    QuantizerBase,
+    ScaleConfig,
+)
+
+
+class QuantizeMin(QuantizerBase):
+    OP_TYPE = "Min"
+    DOMAIN = ""  # standard ONNX domain
+    USE_WB = True  # let framework wire inputs/outputs normally
+    USE_SCALING = False  # passthrough: no internal scaling
+    SCALE_PLAN: ClassVar = {1: 1}  # elementwise arity plan
+
+
+class MinQuantizer(BaseOpQuantizer, QuantizeMin):
+    """
+    Passthrough quantizer for elementwise Min.
+    We rely on the converter to quantize graph inputs; no extra scaling here.
+    """
+
+    def __init__(
+        self: MinQuantizer,
+        new_initializers: list[onnx.TensorProto] | None = None,
+    ) -> None:
+        super().__init__()
+        if new_initializers is not None:
+            self.new_initializers = new_initializers
+
+    def quantize(
+        self: MinQuantizer,
+        node: onnx.NodeProto,
+        graph: onnx.GraphProto,
+        scale_config: ScaleConfig,
+        initializer_map: dict[str, onnx.TensorProto],
+    ) -> list[onnx.NodeProto]:
+        # Delegate to QuantizerBase's generic passthrough implementation.
+        return QuantizeMin.quantize(self, node, graph, scale_config, initializer_map)
+
+    def check_supported(
+        self: MinQuantizer,
+        node: onnx.NodeProto,
+        initializer_map: dict[str, onnx.TensorProto] | None = None,
+    ) -> None:
+        # Min has no attributes; elementwise, variadic ≥ 1 input per ONNX spec.
+        # We mirror Add/Max broadcasting behavior; no extra checks here.
+        _ = node, initializer_map
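
Why Max and Min can be passthroughs with no rescaling node: elementwise max/min commute with any positive scale factor, so once all operands share the same fixed-point scale the result is already correctly scaled (standalone sketch, not project code):

import numpy as np

alpha = 2**16                        # illustrative positive scale factor
x = np.array([1.25, -0.5, 3.0])
y = np.array([0.75, 2.0, -1.0])

# max(alpha*x, alpha*y) == alpha * max(x, y), and likewise for min, when alpha > 0.
print(np.array_equal(np.maximum(alpha * x, alpha * y), alpha * np.maximum(x, y)))   # True
print(np.array_equal(np.minimum(alpha * x, alpha * y), alpha * np.minimum(x, y)))   # True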
python/core/model_processing/onnx_quantizer/onnx_op_quantizer.py
CHANGED

@@ -20,12 +20,15 @@ from python.core.model_processing.onnx_quantizer.layers.base import (
 from python.core.model_processing.onnx_quantizer.layers.batchnorm import (
     BatchnormQuantizer,
 )
+from python.core.model_processing.onnx_quantizer.layers.clip import ClipQuantizer
 from python.core.model_processing.onnx_quantizer.layers.constant import (
     ConstantQuantizer,
 )
 from python.core.model_processing.onnx_quantizer.layers.conv import ConvQuantizer
 from python.core.model_processing.onnx_quantizer.layers.gemm import GemmQuantizer
+from python.core.model_processing.onnx_quantizer.layers.max import MaxQuantizer
 from python.core.model_processing.onnx_quantizer.layers.maxpool import MaxpoolQuantizer
+from python.core.model_processing.onnx_quantizer.layers.min import MinQuantizer
 from python.core.model_processing.onnx_quantizer.layers.mul import MulQuantizer
 from python.core.model_processing.onnx_quantizer.layers.relu import ReluQuantizer
 from python.core.model_processing.onnx_quantizer.layers.sub import SubQuantizer
@@ -74,6 +77,7 @@ class ONNXOpQuantizer:
 
         # Register handlers
         self.register("Add", AddQuantizer(self.new_initializers))
+        self.register("Clip", ClipQuantizer(self.new_initializers))
         self.register("Sub", SubQuantizer(self.new_initializers))
         self.register("Mul", MulQuantizer(self.new_initializers))
         self.register("Conv", ConvQuantizer(self.new_initializers))
@@ -83,6 +87,8 @@ class ONNXOpQuantizer:
         self.register("Constant", ConstantQuantizer())
         self.register("MaxPool", MaxpoolQuantizer())
         self.register("Flatten", PassthroughQuantizer())
+        self.register("Max", MaxQuantizer(self.new_initializers))
+        self.register("Min", MinQuantizer(self.new_initializers))
         self.register("BatchNormalization", BatchnormQuantizer(self.new_initializers))
 
     def register(