onnx 1.15.0__cp39-cp39-win32.whl → 1.16.1__cp39-cp39-win32.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of onnx might be problematic.
- onnx/__init__.py +10 -10
- onnx/backend/base.py +13 -14
- onnx/backend/sample/ops/abs.py +1 -1
- onnx/backend/test/case/model/__init__.py +0 -1
- onnx/backend/test/case/node/ai_onnx_ml/tree_ensemble.py +122 -0
- onnx/backend/test/case/node/averagepool.py +15 -30
- onnx/backend/test/case/node/cast.py +88 -11
- onnx/backend/test/case/node/dequantizelinear.py +155 -0
- onnx/backend/test/case/node/groupnormalization.py +13 -9
- onnx/backend/test/case/node/gru.py +2 -2
- onnx/backend/test/case/node/isinf.py +4 -4
- onnx/backend/test/case/node/isnan.py +2 -2
- onnx/backend/test/case/node/lppool.py +8 -16
- onnx/backend/test/case/node/lstm.py +1 -1
- onnx/backend/test/case/node/maxpool.py +40 -34
- onnx/backend/test/case/node/pow.py +1 -1
- onnx/backend/test/case/node/qlinearmatmul.py +143 -109
- onnx/backend/test/case/node/quantizelinear.py +298 -7
- onnx/backend/test/case/node/reducemax.py +26 -0
- onnx/backend/test/case/node/rnn.py +1 -1
- onnx/backend/test/case/node/scan.py +6 -2
- onnx/backend/test/case/node/scatterelements.py +1 -1
- onnx/backend/test/case/node/topk.py +1 -1
- onnx/backend/test/case/utils.py +1 -3
- onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_set_membership/model.onnx +0 -0
- onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_set_membership/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_set_membership/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_single_tree/model.onnx +0 -0
- onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_single_tree/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_single_tree/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_BFLOAT16_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_DOUBLE_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_DOUBLE_to_FLOAT16/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_DOUBLE/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FN/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FN/test_data_set_0/input_0.pb +2 -2
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FN/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FNUZ/test_data_set_0/input_0.pb +2 -2
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FNUZ/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2/test_data_set_0/input_0.pb +2 -2
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2FNUZ/test_data_set_0/input_0.pb +2 -2
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2FNUZ/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_INT4/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_INT4/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_INT4/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_UINT4/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_UINT4/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_UINT4/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT16/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT16/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT16/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT16/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT16/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT16/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT16/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT16/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_BFLOAT16/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_DOUBLE/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT16/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FN/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_INT4/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_INT4/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_INT4/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_STRING/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_UINT4/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_UINT4/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_UINT4/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_INT4_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_INT4_to_FLOAT/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_cast_INT4_to_FLOAT/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_INT4_to_FLOAT16/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_INT4_to_FLOAT16/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_cast_INT4_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_INT4_to_INT8/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_INT4_to_INT8/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_cast_INT4_to_INT8/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_STRING_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_UINT4_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_UINT4_to_FLOAT/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_UINT4_to_FLOAT/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_UINT4_to_FLOAT16/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_UINT4_to_FLOAT16/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_UINT4_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_UINT4_to_UINT8/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_UINT4_to_UINT8/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_UINT4_to_UINT8/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FN/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FN/test_data_set_0/input_0.pb +2 -2
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FN/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FNUZ/test_data_set_0/input_0.pb +2 -2
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FNUZ/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2/test_data_set_0/input_0.pb +2 -2
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2FNUZ/test_data_set_0/input_0.pb +2 -2
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2FNUZ/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FN/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_BFLOAT16_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_BFLOAT16_to_FLOAT_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_DOUBLE_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_DOUBLE_to_FLOAT16/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_DOUBLE_to_FLOAT16_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_DOUBLE_to_FLOAT_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT16_to_DOUBLE/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT16_to_DOUBLE_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT16_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT16_to_FLOAT_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_BFLOAT16/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_BFLOAT16_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_DOUBLE/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_DOUBLE_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT16/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT16_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_STRING/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_STRING_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_STRING_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_STRING_to_FLOAT_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_constant/model.onnx +0 -0
- onnx/backend/test/data/node/test_constant_pad/model.onnx +0 -0
- onnx/backend/test/data/node/test_constant_pad_axes/model.onnx +0 -0
- onnx/backend/test/data/node/test_constant_pad_negative_axes/model.onnx +0 -0
- onnx/backend/test/data/node/test_constantofshape_float_ones/model.onnx +0 -0
- onnx/backend/test/data/node/test_constantofshape_int_shape_zero/model.onnx +0 -0
- onnx/backend/test/data/node/test_constantofshape_int_zeros/model.onnx +0 -0
- onnx/backend/test/data/node/test_dequantizelinear/model.onnx +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_axis/model.onnx +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_blocked/model.onnx +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_blocked/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_dequantizelinear_blocked/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_blocked/test_data_set_0/input_2.pb +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_blocked/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_e4m3fn/model.onnx +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_e4m3fn_float16/model.onnx +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_e4m3fn_zero_point/model.onnx +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_e5m2/model.onnx +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_int16/model.onnx +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_int16/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_dequantizelinear_int16/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_int16/test_data_set_0/input_2.pb +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_int16/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_int4/model.onnx +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_int4/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_dequantizelinear_int4/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_int4/test_data_set_0/input_2.pb +1 -0
- onnx/backend/test/data/node/test_dequantizelinear_int4/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_uint16/model.onnx +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_uint16/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_uint16/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_uint16/test_data_set_0/input_2.pb +1 -0
- onnx/backend/test/data/node/test_dequantizelinear_uint16/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_uint4/model.onnx +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_uint4/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_dequantizelinear_uint4/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_uint4/test_data_set_0/input_2.pb +1 -0
- onnx/backend/test/data/node/test_dequantizelinear_uint4/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_edge_pad/model.onnx +0 -0
- onnx/backend/test/data/node/test_flatten_axis0/model.onnx +0 -0
- onnx/backend/test/data/node/test_flatten_axis1/model.onnx +0 -0
- onnx/backend/test/data/node/test_flatten_axis2/model.onnx +0 -0
- onnx/backend/test/data/node/test_flatten_axis3/model.onnx +0 -0
- onnx/backend/test/data/node/test_flatten_default_axis/model.onnx +0 -0
- onnx/backend/test/data/node/test_flatten_negative_axis1/model.onnx +0 -0
- onnx/backend/test/data/node/test_flatten_negative_axis2/model.onnx +0 -0
- onnx/backend/test/data/node/test_flatten_negative_axis3/model.onnx +0 -0
- onnx/backend/test/data/node/test_flatten_negative_axis4/model.onnx +0 -0
- onnx/backend/test/data/node/test_group_normalization_epsilon/model.onnx +0 -0
- onnx/backend/test/data/node/test_group_normalization_epsilon/test_data_set_0/input_0.pb +1 -1
- onnx/backend/test/data/node/test_group_normalization_epsilon/test_data_set_0/input_1.pb +1 -1
- onnx/backend/test/data/node/test_group_normalization_epsilon/test_data_set_0/input_2.pb +1 -1
- onnx/backend/test/data/node/test_group_normalization_epsilon/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_group_normalization_epsilon_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_group_normalization_epsilon_expanded/test_data_set_0/input_0.pb +1 -1
- onnx/backend/test/data/node/test_group_normalization_epsilon_expanded/test_data_set_0/input_1.pb +1 -1
- onnx/backend/test/data/node/test_group_normalization_epsilon_expanded/test_data_set_0/input_2.pb +1 -1
- onnx/backend/test/data/node/test_group_normalization_epsilon_expanded/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_group_normalization_example/model.onnx +0 -0
- onnx/backend/test/data/node/test_group_normalization_example/test_data_set_0/input_1.pb +1 -1
- onnx/backend/test/data/node/test_group_normalization_example/test_data_set_0/input_2.pb +1 -1
- onnx/backend/test/data/node/test_group_normalization_example/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_group_normalization_example_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_group_normalization_example_expanded/test_data_set_0/input_1.pb +1 -1
- onnx/backend/test/data/node/test_group_normalization_example_expanded/test_data_set_0/input_2.pb +1 -1
- onnx/backend/test/data/node/test_group_normalization_example_expanded/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_identity/model.onnx +0 -0
- onnx/backend/test/data/node/test_identity_sequence/model.onnx +0 -0
- onnx/backend/test/data/node/test_lrn_default/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_maxpool_2d_ceil_output_size_reduce_by_one/model.onnx +0 -0
- onnx/backend/test/data/node/test_maxpool_2d_ceil_output_size_reduce_by_one/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_maxpool_2d_ceil_output_size_reduce_by_one/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_mvn/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_mvn_expanded/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_mvn_expanded_ver18/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_pow/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float16/model.onnx +0 -0
- onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float16/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float16/test_data_set_0/input_1.pb +2 -0
- onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float16/test_data_set_0/input_2.pb +1 -0
- onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float16/test_data_set_0/input_3.pb +0 -0
- onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float16/test_data_set_0/input_4.pb +2 -0
- onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float16/test_data_set_0/input_5.pb +1 -0
- onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float16/test_data_set_0/input_6.pb +2 -0
- onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float16/test_data_set_0/input_7.pb +1 -0
- onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float16/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float32/model.onnx +0 -0
- onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float32/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float32/test_data_set_0/input_2.pb +1 -0
- onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float32/test_data_set_0/input_3.pb +0 -0
- onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float32/test_data_set_0/input_5.pb +1 -0
- onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float32/test_data_set_0/input_7.pb +1 -0
- onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float32/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_qlinearmatmul_2D_uint8_float16/model.onnx +0 -0
- onnx/backend/test/data/node/test_qlinearmatmul_2D_uint8_float16/test_data_set_0/input_1.pb +2 -0
- onnx/backend/test/data/node/test_qlinearmatmul_2D_uint8_float16/test_data_set_0/input_4.pb +2 -0
- onnx/backend/test/data/node/test_qlinearmatmul_2D_uint8_float16/test_data_set_0/input_6.pb +2 -0
- onnx/backend/test/data/node/{test_qlinearmatmul_2D → test_qlinearmatmul_2D_uint8_float32}/model.onnx +0 -0
- onnx/backend/test/data/node/test_qlinearmatmul_2D_uint8_float32/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_qlinearmatmul_2D_uint8_float32/test_data_set_0/input_3.pb +0 -0
- onnx/backend/test/data/node/test_qlinearmatmul_2D_uint8_float32/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float16/model.onnx +0 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float16/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float16/test_data_set_0/input_1.pb +2 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float16/test_data_set_0/input_2.pb +1 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float16/test_data_set_0/input_3.pb +0 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float16/test_data_set_0/input_4.pb +2 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float16/test_data_set_0/input_5.pb +1 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float16/test_data_set_0/input_6.pb +2 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float16/test_data_set_0/input_7.pb +1 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float16/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float32/model.onnx +0 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float32/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float32/test_data_set_0/input_1.pb +1 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float32/test_data_set_0/input_2.pb +1 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float32/test_data_set_0/input_3.pb +0 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float32/test_data_set_0/input_4.pb +1 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float32/test_data_set_0/input_5.pb +1 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float32/test_data_set_0/input_6.pb +1 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float32/test_data_set_0/input_7.pb +1 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float32/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float16/model.onnx +0 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float16/test_data_set_0/input_1.pb +2 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float16/test_data_set_0/input_2.pb +1 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float16/test_data_set_0/input_4.pb +2 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float16/test_data_set_0/input_5.pb +1 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float16/test_data_set_0/input_6.pb +2 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float16/test_data_set_0/input_7.pb +1 -0
- onnx/backend/test/data/node/{test_qlinearmatmul_3D → test_qlinearmatmul_3D_uint8_float32}/model.onnx +0 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float32/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float32/test_data_set_0/input_1.pb +1 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float32/test_data_set_0/input_2.pb +1 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float32/test_data_set_0/input_3.pb +0 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float32/test_data_set_0/input_4.pb +1 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float32/test_data_set_0/input_5.pb +1 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float32/test_data_set_0/input_6.pb +1 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float32/test_data_set_0/input_7.pb +1 -0
- onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float32/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_quantizelinear/model.onnx +0 -0
- onnx/backend/test/data/node/test_quantizelinear_axis/model.onnx +0 -0
- onnx/backend/test/data/node/test_quantizelinear_blocked_asymmetric/model.onnx +0 -0
- onnx/backend/test/data/node/test_quantizelinear_blocked_asymmetric/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_blocked_asymmetric/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_blocked_asymmetric/test_data_set_0/input_2.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_blocked_asymmetric/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_quantizelinear_blocked_symmetric/model.onnx +0 -0
- onnx/backend/test/data/node/test_quantizelinear_blocked_symmetric/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_blocked_symmetric/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_blocked_symmetric/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_e4m3fn/model.onnx +0 -0
- onnx/backend/test/data/node/test_quantizelinear_e4m3fn/test_data_set_0/input_2.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_e4m3fn/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_e5m2/model.onnx +0 -0
- onnx/backend/test/data/node/test_quantizelinear_e5m2/test_data_set_0/input_2.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_e5m2/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_int16/model.onnx +0 -0
- onnx/backend/test/data/node/test_quantizelinear_int16/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_int16/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_int16/test_data_set_0/input_2.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_int16/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_int4/model.onnx +0 -0
- onnx/backend/test/data/node/test_quantizelinear_int4/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_int4/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_int4/test_data_set_0/input_2.pb +1 -0
- onnx/backend/test/data/node/test_quantizelinear_int4/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_quantizelinear_uint16/model.onnx +0 -0
- onnx/backend/test/data/node/test_quantizelinear_uint16/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_uint16/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_uint16/test_data_set_0/input_2.pb +1 -0
- onnx/backend/test/data/node/test_quantizelinear_uint16/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_uint4/model.onnx +0 -0
- onnx/backend/test/data/node/test_quantizelinear_uint4/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_uint4/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_uint4/test_data_set_0/input_2.pb +1 -0
- onnx/backend/test/data/node/test_quantizelinear_uint4/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_reflect_pad/model.onnx +0 -0
- onnx/backend/test/data/node/test_reshape_allowzero_reordered/model.onnx +0 -0
- onnx/backend/test/data/node/test_reshape_extended_dims/model.onnx +0 -0
- onnx/backend/test/data/node/test_reshape_negative_dim/model.onnx +0 -0
- onnx/backend/test/data/node/test_reshape_negative_extended_dims/model.onnx +0 -0
- onnx/backend/test/data/node/test_reshape_one_dim/model.onnx +0 -0
- onnx/backend/test/data/node/test_reshape_reduced_dims/model.onnx +0 -0
- onnx/backend/test/data/node/test_reshape_reordered_all_dims/model.onnx +0 -0
- onnx/backend/test/data/node/test_reshape_reordered_last_dims/model.onnx +0 -0
- onnx/backend/test/data/node/test_reshape_zero_and_negative_dim/model.onnx +0 -0
- onnx/backend/test/data/node/test_reshape_zero_dim/model.onnx +0 -0
- onnx/backend/test/data/node/test_shape/model.onnx +0 -0
- onnx/backend/test/data/node/test_shape_clip_end/model.onnx +0 -0
- onnx/backend/test/data/node/test_shape_clip_start/model.onnx +0 -0
- onnx/backend/test/data/node/test_shape_end_1/model.onnx +0 -0
- onnx/backend/test/data/node/test_shape_end_negative_1/model.onnx +0 -0
- onnx/backend/test/data/node/test_shape_example/model.onnx +0 -0
- onnx/backend/test/data/node/test_shape_start_1/model.onnx +0 -0
- onnx/backend/test/data/node/test_shape_start_1_end_2/model.onnx +0 -0
- onnx/backend/test/data/node/test_shape_start_1_end_negative_1/model.onnx +0 -0
- onnx/backend/test/data/node/test_shape_start_negative_1/model.onnx +0 -0
- onnx/backend/test/data/node/test_size/model.onnx +0 -0
- onnx/backend/test/data/node/test_size_example/model.onnx +0 -0
- onnx/backend/test/data/node/test_squeeze/model.onnx +0 -0
- onnx/backend/test/data/node/test_squeeze_negative_axes/model.onnx +0 -0
- onnx/backend/test/data/node/test_transpose_all_permutations_0/model.onnx +0 -0
- onnx/backend/test/data/node/test_transpose_all_permutations_1/model.onnx +0 -0
- onnx/backend/test/data/node/test_transpose_all_permutations_2/model.onnx +0 -0
- onnx/backend/test/data/node/test_transpose_all_permutations_3/model.onnx +0 -0
- onnx/backend/test/data/node/test_transpose_all_permutations_4/model.onnx +0 -0
- onnx/backend/test/data/node/test_transpose_all_permutations_5/model.onnx +0 -0
- onnx/backend/test/data/node/test_transpose_default/model.onnx +0 -0
- onnx/backend/test/data/node/test_unsqueeze_axis_0/model.onnx +0 -0
- onnx/backend/test/data/node/test_unsqueeze_axis_1/model.onnx +0 -0
- onnx/backend/test/data/node/test_unsqueeze_axis_2/model.onnx +0 -0
- onnx/backend/test/data/node/test_unsqueeze_negative_axes/model.onnx +0 -0
- onnx/backend/test/data/node/test_unsqueeze_three_axes/model.onnx +0 -0
- onnx/backend/test/data/node/test_unsqueeze_two_axes/model.onnx +0 -0
- onnx/backend/test/data/node/test_unsqueeze_unsorted_axes/model.onnx +0 -0
- onnx/backend/test/data/node/test_wrap_pad/model.onnx +0 -0
- onnx/backend/test/loader/__init__.py +0 -1
- onnx/backend/test/runner/__init__.py +43 -15
- onnx/checker.cc +104 -99
- onnx/checker.h +23 -3
- onnx/checker.py +56 -20
- onnx/common/assertions.cc +10 -5
- onnx/common/common.h +19 -0
- onnx/common/file_utils.h +3 -1
- onnx/common/interned_strings.h +7 -1
- onnx/common/ir.h +30 -7
- onnx/common/ir_pb_converter.cc +6 -0
- onnx/common/path.h +18 -2
- onnx/common/proto_util.h +43 -0
- onnx/common/version.h +1 -1
- onnx/cpp2py_export.cc +88 -56
- onnx/defs/__init__.py +29 -8
- onnx/defs/controlflow/defs.cc +16 -16
- onnx/defs/controlflow/old.cc +177 -0
- onnx/defs/data_propagators.h +2 -0
- onnx/defs/data_type_utils.cc +2 -0
- onnx/defs/generator/defs.cc +6 -4
- onnx/defs/generator/old.cc +115 -0
- onnx/defs/math/defs.cc +37 -142
- onnx/defs/math/old.cc +96 -12
- onnx/defs/math/utils.cc +127 -0
- onnx/defs/math/utils.h +8 -0
- onnx/defs/nn/defs.cc +72 -59
- onnx/defs/nn/old.cc +181 -2
- onnx/defs/object_detection/defs.cc +2 -2
- onnx/defs/object_detection/old.cc +2 -2
- onnx/defs/operator_sets.h +51 -0
- onnx/defs/operator_sets_ml.h +14 -0
- onnx/defs/parser.cc +112 -54
- onnx/defs/parser.h +14 -2
- onnx/defs/printer.cc +14 -7
- onnx/defs/quantization/defs.cc +111 -44
- onnx/defs/quantization/old.cc +130 -1
- onnx/defs/schema.cc +62 -18
- onnx/defs/schema.h +194 -48
- onnx/defs/shape_inference.cc +28 -19
- onnx/defs/shape_inference.h +2 -0
- onnx/defs/tensor/defs.cc +54 -96
- onnx/defs/tensor/old.cc +939 -34
- onnx/defs/tensor/utils.cc +6 -3
- onnx/defs/tensor/utils.h +5 -1
- onnx/defs/tensor_proto_util.cc +2 -0
- onnx/defs/tensor_util.cc +2 -0
- onnx/defs/traditionalml/defs.cc +273 -117
- onnx/defs/traditionalml/old.cc +329 -14
- onnx/defs/traditionalml/utils.h +27 -0
- onnx/external_data_helper.py +12 -26
- onnx/helper.py +242 -169
- onnx/hub.py +104 -70
- onnx/inliner/inliner.cc +89 -31
- onnx/inliner/inliner.h +5 -0
- onnx/inliner.py +2 -0
- onnx/mapping.py +9 -0
- onnx/model_container.py +346 -0
- onnx/numpy_helper.py +100 -38
- onnx/onnx-ml.proto +50 -13
- onnx/onnx.in.proto +50 -13
- onnx/onnx.proto +50 -13
- onnx/onnx_cpp2py_export/__init__.pyi +5 -0
- onnx/onnx_cpp2py_export/checker.pyi +21 -0
- onnx/onnx_cpp2py_export/defs.pyi +202 -0
- onnx/onnx_cpp2py_export/inliner.pyi +19 -0
- onnx/onnx_cpp2py_export/parser.pyi +32 -0
- onnx/onnx_cpp2py_export/printer.pyi +3 -0
- onnx/onnx_cpp2py_export/shape_inference.pyi +16 -0
- onnx/onnx_cpp2py_export/version_converter.pyi +4 -0
- onnx/onnx_cpp2py_export.cp39-win32.pyd +0 -0
- onnx/onnx_data_pb2.pyi +146 -0
- onnx/onnx_ml_pb2.py +52 -52
- onnx/onnx_ml_pb2.pyi +663 -0
- onnx/onnx_operators_ml_pb2.pyi +67 -0
- onnx/reference/__init__.py +2 -0
- onnx/reference/custom_element_types.py +2 -0
- onnx/reference/op_run.py +166 -121
- onnx/reference/ops/_op.py +27 -50
- onnx/reference/ops/_op_list.py +36 -24
- onnx/reference/ops/aionnx_preview_training/_op_list.py +15 -8
- onnx/reference/ops/aionnxml/_common_classifier.py +3 -5
- onnx/reference/ops/aionnxml/_op_list.py +16 -8
- onnx/reference/ops/aionnxml/op_array_feature_extractor.py +4 -6
- onnx/reference/ops/aionnxml/op_linear_classifier.py +1 -2
- onnx/reference/ops/aionnxml/op_normalizer.py +3 -3
- onnx/reference/ops/aionnxml/op_svm_helper.py +1 -3
- onnx/reference/ops/aionnxml/op_svm_regressor.py +1 -3
- onnx/reference/ops/aionnxml/op_tree_ensemble.py +257 -0
- onnx/reference/ops/aionnxml/op_tree_ensemble_helper.py +2 -6
- onnx/reference/ops/aionnxml/op_tree_ensemble_regressor.py +4 -4
- onnx/reference/ops/experimental/_op_list.py +15 -8
- onnx/reference/ops/op_blackman_window.py +5 -6
- onnx/reference/ops/op_cast.py +22 -0
- onnx/reference/ops/op_cast_like.py +6 -0
- onnx/reference/ops/op_clip.py +5 -8
- onnx/reference/ops/op_col2im.py +1 -3
- onnx/reference/ops/op_constant.py +7 -1
- onnx/reference/ops/op_dequantize_linear.py +43 -40
- onnx/reference/ops/op_det.py +1 -1
- onnx/reference/ops/op_dynamic_quantize_linear.py +2 -2
- onnx/reference/ops/op_grid_sample.py +2 -4
- onnx/reference/ops/op_hamming_window.py +3 -6
- onnx/reference/ops/op_hann_window.py +3 -6
- onnx/reference/ops/op_if.py +4 -3
- onnx/reference/ops/op_loop.py +7 -9
- onnx/reference/ops/op_matmul.py +1 -2
- onnx/reference/ops/op_max_pool.py +5 -0
- onnx/reference/ops/op_optional.py +1 -1
- onnx/reference/ops/op_pool_common.py +3 -6
- onnx/reference/ops/op_qlinear_matmul.py +2 -2
- onnx/reference/ops/op_quantize_linear.py +166 -71
- onnx/reference/ops/op_resize.py +25 -21
- onnx/reference/ops/op_rnn.py +20 -12
- onnx/reference/ops/op_scan.py +23 -15
- onnx/reference/ops/op_scatter_elements.py +7 -6
- onnx/reference/ops/op_stft.py +3 -5
- onnx/reference/ops/op_string_normalizer.py +7 -7
- onnx/reference/ops/op_tfidf_vectorizer.py +7 -8
- onnx/reference/ops/op_topk.py +9 -11
- onnx/reference/ops/op_unique.py +1 -1
- onnx/reference/reference_evaluator.py +119 -63
- onnx/shape_inference/implementation.cc +160 -127
- onnx/shape_inference.py +11 -10
- onnx/subbyte.py +72 -0
- onnx/test/__init__.pyi +6 -0
- onnx/test/checker_test.py +21 -1
- onnx/test/compose_test.py +26 -74
- onnx/test/cpp/inliner_test.cc +76 -1
- onnx/test/cpp/ir_test.cc +60 -0
- onnx/test/cpp/parser_test.cc +106 -0
- onnx/test/function_test.py +1 -3
- onnx/test/helper_test.py +64 -4
- onnx/test/model_container_refeval_test.py +139 -0
- onnx/test/model_container_test.py +136 -0
- onnx/test/model_inference_test.py +44 -0
- onnx/test/reference_evaluator_ml_test.py +448 -47
- onnx/test/reference_evaluator_model_test.py +130 -0
- onnx/test/reference_evaluator_test.py +901 -14
- onnx/test/schema_test.py +166 -1
- onnx/test/shape_inference_test.py +285 -6
- onnx/test/symbolic_shape_test.py +3 -8
- onnx/test/test_backend_onnxruntime.py +238 -224
- onnx/test/test_backend_reference.py +11 -0
- onnx/test/test_external_data.py +51 -2
- onnx/test/version_converter/automatic_conversion_test_base.py +2 -1
- onnx/test/version_converter/automatic_upgrade_test.py +12 -10
- onnx/test/version_converter_test.py +166 -0
- onnx/tools/replace_constants.py +23 -26
- onnx/tools/update_model_dims.py +1 -2
- onnx/version.py +2 -2
- onnx/version_converter/adapters/group_normalization_20_21.h +128 -0
- onnx/version_converter/adapters/q_dq_21_20.h +77 -0
- onnx/version_converter/convert.h +67 -2
- onnx/version_converter.py +6 -142
- {onnx-1.15.0.dist-info → onnx-1.16.1.dist-info}/METADATA +18 -15
- {onnx-1.15.0.dist-info → onnx-1.16.1.dist-info}/RECORD +572 -406
- {onnx-1.15.0.dist-info → onnx-1.16.1.dist-info}/WHEEL +1 -1
- onnx/examples/Protobufs.ipynb +0 -639
- onnx/examples/check_model.ipynb +0 -128
- onnx/examples/load_model.ipynb +0 -116
- onnx/examples/make_model.ipynb +0 -176
- onnx/examples/np_array_tensorproto.ipynb +0 -136
- onnx/examples/resources/single_relu.onnx +0 -12
- onnx/examples/resources/single_relu_new.onnx +0 -12
- onnx/examples/resources/tensor.pb +0 -0
- onnx/examples/resources/two_transposes.onnx +0 -0
- onnx/examples/save_model.ipynb +0 -56
- onnx/examples/shape_inference.ipynb +0 -111
- onnx/test/reference_evaluator_backend_test.py +0 -876
- /onnx/backend/test/data/node/{test_qlinearmatmul_2D → test_qlinearmatmul_2D_int8_float32}/test_data_set_0/input_1.pb +0 -0
- /onnx/backend/test/data/node/{test_qlinearmatmul_2D → test_qlinearmatmul_2D_int8_float32}/test_data_set_0/input_4.pb +0 -0
- /onnx/backend/test/data/node/{test_qlinearmatmul_2D → test_qlinearmatmul_2D_int8_float32}/test_data_set_0/input_6.pb +0 -0
- /onnx/backend/test/data/node/{test_qlinearmatmul_2D → test_qlinearmatmul_2D_uint8_float16}/test_data_set_0/input_0.pb +0 -0
- /onnx/backend/test/data/node/{test_qlinearmatmul_2D → test_qlinearmatmul_2D_uint8_float16}/test_data_set_0/input_2.pb +0 -0
- /onnx/backend/test/data/node/{test_qlinearmatmul_2D → test_qlinearmatmul_2D_uint8_float16}/test_data_set_0/input_3.pb +0 -0
- /onnx/backend/test/data/node/{test_qlinearmatmul_2D → test_qlinearmatmul_2D_uint8_float16}/test_data_set_0/input_5.pb +0 -0
- /onnx/backend/test/data/node/{test_qlinearmatmul_2D → test_qlinearmatmul_2D_uint8_float16}/test_data_set_0/input_7.pb +0 -0
- /onnx/backend/test/data/node/{test_qlinearmatmul_2D → test_qlinearmatmul_2D_uint8_float16}/test_data_set_0/output_0.pb +0 -0
- /onnx/backend/test/data/node/{test_qlinearmatmul_3D → test_qlinearmatmul_2D_uint8_float32}/test_data_set_0/input_1.pb +0 -0
- /onnx/backend/test/data/node/{test_qlinearmatmul_3D → test_qlinearmatmul_2D_uint8_float32}/test_data_set_0/input_2.pb +0 -0
- /onnx/backend/test/data/node/{test_qlinearmatmul_3D → test_qlinearmatmul_2D_uint8_float32}/test_data_set_0/input_4.pb +0 -0
- /onnx/backend/test/data/node/{test_qlinearmatmul_3D → test_qlinearmatmul_2D_uint8_float32}/test_data_set_0/input_5.pb +0 -0
- /onnx/backend/test/data/node/{test_qlinearmatmul_3D → test_qlinearmatmul_2D_uint8_float32}/test_data_set_0/input_6.pb +0 -0
- /onnx/backend/test/data/node/{test_qlinearmatmul_3D → test_qlinearmatmul_2D_uint8_float32}/test_data_set_0/input_7.pb +0 -0
- /onnx/backend/test/data/node/{test_qlinearmatmul_3D → test_qlinearmatmul_3D_uint8_float16}/test_data_set_0/input_0.pb +0 -0
- /onnx/backend/test/data/node/{test_qlinearmatmul_3D → test_qlinearmatmul_3D_uint8_float16}/test_data_set_0/input_3.pb +0 -0
- /onnx/backend/test/data/node/{test_qlinearmatmul_3D → test_qlinearmatmul_3D_uint8_float16}/test_data_set_0/output_0.pb +0 -0
- {onnx-1.15.0.dist-info → onnx-1.16.1.dist-info}/LICENSE +0 -0
- {onnx-1.15.0.dist-info → onnx-1.16.1.dist-info}/entry_points.txt +0 -0
- {onnx-1.15.0.dist-info → onnx-1.16.1.dist-info}/top_level.txt +0 -0
onnx/defs/math/old.cc
CHANGED
@@ -2631,10 +2631,10 @@ ONNX_OPERATOR_SET_SCHEMA(
 
 static const char* TopK_ver1_doc = R"DOC(
 Retrieve the top-K elements along a specified axis. Given an input tensor of
-shape [
--Value tensor of shape [
+shape [a_0, a_1, ..., a_{n-1}] and integer argument k, return two outputs:
+-Value tensor of shape [a_0, a_1, ..., a_{axis-1}, k, a_{axis+1}, ... a_{n-1}]
 which contains the values of the top k elements along the specified axis
--Index tensor of shape [
+-Index tensor of shape [a_0, a_1, ..., a_{axis-1}, k, a_{axis+1}, ... a_{n-1}] which
 contains the indices of the top k elements (original indices from the input
 tensor).
 Given two equivalent values, this operator uses the indices along the axis as
@@ -2646,17 +2646,17 @@ ONNX_OPERATOR_SET_SCHEMA(
 1,
 OpSchema()
 .SetDoc(TopK_ver1_doc)
-.Input(0, "X", "Tensor of shape [
+.Input(0, "X", "Tensor of shape [a_0, a_1, ..., a_{n-1}]", "T")
 .Output(
 0,
 "Values",
-"Tensor of shape [
+"Tensor of shape [a_0, a_1, ..., a_{axis-1}, k, a_{axis+1}, ... a_{n-1}] "
 "containing top K values from the input tensor",
 "T")
 .Output(
 1,
 "Indices",
-"Tensor of shape [
+"Tensor of shape [a_0, a_1, ..., a_{axis-1}, k, a_{axis+1}, ... a_{n-1}] "
 "containing the corresponding input tensor indices for the top K "
 "values.",
 "I")
@@ -2697,10 +2697,10 @@ ONNX_OPERATOR_SET_SCHEMA(
 
 static const char* TopK_ver10_doc = R"DOC(
 Retrieve the top-K elements along a specified axis. Given an input tensor of
-shape [
--Value tensor of shape [
+shape [a_0, a_1, ..., a_{n-1}] and integer argument k, return two outputs:
+-Value tensor of shape [a_0, a_1, ..., a_{axis-1}, k, a_{axis+1}, ... a_{n-1}]
 which contains the values of the top k elements along the specified axis
--Index tensor of shape [
+-Index tensor of shape [a_0, a_1, ..., a_{axis-1}, k, a_{axis+1}, ... a_{n-1}] which
 contains the indices of the top k elements (original indices from the input
 tensor).
 
@@ -2713,7 +2713,7 @@ ONNX_OPERATOR_SET_SCHEMA(
 10,
 OpSchema()
 .SetDoc(TopK_ver10_doc)
-.Input(0, "X", "Tensor of shape [
+.Input(0, "X", "Tensor of shape [a_0, a_1, ..., a_{n-1}]", "T")
 .Input(
 1,
 "K",
@@ -2722,13 +2722,13 @@ ONNX_OPERATOR_SET_SCHEMA(
 .Output(
 0,
 "Values",
-"Tensor of shape [
+"Tensor of shape [a_0, a_1, ..., a_{axis-1}, k, a_{axis+1}, ... a_{n-1}] "
 "containing top K values from the input tensor",
 "T")
 .Output(
 1,
 "Indices",
-"Tensor of shape [
+"Tensor of shape [a_0, a_1, ..., a_{axis-1}, k, a_{axis+1}, ... a_{n-1}] "
 "containing the corresponding input tensor indices for the top K "
 "values.",
 "I")
@@ -3107,4 +3107,88 @@ ONNX_OPERATOR_SET_SCHEMA(
 updateOutputShape(ctx, 0, result_shape_proto);
 }));
 
+ONNX_OPERATOR_SET_SCHEMA(
+QLinearMatMul,
+10,
+OpSchema()
+.SetDoc(defs::math::utils::QLinearMatMulDoc())
+.Input(0, "a", "N-dimensional quantized matrix a", "T1", OpSchema::Single, true, 1, OpSchema::NonDifferentiable)
+.Input(
+1,
+"a_scale",
+"scale of quantized input a",
+"tensor(float)",
+OpSchema::Single,
+true,
+1,
+OpSchema::NonDifferentiable)
+.Input(
+2,
+"a_zero_point",
+"zero point of quantized input a",
+"T1",
+OpSchema::Single,
+true,
+1,
+OpSchema::NonDifferentiable)
+.Input(3, "b", "N-dimensional quantized matrix b", "T2", OpSchema::Single, true, 1, OpSchema::NonDifferentiable)
+.Input(
+4,
+"b_scale",
+"scale of quantized input b",
+"tensor(float)",
+OpSchema::Single,
+true,
+1,
+OpSchema::NonDifferentiable)
+.Input(
+5,
+"b_zero_point",
+"zero point of quantized input b",
+"T2",
+OpSchema::Single,
+true,
+1,
+OpSchema::NonDifferentiable)
+.Input(
+6,
+"y_scale",
+"scale of quantized output y",
+"tensor(float)",
+OpSchema::Single,
+true,
+1,
+OpSchema::NonDifferentiable)
+.Input(
+7,
+"y_zero_point",
+"zero point of quantized output y",
+"T3",
+OpSchema::Single,
+true,
+1,
+OpSchema::NonDifferentiable)
+.Output(
+0,
+"y",
+"Quantized matrix multiply results from a * b",
+"T3",
+OpSchema::Single,
+true,
+1,
+OpSchema::NonDifferentiable)
+.TypeConstraint(
+"T1",
+{"tensor(int8)", "tensor(uint8)"},
+"Constrain input a and its zero point data type to 8-bit integer tensor.")
+.TypeConstraint(
+"T2",
+{"tensor(int8)", "tensor(uint8)"},
+"Constrain input b and its zero point data type to 8-bit integer tensor.")
+.TypeConstraint(
+"T3",
+{"tensor(int8)", "tensor(uint8)"},
+"Constrain output y and its zero point data type to 8-bit integer tensor.")
+.TypeAndShapeInferenceFunction(defs::math::utils::QLinearMatMulShapeInference));
+
 } // namespace ONNX_NAMESPACE
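For orientation, here is a minimal Python sketch (not part of the diff) of how the opset-10 QLinearMatMul schema registered above can be exercised through onnx.helper, the checker, and shape inference. The tensor names and the 2x4 / 4x3 shapes are illustrative assumptions only.

import onnx
from onnx import TensorProto, helper

# One QLinearMatMul node wired to the eight inputs defined by the schema.
node = helper.make_node(
    "QLinearMatMul",
    inputs=["a", "a_scale", "a_zero_point", "b", "b_scale", "b_zero_point",
            "y_scale", "y_zero_point"],
    outputs=["y"],
)
graph = helper.make_graph(
    [node],
    "qlinearmatmul_sketch",
    inputs=[
        helper.make_tensor_value_info("a", TensorProto.UINT8, [2, 4]),
        helper.make_tensor_value_info("a_scale", TensorProto.FLOAT, []),
        helper.make_tensor_value_info("a_zero_point", TensorProto.UINT8, []),
        helper.make_tensor_value_info("b", TensorProto.UINT8, [4, 3]),
        helper.make_tensor_value_info("b_scale", TensorProto.FLOAT, []),
        helper.make_tensor_value_info("b_zero_point", TensorProto.UINT8, []),
        helper.make_tensor_value_info("y_scale", TensorProto.FLOAT, []),
        helper.make_tensor_value_info("y_zero_point", TensorProto.UINT8, []),
    ],
    outputs=[helper.make_tensor_value_info("y", TensorProto.UINT8, None)],
)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 10)])
onnx.checker.check_model(model)
inferred = onnx.shape_inference.infer_shapes(model)
# The output should be inferred as a uint8 tensor of shape [2, 3].
print(inferred.graph.output[0].type)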
onnx/defs/math/utils.cc
ADDED
@@ -0,0 +1,127 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include "onnx/defs/math/utils.h"
+
+#include <string>
+
+namespace ONNX_NAMESPACE {
+namespace defs {
+namespace math {
+namespace utils {
+
+void MatMulShapeInference(ONNX_NAMESPACE::InferenceContext& ctx, int input1Idx, int input2Idx) {
+  if (!hasInputShape(ctx, input1Idx) || !hasInputShape(ctx, input2Idx)) {
+    return;
+  }
+
+  const auto shape0 = ctx.getInputType(input1Idx)->tensor_type().shape();
+  const auto shape1 = ctx.getInputType(input2Idx)->tensor_type().shape();
+
+  if (shape0.dim_size() == 0 || shape1.dim_size() == 0) {
+    fail_shape_inference("Input tensors of wrong rank (0).");
+  }
+
+  ONNX_NAMESPACE::TensorShapeProto shapeL, shapeR;
+
+  // First promote each shape to at least rank-2. This logic is
+  // specific to matmul, not generic broadcasting.
+  {
+    if (shape0.dim_size() == 1) {
+      shapeL.add_dim()->set_dim_value(1);
+      *shapeL.add_dim() = shape0.dim(0);
+    } else {
+      *shapeL.mutable_dim() = shape0.dim();
+    }
+    if (shape1.dim_size() == 1) {
+      *shapeR.add_dim() = shape1.dim(0);
+      shapeR.add_dim()->set_dim_value(1);
+    } else {
+      *shapeR.mutable_dim() = shape1.dim();
+    }
+  }
+
+  // Check for compatible matrix multiply dimensions
+  {
+    auto dimL = shapeL.dim(shapeL.dim_size() - 1);
+    auto dimR = shapeR.dim(shapeR.dim_size() - 2);
+    if (dimL.has_dim_value() && dimR.has_dim_value() && dimL.dim_value() != dimR.dim_value()) {
+      fail_shape_inference("Incompatible dimensions for matrix multiplication");
+    }
+  }
+
+  ONNX_NAMESPACE::TensorShapeProto resultShape;
+
+  // Now call out to generic multidimensional broadcasting for
+  // the broadcastable prefixes.
+  {
+    ONNX_NAMESPACE::TensorShapeProto prefixShapeL, prefixShapeR;
+    for (int i = 0; i < shapeL.dim_size() - 2; ++i) {
+      *prefixShapeL.add_dim() = shapeL.dim(i);
+    }
+    for (int i = 0; i < shapeR.dim_size() - 2; ++i) {
+      *prefixShapeR.add_dim() = shapeR.dim(i);
+    }
+    bidirectionalBroadcastShapeInference(prefixShapeL, prefixShapeR, resultShape);
+  }
+
+  // Back to matmul-specific. Add the trailing dimensions back in.
+  {
+    if (shape0.dim_size() != 1) {
+      *resultShape.add_dim() = shapeL.dim(shapeL.dim_size() - 2);
+    }
+    if (shape1.dim_size() != 1) {
+      *resultShape.add_dim() = shapeR.dim(shapeR.dim_size() - 1);
+    }
+  }
+
+  *ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape() = resultShape;
+}
+
+void QLinearMatMulShapeInference(ONNX_NAMESPACE::InferenceContext& ctx) {
+  auto a_type = ctx.getInputType(0);
+  auto b_type = ctx.getInputType(3);
+  if (nullptr == a_type || nullptr == b_type || a_type->value_case() != ONNX_NAMESPACE::TypeProto::kTensorType ||
+      b_type->value_case() != ONNX_NAMESPACE::TypeProto::kTensorType) {
+    fail_type_inference("inputs are expected to have tensor type.");
+  }
+
+  auto a_zero_point_type = ctx.getInputType(2);
+  if (nullptr == a_zero_point_type ||
+      a_zero_point_type->tensor_type().elem_type() != a_type->tensor_type().elem_type()) {
+    fail_type_inference("input and zero_point pair is expected to have be same type.");
+  }
+
+  auto b_zero_point_type = ctx.getInputType(5);
+  if (nullptr == b_zero_point_type ||
+      b_zero_point_type->tensor_type().elem_type() != b_type->tensor_type().elem_type()) {
+    fail_type_inference("input and zero_point pair is expected to have same type.");
+  }
+
+  propagateElemTypeFromInputToOutput(ctx, 7, 0);
+
+  MatMulShapeInference(ctx, 0, 3);
+}
+
+const char* QLinearMatMulDoc() {
+  static const char* QLinearMatMul_doc = R"DOC(
+Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html.
+It consumes two quantized input tensors, their scales and zero points, scale and zero point of output,
+and computes the quantized output. The quantization formula is y = saturate((x / y_scale) + y_zero_point).
+For (x / y_scale), it is rounding to nearest ties to even. Refer to https://en.wikipedia.org/wiki/Rounding for details.
+Scale and zero point must have same shape. They must be either scalar (per tensor) or N-D tensor
+(per row for 'a' and per column for 'b'). Scalar refers to per tensor quantization whereas N-D refers to per row
+or per column quantization. If the input is 2D of shape [M, K] then zero point and scale tensor may be
+an M element vector [v_1, v_2, ..., v_M] for per row quantization and K element vector of shape [v_1, v_2, ..., v_K]
+for per column quantization. If the input is N-D tensor with shape [D1, D2, M, K] then zero point and scale tensor may
+have shape [D1, D2, M, 1] for per row quantization and shape [D1, D2, 1, K] for per column quantization.
+Production must never overflow, and accumulation may overflow if and only if in 32 bits.
+)DOC";
+  return QLinearMatMul_doc;
+}
+
+} // namespace utils
+} // namespace math
+} // namespace defs
+} // namespace ONNX_NAMESPACE
onnx/defs/math/utils.h
CHANGED
@@ -12,6 +12,7 @@ namespace ONNX_NAMESPACE {
 namespace defs {
 namespace math {
 namespace utils {
+
 template <typename T>
 T GetScalarValueFromTensor(const ONNX_NAMESPACE::TensorProto* t) {
 if (t == nullptr) {
@@ -32,6 +33,13 @@ T GetScalarValueFromTensor(const ONNX_NAMESPACE::TensorProto* t) {
 fail_shape_inference("Unsupported input data type of ", data_type);
 }
 }
+
+void MatMulShapeInference(ONNX_NAMESPACE::InferenceContext& ctx, int input1Idx, int input2Idx);
+
+void QLinearMatMulShapeInference(ONNX_NAMESPACE::InferenceContext& ctx);
+
+const char* QLinearMatMulDoc();
+
 } // namespace utils
 } // namespace math
 } // namespace defs
onnx/defs/nn/defs.cc
CHANGED
@@ -164,25 +164,23 @@ void convPoolShapeInference(
       continue;
     }
     // how big is the input, including padding
-    int64_t
-    effective_input_size
-    effective_input_size += pads[i + kernel_shape_size];
+    int64_t input_size = input_shape.dim(2 + i).dim_value();
+    int64_t effective_input_size = input_size + pads[i] + pads[i + kernel_shape_size];

     // default is floor mode .i.e. ceil_mode is set to 0
     auto ceil_mode = getAttribute(ctx, "ceil_mode", 0);

-
-
-
-
-
-
-
-
-
+    int64_t output_size =
+        (effective_input_size - effective_kernel_shape[i] + (ceil_mode ? strides[i] - 1 : 0)) / strides[i] + 1;
+    if (ceil_mode == 1 && (output_size - 1) * strides[i] >= (input_size + pads[i])) {
+      // we need to match pytorch's behavior of "Sliding windows that would start in the right padded region are
+      // ignored." (https://pytorch.org/docs/stable/generated/torch.nn.MaxPool1d.html#maxpool1d). this code follows the
+      // same logic as PyTorch's C++ implementation:
+      // https://github.com/pytorch/pytorch/blob/f1cdb39da3850c47d51ec6a5b1ae864c32b3accf/aten/src/ATen/native/Pool.h#L54C21-L54C21
+      --output_size;
+    }

-
-    newdim->set_dim_value(1 + strided_kernel_positions);
+    newdim->set_dim_value(output_size);
   }

   if (ctx.getNumOutputs() > 1) {
@@ -223,7 +221,7 @@ std::function<void(OpSchema&)> PoolOpSchemaGenerator(
 ```
 output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1)
 ```
-if ceil_mode is enabled. `pad_shape[i]` is the sum of pads along axis `i`.
+if ceil_mode is enabled. `pad_shape[i]` is the sum of pads along axis `i`. Sliding windows that would start in the right padded region are ignored.

 `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following when ceil_mode is enabled:
 ```
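The new output-size computation above (and the sentence added to the pooling doc about windows that would start in the right padded region) boils down to a small piece of integer arithmetic; a rough Python sketch, with names chosen for the example rather than taken from the package:

```python
import math

def pool_output_size(input_size, kernel, stride, pad_begin, pad_end,
                     dilation=1, ceil_mode=False):
    """Output length along one spatial axis, mirroring the shape inference in the diff above."""
    effective_kernel = (kernel - 1) * dilation + 1
    effective_input = input_size + pad_begin + pad_end
    if ceil_mode:
        out = math.ceil((effective_input - effective_kernel) / stride) + 1
        # Ignore windows that would start in the end padding (PyTorch behaviour).
        if (out - 1) * stride >= input_size + pad_begin:
            out -= 1
    else:
        out = (effective_input - effective_kernel) // stride + 1
    return out

# Length-4 axis, kernel 2, stride 2, one trailing pad element:
# naive ceil_mode would give 3; the correction drops the window that
# starts entirely in the padding, giving 2.
print(pool_output_size(4, kernel=2, stride=2, pad_begin=0, pad_end=1, ceil_mode=True))
```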
@@ -456,7 +454,7 @@ void maxUnpoolShapeInference(InferenceContext& ctx) {
   }
 }

-static const char*
+static const char* MaxUnpool_ver11_doc = R"DOC(
 MaxUnpool essentially computes the partial inverse of the MaxPool op.
 The input information to this op is typically the output information from a MaxPool op. The first
 input tensor X is the tensor that needs to be unpooled, which is typically the pooled tensor (first output)
@@ -481,7 +479,7 @@ ONNX_OPERATOR_SET_SCHEMA(
     MaxUnpool,
     11,
     OpSchema()
-        .SetDoc(
+        .SetDoc(MaxUnpool_ver11_doc)
         .Attr("kernel_shape", "The size of the kernel along each axis.", AttributeProto::INTS)
         .Attr(
             "strides",
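To make "partial inverse of the MaxPool op" concrete: unpooling scatters the pooled values back to the positions recorded by MaxPool's indices output and leaves every other position at zero. A minimal sketch with made-up example values (not from the package's test suite):

```python
import numpy as np

def max_unpool_1d(pooled, indices, output_length):
    """Scatter pooled values back to their original flat positions; positions
    that were not a window maximum stay zero, so the inverse is only partial."""
    out = np.zeros(output_length, dtype=pooled.dtype)
    out[indices] = pooled
    return out

x       = np.array([1., 5., 2., 8., 3., 3.])   # original signal
pooled  = np.array([5., 8., 3.])               # MaxPool(kernel=2, stride=2) values
indices = np.array([1, 3, 4])                  # argmax positions from MaxPool
print(max_unpool_1d(pooled, indices, output_length=6))
# -> [0. 5. 0. 8. 3. 0.]
```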
@@ -1657,8 +1655,8 @@ ONNX_OPERATOR_SET_SCHEMA(
             0.9f)
         .Attr(
             "training_mode",
-            "If set to true, it indicates BatchNormalization is being used for training, and outputs 1
-            "2
+            "If set to true, it indicates BatchNormalization is being used for training, and outputs 1 "
+            "and 2 are to be computed.",
             AttributeProto::INT,
             static_cast<int64_t>(0))
         .Input(
@@ -1997,7 +1995,7 @@ ONNX_OPERATOR_SET_SCHEMA(
 )ONNX",
         18));

-static const char*
+static const char* Flatten_ver11_doc = R"DOC(
 Flattens the input tensor into a 2D matrix. If input tensor has shape
 (d_0, d_1, ... d_n) then the output will have shape
 (d_0 X d_1 ... d_(axis-1), d_axis X d_(axis+1) ... X dn).
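The Flatten shape formula above, (d_0 X d_1 ... d_(axis-1), d_axis X d_(axis+1) ... X dn), amounts to a single reshape; a small numpy illustration, not taken from the package:

```python
import numpy as np

def flatten(x, axis=1):
    # Collapse the dims before `axis` into the first output dim and the rest into the second.
    new_shape = (int(np.prod(x.shape[:axis])), -1) if axis > 0 else (1, -1)
    return x.reshape(new_shape)

x = np.random.rand(2, 3, 4, 5)
print(flatten(x, axis=2).shape)   # (6, 20): (2*3, 4*5)
```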
@@ -2005,9 +2003,9 @@ Flattens the input tensor into a 2D matrix. If input tensor has shape

 ONNX_OPERATOR_SET_SCHEMA(
     Flatten,
-
+    21,
     OpSchema()
-        .SetDoc(
+        .SetDoc(Flatten_ver11_doc)
         .Input(0, "input", "A tensor of rank >= axis.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
         .Output(
             0,
@@ -2021,7 +2019,10 @@ ONNX_OPERATOR_SET_SCHEMA(
             true,
             1,
             OpSchema::Differentiable)
-        .TypeConstraint(
+        .TypeConstraint(
+            "T",
+            OpSchema::all_tensor_types_ir10(),
+            "Constrain input and output to all tensor types up to IRv10.")
         .Attr(
             "axis",
             "Indicate up to which input dimensions "
@@ -2516,7 +2517,9 @@ static const char* LayerNormalization_ver17_doc = R"DOC(
 Let `d[i]` indicate the i-th dimension of `X`.
 If `X`'s shape is `[d[0], ..., d[axis-1], d[axis], ..., d[rank-1]]`,
 the shape of `Mean` and `InvStdDev` is `[d[0], ..., d[axis-1], 1, ..., 1]`.
-`Y` and `X` have the same shape.
+`Y` and `X` have the same shape. This operator supports unidirectional broadcasting
+(tensors `Scale` and `B` should be unidirectional broadcastable to tensor `X`);
+for more details please check [the doc](Broadcasting.md).
 )DOC";

 bool BuildContextDependentFunctionBodyLayerNormalization(
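The broadcasting sentence added to the LayerNormalization doc, together with the stated shape of `Mean` and `InvStdDev`, can be illustrated with a numpy sketch of the normalization plus the Scale/B affine step. It only mirrors the shapes involved, not the operator's registered function body:

```python
import numpy as np

def layer_norm(x, scale, bias, axis=-1, epsilon=1e-5):
    axis = axis % x.ndim
    reduce_axes = tuple(range(axis, x.ndim))
    mean = x.mean(axis=reduce_axes, keepdims=True)        # shape [d0, ..., d(axis-1), 1, ..., 1]
    inv_std = 1.0 / np.sqrt(x.var(axis=reduce_axes, keepdims=True) + epsilon)
    y = (x - mean) * inv_std
    # Scale and B broadcast unidirectionally against X (here they have shape x.shape[axis:]).
    return y * scale + bias, mean, inv_std

x = np.random.rand(2, 3, 4).astype(np.float32)
y, mean, inv_std = layer_norm(x, scale=np.ones(4, np.float32), bias=np.zeros(4, np.float32), axis=2)
print(y.shape, mean.shape, inv_std.shape)   # (2, 3, 4) (2, 3, 1) (2, 3, 1)
```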
@@ -2705,7 +2708,7 @@ ONNX_OPERATOR_SET_SCHEMA(
         }
       }));

-static const char*
+static const char* GroupNormalization_ver21_doc = R"DOC(
 A GroupNormalization function. Carries out group normalization as described in
 the paper https://arxiv.org/abs/1803.08494

@@ -2718,6 +2721,14 @@ where the mean and variance are computed per instance per group of channels, and
 groups `num_groups` should be divisible by the number of channels so that there are
 an equal number of channels per group.

+The overall computation has two stages: the first stage normalizes the elements to
+have zero mean and unit variance for each instance in each group, and the second
+stage scales and shifts the results of the first stage. The floating-point precision
+used in the first stage is determined by the `stash_type` attribute. For example,
+if `stash_type` is 1, the operator casts all input variables to 32-bit float,
+performs the computation, and finally casts the normalized results back to the
+original type of `X`. The second stage does not depend on `stash_type`.
+
 When the number of groups is the same as the number of channels, this operator is
 equivalent to InstanceNormalization. When there is only one group, this operator
 is equivalent to LayerNormalization.
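The two-stage description added to the doc can be mirrored in numpy: stage one computes the statistics per instance and per group in the stash type (float32 below), stage two applies scale and bias in the original type of X. A rough sketch under those assumptions, not the function body registered by the schema below:

```python
import numpy as np

def group_norm(x, scale, bias, num_groups, epsilon=1e-5, stash_dtype=np.float32):
    n, c = x.shape[:2]
    spatial = x.shape[2:]
    # Stage 1: normalize in the stash type over each (instance, group).
    xu = x.astype(stash_dtype).reshape(n, num_groups, -1)
    mean = xu.mean(axis=2, keepdims=True)
    var = xu.var(axis=2, keepdims=True)
    normalized = ((xu - mean) / np.sqrt(var + epsilon)).reshape(n, c, *spatial)
    # Stage 2: per-channel scale and shift in the original type of X.
    normalized = normalized.astype(x.dtype)
    bshape = (1, c) + (1,) * len(spatial)
    return normalized * scale.reshape(bshape) + bias.reshape(bshape)

x = np.random.rand(2, 6, 4, 4).astype(np.float16)
y = group_norm(x, np.ones(6, np.float16), np.zeros(6, np.float16), num_groups=3)
print(y.shape, y.dtype)   # (2, 6, 4, 4) float16
```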
@@ -2725,15 +2736,20 @@ is equivalent to LayerNormalization.

 ONNX_OPERATOR_SET_SCHEMA(
     GroupNormalization,
-
+    21,
     OpSchema()
-        .SetDoc(
+        .SetDoc(GroupNormalization_ver21_doc)
         .Attr("epsilon", "The epsilon value to use to avoid division by zero.", AttributeProto::FLOAT, 1e-5f)
         .Attr(
            "num_groups",
            "The number of groups of channels. It should be a divisor of the number of channels `C`.",
            AttributeProto::INT,
            true)
+        .Attr(
+            "stash_type",
+            "The floating-point precision used in stage one of the computation.",
+            AttributeProto::INT,
+            static_cast<int64_t>(ONNX_NAMESPACE::TensorProto_DataType_FLOAT))
         .Input(
             0,
             "X",
@@ -2746,24 +2762,8 @@ ONNX_OPERATOR_SET_SCHEMA(
             true,
             1,
             OpSchema::Differentiable)
-        .Input(
-
-            "scale",
-            "Scale tensor of shape `(num_groups)`.",
-            "T",
-            OpSchema::Single,
-            true,
-            1,
-            OpSchema::Differentiable)
-        .Input(
-            2,
-            "bias",
-            "Bias tensor of shape `(num_groups)`.",
-            "T",
-            OpSchema::Single,
-            true,
-            1,
-            OpSchema::Differentiable)
+        .Input(1, "scale", "Scale tensor of shape `(C)`.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
+        .Input(2, "bias", "Bias tensor of shape `(C)`.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
         .Output(
             0,
             "Y",
@@ -2773,17 +2773,14 @@ ONNX_OPERATOR_SET_SCHEMA(
             true,
             1,
             OpSchema::Differentiable)
-        .TypeConstraint(
-            "T",
-            {"tensor(float16)", "tensor(float)", "tensor(double)", "tensor(bfloat16)"},
-            "Constrain input and output types to float tensors.")
+        .TypeConstraint("T", OpSchema::all_float_types_ir4(), "Constrain input and output types to float tensors.")
         .SetContextDependentFunctionBodyBuilder(
             [](const FunctionBodyBuildContext& ctx, const OpSchema& schema, FunctionProto& functionProto) {
              // GroupNormalization <epsilon, num_groups> (X, scale, bias) => (Y)
              auto* tp = ctx.getInputType(0);
              if ((tp == nullptr) || (!tp->has_tensor_type()))
                return false;
-              int64_t
+              int64_t in_type = tp->tensor_type().elem_type();

              auto* epsilon_attr = ctx.getAttribute("epsilon");
              float epsilon = (epsilon_attr != nullptr) ? epsilon_attr->f() : 1e-5f;
@@ -2792,10 +2789,21 @@ ONNX_OPERATOR_SET_SCHEMA(
                return false;
              int64_t num_groups = num_groups_attr->i();

+              auto stash_type_attr = ctx.getAttribute("stash_type");
+              int64_t stash_type = (stash_type_attr != nullptr)
+                  ? stash_type_attr->i()
+                  : static_cast<int64_t>(ONNX_NAMESPACE::TensorProto_DataType_FLOAT);
+              if ((stash_type != ONNX_NAMESPACE::TensorProto_DataType_FLOAT) &&
+                  (stash_type != ONNX_NAMESPACE::TensorProto_DataType_BFLOAT16) &&
+                  (stash_type != ONNX_NAMESPACE::TensorProto_DataType_FLOAT16) &&
+                  (stash_type != ONNX_NAMESPACE::TensorProto_DataType_DOUBLE))
+                return false; // Error
+
              FunctionBuilder builder(functionProto);
              builder.Const1D("FloatEpsilon", epsilon)
-                  .Add("Epsilon = Cast (FloatEpsilon)", "to",
-                  .Add("
+                  .Add("Epsilon = Cast (FloatEpsilon)", "to", stash_type)
+                  .Add("XU = Cast (X)", "to", stash_type)
+                  .Add("XShape = Shape (XU)") // shape of input tensor: 1D tensor
                  .Add("C = Shape <start = 1, end = 2> (X)")
                  .Const1D("NumGroups", num_groups)
                  .Add("GroupSize = Div (C, NumGroups)")
@@ -2804,11 +2812,11 @@ ONNX_OPERATOR_SET_SCHEMA(

                  // NewShape = [N, num_groups, group_size, H, W, (...)]
                  .Add("NewShape = Concat <axis = 0> (N, NumGroups, GroupSize, InstanceShape)")
-                  .Add("XReshaped = Reshape (
+                  .Add("XReshaped = Reshape (XU, NewShape)")

                  // Flatten into 3D tensor: [N, num_groups, group_size x H x W (x ...)]
                  .Add("Shape3D = Constant <value_ints = [0, 0, -1]> ()")
-                  .Add("X3D = Reshape(XReshaped, Shape3D)")
+                  .Add("X3D = Reshape (XReshaped, Shape3D)")

                  // Calculate statistics
                  .Const1D("Axes2", (int64_t)2)
@@ -2820,17 +2828,22 @@ ONNX_OPERATOR_SET_SCHEMA(
                  .Add("VarPlusEpsilon = Add (Var, Epsilon)")
                  .Add("StdDev = Sqrt (VarPlusEpsilon)")
                  .Add("Deviation = Sub (X3D, Mean)")
-                  .Add("
+                  .Add("NormalizedU = Div (Deviation, StdDev)")
+
+                  // Reshape to [N, C, H x W (x ...)] and cast to original type
+                  .Add("NormalizedOriginalShape = Reshape (NormalizedU, XShape)")
+                  .Add("NormalizedNC = Reshape (NormalizedOriginalShape, Shape3D)")
+                  .Add("NormalizedT = Cast (NormalizedNC)", "to", in_type)

-                  // Reshape scale and bias for broadcasting
+                  // Reshape scale and bias to [1, C, 1] for broadcasting
                  .Add("ScaleShape = Constant <value_ints = [1, -1, 1]> ()")
-                  .Add("ScaleT = Cast (scale)", "to",
-                  .Add("BiasT = Cast (bias)", "to",
+                  .Add("ScaleT = Cast (scale)", "to", in_type)
+                  .Add("BiasT = Cast (bias)", "to", in_type)
                  .Add("ScaleReshaped = Reshape (ScaleT, ScaleShape)")
                  .Add("BiasReshaped = Reshape (BiasT, ScaleShape)")

                  // Calculate scaled and biased output
-                  .Add("Scaled = Mul (ScaleReshaped,
+                  .Add("Scaled = Mul (ScaleReshaped, NormalizedT)")
                  .Add("Biased = Add (Scaled, BiasReshaped)")
                  .Add("Y = Reshape (Biased, XShape)");