onnx-1.16.2-cp38-cp38-win32.whl → onnx-1.17.0-cp38-cp38-win32.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of onnx has been flagged as potentially problematic. Click here for more details.
- onnx/__init__.py +3 -1
- onnx/_custom_element_types.py +63 -0
- onnx/backend/base.py +17 -15
- onnx/backend/sample/ops/__init__.py +4 -4
- onnx/backend/sample/ops/abs.py +1 -0
- onnx/backend/test/__init__.py +1 -0
- onnx/backend/test/case/__init__.py +2 -2
- onnx/backend/test/case/base.py +6 -5
- onnx/backend/test/case/model/__init__.py +4 -3
- onnx/backend/test/case/model/expand.py +1 -0
- onnx/backend/test/case/model/gradient.py +1 -0
- onnx/backend/test/case/model/sequence.py +3 -1
- onnx/backend/test/case/model/shrink.py +1 -0
- onnx/backend/test/case/model/sign.py +1 -0
- onnx/backend/test/case/model/single-relu.py +1 -0
- onnx/backend/test/case/model/stringnormalizer.py +1 -1
- onnx/backend/test/case/node/__init__.py +31 -22
- onnx/backend/test/case/node/_image_decoder_data.py +1 -0
- onnx/backend/test/case/node/abs.py +1 -0
- onnx/backend/test/case/node/acos.py +1 -0
- onnx/backend/test/case/node/acosh.py +1 -0
- onnx/backend/test/case/node/adagrad.py +2 -1
- onnx/backend/test/case/node/adam.py +4 -1
- onnx/backend/test/case/node/add.py +1 -0
- onnx/backend/test/case/node/affinegrid.py +1 -0
- onnx/backend/test/case/node/ai_onnx_ml/array_feature_extractor.py +1 -0
- onnx/backend/test/case/node/ai_onnx_ml/binarizer.py +1 -0
- onnx/backend/test/case/node/ai_onnx_ml/label_encoder.py +1 -0
- onnx/backend/test/case/node/ai_onnx_ml/tree_ensemble.py +1 -0
- onnx/backend/test/case/node/and.py +1 -0
- onnx/backend/test/case/node/argmax.py +1 -0
- onnx/backend/test/case/node/argmin.py +1 -0
- onnx/backend/test/case/node/asin.py +1 -0
- onnx/backend/test/case/node/asinh.py +1 -0
- onnx/backend/test/case/node/atan.py +1 -0
- onnx/backend/test/case/node/atanh.py +1 -0
- onnx/backend/test/case/node/averagepool.py +1 -0
- onnx/backend/test/case/node/batchnorm.py +1 -0
- onnx/backend/test/case/node/bernoulli.py +1 -0
- onnx/backend/test/case/node/bitshift.py +1 -0
- onnx/backend/test/case/node/bitwiseand.py +1 -0
- onnx/backend/test/case/node/bitwisenot.py +1 -0
- onnx/backend/test/case/node/bitwiseor.py +1 -0
- onnx/backend/test/case/node/bitwisexor.py +1 -0
- onnx/backend/test/case/node/blackmanwindow.py +13 -3
- onnx/backend/test/case/node/cast.py +2 -1
- onnx/backend/test/case/node/castlike.py +1 -0
- onnx/backend/test/case/node/ceil.py +1 -0
- onnx/backend/test/case/node/celu.py +1 -0
- onnx/backend/test/case/node/center_crop_pad.py +1 -0
- onnx/backend/test/case/node/clip.py +1 -0
- onnx/backend/test/case/node/col2im.py +1 -1
- onnx/backend/test/case/node/compress.py +1 -0
- onnx/backend/test/case/node/concat.py +3 -2
- onnx/backend/test/case/node/constant.py +1 -0
- onnx/backend/test/case/node/constantofshape.py +1 -0
- onnx/backend/test/case/node/conv.py +1 -0
- onnx/backend/test/case/node/convinteger.py +1 -0
- onnx/backend/test/case/node/convtranspose.py +135 -0
- onnx/backend/test/case/node/cos.py +1 -0
- onnx/backend/test/case/node/cosh.py +1 -0
- onnx/backend/test/case/node/cumsum.py +1 -0
- onnx/backend/test/case/node/deformconv.py +17 -26
- onnx/backend/test/case/node/depthtospace.py +1 -0
- onnx/backend/test/case/node/dequantizelinear.py +1 -0
- onnx/backend/test/case/node/det.py +1 -0
- onnx/backend/test/case/node/dft.py +1 -0
- onnx/backend/test/case/node/div.py +1 -0
- onnx/backend/test/case/node/dropout.py +1 -0
- onnx/backend/test/case/node/dynamicquantizelinear.py +1 -0
- onnx/backend/test/case/node/einsum.py +2 -3
- onnx/backend/test/case/node/elu.py +1 -0
- onnx/backend/test/case/node/equal.py +1 -0
- onnx/backend/test/case/node/erf.py +1 -0
- onnx/backend/test/case/node/exp.py +1 -0
- onnx/backend/test/case/node/expand.py +1 -0
- onnx/backend/test/case/node/eyelike.py +1 -0
- onnx/backend/test/case/node/flatten.py +1 -0
- onnx/backend/test/case/node/floor.py +1 -0
- onnx/backend/test/case/node/gather.py +1 -0
- onnx/backend/test/case/node/gatherelements.py +1 -0
- onnx/backend/test/case/node/gathernd.py +1 -0
- onnx/backend/test/case/node/gelu.py +1 -0
- onnx/backend/test/case/node/gemm.py +3 -4
- onnx/backend/test/case/node/globalaveragepool.py +1 -0
- onnx/backend/test/case/node/globalmaxpool.py +1 -0
- onnx/backend/test/case/node/greater.py +1 -0
- onnx/backend/test/case/node/greater_equal.py +1 -0
- onnx/backend/test/case/node/gridsample.py +1 -0
- onnx/backend/test/case/node/groupnormalization.py +1 -0
- onnx/backend/test/case/node/gru.py +3 -2
- onnx/backend/test/case/node/hammingwindow.py +13 -2
- onnx/backend/test/case/node/hannwindow.py +10 -2
- onnx/backend/test/case/node/hardmax.py +1 -0
- onnx/backend/test/case/node/hardsigmoid.py +1 -0
- onnx/backend/test/case/node/hardswish.py +1 -0
- onnx/backend/test/case/node/identity.py +1 -0
- onnx/backend/test/case/node/if.py +1 -0
- onnx/backend/test/case/node/instancenorm.py +1 -0
- onnx/backend/test/case/node/isinf.py +1 -0
- onnx/backend/test/case/node/isnan.py +1 -0
- onnx/backend/test/case/node/layernormalization.py +1 -0
- onnx/backend/test/case/node/leakyrelu.py +1 -0
- onnx/backend/test/case/node/less.py +1 -0
- onnx/backend/test/case/node/less_equal.py +1 -0
- onnx/backend/test/case/node/log.py +1 -0
- onnx/backend/test/case/node/logsoftmax.py +1 -0
- onnx/backend/test/case/node/loop.py +4 -3
- onnx/backend/test/case/node/lppool.py +1 -0
- onnx/backend/test/case/node/lrn.py +1 -0
- onnx/backend/test/case/node/lstm.py +3 -2
- onnx/backend/test/case/node/matmul.py +1 -0
- onnx/backend/test/case/node/matmulinteger.py +1 -0
- onnx/backend/test/case/node/max.py +1 -0
- onnx/backend/test/case/node/maxpool.py +1 -0
- onnx/backend/test/case/node/maxunpool.py +1 -0
- onnx/backend/test/case/node/mean.py +1 -0
- onnx/backend/test/case/node/meanvariancenormalization.py +1 -0
- onnx/backend/test/case/node/melweightmatrix.py +1 -0
- onnx/backend/test/case/node/min.py +1 -0
- onnx/backend/test/case/node/mish.py +1 -0
- onnx/backend/test/case/node/mod.py +1 -0
- onnx/backend/test/case/node/momentum.py +1 -0
- onnx/backend/test/case/node/mul.py +1 -0
- onnx/backend/test/case/node/neg.py +1 -0
- onnx/backend/test/case/node/negativeloglikelihoodloss.py +4 -1
- onnx/backend/test/case/node/nonmaxsuppression.py +1 -0
- onnx/backend/test/case/node/nonzero.py +1 -0
- onnx/backend/test/case/node/not.py +1 -0
- onnx/backend/test/case/node/onehot.py +1 -0
- onnx/backend/test/case/node/optionalgetelement.py +3 -2
- onnx/backend/test/case/node/optionalhaselement.py +2 -3
- onnx/backend/test/case/node/or.py +1 -0
- onnx/backend/test/case/node/pad.py +2 -1
- onnx/backend/test/case/node/pow.py +1 -0
- onnx/backend/test/case/node/prelu.py +1 -0
- onnx/backend/test/case/node/qlinearconv.py +1 -0
- onnx/backend/test/case/node/qlinearmatmul.py +1 -0
- onnx/backend/test/case/node/quantizelinear.py +1 -0
- onnx/backend/test/case/node/rangeop.py +1 -0
- onnx/backend/test/case/node/reciprocal.py +1 -0
- onnx/backend/test/case/node/reduce_log_sum.py +1 -0
- onnx/backend/test/case/node/reduce_log_sum_exp.py +1 -0
- onnx/backend/test/case/node/reducel1.py +1 -0
- onnx/backend/test/case/node/reducel2.py +1 -0
- onnx/backend/test/case/node/reducemax.py +2 -1
- onnx/backend/test/case/node/reducemean.py +1 -0
- onnx/backend/test/case/node/reducemin.py +1 -0
- onnx/backend/test/case/node/reduceprod.py +1 -0
- onnx/backend/test/case/node/reducesum.py +2 -1
- onnx/backend/test/case/node/reducesumsquare.py +1 -0
- onnx/backend/test/case/node/regex_full_match.py +1 -0
- onnx/backend/test/case/node/relu.py +1 -0
- onnx/backend/test/case/node/reshape.py +1 -0
- onnx/backend/test/case/node/resize.py +3 -2
- onnx/backend/test/case/node/reversesequence.py +1 -0
- onnx/backend/test/case/node/rnn.py +3 -2
- onnx/backend/test/case/node/roialign.py +1 -0
- onnx/backend/test/case/node/round.py +4 -3
- onnx/backend/test/case/node/scan.py +1 -0
- onnx/backend/test/case/node/scatter.py +1 -0
- onnx/backend/test/case/node/scatterelements.py +7 -3
- onnx/backend/test/case/node/scatternd.py +1 -0
- onnx/backend/test/case/node/selu.py +1 -0
- onnx/backend/test/case/node/sequence_map.py +1 -0
- onnx/backend/test/case/node/sequenceinsert.py +4 -3
- onnx/backend/test/case/node/shape.py +1 -0
- onnx/backend/test/case/node/shrink.py +1 -0
- onnx/backend/test/case/node/sigmoid.py +1 -0
- onnx/backend/test/case/node/sign.py +1 -0
- onnx/backend/test/case/node/sin.py +1 -0
- onnx/backend/test/case/node/sinh.py +1 -0
- onnx/backend/test/case/node/size.py +1 -0
- onnx/backend/test/case/node/slice.py +1 -0
- onnx/backend/test/case/node/softmax.py +1 -0
- onnx/backend/test/case/node/softmaxcrossentropy.py +4 -1
- onnx/backend/test/case/node/softplus.py +1 -0
- onnx/backend/test/case/node/softsign.py +1 -0
- onnx/backend/test/case/node/spacetodepth.py +1 -0
- onnx/backend/test/case/node/split.py +1 -0
- onnx/backend/test/case/node/splittosequence.py +1 -0
- onnx/backend/test/case/node/sqrt.py +1 -0
- onnx/backend/test/case/node/squeeze.py +1 -0
- onnx/backend/test/case/node/stft.py +4 -1
- onnx/backend/test/case/node/string_concat.py +1 -0
- onnx/backend/test/case/node/string_split.py +1 -0
- onnx/backend/test/case/node/stringnormalizer.py +1 -0
- onnx/backend/test/case/node/sub.py +1 -0
- onnx/backend/test/case/node/sum.py +1 -0
- onnx/backend/test/case/node/tan.py +1 -0
- onnx/backend/test/case/node/tanh.py +1 -0
- onnx/backend/test/case/node/tfidfvectorizer.py +1 -0
- onnx/backend/test/case/node/thresholdedrelu.py +1 -0
- onnx/backend/test/case/node/tile.py +1 -0
- onnx/backend/test/case/node/topk.py +1 -0
- onnx/backend/test/case/node/transpose.py +1 -0
- onnx/backend/test/case/node/trilu.py +1 -0
- onnx/backend/test/case/node/unique.py +7 -0
- onnx/backend/test/case/node/unsqueeze.py +1 -0
- onnx/backend/test/case/node/upsample.py +1 -0
- onnx/backend/test/case/node/where.py +1 -0
- onnx/backend/test/case/node/xor.py +1 -0
- onnx/backend/test/case/test_case.py +6 -5
- onnx/backend/test/case/utils.py +2 -2
- onnx/backend/test/cmd_tools.py +1 -0
- onnx/backend/test/data/node/test_acos/model.onnx +0 -0
- onnx/backend/test/data/node/test_acos/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_acos_example/model.onnx +0 -0
- onnx/backend/test/data/node/test_acosh/model.onnx +0 -0
- onnx/backend/test/data/node/test_acosh/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_acosh_example/model.onnx +0 -0
- onnx/backend/test/data/node/test_asin/model.onnx +0 -0
- onnx/backend/test/data/node/test_asin/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_asin_example/model.onnx +0 -0
- onnx/backend/test/data/node/test_asinh/model.onnx +0 -0
- onnx/backend/test/data/node/test_asinh/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_asinh_example/model.onnx +0 -0
- onnx/backend/test/data/node/test_atan/model.onnx +0 -0
- onnx/backend/test/data/node/test_atan/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_atan_example/model.onnx +0 -0
- onnx/backend/test/data/node/test_atanh/model.onnx +0 -0
- onnx/backend/test/data/node/test_atanh/test_data_set_0/output_0.pb +2 -2
- onnx/backend/test/data/node/test_atanh_example/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_1d_default/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_ceil/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_default/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_dilations/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_pads/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_pads_count_include_pad/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_precomputed_pads/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_precomputed_pads_count_include_pad/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_precomputed_same_upper/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_precomputed_strides/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_same_lower/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_same_upper/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_strides/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_3d_default/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_0_ceil_mode_is_False/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_0_ceil_mode_is_True/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_1_ceil_mode_is_False/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_1_ceil_mode_is_True/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_3d_dilations_small/model.onnx +0 -0
- onnx/backend/test/data/node/test_basic_conv_with_padding/model.onnx +0 -0
- onnx/backend/test/data/node/test_basic_conv_without_padding/model.onnx +0 -0
- onnx/backend/test/data/node/test_basic_deform_conv_with_padding/model.onnx +0 -0
- onnx/backend/test/data/node/test_basic_deform_conv_without_padding/model.onnx +0 -0
- onnx/backend/test/data/node/test_bernoulli/model.onnx +0 -0
- onnx/backend/test/data/node/test_bernoulli_double/model.onnx +0 -0
- onnx/backend/test/data/node/test_bernoulli_double_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_bernoulli_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_bernoulli_seed/model.onnx +0 -0
- onnx/backend/test/data/node/test_bernoulli_seed_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_blackmanwindow/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_blackmanwindow_expanded/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_blackmanwindow_symmetric/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_blackmanwindow_symmetric_expanded/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_INT4/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_cast_FLOAT_to_INT4/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_cast_INT4_to_FLOAT/test_data_set_0/input_0.pb +1 -1
- onnx/backend/test/data/node/test_cast_INT4_to_FLOAT16/test_data_set_0/input_0.pb +1 -1
- onnx/backend/test/data/node/test_cast_INT4_to_INT8/test_data_set_0/input_0.pb +1 -1
- onnx/backend/test/data/node/test_conv_with_autopad_same/model.onnx +0 -0
- onnx/backend/test/data/node/test_conv_with_strides_and_asymmetric_padding/model.onnx +0 -0
- onnx/backend/test/data/node/test_conv_with_strides_no_padding/model.onnx +0 -0
- onnx/backend/test/data/node/test_conv_with_strides_padding/model.onnx +0 -0
- onnx/backend/test/data/node/test_convtranspose/model.onnx +0 -0
- onnx/backend/test/data/node/test_convtranspose_1d/model.onnx +0 -0
- onnx/backend/test/data/node/test_convtranspose_3d/model.onnx +0 -0
- onnx/backend/test/data/node/test_convtranspose_autopad_same/model.onnx +0 -0
- onnx/backend/test/data/node/test_convtranspose_dilations/model.onnx +0 -0
- onnx/backend/test/data/node/test_convtranspose_group_2/model.onnx +0 -0
- onnx/backend/test/data/node/test_convtranspose_group_2/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_convtranspose_group_2/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_convtranspose_group_2/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_convtranspose_group_2_image_3/model.onnx +0 -0
- onnx/backend/test/data/node/test_convtranspose_group_2_image_3/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_convtranspose_group_2_image_3/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_convtranspose_group_2_image_3/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_convtranspose_kernel_shape/model.onnx +0 -0
- onnx/backend/test/data/node/test_convtranspose_output_shape/model.onnx +0 -0
- onnx/backend/test/data/node/test_convtranspose_pad/model.onnx +0 -0
- onnx/backend/test/data/node/test_convtranspose_pads/model.onnx +0 -0
- onnx/backend/test/data/node/test_cos/model.onnx +0 -0
- onnx/backend/test/data/node/test_cos_example/model.onnx +0 -0
- onnx/backend/test/data/node/test_cosh/model.onnx +0 -0
- onnx/backend/test/data/node/test_cosh/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_cosh_example/model.onnx +0 -0
- onnx/backend/test/data/node/test_cosh_example/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_deform_conv_with_mask_bias/model.onnx +0 -0
- onnx/backend/test/data/node/test_deform_conv_with_multiple_offset_groups/model.onnx +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_int4/test_data_set_0/input_0.pb +1 -1
- onnx/backend/test/data/node/test_det_2d/model.onnx +0 -0
- onnx/backend/test/data/node/test_det_nd/model.onnx +0 -0
- onnx/backend/test/data/node/test_dft/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_dft_axis/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_dft_axis_opset19/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_dft_inverse/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_dft_inverse_opset19/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_dft_opset19/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_dropout_default/model.onnx +0 -0
- onnx/backend/test/data/node/test_dropout_default_mask/model.onnx +0 -0
- onnx/backend/test/data/node/test_dropout_default_mask_ratio/model.onnx +0 -0
- onnx/backend/test/data/node/test_dropout_default_ratio/model.onnx +0 -0
- onnx/backend/test/data/node/test_elu/model.onnx +0 -0
- onnx/backend/test/data/node/test_elu_default/model.onnx +0 -0
- onnx/backend/test/data/node/test_elu_example/model.onnx +0 -0
- onnx/backend/test/data/node/test_eyelike_populate_off_main_diagonal/model.onnx +0 -0
- onnx/backend/test/data/node/test_eyelike_with_dtype/model.onnx +0 -0
- onnx/backend/test/data/node/test_eyelike_without_dtype/model.onnx +0 -0
- onnx/backend/test/data/node/test_gelu_default_1/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_gelu_default_1_expanded/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_gelu_default_2/test_data_set_0/output_0.pb +4 -3
- onnx/backend/test/data/node/test_gelu_default_2_expanded/test_data_set_0/output_0.pb +4 -3
- onnx/backend/test/data/node/test_gelu_tanh_2/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_gelu_tanh_2_expanded/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_globalaveragepool/model.onnx +0 -0
- onnx/backend/test/data/node/test_globalaveragepool_precomputed/model.onnx +0 -0
- onnx/backend/test/data/node/test_globalmaxpool/model.onnx +0 -0
- onnx/backend/test/data/node/test_globalmaxpool_precomputed/model.onnx +0 -0
- onnx/backend/test/data/node/test_gridsample/model.onnx +0 -0
- onnx/backend/test/data/node/test_gridsample_aligncorners_true/model.onnx +0 -0
- onnx/backend/test/data/node/test_gridsample_bicubic/model.onnx +0 -0
- onnx/backend/test/data/node/test_gridsample_bicubic_align_corners_0_additional_1/model.onnx +0 -0
- onnx/backend/test/data/node/test_gridsample_bicubic_align_corners_1_additional_1/model.onnx +0 -0
- onnx/backend/test/data/node/test_gridsample_bilinear/model.onnx +0 -0
- onnx/backend/test/data/node/test_gridsample_bilinear_align_corners_0_additional_1/model.onnx +0 -0
- onnx/backend/test/data/node/test_gridsample_bilinear_align_corners_1_additional_1/model.onnx +0 -0
- onnx/backend/test/data/node/test_gridsample_border_padding/model.onnx +0 -0
- onnx/backend/test/data/node/test_gridsample_nearest/model.onnx +0 -0
- onnx/backend/test/data/node/test_gridsample_nearest_align_corners_0_additional_1/model.onnx +0 -0
- onnx/backend/test/data/node/test_gridsample_nearest_align_corners_1_additional_1/model.onnx +0 -0
- onnx/backend/test/data/node/test_gridsample_reflection_padding/model.onnx +0 -0
- onnx/backend/test/data/node/test_gridsample_volumetric_bilinear_align_corners_0/model.onnx +0 -0
- onnx/backend/test/data/node/test_gridsample_volumetric_bilinear_align_corners_1/model.onnx +0 -0
- onnx/backend/test/data/node/test_gridsample_volumetric_nearest_align_corners_0/model.onnx +0 -0
- onnx/backend/test/data/node/test_gridsample_volumetric_nearest_align_corners_1/model.onnx +0 -0
- onnx/backend/test/data/node/test_gridsample_zeros_padding/model.onnx +0 -0
- onnx/backend/test/data/node/test_gru_batchwise/model.onnx +0 -0
- onnx/backend/test/data/node/test_gru_defaults/model.onnx +0 -0
- onnx/backend/test/data/node/test_gru_seq_length/model.onnx +0 -0
- onnx/backend/test/data/node/test_gru_with_initial_bias/model.onnx +0 -0
- onnx/backend/test/data/node/test_hammingwindow/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_hammingwindow_expanded/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_hammingwindow_symmetric/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_hammingwindow_symmetric_expanded/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_hannwindow/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_hannwindow_expanded/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_hannwindow_symmetric/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_hannwindow_symmetric_expanded/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_hardsigmoid/model.onnx +0 -0
- onnx/backend/test/data/node/test_hardsigmoid_default/model.onnx +0 -0
- onnx/backend/test/data/node/test_hardsigmoid_example/model.onnx +0 -0
- onnx/backend/test/data/node/test_hardswish/model.onnx +0 -0
- onnx/backend/test/data/node/test_hardswish_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_image_decoder_decode_jpeg2k_rgb/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_instancenorm_epsilon/model.onnx +0 -0
- onnx/backend/test/data/node/test_instancenorm_example/model.onnx +0 -0
- onnx/backend/test/data/node/test_lppool_1d_default/model.onnx +0 -0
- onnx/backend/test/data/node/test_lppool_1d_default/test_data_set_0/output_0.pb +2 -2
- onnx/backend/test/data/node/test_lppool_2d_default/model.onnx +0 -0
- onnx/backend/test/data/node/test_lppool_2d_default/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_lppool_2d_dilations/model.onnx +0 -0
- onnx/backend/test/data/node/test_lppool_2d_pads/model.onnx +0 -0
- onnx/backend/test/data/node/test_lppool_2d_pads/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_lppool_2d_same_lower/model.onnx +0 -0
- onnx/backend/test/data/node/test_lppool_2d_same_lower/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_lppool_2d_same_upper/model.onnx +0 -0
- onnx/backend/test/data/node/test_lppool_2d_same_upper/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_lppool_2d_strides/model.onnx +0 -0
- onnx/backend/test/data/node/test_lppool_2d_strides/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_lppool_3d_default/model.onnx +0 -0
- onnx/backend/test/data/node/test_lppool_3d_default/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_lstm_batchwise/model.onnx +0 -0
- onnx/backend/test/data/node/test_lstm_defaults/model.onnx +0 -0
- onnx/backend/test/data/node/test_lstm_with_initial_bias/model.onnx +0 -0
- onnx/backend/test/data/node/test_lstm_with_peepholes/model.onnx +0 -0
- onnx/backend/test/data/node/test_maxpool_1d_default/model.onnx +0 -0
- onnx/backend/test/data/node/test_maxpool_2d_ceil/model.onnx +0 -0
- onnx/backend/test/data/node/test_maxpool_2d_ceil_output_size_reduce_by_one/model.onnx +0 -0
- onnx/backend/test/data/node/test_maxpool_2d_default/model.onnx +0 -0
- onnx/backend/test/data/node/test_maxpool_2d_dilations/model.onnx +0 -0
- onnx/backend/test/data/node/test_maxpool_2d_pads/model.onnx +0 -0
- onnx/backend/test/data/node/test_maxpool_2d_precomputed_pads/model.onnx +0 -0
- onnx/backend/test/data/node/test_maxpool_2d_precomputed_same_upper/model.onnx +0 -0
- onnx/backend/test/data/node/test_maxpool_2d_precomputed_strides/model.onnx +0 -0
- onnx/backend/test/data/node/test_maxpool_2d_same_lower/model.onnx +0 -0
- onnx/backend/test/data/node/test_maxpool_2d_same_upper/model.onnx +0 -0
- onnx/backend/test/data/node/test_maxpool_2d_strides/model.onnx +0 -0
- onnx/backend/test/data/node/test_maxpool_2d_uint8/model.onnx +0 -0
- onnx/backend/test/data/node/test_maxpool_3d_default/model.onnx +0 -0
- onnx/backend/test/data/node/test_maxpool_3d_dilations/model.onnx +0 -0
- onnx/backend/test/data/node/test_maxpool_3d_dilations_use_ref_impl/model.onnx +0 -0
- onnx/backend/test/data/node/test_maxpool_3d_dilations_use_ref_impl_large/model.onnx +0 -0
- onnx/backend/test/data/node/test_maxpool_with_argmax_2d_precomputed_pads/model.onnx +0 -0
- onnx/backend/test/data/node/test_maxpool_with_argmax_2d_precomputed_strides/model.onnx +0 -0
- onnx/backend/test/data/node/test_maxunpool_export_with_output_shape/model.onnx +0 -0
- onnx/backend/test/data/node/test_maxunpool_export_without_output_shape/model.onnx +0 -0
- onnx/backend/test/data/node/test_mish/model.onnx +0 -0
- onnx/backend/test/data/node/test_mish/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_mish_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_mish_expanded/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_nllloss_NC/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NC_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1_ii/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1_ii_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1_mean_weight_negative_ii/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1_mean_weight_negative_ii_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1_weight/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1_weight_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1_weight_ii/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1_weight_ii_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1d2/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1d2_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1d2_no_weight_reduction_mean_ii/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1d2_no_weight_reduction_mean_ii_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1d2_reduction_mean/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1d2_reduction_mean_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1d2_reduction_sum/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1d2_reduction_sum_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1d2_with_weight/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1d2_with_weight_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1d2_with_weight_reduction_mean/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1d2_with_weight_reduction_mean_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1d2_with_weight_reduction_sum/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1d2_with_weight_reduction_sum_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1d2_with_weight_reduction_sum_ii/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1d2_with_weight_reduction_sum_ii_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1d2d3_none_no_weight_negative_ii/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1d2d3_none_no_weight_negative_ii_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1d2d3_sum_weight_high_ii/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1d2d3_sum_weight_high_ii_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1d2d3d4d5_mean_weight/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1d2d3d4d5_mean_weight_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1d2d3d4d5_none_no_weight/model.onnx +0 -0
- onnx/backend/test/data/node/test_nllloss_NCd1d2d3d4d5_none_no_weight_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_quantizelinear_int4/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_reduce_log_sum_exp_do_not_keepdims_random/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_reduce_log_sum_exp_do_not_keepdims_random_expanded/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_reduce_log_sum_exp_keepdims_random/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_reduce_log_sum_exp_keepdims_random_expanded/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_reduce_log_sum_exp_negative_axes_keepdims_random/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_reduce_log_sum_exp_negative_axes_keepdims_random_expanded/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_reduce_max_empty_set/model.onnx +0 -0
- onnx/backend/test/data/node/test_reduce_max_empty_set/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_reduce_max_empty_set/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_reduce_max_empty_set/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_reduce_sum_empty_axes_input_noop/model.onnx +0 -0
- onnx/backend/test/data/node/test_reduce_sum_empty_axes_input_noop/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_reduce_sum_empty_axes_input_noop/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_reduce_sum_empty_axes_input_noop/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_reduce_sum_negative_axes_keepdims_random/model.onnx +0 -0
- onnx/backend/test/data/node/test_reduce_sum_negative_axes_keepdims_random/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_reduce_sum_negative_axes_keepdims_random/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_resize_tf_crop_and_resize/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_tf_crop_and_resize/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_resize_tf_crop_and_resize/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_resize_tf_crop_and_resize_extrapolation_value/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_tf_crop_and_resize_extrapolation_value/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_resize_tf_crop_and_resize_extrapolation_value/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_resize_tf_crop_and_resize_extrapolation_value/test_data_set_0/input_2.pb +0 -0
- onnx/backend/test/data/node/test_resize_tf_crop_and_resize_extrapolation_value/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_not_larger/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_not_larger/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_not_smaller/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_not_smaller/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_not_smaller/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_not_smaller/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_rnn_seq_length/model.onnx +0 -0
- onnx/backend/test/data/node/test_roialign_aligned_false/model.onnx +0 -0
- onnx/backend/test/data/node/test_roialign_aligned_true/model.onnx +0 -0
- onnx/backend/test/data/node/test_roialign_mode_max/model.onnx +0 -0
- onnx/backend/test/data/node/test_round/model.onnx +0 -0
- onnx/backend/test/data/node/test_selu/model.onnx +0 -0
- onnx/backend/test/data/node/test_selu_default/model.onnx +0 -0
- onnx/backend/test/data/node/test_selu_example/model.onnx +0 -0
- onnx/backend/test/data/node/test_simple_rnn_batchwise/model.onnx +0 -0
- onnx/backend/test/data/node/test_simple_rnn_defaults/model.onnx +0 -0
- onnx/backend/test/data/node/test_simple_rnn_with_initial_bias/model.onnx +0 -0
- onnx/backend/test/data/node/test_sin/model.onnx +0 -0
- onnx/backend/test/data/node/test_sin_example/model.onnx +0 -0
- onnx/backend/test/data/node/test_sinh/model.onnx +0 -0
- onnx/backend/test/data/node/test_sinh/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_sinh_example/model.onnx +0 -0
- onnx/backend/test/data/node/test_softplus/model.onnx +0 -0
- onnx/backend/test/data/node/test_softplus_example/model.onnx +0 -0
- onnx/backend/test/data/node/test_softsign/model.onnx +0 -0
- onnx/backend/test/data/node/test_softsign_example/model.onnx +0 -0
- onnx/backend/test/data/node/test_stft_with_window/test_data_set_0/input_2.pb +0 -0
- onnx/backend/test/data/node/test_stft_with_window/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_tan/model.onnx +0 -0
- onnx/backend/test/data/node/test_tan/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_tan_example/model.onnx +0 -0
- onnx/backend/test/data/node/test_thresholdedrelu/model.onnx +0 -0
- onnx/backend/test/data/node/test_thresholdedrelu_default/model.onnx +0 -0
- onnx/backend/test/data/node/test_thresholdedrelu_example/model.onnx +0 -0
- onnx/backend/test/data/node/test_training_dropout/model.onnx +0 -0
- onnx/backend/test/data/node/test_training_dropout_default/model.onnx +0 -0
- onnx/backend/test/data/node/test_training_dropout_default_mask/model.onnx +0 -0
- onnx/backend/test/data/node/test_training_dropout_mask/model.onnx +0 -0
- onnx/backend/test/data/node/test_training_dropout_zero_ratio/model.onnx +0 -0
- onnx/backend/test/data/node/test_training_dropout_zero_ratio_mask/model.onnx +0 -0
- onnx/backend/test/loader/__init__.py +11 -6
- onnx/backend/test/report/__init__.py +4 -3
- onnx/backend/test/report/base.py +1 -0
- onnx/backend/test/report/coverage.py +21 -20
- onnx/backend/test/runner/__init__.py +12 -8
- onnx/backend/test/runner/item.py +3 -2
- onnx/backend/test/stat_coverage.py +6 -5
- onnx/bin/checker.py +1 -0
- onnx/checker.cc +6 -1
- onnx/common/version.h +1 -1
- onnx/compose.py +66 -50
- onnx/cpp2py_export.cc +4 -0
- onnx/defs/__init__.py +2 -2
- onnx/defs/data_type_utils.cc +0 -1
- onnx/defs/gen_doc.py +9 -8
- onnx/defs/gen_shape_inference_information.py +1 -0
- onnx/defs/generator/defs.cc +32 -84
- onnx/defs/generator/old.cc +389 -0
- onnx/defs/math/defs.cc +308 -313
- onnx/defs/math/old.cc +989 -7
- onnx/defs/math/utils.cc +12 -1
- onnx/defs/math/utils.h +2 -0
- onnx/defs/nn/defs.cc +57 -75
- onnx/defs/nn/old.cc +1536 -2
- onnx/defs/object_detection/defs.cc +4 -7
- onnx/defs/object_detection/old.cc +117 -0
- onnx/defs/operator_sets.h +108 -1
- onnx/defs/parser.cc +10 -1
- onnx/defs/quantization/defs.cc +3 -2
- onnx/defs/quantization/old.cc +4 -1
- onnx/defs/rnn/defs.cc +10 -13
- onnx/defs/rnn/old.cc +517 -2
- onnx/defs/schema.cc +53 -59
- onnx/defs/schema.h +58 -2
- onnx/defs/shape_inference.h +67 -18
- onnx/defs/tensor/defs.cc +22 -20
- onnx/defs/tensor/old.cc +111 -0
- onnx/external_data_helper.py +27 -14
- onnx/gen_proto.py +3 -2
- onnx/helper.py +86 -61
- onnx/hub.py +30 -28
- onnx/inliner/inliner.cc +0 -1
- onnx/mapping.py +3 -2
- onnx/numpy_helper.py +159 -23
- onnx/onnx-ml.proto +1 -1
- onnx/onnx.in.proto +1 -1
- onnx/onnx.proto +1 -1
- onnx/onnx_cpp2py_export/defs.pyi +0 -2
- onnx/onnx_cpp2py_export/inliner.pyi +0 -4
- onnx/onnx_cpp2py_export/parser.pyi +0 -4
- onnx/onnx_cpp2py_export.cp38-win32.pyd +0 -0
- onnx/parser.py +1 -0
- onnx/printer.py +2 -3
- onnx/reference/__init__.py +1 -0
- onnx/reference/custom_element_types.py +73 -8
- onnx/reference/op_run.py +13 -58
- onnx/reference/ops/__init__.py +1 -0
- onnx/reference/ops/_helpers.py +6 -4
- onnx/reference/ops/_op.py +16 -5
- onnx/reference/ops/_op_common_indices.py +1 -1
- onnx/reference/ops/_op_common_pool.py +38 -29
- onnx/reference/ops/_op_common_random.py +1 -1
- onnx/reference/ops/_op_common_window.py +2 -2
- onnx/reference/ops/_op_list.py +9 -6
- onnx/reference/ops/aionnx_preview_training/__init__.py +1 -0
- onnx/reference/ops/aionnx_preview_training/_op_list.py +5 -7
- onnx/reference/ops/aionnx_preview_training/_op_run_training.py +1 -1
- onnx/reference/ops/aionnx_preview_training/op_adagrad.py +14 -5
- onnx/reference/ops/aionnx_preview_training/op_adam.py +2 -2
- onnx/reference/ops/aionnx_preview_training/op_momentum.py +14 -2
- onnx/reference/ops/aionnxml/__init__.py +1 -0
- onnx/reference/ops/aionnxml/_common_classifier.py +1 -0
- onnx/reference/ops/aionnxml/_op_list.py +5 -6
- onnx/reference/ops/aionnxml/_op_run_aionnxml.py +1 -1
- onnx/reference/ops/aionnxml/op_array_feature_extractor.py +1 -1
- onnx/reference/ops/aionnxml/op_binarizer.py +1 -1
- onnx/reference/ops/aionnxml/op_dict_vectorizer.py +2 -2
- onnx/reference/ops/aionnxml/op_feature_vectorizer.py +1 -1
- onnx/reference/ops/aionnxml/op_imputer.py +3 -3
- onnx/reference/ops/aionnxml/op_label_encoder.py +1 -1
- onnx/reference/ops/aionnxml/op_linear_classifier.py +2 -2
- onnx/reference/ops/aionnxml/op_linear_regressor.py +1 -1
- onnx/reference/ops/aionnxml/op_normalizer.py +1 -1
- onnx/reference/ops/aionnxml/op_one_hot_encoder.py +1 -1
- onnx/reference/ops/aionnxml/op_scaler.py +1 -1
- onnx/reference/ops/aionnxml/op_svm_classifier.py +10 -7
- onnx/reference/ops/aionnxml/op_svm_helper.py +2 -2
- onnx/reference/ops/aionnxml/op_svm_regressor.py +1 -1
- onnx/reference/ops/aionnxml/op_tree_ensemble.py +3 -3
- onnx/reference/ops/aionnxml/op_tree_ensemble_classifier.py +1 -1
- onnx/reference/ops/aionnxml/op_tree_ensemble_helper.py +2 -2
- onnx/reference/ops/aionnxml/op_tree_ensemble_regressor.py +5 -3
- onnx/reference/ops/experimental/__init__.py +1 -0
- onnx/reference/ops/experimental/_op_list.py +6 -12
- onnx/reference/ops/experimental/_op_run_experimental.py +1 -1
- onnx/reference/ops/experimental/op_im2col.py +1 -1
- onnx/reference/ops/op_abs.py +1 -1
- onnx/reference/ops/op_acos.py +1 -1
- onnx/reference/ops/op_acosh.py +1 -1
- onnx/reference/ops/op_add.py +1 -1
- onnx/reference/ops/op_affine_grid.py +1 -1
- onnx/reference/ops/op_and.py +1 -1
- onnx/reference/ops/op_argmax.py +1 -1
- onnx/reference/ops/op_argmin.py +1 -1
- onnx/reference/ops/op_asin.py +1 -1
- onnx/reference/ops/op_asinh.py +1 -1
- onnx/reference/ops/op_atan.py +1 -1
- onnx/reference/ops/op_atanh.py +1 -1
- onnx/reference/ops/op_attribute_has_value.py +15 -15
- onnx/reference/ops/op_average_pool.py +1 -1
- onnx/reference/ops/op_batch_normalization.py +13 -2
- onnx/reference/ops/op_bernoulli.py +1 -1
- onnx/reference/ops/op_bitshift.py +1 -1
- onnx/reference/ops/op_bitwise_and.py +1 -1
- onnx/reference/ops/op_bitwise_not.py +1 -1
- onnx/reference/ops/op_bitwise_or.py +1 -1
- onnx/reference/ops/op_bitwise_xor.py +1 -1
- onnx/reference/ops/op_blackman_window.py +1 -1
- onnx/reference/ops/op_cast.py +11 -10
- onnx/reference/ops/op_cast_like.py +1 -1
- onnx/reference/ops/op_ceil.py +1 -1
- onnx/reference/ops/op_celu.py +1 -1
- onnx/reference/ops/op_center_crop_pad.py +1 -1
- onnx/reference/ops/op_clip.py +1 -1
- onnx/reference/ops/op_col2im.py +10 -4
- onnx/reference/ops/op_compress.py +1 -1
- onnx/reference/ops/op_concat.py +1 -1
- onnx/reference/ops/op_concat_from_sequence.py +3 -3
- onnx/reference/ops/op_constant.py +2 -2
- onnx/reference/ops/op_constant_of_shape.py +1 -1
- onnx/reference/ops/op_conv.py +22 -17
- onnx/reference/ops/op_conv_integer.py +1 -1
- onnx/reference/ops/op_conv_transpose.py +37 -6
- onnx/reference/ops/op_cos.py +1 -1
- onnx/reference/ops/op_cosh.py +1 -1
- onnx/reference/ops/op_cum_sum.py +1 -1
- onnx/reference/ops/op_deform_conv.py +1 -1
- onnx/reference/ops/op_depth_to_space.py +1 -1
- onnx/reference/ops/op_dequantize_linear.py +7 -9
- onnx/reference/ops/op_det.py +1 -1
- onnx/reference/ops/op_dft.py +16 -2
- onnx/reference/ops/op_div.py +1 -1
- onnx/reference/ops/op_dropout.py +9 -8
- onnx/reference/ops/op_dynamic_quantize_linear.py +1 -1
- onnx/reference/ops/op_einsum.py +1 -1
- onnx/reference/ops/op_elu.py +1 -1
- onnx/reference/ops/op_equal.py +1 -1
- onnx/reference/ops/op_erf.py +1 -1
- onnx/reference/ops/op_exp.py +1 -1
- onnx/reference/ops/op_expand.py +1 -1
- onnx/reference/ops/op_eyelike.py +2 -2
- onnx/reference/ops/op_flatten.py +1 -1
- onnx/reference/ops/op_floor.py +1 -1
- onnx/reference/ops/op_gather.py +1 -1
- onnx/reference/ops/op_gather_elements.py +3 -3
- onnx/reference/ops/op_gathernd.py +2 -4
- onnx/reference/ops/op_gemm.py +12 -2
- onnx/reference/ops/op_global_average_pool.py +1 -1
- onnx/reference/ops/op_global_max_pool.py +1 -1
- onnx/reference/ops/op_greater.py +1 -1
- onnx/reference/ops/op_greater_or_equal.py +1 -1
- onnx/reference/ops/op_grid_sample.py +2 -3
- onnx/reference/ops/op_gru.py +7 -7
- onnx/reference/ops/op_hamming_window.py +1 -1
- onnx/reference/ops/op_hann_window.py +1 -1
- onnx/reference/ops/op_hard_sigmoid.py +1 -1
- onnx/reference/ops/op_hardmax.py +5 -2
- onnx/reference/ops/op_identity.py +3 -3
- onnx/reference/ops/op_if.py +2 -2
- onnx/reference/ops/op_instance_normalization.py +1 -1
- onnx/reference/ops/op_isinf.py +1 -1
- onnx/reference/ops/op_isnan.py +1 -1
- onnx/reference/ops/op_layer_normalization.py +2 -4
- onnx/reference/ops/op_leaky_relu.py +1 -1
- onnx/reference/ops/op_less.py +1 -1
- onnx/reference/ops/op_less_or_equal.py +1 -1
- onnx/reference/ops/op_log.py +1 -1
- onnx/reference/ops/op_log_softmax.py +1 -1
- onnx/reference/ops/op_loop.py +4 -2
- onnx/reference/ops/op_lp_normalization.py +1 -1
- onnx/reference/ops/op_lp_pool.py +4 -2
- onnx/reference/ops/op_lrn.py +1 -1
- onnx/reference/ops/op_lstm.py +9 -11
- onnx/reference/ops/op_matmul.py +1 -1
- onnx/reference/ops/op_matmul_integer.py +1 -1
- onnx/reference/ops/op_max.py +1 -1
- onnx/reference/ops/op_max_pool.py +8 -8
- onnx/reference/ops/op_max_unpool.py +5 -3
- onnx/reference/ops/op_mean.py +1 -1
- onnx/reference/ops/op_mel_weight_matrix.py +1 -1
- onnx/reference/ops/op_min.py +1 -1
- onnx/reference/ops/op_mod.py +1 -1
- onnx/reference/ops/op_mul.py +1 -1
- onnx/reference/ops/op_neg.py +1 -1
- onnx/reference/ops/op_negative_log_likelihood_loss.py +4 -2
- onnx/reference/ops/op_non_max_suppression.py +10 -11
- onnx/reference/ops/op_non_zero.py +1 -1
- onnx/reference/ops/op_not.py +1 -1
- onnx/reference/ops/op_one_hot.py +1 -1
- onnx/reference/ops/op_optional.py +1 -1
- onnx/reference/ops/op_optional_get_element.py +1 -1
- onnx/reference/ops/op_optional_has_element.py +1 -1
- onnx/reference/ops/op_or.py +1 -1
- onnx/reference/ops/op_pad.py +1 -1
- onnx/reference/ops/op_pool_common.py +7 -6
- onnx/reference/ops/op_pow.py +1 -1
- onnx/reference/ops/op_prelu.py +3 -3
- onnx/reference/ops/op_qlinear_conv.py +1 -1
- onnx/reference/ops/op_qlinear_matmul.py +1 -1
- onnx/reference/ops/op_quantize_linear.py +15 -9
- onnx/reference/ops/op_random_normal.py +1 -1
- onnx/reference/ops/op_random_normal_like.py +1 -1
- onnx/reference/ops/op_random_uniform.py +1 -1
- onnx/reference/ops/op_random_uniform_like.py +1 -1
- onnx/reference/ops/op_range.py +1 -1
- onnx/reference/ops/op_reciprocal.py +1 -1
- onnx/reference/ops/op_reduce_l1.py +1 -1
- onnx/reference/ops/op_reduce_l2.py +1 -1
- onnx/reference/ops/op_reduce_log_sum.py +1 -1
- onnx/reference/ops/op_reduce_log_sum_exp.py +1 -1
- onnx/reference/ops/op_reduce_max.py +1 -1
- onnx/reference/ops/op_reduce_mean.py +2 -2
- onnx/reference/ops/op_reduce_min.py +1 -1
- onnx/reference/ops/op_reduce_prod.py +1 -1
- onnx/reference/ops/op_reduce_sum.py +2 -2
- onnx/reference/ops/op_reduce_sum_square.py +1 -1
- onnx/reference/ops/op_regex_full_match.py +1 -1
- onnx/reference/ops/op_relu.py +1 -1
- onnx/reference/ops/op_reshape.py +1 -1
- onnx/reference/ops/op_reverse_sequence.py +1 -1
- onnx/reference/ops/op_rnn.py +10 -8
- onnx/reference/ops/op_roi_align.py +5 -5
- onnx/reference/ops/op_round.py +1 -1
- onnx/reference/ops/op_scan.py +8 -8
- onnx/reference/ops/op_scatter_elements.py +19 -50
- onnx/reference/ops/op_scatternd.py +1 -1
- onnx/reference/ops/op_selu.py +1 -1
- onnx/reference/ops/op_sequence_at.py +1 -1
- onnx/reference/ops/op_sequence_construct.py +1 -1
- onnx/reference/ops/op_sequence_empty.py +2 -2
- onnx/reference/ops/op_sequence_erase.py +1 -1
- onnx/reference/ops/op_sequence_insert.py +6 -6
- onnx/reference/ops/op_sequence_length.py +1 -1
- onnx/reference/ops/op_sequence_map.py +1 -1
- onnx/reference/ops/op_shape.py +2 -6
- onnx/reference/ops/op_shrink.py +1 -1
- onnx/reference/ops/op_sigmoid.py +1 -1
- onnx/reference/ops/op_sign.py +1 -1
- onnx/reference/ops/op_sin.py +1 -1
- onnx/reference/ops/op_sinh.py +1 -1
- onnx/reference/ops/op_size.py +1 -1
- onnx/reference/ops/op_slice.py +3 -5
- onnx/reference/ops/op_softmax.py +1 -1
- onnx/reference/ops/op_softmax_cross_entropy_loss.py +1 -1
- onnx/reference/ops/op_softplus.py +1 -1
- onnx/reference/ops/op_softsign.py +1 -1
- onnx/reference/ops/op_space_to_depth.py +1 -1
- onnx/reference/ops/op_split.py +1 -1
- onnx/reference/ops/op_split_to_sequence.py +5 -7
- onnx/reference/ops/op_sqrt.py +1 -1
- onnx/reference/ops/op_squeeze.py +1 -1
- onnx/reference/ops/op_stft.py +3 -2
- onnx/reference/ops/op_string_concat.py +1 -1
- onnx/reference/ops/op_string_normalizer.py +8 -8
- onnx/reference/ops/op_string_split.py +2 -4
- onnx/reference/ops/op_sub.py +1 -1
- onnx/reference/ops/op_sum.py +1 -1
- onnx/reference/ops/op_tan.py +1 -1
- onnx/reference/ops/op_tanh.py +1 -1
- onnx/reference/ops/op_tfidf_vectorizer.py +11 -12
- onnx/reference/ops/op_thresholded_relu.py +1 -1
- onnx/reference/ops/op_tile.py +1 -1
- onnx/reference/ops/op_topk.py +7 -2
- onnx/reference/ops/op_transpose.py +1 -1
- onnx/reference/ops/op_trilu.py +1 -1
- onnx/reference/ops/op_unique.py +3 -1
- onnx/reference/ops/op_unsqueeze.py +2 -2
- onnx/reference/ops/op_upsample.py +1 -1
- onnx/reference/ops/op_where.py +1 -1
- onnx/reference/ops/op_xor.py +1 -1
- onnx/reference/ops_optimized/__init__.py +1 -0
- onnx/reference/ops_optimized/op_conv_optimized.py +1 -1
- onnx/reference/reference_evaluator.py +27 -13
- onnx/serialization.py +1 -1
- onnx/shape_inference/implementation.cc +15 -1
- onnx/shape_inference/implementation.h +15 -1
- onnx/shape_inference.py +1 -1
- onnx/subbyte.py +6 -6
- onnx/test/basic_test.py +1 -0
- onnx/test/checker_test.py +37 -2
- onnx/test/compose_test.py +12 -11
- onnx/test/cpp/schema_registration_test.cc +3 -3
- onnx/test/cpp/shape_inference_test.cc +38 -2
- onnx/test/elu_test.py +2 -0
- onnx/test/function_inference_test.py +2 -0
- onnx/test/function_test.py +1 -0
- onnx/test/helper_test.py +77 -16
- onnx/test/hub_test.py +1 -1
- onnx/test/inference_function_test.py +25 -8
- onnx/test/inliner_test.py +2 -0
- onnx/test/model_container_refeval_test.py +2 -1
- onnx/test/model_container_test.py +1 -0
- onnx/test/model_inference_test.py +2 -0
- onnx/test/numpy_helper_test.py +56 -1
- onnx/test/parser_test.py +48 -2
- onnx/test/printer_test.py +2 -0
- onnx/test/reference_evaluator_ml_test.py +2 -3
- onnx/test/reference_evaluator_model_test.py +2 -0
- onnx/test/reference_evaluator_test.py +173 -19
- onnx/test/relu_test.py +2 -0
- onnx/test/schema_test.py +4 -2
- onnx/test/serialization_test.py +2 -0
- onnx/test/shape_inference_test.py +349 -19
- onnx/test/symbolic_shape_test.py +3 -3
- onnx/test/test_backend_onnxruntime.py +272 -1
- onnx/test/test_backend_reference.py +24 -3
- onnx/test/test_backend_test.py +6 -5
- onnx/test/test_external_data.py +91 -2
- onnx/test/test_with_ort.py +1 -0
- onnx/test/tools_test.py +15 -14
- onnx/test/training_tool_test.py +1 -0
- onnx/test/utils_test.py +1 -0
- onnx/test/version_converter/automatic_downgrade_test.py +2 -0
- onnx/test/version_converter/automatic_upgrade_test.py +2 -0
- onnx/test/version_converter_test.py +26 -7
- onnx/test/version_utils.py +8 -0
- onnx/tools/net_drawer.py +6 -5
- onnx/tools/replace_constants.py +11 -11
- onnx/tools/update_model_dims.py +7 -6
- onnx/utils.py +41 -21
- onnx/version.py +2 -2
- onnx/version_converter/adapters/split_17_18.h +1 -1
- onnx/version_converter/convert.h +107 -2
- onnx/version_converter.py +3 -2
- {onnx-1.16.2.dist-info → onnx-1.17.0.dist-info}/METADATA +9 -12
- {onnx-1.16.2.dist-info → onnx-1.17.0.dist-info}/RECORD +843 -817
- {onnx-1.16.2.dist-info → onnx-1.17.0.dist-info}/WHEEL +1 -1
- {onnx-1.16.2.dist-info → onnx-1.17.0.dist-info}/LICENSE +0 -0
- {onnx-1.16.2.dist-info → onnx-1.17.0.dist-info}/entry_points.txt +0 -0
- {onnx-1.16.2.dist-info → onnx-1.17.0.dist-info}/top_level.txt +0 -0
onnx/defs/math/old.cc
CHANGED
|
@@ -4,6 +4,7 @@
|
|
|
4
4
|
|
|
5
5
|
#include <functional>
|
|
6
6
|
|
|
7
|
+
#include "onnx/defs/data_type_utils.h"
|
|
7
8
|
#include "onnx/defs/function.h"
|
|
8
9
|
#include "onnx/defs/math/utils.h"
|
|
9
10
|
#include "onnx/defs/schema.h"
|
|
@@ -11,6 +12,978 @@
|
|
|
11
12
|
|
|
12
13
|
namespace ONNX_NAMESPACE {
|
|
13
14
|
|
|
15
|
+
bool BuildContextDependentFunctionBody_opset13(
|
|
16
|
+
const FunctionBodyBuildContext& ctx,
|
|
17
|
+
const OpSchema& schema,
|
|
18
|
+
FunctionProto& functionProto) {
|
|
19
|
+
if (ctx.getInputType(0) == nullptr) {
|
|
20
|
+
// we cannot create a correct function body without knowing the input type
|
|
21
|
+
return false;
|
|
22
|
+
}
|
|
23
|
+
auto input_type = ctx.getInputType(0)->tensor_type().elem_type();
|
|
24
|
+
bool float_input = input_type == TensorProto_DataType_FLOAT;
|
|
25
|
+
auto reduction_attr_proto = ctx.getAttribute("reduction");
|
|
26
|
+
std::string reduction_attr =
|
|
27
|
+
reduction_attr_proto != nullptr && reduction_attr_proto->has_s() ? reduction_attr_proto->s() : "mean";
|
|
28
|
+
|
|
29
|
+
FunctionBuilder builder(functionProto);
|
|
30
|
+
builder.Const1D("const_zero", int64_t(0))
|
|
31
|
+
.Const1D("const_one", int64_t(1))
|
|
32
|
+
.Const1D("axes", int64_t(1))
|
|
33
|
+
.Add("expanded_target = Unsqueeze (target, axes)");
|
|
34
|
+
|
|
35
|
+
if (ctx.getAttribute("ignore_index") == nullptr) {
|
|
36
|
+
builder.Add(R"(
|
|
37
|
+
input_gather_element = GatherElements <axis = 1> (input, expanded_target)
|
|
38
|
+
loss_NCdd = Neg (input_gather_element)
|
|
39
|
+
loss_N1dd = Slice (loss_NCdd, const_zero, const_one, const_one)
|
|
40
|
+
)");
|
|
41
|
+
|
|
42
|
+
if (!ctx.hasInput(2)) {
|
|
43
|
+
if (reduction_attr == "none") {
|
|
44
|
+
builder.Add("loss = Squeeze (loss_N1dd, axes)");
|
|
45
|
+
} else {
|
|
46
|
+
builder.Add("loss_Ndd = Squeeze (loss_N1dd, axes)");
|
|
47
|
+
if (reduction_attr == "mean") {
|
|
48
|
+
builder.Add("loss = ReduceMean <keepdims = 0> (loss_Ndd)");
|
|
49
|
+
} else {
|
|
50
|
+
builder.Add("loss = ReduceSum <keepdims = 0> (loss_Ndd)");
|
|
51
|
+
}
|
|
52
|
+
}
|
|
53
|
+
} else {
|
|
54
|
+
builder.Add("weight_gather = Gather (weight, target)");
|
|
55
|
+
builder.Add("loss_unweighted = Squeeze (loss_N1dd, axes)");
|
|
56
|
+
if (reduction_attr == "none") {
|
|
57
|
+
builder.Add("loss = Mul (loss_unweighted, weight_gather)");
|
|
58
|
+
} else {
|
|
59
|
+
builder.Add("loss_Ndd = Mul (loss_unweighted, weight_gather)");
|
|
60
|
+
if (reduction_attr == "mean") {
|
|
61
|
+
builder.Add(R"(
|
|
62
|
+
loss_sum = ReduceSum <keepdims = 0> (loss_Ndd)
|
|
63
|
+
weight_gather_sum = ReduceSum <keepdims = 0> (weight_gather)
|
|
64
|
+
loss = Div (loss_sum, weight_gather_sum)
|
|
65
|
+
)");
|
|
66
|
+
} else {
|
|
67
|
+
builder.Add("loss = ReduceSum <keepdims = 0> (loss_Ndd)");
|
|
68
|
+
}
|
|
69
|
+
}
|
|
70
|
+
}
|
|
71
|
+
} else {
|
|
72
|
+
builder.Const1D("const_ignore_index", ctx.getAttribute("ignore_index")->i());
|
|
73
|
+
builder.Add(R"(
|
|
74
|
+
const_zero_target_typed = Sub (expanded_target, expanded_target)
|
|
75
|
+
expanded_target_int64 = Cast <to = 7> (expanded_target)
|
|
76
|
+
mask = Equal (expanded_target_int64, const_ignore_index)
|
|
77
|
+
transform_targets = Where (mask, const_zero_target_typed, expanded_target)
|
|
78
|
+
)");
|
|
79
|
+
builder.Add("input_gather_element = GatherElements <axis = 1> (input, transform_targets)");
|
|
80
|
+
builder.Const1D("const_zero_float", 0.0f);
|
|
81
|
+
if (!float_input) {
|
|
82
|
+
builder.Add("const_zero_casted = Cast (const_zero_float)", "to", static_cast<int64_t>(input_type))
|
|
83
|
+
.Add("input_gather_element_transform = Where (mask, const_zero_casted, input_gather_element)");
|
|
84
|
+
} else
|
|
85
|
+
builder.Add("input_gather_element_transform = Where (mask, const_zero_float, input_gather_element)");
|
|
86
|
+
builder.Add("loss_NCdd = Neg (input_gather_element_transform)");
|
|
87
|
+
builder.Add("loss_N1dd = Slice (loss_NCdd, const_zero, const_one, const_one)");
|
|
88
|
+
|
|
89
|
+
if (!ctx.hasInput(2)) {
|
|
90
|
+
builder.Add("squeeze_mask = Squeeze (mask, axes)");
|
|
91
|
+
builder.Const1D("const_one_float", 1.0f);
|
|
92
|
+
if (!float_input) {
|
|
93
|
+
builder.Add("const_one_casted = Cast (const_one_float)", "to", static_cast<int64_t>(input_type))
|
|
94
|
+
.Add("weight_gather = Where (squeeze_mask, const_zero_casted, const_one_casted)");
|
|
95
|
+
} else
|
|
96
|
+
builder.Add("weight_gather = Where (squeeze_mask, const_zero_float, const_one_float)");
|
|
97
|
+
|
|
98
|
+
} else {
|
|
99
|
+
builder.Add("weight_gather_temp = Gather (weight, transform_targets)");
|
|
100
|
+
builder.Add(
|
|
101
|
+
float_input ? "weight_gather_temp_1 = Where (mask, const_zero_float, weight_gather_temp)"
|
|
102
|
+
: "weight_gather_temp_1 = Where (mask, const_zero_casted, weight_gather_temp)");
|
|
103
|
+
builder.Add("weight_gather = Squeeze (weight_gather_temp_1, axes)");
|
|
104
|
+
}
|
|
105
|
+
|
|
106
|
+
builder.Add("loss_unweighted = Squeeze (loss_N1dd, axes)");
|
|
107
|
+
if (reduction_attr == "none") {
|
|
108
|
+
builder.Add("loss = Mul (loss_unweighted, weight_gather)");
|
|
109
|
+
} else {
|
|
110
|
+
builder.Add("loss_Ndd = Mul (loss_unweighted, weight_gather)");
|
|
111
|
+
if (reduction_attr == "mean") {
|
|
112
|
+
builder.Add(R"(
|
|
113
|
+
loss_sum = ReduceSum <keepdims = 0> (loss_Ndd)
|
|
114
|
+
weight_gather_sum = ReduceSum <keepdims = 0> (weight_gather)
|
|
115
|
+
loss = Div (loss_sum, weight_gather_sum)
|
|
116
|
+
)");
|
|
117
|
+
} else {
|
|
118
|
+
builder.Add("loss = ReduceSum <keepdims = 0> (loss_Ndd)");
|
|
119
|
+
}
|
|
120
|
+
}
|
|
121
|
+
}
|
|
122
|
+
|
|
123
|
+
schema.BuildFunction(functionProto);
|
|
124
|
+
return true;
|
|
125
|
+
}
|
|
126
|
+
|
|
127
|
+
static const char* NegativeLogLikelihoodLoss_ver13_doc = R"DOC(
|
|
128
|
+
A NegativeLogLikelihoodLoss operator computes (weighted) negative log likelihood loss.
|
|
129
|
+
Its "input" tensor has the shape of (N, C, d1, d2, ..., dk) where k >= 0.
|
|
130
|
+
The "input" tensor contains log-probabilities for input[n, :, d_1, d_2,..., d_k] being in a class of [0, C).
|
|
131
|
+
The operator's "target" input tensor has the shape of (N, d1, d2, ..., dk). It encodes class labels (one of C classes)
|
|
132
|
+
or it may contain a special value (indicated by an attribute ignore_index) for N x d1 x d2 x ... x dk samples.
|
|
133
|
+
The loss value for input[n, :, d_1, d_2,...d_k] being classified as class c = target[n][d_1][d_2]...[d_k] is computed as:
|
|
134
|
+
|
|
135
|
+
```
|
|
136
|
+
loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k].
|
|
137
|
+
```
|
|
138
|
+
|
|
139
|
+
When an optional "weight" is provided, the sample loss is calculated as:
|
|
140
|
+
|
|
141
|
+
```
|
|
142
|
+
loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k] * weight[c].
|
|
143
|
+
```
|
|
144
|
+
|
|
145
|
+
loss is zero for the case when target-value equals ignore_index.
|
|
146
|
+
|
|
147
|
+
```
|
|
148
|
+
loss[n][d_1][d_2]...[d_k] = 0, when target[n][d_1][d_2]...[d_k] = ignore_index
|
|
149
|
+
```
|
|
150
|
+
|
|
151
|
+
If "reduction" attribute is set to "none", the operator's output will be the above loss with shape (N, d1, d2, ..., dk).
|
|
152
|
+
If "reduction" attribute is set to "mean" (the default attribute value), the output loss is (weight) averaged:
|
|
153
|
+
|
|
154
|
+
```
|
|
155
|
+
mean(loss), if "weight" is not provided,
|
|
156
|
+
```
|
|
157
|
+
|
|
158
|
+
or if weight is provided,
|
|
159
|
+
|
|
160
|
+
```
|
|
161
|
+
sum(loss) / sum(weight[target[n][d_1][d_2]...[d_k]]]), for all samples.
|
|
162
|
+
```
|
|
163
|
+
|
|
164
|
+
If "reduction" attribute is set to "sum", the output is a scalar: `sum(loss)`.
|
|
165
|
+
|
|
166
|
+
See also https://pytorch.org/docs/stable/nn.html#torch.nn.NLLLoss.
|
|
167
|
+
|
|
168
|
+
Example 1:
|
|
169
|
+
|
|
170
|
+
```
|
|
171
|
+
// negative log likelihood loss, "none" reduction
|
|
172
|
+
N, C, d1 = 2, 3, 2
|
|
173
|
+
input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],
|
|
174
|
+
[[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]
|
|
175
|
+
target = [[2, 1], [0, 2]]
|
|
176
|
+
|
|
177
|
+
loss = np.zeros((N, d1))
|
|
178
|
+
for n in range(N):
|
|
179
|
+
for d_1 in range(d1):
|
|
180
|
+
c = target[n][d_1]
|
|
181
|
+
loss[n][d_1] = -input[n][c][d_1]
|
|
182
|
+
|
|
183
|
+
// print(loss)
|
|
184
|
+
// [[-3. -2.]
|
|
185
|
+
// [-0. -2.]]
|
|
186
|
+
```
|
|
187
|
+
|
|
188
|
+
Example 2:
|
|
189
|
+
|
|
190
|
+
```
|
|
191
|
+
// weighted negative log likelihood loss, sum reduction
|
|
192
|
+
N, C, d1 = 2, 3, 2
|
|
193
|
+
input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],
|
|
194
|
+
[[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]
|
|
195
|
+
target = [[2, 1], [0, 2]]
|
|
196
|
+
weight = [0.2, 0.3, 0.1]
|
|
197
|
+
loss = np.zeros((N, d1))
|
|
198
|
+
for n in range(N):
|
|
199
|
+
for d_1 in range(d1):
|
|
200
|
+
c = target[n][d_1]
|
|
201
|
+
loss[n][d_1] = -input[n][c][d_1] * weight[c]
|
|
202
|
+
|
|
203
|
+
loss = np.sum(loss)
|
|
204
|
+
// print(loss)
|
|
205
|
+
// -1.1
|
|
206
|
+
```
|
|
207
|
+
|
|
208
|
+
Example 3:
|
|
209
|
+
|
|
210
|
+
```
|
|
211
|
+
// weighted negative log likelihood loss, mean reduction
|
|
212
|
+
N, C, d1 = 2, 3, 2
|
|
213
|
+
input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],
|
|
214
|
+
[[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]
|
|
215
|
+
target = [[2, 1], [0, 2]]
|
|
216
|
+
weight = [0.2, 0.3, 0.1]
|
|
217
|
+
loss = np.zeros((N, d1))
|
|
218
|
+
weight_total = 0
|
|
219
|
+
for n in range(N):
|
|
220
|
+
for d_1 in range(d1):
|
|
221
|
+
c = target[n][d_1]
|
|
222
|
+
loss[n][d_1] = -input[n][c][d_1] * weight[c]
|
|
223
|
+
weight_total = weight_total + weight[c]
|
|
224
|
+
|
|
225
|
+
loss = np.sum(loss) / weight_total
|
|
226
|
+
// print(loss)
|
|
227
|
+
// -1.57
|
|
228
|
+
```
|
|
229
|
+
)DOC";
|
|
230
|
+
|
|
231
|
+
ONNX_OPERATOR_SET_SCHEMA(
|
|
232
|
+
NegativeLogLikelihoodLoss,
|
|
233
|
+
13,
|
|
234
|
+
OpSchema()
|
|
235
|
+
.SetDoc(NegativeLogLikelihoodLoss_ver13_doc)
|
|
236
|
+
.Input(
|
|
237
|
+
0,
|
|
238
|
+
"input",
|
|
239
|
+
"Input tensor of shape (N, C) or (N, C, d1, d2, ..., dk).",
|
|
240
|
+
"T",
|
|
241
|
+
OpSchema::Single,
|
|
242
|
+
true,
|
|
243
|
+
1,
|
|
244
|
+
OpSchema::Differentiable)
|
|
245
|
+
.Input(
|
|
246
|
+
1,
|
|
247
|
+
"target",
|
|
248
|
+
"Target tensor of shape (N) or (N, d1, d2, ..., dk). Target element value shall be in range of [0, C). "
|
|
249
|
+
"If ignore_index is specified, it may have a value outside [0, C) and the target values should either be "
|
|
250
|
+
"in the range [0, C) or have the value ignore_index.",
|
|
251
|
+
"Tind",
|
|
252
|
+
OpSchema::Single,
|
|
253
|
+
true,
|
|
254
|
+
1,
|
|
255
|
+
OpSchema::NonDifferentiable)
|
|
256
|
+
.Input(
|
|
257
|
+
2,
|
|
258
|
+
"weight",
|
|
259
|
+
"Optional rescaling weight tensor. "
|
|
260
|
+
"If given, it has to be a tensor of size C. Otherwise, it is treated as if having all ones.",
|
|
261
|
+
"T",
|
|
262
|
+
OpSchema::Optional,
|
|
263
|
+
true,
|
|
264
|
+
1,
|
|
265
|
+
OpSchema::NonDifferentiable)
|
|
266
|
+
.Output(0, "loss", "The negative log likelihood loss", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
|
|
267
|
+
.Attr(
|
|
268
|
+
"reduction",
|
|
269
|
+
"Type of reduction to apply to loss: none, sum, mean (default). "
|
|
270
|
+
"'none': the output is the loss for each sample. "
|
|
271
|
+
"'sum': the output will be summed. "
|
|
272
|
+
"'mean': the sum of the output will be divided by the sum of applied weights.",
|
|
273
|
+
AttributeProto::STRING,
|
|
274
|
+
std::string("mean"))
|
|
275
|
+
.Attr(
|
|
276
|
+
"ignore_index",
|
|
277
|
+
"Specifies a target value that is ignored and does not contribute to the input gradient. It's an optional value.",
|
|
278
|
+
AttributeProto::INT,
|
|
279
|
+
false)
|
|
280
|
+
.TypeConstraint(
|
|
281
|
+
"T",
|
|
282
|
+
{"tensor(float16)", "tensor(float)", "tensor(double)"},
|
|
283
|
+
"Constrain input, weight, and output types to floating-point tensors.")
|
|
284
|
+
.TypeConstraint("Tind", {"tensor(int32)", "tensor(int64)"}, "Constrain target to integer types")
|
|
285
|
+
.SetContextDependentFunctionBodyBuilder(BuildContextDependentFunctionBody_opset13)
|
|
286
|
+
.TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
|
|
287
|
+
// Type inference
|
|
288
|
+
propagateElemTypeFromInputToOutput(ctx, 0, 0);
|
|
289
|
+
|
|
290
|
+
// Shape inference
|
|
291
|
+
if (hasNInputShapes(ctx, 2)) {
|
|
292
|
+
const TensorShapeProto& input_shape = ctx.getInputType(0)->tensor_type().shape();
|
|
293
|
+
const TensorShapeProto& target_shape = ctx.getInputType(1)->tensor_type().shape();
|
|
294
|
+
|
|
295
|
+
const int input_rank = static_cast<int>(input_shape.dim_size());
|
|
296
|
+
const int target_rank = static_cast<int>(target_shape.dim_size());
|
|
297
|
+
|
|
298
|
+
if (input_rank < 2) {
|
|
299
|
+
fail_shape_inference("Input rank must be >= 2.")
|
|
300
|
+
}
|
|
301
|
+
if (target_rank != input_rank - 1) {
|
|
302
|
+
fail_shape_inference("Target rank must be 1 less than the input rank.");
|
|
303
|
+
}
|
|
304
|
+
|
|
305
|
+
// match input dimensions (N, C, d1, ..., dk) with target
|
|
306
|
+
// dimensions of (C, d1, ..., dk)
|
|
307
|
+
for (int dim = 0; dim < target_rank; dim++) {
|
|
308
|
+
const auto input_dim = dim == 0 ? input_shape.dim(dim) : input_shape.dim(dim + 1);
|
|
309
|
+
const auto target_dim = target_shape.dim(dim);
|
|
310
|
+
if (input_dim.has_dim_value() && target_dim.has_dim_value() &&
|
|
311
|
+
input_dim.dim_value() != target_dim.dim_value())
|
|
312
|
+
fail_shape_inference("Input and target dimension value mismatch.");
|
|
313
|
+
}
|
|
314
|
+
|
|
315
|
+
if (ctx.getNumInputs() == 3 && hasInputShape(ctx, 2)) {
|
|
316
|
+
const TensorShapeProto& weight_shape = ctx.getInputType(2)->tensor_type().shape();
|
|
317
|
+
if (weight_shape.dim_size() != 1) {
|
|
318
|
+
fail_shape_inference("Weight rank must be 1.");
|
|
319
|
+
}
|
|
320
|
+
}
|
|
321
|
+
|
|
322
|
+
TensorShapeProto* output_shape = ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape();
|
|
323
|
+
|
|
324
|
+
if (getAttribute(ctx, "reduction", "mean") == "none") {
|
|
325
|
+
// output tensor is of shape (N, d1, d2, ..., dk) if
|
|
326
|
+
// reduction attribute is "none".
|
|
327
|
+
for (int i = 0; i < input_rank - 1; i++) {
|
|
328
|
+
auto* dim = output_shape->add_dim();
|
|
329
|
+
if (i == 0)
|
|
330
|
+
*dim = input_shape.dim(i);
|
|
331
|
+
else
|
|
332
|
+
*dim = input_shape.dim(i + 1);
|
|
333
|
+
}
|
|
334
|
+
}
|
|
335
|
+
// otherwise output is a scalar.
|
|
336
|
+
}
|
|
337
|
+
}));
|
|
338
|
+
|
|
339
|
+
static const char* Det_ver11_doc = R"DOC(
|
|
340
|
+
Det calculates determinant of a square matrix or batches of square matrices.
|
|
341
|
+
Det takes one input tensor of shape `[*, M, M]`, where `*` is zero or more batch dimensions,
|
|
342
|
+
and the inner-most 2 dimensions form square matrices.
|
|
343
|
+
The output is a tensor of shape `[*]`, containing the determinants of all input submatrices.
|
|
344
|
+
e.g., When the input is 2-D, the output is a scalar(shape is empty: `[]`).
|
|
345
|
+
)DOC";
|
|
346
|
+
|
|
347
|
+
ONNX_OPERATOR_SET_SCHEMA(
|
|
348
|
+
Det,
|
|
349
|
+
11,
|
|
350
|
+
OpSchema()
|
|
351
|
+
.SetDoc(Det_ver11_doc)
|
|
352
|
+
.Input(0, "X", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
|
|
353
|
+
.Output(0, "Y", "Output tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
|
|
354
|
+
.TypeConstraint(
|
|
355
|
+
"T",
|
|
356
|
+
{"tensor(float16)", "tensor(float)", "tensor(double)"},
|
|
357
|
+
"Constrain input and output types to floating-point tensors.")
|
|
358
|
+
.TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
|
|
359
|
+
// Type inference
|
|
360
|
+
propagateElemTypeFromInputToOutput(ctx, 0, 0);
|
|
361
|
+
|
|
362
|
+
// Shape inference
|
|
363
|
+
if (hasInputShape(ctx, 0)) {
|
|
364
|
+
const TensorShapeProto& input_shape = ctx.getInputType(0)->tensor_type().shape();
|
|
365
|
+
TensorShapeProto* output_shape = ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape();
|
|
366
|
+
const int rank = static_cast<int>(input_shape.dim_size());
|
|
367
|
+
|
|
368
|
+
if (rank < 2) {
|
|
369
|
+
fail_shape_inference("Input rank must be >= 2.");
|
|
370
|
+
}
|
|
371
|
+
|
|
372
|
+
const auto mat_w = input_shape.dim(rank - 1);
|
|
373
|
+
const auto mat_h = input_shape.dim(rank - 2);
|
|
374
|
+
if (mat_w.has_dim_value() && mat_h.has_dim_value() && (mat_w.dim_value() != mat_h.dim_value())) {
|
|
375
|
+
fail_shape_inference(
|
|
376
|
+
"The inner-most 2 dimensions must have the same size (mat_w:",
|
|
377
|
+
mat_w.dim_value(),
|
|
378
|
+
" != mat_h:",
|
|
379
|
+
mat_h.dim_value(),
|
|
380
|
+
").");
|
|
381
|
+
}
|
|
382
|
+
|
|
383
|
+
for (int i = 0; i < rank - 2; ++i) {
|
|
384
|
+
auto* dim = output_shape->add_dim();
|
|
385
|
+
*dim = input_shape.dim(i);
|
|
386
|
+
}
|
|
387
|
+
}
|
|
388
|
+
}));
|
|
389
|
+
|
|
390
|
+
static const char* Round_ver11_doc = R"DOC(
|
|
391
|
+
Round takes one input Tensor and rounds the values, element-wise, meaning
|
|
392
|
+
it finds the nearest integer for each value.
|
|
393
|
+
In case of halves, the rule is to round them to the nearest even integer.
|
|
394
|
+
If input x is integral, +0, -0, NaN, or infinite, x itself is returned.
|
|
395
|
+
The output tensor has the same shape and type as the input.
|
|
396
|
+
|
|
397
|
+
Examples:
|
|
398
|
+
```
|
|
399
|
+
round([0.9]) = [1.0]
|
|
400
|
+
round([2.5]) = [2.0]
|
|
401
|
+
round([2.3]) = [2.0]
|
|
402
|
+
round([1.5]) = [2.0]
|
|
403
|
+
round([-4.5]) = [-4.0]
|
|
404
|
+
```
|
|
405
|
+
)DOC";
|
|
406
|
+
|
|
407
|
+
ONNX_OPERATOR_SET_SCHEMA(
|
|
408
|
+
Round,
|
|
409
|
+
11,
|
|
410
|
+
OpSchema()
|
|
411
|
+
.SetDoc(Round_ver11_doc)
|
|
412
|
+
.Input(0, "X", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::NonDifferentiable)
|
|
413
|
+
.Output(0, "Y", "Output tensor", "T", OpSchema::Single, true, 1, OpSchema::NonDifferentiable)
|
|
414
|
+
.TypeConstraint(
|
|
415
|
+
"T",
|
|
416
|
+
{"tensor(float16)", "tensor(float)", "tensor(double)"},
|
|
417
|
+
"Constrain input and output types to float tensors.")
|
|
418
|
+
.TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));
|
|
419
|
+
|
|
420
|
+
static const char* Atanh_ver9_doc = R"DOC(
|
|
421
|
+
Calculates the hyperbolic arctangent of the given input tensor element-wise.
|
|
422
|
+
)DOC";
|
|
423
|
+
|
|
424
|
+
ONNX_OPERATOR_SET_SCHEMA(
|
|
425
|
+
Atanh,
|
|
426
|
+
9,
|
|
427
|
+
OpSchema()
|
|
428
|
+
.SetDoc(Atanh_ver9_doc)
|
|
429
|
+
.Input(0, "input", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
|
|
430
|
+
.Output(
|
|
431
|
+
0,
|
|
432
|
+
"output",
|
|
433
|
+
"The hyperbolic arctangent values of the input tensor "
|
|
434
|
+
"computed element-wise",
|
|
435
|
+
"T",
|
|
436
|
+
OpSchema::Single,
|
|
437
|
+
true,
|
|
438
|
+
1,
|
|
439
|
+
OpSchema::Differentiable)
|
|
440
|
+
.TypeConstraint(
|
|
441
|
+
"T",
|
|
442
|
+
{"tensor(float16)", "tensor(float)", "tensor(double)"},
|
|
443
|
+
"Constrain input and output types to float tensors.")
|
|
444
|
+
.TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));
|
|
445
|
+
|
|
446
|
+
static const char* Acosh_ver9_doc = R"DOC(
|
|
447
|
+
Calculates the hyperbolic arccosine of the given input tensor element-wise.
|
|
448
|
+
)DOC";
|
|
449
|
+
|
|
450
|
+
ONNX_OPERATOR_SET_SCHEMA(
|
|
451
|
+
Acosh,
|
|
452
|
+
9,
|
|
453
|
+
OpSchema()
|
|
454
|
+
.SetDoc(Acosh_ver9_doc)
|
|
455
|
+
.Input(0, "input", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
|
|
456
|
+
.Output(
|
|
457
|
+
0,
|
|
458
|
+
"output",
|
|
459
|
+
"The hyperbolic arccosine values of the input tensor "
|
|
460
|
+
"computed element-wise",
|
|
461
|
+
"T",
|
|
462
|
+
OpSchema::Single,
|
|
463
|
+
true,
|
|
464
|
+
1,
|
|
465
|
+
OpSchema::Differentiable)
|
|
466
|
+
.TypeConstraint(
|
|
467
|
+
"T",
|
|
468
|
+
{"tensor(float16)", "tensor(float)", "tensor(double)"},
|
|
469
|
+
"Constrain input and output types to float tensors.")
|
|
470
|
+
.TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));
|
|
471
|
+
|
|
472
|
+
static const char* Asinh_ver9_doc = R"DOC(
|
|
473
|
+
Calculates the hyperbolic arcsine of the given input tensor element-wise.
|
|
474
|
+
)DOC";
|
|
475
|
+
|
|
476
|
+
ONNX_OPERATOR_SET_SCHEMA(
|
|
477
|
+
Asinh,
|
|
478
|
+
9,
|
|
479
|
+
OpSchema()
|
|
480
|
+
.SetDoc(Asinh_ver9_doc)
|
|
481
|
+
.Input(0, "input", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
|
|
482
|
+
.Output(
|
|
483
|
+
0,
|
|
484
|
+
"output",
|
|
485
|
+
"The hyperbolic arcsine values of the input tensor "
|
|
486
|
+
"computed element-wise",
|
|
487
|
+
"T",
|
|
488
|
+
OpSchema::Single,
|
|
489
|
+
true,
|
|
490
|
+
1,
|
|
491
|
+
OpSchema::Differentiable)
|
|
492
|
+
.TypeConstraint(
|
|
493
|
+
"T",
|
|
494
|
+
{"tensor(float16)", "tensor(float)", "tensor(double)"},
|
|
495
|
+
"Constrain input and output types to float tensors.")
|
|
496
|
+
.TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));
|
|
497
|
+
|
|
498
|
+
static const char* Cosh_ver9_doc = R"DOC(
|
|
499
|
+
Calculates the hyperbolic cosine of the given input tensor element-wise.
|
|
500
|
+
)DOC";
|
|
501
|
+
|
|
502
|
+
ONNX_OPERATOR_SET_SCHEMA(
|
|
503
|
+
Cosh,
|
|
504
|
+
9,
|
|
505
|
+
OpSchema()
|
|
506
|
+
.SetDoc(Cosh_ver9_doc)
|
|
507
|
+
.Input(0, "input", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
|
|
508
|
+
.Output(
|
|
509
|
+
0,
|
|
510
|
+
"output",
|
|
511
|
+
"The hyperbolic cosine values of the input tensor "
|
|
512
|
+
"computed element-wise",
|
|
513
|
+
"T",
|
|
514
|
+
OpSchema::Single,
|
|
515
|
+
true,
|
|
516
|
+
1,
|
|
517
|
+
OpSchema::Differentiable)
|
|
518
|
+
.TypeConstraint(
|
|
519
|
+
"T",
|
|
520
|
+
{"tensor(float16)", "tensor(float)", "tensor(double)"},
|
|
521
|
+
"Constrain input and output types to float tensors.")
|
|
522
|
+
.TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));
|
|
523
|
+
|
|
524
|
+
static const char* Sinh_ver9_doc = R"DOC(
|
|
525
|
+
Calculates the hyperbolic sine of the given input tensor element-wise.
|
|
526
|
+
)DOC";
|
|
527
|
+
|
|
528
|
+
ONNX_OPERATOR_SET_SCHEMA(
|
|
529
|
+
Sinh,
|
|
530
|
+
9,
|
|
531
|
+
OpSchema()
|
|
532
|
+
.SetDoc(Sinh_ver9_doc)
|
|
533
|
+
.Input(0, "input", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
|
|
534
|
+
.Output(
|
|
535
|
+
0,
|
|
536
|
+
"output",
|
|
537
|
+
"The hyperbolic sine values of the input tensor "
|
|
538
|
+
"computed element-wise",
|
|
539
|
+
"T",
|
|
540
|
+
OpSchema::Single,
|
|
541
|
+
true,
|
|
542
|
+
1,
|
|
543
|
+
OpSchema::Differentiable)
|
|
544
|
+
.TypeConstraint(
|
|
545
|
+
"T",
|
|
546
|
+
{"tensor(float16)", "tensor(float)", "tensor(double)"},
|
|
547
|
+
"Constrain input and output types to float tensors.")
|
|
548
|
+
.TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));
|
|
549
|
+
|
|
550
|
+
static const char* Atan_ver7_doc = R"DOC(
|
|
551
|
+
Calculates the arctangent (inverse of tangent) of the given input tensor, element-wise.
|
|
552
|
+
)DOC";
|
|
553
|
+
|
|
554
|
+
ONNX_OPERATOR_SET_SCHEMA(
|
|
555
|
+
Atan,
|
|
556
|
+
7,
|
|
557
|
+
OpSchema()
|
|
558
|
+
.SetDoc(Atan_ver7_doc)
|
|
559
|
+
.Input(0, "input", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
|
|
560
|
+
.Output(
|
|
561
|
+
0,
|
|
562
|
+
"output",
|
|
563
|
+
"The arctangent of the input tensor computed "
|
|
564
|
+
"element-wise",
|
|
565
|
+
"T",
|
|
566
|
+
OpSchema::Single,
|
|
567
|
+
true,
|
|
568
|
+
1,
|
|
569
|
+
OpSchema::Differentiable)
|
|
570
|
+
.TypeConstraint(
|
|
571
|
+
"T",
|
|
572
|
+
{"tensor(float16)", "tensor(float)", "tensor(double)"},
|
|
573
|
+
"Constrain input and output types to float tensors.")
|
|
574
|
+
.TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));
|
|
575
|
+
|
|
576
|
+
static const char* Acos_ver7_doc = R"DOC(
|
|
577
|
+
Calculates the arccosine (inverse of cosine) of the given input tensor, element-wise.
|
|
578
|
+
)DOC";
|
|
579
|
+
|
|
580
|
+
ONNX_OPERATOR_SET_SCHEMA(
|
|
581
|
+
Acos,
|
|
582
|
+
7,
|
|
583
|
+
OpSchema()
|
|
584
|
+
.SetDoc(Acos_ver7_doc)
|
|
585
|
+
.Input(0, "input", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
|
|
586
|
+
.Output(
|
|
587
|
+
0,
|
|
588
|
+
"output",
|
|
589
|
+
"The arccosine of the input tensor computed "
|
|
590
|
+
"element-wise",
|
|
591
|
+
"T",
|
|
592
|
+
OpSchema::Single,
|
|
593
|
+
true,
|
|
594
|
+
1,
|
|
595
|
+
OpSchema::Differentiable)
|
|
596
|
+
.TypeConstraint(
|
|
597
|
+
"T",
|
|
598
|
+
{"tensor(float16)", "tensor(float)", "tensor(double)"},
|
|
599
|
+
"Constrain input and output types to float tensors.")
|
|
600
|
+
.TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));
|
|
601
|
+
|
|
602
|
+
static const char* Asin_ver7_doc = R"DOC(
|
|
603
|
+
Calculates the arcsine (inverse of sine) of the given input tensor, element-wise.
|
|
604
|
+
)DOC";
|
|
605
|
+
|
|
606
|
+
ONNX_OPERATOR_SET_SCHEMA(
|
|
607
|
+
Asin,
|
|
608
|
+
7,
|
|
609
|
+
OpSchema()
|
|
610
|
+
.SetDoc(Asin_ver7_doc)
|
|
611
|
+
.Input(0, "input", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
|
|
612
|
+
.Output(
|
|
613
|
+
0,
|
|
614
|
+
"output",
|
|
615
|
+
"The arcsine of the input tensor computed "
|
|
616
|
+
"element-wise",
|
|
617
|
+
"T",
|
|
618
|
+
OpSchema::Single,
|
|
619
|
+
true,
|
|
620
|
+
1,
|
|
621
|
+
OpSchema::Differentiable)
|
|
622
|
+
.TypeConstraint(
|
|
623
|
+
"T",
|
|
624
|
+
{"tensor(float16)", "tensor(float)", "tensor(double)"},
|
|
625
|
+
"Constrain input and output types to float tensors.")
|
|
626
|
+
.TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));
|
|
627
|
+
|
|
628
|
+
static const char* Tan_ver7_doc = R"DOC(
|
|
629
|
+
Calculates the tangent of the given input tensor, element-wise.
|
|
630
|
+
)DOC";
|
|
631
|
+
|
|
632
|
+
ONNX_OPERATOR_SET_SCHEMA(
|
|
633
|
+
Tan,
|
|
634
|
+
7,
|
|
635
|
+
OpSchema()
|
|
636
|
+
.SetDoc(Tan_ver7_doc)
|
|
637
|
+
.Input(0, "input", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
|
|
638
|
+
.Output(
|
|
639
|
+
0,
|
|
640
|
+
"output",
|
|
641
|
+
"The tangent of the input tensor computed "
|
|
642
|
+
"element-wise",
|
|
643
|
+
"T",
|
|
644
|
+
OpSchema::Single,
|
|
645
|
+
true,
|
|
646
|
+
1,
|
|
647
|
+
OpSchema::Differentiable)
|
|
648
|
+
.TypeConstraint(
|
|
649
|
+
"T",
|
|
650
|
+
{"tensor(float16)", "tensor(float)", "tensor(double)"},
|
|
651
|
+
"Constrain input and output types to float tensors.")
|
|
652
|
+
.TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));
|
|
653
|
+
|
|
654
|
+
static const char* Cos_ver7_doc = R"DOC(
|
|
655
|
+
Calculates the cosine of the given input tensor, element-wise.
|
|
656
|
+
)DOC";
|
|
657
|
+
|
|
658
|
+
ONNX_OPERATOR_SET_SCHEMA(
|
|
659
|
+
Cos,
|
|
660
|
+
7,
|
|
661
|
+
OpSchema()
|
|
662
|
+
.SetDoc(Cos_ver7_doc)
|
|
663
|
+
.Input(0, "input", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
|
|
664
|
+
.Output(
|
|
665
|
+
0,
|
|
666
|
+
"output",
|
|
667
|
+
"The cosine of the input tensor computed "
|
|
668
|
+
"element-wise",
|
|
669
|
+
"T",
|
|
670
|
+
OpSchema::Single,
|
|
671
|
+
true,
|
|
672
|
+
1,
|
|
673
|
+
OpSchema::Differentiable)
|
|
674
|
+
.TypeConstraint(
|
|
675
|
+
"T",
|
|
676
|
+
{"tensor(float16)", "tensor(float)", "tensor(double)"},
|
|
677
|
+
"Constrain input and output types to float tensors.")
|
|
678
|
+
.TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));
|
|
679
|
+
|
|
680
|
+
static const char* Sin_ver7_doc = R"DOC(
|
|
681
|
+
Calculates the sine of the given input tensor, element-wise.
|
|
682
|
+
)DOC";
|
|
683
|
+
|
|
684
|
+
ONNX_OPERATOR_SET_SCHEMA(
|
|
685
|
+
Sin,
|
|
686
|
+
7,
|
|
687
|
+
OpSchema()
|
|
688
|
+
.SetDoc(Sin_ver7_doc)
|
|
689
|
+
.Input(0, "input", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
|
|
690
|
+
.Output(
|
|
691
|
+
0,
|
|
692
|
+
"output",
|
|
693
|
+
"The sine of the input tensor computed "
|
|
694
|
+
"element-wise",
|
|
695
|
+
"T",
|
|
696
|
+
OpSchema::Single,
|
|
697
|
+
true,
|
|
698
|
+
1,
|
|
699
|
+
OpSchema::Differentiable)
|
|
700
|
+
.TypeConstraint(
|
|
701
|
+
"T",
|
|
702
|
+
{"tensor(float16)", "tensor(float)", "tensor(double)"},
|
|
703
|
+
"Constrain input and output types to float tensors.")
|
|
704
|
+
.TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));
|
|
705
|
+
|
|
706
|
+
static const char* Softplus_ver1_doc = R"DOC(
|
|
707
|
+
Softplus takes one input data (Tensor<T>) and produces one output data
|
|
708
|
+
(Tensor<T>) where the softplus function, y = ln(exp(x) + 1), is applied to
|
|
709
|
+
the tensor elementwise.
|
|
710
|
+
)DOC";
|
|
711
|
+
|
|
712
|
+
ONNX_OPERATOR_SET_SCHEMA(
|
|
713
|
+
Softplus,
|
|
714
|
+
1,
|
|
715
|
+
OpSchema()
|
|
716
|
+
.SetDoc(Softplus_ver1_doc)
|
|
717
|
+
.Input(0, "X", "1D input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
|
|
718
|
+
.Output(0, "Y", "1D input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
|
|
719
|
+
.TypeConstraint(
|
|
720
|
+
"T",
|
|
721
|
+
{"tensor(float16)", "tensor(float)", "tensor(double)"},
|
|
722
|
+
"Constrain input and output types to float tensors.")
|
|
723
|
+
.TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput)
|
|
724
|
+
.FunctionBody(
|
|
725
|
+
R"ONNX(
|
|
726
|
+
{
|
|
727
|
+
exp_x = Exp (X)
|
|
728
|
+
one = Constant <value = float {1.0}>()
|
|
729
|
+
one_cast = CastLike (one, X)
|
|
730
|
+
exp_x_add_one = Add (exp_x, one_cast)
|
|
731
|
+
Y = Log (exp_x_add_one)
|
|
732
|
+
}
|
|
733
|
+
)ONNX",
|
|
734
|
+
18));
|
|
735
|
+
|
|
736
|
+
static const char* Softsign_ver1_doc = R"DOC(
|
|
737
|
+
Calculates the softsign (x/(1+|x|)) of the given input tensor element-wise.
|
|
738
|
+
)DOC";
|
|
739
|
+
|
|
740
|
+
ONNX_OPERATOR_SET_SCHEMA(
|
|
741
|
+
Softsign,
|
|
742
|
+
1,
|
|
743
|
+
OpSchema()
|
|
744
|
+
.SetDoc(Softsign_ver1_doc)
|
|
745
|
+
.Input(0, "input", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
|
|
746
|
+
.Output(
|
|
747
|
+
0,
|
|
748
|
+
"output",
|
|
749
|
+
"The softsign (x/(1+|x|)) values of the input tensor computed element-wise",
|
|
750
|
+
"T",
|
|
751
|
+
OpSchema::Single,
|
|
752
|
+
true,
|
|
753
|
+
1,
|
|
754
|
+
OpSchema::Differentiable)
|
|
755
|
+
.TypeConstraint(
|
|
756
|
+
"T",
|
|
757
|
+
{"tensor(float16)", "tensor(float)", "tensor(double)"},
|
|
758
|
+
"Constrain input and output types to float tensors.")
|
|
759
|
+
.TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput)
|
|
760
|
+
.FunctionBody(
|
|
761
|
+
R"ONNX(
|
|
762
|
+
{
|
|
763
|
+
One = Constant <value = float {1.0}>()
|
|
764
|
+
OneCast = CastLike (One, input)
|
|
765
|
+
AbsInput = Abs(input)
|
|
766
|
+
OneAddAbsInput = Add (OneCast, AbsInput)
|
|
767
|
+
output = Div(input, OneAddAbsInput)
|
|
768
|
+
}
|
|
769
|
+
)ONNX",
|
|
770
|
+
18));
|
|
771
|
+
|
|
772
|
+
static const char* HardSwish_ver14_doc = R"DOC(
|
|
773
|
+
HardSwish takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where
|
|
774
|
+
the HardSwish function, y = x * max(0, min(1, alpha * x + beta)) = x * HardSigmoid<alpha, beta>(x),
|
|
775
|
+
where alpha = 1/6 and beta = 0.5, is applied to the tensor elementwise.
|
|
776
|
+
)DOC";
|
|
777
|
+
|
|
778
|
+
ONNX_OPERATOR_SET_SCHEMA(
|
|
779
|
+
HardSwish,
|
|
780
|
+
14,
|
|
781
|
+
OpSchema()
|
|
782
|
+
.SetDoc(HardSwish_ver14_doc)
|
|
783
|
+
.Input(0, "X", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
|
|
784
|
+
.Output(0, "Y", "Output tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
|
|
785
|
+
.TypeConstraint(
|
|
786
|
+
"T",
|
|
787
|
+
{"tensor(float16)", "tensor(float)", "tensor(double)"},
|
|
788
|
+
"Constrain input and output types to float tensors.")
|
|
789
|
+
.TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput)
|
|
790
|
+
.FunctionBody(R"ONNX(
|
|
791
|
+
{
|
|
792
|
+
HS_X = HardSigmoid<alpha = 0.16666667163372, beta = 0.5>(X)
|
|
793
|
+
Y = Mul (X, HS_X)
|
|
794
|
+
}
|
|
795
|
+
)ONNX"));
|
|
796
|
+
|
|
797
|
+
static const char* HardSigmoid_ver6_doc = R"DOC(
|
|
798
|
+
HardSigmoid takes one input data (Tensor<T>) and produces one output data
|
|
799
|
+
(Tensor<T>) where the HardSigmoid function, y = max(0, min(1, alpha * x + beta)),
|
|
800
|
+
is applied to the tensor elementwise.
|
|
801
|
+
)DOC";
|
|
802
|
+
|
|
803
|
+
ONNX_OPERATOR_SET_SCHEMA(
|
|
804
|
+
HardSigmoid,
|
|
805
|
+
6,
|
|
806
|
+
OpSchema()
|
|
807
|
+
.Attr("alpha", "Value of alpha.", AttributeProto::FLOAT, 0.2f)
|
|
808
|
+
.Attr("beta", "Value of beta.", AttributeProto::FLOAT, 0.5f)
|
|
809
|
+
.SetDoc(HardSigmoid_ver6_doc)
|
|
810
|
+
.Input(0, "X", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
|
|
811
|
+
.Output(0, "Y", "Output tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
|
|
812
|
+
.TypeConstraint(
|
|
813
|
+
"T",
|
|
814
|
+
{"tensor(float16)", "tensor(float)", "tensor(double)"},
|
|
815
|
+
"Constrain input and output types to float tensors.")
|
|
816
|
+
.TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput)
|
|
817
|
+
.FunctionBody(
|
|
818
|
+
R"ONNX(
|
|
819
|
+
{
|
|
820
|
+
Alpha = Constant <value_float: float = @alpha>()
|
|
821
|
+
AlphaCast = CastLike (Alpha, X)
|
|
822
|
+
Beta = Constant <value_float: float = @beta>()
|
|
823
|
+
BetaCast = CastLike (Beta, X)
|
|
824
|
+
Zero = Constant <value = float {0.0}>()
|
|
825
|
+
ZeroCast = CastLike (Zero, X)
|
|
826
|
+
One = Constant <value = float {1.0}>()
|
|
827
|
+
OneCast = CastLike (One, X)
|
|
828
|
+
AlphaMulX = Mul (X, AlphaCast)
|
|
829
|
+
AlphaMulXAddBeta = Add (AlphaMulX, BetaCast)
|
|
830
|
+
MinOneOrAlphaMulXAddBeta = Min (AlphaMulXAddBeta, OneCast)
|
|
831
|
+
Y = Max(MinOneOrAlphaMulXAddBeta, ZeroCast)
|
|
832
|
+
}
|
|
833
|
+
)ONNX",
|
|
834
|
+
18));
|
|
835
|
+
|
|
836
|
+
static const char* mish_ver18_doc = R"DOC(
|
|
837
|
+
Mish: A Self Regularized Non-Monotonic Neural Activation Function.
|
|
838
|
+
|
|
839
|
+
Perform the linear unit element-wise on the input tensor X using formula:
|
|
840
|
+
|
|
841
|
+
```
|
|
842
|
+
mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + e^{x}))
|
|
843
|
+
```
|
|
844
|
+
)DOC";
|
|
845
|
+
|
|
846
|
+
ONNX_OPERATOR_SET_SCHEMA(
|
|
847
|
+
Mish,
|
|
848
|
+
18,
|
|
849
|
+
OpSchema()
|
|
850
|
+
.SetDoc(mish_ver18_doc)
|
|
851
|
+
.Input(0, "X", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
|
|
852
|
+
.Output(0, "Y", "Output tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
|
|
853
|
+
.TypeConstraint(
|
|
854
|
+
"T",
|
|
855
|
+
{"tensor(float16)", "tensor(float)", "tensor(double)"},
|
|
856
|
+
"Constrain input X and output types to float tensors.")
|
|
857
|
+
.FunctionBody(R"ONNX(
|
|
858
|
+
{
|
|
859
|
+
Softplus_X = Softplus (X)
|
|
860
|
+
TanHSoftplusX = Tanh (Softplus_X)
|
|
861
|
+
Y = Mul (X, TanHSoftplusX)
|
|
862
|
+
}
|
|
863
|
+
)ONNX")
|
|
864
|
+
.TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));
|
|
865
|
+
|
|
866
|
+
static const char* Elu_ver6_doc = R"DOC(
|
|
867
|
+
Elu takes one input data (Tensor<T>) and produces one output data
|
|
868
|
+
(Tensor<T>) where the function `f(x) = alpha * (exp(x) - 1.) for x <
|
|
869
|
+
0`, `f(x) = x for x >= 0`., is applied to the tensor elementwise.
|
|
870
|
+
|
|
871
|
+
)DOC";
|
|
872
|
+
|
|
873
|
+
ONNX_OPERATOR_SET_SCHEMA(
|
|
874
|
+
Elu,
|
|
875
|
+
6,
|
|
876
|
+
OpSchema()
|
|
877
|
+
.Attr("alpha", "Coefficient of ELU.", AttributeProto::FLOAT, 1.0f)
|
|
878
|
+
.SetDoc(Elu_ver6_doc)
|
|
879
|
+
.Input(0, "X", "1D input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
|
|
880
|
+
.Output(0, "Y", "1D output tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
|
|
881
|
+
.TypeConstraint(
|
|
882
|
+
"T",
|
|
883
|
+
{"tensor(float16)", "tensor(float)", "tensor(double)"},
|
|
884
|
+
"Constrain input and output types to float tensors.")
|
|
885
|
+
.TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput)
|
|
886
|
+
.FunctionBody(
|
|
887
|
+
R"ONNX(
|
|
888
|
+
{
|
|
889
|
+
Alpha = Constant <value_float: float = @alpha>()
|
|
890
|
+
AlphaCast = CastLike (Alpha, X)
|
|
891
|
+
Zero = Constant <value = float {0.0}>()
|
|
892
|
+
ZeroCast = CastLike (Zero, X)
|
|
893
|
+
One = Constant <value = float {1.0}>()
|
|
894
|
+
OneCast = CastLike (One, X)
|
|
895
|
+
XLessThanZero = Less (X, ZeroCast)
|
|
896
|
+
ExpX = Exp (X)
|
|
897
|
+
ExpXSubOne = Sub (ExpX, OneCast)
|
|
898
|
+
AlphaMulExpXSubOne = Mul (AlphaCast, ExpXSubOne)
|
|
899
|
+
Y = Where(XLessThanZero, AlphaMulExpXSubOne, X)
|
|
900
|
+
}
|
|
901
|
+
)ONNX",
|
|
902
|
+
18));
|
|
903
|
+
|
|
904
|
+
static const char* Selu_ver6_doc = R"DOC(
|
|
905
|
+
Selu takes one input data (Tensor<T>) and produces one output data
|
|
906
|
+
(Tensor<T>) where the scaled exponential linear unit function,
|
|
907
|
+
`y = gamma * (alpha * e^x - alpha) for x <= 0`, `y = gamma * x for x > 0`,
|
|
908
|
+
is applied to the tensor elementwise.
|
|
909
|
+
)DOC";
|
|
910
|
+
|
|
911
|
+
ONNX_OPERATOR_SET_SCHEMA(
|
|
912
|
+
Selu,
|
|
913
|
+
6,
|
|
914
|
+
OpSchema()
|
|
915
|
+
.Attr(
|
|
916
|
+
"alpha",
|
|
917
|
+
"Coefficient of SELU default to 1.67326319217681884765625 "
|
|
918
|
+
"(i.e., float32 approximation of 1.6732632423543772848170429916717).",
|
|
919
|
+
AttributeProto::FLOAT,
|
|
920
|
+
1.67326319217681884765625f)
|
|
921
|
+
.Attr(
|
|
922
|
+
"gamma",
|
|
923
|
+
"Coefficient of SELU default to 1.05070102214813232421875 "
|
|
924
|
+
"(i.e., float32 approximation of 1.0507009873554804934193349852946).",
|
|
925
|
+
AttributeProto::FLOAT,
|
|
926
|
+
1.05070102214813232421875f)
|
|
927
|
+
.SetDoc(Selu_ver6_doc)
|
|
928
|
+
.Input(0, "X", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
|
|
929
|
+
.Output(0, "Y", "Output tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
|
|
930
|
+
.TypeConstraint(
|
|
931
|
+
"T",
|
|
932
|
+
{"tensor(float16)", "tensor(float)", "tensor(double)"},
|
|
933
|
+
"Constrain input and output types to float tensors.")
|
|
934
|
+
.TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput)
|
|
935
|
+
.FunctionBody(
|
|
936
|
+
R"ONNX(
|
|
937
|
+
{
|
|
938
|
+
Alpha = Constant <value_float: float = @alpha>()
|
|
939
|
+
AlphaCast = CastLike (Alpha, X)
|
|
940
|
+
Gamma = Constant <value_float: float = @gamma>()
|
|
941
|
+
GammaCast = CastLike (Gamma, X)
|
|
942
|
+
Zero = Constant <value = float {0.0}>()
|
|
943
|
+
ZeroCast = CastLike (Zero, X)
|
|
944
|
+
ExpX = Exp (X)
|
|
945
|
+
AlphaMulExpX = Mul(AlphaCast, ExpX)
|
|
946
|
+
AlphaMulExpXSubAlpha = Sub (AlphaMulExpX, AlphaCast)
|
|
947
|
+
Neg = Mul (GammaCast, AlphaMulExpXSubAlpha)
|
|
948
|
+
Pos = Mul (GammaCast, X)
|
|
949
|
+
XLessThanZero = Less (X, ZeroCast)
|
|
950
|
+
Y = Where(XLessThanZero, Neg, Pos)
|
|
951
|
+
}
|
|
952
|
+
)ONNX",
|
|
953
|
+
18));
|
|
954
|
+
|
|
955
|
+
static const char* ThresholdedRelu_ver10_doc = R"DOC(
|
|
956
|
+
ThresholdedRelu takes one input data (Tensor<T>) and produces one output data
|
|
957
|
+
(Tensor<T>) where the rectified linear function, y = x for x > alpha, y = 0 otherwise,
|
|
958
|
+
is applied to the tensor elementwise.
|
|
959
|
+
)DOC";
|
|
960
|
+
|
|
961
|
+
ONNX_OPERATOR_SET_SCHEMA(
|
|
962
|
+
ThresholdedRelu,
|
|
963
|
+
10,
|
|
964
|
+
OpSchema()
|
|
965
|
+
.SetDoc(ThresholdedRelu_ver10_doc)
|
|
966
|
+
.Attr("alpha", "Threshold value", AttributeProto::FLOAT, 1.0f)
|
|
967
|
+
.Input(0, "X", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
|
|
968
|
+
.Output(0, "Y", "Output tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
|
|
969
|
+
.TypeConstraint(
|
|
970
|
+
"T",
|
|
971
|
+
{"tensor(float16)", "tensor(float)", "tensor(double)"},
|
|
972
|
+
"Constrain input and output types to float tensors.")
|
|
973
|
+
.TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput)
|
|
974
|
+
.FunctionBody(
|
|
975
|
+
R"ONNX(
|
|
976
|
+
{
|
|
977
|
+
Alpha = Constant <value_float: float = @alpha>()
|
|
978
|
+
AlphaCast = CastLike (Alpha, X)
|
|
979
|
+
Zero = Constant <value = float {0.0}>()
|
|
980
|
+
ZeroCast = CastLike (Zero, X)
|
|
981
|
+
AlphaLessThanX = Less(AlphaCast, X)
|
|
982
|
+
Y = Where(AlphaLessThanX, X, ZeroCast)
|
|
983
|
+
}
|
|
984
|
+
)ONNX",
|
|
985
|
+
18));
|
|
986
|
+
|
|
14
987
|
std::function<void(OpSchema&)> MathDocGenerator_opset13(const char* name) {
|
|
15
988
|
return [=](OpSchema& schema) {
|
|
16
989
|
std::string doc;
|
|
@@ -796,7 +1769,7 @@ void matmulShapeInference_opset_9(ONNX_NAMESPACE::InferenceContext& ctx, int inp
|
|
|
796
1769
|
}
|
|
797
1770
|
|
|
798
1771
|
static const char* MatMul_ver9_doc = R"DOC(
|
|
799
|
-
Matrix product that behaves like numpy.matmul
|
|
1772
|
+
Matrix product that behaves like [numpy.matmul](https://numpy.org/doc/stable/reference/generated/numpy.matmul.html).
|
|
800
1773
|
)DOC";
|
|
801
1774
|
|
|
802
1775
|
ONNX_OPERATOR_SET_SCHEMA(
|
|
@@ -1294,10 +2267,14 @@ ONNX_OPERATOR_SET_SCHEMA(
|
|
|
1294
2267
|
const int target_rank = static_cast<int>(target_shape.dim_size());
|
|
1295
2268
|
|
|
1296
2269
|
if (input_rank < 2) {
|
|
1297
|
-
fail_shape_inference("Input rank must be >= 2.");
|
|
2270
|
+
fail_shape_inference("Input rank must be >= 2. input_rank=", input_rank);
|
|
1298
2271
|
}
|
|
1299
2272
|
if (target_rank != input_rank - 1) {
|
|
1300
|
-
fail_shape_inference(
|
|
2273
|
+
fail_shape_inference(
|
|
2274
|
+
"Target rank must be 1 less than the input rank. input_rank=",
|
|
2275
|
+
input_rank,
|
|
2276
|
+
", target_rank=",
|
|
2277
|
+
target_rank);
|
|
1301
2278
|
}
|
|
1302
2279
|
|
|
1303
2280
|
// match input dimensions (N, C, d1, ..., dk) with target
|
|
@@ -1307,13 +2284,18 @@ ONNX_OPERATOR_SET_SCHEMA(
|
|
|
1307
2284
|
const auto target_dim = target_shape.dim(dim);
|
|
1308
2285
|
if (input_dim.has_dim_value() && target_dim.has_dim_value() &&
|
|
1309
2286
|
input_dim.dim_value() != target_dim.dim_value())
|
|
1310
|
-
fail_shape_inference(
|
|
2287
|
+
fail_shape_inference(
|
|
2288
|
+
"Input and target dimension value mismatch. input_dim_value=",
|
|
2289
|
+
input_dim.dim_value(),
|
|
2290
|
+
" target_dim_value=",
|
|
2291
|
+
target_dim.dim_value());
|
|
1311
2292
|
}
|
|
1312
2293
|
|
|
1313
2294
|
if (ctx.getNumInputs() == 3 && hasInputShape(ctx, 2)) {
|
|
1314
2295
|
const TensorShapeProto& weight_shape = ctx.getInputType(2)->tensor_type().shape();
|
|
1315
|
-
|
|
1316
|
-
|
|
2296
|
+
const auto weight_rank = weight_shape.dim_size();
|
|
2297
|
+
if (weight_rank != 1) {
|
|
2298
|
+
fail_shape_inference("Weight rank must be 1. weight_rank=", weight_rank);
|
|
1317
2299
|
}
|
|
1318
2300
|
}
|
|
1319
2301
|
|
|
@@ -2549,7 +3531,7 @@ ONNX_OPERATOR_SET_SCHEMA(
|
|
|
2549
3531
|
.TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));
|
|
2550
3532
|
|
|
2551
3533
|
static const char* MatMul_ver1_doc = R"DOC(
|
|
2552
|
-
Matrix product that behaves like numpy.matmul
|
|
3534
|
+
Matrix product that behaves like [numpy.matmul](https://numpy.org/doc/stable/reference/generated/numpy.matmul.html).
|
|
2553
3535
|
)DOC";
|
|
2554
3536
|
|
|
2555
3537
|
ONNX_OPERATOR_SET_SCHEMA(
|