onnx-1.13.1-cp39-cp39-win_amd64.whl → onnx-1.14.1-cp39-cp39-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of onnx has been flagged as possibly problematic.
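The per-file change counts below come from the registry's own diff tooling. As a rough local approximation of the same comparison (a sketch only; the filenames and pip invocations here are assumptions, not taken from the diff), the file-level membership of the two wheels can be inspected by treating each .whl as a zip archive:

```python
# Minimal sketch (assumption: not the registry's actual diff tooling) for a
# file-level comparison of the two wheels. It assumes both wheels were
# fetched first, e.g.:
#   pip download onnx==1.13.1 --only-binary=:all: --no-deps
#   pip download onnx==1.14.1 --only-binary=:all: --no-deps
import zipfile

OLD = "onnx-1.13.1-cp39-cp39-win_amd64.whl"  # assumed local filenames
NEW = "onnx-1.14.1-cp39-cp39-win_amd64.whl"

def wheel_files(path: str) -> set:
    # A .whl is a zip archive; namelist() returns every path inside it.
    with zipfile.ZipFile(path) as zf:
        return set(zf.namelist())

old_files, new_files = wheel_files(OLD), wheel_files(NEW)
for name in sorted(new_files - old_files):
    print("added  ", name)
for name in sorted(old_files - new_files):
    print("removed", name)
print("paths present in both (possibly modified):", len(old_files & new_files))
```

This only surfaces added and removed paths; the +/- counts in the listing below additionally compare the contents of files present in both versions.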
- onnx/__init__.py +116 -70
- onnx/backend/__init__.py +2 -0
- onnx/backend/base.py +3 -0
- onnx/backend/sample/__init__.py +2 -0
- onnx/backend/sample/ops/__init__.py +8 -6
- onnx/backend/sample/ops/abs.py +1 -1
- onnx/backend/test/__init__.py +4 -1
- onnx/backend/test/case/__init__.py +4 -2
- onnx/backend/test/case/base.py +2 -0
- onnx/backend/test/case/model/__init__.py +8 -6
- onnx/backend/test/case/model/expand.py +4 -3
- onnx/backend/test/case/model/gradient.py +4 -3
- onnx/backend/test/case/model/sequence.py +4 -3
- onnx/backend/test/case/model/shrink.py +4 -3
- onnx/backend/test/case/model/sign.py +4 -3
- onnx/backend/test/case/model/single-relu.py +4 -3
- onnx/backend/test/case/model/stringnormalizer.py +4 -3
- onnx/backend/test/case/node/__init__.py +18 -12
- onnx/backend/test/case/node/abs.py +4 -3
- onnx/backend/test/case/node/acos.py +4 -3
- onnx/backend/test/case/node/acosh.py +4 -3
- onnx/backend/test/case/node/adagrad.py +4 -3
- onnx/backend/test/case/node/adam.py +4 -3
- onnx/backend/test/case/node/add.py +4 -3
- onnx/backend/test/case/node/ai_onnx_ml/__init__.py +0 -0
- onnx/backend/test/case/node/ai_onnx_ml/array_feature_extractor.py +30 -0
- onnx/backend/test/case/node/ai_onnx_ml/binarizer.py +27 -0
- onnx/backend/test/case/node/and.py +4 -3
- onnx/backend/test/case/node/argmax.py +4 -3
- onnx/backend/test/case/node/argmin.py +4 -3
- onnx/backend/test/case/node/asin.py +4 -3
- onnx/backend/test/case/node/asinh.py +4 -3
- onnx/backend/test/case/node/atan.py +4 -3
- onnx/backend/test/case/node/atanh.py +4 -3
- onnx/backend/test/case/node/averagepool.py +43 -4
- onnx/backend/test/case/node/batchnorm.py +4 -3
- onnx/backend/test/case/node/bernoulli.py +4 -3
- onnx/backend/test/case/node/bitshift.py +4 -3
- onnx/backend/test/case/node/bitwiseand.py +13 -11
- onnx/backend/test/case/node/bitwisenot.py +8 -6
- onnx/backend/test/case/node/bitwiseor.py +13 -11
- onnx/backend/test/case/node/bitwisexor.py +13 -11
- onnx/backend/test/case/node/blackmanwindow.py +4 -4
- onnx/backend/test/case/node/cast.py +218 -8
- onnx/backend/test/case/node/castlike.py +103 -9
- onnx/backend/test/case/node/ceil.py +4 -3
- onnx/backend/test/case/node/celu.py +4 -3
- onnx/backend/test/case/node/center_crop_pad.py +26 -3
- onnx/backend/test/case/node/clip.py +4 -3
- onnx/backend/test/case/node/col2im.py +5 -4
- onnx/backend/test/case/node/compress.py +4 -3
- onnx/backend/test/case/node/concat.py +4 -3
- onnx/backend/test/case/node/constant.py +4 -3
- onnx/backend/test/case/node/constantofshape.py +4 -3
- onnx/backend/test/case/node/conv.py +4 -3
- onnx/backend/test/case/node/convinteger.py +4 -3
- onnx/backend/test/case/node/convtranspose.py +4 -3
- onnx/backend/test/case/node/cos.py +4 -3
- onnx/backend/test/case/node/cosh.py +4 -3
- onnx/backend/test/case/node/cumsum.py +4 -3
- onnx/backend/test/case/node/deformconv.py +170 -0
- onnx/backend/test/case/node/depthtospace.py +4 -3
- onnx/backend/test/case/node/dequantizelinear.py +46 -3
- onnx/backend/test/case/node/det.py +4 -3
- onnx/backend/test/case/node/dft.py +4 -4
- onnx/backend/test/case/node/div.py +4 -3
- onnx/backend/test/case/node/dropout.py +4 -3
- onnx/backend/test/case/node/dynamicquantizelinear.py +4 -3
- onnx/backend/test/case/node/einsum.py +4 -4
- onnx/backend/test/case/node/elu.py +4 -3
- onnx/backend/test/case/node/equal.py +28 -3
- onnx/backend/test/case/node/erf.py +4 -3
- onnx/backend/test/case/node/exp.py +4 -3
- onnx/backend/test/case/node/expand.py +4 -3
- onnx/backend/test/case/node/eyelike.py +4 -3
- onnx/backend/test/case/node/flatten.py +4 -3
- onnx/backend/test/case/node/floor.py +4 -3
- onnx/backend/test/case/node/gather.py +4 -3
- onnx/backend/test/case/node/gatherelements.py +4 -3
- onnx/backend/test/case/node/gathernd.py +5 -4
- onnx/backend/test/case/node/gemm.py +4 -3
- onnx/backend/test/case/node/globalaveragepool.py +4 -3
- onnx/backend/test/case/node/globalmaxpool.py +4 -3
- onnx/backend/test/case/node/greater.py +4 -3
- onnx/backend/test/case/node/greater_equal.py +4 -3
- onnx/backend/test/case/node/gridsample.py +4 -3
- onnx/backend/test/case/node/groupnormalization.py +5 -4
- onnx/backend/test/case/node/gru.py +10 -9
- onnx/backend/test/case/node/hammingwindow.py +4 -4
- onnx/backend/test/case/node/hannwindow.py +4 -4
- onnx/backend/test/case/node/hardmax.py +4 -3
- onnx/backend/test/case/node/hardsigmoid.py +4 -3
- onnx/backend/test/case/node/hardswish.py +4 -3
- onnx/backend/test/case/node/identity.py +4 -3
- onnx/backend/test/case/node/if.py +4 -3
- onnx/backend/test/case/node/instancenorm.py +4 -3
- onnx/backend/test/case/node/isinf.py +4 -3
- onnx/backend/test/case/node/isnan.py +4 -3
- onnx/backend/test/case/node/layernormalization.py +4 -3
- onnx/backend/test/case/node/leakyrelu.py +4 -3
- onnx/backend/test/case/node/less.py +4 -3
- onnx/backend/test/case/node/less_equal.py +4 -3
- onnx/backend/test/case/node/log.py +4 -3
- onnx/backend/test/case/node/logsoftmax.py +4 -3
- onnx/backend/test/case/node/loop.py +4 -3
- onnx/backend/test/case/node/lppool.py +279 -0
- onnx/backend/test/case/node/lrn.py +4 -3
- onnx/backend/test/case/node/lstm.py +10 -9
- onnx/backend/test/case/node/matmul.py +4 -3
- onnx/backend/test/case/node/matmulinteger.py +4 -3
- onnx/backend/test/case/node/max.py +5 -4
- onnx/backend/test/case/node/maxpool.py +9 -4
- onnx/backend/test/case/node/maxunpool.py +4 -3
- onnx/backend/test/case/node/mean.py +4 -3
- onnx/backend/test/case/node/meanvariancenormalization.py +4 -3
- onnx/backend/test/case/node/melweightmatrix.py +4 -4
- onnx/backend/test/case/node/min.py +5 -4
- onnx/backend/test/case/node/mish.py +4 -3
- onnx/backend/test/case/node/mod.py +4 -3
- onnx/backend/test/case/node/momentum.py +4 -3
- onnx/backend/test/case/node/mul.py +4 -3
- onnx/backend/test/case/node/neg.py +4 -3
- onnx/backend/test/case/node/negativeloglikelihoodloss.py +4 -3
- onnx/backend/test/case/node/nonmaxsuppression.py +4 -3
- onnx/backend/test/case/node/nonzero.py +4 -3
- onnx/backend/test/case/node/not.py +4 -3
- onnx/backend/test/case/node/onehot.py +5 -4
- onnx/backend/test/case/node/optionalgetelement.py +4 -3
- onnx/backend/test/case/node/optionalhaselement.py +4 -3
- onnx/backend/test/case/node/or.py +4 -3
- onnx/backend/test/case/node/pad.py +36 -5
- onnx/backend/test/case/node/pool_op_common.py +20 -2
- onnx/backend/test/case/node/pow.py +4 -3
- onnx/backend/test/case/node/prelu.py +4 -3
- onnx/backend/test/case/node/qlinearconv.py +4 -3
- onnx/backend/test/case/node/qlinearmatmul.py +4 -3
- onnx/backend/test/case/node/quantizelinear.py +50 -3
- onnx/backend/test/case/node/rangeop.py +4 -3
- onnx/backend/test/case/node/reciprocal.py +4 -3
- onnx/backend/test/case/node/reduce_log_sum.py +4 -3
- onnx/backend/test/case/node/reduce_log_sum_exp.py +4 -3
- onnx/backend/test/case/node/reducel1.py +4 -3
- onnx/backend/test/case/node/reducel2.py +4 -3
- onnx/backend/test/case/node/reducemax.py +4 -3
- onnx/backend/test/case/node/reducemean.py +4 -3
- onnx/backend/test/case/node/reducemin.py +4 -3
- onnx/backend/test/case/node/reduceprod.py +4 -3
- onnx/backend/test/case/node/reducesum.py +4 -3
- onnx/backend/test/case/node/reducesumsquare.py +4 -3
- onnx/backend/test/case/node/relu.py +4 -3
- onnx/backend/test/case/node/reshape.py +4 -3
- onnx/backend/test/case/node/resize.py +73 -321
- onnx/backend/test/case/node/reversesequence.py +4 -3
- onnx/backend/test/case/node/rnn.py +10 -9
- onnx/backend/test/case/node/roialign.py +193 -3
- onnx/backend/test/case/node/round.py +4 -3
- onnx/backend/test/case/node/scan.py +4 -3
- onnx/backend/test/case/node/scatter.py +4 -3
- onnx/backend/test/case/node/scatterelements.py +4 -3
- onnx/backend/test/case/node/scatternd.py +4 -3
- onnx/backend/test/case/node/selu.py +4 -3
- onnx/backend/test/case/node/sequence_map.py +4 -4
- onnx/backend/test/case/node/sequenceinsert.py +4 -3
- onnx/backend/test/case/node/shape.py +4 -3
- onnx/backend/test/case/node/shrink.py +4 -3
- onnx/backend/test/case/node/sigmoid.py +4 -3
- onnx/backend/test/case/node/sign.py +4 -3
- onnx/backend/test/case/node/sin.py +4 -3
- onnx/backend/test/case/node/sinh.py +4 -3
- onnx/backend/test/case/node/size.py +4 -3
- onnx/backend/test/case/node/slice.py +4 -3
- onnx/backend/test/case/node/softmax.py +4 -3
- onnx/backend/test/case/node/softmaxcrossentropy.py +4 -3
- onnx/backend/test/case/node/softplus.py +4 -3
- onnx/backend/test/case/node/softsign.py +4 -3
- onnx/backend/test/case/node/spacetodepth.py +6 -3
- onnx/backend/test/case/node/split.py +4 -3
- onnx/backend/test/case/node/splittosequence.py +79 -0
- onnx/backend/test/case/node/sqrt.py +4 -3
- onnx/backend/test/case/node/squeeze.py +2 -0
- onnx/backend/test/case/node/stft.py +4 -4
- onnx/backend/test/case/node/stringnormalizer.py +4 -4
- onnx/backend/test/case/node/sub.py +4 -3
- onnx/backend/test/case/node/sum.py +4 -3
- onnx/backend/test/case/node/tan.py +4 -3
- onnx/backend/test/case/node/tanh.py +4 -3
- onnx/backend/test/case/node/tfidfvectorizer.py +4 -3
- onnx/backend/test/case/node/thresholdedrelu.py +4 -3
- onnx/backend/test/case/node/tile.py +4 -3
- onnx/backend/test/case/node/topk.py +4 -3
- onnx/backend/test/case/node/transpose.py +8 -7
- onnx/backend/test/case/node/trilu.py +4 -3
- onnx/backend/test/case/node/unique.py +4 -3
- onnx/backend/test/case/node/unsqueeze.py +4 -3
- onnx/backend/test/case/node/upsample.py +4 -3
- onnx/backend/test/case/node/where.py +4 -3
- onnx/backend/test/case/node/xor.py +4 -3
- onnx/backend/test/case/test_case.py +2 -0
- onnx/backend/test/case/utils.py +9 -0
- onnx/backend/test/cmd_tools.py +22 -13
- onnx/backend/test/data/light/README.md +16 -0
- onnx/backend/test/data/light/light_bvlc_alexnet.onnx +0 -0
- onnx/backend/test/data/light/light_bvlc_alexnet_output_0.pb +1 -0
- onnx/backend/test/data/light/light_densenet121.onnx +0 -0
- onnx/backend/test/data/light/light_densenet121_output_0.pb +1 -0
- onnx/backend/test/data/light/light_inception_v1.onnx +0 -0
- onnx/backend/test/data/light/light_inception_v1_output_0.pb +1 -0
- onnx/backend/test/data/light/light_inception_v2.onnx +0 -0
- onnx/backend/test/data/light/light_inception_v2_output_0.pb +1 -0
- onnx/backend/test/data/light/light_resnet50.onnx +0 -0
- onnx/backend/test/data/light/light_resnet50_output_0.pb +1 -0
- onnx/backend/test/data/light/light_shufflenet.onnx +0 -0
- onnx/backend/test/data/light/light_shufflenet_output_0.pb +1 -0
- onnx/backend/test/data/light/light_squeezenet.onnx +0 -0
- onnx/backend/test/data/light/light_squeezenet_output_0.pb +1 -0
- onnx/backend/test/data/light/light_vgg19.onnx +0 -0
- onnx/backend/test/data/light/light_vgg19_output_0.pb +1 -0
- onnx/backend/test/data/light/light_zfnet512.onnx +0 -0
- onnx/backend/test/data/light/light_zfnet512_output_0.pb +1 -0
- onnx/backend/test/data/node/test_acos/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_acosh/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_ai_onnx_ml_array_feature_extractor/model.onnx +19 -0
- onnx/backend/test/data/node/test_ai_onnx_ml_array_feature_extractor/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_ai_onnx_ml_array_feature_extractor/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_ai_onnx_ml_array_feature_extractor/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_ai_onnx_ml_binarizer/model.onnx +0 -0
- onnx/backend/test/data/node/test_ai_onnx_ml_binarizer/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_ai_onnx_ml_binarizer/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_asin/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_asinh/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_atan/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_atanh/test_data_set_0/output_0.pb +2 -2
- onnx/backend/test/data/node/test_averagepool_1d_default/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_ceil/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_default/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_dilations/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_dilations/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_dilations/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_pads/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_pads_count_include_pad/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_precomputed_pads/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_precomputed_pads_count_include_pad/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_precomputed_same_upper/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_precomputed_strides/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_same_lower/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_same_upper/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_strides/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_3d_default/model.onnx +0 -0
- onnx/backend/test/data/node/test_basic_deform_conv_with_padding/model.onnx +0 -0
- onnx/backend/test/data/node/test_basic_deform_conv_with_padding/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_basic_deform_conv_with_padding/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_basic_deform_conv_with_padding/test_data_set_0/input_2.pb +0 -0
- onnx/backend/test/data/node/test_basic_deform_conv_with_padding/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_basic_deform_conv_without_padding/model.onnx +0 -0
- onnx/backend/test/data/node/test_basic_deform_conv_without_padding/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_basic_deform_conv_without_padding/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_basic_deform_conv_without_padding/test_data_set_0/input_2.pb +0 -0
- onnx/backend/test/data/node/test_basic_deform_conv_without_padding/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_and_i16_3d/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_and_i16_3d/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_and_i16_3d/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_and_i32_2d/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_and_i32_2d/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_and_i32_2d/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_and_ui64_bcast_3v1d/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_and_ui64_bcast_3v1d/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_and_ui64_bcast_3v1d/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_and_ui8_bcast_4v3d/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_and_ui8_bcast_4v3d/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_and_ui8_bcast_4v3d/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_not_2d/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_not_2d/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_not_3d/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_not_3d/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_not_4d/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_not_4d/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_or_i16_4d/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_or_i16_4d/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_or_i16_4d/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_or_i32_2d/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_or_i32_2d/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_or_i32_2d/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_or_ui64_bcast_3v1d/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_or_ui64_bcast_3v1d/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_or_ui64_bcast_3v1d/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_or_ui8_bcast_4v3d/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_or_ui8_bcast_4v3d/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_or_ui8_bcast_4v3d/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_xor_i16_3d/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_xor_i16_3d/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_xor_i16_3d/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_xor_i32_2d/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_xor_i32_2d/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_xor_i32_2d/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_xor_ui64_bcast_3v1d/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_xor_ui64_bcast_3v1d/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_xor_ui64_bcast_3v1d/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_xor_ui8_bcast_4v3d/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_xor_ui8_bcast_4v3d/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_xor_ui8_bcast_4v3d/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_BFLOAT16_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_DOUBLE_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_DOUBLE_to_FLOAT16/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_DOUBLE/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FN/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FN/test_data_set_0/input_0.pb +2 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FN/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FNUZ/test_data_set_0/input_0.pb +2 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FNUZ/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2/test_data_set_0/input_0.pb +2 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2FNUZ/test_data_set_0/input_0.pb +2 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2FNUZ/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT16/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT16/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT16/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT16/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT16/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT16/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT16/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT16/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_BFLOAT16/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_DOUBLE/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT16/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FN/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_STRING/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_STRING_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FN/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FN/test_data_set_0/input_0.pb +2 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FN/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FNUZ/test_data_set_0/input_0.pb +2 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FNUZ/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2/test_data_set_0/input_0.pb +2 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2FNUZ/test_data_set_0/input_0.pb +2 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2FNUZ/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FN/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_BFLOAT16_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_BFLOAT16_to_FLOAT_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_DOUBLE_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_DOUBLE_to_FLOAT16/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_DOUBLE_to_FLOAT16_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_DOUBLE_to_FLOAT_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT16_to_DOUBLE/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT16_to_DOUBLE_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT16_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT16_to_FLOAT_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT_expanded/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT_expanded/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT_expanded/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT_expanded/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT_expanded/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT_expanded/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT_expanded/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT_expanded/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT_expanded/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT_expanded/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT_expanded/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT_expanded/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_BFLOAT16/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_BFLOAT16_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_DOUBLE/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_DOUBLE_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT16/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT16_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/input_1.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/input_1.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ_expanded/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ_expanded/test_data_set_0/input_1.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ_expanded/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN_expanded/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN_expanded/test_data_set_0/input_1.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN_expanded/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2/test_data_set_0/input_1.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/input_1.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ_expanded/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ_expanded/test_data_set_0/input_1.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ_expanded/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2_expanded/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2_expanded/test_data_set_0/input_1.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2_expanded/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_STRING/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_STRING_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_STRING_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_STRING_to_FLOAT_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_center_crop_pad_crop_negative_axes_hwc/model.onnx +0 -0
- onnx/backend/test/data/node/test_center_crop_pad_crop_negative_axes_hwc/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_center_crop_pad_crop_negative_axes_hwc/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_center_crop_pad_crop_negative_axes_hwc/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_center_crop_pad_crop_negative_axes_hwc_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_center_crop_pad_crop_negative_axes_hwc_expanded/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_center_crop_pad_crop_negative_axes_hwc_expanded/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_center_crop_pad_crop_negative_axes_hwc_expanded/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_col2im_pads/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_constant/model.onnx +0 -0
- onnx/backend/test/data/node/test_constant_pad/model.onnx +0 -0
- onnx/backend/test/data/node/test_constant_pad_axes/model.onnx +0 -0
- onnx/backend/test/data/node/test_constant_pad_negative_axes/model.onnx +0 -0
- onnx/backend/test/data/node/test_constant_pad_negative_axes/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_constant_pad_negative_axes/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_constant_pad_negative_axes/test_data_set_0/input_2.pb +1 -0
- onnx/backend/test/data/node/test_constant_pad_negative_axes/test_data_set_0/input_3.pb +1 -0
- onnx/backend/test/data/node/test_constant_pad_negative_axes/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_cosh/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_cosh_example/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_deform_conv_with_mask_bias/model.onnx +0 -0
- onnx/backend/test/data/node/test_deform_conv_with_mask_bias/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_deform_conv_with_mask_bias/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_deform_conv_with_mask_bias/test_data_set_0/input_2.pb +0 -0
- onnx/backend/test/data/node/test_deform_conv_with_mask_bias/test_data_set_0/input_3.pb +0 -0
- onnx/backend/test/data/node/test_deform_conv_with_mask_bias/test_data_set_0/input_4.pb +0 -0
- onnx/backend/test/data/node/test_deform_conv_with_mask_bias/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_deform_conv_with_multiple_offset_groups/model.onnx +0 -0
- onnx/backend/test/data/node/test_deform_conv_with_multiple_offset_groups/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_deform_conv_with_multiple_offset_groups/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_deform_conv_with_multiple_offset_groups/test_data_set_0/input_2.pb +0 -0
- onnx/backend/test/data/node/test_deform_conv_with_multiple_offset_groups/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_dequantizelinear/model.onnx +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_axis/model.onnx +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_e4m3fn/model.onnx +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_e4m3fn/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_e4m3fn/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_e4m3fn/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_e5m2/model.onnx +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_e5m2/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_e5m2/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_e5m2/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_edge_pad/model.onnx +0 -0
- onnx/backend/test/data/node/test_equal/model.onnx +0 -0
- onnx/backend/test/data/node/test_equal_bcast/model.onnx +0 -0
- onnx/backend/test/data/node/test_equal_string/model.onnx +0 -0
- onnx/backend/test/data/node/test_equal_string/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_equal_string/test_data_set_0/input_1.pb +1 -0
- onnx/backend/test/data/node/test_equal_string/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_equal_string_broadcast/model.onnx +0 -0
- onnx/backend/test/data/node/test_equal_string_broadcast/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_equal_string_broadcast/test_data_set_0/input_1.pb +1 -0
- onnx/backend/test/data/node/test_equal_string_broadcast/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_identity/model.onnx +0 -0
- onnx/backend/test/data/node/test_identity_sequence/model.onnx +0 -0
- onnx/backend/test/data/node/test_lppool_1d_default/model.onnx +0 -0
- onnx/backend/test/data/node/test_lppool_1d_default/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_lppool_1d_default/test_data_set_0/output_0.pb +2 -0
- onnx/backend/test/data/node/test_lppool_2d_default/model.onnx +0 -0
- onnx/backend/test/data/node/test_lppool_2d_default/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_lppool_2d_default/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_lppool_2d_dilations/model.onnx +0 -0
- onnx/backend/test/data/node/test_lppool_2d_dilations/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_lppool_2d_dilations/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_lppool_2d_pads/model.onnx +0 -0
- onnx/backend/test/data/node/test_lppool_2d_pads/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_lppool_2d_pads/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_lppool_2d_same_lower/model.onnx +0 -0
- onnx/backend/test/data/node/test_lppool_2d_same_lower/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_lppool_2d_same_lower/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_lppool_2d_same_upper/model.onnx +0 -0
- onnx/backend/test/data/node/test_lppool_2d_same_upper/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_lppool_2d_same_upper/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_lppool_2d_strides/model.onnx +0 -0
- onnx/backend/test/data/node/test_lppool_2d_strides/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_lppool_2d_strides/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_lppool_3d_default/model.onnx +0 -0
- onnx/backend/test/data/node/test_lppool_3d_default/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_lppool_3d_default/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_mish/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_mish_expanded/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear/model.onnx +0 -0
- onnx/backend/test/data/node/test_quantizelinear_axis/model.onnx +0 -0
- onnx/backend/test/data/node/test_quantizelinear_e4m3fn/model.onnx +0 -0
- onnx/backend/test/data/node/test_quantizelinear_e4m3fn/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_e4m3fn/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_e4m3fn/test_data_set_0/input_2.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_e4m3fn/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_e5m2/model.onnx +0 -0
- onnx/backend/test/data/node/test_quantizelinear_e5m2/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_e5m2/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_e5m2/test_data_set_0/input_2.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_e5m2/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_reflect_pad/model.onnx +0 -0
- onnx/backend/test/data/node/test_reshape_allowzero_reordered/model.onnx +0 -0
- onnx/backend/test/data/node/test_reshape_extended_dims/model.onnx +0 -0
- onnx/backend/test/data/node/test_reshape_negative_dim/model.onnx +0 -0
- onnx/backend/test/data/node/test_reshape_negative_extended_dims/model.onnx +0 -0
- onnx/backend/test/data/node/test_reshape_one_dim/model.onnx +0 -0
- onnx/backend/test/data/node/test_reshape_reduced_dims/model.onnx +0 -0
- onnx/backend/test/data/node/test_reshape_reordered_all_dims/model.onnx +0 -0
- onnx/backend/test/data/node/test_reshape_reordered_last_dims/model.onnx +0 -0
- onnx/backend/test/data/node/test_reshape_zero_and_negative_dim/model.onnx +0 -0
- onnx/backend/test/data/node/test_reshape_zero_dim/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_downsample_scales_cubic/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_downsample_scales_cubic_A_n0p5_exclude_outside/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_downsample_scales_cubic_align_corners/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_downsample_scales_cubic_antialias/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_downsample_scales_linear/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_downsample_scales_linear_align_corners/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_downsample_scales_linear_antialias/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_downsample_scales_linear_half_pixel_symmetric/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_downsample_scales_linear_half_pixel_symmetric/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_resize_downsample_scales_linear_half_pixel_symmetric/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_resize_downsample_scales_linear_half_pixel_symmetric/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_resize_downsample_scales_nearest/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_downsample_sizes_cubic/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_downsample_sizes_cubic_antialias/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_downsample_sizes_linear_antialias/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_downsample_sizes_linear_pytorch_half_pixel/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_downsample_sizes_nearest/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_downsample_sizes_nearest_not_larger/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_downsample_sizes_nearest_not_smaller/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_tf_crop_and_resize/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_tf_crop_and_resize_axes_2_3/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_tf_crop_and_resize_axes_3_2/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_scales_cubic/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_scales_cubic_A_n0p5_exclude_outside/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_scales_cubic_align_corners/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_scales_cubic_asymmetric/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_scales_linear/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_scales_linear_align_corners/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_scales_linear_half_pixel_symmetric/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_scales_linear_half_pixel_symmetric/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_resize_upsample_scales_linear_half_pixel_symmetric/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_resize_upsample_scales_linear_half_pixel_symmetric/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_resize_upsample_scales_nearest/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_scales_nearest_axes_2_3/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_scales_nearest_axes_3_2/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_sizes_cubic/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_sizes_nearest/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_axes_2_3/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_axes_3_2/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_ceil_half_pixel/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_floor_align_corners/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_not_larger/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric/model.onnx +0 -0
- onnx/backend/test/data/node/test_roialign_mode_max/model.onnx +0 -0
- onnx/backend/test/data/node/test_roialign_mode_max/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_roialign_mode_max/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_roialign_mode_max/test_data_set_0/input_2.pb +0 -0
- onnx/backend/test/data/node/test_roialign_mode_max/test_data_set_0/output_0.pb +2 -0
- onnx/backend/test/data/node/test_shape/model.onnx +0 -0
- onnx/backend/test/data/node/test_shape_clip_end/model.onnx +0 -0
- onnx/backend/test/data/node/test_shape_clip_start/model.onnx +0 -0
- onnx/backend/test/data/node/test_shape_end_1/model.onnx +0 -0
- onnx/backend/test/data/node/test_shape_end_negative_1/model.onnx +0 -0
- onnx/backend/test/data/node/test_shape_example/model.onnx +0 -0
- onnx/backend/test/data/node/test_shape_start_1/model.onnx +0 -0
- onnx/backend/test/data/node/test_shape_start_1_end_2/model.onnx +0 -0
- onnx/backend/test/data/node/test_shape_start_1_end_negative_1/model.onnx +0 -0
- onnx/backend/test/data/node/test_shape_start_negative_1/model.onnx +0 -0
- onnx/backend/test/data/node/test_sinh/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_size/model.onnx +0 -0
- onnx/backend/test/data/node/test_size_example/model.onnx +0 -0
- onnx/backend/test/data/node/test_softplus_example_expanded_ver18/model.onnx +0 -0
- onnx/backend/test/data/node/{test_softplus_expanded → test_softplus_expanded_ver18}/model.onnx +0 -0
- onnx/backend/test/data/node/test_split_to_sequence_1/model.onnx +0 -0
- onnx/backend/test/data/node/test_split_to_sequence_1/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_split_to_sequence_1/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_split_to_sequence_1/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_split_to_sequence_2/model.onnx +0 -0
- onnx/backend/test/data/node/test_split_to_sequence_2/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_split_to_sequence_2/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_split_to_sequence_2/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_split_to_sequence_nokeepdims/model.onnx +0 -0
- onnx/backend/test/data/node/test_split_to_sequence_nokeepdims/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_split_to_sequence_nokeepdims/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_tan/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_wrap_pad/model.onnx +0 -0
- onnx/backend/test/data/node/test_wrap_pad/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_wrap_pad/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_wrap_pad/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/real/test_bvlc_alexnet/data.json +1 -1
- onnx/backend/test/data/real/test_densenet121/data.json +1 -1
- onnx/backend/test/data/real/test_inception_v1/data.json +1 -1
- onnx/backend/test/data/real/test_inception_v2/data.json +1 -1
- onnx/backend/test/data/real/test_resnet50/data.json +1 -1
- onnx/backend/test/data/real/test_shufflenet/data.json +1 -1
- onnx/backend/test/data/real/test_squeezenet/data.json +1 -1
- onnx/backend/test/data/real/test_vgg19/data.json +1 -1
- onnx/backend/test/data/real/test_zfnet512/data.json +1 -1
- onnx/backend/test/loader/__init__.py +3 -1
- onnx/backend/test/report/__init__.py +3 -1
- onnx/backend/test/report/base.py +2 -0
- onnx/backend/test/report/coverage.py +8 -14
- onnx/backend/test/runner/__init__.py +146 -39
- onnx/backend/test/runner/item.py +2 -0
- onnx/backend/test/stat_coverage.py +23 -26
- onnx/bin/__init__.py +2 -0
- onnx/bin/checker.py +2 -0
- onnx/checker.cc +26 -9
- onnx/checker.h +3 -3
- onnx/checker.py +22 -5
- onnx/common/array_ref.h +2 -0
- onnx/common/assertions.cc +2 -0
- onnx/common/assertions.h +2 -0
- onnx/common/common.h +2 -0
- onnx/common/constants.h +3 -3
- onnx/common/file_utils.h +3 -1
- onnx/common/graph_node_list.h +2 -0
- onnx/common/interned_strings.cc +2 -0
- onnx/common/interned_strings.h +2 -0
- onnx/common/ir.h +2 -0
- onnx/common/ir_pb_converter.cc +7 -1
- onnx/common/ir_pb_converter.h +2 -0
- onnx/common/model_helpers.cc +3 -3
- onnx/common/model_helpers.h +3 -3
- onnx/common/path.cc +0 -1
- onnx/common/path.h +0 -1
- onnx/common/platform_helpers.h +2 -0
- onnx/common/status.cc +2 -0
- onnx/common/status.h +2 -0
- onnx/common/stl_backports.h +3 -3
- onnx/common/tensor.h +24 -171
- onnx/common/version.h +3 -1
- onnx/compose.py +40 -32
- onnx/cpp2py_export.cc +268 -89
- onnx/defs/__init__.py +9 -7
- onnx/defs/attr_proto_util.cc +2 -0
- onnx/defs/attr_proto_util.h +2 -0
- onnx/defs/controlflow/defs.cc +25 -369
- onnx/defs/controlflow/old.cc +444 -0
- onnx/defs/controlflow/utils.cc +357 -0
- onnx/defs/controlflow/utils.h +21 -0
- onnx/defs/data_propagators.h +2 -0
- onnx/defs/data_type_utils.cc +6 -2
- onnx/defs/gen_doc.py +32 -46
- onnx/defs/gen_shape_inference_information.py +2 -0
- onnx/defs/generator/defs.cc +21 -19
- onnx/defs/generator/old.cc +159 -0
- onnx/defs/logical/defs.cc +17 -16
- onnx/defs/logical/old.cc +23 -0
- onnx/defs/math/defs.cc +155 -131
- onnx/defs/math/old.cc +1 -1
- onnx/defs/nn/defs.cc +135 -45
- onnx/defs/nn/old.cc +142 -9
- onnx/defs/operator_sets.h +45 -0
- onnx/defs/optional/defs.cc +8 -4
- onnx/defs/parser.cc +50 -3
- onnx/defs/parser.h +43 -31
- onnx/defs/printer.cc +7 -1
- onnx/defs/printer.h +1 -1
- onnx/defs/quantization/defs.cc +63 -26
- onnx/defs/quantization/old.cc +102 -1
- onnx/defs/reduction/defs.cc +1 -1
- onnx/defs/reduction/utils.cc +5 -4
- onnx/defs/rnn/defs.cc +95 -173
- onnx/defs/schema.cc +45 -29
- onnx/defs/schema.h +125 -15
- onnx/defs/sequence/defs.cc +11 -8
- onnx/defs/shape_inference.cc +25 -4
- onnx/defs/shape_inference.h +29 -1
- onnx/defs/tensor/defs.cc +500 -566
- onnx/defs/tensor/old.cc +777 -47
- onnx/defs/tensor/utils.cc +130 -8
- onnx/defs/tensor/utils.h +2 -0
- onnx/defs/tensor_proto_util.cc +3 -0
- onnx/defs/traditionalml/defs.cc +19 -2
- onnx/examples/Protobufs.ipynb +129 -31
- onnx/examples/check_model.ipynb +29 -21
- onnx/examples/load_model.ipynb +25 -3
- onnx/examples/make_model.ipynb +32 -23
- onnx/external_data_helper.py +6 -6
- onnx/frontend/__init__.py +2 -0
- onnx/gen_proto.py +18 -24
- onnx/helper.py +393 -108
- onnx/hub.py +189 -20
- onnx/mapping.py +29 -3
- onnx/numpy_helper.py +263 -52
- onnx/onnx-ml.proto +28 -6
- onnx/onnx-operators-ml.proto +1 -1
- onnx/onnx-operators.in.proto +1 -1
- onnx/onnx-operators.proto +1 -1
- onnx/onnx.in.proto +28 -6
- onnx/onnx.proto +28 -6
- onnx/onnx_cpp2py_export.cp39-win_amd64.pyd +0 -0
- onnx/onnx_data_pb2.pyi +2 -1
- onnx/onnx_ml_pb2.py +33 -33
- onnx/onnx_ml_pb2.pyi +12 -2
- onnx/onnx_operators_ml_pb2.pyi +2 -1
- onnx/parser.py +29 -13
- onnx/printer.py +6 -4
- onnx/proto_utils.h +3 -3
- onnx/py_utils.h +3 -3
- onnx/reference/__init__.py +2 -0
- onnx/reference/custom_element_types.py +11 -0
- onnx/reference/op_run.py +84 -8
- onnx/reference/ops/__init__.py +5 -1
- onnx/reference/ops/_helpers.py +55 -0
- onnx/reference/ops/_op.py +19 -12
- onnx/reference/ops/_op_common_indices.py +2 -0
- onnx/reference/ops/_op_common_pool.py +4 -9
- onnx/reference/ops/_op_common_random.py +2 -0
- onnx/reference/ops/_op_common_window.py +2 -0
- onnx/reference/ops/_op_list.py +208 -214
- onnx/reference/ops/aionnx_preview_training/__init__.py +4 -2
- onnx/reference/ops/aionnx_preview_training/_op_list.py +15 -38
- onnx/reference/ops/aionnx_preview_training/_op_run_training.py +2 -0
- onnx/reference/ops/aionnx_preview_training/op_adagrad.py +3 -1
- onnx/reference/ops/aionnx_preview_training/op_adam.py +3 -1
- onnx/reference/ops/aionnx_preview_training/op_momentum.py +3 -1
- onnx/reference/ops/aionnxml/__init__.py +3 -0
- onnx/reference/ops/aionnxml/_common_classifier.py +81 -0
- onnx/reference/ops/aionnxml/_op_list.py +97 -0
- onnx/reference/ops/aionnxml/_op_run_aionnxml.py +8 -0
- onnx/reference/ops/aionnxml/op_array_feature_extractor.py +50 -0
- onnx/reference/ops/aionnxml/op_binarizer.py +15 -0
- onnx/reference/ops/aionnxml/op_dict_vectorizer.py +56 -0
- onnx/reference/ops/aionnxml/op_feature_vectorizer.py +30 -0
- onnx/reference/ops/aionnxml/op_imputer.py +47 -0
- onnx/reference/ops/aionnxml/op_label_encoder.py +52 -0
- onnx/reference/ops/aionnxml/op_linear_classifier.py +99 -0
- onnx/reference/ops/aionnxml/op_linear_regressor.py +26 -0
- onnx/reference/ops/aionnxml/op_normalizer.py +41 -0
- onnx/reference/ops/aionnxml/op_one_hot_encoder.py +55 -0
- onnx/reference/ops/aionnxml/op_scaler.py +12 -0
- onnx/reference/ops/aionnxml/op_svm_classifier.py +334 -0
- onnx/reference/ops/aionnxml/op_svm_helper.py +99 -0
- onnx/reference/ops/aionnxml/op_svm_regressor.py +45 -0
- onnx/reference/ops/aionnxml/op_tree_ensemble_classifier.py +132 -0
- onnx/reference/ops/aionnxml/op_tree_ensemble_helper.py +109 -0
- onnx/reference/ops/aionnxml/op_tree_ensemble_regressor.py +105 -0
- onnx/reference/ops/experimental/__init__.py +3 -1
- onnx/reference/ops/experimental/_op_list.py +15 -36
- onnx/reference/ops/experimental/_op_run_experimental.py +2 -0
- onnx/reference/ops/experimental/op_im2col.py +3 -2
- onnx/reference/ops/op_abs.py +3 -1
- onnx/reference/ops/op_acos.py +3 -1
- onnx/reference/ops/op_acosh.py +3 -1
- onnx/reference/ops/op_add.py +3 -1
- onnx/reference/ops/op_and.py +3 -1
- onnx/reference/ops/op_argmax.py +4 -9
- onnx/reference/ops/op_argmin.py +4 -9
- onnx/reference/ops/op_asin.py +3 -1
- onnx/reference/ops/op_asinh.py +3 -1
- onnx/reference/ops/op_atan.py +3 -1
- onnx/reference/ops/op_atanh.py +3 -1
- onnx/reference/ops/op_attribute_has_value.py +2 -0
- onnx/reference/ops/op_average_pool.py +80 -2
- onnx/reference/ops/op_batch_normalization.py +14 -11
- onnx/reference/ops/op_bernoulli.py +3 -2
- onnx/reference/ops/op_bitshift.py +3 -1
- onnx/reference/ops/op_bitwise_and.py +3 -1
- onnx/reference/ops/op_bitwise_not.py +3 -1
- onnx/reference/ops/op_bitwise_or.py +3 -1
- onnx/reference/ops/op_bitwise_xor.py +3 -1
- onnx/reference/ops/op_blackman_window.py +3 -1
- onnx/reference/ops/op_cast.py +91 -10
- onnx/reference/ops/op_cast_like.py +32 -7
- onnx/reference/ops/op_ceil.py +3 -1
- onnx/reference/ops/op_celu.py +3 -1
- onnx/reference/ops/op_center_crop_pad.py +7 -3
- onnx/reference/ops/op_clip.py +2 -7
- onnx/reference/ops/op_col2im.py +3 -2
- onnx/reference/ops/op_compress.py +2 -0
- onnx/reference/ops/op_concat.py +6 -5
- onnx/reference/ops/op_concat_from_sequence.py +2 -0
- onnx/reference/ops/op_constant.py +46 -35
- onnx/reference/ops/op_constant_of_shape.py +4 -0
- onnx/reference/ops/op_conv.py +62 -39
- onnx/reference/ops/op_conv_integer.py +3 -2
- onnx/reference/ops/op_conv_transpose.py +4 -3
- onnx/reference/ops/op_cos.py +3 -1
- onnx/reference/ops/op_cosh.py +3 -1
- onnx/reference/ops/op_cum_sum.py +2 -0
- onnx/reference/ops/op_deform_conv.py +178 -0
- onnx/reference/ops/op_depth_to_space.py +2 -0
- onnx/reference/ops/op_dequantize_linear.py +72 -21
- onnx/reference/ops/op_det.py +3 -4
- onnx/reference/ops/op_dft.py +2 -0
- onnx/reference/ops/op_div.py +3 -1
- onnx/reference/ops/op_dropout.py +2 -7
- onnx/reference/ops/op_dynamic_quantize_linear.py +2 -0
- onnx/reference/ops/op_einsum.py +2 -0
- onnx/reference/ops/op_elu.py +4 -2
- onnx/reference/ops/op_equal.py +3 -1
- onnx/reference/ops/op_erf.py +3 -1
- onnx/reference/ops/op_exp.py +4 -2
- onnx/reference/ops/op_expand.py +2 -0
- onnx/reference/ops/op_eyelike.py +9 -4
- onnx/reference/ops/op_flatten.py +3 -1
- onnx/reference/ops/op_floor.py +3 -1
- onnx/reference/ops/op_gather.py +2 -0
- onnx/reference/ops/op_gather_elements.py +2 -0
- onnx/reference/ops/op_gathernd.py +3 -1
- onnx/reference/ops/op_gemm.py +5 -10
- onnx/reference/ops/op_global_average_pool.py +6 -5
- onnx/reference/ops/op_global_max_pool.py +2 -0
- onnx/reference/ops/op_greater.py +3 -1
- onnx/reference/ops/op_greater_or_equal.py +3 -1
- onnx/reference/ops/op_grid_sample.py +3 -1
- onnx/reference/ops/op_gru.py +4 -1
- onnx/reference/ops/op_hamming_window.py +3 -1
- onnx/reference/ops/op_hann_window.py +3 -1
- onnx/reference/ops/op_hard_sigmoid.py +3 -1
- onnx/reference/ops/op_hardmax.py +3 -1
- onnx/reference/ops/op_identity.py +3 -1
- onnx/reference/ops/op_if.py +16 -7
- onnx/reference/ops/op_instance_normalization.py +2 -0
- onnx/reference/ops/op_isinf.py +2 -0
- onnx/reference/ops/op_isnan.py +3 -1
- onnx/reference/ops/op_layer_normalization.py +2 -0
- onnx/reference/ops/op_leaky_relu.py +4 -2
- onnx/reference/ops/op_less.py +3 -1
- onnx/reference/ops/op_less_or_equal.py +3 -1
- onnx/reference/ops/op_log.py +4 -2
- onnx/reference/ops/op_log_softmax.py +3 -1
- onnx/reference/ops/op_loop.py +4 -2
- onnx/reference/ops/op_lp_normalization.py +4 -2
- onnx/reference/ops/op_lp_pool.py +41 -0
- onnx/reference/ops/op_lrn.py +9 -5
- onnx/reference/ops/op_lstm.py +4 -2
- onnx/reference/ops/op_matmul.py +3 -1
- onnx/reference/ops/op_matmul_integer.py +2 -0
- onnx/reference/ops/op_max.py +3 -1
- onnx/reference/ops/op_max_pool.py +3 -1
- onnx/reference/ops/op_max_unpool.py +2 -0
- onnx/reference/ops/op_mean.py +3 -1
- onnx/reference/ops/op_mel_weight_matrix.py +2 -0
- onnx/reference/ops/op_min.py +3 -1
- onnx/reference/ops/op_mod.py +2 -0
- onnx/reference/ops/op_mul.py +3 -1
- onnx/reference/ops/op_neg.py +3 -1
- onnx/reference/ops/op_negative_log_likelihood_loss.py +3 -1
- onnx/reference/ops/op_non_max_suppression.py +22 -19
- onnx/reference/ops/op_non_zero.py +4 -1
- onnx/reference/ops/op_not.py +3 -1
- onnx/reference/ops/op_one_hot.py +3 -1
- onnx/reference/ops/op_optional.py +2 -0
- onnx/reference/ops/op_optional_get_element.py +4 -8
- onnx/reference/ops/op_optional_has_element.py +3 -9
- onnx/reference/ops/op_or.py +3 -1
- onnx/reference/ops/op_pad.py +18 -29
- onnx/reference/ops/op_pow.py +2 -0
- onnx/reference/ops/op_prelu.py +4 -2
- onnx/reference/ops/op_qlinear_conv.py +3 -2
- onnx/reference/ops/op_qlinear_matmul.py +2 -0
- onnx/reference/ops/op_quantize_linear.py +100 -15
- onnx/reference/ops/op_random_normal.py +3 -1
- onnx/reference/ops/op_random_normal_like.py +3 -2
- onnx/reference/ops/op_random_uniform.py +3 -1
- onnx/reference/ops/op_random_uniform_like.py +3 -2
- onnx/reference/ops/op_range.py +2 -0
- onnx/reference/ops/op_reciprocal.py +4 -2
- onnx/reference/ops/op_reduce_l1.py +17 -31
- onnx/reference/ops/op_reduce_l2.py +17 -35
- onnx/reference/ops/op_reduce_log_sum.py +6 -29
- onnx/reference/ops/op_reduce_log_sum_exp.py +6 -29
- onnx/reference/ops/op_reduce_max.py +15 -36
- onnx/reference/ops/op_reduce_mean.py +15 -33
- onnx/reference/ops/op_reduce_min.py +15 -32
- onnx/reference/ops/op_reduce_prod.py +15 -29
- onnx/reference/ops/op_reduce_sum.py +17 -45
- onnx/reference/ops/op_reduce_sum_square.py +15 -29
- onnx/reference/ops/op_relu.py +3 -1
- onnx/reference/ops/op_reshape.py +2 -7
- onnx/reference/ops/op_resize.py +59 -26
- onnx/reference/ops/op_reverse_sequence.py +2 -0
- onnx/reference/ops/op_rnn.py +3 -7
- onnx/reference/ops/op_roi_align.py +7 -5
- onnx/reference/ops/op_round.py +4 -2
- onnx/reference/ops/op_scan.py +5 -2
- onnx/reference/ops/op_scatter_elements.py +17 -4
- onnx/reference/ops/op_scatternd.py +2 -0
- onnx/reference/ops/op_selu.py +5 -1
- onnx/reference/ops/op_sequence_at.py +2 -0
- onnx/reference/ops/op_sequence_construct.py +2 -0
- onnx/reference/ops/op_sequence_empty.py +2 -0
- onnx/reference/ops/op_sequence_erase.py +2 -0
- onnx/reference/ops/op_sequence_insert.py +4 -2
- onnx/reference/ops/op_sequence_length.py +7 -1
- onnx/reference/ops/op_sequence_map.py +4 -2
- onnx/reference/ops/op_shape.py +2 -7
- onnx/reference/ops/op_shrink.py +3 -1
- onnx/reference/ops/op_sigmoid.py +7 -1
- onnx/reference/ops/op_sign.py +3 -1
- onnx/reference/ops/op_sin.py +3 -1
- onnx/reference/ops/op_sinh.py +3 -1
- onnx/reference/ops/op_size.py +2 -0
- onnx/reference/ops/op_slice.py +3 -9
- onnx/reference/ops/op_softmax.py +4 -2
- onnx/reference/ops/op_softmax_cross_entropy_loss.py +4 -1
- onnx/reference/ops/op_softplus.py +4 -2
- onnx/reference/ops/op_softsign.py +3 -1
- onnx/reference/ops/op_space_to_depth.py +3 -1
- onnx/reference/ops/op_split.py +7 -9
- onnx/reference/ops/op_split_to_sequence.py +41 -10
- onnx/reference/ops/op_sqrt.py +4 -2
- onnx/reference/ops/op_squeeze.py +3 -12
- onnx/reference/ops/op_stft.py +8 -7
- onnx/reference/ops/op_string_normalizer.py +4 -3
- onnx/reference/ops/op_sub.py +3 -1
- onnx/reference/ops/op_sum.py +3 -1
- onnx/reference/ops/op_tan.py +3 -1
- onnx/reference/ops/op_tanh.py +3 -1
- onnx/reference/ops/op_tfidf_vectorizer.py +15 -13
- onnx/reference/ops/op_thresholded_relu.py +4 -2
- onnx/reference/ops/op_tile.py +2 -0
- onnx/reference/ops/op_topk.py +12 -19
- onnx/reference/ops/op_transpose.py +2 -0
- onnx/reference/ops/op_trilu.py +3 -1
- onnx/reference/ops/op_unique.py +2 -0
- onnx/reference/ops/op_unsqueeze.py +2 -9
- onnx/reference/ops/op_upsample.py +9 -8
- onnx/reference/ops/op_where.py +7 -1
- onnx/reference/ops/op_xor.py +3 -1
- onnx/reference/reference_evaluator.py +64 -20
- onnx/shape_inference/implementation.cc +204 -43
- onnx/shape_inference/implementation.h +33 -13
- onnx/shape_inference.py +37 -12
- onnx/string_utils.h +3 -3
- onnx/test/cpp/common_path_test.cc +2 -0
- onnx/test/cpp/data_propagation_test.cc +2 -0
- onnx/test/cpp/function_context_test.cc +2 -0
- onnx/test/cpp/function_get_test.cc +2 -0
- onnx/test/cpp/function_verify_test.cc +176 -0
- onnx/test/cpp/op_reg_test.cc +2 -0
- onnx/test/cpp/parser_test.cc +37 -1
- onnx/test/cpp/schema_registration_test.cc +2 -0
- onnx/test/cpp/shape_inference_test.cc +2 -0
- onnx/test/cpp/test_main.cc +2 -0
- onnx/tools/__init__.py +2 -0
- onnx/tools/net_drawer.py +13 -9
- onnx/tools/replace_constants.py +429 -0
- onnx/tools/update_model_dims.py +7 -9
- onnx/utils.py +16 -6
- onnx/version.py +2 -2
- onnx/version_converter/BaseConverter.h +2 -0
- onnx/version_converter/adapters/adapter.h +2 -0
- onnx/version_converter/adapters/axes_attribute_to_input.h +2 -0
- onnx/version_converter/adapters/axes_input_to_attribute.h +2 -0
- onnx/version_converter/adapters/batch_normalization_13_14.h +2 -0
- onnx/version_converter/adapters/broadcast_backward_compatibility.h +2 -0
- onnx/version_converter/adapters/broadcast_forward_compatibility.h +2 -0
- onnx/version_converter/adapters/cast_9_8.h +2 -0
- onnx/version_converter/adapters/clip_10_11.h +2 -0
- onnx/version_converter/adapters/compatible.h +2 -0
- onnx/version_converter/adapters/dropout_11_12.h +2 -0
- onnx/version_converter/adapters/extend_supported_types.h +2 -0
- onnx/version_converter/adapters/gemm_6_7.h +2 -0
- onnx/version_converter/adapters/gemm_7_6.h +2 -0
- onnx/version_converter/adapters/maxpool_8_7.h +2 -0
- onnx/version_converter/adapters/no_previous_version.h +2 -0
- onnx/version_converter/adapters/pad_10_11.h +4 -0
- onnx/version_converter/adapters/remove_consumed_inputs.h +2 -0
- onnx/version_converter/adapters/reshape_4_5.h +2 -0
- onnx/version_converter/adapters/reshape_5_4.h +2 -0
- onnx/version_converter/adapters/resize_10_11.h +2 -0
- onnx/version_converter/adapters/scan_8_9.h +2 -0
- onnx/version_converter/adapters/scan_9_8.h +2 -0
- onnx/version_converter/adapters/scatter_10_11.h +2 -0
- onnx/version_converter/adapters/slice_9_10.h +2 -0
- onnx/version_converter/adapters/softmax_12_13.h +20 -28
- onnx/version_converter/adapters/split_12_13.h +2 -0
- onnx/version_converter/adapters/split_13_12.h +2 -0
- onnx/version_converter/adapters/split_17_18.h +2 -0
- onnx/version_converter/adapters/sum_8_7.h +2 -0
- onnx/version_converter/adapters/topk_9_10.h +2 -0
- onnx/version_converter/adapters/transformers.h +3 -1
- onnx/version_converter/adapters/type_restriction.h +2 -0
- onnx/version_converter/adapters/upsample_6_7.h +2 -0
- onnx/version_converter/adapters/upsample_8_9.h +2 -0
- onnx/version_converter/adapters/upsample_9_10.h +2 -0
- onnx/version_converter/adapters/upsample_9_8.h +2 -0
- onnx/version_converter/convert.cc +14 -7
- onnx/version_converter/convert.h +20 -0
- onnx/version_converter/helper.cc +3 -3
- onnx/version_converter/helper.h +3 -3
- onnx/version_converter.py +6 -3
- {onnx-1.13.1.dist-info → onnx-1.14.1.dist-info}/METADATA +96 -52
- {onnx-1.13.1.dist-info → onnx-1.14.1.dist-info}/RECORD +1056 -743
- {onnx-1.13.1.dist-info → onnx-1.14.1.dist-info}/WHEEL +1 -1
- onnx/backend/test/data/node/test_softplus_example_expanded/model.onnx +0 -0
- /onnx/backend/test/data/node/{test_softplus_example_expanded → test_softplus_example_expanded_ver18}/test_data_set_0/input_0.pb +0 -0
- /onnx/backend/test/data/node/{test_softplus_example_expanded → test_softplus_example_expanded_ver18}/test_data_set_0/output_0.pb +0 -0
- /onnx/backend/test/data/node/{test_softplus_expanded → test_softplus_expanded_ver18}/test_data_set_0/input_0.pb +0 -0
- /onnx/backend/test/data/node/{test_softplus_expanded → test_softplus_expanded_ver18}/test_data_set_0/output_0.pb +0 -0
- {onnx-1.13.1.dist-info → onnx-1.14.1.dist-info}/LICENSE +0 -0
- {onnx-1.13.1.dist-info → onnx-1.14.1.dist-info}/entry_points.txt +0 -0
- {onnx-1.13.1.dist-info → onnx-1.14.1.dist-info}/top_level.txt +0 -0
onnx/defs/tensor/defs.cc CHANGED

@@ -13,7 +13,7 @@

 namespace ONNX_NAMESPACE {

-static const char*
+static const char* Cast_ver19_doc = R"DOC(
 The operator casts the elements of a given input tensor to a data type
 specified by the 'to' argument and returns an output tensor of the same size in
 the converted type. The 'to' argument must be one of the data types specified
@@ -21,7 +21,7 @@ in the 'DataType' enum field in the TensorProto message.

 Casting from string tensor in plain (e.g., "3.14" and "1000") and scientific numeric representations
 (e.g., "1e-5" and "1E8") to float types is supported. For example, converting string "100.5" to an integer may
-result 100. There are some string literals reserved for special floating-point values;
+yield result 100. There are some string literals reserved for special floating-point values;
 "+INF" (and "INF"), "-INF", and "NaN" are positive infinity, negative infinity, and not-a-number, respectively.
 Any string which can exactly match "+INF" in a case-insensitive way would be mapped to positive infinite. Similarly,
 this case-insensitive rule is applied to "INF" and "NaN". When casting from numeric tensors
@@ -34,7 +34,8 @@ User must be aware of precision loss and value change caused by range difference
 For example, a 64-bit float 3.1415926459 may be round to a 32-bit float 3.141592. Similarly, converting
 an integer 36 to Boolean may produce 1 because we truncate bits which can't be stored in the targeted type.

-In more detail, the conversion among numerical types should follow these rules
+In more detail, the conversion among numerical types should follow these rules
+if the destination type is not a float 8 type.

 * Casting from floating point to:
 * floating point: +/- infinity if OOR (out of range).
@@ -43,24 +44,60 @@ In more detail, the conversion among numerical types should follow these rules:
 * Casting from fixed point to:
 * floating point: +/- infinity if OOR. (+ infinity in the case of uint)
 * fixed point: when OOR, discard higher bits and reinterpret (with respect to two's complement representation for
-signed types). For example, 200 (int16) -> -56 (int8).
+signed types). For example, 200 (int16) -> -56 (int8).
 * bool: zero to False; nonzero to True.
 * Casting from bool to:
 * floating point: `{1.0, 0.0}`.
 * fixed point: `{1, 0}`.
 * bool: no change.
+
+Float 8 type were introduced to speed up the training of
+deep models. By default the conversion of a float *x* obeys
+to the following rules. `[x]` means the value rounded to
+the target mantissa width.
+
+| x | E4M3FN | E4M3FNUZ | E5M2 | E5M2FNUZ |
+|------|----|----|----|----|
+| 0 | 0 | 0 | 0 | 0 |
+|-0 | -0 | 0 | -0 | 0 |
+| NaN | NaN | NaN | NaN | NaN |
+| +/- Inf | +/- FLT_MAX | NaN | FLT_MAX | NaN |
+| [x] > FLT_MAX | FLT_MAX | FLT_MAX | FLT_MAX | FLT_MAX |
+| [x] < -FLT_MAX | -FLT_MAX | -FLT_MAX | -FLT_MAX | -FLT_MAX |
+| else | RNE | RNE | RNE | RNE |
+
+The behavior changes if the parameter 'saturate' is set to False.
+The rules then become:
+
+| x | E4M3FN | E4M3FNUZ | E5M2 | E5M2FNUZ |
+|------|----|----|----|----|
+| 0 | 0 | 0 | 0 | 0 |
+|-0 | -0 | 0 | -0 | 0 |
+| NaN | NaN | NaN | NaN | NaN |
+| +/- Inf | NaN | NaN | +/- Inf | NaN |
+| [x] > FLT_MAX | NaN | NaN | Inf | NaN |
+| [x] < -FLT_MAX | NaN | NaN | -Inf | NaN |
+| else | RNE | RNE | RNE | RNE |
 )DOC";

 ONNX_OPERATOR_SET_SCHEMA(
 Cast,
-
+19,
 OpSchema()
-.SetDoc(
+.SetDoc(Cast_ver19_doc)
 .Attr(
 "to",
 "The data type to which the elements of the input tensor are cast. "
 "Strictly must be one of the types from DataType enum in TensorProto",
 AttributeProto::INT)
+.Attr(
+"saturate",
+"The parameter defines how the conversion behaves if an input value is out of "
+"range of the destination type. It only applies for float 8 conversion "
+"(float8e4m3fn, float8e4m3fnuz, float8e5m2, float8e5m2fnuz). It is true by default. "
+"All cases are fully described in two tables inserted in the operator description.",
+AttributeProto::INT,
+static_cast<int64_t>(1))
 .Input(0, "input", "Input tensor to be cast.", "T1", OpSchema::Single, true, 1, OpSchema::Differentiable)
 .Output(
 0,
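As context for the Cast changes above: the new `saturate` attribute is reachable through the regular Python helpers shipped in this wheel. A minimal sketch, not part of the diff, assuming onnx >= 1.14 so that the float 8 enums and opset 19 are available:

```python
import onnx
from onnx import TensorProto, checker, helper

# Cast float32 inputs to float8 E4M3FN without saturation (saturate=0 selects
# the second table in the operator description; 1 is the default).
node = helper.make_node(
    "Cast",
    inputs=["x"],
    outputs=["y"],
    to=TensorProto.FLOAT8E4M3FN,
    saturate=0,
)
graph = helper.make_graph(
    [node],
    "cast_float8_sketch",
    [helper.make_tensor_value_info("x", TensorProto.FLOAT, [4])],
    [helper.make_tensor_value_info("y", TensorProto.FLOAT8E4M3FN, [4])],
)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 19)])
checker.check_model(model)
```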
@@ -87,7 +124,11 @@ ONNX_OPERATOR_SET_SCHEMA(
 "tensor(uint64)",
 "tensor(bool)",
 "tensor(string)",
-"tensor(bfloat16)"
+"tensor(bfloat16)",
+"tensor(float8e4m3fn)",
+"tensor(float8e4m3fnuz)",
+"tensor(float8e5m2)",
+"tensor(float8e5m2fnuz)"},
 "Constrain input types. Casting from complex is not supported.")
 .TypeConstraint(
 "T2",
@@ -104,7 +145,11 @@ ONNX_OPERATOR_SET_SCHEMA(
 "tensor(uint64)",
 "tensor(bool)",
 "tensor(string)",
-"tensor(bfloat16)"
+"tensor(bfloat16)",
+"tensor(float8e4m3fn)",
+"tensor(float8e4m3fnuz)",
+"tensor(float8e5m2)",
+"tensor(float8e5m2fnuz)"},
 "Constrain output types. Casting to complex is not supported.")
 .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
 propagateElemTypeFromAttributeToOutput(ctx, "to", 0);
@@ -116,7 +161,7 @@ ONNX_OPERATOR_SET_SCHEMA(
 PropagateShapeDataFromInputToOutput(ctx, 0);
 }));

-static const char*
+static const char* CastLike_ver19_doc = R"DOC(
 The operator casts the elements of a given input tensor (the first input) to
 the same data type as the elements of the second input tensor.
 See documentation of the Cast operator for further details.
@@ -124,9 +169,17 @@ See documentation of the Cast operator for further details.

 ONNX_OPERATOR_SET_SCHEMA(
 CastLike,
-
+19,
 OpSchema()
-.SetDoc(
+.SetDoc(CastLike_ver19_doc)
+.Attr(
+"saturate",
+"The parameter defines how the conversion behaves if an input value is out of "
+"range of the destination type. It only applies for float 8 conversion "
+"(float8e4m3fn, float8e4m3fnuz, float8e5m2, float8e5m2fnuz). It is true by default. "
+"Please refer to operator Cast description for further details.",
+AttributeProto::INT,
+static_cast<int64_t>(1))
 .Input(0, "input", "Input tensor to be cast.", "T1", OpSchema::Single, true, 1, OpSchema::Differentiable)
 .Input(
 1,
@@ -161,7 +214,11 @@ ONNX_OPERATOR_SET_SCHEMA(
 "tensor(uint64)",
 "tensor(bool)",
 "tensor(string)",
-"tensor(bfloat16)"
+"tensor(bfloat16)",
+"tensor(float8e4m3fn)",
+"tensor(float8e4m3fnuz)",
+"tensor(float8e5m2)",
+"tensor(float8e5m2fnuz)"},
 "Constrain input types. Casting from complex is not supported.")
 .TypeConstraint(
 "T2",
@@ -178,7 +235,11 @@ ONNX_OPERATOR_SET_SCHEMA(
 "tensor(uint64)",
 "tensor(bool)",
 "tensor(string)",
-"tensor(bfloat16)"
+"tensor(bfloat16)",
+"tensor(float8e4m3fn)",
+"tensor(float8e4m3fnuz)",
+"tensor(float8e5m2)",
+"tensor(float8e5m2fnuz)"},
 "Constrain output types. Casting to complex is not supported.")
 .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
 propagateElemTypeFromInputToOutput(ctx, 1, 0);
@@ -200,7 +261,7 @@ ONNX_OPERATOR_SET_SCHEMA(
 return true;
 }));

-static const char*
+static const char* Reshape_ver19_doc = R"DOC(
 Reshape the input tensor similar to numpy.reshape.
 First input is the data tensor, second input is a shape tensor which specifies the output shape. It outputs the reshaped tensor.
 At most one dimension of the new shape can be -1. In this case, the value is
@@ -218,9 +279,9 @@ to -1 cannot be determined uniquely.

 ONNX_OPERATOR_SET_SCHEMA(
 Reshape,
-
+19,
 OpSchema()
-.SetDoc(
+.SetDoc(Reshape_ver19_doc)
 .Attr(
 "allowzero",
 "(Optional) By default, when any value in the 'shape' input is equal to zero "
@@ -240,10 +301,7 @@ ONNX_OPERATOR_SET_SCHEMA(
 1,
 OpSchema::NonDifferentiable)
 .Output(0, "reshaped", "Reshaped data.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
-.TypeConstraint(
-"T",
-OpSchema::all_tensor_types_with_bfloat(),
-"Constrain input and output types to all tensor types.")
+.TypeConstraint("T", OpSchema::all_tensor_types_ir9(), "Constrain input and output types to all tensor types.")
 .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
 // Type inference
 propagateElemTypeFromInputToOutput(ctx, 0, 0);
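The -1 convention in the Reshape description above behaves like numpy.reshape; a quick illustrative check, not part of the diff:

```python
import numpy as np

x = np.arange(12)
# a single -1 dimension is inferred from the remaining dimensions
assert np.reshape(x, (2, -1)).shape == (2, 6)
```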
@@ -391,28 +449,37 @@ negative axis). Thus, specifying any end value > r is equivalent to specifying a
 value of r, and specifying any start value < -r is equivalent to specifying a start
 value of 0.

-
+Examples:
+
+```
 Input tensor with shape: [2, 3, 4]
 No attributes specified.
 Output: [2, 3, 4]
+```

+```
 Input tensor with shape: [2, 3, 4]
 start: -1
 Output: [4]
+```

+```
 Input tensor with shape: [2, 3, 4]
 end: -1
 Output: [2, 3]
+```

+```
 Input tensor with shape: [2, 3, 4]
 start: 1
 end: 2
 Output: [3]
+```
 )DOC";

 ONNX_OPERATOR_SET_SCHEMA(
 Shape,
-
+19,
 OpSchema()
 .SetDoc(Shape_ver15_doc)
 .Input(0, "data", "An input tensor.", "T", OpSchema::Single, true, 1, OpSchema::NonDifferentiable)
@@ -430,7 +497,7 @@ ONNX_OPERATOR_SET_SCHEMA(
 "If omitted, sizes of all axes upto (including) the last one will be included.",
 AttributeProto::INT,
 OPTIONAL_VALUE)
-.TypeConstraint("T", OpSchema::
+.TypeConstraint("T", OpSchema::all_tensor_types_ir9(), "Input tensor can be of arbitrary type.")
 .TypeConstraint("T1", {"tensor(int64)"}, "Constrain output to int64 tensor.")
 .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
 ctx.getOutputType(0)->mutable_tensor_type()->set_elem_type(TensorProto::INT64);
@@ -453,7 +520,7 @@ ONNX_OPERATOR_SET_SCHEMA(
 output_length->set_dim_value((end - start) < 0 ? 0 : (end - start));
 })
 .PartialDataPropagationFunction([](DataPropagationContext& ctx) {
-if (ctx
+if (hasInputShape(ctx, 0)) {
 auto& input_shape = ctx.getInputType(0)->tensor_type().shape();
 int64_t rank = static_cast<int64_t>(input_shape.dim_size());
 int64_t start = getAttribute(ctx, "start", 0);
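The `start`/`end` slicing rules quoted above can be mimicked with a few lines of numpy. The helper below is hypothetical (it is not the operator's reference implementation); it only reproduces the four documented examples:

```python
import numpy as np

def shape_reference(data, start=0, end=None):
    # negative values count from the back; out-of-range values clamp to [0, rank]
    rank = data.ndim
    end = rank if end is None else end
    start = start + rank if start < 0 else start
    end = end + rank if end < 0 else end
    start = min(max(start, 0), rank)
    end = min(max(end, 0), rank)
    return np.array(data.shape[start:end], dtype=np.int64)

x = np.zeros((2, 3, 4))
assert shape_reference(x).tolist() == [2, 3, 4]
assert shape_reference(x, start=-1).tolist() == [4]
assert shape_reference(x, end=-1).tolist() == [2, 3]
assert shape_reference(x, start=1, end=2).tolist() == [3]
```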
@@ -478,7 +545,7 @@ Takes a tensor as input and outputs a int64 scalar that equals to the total numb

 ONNX_OPERATOR_SET_SCHEMA(
 Size,
-
+19,
 OpSchema()
 .SetDoc(Size_ver13_doc)
 .Input(0, "data", "An input tensor.", "T", OpSchema::Single, true, 1, OpSchema::NonDifferentiable)
@@ -491,7 +558,7 @@ ONNX_OPERATOR_SET_SCHEMA(
 true,
 1,
 OpSchema::NonDifferentiable)
-.TypeConstraint("T", OpSchema::
+.TypeConstraint("T", OpSchema::all_tensor_types_ir9(), "Input tensor can be of arbitrary type.")
 .TypeConstraint("T1", {"tensor(int64)"}, "Constrain output to int64 tensor, which should be a scalar though.")
 .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
 ctx.getOutputType(0)->mutable_tensor_type()->set_elem_type(TensorProto::INT64);
@@ -528,7 +595,7 @@ ONNX_OPERATOR_SET_SCHEMA(
 1,
 OpSchema::Differentiable)
 .Output(0, "concat_result", "Concatenated tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
-.TypeConstraint("T", OpSchema::
+.TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain output types to any tensor type.")
 .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
 propagateElemTypeFromInputToOutput(ctx, 0, 0);
 auto numInputs = ctx.getNumInputs();
@@ -640,10 +707,7 @@ ONNX_OPERATOR_SET_SCHEMA(
 true,
 1,
 OpSchema::Differentiable)
-.TypeConstraint(
-"T",
-OpSchema::all_tensor_types_with_bfloat(),
-"Constrain input and output types to all tensor types.")
+.TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
 .Attr(
 "axis",
 "Which axis to split on. "
@@ -772,27 +836,34 @@ For slicing to the end of a dimension with unknown size, it is recommended to pa
 in `INT_MAX` when slicing forward and 'INT_MIN' when slicing backward.

 Example 1:
-
-
-
-
-
-
-
-
-
-
-
+
+```
+data = [
+[1, 2, 3, 4],
+[5, 6, 7, 8],
+]
+axes = [0, 1]
+starts = [1, 0]
+ends = [2, 3]
+steps = [1, 2]
+result = [
+[5, 7],
+]
+```
+
 Example 2:
-
-
-
-
-
-
-
-
-
+
+```
+data = [
+[1, 2, 3, 4],
+[5, 6, 7, 8],
+]
+starts = [0, 1]
+ends = [-1, 1000]
+result = [
+[2, 3, 4],
+]
+```
 )DOC";

 inline void processSliceInputs(const int64_t input_rank, int64_t& start, int64_t& end, int64_t& step) {
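The two Slice examples above correspond directly to numpy slicing; a small check, not part of the diff:

```python
import numpy as np

data = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])

# Example 1: axes=[0, 1], starts=[1, 0], ends=[2, 3], steps=[1, 2]
assert data[1:2, 0:3:2].tolist() == [[5, 7]]

# Example 2: starts=[0, 1], ends=[-1, 1000] (default axes and steps)
assert data[0:-1, 1:1000].tolist() == [[2, 3, 4]]
```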
@@ -874,10 +945,7 @@ ONNX_OPERATOR_SET_SCHEMA(
 1,
 OpSchema::NonDifferentiable)
 .Output(0, "output", "Sliced data tensor.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
-.TypeConstraint(
-"T",
-OpSchema::all_tensor_types_with_bfloat(),
-"Constrain input and output types to all tensor types.")
+.TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
 .TypeConstraint("Tind", {"tensor(int32)", "tensor(int64)"}, "Constrain indices to integer types")
 .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
 size_t num_inputs = ctx.getNumInputs();
@@ -939,7 +1007,9 @@ ONNX_OPERATOR_SET_SCHEMA(
 fail_shape_inference("Input axes has incorrect length");
 }
 }
-
+checkAxesRange(axes, input_rank);
+adjustNegativeAxes(axes, input_rank);
+checkDuplicateAxes(axes, input_rank);
 std::vector<int64_t> steps;
 if (!stepsInitializer) {
 steps = std::vector<int64_t>(starts.size(), 1);
@@ -961,21 +1031,10 @@ ONNX_OPERATOR_SET_SCHEMA(
 }
 }

-std::unordered_set<int64_t> unique_axes;
 size_t axes_size = axes.size();
 for (size_t axis_index = 0; axis_index < axes_size; ++axis_index) {
 auto axis = axes[axis_index] < 0 ? axes[axis_index] + static_cast<int64_t>(input_rank) : axes[axis_index];

-if (axis >= static_cast<int64_t>(input_rank) || axis < 0) {
-fail_shape_inference("Input axes has invalid data");
-}
-
-if (unique_axes.find(axis) != unique_axes.end()) {
-fail_shape_inference("'axes' has duplicates");
-}
-
-unique_axes.insert(axis);
-
 auto input_dim = ctx.getInputType(0)->tensor_type().shape().dim((int)axis);

 // input dim value is missing - cannot perform shape inference for
@@ -1089,10 +1148,7 @@ ONNX_OPERATOR_SET_SCHEMA(
 OPTIONAL_VALUE)
 .Input(0, "data", "An input tensor.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
 .Output(0, "transposed", "Transposed output.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
-.TypeConstraint(
-"T",
-OpSchema::all_tensor_types_with_bfloat(),
-"Constrain input and output types to all tensor types.")
+.TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
 .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
 propagateElemTypeFromInputToOutput(ctx, 0, 0);
 if (!hasNInputShapes(ctx, 1)) {
@@ -1255,7 +1311,7 @@ specified by `updates` at specific index positions specified by `indices`. Its o
 is the same as the shape of `data`.

 `indices` is an integer tensor. Let k denote indices.shape[-1], the last dimension in the shape of `indices`.
-
+`indices` is treated as a (q-1)-dimensional tensor of k-tuples, where each k-tuple is a partial-index into `data`.
 Hence, k can be a value at most the rank of `data`. When k equals rank(data), each update entry specifies an
 update to a single element of the tensor. When k is less than rank(data) each update entry specifies an
 update to a slice of the tensor. Index values are allowed to be negative, as per the usual
@@ -1271,10 +1327,12 @@ of shapes.

 The `output` is calculated via the following equation:

-
-
-
-
+```
+output = np.copy(data)
+update_indices = indices.shape[:-1]
+for idx in np.ndindex(update_indices):
+    output[indices[idx]] = updates[idx]
+```

 The order of iteration in the above loop is not specified.
 In particular, indices should not have duplicate entries: that is, if idx1 != idx2, then indices[idx1] != indices[idx2].
@@ -1286,12 +1344,14 @@ In cases where `reduction` is set to "none", indices should not have duplicate e
 then indices[idx1] != indices[idx2]. This ensures that the output value does not depend on the iteration order.
 When `reduction` is set to some reduction function `f`, `output` is calculated as follows:

-
-
-
-
+```
+output = np.copy(data)
+update_indices = indices.shape[:-1]
+for idx in np.ndindex(update_indices):
+    output[indices[idx]] = f(output[indices[idx]], updates[idx])
+```

-where the `f` is
+where the `f` is `+`, `*`, `max` or `min` as specified.

 This operator is the inverse of GatherND.

@@ -1299,25 +1359,25 @@ This operator is the inverse of GatherND.

 Example 1:
 ```
-
-
-
-
+data = [1, 2, 3, 4, 5, 6, 7, 8]
+indices = [[4], [3], [1], [7]]
+updates = [9, 10, 11, 12]
+output = [1, 11, 3, 10, 9, 6, 7, 12]
 ```

 Example 2:
 ```
-
-
-
-
-
-
-
-
-
-
-
+data = [[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+[[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],
+[[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]
+indices = [[0], [2]]
+updates = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]]
+output = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
+[[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]
 ```
 )DOC";

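The ScatterND equation quoted above runs almost verbatim as numpy; a sketch reproducing Example 1 (the `scatter_nd_reference` helper is illustrative only, not the package's reference implementation):

```python
import numpy as np

def scatter_nd_reference(data, indices, updates):
    # follows the reduction="none" equation from the operator description
    output = np.copy(data)
    for idx in np.ndindex(indices.shape[:-1]):
        output[tuple(indices[idx])] = updates[idx]
    return output

data = np.array([1, 2, 3, 4, 5, 6, 7, 8])
indices = np.array([[4], [3], [1], [7]])
updates = np.array([9, 10, 11, 12])
assert scatter_nd_reference(data, indices, updates).tolist() == [1, 11, 3, 10, 9, 6, 7, 12]
```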
@@ -1356,10 +1416,7 @@ ONNX_OPERATOR_SET_SCHEMA(
 1,
 OpSchema::Differentiable)
 .Output(0, "output", "Tensor of rank r >= 1.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
-.TypeConstraint(
-"T",
-OpSchema::all_tensor_types_with_bfloat(),
-"Constrain input and output types to any tensor type.")
+.TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to any tensor type.")
 .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
 propagateElemTypeFromInputToOutput(ctx, 0, 0);
 if (hasNInputShapes(ctx, 1)) {
@@ -1387,16 +1444,15 @@ In cases where `reduction` is set to "none", indices should not have duplicate e
 then indices[idx1] != indices[idx2]. For instance, in a 2-D tensor case, the update
 corresponding to the [i][j] entry is performed as below:
 ```
-
-
+output[indices[i][j]][j] = updates[i][j] if axis = 0,
+output[i][indices[i][j]] = updates[i][j] if axis = 1,
 ```
 When `reduction` is set to some reduction function `f`, the update corresponding to the [i][j] entry is performed as below:
 ```
-
-
+output[indices[i][j]][j] += f(output[indices[i][j]][j], updates[i][j]) if axis = 0,
+output[i][indices[i][j]] += f(output[i][indices[i][j]], updates[i][j]) if axis = 1,
 ```
-where the `f` is
-
+where the `f` is `+`, `*`, `max` or `min` as specified.

 This operator is the inverse of GatherElements. It is similar to Torch's Scatter operation.

@@ -1404,32 +1460,32 @@ This operator is the inverse of GatherElements. It is similar to Torch's Scatter

 Example 1:
 ```
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+data = [
+[0.0, 0.0, 0.0],
+[0.0, 0.0, 0.0],
+[0.0, 0.0, 0.0],
+]
+indices = [
+[1, 0, 2],
+[0, 2, 1],
+]
+updates = [
+[1.0, 1.1, 1.2],
+[2.0, 2.1, 2.2],
+]
+output = [
+[2.0, 1.1, 0.0]
+[1.0, 0.0, 2.2]
+[0.0, 2.1, 1.2]
+]
 ```
 Example 2:
 ```
-
-
-
-
-
+data = [[1.0, 2.0, 3.0, 4.0, 5.0]]
+indices = [[1, 3]]
+updates = [[1.1, 2.1]]
+axis = 1
+output = [[1.0, 1.1, 3.0, 2.1, 5.0]]
 ```
 )DOC";

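ScatterElements with `reduction="none"` matches numpy's `put_along_axis`; a check of Example 2 above, not part of the diff:

```python
import numpy as np

data = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]])
indices = np.array([[1, 3]])
updates = np.array([[1.1, 2.1]])

output = np.copy(data)
np.put_along_axis(output, indices, updates, axis=1)
assert output.tolist() == [[1.0, 1.1, 3.0, 2.1, 5.0]]
```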
@@ -1483,10 +1539,7 @@ ONNX_OPERATOR_SET_SCHEMA(
 true,
 1,
 OpSchema::Differentiable)
-.TypeConstraint(
-"T",
-OpSchema::all_tensor_types_with_bfloat(),
-"Input and output types can be of any tensor type.")
+.TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Input and output types can be of any tensor type.")
 .TypeConstraint("Tind", {"tensor(int32)", "tensor(int64)"}, "Constrain indices to integer types")
 .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
 propagateElemTypeFromInputToOutput(ctx, 0, 0);
@@ -1500,56 +1553,49 @@ Given `data` tensor of rank r >= 1, and `indices` tensor of rank q, gather
 entries of the axis dimension of `data` (by default outer-most one as axis=0) indexed by `indices`, and concatenates
 them in an output tensor of rank q + (r - 1).

-axis = 0
-
-Let
-k = indices[i_{0}, ..., i_{q-1}]
-Then
-output[i_{0}, ..., i_{q-1}, j_{0}, ..., j_{r-2}] = input[k , j_{0}, ..., j_{r-2}]
+If `axis = 0`, let `k = indices[i_{0}, ..., i_{q-1}]`
+then `output[i_{0}, ..., i_{q-1}, j_{0}, ..., j_{r-2}] = input[k , j_{0}, ..., j_{r-2}]`:

 ```
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+data = [
+[1.0, 1.2],
+[2.3, 3.4],
+[4.5, 5.7],
+]
+indices = [
+[0, 1],
+[1, 2],
+]
+output = [
+[
+[1.0, 1.2],
+[2.3, 3.4],
+],
+[
+[2.3, 3.4],
+[4.5, 5.7],
+],
+]
 ```
-axis = 1 :

-
-
-Then
-output[j_{0}, i_{0}, ..., i_{q-1}, j_{1}, ..., j_{r-2}] = input[j_{0}, k, j_{1}, ..., j_{r-2}]
+If `axis = 1`, let `k = indices[i_{0}, ..., i_{q-1}]`
+then `output[j_{0}, i_{0}, ..., i_{q-1}, j_{1}, ..., j_{r-2}] = input[j_{0}, k, j_{1}, ..., j_{r-2}]`:

 ```
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+data = [
+[1.0, 1.2, 1.9],
+[2.3, 3.4, 3.9],
+[4.5, 5.7, 5.9],
+]
+indices = [
+[0, 2],
+]
+axis = 1,
+output = [
+[[1.0, 1.9]],
+[[2.3, 3.9]],
+[[4.5, 5.9]],
+]
 ```
 )DOC";

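The Gather examples above can be reproduced with `numpy.take`; a small check, not part of the diff:

```python
import numpy as np

data = np.array([[1.0, 1.2], [2.3, 3.4], [4.5, 5.7]])
indices = np.array([[0, 1], [1, 2]])
assert np.take(data, indices, axis=0).tolist() == [
    [[1.0, 1.2], [2.3, 3.4]],
    [[2.3, 3.4], [4.5, 5.7]],
]

data2 = np.array([[1.0, 1.2, 1.9], [2.3, 3.4, 3.9], [4.5, 5.7, 5.9]])
indices2 = np.array([[0, 2]])
assert np.take(data2, indices2, axis=1).tolist() == [[[1.0, 1.9]], [[2.3, 3.9]], [[4.5, 5.9]]]
```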
@@ -1627,45 +1670,45 @@ Its output shape is the same as the shape of `indices` and consists of one value
 For instance, in the 3-D case (r = 3), the output produced is determined
 by the following equations:
 ```
-
-
-
+out[i][j][k] = input[index[i][j][k]][j][k] if axis = 0,
+out[i][j][k] = input[i][index[i][j][k]][k] if axis = 1,
+out[i][j][k] = input[i][j][index[i][j][k]] if axis = 2,
 ```

 This operator is also the inverse of ScatterElements. It is similar to Torch's gather operation.

 Example 1:
 ```
-
-
-
-
-
-
-
-
-
-
-
-
-
+data = [
+[1, 2],
+[3, 4],
+]
+indices = [
+[0, 0],
+[1, 0],
+]
+axis = 1
+output = [
+[1, 1],
+[4, 3],
+]
 ```
 Example 2:
 ```
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+data = [
+[1, 2, 3],
+[4, 5, 6],
+[7, 8, 9],
+]
+indices = [
+[1, 2, 0],
+[2, 0, 0],
+]
+axis = 0
+output = [
+[4, 8, 3],
+[7, 2, 3],
+]
 ```
 )DOC";

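GatherElements corresponds to `numpy.take_along_axis`; both examples above check out (not part of the diff):

```python
import numpy as np

data = np.array([[1, 2], [3, 4]])
indices = np.array([[0, 0], [1, 0]])
assert np.take_along_axis(data, indices, axis=1).tolist() == [[1, 1], [4, 3]]

data2 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
indices2 = np.array([[1, 2, 0], [2, 0, 0]])
assert np.take_along_axis(data2, indices2, axis=0).tolist() == [[4, 8, 3], [7, 2, 3]]
```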
@@ -1700,10 +1743,7 @@ ONNX_OPERATOR_SET_SCHEMA(
 true,
 1,
 OpSchema::Differentiable)
-.TypeConstraint(
-"T",
-OpSchema::all_tensor_types_with_bfloat(),
-"Constrain input and output types to any tensor type.")
+.TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to any tensor type.")
 .TypeConstraint("Tind", {"tensor(int32)", "tensor(int64)"}, "Constrain indices to integer types")
 .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
 propagateElemTypeFromInputToOutput(ctx, 0, 0);
@@ -1753,10 +1793,7 @@ ONNX_OPERATOR_SET_SCHEMA(
 true,
 1,
 OpSchema::Differentiable)
-.TypeConstraint(
-"T",
-OpSchema::all_tensor_types_with_bfloat(),
-"Constrain input and output types to all tensor types.")
+.TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
 .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
 propagateElemTypeFromInputToOutput(ctx, 0, 0);
 if (!hasNInputShapes(ctx, 1)) {
@@ -1781,9 +1818,8 @@ ONNX_OPERATOR_SET_SCHEMA(

 const auto& input_shape = ctx.getInputType(0)->tensor_type().shape();
 const auto input_ndim = input_shape.dim_size();
-
-
-});
+checkAxesRange(axes, input_ndim);
+adjustNegativeAxes(axes, input_ndim);

 for (int i = 0; i < input_ndim; ++i) {
 if (!input_shape.dim(i).has_dim_value() && axes_not_specified) {
@@ -1821,15 +1857,13 @@ static const char* Unsqueeze_ver13_doc = R"DOC(
 Insert single-dimensional entries to the shape of an input tensor (`data`).
 Takes one required input `axes` - which contains a list of dimension indices and this operator will insert a dimension of value `1` into the corresponding index of the output tensor (`expanded`).

-For example
-
-Unsqueeze(data, axes=[0, 4]) outputs a tensor (`expanded`) containing same data as `data` but with shape [1, 3, 4, 5, 1].
+For example, given an input tensor (`data`) of shape [3, 4, 5], then
+Unsqueeze(data, axes=[0, 4]) outputs a tensor (`expanded`) containing same data as `data` but with shape [1, 3, 4, 5, 1].

 The input `axes` should not contain any duplicate entries. It is an error if it contains duplicates.
 The rank of the output tensor (`output_rank`) is the rank of the input tensor (`data`) plus the number of values in `axes`.
 Each value in `axes` should be within the (inclusive) range [-output_rank , output_rank - 1].
 The order of values in `axes` does not matter and can come in any order.
-
 )DOC";

 ONNX_OPERATOR_SET_SCHEMA(
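The Unsqueeze example above matches `numpy.expand_dims`; a quick check, not part of the diff (passing a tuple for `axis` needs a reasonably recent numpy):

```python
import numpy as np

data = np.zeros((3, 4, 5))
expanded = np.expand_dims(data, axis=(0, 4))
assert expanded.shape == (1, 3, 4, 5, 1)
```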
@@ -1857,10 +1891,7 @@ ONNX_OPERATOR_SET_SCHEMA(
 true,
 1,
 OpSchema::Differentiable)
-.TypeConstraint(
-"T",
-OpSchema::all_tensor_types_with_bfloat(),
-"Constrain input and output types to all tensor types.")
+.TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
 .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
 propagateElemTypeFromInputToOutput(ctx, 0, 0);
 if (!hasNInputShapes(ctx, 1)) {
@@ -1873,31 +1904,14 @@ ONNX_OPERATOR_SET_SCHEMA(
 return;
 }
 axes = ParseData<int64_t>(axes_proto);
-
-// validate 'axes' for duplicate entries
-std::unordered_set<int64_t> unique_values;
-for (const auto val : axes) {
-if (unique_values.find(val) != unique_values.end()) {
-fail_shape_inference("'axes' attribute must not contain any duplicates");
-}
-unique_values.insert(val);
-}
-
 ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape();
 const auto& input_shape = ctx.getInputType(0)->tensor_type().shape();
 const auto input_ndim = input_shape.dim_size();
 const auto output_ndim = input_ndim + static_cast<int>(axes.size());
-
-
-
-
-if (axe < 0) {
-axe += output_ndim;
-}
-}
-
-// sort after correcting negative axes values (if any) in the previous
-// step
+checkAxesRange(axes, output_ndim);
+adjustNegativeAxes(axes, output_ndim);
+checkDuplicateAxes(axes, output_ndim);
+// sort after correcting negative axes values (if any)
 std::sort(axes.begin(), axes.end());

 int j = 0;
@@ -1951,10 +1965,7 @@ ONNX_OPERATOR_SET_SCHEMA(
 true,
 1,
 OpSchema::Differentiable)
-.TypeConstraint(
-"T",
-OpSchema::all_tensor_types_with_bfloat(),
-"Constrain input and output types to all tensor types.")
+.TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
 .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
 propagateElemTypeFromInputToOutput(ctx, 0, 0);
 auto blocksize = getAttribute(ctx, "blocksize", 0);
@@ -1987,26 +1998,22 @@ and width dimensions. By default, `mode` = `DCR`.
 In the DCR mode, elements along the depth dimension from the input tensor are rearranged in the
 following order: depth, column, and then row. The output y is computed from the input x as below:

+```
 b, c, h, w = x.shape
-
 tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
-
 tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
-
 y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])
-
+```

 In the CRD mode, elements along the depth dimension from the input tensor are rearranged in the
 following order: column, row, and the depth. The output y is computed from the input x as below:

+```
 b, c, h, w = x.shape
-
 tmp = np.reshape(x, [b, c // (blocksize ** 2), blocksize, blocksize, h, w])
-
 tmp = np.transpose(tmp, [0, 1, 4, 2, 5, 3])
-
 y = np.reshape(tmp, [b, c // (blocksize ** 2), h * blocksize, w * blocksize])
-
+```
 )DOC";

 ONNX_OPERATOR_SET_SCHEMA(
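The DCR pseudo-code quoted above runs as-is once a concrete input and `blocksize` are chosen; a sketch, not part of the diff:

```python
import numpy as np

# DepthToSpace in DCR mode for blocksize=2 on a (1, 8, 2, 2) input
x = np.arange(1 * 8 * 2 * 2).reshape(1, 8, 2, 2).astype(np.float32)
b, c, h, w = x.shape
blocksize = 2
tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])
assert y.shape == (1, 2, 4, 4)
```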
@@ -2039,10 +2046,7 @@ ONNX_OPERATOR_SET_SCHEMA(
 true,
 1,
 OpSchema::Differentiable)
-.TypeConstraint(
-"T",
-OpSchema::all_tensor_types_with_bfloat(),
-"Constrain input and output types to all tensor types.")
+.TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
 .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
 propagateElemTypeFromInputToOutput(ctx, 0, 0);
 auto blocksize = getAttribute(ctx, "blocksize", 0);
@@ -2099,10 +2103,7 @@ ONNX_OPERATOR_SET_SCHEMA(
 true,
 1,
 OpSchema::Differentiable)
-.TypeConstraint(
-"T",
-OpSchema::all_tensor_types_with_bfloat(),
-"Constrain input and output types to all tensor types.")
+.TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
 .TypeConstraint("T1", {"tensor(int64)"}, "Constrain repeat's type to int64 tensors.")
 .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
 // Type inference
@@ -2184,51 +2185,81 @@ ONNX_OPERATOR_SET_SCHEMA(
|
|
|
2184
2185
|
.SetDoc(Upsample_ver10_doc)
|
|
2185
2186
|
.TypeAndShapeInferenceFunction([](InferenceContext& ctx) { resizeShapeInference_opset7_to_10(ctx); }));
|
|
2186
2187
|
|
|
2187
|
-
static const char*
|
|
2188
|
+
static const char* Resize_ver19_doc = R"DOC(
|
|
2188
2189
|
Resize the input tensor. In general, it calculates every value in the output tensor as a weighted average of neighborhood (a.k.a. sampling locations) in the input tensor.
|
|
2189
|
-
Each dimension value of the output tensor is:
|
|
2190
|
-
|
|
2190
|
+
Each dimension value of the output tensor is:
|
|
2191
|
+
```
|
|
2192
|
+
output_dimension = floor(input_dimension * (roi_end - roi_start) * scale)
|
|
2193
|
+
```
|
|
2191
2194
|
if input \"sizes\" is not specified.
|
|
2192
2195
|
)DOC";
|
|
2193
2196
|
|
|
2194
|
-
static const char*
|
|
2195
|
-
This attribute describes how to transform the coordinate in the resized tensor to the coordinate in the original tensor.
|
|
2197
|
+
static const char* Resize_ver19_attr_coordinate_transformation_mode_doc = R"DOC(
|
|
2198
|
+
This attribute describes how to transform the coordinate in the resized tensor to the coordinate in the original tensor.
|
|
2196
2199
|
|
|
2197
2200
|
The coordinate of each dimension is transformed individually. Let's describe a case using axis x as an example.
|
|
2198
|
-
Denote x_resized as the coordinate of axis x in the resized tensor,
|
|
2201
|
+
Denote `x_resized` as the coordinate of axis x in the resized tensor,
|
|
2202
|
+
`x_original` as the coordinate of axis x in the original tensor,
|
|
2203
|
+
`length_original` as the length of the original tensor in axis x,
|
|
2204
|
+
`length_resized` as the length of the resized tensor in axis x,
|
|
2205
|
+
`scale = length_resized / length_original`,
|
|
2206
|
+
`output_width` the target length on the axis x which can be a fractional number when it is calculated out of a scale factor,
|
|
2207
|
+
and `output_width_int` the effective output width as an integer.
|
|
2208
|
+
|
|
2209
|
+
if coordinate_transformation_mode is `"half_pixel"`,
|
|
2210
|
+
```
|
|
2211
|
+
x_original = (x_resized + 0.5) / scale - 0.5
|
|
2212
|
+
```
|
|
2199
2213
|
|
|
2200
|
-
if coordinate_transformation_mode is `"
|
|
2201
|
-
|
|
2214
|
+
if coordinate_transformation_mode is `"half_pixel_symmetric"`,
|
|
2215
|
+
```
|
|
2216
|
+
adjustment = output_width_int / output_width
|
|
2217
|
+
center = input_width / 2
|
|
2218
|
+
offset = center * (1 - adjustment)
|
|
2219
|
+
x_ori = offset + (x + 0.5) / scale - 0.5
|
|
2220
|
+
```
|
|
2202
2221
|
|
|
2203
|
-
if coordinate_transformation_mode is `"pytorch_half_pixel"`,
|
|
2204
|
-
|
|
2222
|
+
if coordinate_transformation_mode is `"pytorch_half_pixel"`,
|
|
2223
|
+
```
|
|
2224
|
+
x_original = length_resized > 1 ? (x_resized + 0.5) / scale - 0.5 : 0
|
|
2225
|
+
```
|
|
2205
2226
|
|
|
2206
|
-
if coordinate_transformation_mode is `"align_corners"`,
|
|
2207
|
-
|
|
2227
|
+
if coordinate_transformation_mode is `"align_corners"`,
|
|
2228
|
+
```
|
|
2229
|
+
x_original = x_resized * (length_original - 1) / (length_resized - 1)
|
|
2230
|
+
```
|
|
2208
2231
|
|
|
2209
|
-
if coordinate_transformation_mode is `"asymmetric"`,
|
|
2210
|
-
|
|
2232
|
+
if coordinate_transformation_mode is `"asymmetric"`,
|
|
2233
|
+
```
|
|
2234
|
+
x_original = x_resized / scale
|
|
2235
|
+
```
|
|
2211
2236
|
|
|
2212
|
-
if coordinate_transformation_mode is `"tf_crop_and_resize"`,
|
|
2213
|
-
|
|
2237
|
+
if coordinate_transformation_mode is `"tf_crop_and_resize"`,
|
|
2238
|
+
```
|
|
2239
|
+
x_original = length_resized > 1 ? start_x * (length_original - 1) + x_resized * (end_x - start_x) * (length_original - 1) / (length_resized - 1) : 0.5 * (start_x + end_x) * (length_original - 1)
|
|
2240
|
+
```
|
|
2214
2241
|
.)DOC";
 
-static const char*
+static const char* Resize_ver19_attr_keep_aspect_ratio_policy_doc = R"DOC(
 This attribute describes how to interpret the `sizes` input with regard to keeping the original aspect ratio of the input, and it is not applicable when
-the `scales` input is used.
+the `scales` input is used.
 
-Given a set of `sizes`, associated with a subset of `axes` (explicitly provided or default), and assuming `d = axes[i]`, with `i` being the index of the provided `sizes`.
+Given a set of `sizes`, associated with a subset of `axes` (explicitly provided or default), and assuming `d = axes[i]`, with `i` being the index of the provided `sizes`.
 
-If `keep_aspect_ratio_policy` is `"stretch"`, the original aspect ratio is disregarded, and the input is resized to the specified size:
-`out_size[d] = sizes[i]`
+If `keep_aspect_ratio_policy` is `"stretch"`, the original aspect ratio is disregarded, and the input is resized to the specified size:
+`out_size[d] = sizes[i]`
 
-If `keep_aspect_ratio_policy` is `"not_larger"`, the sizes are adjusted so that no extent of the output is larger than the specified size, while keeping the original aspect ratio:
-
-
+If `keep_aspect_ratio_policy` is `"not_larger"`, the sizes are adjusted so that no extent of the output is larger than the specified size, while keeping the original aspect ratio:
+```
+scale = Min(sizes[i] / in_size[d])
+out_size[d] = round_int(scale * in_size[i])
+```
 
-If `keep_aspect_ratio_policy` is `"not_smaller"`, the sizes are adjusted so that no extent of the output is smaller than the specified size, while keeping the original aspect ratio:
-
-
+If `keep_aspect_ratio_policy` is `"not_smaller"`, the sizes are adjusted so that no extent of the output is smaller than the specified size, while keeping the original aspect ratio:
+```
+scale = Max(sizes[i] / in_size[d])
+out_size[d] = round_int(scale * in_size[i])
+```
 
 For non-resizable axes (those not specified in `axes`), the output size will be equal to the input size.
 
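A small Python sketch (not part of the package diff) of how the `keep_aspect_ratio_policy` rules above resolve output sizes; the helper name `resolve_out_sizes` is hypothetical and round-half-up is used as a stand-in for the `round_int` mentioned in the doc.

```python
import math

def resolve_out_sizes(in_size, sizes, axes, policy):
    """Apply the stretch / not_larger / not_smaller rules quoted above."""
    out_size = list(in_size)
    if policy == "stretch":
        for i, d in enumerate(axes):
            out_size[d] = sizes[i]
        return out_size
    ratios = [sizes[i] / in_size[d] for i, d in enumerate(axes)]
    scale = min(ratios) if policy == "not_larger" else max(ratios)  # "not_smaller"
    for d in axes:
        out_size[d] = int(math.floor(scale * in_size[d] + 0.5))  # nearest-integer rounding
    return out_size

# e.g. resolve_out_sizes([30, 40], sizes=[10, 20], axes=[0, 1], policy="not_larger")
# -> [10, 13]: the smaller ratio (10/30) is applied to both axes.
```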
@@ -2236,7 +2267,7 @@ Note: `round_int` stands for computing the nearest integer value, rounding halfw
 
 ONNX_OPERATOR_SET_SCHEMA(
     Resize,
-
+    19,
     OpSchema()
         .Attr(
             "mode",
@@ -2260,7 +2291,7 @@ ONNX_OPERATOR_SET_SCHEMA(
             static_cast<int64_t>(0))
         .Attr(
             "coordinate_transformation_mode",
-
+            Resize_ver19_attr_coordinate_transformation_mode_doc,
             AttributeProto::STRING,
             std::string("half_pixel"))
         .Attr(
@@ -2290,7 +2321,7 @@ ONNX_OPERATOR_SET_SCHEMA(
             false)
         .Attr(
             "keep_aspect_ratio_policy",
-
+            Resize_ver19_attr_keep_aspect_ratio_policy_doc,
             AttributeProto::STRING,
             std::string("stretch"))
         .Input(0, "X", "N-D tensor", "T1", OpSchema::Single, true, 1, OpSchema::Differentiable)
@@ -2330,14 +2361,14 @@ ONNX_OPERATOR_SET_SCHEMA(
         .Output(0, "Y", "N-D tensor after resizing", "T1", OpSchema::Single, true, 1, OpSchema::Differentiable)
         .TypeConstraint(
             "T1",
-            OpSchema::
+            OpSchema::all_tensor_types_ir4(),
             "Constrain input 'X' and output 'Y' to all tensor types.")
         .TypeConstraint(
             "T2",
             {"tensor(float16)", "tensor(float)", "tensor(double)"},
             "Constrain roi type to float or double.")
-        .SetDoc(
-        .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
+        .SetDoc(Resize_ver19_doc)
+        .TypeAndShapeInferenceFunction([](InferenceContext& ctx) { resizeShapeInference_opset18_to_19(ctx); }));
 
 static const char* GridSample_ver16_doc = R"DOC(
 Given an input `X` and a flow-field `grid`, computes the output `Y` using `X` values and pixel locations from `grid`.
@@ -2451,7 +2482,7 @@ ONNX_OPERATOR_SET_SCHEMA(
 
 ONNX_OPERATOR_SET_SCHEMA(
     Identity,
-
+    19,
     OpSchema()
         .SetDoc("Identity operator")
         .Input(0, "input", "Input tensor", "V", OpSchema::Single, true, 1, OpSchema::Differentiable)
@@ -2459,7 +2490,7 @@ ONNX_OPERATOR_SET_SCHEMA(
         .TypeConstraint(
             "V",
             []() {
-                auto t = OpSchema::
+                auto t = OpSchema::all_tensor_types_ir9();
                 auto s = OpSchema::all_tensor_sequence_types();
                 auto o = OpSchema::all_optional_types();
                 t.insert(t.end(), s.begin(), s.end());
@@ -2513,13 +2544,13 @@ ONNX_OPERATOR_SET_SCHEMA(
         .TypeConstraint("T1", {"tensor(bool)"}, "Constrain to boolean tensors.")
         .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
             propagateElemTypeFromInputToOutput(ctx, 0, 0);
+            auto axisAttr = ctx.getAttribute("axis");
             if (hasInputShape(ctx, 0)) {
                 const TensorShapeProto& indices_shape = ctx.getInputType(0)->tensor_type().shape();
                 int r = indices_shape.dim_size();
                 if (r < 1) {
                     fail_shape_inference("Indices tensor must have rank >= 1");
                 }
-                auto axisAttr = ctx.getAttribute("axis");
                 if (axisAttr) {
                     int axis = static_cast<int>(axisAttr->i());
                     if (axis < -r || axis >= r) {
@@ -2528,8 +2559,18 @@ ONNX_OPERATOR_SET_SCHEMA(
                     if (axis < 0) {
                         axis += r;
                     }
+                    TensorShapeProto* shape = ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape();
+                    for (int i = 0; i < indices_shape.dim_size(); i++) {
+                        auto* dim = shape->add_dim();
+                        if (i != axis) {
+                            *dim = indices_shape.dim(i);
+                        }
+                    }
                 }
             }
+            if (!axisAttr) {
+                updateOutputShape(ctx, 0, {Dim()});
+            }
         }));
 
 static const char* OneHot_ver11_doc = R"DOC(
@@ -2745,19 +2786,11 @@ with three parameters.
 
 )DOC";
 
-static const char* Where_ver16_history = R"DOC(
-
-**History**
-- Version 16 adds bfloat16 to the types allowed (for the second and third parameter).
-)DOC";
-
 ONNX_OPERATOR_SET_SCHEMA(
     Where,
     16,
     OpSchema()
-        .SetDoc(
-            GET_OP_DOC_STR(std::string(Where_ver16_doc) + GenerateBroadcastingDocMul()) +
-            std::string(Where_ver16_history))
+        .SetDoc(GET_OP_DOC_STR(std::string(Where_ver16_doc) + GenerateBroadcastingDocMul()))
         .Input(
             0,
             "condition",
@@ -2797,7 +2830,7 @@ ONNX_OPERATOR_SET_SCHEMA(
         .TypeConstraint("B", {"tensor(bool)"}, "Constrain to boolean tensors.")
         .TypeConstraint(
             "T",
-            OpSchema::
+            OpSchema::all_tensor_types_ir4(),
             "Constrain input and output types to all tensor types (including bfloat).")
         .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
             propagateElemTypeFromInputToOutput(ctx, 1, 0);
@@ -2818,7 +2851,7 @@ ONNX_OPERATOR_SET_SCHEMA(
         .SetDoc(NonZero_ver9_doc)
         .Input(0, "X", "input", "T", OpSchema::Single, true, 1, OpSchema::NonDifferentiable)
         .Output(0, "Y", "output", "tensor(int64)", OpSchema::Single, true, 1, OpSchema::NonDifferentiable)
-        .TypeConstraint("T", OpSchema::
+        .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain to all tensor types.")
         .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
             updateOutputElemType(ctx, 0, TensorProto::INT64);
             TensorShapeProto output_shape;
@@ -2924,67 +2957,89 @@ Outputs are either sorted in ascending order or optionally in the order of the f
 https://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html
 
 Example 1:
-
-
-
-
-
-
-
+```
+input_X = [2, 1, 1, 3, 4, 3]
+attribute_sorted = 0
+attribute_axis = None
+output_Y = [2, 1, 3, 4]
+output_indices = [0, 1, 3, 4]
+output_inverse_indices = [0, 1, 1, 2, 3, 2]
+output_counts = [1, 2, 2, 1]
+```
 
 Example 2:
-
-
-
-
-
-
-
+```
+input_X = [[1, 3], [2, 3]]
+attribute_sorted = 1
+attribute_axis = None
+output_Y = [1, 2, 3]
+output_indices = [0, 2, 1]
+output_inverse_indices = [0, 2, 1, 2]
+output_counts = [1, 1, 2]
+```
 
 Example 3:
-
-
-
-
-
-
-
+```
+input_X = [[1, 0, 0], [1, 0, 0], [2, 3, 4]]
+attribute_sorted = 1
+attribute_axis = 0
+output_Y = [[1, 0, 0], [2, 3, 4]]
+output_indices = [0, 2]
+output_inverse_indices = [0, 0, 1]
+output_counts = [2, 1]
+```
 
 Example 4:
-
-
-
-
-
-
+```
+input_x = [[[1., 1.], [0., 1.], [2., 1.], [0., 1.]],
+           [[1., 1.], [0., 1.], [2., 1.], [0., 1.]]]
+attribute_sorted = 1
+attribute_axis = 1
+```
 
-
-
-
-
-
+intermediate data are presented below for better understanding:
+there are 4 subtensors sliced along axis 1 of input_x (shape = (2, 4, 2)):
+```
+A: [[1, 1], [1, 1]],
+   [[0, 1], [0, 1]],
+   [[2, 1], [2, 1]],
+   [[0, 1], [0, 1]].
+```
 
-
-
-
-
+there are 3 unique subtensors:
+```
+[[1, 1], [1, 1]],
+[[0, 1], [0, 1]],
+[[2, 1], [2, 1]].
+```
 
-
-
-
-
+sorted unique subtensors:
+```
+B: [[0, 1], [0, 1]],
+   [[1, 1], [1, 1]],
+   [[2, 1], [2, 1]].
+```
 
-
-
-
+output_Y is constructed from B:
+```
+[[[0. 1.], [1. 1.], [2. 1.]],
+ [[0. 1.], [1. 1.], [2. 1.]]]
+```
 
-
-
+output_indices is to map from B to A:
+```
+[1, 0, 2]
+```
 
-
-
+output_inverse_indices is to map from A to B:
+```
+[1, 0, 2, 0]
+```
 
-
+output_counts:
+```
+[2, 1, 1]
+```
 )DOC";
 
 ONNX_OPERATOR_SET_SCHEMA(
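Example 2 above can be checked against `numpy.unique`, which the doc string itself points to. The snippet below is only an illustration and is not part of the package diff.

```python
import numpy as np

x = np.array([[1, 3], [2, 3]])
y, indices, inverse, counts = np.unique(
    x, return_index=True, return_inverse=True, return_counts=True
)
print(y)        # [1 2 3]
print(indices)  # [0 2 1]
print(inverse)  # [0 2 1 2]  (recent NumPy versions may reshape this to x's shape)
print(counts)   # [1 1 2]
```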
@@ -3246,10 +3301,7 @@ ONNX_OPERATOR_SET_SCHEMA(
             true,
             1,
             OpSchema::Differentiable)
-        .TypeConstraint(
-            "T",
-            OpSchema::all_tensor_types_with_bfloat(),
-            "Constrain input and output types to any tensor type.")
+        .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to any tensor type.")
         .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
             // Type inference
             propagateElemTypeFromInputToOutput(ctx, 0, 0);
@@ -3296,7 +3348,7 @@ ONNX_OPERATOR_SET_SCHEMA(
             }
         }));
 
-static const char*
+static const char* Pad_ver19_doc = R"DOC(
 Given a tensor containing the data to be padded (`data`), a tensor containing the number of start and end pad values for axis (`pads`), (optionally) a `mode`, and (optionally) `constant_value`,
 a padded tensor (`output`) is generated.
 
@@ -3308,209 +3360,102 @@ The three supported `modes` are (similar to corresponding modes supported by `nu
 
 3) `edge` - pads with the edge values of array
 
+4) `wrap` - wrap-around padding as if the data tensor forms a torus
+
 
 Example 1 (`constant` mode):
-Insert 0 pads to the beginning of the second dimension.
 
-
-[
-    [1.0, 1.2],
-    [2.3, 3.4],
-    [4.5, 5.7],
-]
+Insert 0 pads to the beginning of the second dimension.
 
-
+```
+data = [
+    [1.0, 1.2],
+    [2.3, 3.4],
+    [4.5, 5.7],
+]
 
-
+pads = [0, 2, 0, 0]
 
-
+mode = 'constant'
 
-
-[
-    [0.0, 0.0, 1.0, 1.2],
-    [0.0, 0.0, 2.3, 3.4],
-    [0.0, 0.0, 4.5, 5.7],
-]
+constant_value = 0.0
 
+output = [
+    [0.0, 0.0, 1.0, 1.2],
+    [0.0, 0.0, 2.3, 3.4],
+    [0.0, 0.0, 4.5, 5.7],
+]
+```
 
 Example 2 (`reflect` mode):
-data =
-[
-    [1.0, 1.2],
-    [2.3, 3.4],
-    [4.5, 5.7],
-]
 
-
+```
+data = [
+    [1.0, 1.2],
+    [2.3, 3.4],
+    [4.5, 5.7],
+]
 
-
+pads = [0, 2, 0, 0]
 
-
-[
-    [1.0, 1.2, 1.0, 1.2],
-    [2.3, 3.4, 2.3, 3.4],
-    [4.5, 5.7, 4.5, 5.7],
-]
+mode = 'reflect'
 
+output = [
+    [1.0, 1.2, 1.0, 1.2],
+    [2.3, 3.4, 2.3, 3.4],
+    [4.5, 5.7, 4.5, 5.7],
+]
+```
 
 Example 3 (`edge` mode):
-data =
-[
-    [1.0, 1.2],
-    [2.3, 3.4],
-    [4.5, 5.7],
-]
 
-
+```
+data = [
+    [1.0, 1.2],
+    [2.3, 3.4],
+    [4.5, 5.7],
+]
 
-
+pads = [0, 2, 0, 0]
 
-
-
-
-
-
-
+mode = 'edge'
+
+output = [
+    [1.0, 1.0, 1.0, 1.2],
+    [2.3, 2.3, 2.3, 3.4],
+    [4.5, 4.5, 4.5, 5.7],
+]
+```
 
+Example 4 (`wrap` mode):
+
+```
+data = [
+    [1.0, 1.2],
+    [2.3, 3.4],
+    [4.5, 5.7],
+]
+
+pads = [2, 1, 1, 1]
+
+mode = 'wrap'
+
+output = [
+    [3.4, 2.3, 3.4, 2.3],
+    [5.7, 4.5, 5.7, 4.5],
+    [1.2, 1.0, 1.2, 1.0],
+    [3.4, 2.3, 3.4, 2.3],
+    [5.7, 4.5, 5.7, 4.5],
+    [1.2, 1.0, 1.2, 1.0],
+]
+```
 )DOC";
 
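The new `wrap` mode example above can be checked with `numpy.pad`. The sketch below is illustrative only (not part of the package diff); note that the ONNX `pads` layout `[x1_begin, x2_begin, ..., x1_end, x2_end, ...]` has to be regrouped into NumPy's per-axis `(begin, end)` pairs.

```python
import numpy as np

data = np.array([[1.0, 1.2], [2.3, 3.4], [4.5, 5.7]])
pads = [2, 1, 1, 1]                                   # ONNX layout for a 2-D tensor
pad_width = [(pads[0], pads[2]), (pads[1], pads[3])]  # per-axis (begin, end)
output = np.pad(data, pad_width, mode="wrap")
# output[0] == [3.4, 2.3, 3.4, 2.3], matching the expected output in Example 4 above
```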
 ONNX_OPERATOR_SET_SCHEMA(
     Pad,
-
-    OpSchema()
-
-            "mode",
-            "Supported modes: `constant`(default), `reflect`, `edge`",
-            AttributeProto::STRING,
-            std::string("constant"))
-        .SetDoc(Pad_ver18_doc)
-        .Input(0, "data", "Input tensor.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
-        .Input(
-            1,
-            "pads",
-            "Tensor of integers indicating the number of padding elements to add or remove (if negative) "
-            "at the beginning and end of each axis. For 2D input tensor, it is the number of pixels. "
-            "`pads` should be a 1D tensor of shape [2 * num_axes] where `num_axes` refers to the number "
-            "of elements in the `axes` input or the input rank if `axes` are not provided explicitly. "
-            "`pads` format should be: [x1_begin, x2_begin, ..., x1_end, x2_end,...], "
-            "where xi_begin is the number of pad values added at the beginning of axis `axes[i]` and "
-            "xi_end, the number of pad values added at the end of axis `axes[i]`.",
-            "tensor(int64)",
-            OpSchema::Single,
-            true,
-            1,
-            OpSchema::NonDifferentiable)
-        .Input(
-            2,
-            "constant_value",
-            "(Optional) A scalar value to be used if the mode chosen is `constant` (by default it is 0, "
-            "empty string or False).",
-            "T",
-            OpSchema::Optional,
-            true,
-            1,
-            OpSchema::NonDifferentiable)
-        .Input(
-            3,
-            "axes",
-            "1-D tensor of axes that `pads` apply to. Negative value means counting dimensions "
-            "from the back. Accepted range is [-r, r-1] where r = rank(data). Behavior is undefined if an "
-            "axis is repeated. If not provided, all axes are assumed (`[0, 1, ..., input_rank-1]`).",
-            "Tind",
-            OpSchema::Optional,
-            true,
-            1,
-            OpSchema::NonDifferentiable)
-
-        .Output(0, "output", "Tensor after padding.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
-        .TypeConstraint(
-            "T",
-            OpSchema::all_tensor_types_with_bfloat(),
-            "Constrain input and output types to all tensor types.")
-        .TypeConstraint("Tind", {"tensor(int32)", "tensor(int64)"}, "Constrain indices to integer types")
-        .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
-            // Type inference
-            propagateElemTypeFromInputToOutput(ctx, 0, 0);
-            // Shape inference needs the input data shape
-            if (!hasNInputShapes(ctx, 1)) {
-                return;
-            }
-            const auto& input_shape = ctx.getInputType(0)->tensor_type().shape();
-            const auto input_rank = input_shape.dim_size();
-
-            std::vector<int64_t> axes;
-            if (hasInputShape(ctx, 3)) { //'axes' input
-                auto axes_initializer = ctx.getInputData(3);
-                if (axes_initializer == nullptr)
-                    return; // can't do shape inference then
-
-                axes = ParseData<int64_t>(axes_initializer);
-
-                std::vector<bool> tmp(input_rank, false);
-                for (auto axis : axes) {
-                    if (tmp[axis]) {
-                        fail_shape_inference("Repeated axis: ", axis);
-                    }
-                    tmp[axis] = true;
-                }
-            } else {
-                axes.resize(input_rank);
-                std::iota(axes.begin(), axes.end(), 0);
-            }
-
-            int num_axes = axes.size();
-            if (num_axes > input_rank) {
-                fail_shape_inference("Too many axes provided");
-            }
-
-            auto* output_shape = ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape();
-
-            // Populating default dims
-            std::vector<TensorShapeProto_Dimension*> out_dims(input_rank);
-            for (int i = 0; i < input_rank; ++i) {
-                out_dims[i] = output_shape->add_dim();
-            }
-
-            // Shape Inference if
-            // 1. 'pads' are available.
-            // and 2. 'axes' are available, or default.
-            const TensorProto* pads_initializer = ctx.getInputData(1);
-            if (nullptr != pads_initializer && !axes.empty()) {
-                if (pads_initializer->dims_size() != 1 || pads_initializer->data_type() != TensorProto::INT64) {
-                    fail_shape_inference("'pads' input must be a 1D (shape: [2 * num_axes]) tensor of type int64");
-                }
-
-                const auto& pads_data = ParseData<int64_t>(pads_initializer);
-                if (pads_data.size() != static_cast<size_t>(2 * num_axes)) {
-                    fail_shape_inference(
-                        "Pads has incorrect number of values. Expected 2 * ",
-                        num_axes,
-                        " values. Got ",
-                        pads_data.size(),
-                        " values.");
-                }
-
-                // Set default dim values
-                for (int i = 0; i < input_rank; ++i) {
-                    const auto& input_dim = input_shape.dim(i);
-                    if (input_dim.has_dim_value()) {
-                        out_dims[i]->set_dim_value(input_dim.dim_value());
-                    }
-                }
-
-                for (int i = 0; i < num_axes; ++i) {
-                    auto axis = axes[i];
-                    const auto& input_dim = input_shape.dim(axis);
-                    auto& out_dim = *out_dims[axis];
-                    auto total_pad = pads_data[i] + pads_data[num_axes + i];
-                    if (input_dim.has_dim_value()) {
-                        out_dim.set_dim_value(input_dim.dim_value() + total_pad);
-                    } else if (total_pad == 0) {
-                        out_dim = input_dim;
-                    }
-                }
-            }
-        }));
+    19,
+    OpSchema().FillUsing(
+        PadDocGenerator(Pad_ver19_doc, "Supported modes: `constant`(default), `reflect`, `edge`, `wrap`")));
 
 static const char* Trilu_ver14_doc = R"DOC(
 Given a 2-D matrix or batches of 2-D matrices, returns the upper or lower triangular part of the tensor(s).
@@ -3565,10 +3510,7 @@ ONNX_OPERATOR_SET_SCHEMA(
             true,
             1,
             OpSchema::Differentiable)
-        .TypeConstraint(
-            "T",
-            OpSchema::all_tensor_types_with_bfloat(),
-            "Constrain input and output types to all tensor types.")
+        .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
         .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
             // Type inference
             propagateElemTypeFromInputToOutput(ctx, 0, 0);
@@ -3626,10 +3568,7 @@ ONNX_OPERATOR_SET_SCHEMA(
             "Behavior is undefined if an axis is repeated.",
             AttributeProto::INTS,
             OPTIONAL_VALUE)
-        .TypeConstraint(
-            "T",
-            OpSchema::all_tensor_types_with_bfloat(),
-            "Constrain input and output types to all tensor types.")
+        .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
         .TypeConstraint("Tind", {"tensor(int32)", "tensor(int64)"}, "Constrain indices to integer types")
         .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
             if (ctx.getNumInputs() != 2) {
@@ -3668,14 +3607,9 @@ ONNX_OPERATOR_SET_SCHEMA(
             std::vector<int64_t> axes;
             if (axes_attr) {
                 axes = RetrieveValues<int64_t>(*axes_attr);
-
-
-
-                    if (tmp[axis]) {
-                        fail_shape_inference("Repeated axis: ", axis);
-                    }
-                    tmp[axis] = true;
-                }
+                checkAxesRange(axes, input_rank);
+                adjustNegativeAxes(axes, input_rank);
+                checkDuplicateAxes(axes, input_rank);
             } else {
                 axes.resize(input_rank);
                 std::iota(axes.begin(), axes.end(), 0);
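For orientation, the `checkAxesRange` / `adjustNegativeAxes` / `checkDuplicateAxes` helpers introduced in this hunk replace the inline duplicate-axis check and roughly correspond to the axis normalization sketched below in Python. The sketch is an assumption based on the helper names only and is not part of the package diff.

```python
def normalize_axes(axes, rank):
    """Validate range, map negative axes to positive, and reject duplicates."""
    for a in axes:
        if a < -rank or a >= rank:                      # checkAxesRange
            raise ValueError(f"axis {a} out of range for rank {rank}")
    axes = [a + rank if a < 0 else a for a in axes]     # adjustNegativeAxes
    if len(set(axes)) != len(axes):                     # checkDuplicateAxes
        raise ValueError("repeated axis")
    return axes
```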