onnx-1.13.0-cp310-cp310-win_amd64.whl → onnx-1.14.0-cp310-cp310-win_amd64.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of onnx has been flagged as possibly problematic.
- onnx/__init__.py +116 -70
- onnx/backend/__init__.py +2 -0
- onnx/backend/base.py +3 -0
- onnx/backend/sample/__init__.py +2 -0
- onnx/backend/sample/ops/__init__.py +8 -6
- onnx/backend/sample/ops/abs.py +1 -1
- onnx/backend/test/__init__.py +4 -1
- onnx/backend/test/case/__init__.py +4 -2
- onnx/backend/test/case/base.py +2 -0
- onnx/backend/test/case/model/__init__.py +8 -9
- onnx/backend/test/case/model/expand.py +4 -3
- onnx/backend/test/case/model/gradient.py +4 -3
- onnx/backend/test/case/model/sequence.py +4 -3
- onnx/backend/test/case/model/shrink.py +4 -4
- onnx/backend/test/case/model/sign.py +4 -3
- onnx/backend/test/case/model/single-relu.py +4 -4
- onnx/backend/test/case/model/stringnormalizer.py +4 -3
- onnx/backend/test/case/node/__init__.py +18 -12
- onnx/backend/test/case/node/abs.py +4 -3
- onnx/backend/test/case/node/acos.py +4 -3
- onnx/backend/test/case/node/acosh.py +4 -3
- onnx/backend/test/case/node/adagrad.py +4 -3
- onnx/backend/test/case/node/adam.py +4 -3
- onnx/backend/test/case/node/add.py +4 -3
- onnx/backend/test/case/node/ai_onnx_ml/__init__.py +0 -0
- onnx/backend/test/case/node/ai_onnx_ml/array_feature_extractor.py +30 -0
- onnx/backend/test/case/node/ai_onnx_ml/binarizer.py +27 -0
- onnx/backend/test/case/node/and.py +4 -3
- onnx/backend/test/case/node/argmax.py +6 -7
- onnx/backend/test/case/node/argmin.py +6 -7
- onnx/backend/test/case/node/asin.py +4 -3
- onnx/backend/test/case/node/asinh.py +4 -3
- onnx/backend/test/case/node/atan.py +4 -3
- onnx/backend/test/case/node/atanh.py +4 -3
- onnx/backend/test/case/node/averagepool.py +43 -4
- onnx/backend/test/case/node/batchnorm.py +4 -3
- onnx/backend/test/case/node/bernoulli.py +8 -7
- onnx/backend/test/case/node/bitshift.py +4 -3
- onnx/backend/test/case/node/bitwiseand.py +13 -11
- onnx/backend/test/case/node/bitwisenot.py +8 -6
- onnx/backend/test/case/node/bitwiseor.py +13 -11
- onnx/backend/test/case/node/bitwisexor.py +13 -11
- onnx/backend/test/case/node/blackmanwindow.py +4 -4
- onnx/backend/test/case/node/cast.py +218 -8
- onnx/backend/test/case/node/castlike.py +106 -12
- onnx/backend/test/case/node/ceil.py +4 -3
- onnx/backend/test/case/node/celu.py +4 -3
- onnx/backend/test/case/node/center_crop_pad.py +26 -3
- onnx/backend/test/case/node/clip.py +4 -3
- onnx/backend/test/case/node/col2im.py +5 -4
- onnx/backend/test/case/node/compress.py +4 -3
- onnx/backend/test/case/node/concat.py +4 -3
- onnx/backend/test/case/node/constant.py +4 -3
- onnx/backend/test/case/node/constantofshape.py +4 -3
- onnx/backend/test/case/node/conv.py +4 -6
- onnx/backend/test/case/node/convinteger.py +4 -5
- onnx/backend/test/case/node/convtranspose.py +4 -3
- onnx/backend/test/case/node/cos.py +4 -3
- onnx/backend/test/case/node/cosh.py +4 -3
- onnx/backend/test/case/node/cumsum.py +4 -3
- onnx/backend/test/case/node/deformconv.py +170 -0
- onnx/backend/test/case/node/depthtospace.py +4 -3
- onnx/backend/test/case/node/dequantizelinear.py +46 -3
- onnx/backend/test/case/node/det.py +4 -3
- onnx/backend/test/case/node/dft.py +4 -4
- onnx/backend/test/case/node/div.py +4 -3
- onnx/backend/test/case/node/dropout.py +4 -4
- onnx/backend/test/case/node/dynamicquantizelinear.py +4 -3
- onnx/backend/test/case/node/einsum.py +4 -4
- onnx/backend/test/case/node/elu.py +4 -3
- onnx/backend/test/case/node/equal.py +28 -3
- onnx/backend/test/case/node/erf.py +4 -3
- onnx/backend/test/case/node/exp.py +4 -3
- onnx/backend/test/case/node/expand.py +4 -3
- onnx/backend/test/case/node/eyelike.py +4 -3
- onnx/backend/test/case/node/flatten.py +4 -3
- onnx/backend/test/case/node/floor.py +4 -3
- onnx/backend/test/case/node/gather.py +4 -3
- onnx/backend/test/case/node/gatherelements.py +4 -3
- onnx/backend/test/case/node/gathernd.py +5 -4
- onnx/backend/test/case/node/gemm.py +4 -3
- onnx/backend/test/case/node/globalaveragepool.py +4 -4
- onnx/backend/test/case/node/globalmaxpool.py +4 -5
- onnx/backend/test/case/node/greater.py +4 -3
- onnx/backend/test/case/node/greater_equal.py +4 -3
- onnx/backend/test/case/node/gridsample.py +4 -3
- onnx/backend/test/case/node/groupnormalization.py +5 -4
- onnx/backend/test/case/node/gru.py +10 -9
- onnx/backend/test/case/node/hammingwindow.py +4 -4
- onnx/backend/test/case/node/hannwindow.py +4 -4
- onnx/backend/test/case/node/hardmax.py +4 -3
- onnx/backend/test/case/node/hardsigmoid.py +4 -3
- onnx/backend/test/case/node/hardswish.py +4 -3
- onnx/backend/test/case/node/identity.py +4 -3
- onnx/backend/test/case/node/if.py +4 -3
- onnx/backend/test/case/node/instancenorm.py +4 -3
- onnx/backend/test/case/node/isinf.py +4 -3
- onnx/backend/test/case/node/isnan.py +4 -3
- onnx/backend/test/case/node/layernormalization.py +4 -3
- onnx/backend/test/case/node/leakyrelu.py +4 -3
- onnx/backend/test/case/node/less.py +4 -3
- onnx/backend/test/case/node/less_equal.py +4 -3
- onnx/backend/test/case/node/log.py +4 -3
- onnx/backend/test/case/node/logsoftmax.py +4 -3
- onnx/backend/test/case/node/loop.py +4 -3
- onnx/backend/test/case/node/lppool.py +279 -0
- onnx/backend/test/case/node/lrn.py +4 -3
- onnx/backend/test/case/node/lstm.py +10 -9
- onnx/backend/test/case/node/matmul.py +4 -3
- onnx/backend/test/case/node/matmulinteger.py +4 -3
- onnx/backend/test/case/node/max.py +5 -4
- onnx/backend/test/case/node/maxpool.py +9 -4
- onnx/backend/test/case/node/maxunpool.py +4 -3
- onnx/backend/test/case/node/mean.py +4 -3
- onnx/backend/test/case/node/meanvariancenormalization.py +4 -3
- onnx/backend/test/case/node/melweightmatrix.py +4 -4
- onnx/backend/test/case/node/min.py +5 -4
- onnx/backend/test/case/node/mish.py +4 -3
- onnx/backend/test/case/node/mod.py +4 -3
- onnx/backend/test/case/node/momentum.py +4 -3
- onnx/backend/test/case/node/mul.py +4 -3
- onnx/backend/test/case/node/neg.py +4 -3
- onnx/backend/test/case/node/negativeloglikelihoodloss.py +4 -3
- onnx/backend/test/case/node/nonmaxsuppression.py +4 -3
- onnx/backend/test/case/node/nonzero.py +4 -3
- onnx/backend/test/case/node/not.py +4 -3
- onnx/backend/test/case/node/onehot.py +5 -4
- onnx/backend/test/case/node/optionalgetelement.py +4 -3
- onnx/backend/test/case/node/optionalhaselement.py +4 -3
- onnx/backend/test/case/node/or.py +4 -3
- onnx/backend/test/case/node/pad.py +36 -5
- onnx/backend/test/case/node/pool_op_common.py +20 -2
- onnx/backend/test/case/node/pow.py +4 -3
- onnx/backend/test/case/node/prelu.py +4 -3
- onnx/backend/test/case/node/qlinearconv.py +4 -3
- onnx/backend/test/case/node/qlinearmatmul.py +4 -3
- onnx/backend/test/case/node/quantizelinear.py +50 -3
- onnx/backend/test/case/node/rangeop.py +4 -3
- onnx/backend/test/case/node/reciprocal.py +4 -3
- onnx/backend/test/case/node/reduce_log_sum.py +4 -3
- onnx/backend/test/case/node/reduce_log_sum_exp.py +4 -3
- onnx/backend/test/case/node/reducel1.py +4 -3
- onnx/backend/test/case/node/reducel2.py +4 -3
- onnx/backend/test/case/node/reducemax.py +4 -3
- onnx/backend/test/case/node/reducemean.py +4 -3
- onnx/backend/test/case/node/reducemin.py +4 -3
- onnx/backend/test/case/node/reduceprod.py +4 -3
- onnx/backend/test/case/node/reducesum.py +4 -3
- onnx/backend/test/case/node/reducesumsquare.py +4 -3
- onnx/backend/test/case/node/relu.py +4 -3
- onnx/backend/test/case/node/reshape.py +4 -3
- onnx/backend/test/case/node/resize.py +73 -321
- onnx/backend/test/case/node/reversesequence.py +4 -3
- onnx/backend/test/case/node/rnn.py +10 -9
- onnx/backend/test/case/node/roialign.py +193 -3
- onnx/backend/test/case/node/round.py +4 -3
- onnx/backend/test/case/node/scan.py +4 -3
- onnx/backend/test/case/node/scatter.py +4 -3
- onnx/backend/test/case/node/scatterelements.py +4 -3
- onnx/backend/test/case/node/scatternd.py +4 -4
- onnx/backend/test/case/node/selu.py +4 -3
- onnx/backend/test/case/node/sequence_map.py +4 -4
- onnx/backend/test/case/node/sequenceinsert.py +4 -3
- onnx/backend/test/case/node/shape.py +4 -3
- onnx/backend/test/case/node/shrink.py +4 -3
- onnx/backend/test/case/node/sigmoid.py +4 -3
- onnx/backend/test/case/node/sign.py +4 -3
- onnx/backend/test/case/node/sin.py +4 -3
- onnx/backend/test/case/node/sinh.py +4 -3
- onnx/backend/test/case/node/size.py +4 -3
- onnx/backend/test/case/node/slice.py +4 -3
- onnx/backend/test/case/node/softmax.py +4 -3
- onnx/backend/test/case/node/softmaxcrossentropy.py +4 -3
- onnx/backend/test/case/node/softplus.py +4 -3
- onnx/backend/test/case/node/softsign.py +4 -3
- onnx/backend/test/case/node/spacetodepth.py +6 -3
- onnx/backend/test/case/node/split.py +4 -3
- onnx/backend/test/case/node/splittosequence.py +79 -0
- onnx/backend/test/case/node/sqrt.py +4 -3
- onnx/backend/test/case/node/squeeze.py +2 -0
- onnx/backend/test/case/node/stft.py +4 -4
- onnx/backend/test/case/node/stringnormalizer.py +4 -4
- onnx/backend/test/case/node/sub.py +4 -3
- onnx/backend/test/case/node/sum.py +4 -3
- onnx/backend/test/case/node/tan.py +4 -3
- onnx/backend/test/case/node/tanh.py +4 -3
- onnx/backend/test/case/node/tfidfvectorizer.py +4 -3
- onnx/backend/test/case/node/thresholdedrelu.py +4 -3
- onnx/backend/test/case/node/tile.py +4 -3
- onnx/backend/test/case/node/topk.py +4 -3
- onnx/backend/test/case/node/transpose.py +8 -7
- onnx/backend/test/case/node/trilu.py +4 -3
- onnx/backend/test/case/node/unique.py +4 -3
- onnx/backend/test/case/node/unsqueeze.py +4 -3
- onnx/backend/test/case/node/upsample.py +4 -3
- onnx/backend/test/case/node/where.py +4 -3
- onnx/backend/test/case/node/xor.py +4 -3
- onnx/backend/test/case/test_case.py +2 -0
- onnx/backend/test/case/utils.py +10 -1
- onnx/backend/test/cmd_tools.py +22 -13
- onnx/backend/test/data/light/README.md +16 -0
- onnx/backend/test/data/light/light_bvlc_alexnet.onnx +0 -0
- onnx/backend/test/data/light/light_bvlc_alexnet_output_0.pb +1 -0
- onnx/backend/test/data/light/light_densenet121.onnx +0 -0
- onnx/backend/test/data/light/light_densenet121_output_0.pb +1 -0
- onnx/backend/test/data/light/light_inception_v1.onnx +0 -0
- onnx/backend/test/data/light/light_inception_v1_output_0.pb +1 -0
- onnx/backend/test/data/light/light_inception_v2.onnx +0 -0
- onnx/backend/test/data/light/light_inception_v2_output_0.pb +1 -0
- onnx/backend/test/data/light/light_resnet50.onnx +0 -0
- onnx/backend/test/data/light/light_resnet50_output_0.pb +1 -0
- onnx/backend/test/data/light/light_shufflenet.onnx +0 -0
- onnx/backend/test/data/light/light_shufflenet_output_0.pb +1 -0
- onnx/backend/test/data/light/light_squeezenet.onnx +0 -0
- onnx/backend/test/data/light/light_squeezenet_output_0.pb +1 -0
- onnx/backend/test/data/light/light_vgg19.onnx +0 -0
- onnx/backend/test/data/light/light_vgg19_output_0.pb +1 -0
- onnx/backend/test/data/light/light_zfnet512.onnx +0 -0
- onnx/backend/test/data/light/light_zfnet512_output_0.pb +1 -0
- onnx/backend/test/data/node/test_acos/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_acosh/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_ai_onnx_ml_array_feature_extractor/model.onnx +19 -0
- onnx/backend/test/data/node/test_ai_onnx_ml_array_feature_extractor/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_ai_onnx_ml_array_feature_extractor/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_ai_onnx_ml_array_feature_extractor/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_ai_onnx_ml_binarizer/model.onnx +0 -0
- onnx/backend/test/data/node/test_ai_onnx_ml_binarizer/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_ai_onnx_ml_binarizer/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_asin/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_asinh/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_atan/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_atanh/test_data_set_0/output_0.pb +2 -2
- onnx/backend/test/data/node/test_averagepool_1d_default/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_ceil/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_default/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_dilations/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_dilations/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_dilations/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_pads/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_pads_count_include_pad/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_precomputed_pads/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_precomputed_pads_count_include_pad/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_precomputed_same_upper/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_precomputed_strides/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_same_lower/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_same_upper/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_2d_strides/model.onnx +0 -0
- onnx/backend/test/data/node/test_averagepool_3d_default/model.onnx +0 -0
- onnx/backend/test/data/node/test_basic_deform_conv_with_padding/model.onnx +0 -0
- onnx/backend/test/data/node/test_basic_deform_conv_with_padding/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_basic_deform_conv_with_padding/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_basic_deform_conv_with_padding/test_data_set_0/input_2.pb +0 -0
- onnx/backend/test/data/node/test_basic_deform_conv_with_padding/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_basic_deform_conv_without_padding/model.onnx +0 -0
- onnx/backend/test/data/node/test_basic_deform_conv_without_padding/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_basic_deform_conv_without_padding/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_basic_deform_conv_without_padding/test_data_set_0/input_2.pb +0 -0
- onnx/backend/test/data/node/test_basic_deform_conv_without_padding/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_and_i16_3d/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_and_i16_3d/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_and_i16_3d/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_and_i32_2d/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_and_i32_2d/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_and_i32_2d/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_and_ui64_bcast_3v1d/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_and_ui64_bcast_3v1d/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_and_ui64_bcast_3v1d/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_and_ui8_bcast_4v3d/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_and_ui8_bcast_4v3d/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_and_ui8_bcast_4v3d/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_not_2d/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_not_2d/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_not_3d/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_not_3d/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_not_4d/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_not_4d/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_or_i16_4d/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_or_i16_4d/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_or_i16_4d/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_or_i32_2d/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_or_i32_2d/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_or_i32_2d/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_or_ui64_bcast_3v1d/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_or_ui64_bcast_3v1d/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_or_ui64_bcast_3v1d/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_or_ui8_bcast_4v3d/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_or_ui8_bcast_4v3d/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_or_ui8_bcast_4v3d/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_xor_i16_3d/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_xor_i16_3d/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_xor_i16_3d/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_xor_i32_2d/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_xor_i32_2d/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_xor_i32_2d/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_xor_ui64_bcast_3v1d/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_xor_ui64_bcast_3v1d/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_xor_ui64_bcast_3v1d/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_xor_ui8_bcast_4v3d/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_xor_ui8_bcast_4v3d/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_bitwise_xor_ui8_bcast_4v3d/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_BFLOAT16_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_DOUBLE_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_DOUBLE_to_FLOAT16/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_DOUBLE/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FN/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FN/test_data_set_0/input_0.pb +2 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FN/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FNUZ/test_data_set_0/input_0.pb +2 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FNUZ/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2/test_data_set_0/input_0.pb +2 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2FNUZ/test_data_set_0/input_0.pb +2 -0
- onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2FNUZ/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT16/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT16/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT16/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT16/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT16/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT16/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT16/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT16/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_BFLOAT16/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_DOUBLE/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT16/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FN/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_FLOAT_to_STRING/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_STRING_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FN/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FN/test_data_set_0/input_0.pb +2 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FN/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FNUZ/test_data_set_0/input_0.pb +2 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FNUZ/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2/test_data_set_0/input_0.pb +2 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2FNUZ/test_data_set_0/input_0.pb +2 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2FNUZ/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FN/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_BFLOAT16_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_BFLOAT16_to_FLOAT_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_DOUBLE_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_DOUBLE_to_FLOAT16/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_DOUBLE_to_FLOAT16_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_DOUBLE_to_FLOAT_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT16_to_DOUBLE/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT16_to_DOUBLE_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT16_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT16_to_FLOAT_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT_expanded/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT_expanded/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT_expanded/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT_expanded/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT_expanded/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT_expanded/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT_expanded/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT_expanded/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT_expanded/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT_expanded/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT_expanded/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT_expanded/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_BFLOAT16/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_BFLOAT16_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_DOUBLE/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_DOUBLE_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT16/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT16_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/input_1.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/input_1.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ_expanded/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ_expanded/test_data_set_0/input_1.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ_expanded/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN_expanded/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN_expanded/test_data_set_0/input_1.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN_expanded/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2/test_data_set_0/input_1.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/input_1.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ_expanded/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ_expanded/test_data_set_0/input_1.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ_expanded/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2_expanded/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2_expanded/test_data_set_0/input_1.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2_expanded/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_STRING/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_FLOAT_to_STRING_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_STRING_to_FLOAT/model.onnx +0 -0
- onnx/backend/test/data/node/test_castlike_STRING_to_FLOAT_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_center_crop_pad_crop_negative_axes_hwc/model.onnx +0 -0
- onnx/backend/test/data/node/test_center_crop_pad_crop_negative_axes_hwc/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_center_crop_pad_crop_negative_axes_hwc/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_center_crop_pad_crop_negative_axes_hwc/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_center_crop_pad_crop_negative_axes_hwc_expanded/model.onnx +0 -0
- onnx/backend/test/data/node/test_center_crop_pad_crop_negative_axes_hwc_expanded/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_center_crop_pad_crop_negative_axes_hwc_expanded/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_center_crop_pad_crop_negative_axes_hwc_expanded/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_col2im_pads/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_constant/model.onnx +0 -0
- onnx/backend/test/data/node/test_constant_pad/model.onnx +0 -0
- onnx/backend/test/data/node/test_constant_pad_axes/model.onnx +0 -0
- onnx/backend/test/data/node/test_constant_pad_negative_axes/model.onnx +0 -0
- onnx/backend/test/data/node/test_constant_pad_negative_axes/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_constant_pad_negative_axes/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_constant_pad_negative_axes/test_data_set_0/input_2.pb +1 -0
- onnx/backend/test/data/node/test_constant_pad_negative_axes/test_data_set_0/input_3.pb +1 -0
- onnx/backend/test/data/node/test_constant_pad_negative_axes/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_cosh/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_cosh_example/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_deform_conv_with_mask_bias/model.onnx +0 -0
- onnx/backend/test/data/node/test_deform_conv_with_mask_bias/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_deform_conv_with_mask_bias/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_deform_conv_with_mask_bias/test_data_set_0/input_2.pb +0 -0
- onnx/backend/test/data/node/test_deform_conv_with_mask_bias/test_data_set_0/input_3.pb +0 -0
- onnx/backend/test/data/node/test_deform_conv_with_mask_bias/test_data_set_0/input_4.pb +0 -0
- onnx/backend/test/data/node/test_deform_conv_with_mask_bias/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_deform_conv_with_multiple_offset_groups/model.onnx +0 -0
- onnx/backend/test/data/node/test_deform_conv_with_multiple_offset_groups/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_deform_conv_with_multiple_offset_groups/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_deform_conv_with_multiple_offset_groups/test_data_set_0/input_2.pb +0 -0
- onnx/backend/test/data/node/test_deform_conv_with_multiple_offset_groups/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_dequantizelinear/model.onnx +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_axis/model.onnx +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_e4m3fn/model.onnx +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_e4m3fn/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_e4m3fn/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_e4m3fn/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_e5m2/model.onnx +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_e5m2/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_e5m2/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_dequantizelinear_e5m2/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_edge_pad/model.onnx +0 -0
- onnx/backend/test/data/node/test_equal/model.onnx +0 -0
- onnx/backend/test/data/node/test_equal_bcast/model.onnx +0 -0
- onnx/backend/test/data/node/test_equal_string/model.onnx +0 -0
- onnx/backend/test/data/node/test_equal_string/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_equal_string/test_data_set_0/input_1.pb +1 -0
- onnx/backend/test/data/node/test_equal_string/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_equal_string_broadcast/model.onnx +0 -0
- onnx/backend/test/data/node/test_equal_string_broadcast/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_equal_string_broadcast/test_data_set_0/input_1.pb +1 -0
- onnx/backend/test/data/node/test_equal_string_broadcast/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_identity/model.onnx +0 -0
- onnx/backend/test/data/node/test_identity_sequence/model.onnx +0 -0
- onnx/backend/test/data/node/test_lppool_1d_default/model.onnx +0 -0
- onnx/backend/test/data/node/test_lppool_1d_default/test_data_set_0/input_0.pb +1 -0
- onnx/backend/test/data/node/test_lppool_1d_default/test_data_set_0/output_0.pb +2 -0
- onnx/backend/test/data/node/test_lppool_2d_default/model.onnx +0 -0
- onnx/backend/test/data/node/test_lppool_2d_default/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_lppool_2d_default/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_lppool_2d_dilations/model.onnx +0 -0
- onnx/backend/test/data/node/test_lppool_2d_dilations/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_lppool_2d_dilations/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_lppool_2d_pads/model.onnx +0 -0
- onnx/backend/test/data/node/test_lppool_2d_pads/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_lppool_2d_pads/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_lppool_2d_same_lower/model.onnx +0 -0
- onnx/backend/test/data/node/test_lppool_2d_same_lower/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_lppool_2d_same_lower/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_lppool_2d_same_upper/model.onnx +0 -0
- onnx/backend/test/data/node/test_lppool_2d_same_upper/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_lppool_2d_same_upper/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_lppool_2d_strides/model.onnx +0 -0
- onnx/backend/test/data/node/test_lppool_2d_strides/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_lppool_2d_strides/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_lppool_3d_default/model.onnx +0 -0
- onnx/backend/test/data/node/test_lppool_3d_default/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_lppool_3d_default/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_mish/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_mish_expanded/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear/model.onnx +0 -0
- onnx/backend/test/data/node/test_quantizelinear_axis/model.onnx +0 -0
- onnx/backend/test/data/node/test_quantizelinear_e4m3fn/model.onnx +0 -0
- onnx/backend/test/data/node/test_quantizelinear_e4m3fn/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_e4m3fn/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_e4m3fn/test_data_set_0/input_2.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_e4m3fn/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_e5m2/model.onnx +0 -0
- onnx/backend/test/data/node/test_quantizelinear_e5m2/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_e5m2/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_e5m2/test_data_set_0/input_2.pb +0 -0
- onnx/backend/test/data/node/test_quantizelinear_e5m2/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_reflect_pad/model.onnx +0 -0
- onnx/backend/test/data/node/test_reshape_allowzero_reordered/model.onnx +0 -0
- onnx/backend/test/data/node/test_reshape_extended_dims/model.onnx +0 -0
- onnx/backend/test/data/node/test_reshape_negative_dim/model.onnx +0 -0
- onnx/backend/test/data/node/test_reshape_negative_extended_dims/model.onnx +0 -0
- onnx/backend/test/data/node/test_reshape_one_dim/model.onnx +0 -0
- onnx/backend/test/data/node/test_reshape_reduced_dims/model.onnx +0 -0
- onnx/backend/test/data/node/test_reshape_reordered_all_dims/model.onnx +0 -0
- onnx/backend/test/data/node/test_reshape_reordered_last_dims/model.onnx +0 -0
- onnx/backend/test/data/node/test_reshape_zero_and_negative_dim/model.onnx +0 -0
- onnx/backend/test/data/node/test_reshape_zero_dim/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_downsample_scales_cubic/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_downsample_scales_cubic_A_n0p5_exclude_outside/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_downsample_scales_cubic_align_corners/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_downsample_scales_cubic_antialias/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_downsample_scales_linear/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_downsample_scales_linear_align_corners/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_downsample_scales_linear_antialias/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_downsample_scales_linear_half_pixel_symmetric/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_downsample_scales_linear_half_pixel_symmetric/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_resize_downsample_scales_linear_half_pixel_symmetric/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_resize_downsample_scales_linear_half_pixel_symmetric/test_data_set_0/output_0.pb +1 -0
- onnx/backend/test/data/node/test_resize_downsample_scales_nearest/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_downsample_sizes_cubic/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_downsample_sizes_cubic_antialias/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_downsample_sizes_linear_antialias/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_downsample_sizes_linear_pytorch_half_pixel/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_downsample_sizes_nearest/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_downsample_sizes_nearest_not_larger/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_downsample_sizes_nearest_not_smaller/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_tf_crop_and_resize/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_tf_crop_and_resize_axes_2_3/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_tf_crop_and_resize_axes_3_2/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_scales_cubic/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_scales_cubic_A_n0p5_exclude_outside/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_scales_cubic_align_corners/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_scales_cubic_asymmetric/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_scales_linear/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_scales_linear_align_corners/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_scales_linear_half_pixel_symmetric/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_scales_linear_half_pixel_symmetric/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_resize_upsample_scales_linear_half_pixel_symmetric/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_resize_upsample_scales_linear_half_pixel_symmetric/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_resize_upsample_scales_nearest/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_scales_nearest_axes_2_3/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_scales_nearest_axes_3_2/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_sizes_cubic/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_sizes_nearest/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_axes_2_3/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_axes_3_2/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_ceil_half_pixel/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_floor_align_corners/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_not_larger/model.onnx +0 -0
- onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric/model.onnx +0 -0
- onnx/backend/test/data/node/test_roialign_mode_max/model.onnx +0 -0
- onnx/backend/test/data/node/test_roialign_mode_max/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_roialign_mode_max/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_roialign_mode_max/test_data_set_0/input_2.pb +0 -0
- onnx/backend/test/data/node/test_roialign_mode_max/test_data_set_0/output_0.pb +2 -0
- onnx/backend/test/data/node/test_shape/model.onnx +0 -0
- onnx/backend/test/data/node/test_shape_clip_end/model.onnx +0 -0
- onnx/backend/test/data/node/test_shape_clip_start/model.onnx +0 -0
- onnx/backend/test/data/node/test_shape_end_1/model.onnx +0 -0
- onnx/backend/test/data/node/test_shape_end_negative_1/model.onnx +0 -0
- onnx/backend/test/data/node/test_shape_example/model.onnx +0 -0
- onnx/backend/test/data/node/test_shape_start_1/model.onnx +0 -0
- onnx/backend/test/data/node/test_shape_start_1_end_2/model.onnx +0 -0
- onnx/backend/test/data/node/test_shape_start_1_end_negative_1/model.onnx +0 -0
- onnx/backend/test/data/node/test_shape_start_negative_1/model.onnx +0 -0
- onnx/backend/test/data/node/test_sinh/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_size/model.onnx +0 -0
- onnx/backend/test/data/node/test_size_example/model.onnx +0 -0
- onnx/backend/test/data/node/test_softplus_example_expanded_ver18/model.onnx +0 -0
- onnx/backend/test/data/node/{test_softplus_expanded → test_softplus_expanded_ver18}/model.onnx +0 -0
- onnx/backend/test/data/node/test_split_to_sequence_1/model.onnx +0 -0
- onnx/backend/test/data/node/test_split_to_sequence_1/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_split_to_sequence_1/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_split_to_sequence_1/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_split_to_sequence_2/model.onnx +0 -0
- onnx/backend/test/data/node/test_split_to_sequence_2/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_split_to_sequence_2/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_split_to_sequence_2/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_split_to_sequence_nokeepdims/model.onnx +0 -0
- onnx/backend/test/data/node/test_split_to_sequence_nokeepdims/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_split_to_sequence_nokeepdims/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/node/test_tan/test_data_set_0/output_0.pb +1 -1
- onnx/backend/test/data/node/test_wrap_pad/model.onnx +0 -0
- onnx/backend/test/data/node/test_wrap_pad/test_data_set_0/input_0.pb +0 -0
- onnx/backend/test/data/node/test_wrap_pad/test_data_set_0/input_1.pb +0 -0
- onnx/backend/test/data/node/test_wrap_pad/test_data_set_0/output_0.pb +0 -0
- onnx/backend/test/data/real/test_bvlc_alexnet/data.json +1 -1
- onnx/backend/test/data/real/test_densenet121/data.json +1 -1
- onnx/backend/test/data/real/test_inception_v1/data.json +1 -1
- onnx/backend/test/data/real/test_inception_v2/data.json +1 -1
- onnx/backend/test/data/real/test_resnet50/data.json +1 -1
- onnx/backend/test/data/real/test_shufflenet/data.json +1 -1
- onnx/backend/test/data/real/test_squeezenet/data.json +1 -1
- onnx/backend/test/data/real/test_vgg19/data.json +1 -1
- onnx/backend/test/data/real/test_zfnet512/data.json +1 -1
- onnx/backend/test/loader/__init__.py +3 -1
- onnx/backend/test/report/__init__.py +3 -1
- onnx/backend/test/report/base.py +2 -0
- onnx/backend/test/report/coverage.py +8 -14
- onnx/backend/test/runner/__init__.py +145 -38
- onnx/backend/test/runner/item.py +2 -0
- onnx/backend/test/stat_coverage.py +23 -26
- onnx/bin/__init__.py +2 -0
- onnx/bin/checker.py +2 -0
- onnx/checker.cc +17 -5
- onnx/checker.h +3 -3
- onnx/checker.py +22 -5
- onnx/common/array_ref.h +2 -0
- onnx/common/assertions.cc +2 -0
- onnx/common/assertions.h +2 -0
- onnx/common/common.h +2 -0
- onnx/common/constants.h +3 -3
- onnx/common/file_utils.h +3 -1
- onnx/common/graph_node_list.h +2 -0
- onnx/common/interned_strings.cc +2 -0
- onnx/common/interned_strings.h +2 -0
- onnx/common/ir.h +2 -0
- onnx/common/ir_pb_converter.cc +7 -1
- onnx/common/ir_pb_converter.h +2 -0
- onnx/common/model_helpers.cc +3 -3
- onnx/common/model_helpers.h +3 -3
- onnx/common/path.cc +0 -1
- onnx/common/path.h +0 -1
- onnx/common/platform_helpers.h +2 -0
- onnx/common/status.cc +2 -0
- onnx/common/status.h +2 -0
- onnx/common/stl_backports.h +3 -3
- onnx/common/tensor.h +24 -171
- onnx/common/version.h +3 -1
- onnx/compose.py +40 -32
- onnx/cpp2py_export.cc +268 -89
- onnx/defs/__init__.py +9 -7
- onnx/defs/attr_proto_util.cc +2 -0
- onnx/defs/attr_proto_util.h +2 -0
- onnx/defs/controlflow/defs.cc +25 -369
- onnx/defs/controlflow/old.cc +444 -0
- onnx/defs/controlflow/utils.cc +357 -0
- onnx/defs/controlflow/utils.h +21 -0
- onnx/defs/data_propagators.h +2 -0
- onnx/defs/data_type_utils.cc +6 -2
- onnx/defs/gen_doc.py +31 -45
- onnx/defs/gen_shape_inference_information.py +2 -0
- onnx/defs/generator/defs.cc +21 -19
- onnx/defs/generator/old.cc +157 -0
- onnx/defs/logical/defs.cc +17 -16
- onnx/defs/logical/old.cc +23 -0
- onnx/defs/math/defs.cc +155 -131
- onnx/defs/math/old.cc +1 -1
- onnx/defs/nn/defs.cc +135 -45
- onnx/defs/nn/old.cc +142 -9
- onnx/defs/operator_sets.h +45 -0
- onnx/defs/optional/defs.cc +8 -4
- onnx/defs/parser.cc +61 -4
- onnx/defs/parser.h +43 -31
- onnx/defs/printer.cc +7 -1
- onnx/defs/printer.h +1 -1
- onnx/defs/quantization/defs.cc +63 -26
- onnx/defs/quantization/old.cc +102 -1
- onnx/defs/reduction/defs.cc +1 -1
- onnx/defs/reduction/utils.cc +5 -4
- onnx/defs/rnn/defs.cc +95 -173
- onnx/defs/schema.cc +45 -29
- onnx/defs/schema.h +125 -15
- onnx/defs/sequence/defs.cc +11 -8
- onnx/defs/shape_inference.cc +25 -4
- onnx/defs/shape_inference.h +29 -1
- onnx/defs/tensor/defs.cc +499 -565
- onnx/defs/tensor/old.cc +777 -47
- onnx/defs/tensor/utils.cc +130 -8
- onnx/defs/tensor/utils.h +2 -0
- onnx/defs/tensor_proto_util.cc +3 -0
- onnx/defs/traditionalml/defs.cc +19 -2
- onnx/examples/Protobufs.ipynb +129 -31
- onnx/examples/check_model.ipynb +29 -21
- onnx/examples/load_model.ipynb +25 -3
- onnx/examples/make_model.ipynb +32 -23
- onnx/external_data_helper.py +8 -9
- onnx/frontend/__init__.py +2 -0
- onnx/gen_proto.py +18 -24
- onnx/helper.py +394 -107
- onnx/hub.py +189 -20
- onnx/mapping.py +33 -7
- onnx/numpy_helper.py +263 -52
- onnx/onnx-ml.proto +28 -6
- onnx/onnx-operators-ml.proto +1 -1
- onnx/onnx-operators.in.proto +1 -1
- onnx/onnx-operators.proto +1 -1
- onnx/onnx.in.proto +28 -6
- onnx/onnx.proto +28 -6
- onnx/onnx_cpp2py_export.cp310-win_amd64.pyd +0 -0
- onnx/onnx_data_pb2.pyi +2 -1
- onnx/onnx_ml_pb2.py +33 -33
- onnx/onnx_ml_pb2.pyi +12 -2
- onnx/onnx_operators_ml_pb2.pyi +2 -1
- onnx/parser.py +29 -13
- onnx/printer.py +6 -4
- onnx/proto_utils.h +3 -3
- onnx/py_utils.h +3 -3
- onnx/reference/__init__.py +2 -0
- onnx/reference/custom_element_types.py +11 -0
- onnx/reference/op_run.py +84 -8
- onnx/reference/ops/__init__.py +5 -1
- onnx/reference/ops/_helpers.py +55 -0
- onnx/reference/ops/_op.py +18 -11
- onnx/reference/ops/_op_common_indices.py +2 -0
- onnx/reference/ops/_op_common_pool.py +4 -10
- onnx/reference/ops/_op_common_random.py +2 -0
- onnx/reference/ops/_op_common_window.py +2 -0
- onnx/reference/ops/_op_list.py +208 -214
- onnx/reference/ops/aionnx_preview_training/__init__.py +4 -2
- onnx/reference/ops/aionnx_preview_training/_op_list.py +15 -38
- onnx/reference/ops/aionnx_preview_training/_op_run_training.py +2 -0
- onnx/reference/ops/aionnx_preview_training/op_adagrad.py +3 -1
- onnx/reference/ops/aionnx_preview_training/op_adam.py +3 -1
- onnx/reference/ops/aionnx_preview_training/op_momentum.py +3 -1
- onnx/reference/ops/aionnxml/__init__.py +3 -0
- onnx/reference/ops/aionnxml/_common_classifier.py +81 -0
- onnx/reference/ops/aionnxml/_op_list.py +97 -0
- onnx/reference/ops/aionnxml/_op_run_aionnxml.py +8 -0
- onnx/reference/ops/aionnxml/op_array_feature_extractor.py +50 -0
- onnx/reference/ops/aionnxml/op_binarizer.py +15 -0
- onnx/reference/ops/aionnxml/op_dict_vectorizer.py +56 -0
- onnx/reference/ops/aionnxml/op_feature_vectorizer.py +30 -0
- onnx/reference/ops/aionnxml/op_imputer.py +47 -0
- onnx/reference/ops/aionnxml/op_label_encoder.py +52 -0
- onnx/reference/ops/aionnxml/op_linear_classifier.py +99 -0
- onnx/reference/ops/aionnxml/op_linear_regressor.py +26 -0
- onnx/reference/ops/aionnxml/op_normalizer.py +41 -0
- onnx/reference/ops/aionnxml/op_one_hot_encoder.py +55 -0
- onnx/reference/ops/aionnxml/op_scaler.py +12 -0
- onnx/reference/ops/aionnxml/op_svm_classifier.py +334 -0
- onnx/reference/ops/aionnxml/op_svm_helper.py +99 -0
- onnx/reference/ops/aionnxml/op_svm_regressor.py +45 -0
- onnx/reference/ops/aionnxml/op_tree_ensemble_classifier.py +132 -0
- onnx/reference/ops/aionnxml/op_tree_ensemble_helper.py +109 -0
- onnx/reference/ops/aionnxml/op_tree_ensemble_regressor.py +105 -0
- onnx/reference/ops/experimental/__init__.py +3 -1
- onnx/reference/ops/experimental/_op_list.py +15 -36
- onnx/reference/ops/experimental/_op_run_experimental.py +2 -0
- onnx/reference/ops/experimental/op_im2col.py +3 -2
- onnx/reference/ops/op_abs.py +3 -1
- onnx/reference/ops/op_acos.py +3 -1
- onnx/reference/ops/op_acosh.py +3 -1
- onnx/reference/ops/op_add.py +3 -1
- onnx/reference/ops/op_and.py +3 -1
- onnx/reference/ops/op_argmax.py +4 -9
- onnx/reference/ops/op_argmin.py +4 -9
- onnx/reference/ops/op_asin.py +3 -1
- onnx/reference/ops/op_asinh.py +3 -1
- onnx/reference/ops/op_atan.py +3 -1
- onnx/reference/ops/op_atanh.py +3 -1
- onnx/reference/ops/op_attribute_has_value.py +2 -0
- onnx/reference/ops/op_average_pool.py +80 -2
- onnx/reference/ops/op_batch_normalization.py +14 -11
- onnx/reference/ops/op_bernoulli.py +3 -2
- onnx/reference/ops/op_bitshift.py +3 -1
- onnx/reference/ops/op_bitwise_and.py +3 -1
- onnx/reference/ops/op_bitwise_not.py +3 -1
- onnx/reference/ops/op_bitwise_or.py +3 -1
- onnx/reference/ops/op_bitwise_xor.py +3 -1
- onnx/reference/ops/op_blackman_window.py +3 -1
- onnx/reference/ops/op_cast.py +91 -10
- onnx/reference/ops/op_cast_like.py +32 -7
- onnx/reference/ops/op_ceil.py +3 -1
- onnx/reference/ops/op_celu.py +3 -1
- onnx/reference/ops/op_center_crop_pad.py +7 -3
- onnx/reference/ops/op_clip.py +2 -7
- onnx/reference/ops/op_col2im.py +3 -4
- onnx/reference/ops/op_compress.py +2 -0
- onnx/reference/ops/op_concat.py +6 -5
- onnx/reference/ops/op_concat_from_sequence.py +2 -0
- onnx/reference/ops/op_constant.py +46 -35
- onnx/reference/ops/op_constant_of_shape.py +4 -0
- onnx/reference/ops/op_conv.py +62 -39
- onnx/reference/ops/op_conv_integer.py +3 -2
- onnx/reference/ops/op_conv_transpose.py +4 -4
- onnx/reference/ops/op_cos.py +3 -1
- onnx/reference/ops/op_cosh.py +3 -1
- onnx/reference/ops/op_cum_sum.py +2 -0
- onnx/reference/ops/op_deform_conv.py +178 -0
- onnx/reference/ops/op_depth_to_space.py +2 -0
- onnx/reference/ops/op_dequantize_linear.py +72 -21
- onnx/reference/ops/op_det.py +3 -4
- onnx/reference/ops/op_dft.py +2 -0
- onnx/reference/ops/op_div.py +3 -1
- onnx/reference/ops/op_dropout.py +2 -7
- onnx/reference/ops/op_dynamic_quantize_linear.py +2 -0
- onnx/reference/ops/op_einsum.py +2 -0
- onnx/reference/ops/op_elu.py +4 -2
- onnx/reference/ops/op_equal.py +3 -1
- onnx/reference/ops/op_erf.py +3 -1
- onnx/reference/ops/op_exp.py +4 -2
- onnx/reference/ops/op_expand.py +2 -0
- onnx/reference/ops/op_eyelike.py +9 -4
- onnx/reference/ops/op_flatten.py +3 -1
- onnx/reference/ops/op_floor.py +3 -1
- onnx/reference/ops/op_gather.py +2 -0
- onnx/reference/ops/op_gather_elements.py +2 -0
- onnx/reference/ops/op_gathernd.py +3 -1
- onnx/reference/ops/op_gemm.py +5 -10
- onnx/reference/ops/op_global_average_pool.py +6 -5
- onnx/reference/ops/op_global_max_pool.py +2 -0
- onnx/reference/ops/op_greater.py +3 -1
- onnx/reference/ops/op_greater_or_equal.py +3 -1
- onnx/reference/ops/op_grid_sample.py +3 -1
- onnx/reference/ops/op_gru.py +4 -1
- onnx/reference/ops/op_hamming_window.py +3 -1
- onnx/reference/ops/op_hann_window.py +3 -1
- onnx/reference/ops/op_hard_sigmoid.py +3 -1
- onnx/reference/ops/op_hardmax.py +3 -1
- onnx/reference/ops/op_identity.py +3 -1
- onnx/reference/ops/op_if.py +16 -8
- onnx/reference/ops/op_instance_normalization.py +2 -0
- onnx/reference/ops/op_isinf.py +2 -0
- onnx/reference/ops/op_isnan.py +3 -1
- onnx/reference/ops/op_layer_normalization.py +2 -0
- onnx/reference/ops/op_leaky_relu.py +4 -2
- onnx/reference/ops/op_less.py +3 -1
- onnx/reference/ops/op_less_or_equal.py +3 -1
- onnx/reference/ops/op_log.py +4 -2
- onnx/reference/ops/op_log_softmax.py +3 -1
- onnx/reference/ops/op_loop.py +4 -2
- onnx/reference/ops/op_lp_normalization.py +4 -2
- onnx/reference/ops/op_lp_pool.py +41 -0
- onnx/reference/ops/op_lrn.py +9 -5
- onnx/reference/ops/op_lstm.py +4 -2
- onnx/reference/ops/op_matmul.py +3 -1
- onnx/reference/ops/op_matmul_integer.py +2 -0
- onnx/reference/ops/op_max.py +3 -1
- onnx/reference/ops/op_max_pool.py +3 -4
- onnx/reference/ops/op_max_unpool.py +2 -1
- onnx/reference/ops/op_mean.py +3 -1
- onnx/reference/ops/op_mel_weight_matrix.py +2 -0
- onnx/reference/ops/op_min.py +3 -1
- onnx/reference/ops/op_mod.py +2 -0
- onnx/reference/ops/op_mul.py +3 -1
- onnx/reference/ops/op_neg.py +3 -1
- onnx/reference/ops/op_negative_log_likelihood_loss.py +3 -1
- onnx/reference/ops/op_non_max_suppression.py +22 -20
- onnx/reference/ops/op_non_zero.py +4 -1
- onnx/reference/ops/op_not.py +3 -1
- onnx/reference/ops/op_one_hot.py +3 -1
- onnx/reference/ops/op_optional.py +2 -0
- onnx/reference/ops/op_optional_get_element.py +4 -8
- onnx/reference/ops/op_optional_has_element.py +3 -9
- onnx/reference/ops/op_or.py +3 -1
- onnx/reference/ops/op_pad.py +18 -29
- onnx/reference/ops/op_pow.py +2 -0
- onnx/reference/ops/op_prelu.py +4 -2
- onnx/reference/ops/op_qlinear_conv.py +3 -2
- onnx/reference/ops/op_qlinear_matmul.py +2 -0
- onnx/reference/ops/op_quantize_linear.py +100 -15
- onnx/reference/ops/op_random_normal.py +3 -1
- onnx/reference/ops/op_random_normal_like.py +3 -2
- onnx/reference/ops/op_random_uniform.py +3 -1
- onnx/reference/ops/op_random_uniform_like.py +3 -2
- onnx/reference/ops/op_range.py +2 -0
- onnx/reference/ops/op_reciprocal.py +4 -2
- onnx/reference/ops/op_reduce_l1.py +17 -31
- onnx/reference/ops/op_reduce_l2.py +17 -35
- onnx/reference/ops/op_reduce_log_sum.py +6 -29
- onnx/reference/ops/op_reduce_log_sum_exp.py +6 -29
- onnx/reference/ops/op_reduce_max.py +15 -36
- onnx/reference/ops/op_reduce_mean.py +15 -33
- onnx/reference/ops/op_reduce_min.py +15 -32
- onnx/reference/ops/op_reduce_prod.py +15 -29
- onnx/reference/ops/op_reduce_sum.py +17 -45
- onnx/reference/ops/op_reduce_sum_square.py +15 -29
- onnx/reference/ops/op_relu.py +3 -1
- onnx/reference/ops/op_reshape.py +2 -8
- onnx/reference/ops/op_resize.py +59 -28
- onnx/reference/ops/op_reverse_sequence.py +2 -0
- onnx/reference/ops/op_rnn.py +3 -9
- onnx/reference/ops/op_roi_align.py +7 -5
- onnx/reference/ops/op_round.py +4 -2
- onnx/reference/ops/op_scan.py +4 -1
- onnx/reference/ops/op_scatter_elements.py +17 -4
- onnx/reference/ops/op_scatternd.py +2 -0
- onnx/reference/ops/op_selu.py +5 -1
- onnx/reference/ops/op_sequence_at.py +2 -0
- onnx/reference/ops/op_sequence_construct.py +2 -0
- onnx/reference/ops/op_sequence_empty.py +2 -0
- onnx/reference/ops/op_sequence_erase.py +2 -0
- onnx/reference/ops/op_sequence_insert.py +4 -2
- onnx/reference/ops/op_sequence_length.py +7 -1
- onnx/reference/ops/op_sequence_map.py +4 -2
- onnx/reference/ops/op_shape.py +2 -7
- onnx/reference/ops/op_shrink.py +3 -1
- onnx/reference/ops/op_sigmoid.py +7 -1
- onnx/reference/ops/op_sign.py +3 -1
- onnx/reference/ops/op_sin.py +3 -1
- onnx/reference/ops/op_sinh.py +3 -1
- onnx/reference/ops/op_size.py +2 -0
- onnx/reference/ops/op_slice.py +3 -9
- onnx/reference/ops/op_softmax.py +4 -2
- onnx/reference/ops/op_softmax_cross_entropy_loss.py +4 -1
- onnx/reference/ops/op_softplus.py +4 -2
- onnx/reference/ops/op_softsign.py +3 -1
- onnx/reference/ops/op_space_to_depth.py +3 -1
- onnx/reference/ops/op_split.py +7 -9
- onnx/reference/ops/op_split_to_sequence.py +41 -10
- onnx/reference/ops/op_sqrt.py +4 -2
- onnx/reference/ops/op_squeeze.py +3 -12
- onnx/reference/ops/op_stft.py +8 -7
- onnx/reference/ops/op_string_normalizer.py +3 -1
- onnx/reference/ops/op_sub.py +3 -1
- onnx/reference/ops/op_sum.py +3 -1
- onnx/reference/ops/op_tan.py +3 -1
- onnx/reference/ops/op_tanh.py +3 -1
- onnx/reference/ops/op_tfidf_vectorizer.py +15 -15
- onnx/reference/ops/op_thresholded_relu.py +4 -2
- onnx/reference/ops/op_tile.py +2 -0
- onnx/reference/ops/op_topk.py +12 -19
- onnx/reference/ops/op_transpose.py +2 -0
- onnx/reference/ops/op_trilu.py +3 -1
- onnx/reference/ops/op_unique.py +2 -0
- onnx/reference/ops/op_unsqueeze.py +2 -9
- onnx/reference/ops/op_upsample.py +9 -8
- onnx/reference/ops/op_where.py +7 -1
- onnx/reference/ops/op_xor.py +3 -1
- onnx/reference/reference_evaluator.py +64 -20
- onnx/shape_inference/implementation.cc +207 -30
- onnx/shape_inference/implementation.h +15 -4
- onnx/shape_inference.py +37 -12
- onnx/string_utils.h +3 -3
- onnx/test/cpp/common_path_test.cc +2 -0
- onnx/test/cpp/data_propagation_test.cc +2 -0
- onnx/test/cpp/function_context_test.cc +2 -0
- onnx/test/cpp/function_get_test.cc +2 -0
- onnx/test/cpp/function_verify_test.cc +176 -0
- onnx/test/cpp/op_reg_test.cc +2 -0
- onnx/test/cpp/parser_test.cc +65 -1
- onnx/test/cpp/schema_registration_test.cc +2 -0
- onnx/test/cpp/shape_inference_test.cc +2 -0
- onnx/test/cpp/test_main.cc +2 -0
- onnx/tools/__init__.py +2 -0
- onnx/tools/net_drawer.py +13 -9
- onnx/tools/replace_constants.py +429 -0
- onnx/tools/update_model_dims.py +7 -9
- onnx/utils.py +16 -6
- onnx/version.py +2 -2
- onnx/version_converter/BaseConverter.h +2 -0
- onnx/version_converter/adapters/adapter.h +2 -0
- onnx/version_converter/adapters/axes_attribute_to_input.h +2 -0
- onnx/version_converter/adapters/axes_input_to_attribute.h +2 -0
- onnx/version_converter/adapters/batch_normalization_13_14.h +2 -0
- onnx/version_converter/adapters/broadcast_backward_compatibility.h +2 -0
- onnx/version_converter/adapters/broadcast_forward_compatibility.h +2 -0
- onnx/version_converter/adapters/cast_9_8.h +2 -0
- onnx/version_converter/adapters/clip_10_11.h +2 -0
- onnx/version_converter/adapters/compatible.h +2 -0
- onnx/version_converter/adapters/dropout_11_12.h +2 -0
- onnx/version_converter/adapters/extend_supported_types.h +2 -0
- onnx/version_converter/adapters/gemm_6_7.h +2 -0
- onnx/version_converter/adapters/gemm_7_6.h +2 -0
- onnx/version_converter/adapters/maxpool_8_7.h +2 -0
- onnx/version_converter/adapters/no_previous_version.h +2 -0
- onnx/version_converter/adapters/pad_10_11.h +4 -0
- onnx/version_converter/adapters/remove_consumed_inputs.h +2 -0
- onnx/version_converter/adapters/reshape_4_5.h +2 -0
- onnx/version_converter/adapters/reshape_5_4.h +2 -0
- onnx/version_converter/adapters/resize_10_11.h +2 -0
- onnx/version_converter/adapters/scan_8_9.h +2 -0
- onnx/version_converter/adapters/scan_9_8.h +2 -0
- onnx/version_converter/adapters/scatter_10_11.h +2 -0
- onnx/version_converter/adapters/slice_9_10.h +2 -0
- onnx/version_converter/adapters/softmax_12_13.h +20 -28
- onnx/version_converter/adapters/split_12_13.h +2 -0
- onnx/version_converter/adapters/split_13_12.h +2 -0
- onnx/version_converter/adapters/split_17_18.h +2 -0
- onnx/version_converter/adapters/sum_8_7.h +2 -0
- onnx/version_converter/adapters/topk_9_10.h +2 -0
- onnx/version_converter/adapters/transformers.h +3 -1
- onnx/version_converter/adapters/type_restriction.h +2 -0
- onnx/version_converter/adapters/upsample_6_7.h +2 -0
- onnx/version_converter/adapters/upsample_8_9.h +2 -0
- onnx/version_converter/adapters/upsample_9_10.h +2 -0
- onnx/version_converter/adapters/upsample_9_8.h +2 -0
- onnx/version_converter/convert.cc +14 -7
- onnx/version_converter/convert.h +20 -0
- onnx/version_converter/helper.cc +3 -3
- onnx/version_converter/helper.h +3 -3
- onnx/version_converter.py +6 -3
- {onnx-1.13.0.dist-info → onnx-1.14.0.dist-info}/METADATA +95 -51
- {onnx-1.13.0.dist-info → onnx-1.14.0.dist-info}/RECORD +1056 -743
- {onnx-1.13.0.dist-info → onnx-1.14.0.dist-info}/WHEEL +1 -1
- onnx/backend/test/data/node/test_softplus_example_expanded/model.onnx +0 -0
- /onnx/backend/test/data/node/{test_softplus_example_expanded → test_softplus_example_expanded_ver18}/test_data_set_0/input_0.pb +0 -0
- /onnx/backend/test/data/node/{test_softplus_example_expanded → test_softplus_example_expanded_ver18}/test_data_set_0/output_0.pb +0 -0
- /onnx/backend/test/data/node/{test_softplus_expanded → test_softplus_expanded_ver18}/test_data_set_0/input_0.pb +0 -0
- /onnx/backend/test/data/node/{test_softplus_expanded → test_softplus_expanded_ver18}/test_data_set_0/output_0.pb +0 -0
- {onnx-1.13.0.dist-info → onnx-1.14.0.dist-info}/LICENSE +0 -0
- {onnx-1.13.0.dist-info → onnx-1.14.0.dist-info}/entry_points.txt +0 -0
- {onnx-1.13.0.dist-info → onnx-1.14.0.dist-info}/top_level.txt +0 -0
onnx/defs/tensor/old.cc
CHANGED
@@ -6,10 +6,198 @@
 #include <cmath>
 #include <numeric>
 #include "onnx/defs/data_propagators.h"
+#include "onnx/defs/function.h"
 #include "onnx/defs/tensor/utils.h"

 namespace ONNX_NAMESPACE {

+static const char* Cast_ver13_doc = R"DOC(
+The operator casts the elements of a given input tensor to a data type
+specified by the 'to' argument and returns an output tensor of the same size in
+the converted type. The 'to' argument must be one of the data types specified
+in the 'DataType' enum field in the TensorProto message.
+
+Casting from string tensor in plain (e.g., "3.14" and "1000") and scientific numeric representations
+(e.g., "1e-5" and "1E8") to float types is supported. For example, converting string "100.5" to an integer may
+yield result 100. There are some string literals reserved for special floating-point values;
+"+INF" (and "INF"), "-INF", and "NaN" are positive infinity, negative infinity, and not-a-number, respectively.
+Any string which can exactly match "+INF" in a case-insensitive way would be mapped to positive infinite. Similarly,
+this case-insensitive rule is applied to "INF" and "NaN". When casting from numeric tensors
+to string tensors, plain floating-point representation (such as "314.15926") would be used.
+Converting non-numerical-literal string such as "Hello World!" is an undefined behavior. Cases
+of converting string representing floating-point arithmetic value, such as "2.718", to INT is an undefined behavior.
+
+Conversion from a numerical type to any numerical type is always allowed.
+User must be aware of precision loss and value change caused by range difference between two types.
+For example, a 64-bit float 3.1415926459 may be round to a 32-bit float 3.141592. Similarly, converting
+an integer 36 to Boolean may produce 1 because we truncate bits which can't be stored in the targeted type.
+
+In more detail, the conversion among numerical types should follow these rules:
+
+* Casting from floating point to:
+  * floating point: +/- infinity if OOR (out of range).
+  * fixed point: undefined if OOR.
+  * bool: +/- 0.0 to False; all else to True.
+* Casting from fixed point to:
+  * floating point: +/- infinity if OOR. (+ infinity in the case of uint)
+  * fixed point: when OOR, discard higher bits and reinterpret (with respect to two's complement representation for
+    signed types). For example, 200 (int16) -> -56 (int8).
+  * bool: zero to False; nonzero to True.
+* Casting from bool to:
+  * floating point: `{1.0, 0.0}`.
+  * fixed point: `{1, 0}`.
+  * bool: no change.
+)DOC";
+
+ONNX_OPERATOR_SET_SCHEMA(
+    Cast,
+    13,
+    OpSchema()
+        .SetDoc(Cast_ver13_doc)
+        .Attr(
+            "to",
+            "The data type to which the elements of the input tensor are cast. "
+            "Strictly must be one of the types from DataType enum in TensorProto",
+            AttributeProto::INT)
+        .Input(0, "input", "Input tensor to be cast.", "T1", OpSchema::Single, true, 1, OpSchema::Differentiable)
+        .Output(
+            0,
+            "output",
+            "Output tensor with the same shape as input with type "
+            "specified by the 'to' argument",
+            "T2",
+            OpSchema::Single,
+            true,
+            1,
+            OpSchema::Differentiable)
+        .TypeConstraint(
+            "T1",
+            {"tensor(float16)",
+             "tensor(float)",
+             "tensor(double)",
+             "tensor(int8)",
+             "tensor(int16)",
+             "tensor(int32)",
+             "tensor(int64)",
+             "tensor(uint8)",
+             "tensor(uint16)",
+             "tensor(uint32)",
+             "tensor(uint64)",
+             "tensor(bool)",
+             "tensor(string)",
+             "tensor(bfloat16)"},
+            "Constrain input types. Casting from complex is not supported.")
+        .TypeConstraint(
+            "T2",
+            {"tensor(float16)",
+             "tensor(float)",
+             "tensor(double)",
+             "tensor(int8)",
+             "tensor(int16)",
+             "tensor(int32)",
+             "tensor(int64)",
+             "tensor(uint8)",
+             "tensor(uint16)",
+             "tensor(uint32)",
+             "tensor(uint64)",
+             "tensor(bool)",
+             "tensor(string)",
+             "tensor(bfloat16)"},
+            "Constrain output types. Casting to complex is not supported.")
+        .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
+          propagateElemTypeFromAttributeToOutput(ctx, "to", 0);
+          if (hasNInputShapes(ctx, 1)) {
+            propagateShapeFromInputToOutput(ctx, 0, 0);
+          }
+        })
+        .PartialDataPropagationFunction([](DataPropagationContext& ctx) {
+          PropagateShapeDataFromInputToOutput(ctx, 0);
+        }));
+
+static const char* CastLike_ver15_doc = R"DOC(
+The operator casts the elements of a given input tensor (the first input) to
+the same data type as the elements of the second input tensor.
+See documentation of the Cast operator for further details.
+)DOC";
+
+ONNX_OPERATOR_SET_SCHEMA(
+    CastLike,
+    15,
+    OpSchema()
+        .SetDoc(CastLike_ver15_doc)
+        .Input(0, "input", "Input tensor to be cast.", "T1", OpSchema::Single, true, 1, OpSchema::Differentiable)
+        .Input(
+            1,
+            "target_type",
+            "The (first) input tensor will be cast to produce a tensor of the same type as this (second input) tensor.",
+            "T2",
+            OpSchema::Single,
+            true,
+            1,
+            OpSchema::NonDifferentiable)
+        .Output(
+            0,
+            "output",
+            "Output tensor produced by casting the first input tensor to have the same type as the second input tensor.",
+            "T2",
+            OpSchema::Single,
+            true,
+            1,
+            OpSchema::Differentiable)
+        .TypeConstraint(
+            "T1",
+            {"tensor(float16)",
+             "tensor(float)",
+             "tensor(double)",
+             "tensor(int8)",
+             "tensor(int16)",
+             "tensor(int32)",
+             "tensor(int64)",
+             "tensor(uint8)",
+             "tensor(uint16)",
+             "tensor(uint32)",
+             "tensor(uint64)",
+             "tensor(bool)",
+             "tensor(string)",
+             "tensor(bfloat16)"},
+            "Constrain input types. Casting from complex is not supported.")
+        .TypeConstraint(
+            "T2",
+            {"tensor(float16)",
+             "tensor(float)",
+             "tensor(double)",
+             "tensor(int8)",
+             "tensor(int16)",
+             "tensor(int32)",
+             "tensor(int64)",
+             "tensor(uint8)",
+             "tensor(uint16)",
+             "tensor(uint32)",
+             "tensor(uint64)",
+             "tensor(bool)",
+             "tensor(string)",
+             "tensor(bfloat16)"},
+            "Constrain output types. Casting to complex is not supported.")
+        .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
+          propagateElemTypeFromInputToOutput(ctx, 1, 0);
+          if (hasNInputShapes(ctx, 1)) {
+            propagateShapeFromInputToOutput(ctx, 0, 0);
+          }
+        })
+        .SetContextDependentFunctionBodyBuilder(
+            [](const FunctionBodyBuildContext& ctx, const OpSchema& schema, FunctionProto& functionProto) -> bool {
+              auto target_type = ctx.getInputType(1);
+              if ((target_type == nullptr) || (!target_type->has_tensor_type())) {
+                // we cannot create a correct function body without knowing the target element type
+                return false;
+              }
+              auto target_elt_type = target_type->tensor_type().elem_type();
+              FunctionBuilder builder(functionProto);
+              builder.Add("output = Cast (input)", "to", (int64_t)(target_elt_type));
+              schema.BuildFunction(functionProto);
+              return true;
+            }));
+
 static const char* Cast_ver9_doc = R"DOC(
 The operator casts the elements of a given input tensor to a data type
 specified by the 'to' argument and returns an output tensor of the same size in
@@ -18,7 +206,7 @@ in the 'DataType' enum field in the TensorProto message.

 Casting from string tensor in plain (e.g., "3.14" and "1000") and scientific numeric representations
 (e.g., "1e-5" and "1E8") to float types is supported. For example, converting string "100.5" to an integer may
-result 100. There are some string literals reserved for special floating-point values;
+yield result 100. There are some string literals reserved for special floating-point values;
 "+INF" (and "INF"), "-INF", and "NaN" are positive infinity, negative infinity, and not-a-number, respectively.
 Any string which can exactly match "+INF" in a case-insensitive way would be mapped to positive infinite. Similarly,
 this case-insensitive rule is applied to "INF" and "NaN". When casting from numeric tensors
@@ -113,10 +301,7 @@ ONNX_OPERATOR_SET_SCHEMA(
             1,
             OpSchema::NonDifferentiable)
         .Output(0, "reshaped", "Reshaped data.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
-        .TypeConstraint(
-            "T",
-            OpSchema::all_tensor_types_with_bfloat(),
-            "Constrain input and output types to all tensor types.")
+        .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
         .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
           // Type inference
           propagateElemTypeFromInputToOutput(ctx, 0, 0);
@@ -361,7 +546,7 @@ ONNX_OPERATOR_SET_SCHEMA(
         .SetDoc(Shape_ver13_doc)
         .Input(0, "data", "An input tensor.", "T", OpSchema::Single, true, 1, OpSchema::NonDifferentiable)
         .Output(0, "shape", "Shape of the input tensor", "T1", OpSchema::Single, true, 1, OpSchema::NonDifferentiable)
-        .TypeConstraint("T", OpSchema::
+        .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Input tensor can be of arbitrary type.")
         .TypeConstraint("T1", {"tensor(int64)"}, "Constrain output to int64 tensor.")
         .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
           ctx.getOutputType(0)->mutable_tensor_type()->set_elem_type(TensorProto::INT64);
@@ -613,10 +798,7 @@ ONNX_OPERATOR_SET_SCHEMA(
             true,
             1,
             OpSchema::Differentiable)
-        .TypeConstraint(
-            "T",
-            OpSchema::all_tensor_types_with_bfloat(),
-            "Constrain input and output types to all tensor types.")
+        .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
         .Attr(
             "axis",
             "Which axis to split on. "
@@ -1084,10 +1266,7 @@ ONNX_OPERATOR_SET_SCHEMA(
             1,
             OpSchema::Differentiable)
         .Output(0, "output", "Tensor of rank r >= 1.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
-        .TypeConstraint(
-            "T",
-            OpSchema::all_tensor_types_with_bfloat(),
-            "Constrain input and output types to any tensor type.")
+        .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to any tensor type.")
         .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
           propagateElemTypeFromInputToOutput(ctx, 0, 0);
           if (hasNInputShapes(ctx, 1)) {
@@ -1180,10 +1359,7 @@ ONNX_OPERATOR_SET_SCHEMA(
             1,
             OpSchema::Differentiable)
         .Output(0, "output", "Tensor of rank r >= 1.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
-        .TypeConstraint(
-            "T",
-            OpSchema::all_tensor_types_with_bfloat(),
-            "Constrain input and output types to any tensor type.")
+        .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to any tensor type.")
         .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
           propagateElemTypeFromInputToOutput(ctx, 0, 0);
           if (hasNInputShapes(ctx, 1)) {
@@ -1379,10 +1555,7 @@ ONNX_OPERATOR_SET_SCHEMA(
             true,
             1,
             OpSchema::Differentiable)
-        .TypeConstraint(
-            "T",
-            OpSchema::all_tensor_types_with_bfloat(),
-            "Input and output types can be of any tensor type.")
+        .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Input and output types can be of any tensor type.")
         .TypeConstraint("Tind", {"tensor(int32)", "tensor(int64)"}, "Constrain indices to integer types")
         .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
           propagateElemTypeFromInputToOutput(ctx, 0, 0);
@@ -1485,10 +1658,7 @@ ONNX_OPERATOR_SET_SCHEMA(
             true,
             1,
             OpSchema::Differentiable)
-        .TypeConstraint(
-            "T",
-            OpSchema::all_tensor_types_with_bfloat(),
-            "Input and output types can be of any tensor type.")
+        .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Input and output types can be of any tensor type.")
         .TypeConstraint("Tind", {"tensor(int32)", "tensor(int64)"}, "Constrain indices to integer types")
         .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
           propagateElemTypeFromInputToOutput(ctx, 0, 0);
@@ -1618,7 +1788,7 @@ axis = 1 :
 Let
 k = indices[i_{0}, ..., i_{q-1}]
 Then
-output[i_{0}, ..., i_{q-1}, j_{
+output[j_{0}, i_{0}, ..., i_{q-1}, j_{1}, ..., j_{r-2}] = input[j_{0}, k, j_{1}, ..., j_{r-2}]

 ```
 data = [
@@ -1631,11 +1801,9 @@ output[i_{0}, ..., i_{q-1}, j_{0}, ..., j_{r-2}] = input[j_{0}, k, j_{1}, ..., j
 ]
 axis = 1,
 output = [
-    [
-        [1.0, 1.9],
-        [2.3, 3.9],
-        [4.5, 5.9],
-    ],
+    [[1.0, 1.9]],
+    [[2.3, 3.9]],
+    [[4.5, 5.9]],
 ]
 ```
 )DOC";
@@ -2118,6 +2286,161 @@ ONNX_OPERATOR_SET_SCHEMA(
       return;
     }));

+static const char* Resize_ver18_doc = R"DOC(
+Resize the input tensor. In general, it calculates every value in the output tensor as a weighted average of neighborhood (a.k.a. sampling locations) in the input tensor.
+Each dimension value of the output tensor is: <br/>
+`output_dimension = floor(input_dimension * (roi_end - roi_start) * scale)` <br/>
+if input \"sizes\" is not specified.
+)DOC";
+
+static const char* Resize_ver18_attr_coordinate_transformation_mode_doc = R"DOC(
+This attribute describes how to transform the coordinate in the resized tensor to the coordinate in the original tensor. <br/>
+
+The coordinate of each dimension is transformed individually. Let's describe a case using axis x as an example.
+Denote x_resized as the coordinate of axis x in the resized tensor, x_original as the coordinate of axis x in the original tensor, `length_original` as the length of the original tensor in axis x, length_resized as the length of the resized tensor in axis x, roi_x = (start_x, end_x) of the axis x in input "roi", `scale = length_resized / length_original`, <br/>
+
+if coordinate_transformation_mode is `"half_pixel"`, <br/>
+`x_original = (x_resized + 0.5) / scale - 0.5` <br/>
+
+if coordinate_transformation_mode is `"pytorch_half_pixel"`, <br/>
+`x_original = length_resized > 1 ? (x_resized + 0.5) / scale - 0.5 : 0` <br/>
+
+if coordinate_transformation_mode is `"align_corners"`, <br/>
+`x_original = x_resized * (length_original - 1) / (length_resized - 1)` <br/>
+
+if coordinate_transformation_mode is `"asymmetric"`, <br/>
+`x_original = x_resized / scale` <br/>
+
+if coordinate_transformation_mode is `"tf_crop_and_resize"`, <br/>
+`x_original = length_resized > 1 ? start_x * (length_original - 1) + x_resized * (end_x - start_x) * (length_original - 1) / (length_resized - 1) : 0.5 * (start_x + end_x) * (length_original - 1)`
+.)DOC";
+
+static const char* Resize_ver18_attr_keep_aspect_ratio_policy_doc = R"DOC(
+This attribute describes how to interpret the `sizes` input with regard to keeping the original aspect ratio of the input, and it is not applicable when
+the `scales` input is used. <br/>
+
+Given a set of `sizes`, associated with a subset of `axes` (explicitly provided or default), and assuming `d = axes[i]`, with `i` being the index of the provided `sizes`. <br/>
+
+If `keep_aspect_ratio_policy` is `"stretch"`, the original aspect ratio is disregarded, and the input is resized to the specified size: <br/>
+`out_size[d] = sizes[i]` <br/>
+
+If `keep_aspect_ratio_policy` is `"not_larger"`, the sizes are adjusted so that no extent of the output is larger than the specified size, while keeping the original aspect ratio: <br/>
+`scale = Min(sizes[i] / in_size[d])` <br/>
+`out_size[d] = round_int(scale * in_size[i])` <br/>
+
+If `keep_aspect_ratio_policy` is `"not_smaller"`, the sizes are adjusted so that no extent of the output is smaller than the specified size, while keeping the original aspect ratio: <br/>
+`scale = Max(sizes[i] / in_size[d])` <br/>
+`out_size[d] = round_int(scale * in_size[i])` <br/>
+
+For non-resizable axes (those not specified in `axes`), the output size will be equal to the input size.
+
+Note: `round_int` stands for computing the nearest integer value, rounding halfway cases up.)DOC";
+
+ONNX_OPERATOR_SET_SCHEMA(
+    Resize,
+    18,
+    OpSchema()
+        .Attr(
+            "mode",
+            "Three interpolation modes: \"nearest\" (default), \"linear\" and \"cubic\". "
+            "The \"linear\" mode includes linear interpolation for 1D tensor and N-linear interpolation for N-D tensor (for example, bilinear interpolation for 2D tensor). "
+            "The \"cubic\" mode includes cubic interpolation for 1D tensor and N-cubic interpolation for N-D tensor (for example, bicubic interpolation for 2D tensor).",
+            AttributeProto::STRING,
+            std::string("nearest"))
+        .Attr(
+            "cubic_coeff_a",
+            "The coefficient 'a' used in cubic interpolation. Two common choice are -0.5 (in some cases of TensorFlow) and -0.75"
+            " (in PyTorch). Check out Equation (4) in https://ieeexplore.ieee.org/document/1163711 for the details. "
+            "This attribute is valid only if mode is \"cubic\".",
+            AttributeProto::FLOAT,
+            static_cast<float>(-0.75))
+        .Attr(
+            "exclude_outside",
+            "If set to 1, the weight of sampling locations outside the tensor will be set to 0"
+            " and the weight will be renormalized so that their sum is 1.0. The default value is 0.",
+            AttributeProto::INT,
+            static_cast<int64_t>(0))
+        .Attr(
+            "coordinate_transformation_mode",
+            Resize_ver18_attr_coordinate_transformation_mode_doc,
+            AttributeProto::STRING,
+            std::string("half_pixel"))
+        .Attr(
+            "nearest_mode",
+            "Four modes: \"round_prefer_floor\" (default, as known as round half down), \"round_prefer_ceil\" (as known as round half up), \"floor\", \"ceil\". Only used by nearest interpolation. It indicates how to get \"nearest\" pixel in input tensor from x_original, so this attribute is valid only if \"mode\" is \"nearest\".",
+            AttributeProto::STRING,
+            std::string("round_prefer_floor"))
+        .Attr(
+            "extrapolation_value",
+            "When coordinate_transformation_mode is \"tf_crop_and_resize\" and x_original is outside the range [0, length_original - 1], this value is used as the corresponding output value. Default is 0.0f.",
+            AttributeProto::FLOAT,
+            static_cast<float>(0))
+        .Attr(
+            "antialias",
+            "If set to 1, \"linear\" and \"cubic\" interpolation modes will use an antialiasing filter when downscaling. "
+            "Antialiasing is achieved by stretching the resampling filter by a factor max(1, 1 / scale), which means that when downsampling, more input pixels contribute to an output pixel.",
+            AttributeProto::INT,
+            static_cast<int64_t>(0))
+        .Attr(
+            "axes",
+            "If provided, it specifies a subset of axes that 'roi', 'scales' and 'sizes' refer to. "
+            "If not provided, all axes are assumed [0, 1, ..., r-1], where r = rank(data). "
+            "Non-specified dimensions are interpreted as non-resizable. "
+            "Negative value means counting dimensions from the back. Accepted range is [-r, r-1], where r = rank(data). "
+            "Behavior is undefined if an axis is repeated.",
+            AttributeProto::INTS,
+            false)
+        .Attr(
+            "keep_aspect_ratio_policy",
+            Resize_ver18_attr_keep_aspect_ratio_policy_doc,
+            AttributeProto::STRING,
+            std::string("stretch"))
+        .Input(0, "X", "N-D tensor", "T1", OpSchema::Single, true, 1, OpSchema::Differentiable)
+        .Input(
+            1,
+            "roi",
+            "1-D tensor given as [start1, ..., startN, end1, ..., endN], where N is the rank of X or the length of axes, if provided. "
+            "The RoIs' coordinates are normalized in the coordinate system of the input image. It only takes effect when coordinate_transformation_mode is \"tf_crop_and_resize\"",
+            "T2",
+            OpSchema::Optional,
+            true,
+            1,
+            OpSchema::NonDifferentiable)
+        .Input(
+            2,
+            "scales",
+            "The scale array along each dimension. It takes value greater than 0. If it's less than 1,"
+            " it's sampling down, otherwise, it's upsampling. The number of elements of 'scales' should"
+            " be the same as the rank of input 'X' or the length of 'axes', if provided. "
+            "One of 'scales' and 'sizes' MUST be specified and it is an error if both are specified. If 'sizes' is needed, the user can use an empty string as the name of 'scales' in this operator's input list.",
+            "tensor(float)",
+            OpSchema::Optional,
+            true,
+            1,
+            OpSchema::NonDifferentiable)
+        .Input(
+            3,
+            "sizes",
+            "Target size of the output tensor. Its interpretation depends on the 'keep_aspect_ratio_policy' value."
+            "The number of elements of 'sizes' should be the same as the"
+            " rank of input 'X', or the length of 'axes', if provided. Only one of 'scales' and 'sizes' can be specified. ",
+            "tensor(int64)",
+            OpSchema::Optional,
+            true,
+            1,
+            OpSchema::NonDifferentiable)
+        .Output(0, "Y", "N-D tensor after resizing", "T1", OpSchema::Single, true, 1, OpSchema::Differentiable)
+        .TypeConstraint(
+            "T1",
+            OpSchema::all_tensor_types_ir4(),
+            "Constrain input 'X' and output 'Y' to all tensor types.")
+        .TypeConstraint(
+            "T2",
+            {"tensor(float16)", "tensor(float)", "tensor(double)"},
+            "Constrain roi type to float or double.")
+        .SetDoc(Resize_ver18_doc)
+        .TypeAndShapeInferenceFunction([](InferenceContext& ctx) { resizeShapeInference_opset13_to_18(ctx); }));
+
 static const char* Resize_ver13_doc = R"DOC(
 Resize the input tensor. In general, it calculates every value in the output tensor as a weighted average of neighborhood (a.k.a. sampling locations) in the input tensor.
 Each dimension value of the output tensor is:
@@ -2218,7 +2541,7 @@ ONNX_OPERATOR_SET_SCHEMA(
         .Output(0, "Y", "N-D tensor after resizing", "T1", OpSchema::Single, true, 1, OpSchema::Differentiable)
         .TypeConstraint(
             "T1",
-            OpSchema::
+            OpSchema::all_tensor_types_ir4(),
             "Constrain input 'X' and output 'Y' to all tensor types.")
         .TypeConstraint(
             "T2",
@@ -2332,10 +2655,7 @@ ONNX_OPERATOR_SET_SCHEMA(
         .SetDoc("Identity operator")
         .Input(0, "input", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
         .Output(0, "output", "Tensor to copy input into.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
-        .TypeConstraint(
-            "T",
-            OpSchema::all_tensor_types_with_bfloat(),
-            "Constrain input and output types to all tensor types.")
+        .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
         .TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));

 ONNX_OPERATOR_SET_SCHEMA(
@@ -3633,11 +3953,9 @@ Example 2:
 ]
 axis = 1,
 output = [
-    [
-        [1.0, 1.9],
-        [2.3, 3.9],
-        [4.5, 5.9],
-    ],
+    [[1.0, 1.9]],
+    [[2.3, 3.9]],
+    [[4.5, 5.9]],
 ]
 ```
 )DOC";
@@ -4252,7 +4570,7 @@ ONNX_OPERATOR_SET_SCHEMA(
         .TypeConstraint(
             "V",
             []() {
-              auto t = OpSchema::
+              auto t = OpSchema::all_tensor_types_ir4();
               auto s = OpSchema::all_tensor_sequence_types();
               t.insert(t.end(), s.begin(), s.end());
               return t;
@@ -4437,10 +4755,7 @@ ONNX_OPERATOR_SET_SCHEMA(
             1,
             OpSchema::NonDifferentiable)
         .Output(0, "output", "Tensor after padding.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
-        .TypeConstraint(
-            "T",
-            OpSchema::all_tensor_types_with_bfloat(),
-            "Constrain input and output types to all tensor types.")
+        .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
         .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
           // Type inference
           propagateElemTypeFromInputToOutput(ctx, 0, 0);
@@ -4483,4 +4798,419 @@ ONNX_OPERATOR_SET_SCHEMA(
       return;
     }));

+static const char* Pad_ver18_doc = R"DOC(
+Given a tensor containing the data to be padded (`data`), a tensor containing the number of start and end pad values for axis (`pads`), (optionally) a `mode`, and (optionally) `constant_value`,
+a padded tensor (`output`) is generated.
+
+The three supported `modes` are (similar to corresponding modes supported by `numpy.pad`):
+
+1) `constant`(default) - pads with a given constant value as specified by `constant_value` (which defaults to 0, empty string, or False)
+
+2) `reflect` - pads with the reflection of the vector mirrored on the first and last values of the vector along each axis
+
+3) `edge` - pads with the edge values of array
+
+
+Example 1 (`constant` mode):
+
+Insert 0 pads to the beginning of the second dimension.
+
+```
+data = [
+    [1.0, 1.2],
+    [2.3, 3.4],
+    [4.5, 5.7],
+]
+
+pads = [0, 2, 0, 0]
+
+mode = 'constant'
+
+constant_value = 0.0
+
+output = [
+    [0.0, 0.0, 1.0, 1.2],
+    [0.0, 0.0, 2.3, 3.4],
+    [0.0, 0.0, 4.5, 5.7],
+]
+```
+
+Example 2 (`reflect` mode):
+
+```
+data = [
+    [1.0, 1.2],
+    [2.3, 3.4],
+    [4.5, 5.7],
+]
+
+pads = [0, 2, 0, 0]
+
+mode = 'reflect'
+
+output = [
+    [1.0, 1.2, 1.0, 1.2],
+    [2.3, 3.4, 2.3, 3.4],
+    [4.5, 5.7, 4.5, 5.7],
+]
+```
+
+Example 3 (`edge` mode):
+
+```
+data = [
+    [1.0, 1.2],
+    [2.3, 3.4],
+    [4.5, 5.7],
+]
+
+pads = [0, 2, 0, 0]
+
+mode = 'edge'
+
+output = [
+    [1.0, 1.0, 1.0, 1.2],
+    [2.3, 2.3, 2.3, 3.4],
+    [4.5, 4.5, 4.5, 5.7],
+]
+```
+)DOC";
+
+ONNX_OPERATOR_SET_SCHEMA(
+    Pad,
+    18,
+    OpSchema().FillUsing(PadDocGenerator(Pad_ver18_doc, "Supported modes: `constant`(default), `reflect`, `edge`")));
+
+ONNX_OPERATOR_SET_SCHEMA(
+    Identity,
+    16,
+    OpSchema()
+        .SetDoc("Identity operator")
+        .Input(0, "input", "Input tensor", "V", OpSchema::Single, true, 1, OpSchema::Differentiable)
+        .Output(0, "output", "Tensor to copy input into.", "V", OpSchema::Single, true, 1, OpSchema::Differentiable)
+        .TypeConstraint(
+            "V",
+            []() {
+              auto t = OpSchema::all_tensor_types_ir4();
+              auto s = OpSchema::all_tensor_sequence_types();
+              auto o = OpSchema::all_optional_types();
+              t.insert(t.end(), s.begin(), s.end());
+              t.insert(t.end(), o.begin(), o.end());
+              return t;
+            }(),
+            "Constrain input and output types to all tensor, sequence, and optional types.")
+        .TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));
+
+static const char* Reshape_ver14_doc = R"DOC(
+Reshape the input tensor similar to numpy.reshape.
+First input is the data tensor, second input is a shape tensor which specifies the output shape. It outputs the reshaped tensor.
+At most one dimension of the new shape can be -1. In this case, the value is
+inferred from the size of the tensor and the remaining dimensions. A dimension
+could also be 0, in which case the actual dimension value is unchanged (i.e. taken
+from the input tensor). If 'allowzero' is set, and the new shape includes 0, the
+dimension will be set explicitly to zero (i.e. not taken from input tensor).
+Shape (second input) could be an empty shape, which means converting to a scalar.
+The input tensor's shape and the output tensor's shape are required to have the same number of elements.
+
+If the attribute 'allowzero' is set, it is invalid for the specified shape to
+contain both a zero value and -1, as the value of the dimension corresponding
+to -1 cannot be determined uniquely.
+)DOC";
+
+ONNX_OPERATOR_SET_SCHEMA(
+    Reshape,
+    14,
+    OpSchema()
+        .SetDoc(Reshape_ver14_doc)
+        .Attr(
+            "allowzero",
+            "(Optional) By default, when any value in the 'shape' input is equal to zero "
+            "the corresponding dimension value is copied from the input tensor dynamically. "
+            "allowzero=1 indicates that if any value in the 'shape' input is set to zero, "
+            "the zero value is honored, similar to NumPy.",
+            AttributeProto::INT,
+            static_cast<int64_t>(0))
+        .Input(0, "data", "An input tensor.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
+        .Input(
+            1,
+            "shape",
+            "Specified shape for output.",
+            "tensor(int64)",
+            OpSchema::Single,
+            true,
+            1,
+            OpSchema::NonDifferentiable)
+        .Output(0, "reshaped", "Reshaped data.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
+        .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
+        .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
+          // Type inference
+          propagateElemTypeFromInputToOutput(ctx, 0, 0);
+          // Shape Inference if 2nd input data (the target shape) is available
+          // or the target shape is generated via partial data propagation
+          const TensorProto* targetShapeInitializer = ctx.getInputData(1);
+          const auto* shapeInput = ctx.getSymbolicInput(1);
+          // The targetShapeProto represents the specified shape for output.
+          TensorShapeProto targetShapeProto;
+          if (targetShapeInitializer) {
+            auto targetShape = ParseData<int64_t>(targetShapeInitializer);
+            for (auto val : targetShape) {
+              targetShapeProto.add_dim()->set_dim_value(val);
+            }
+          } else if (shapeInput) {
+            targetShapeProto.CopyFrom(*shapeInput);
+          } else {
+            return;
+          }
+
+          int allowzero = static_cast<int>(getAttribute(ctx, "allowzero", 0));
+
+          // Iterate through targetShape, adding dimensions in the outputShape
+          // TensorProto. If the targetShape dimension is -1, we do not set the
+          // dimension value in this iteration, but we record the Dimension. If
+          // targetShape dimension is 0, we attempt to propagate the dimension
+          // value/param. If the value cannot be inferred, we set the flag in
+          // the unresolveZeros vector. If targetShape dimension is positive, we
+          // set the dimension value in the outputShape. We track the product of
+          // the dimensions we are setting outputShape in the outputProduct
+          // variable. The outputProduct will potentially be used for inferring
+          // a dimension marked -1.
+          auto* outputShape = ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape();
+          TensorShapeProto::Dimension* negativeOneDim = nullptr;
+          const auto& dataInputTensorType = ctx.getInputType(0)->tensor_type();
+          std::vector<bool> unresolvedZeros(targetShapeProto.dim_size(), false);
+          int64_t outputProduct = 1;
+          bool outputProductValid = true;
+          for (int i = 0; i < static_cast<int>(targetShapeProto.dim_size()); ++i) {
+            // Add a new dimension to outputShape
+            auto* new_dim = outputShape->add_dim();
+            if (targetShapeProto.dim(i).has_dim_param()) {
+              // There is a tricky edge case here. It is possible that the value of
+              // symbolic dim can be -1 or 0 at runtime. In that case simply propgating this
+              // symbol can be erroneous. This should be a very rare scenario and in such a
+              // case an option is to turn off data propagation during shape inference.
+              new_dim->set_dim_param(targetShapeProto.dim(i).dim_param());
+              outputProductValid = false;
+            } else {
+              if (!targetShapeProto.dim(i).has_dim_value()) {
+                outputProductValid = false;
+                // treat this dim as unknown dim
+                continue;
+              }
+
+              const auto dim_value = targetShapeProto.dim(i).dim_value();
+
+              if (dim_value == -1) {
+                // Check if multiple -1's. If not, set negativeOneDim, marking
+                // this dimension to potentially be filled in later.
+                if (negativeOneDim) {
+                  fail_shape_inference("Target shape may not have multiple -1 dimensions.");
+                }
+                negativeOneDim = new_dim;
+              } else if (dim_value == 0) {
+                // Check if data input has a shape and if the index i is within
+                // its bounds. If these conditions are satisfied, any dimension
+                // value/param should be propogated. If dimension value cannot be
+                // inferred, set the corresponding unresolvedZeros flag to true.
+                // If allowzero is set however, do not propagate values, since output
+                // dimension is explicitly zero.
+                if (allowzero == 0) {
+                  unresolvedZeros[i] = true;
+                  if (dataInputTensorType.has_shape()) {
+                    if (i >= dataInputTensorType.shape().dim_size()) {
+                      fail_shape_inference("Invalid position of 0.");
+                    }
+                    if (dataInputTensorType.shape().dim(i).has_dim_value()) {
+                      const auto& input_dim_value = dataInputTensorType.shape().dim(i).dim_value();
+                      new_dim->set_dim_value(input_dim_value);
+                      outputProduct *= input_dim_value;
+                      unresolvedZeros[i] = false;
+                    } else if (dataInputTensorType.shape().dim(i).has_dim_param()) {
+                      new_dim->set_dim_param(dataInputTensorType.shape().dim(i).dim_param());
+                    }
+                  }
+                } else {
+                  new_dim->set_dim_value(dim_value);
+                  outputProduct *= dim_value;
+                }
+              } else if (dim_value > 0) {
+                // Set the dimension value to dim_value
+                new_dim->set_dim_value(dim_value);
+                outputProduct *= dim_value;
+              } else {
+                // Check if value is less than -1; fail if so
+                fail_shape_inference("Invalid dimension value: ", dim_value);
+              }
+            }
+          }
+          // If negativeOneDim has been set, we attempt to infer its value. This
+          // can be done if all dimension values for the data input tensor shape
+          // are known other than the ones corresponding to unresolvedZeros
+          // flags.
+          if (negativeOneDim && outputProductValid) {
+            // First, attempt to compute product of data input shape dimensions
+            // that are not marked by unresolvedZeros. If not possible, set the
+            // inputProductValid flag to false.
+            if (!outputProduct) {
+              fail_shape_inference("Invalid Target shape product of 0. Product cannot be 0 in combination with -1");
+            }
+            int64_t inputProduct = 1;
+            bool inputProductValid = true;
+            if (!dataInputTensorType.has_shape()) {
+              inputProductValid = false;
+            } else {
+              for (int i = 0; i < dataInputTensorType.shape().dim_size(); ++i) {
+                if (dataInputTensorType.shape().dim(i).has_dim_value()) {
+                  inputProduct *= dataInputTensorType.shape().dim(i).dim_value();
+                } else if (i >= static_cast<int>(unresolvedZeros.size()) || !unresolvedZeros[i]) {
+                  inputProductValid = false;
+                  break;
+                }
+              }
+            }
+            if (inputProductValid) {
+              if (inputProduct % outputProduct != 0) {
+                fail_shape_inference("Dimension could not be inferred: incompatible shapes");
+              }
+              negativeOneDim->set_dim_value(inputProduct / outputProduct);
+            }
+          }
+        }));
+
+static const char* Shape_ver15_doc = R"DOC(
+Takes a tensor as input and outputs an 1D int64 tensor containing the shape of the input tensor.
+Optional attributes start and end can be used to compute a slice of the input tensor's shape.
+If start axis is omitted, the slice starts from axis 0.
+The end axis, if specified, is exclusive (and the returned value will not include the size of that axis).
+If the end axis is omitted, the axes upto the last one will be included.
+Negative axes indicate counting back from the last axis.
+Note that axes will be clamped to the range [0, r-1], where r is the
+rank of the input tensor if they are out-of-range (after adding r in the case of
+negative axis). Thus, specifying any end value > r is equivalent to specifying an end
+value of r, and specifying any start value < -r is equivalent to specifying a start
+value of 0.
+
+Examples:
+
+```
+Input tensor with shape: [2, 3, 4]
+No attributes specified.
+Output: [2, 3, 4]
+```
+
+```
+Input tensor with shape: [2, 3, 4]
+start: -1
+Output: [4]
+```
+
+```
+Input tensor with shape: [2, 3, 4]
+end: -1
+Output: [2, 3]
+```
+
+```
+Input tensor with shape: [2, 3, 4]
+start: 1
+end: 2
+Output: [3]
+```
+)DOC";
+
+ONNX_OPERATOR_SET_SCHEMA(
+    Shape,
+    15,
+    OpSchema()
+        .SetDoc(Shape_ver15_doc)
+        .Input(0, "data", "An input tensor.", "T", OpSchema::Single, true, 1, OpSchema::NonDifferentiable)
+        .Output(0, "shape", "Shape of the input tensor", "T1", OpSchema::Single, true, 1, OpSchema::NonDifferentiable)
+        .Attr(
+            "start",
+            "(Optional) Starting axis for slicing the shape. Default value is 0."
+            "Negative value means counting dimensions from the back.",
+            AttributeProto::INT,
+            static_cast<int64_t>(0))
+        .Attr(
+            "end",
+            "(Optional) Ending axis for slicing the shape. "
+            "Negative value means counting dimensions from the back. "
+            "If omitted, sizes of all axes upto (including) the last one will be included.",
+            AttributeProto::INT,
+            OPTIONAL_VALUE)
+        .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Input tensor can be of arbitrary type.")
+        .TypeConstraint("T1", {"tensor(int64)"}, "Constrain output to int64 tensor.")
+        .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
+          ctx.getOutputType(0)->mutable_tensor_type()->set_elem_type(TensorProto::INT64);
+          auto* output_shape = ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape();
+          auto* output_length = output_shape->add_dim();
+
+          if (!hasNInputShapes(ctx, 1)) {
+            return;
+          }
+
+          int64_t rank = static_cast<int64_t>(ctx.getInputType(0)->tensor_type().shape().dim_size());
+          int64_t start = getAttribute(ctx, "start", 0);
+          if (start < 0)
+            start += rank;
+          start = (start < 0) ? 0 : (start > rank) ? rank : start;
+          int64_t end = getAttribute(ctx, "end", rank);
+          if (end < 0)
+            end += rank;
+          end = (end < 0) ? 0 : (end > rank) ? rank : end;
+          output_length->set_dim_value((end - start) < 0 ? 0 : (end - start));
+        })
+        .PartialDataPropagationFunction([](DataPropagationContext& ctx) {
+          if (ctx.getInputType(0)->tensor_type().has_shape()) {
+            auto& input_shape = ctx.getInputType(0)->tensor_type().shape();
+            int64_t rank = static_cast<int64_t>(input_shape.dim_size());
+            int64_t start = getAttribute(ctx, "start", 0);
+            if (start < 0)
+              start += rank;
+            start = (start < 0) ? 0 : (start > rank) ? rank : start;
+            int64_t end = getAttribute(ctx, "end", rank);
+            if (end < 0)
+              end += rank;
+            end = (end < 0) ? 0 : (end > rank) ? rank : end;
+            TensorShapeProto output_shape;
+            for (int64_t d = start; d < end; ++d) {
+              *output_shape.add_dim() = input_shape.dim(static_cast<int>(d));
+            }
+            ctx.addOutputData(0, std::move(output_shape));
+          }
+        }));
+
+static const char* Size_ver13_doc = R"DOC(
+Takes a tensor as input and outputs a int64 scalar that equals to the total number of elements of the input tensor.
+)DOC";
+
+ONNX_OPERATOR_SET_SCHEMA(
+    Size,
+    13,
+    OpSchema()
+        .SetDoc(Size_ver13_doc)
+        .Input(0, "data", "An input tensor.", "T", OpSchema::Single, true, 1, OpSchema::NonDifferentiable)
+        .Output(
+            0,
+            "size",
+            "Total number of elements of the input tensor",
+            "T1",
+            OpSchema::Single,
+            true,
+            1,
+            OpSchema::NonDifferentiable)
+        .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Input tensor can be of arbitrary type.")
+        .TypeConstraint("T1", {"tensor(int64)"}, "Constrain output to int64 tensor, which should be a scalar though.")
+        .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
+          ctx.getOutputType(0)->mutable_tensor_type()->set_elem_type(TensorProto::INT64);
+          ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape();
+        })
+        .PartialDataPropagationFunction([](DataPropagationContext& ctx) {
+          const auto input_data = ctx.getInputData(0);
+          if (input_data != nullptr) {
+            TensorShapeProto tsp;
+            tsp.mutable_dim()->Add()->set_dim_value(input_data->dim_size());
+            ctx.addOutputData(0, std::move(tsp));
+          }
+        }));
+
 } // namespace ONNX_NAMESPACE
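As a quick sanity check of the Shape-15 start/end slicing semantics quoted in the diff above, the following minimal sketch (tensor names, shapes, and values are illustrative assumptions, not taken from the diff) runs a single Shape node through the reference evaluator bundled with this wheel and reproduces the "start: -1" example:

import numpy as np
from onnx import TensorProto, helper
from onnx.reference import ReferenceEvaluator

# One-node model: Y = Shape(X, start=-1), i.e. keep only the size of the last axis.
node = helper.make_node("Shape", ["X"], ["Y"], start=-1)
graph = helper.make_graph(
    [node],
    "shape_slice_example",
    [helper.make_tensor_value_info("X", TensorProto.FLOAT, [2, 3, 4])],
    [helper.make_tensor_value_info("Y", TensorProto.INT64, [1])],
)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 18)])

# Run on a dummy input; the Shape-15 doc above predicts [4] for start=-1.
ref = ReferenceEvaluator(model)
print(ref.run(None, {"X": np.zeros((2, 3, 4), dtype=np.float32)})[0])  # -> [4]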