onnx 1.13.1-cp310-cp310-win_amd64.whl → 1.14.1-cp310-cp310-win_amd64.whl

This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.

This release of onnx has been flagged as potentially problematic.

Files changed (1057)
  1. onnx/__init__.py +116 -70
  2. onnx/backend/__init__.py +2 -0
  3. onnx/backend/base.py +3 -0
  4. onnx/backend/sample/__init__.py +2 -0
  5. onnx/backend/sample/ops/__init__.py +8 -6
  6. onnx/backend/sample/ops/abs.py +1 -1
  7. onnx/backend/test/__init__.py +4 -1
  8. onnx/backend/test/case/__init__.py +4 -2
  9. onnx/backend/test/case/base.py +2 -0
  10. onnx/backend/test/case/model/__init__.py +8 -6
  11. onnx/backend/test/case/model/expand.py +4 -3
  12. onnx/backend/test/case/model/gradient.py +4 -3
  13. onnx/backend/test/case/model/sequence.py +4 -3
  14. onnx/backend/test/case/model/shrink.py +4 -3
  15. onnx/backend/test/case/model/sign.py +4 -3
  16. onnx/backend/test/case/model/single-relu.py +4 -3
  17. onnx/backend/test/case/model/stringnormalizer.py +4 -3
  18. onnx/backend/test/case/node/__init__.py +18 -12
  19. onnx/backend/test/case/node/abs.py +4 -3
  20. onnx/backend/test/case/node/acos.py +4 -3
  21. onnx/backend/test/case/node/acosh.py +4 -3
  22. onnx/backend/test/case/node/adagrad.py +4 -3
  23. onnx/backend/test/case/node/adam.py +4 -3
  24. onnx/backend/test/case/node/add.py +4 -3
  25. onnx/backend/test/case/node/ai_onnx_ml/__init__.py +0 -0
  26. onnx/backend/test/case/node/ai_onnx_ml/array_feature_extractor.py +30 -0
  27. onnx/backend/test/case/node/ai_onnx_ml/binarizer.py +27 -0
  28. onnx/backend/test/case/node/and.py +4 -3
  29. onnx/backend/test/case/node/argmax.py +4 -3
  30. onnx/backend/test/case/node/argmin.py +4 -3
  31. onnx/backend/test/case/node/asin.py +4 -3
  32. onnx/backend/test/case/node/asinh.py +4 -3
  33. onnx/backend/test/case/node/atan.py +4 -3
  34. onnx/backend/test/case/node/atanh.py +4 -3
  35. onnx/backend/test/case/node/averagepool.py +43 -4
  36. onnx/backend/test/case/node/batchnorm.py +4 -3
  37. onnx/backend/test/case/node/bernoulli.py +4 -3
  38. onnx/backend/test/case/node/bitshift.py +4 -3
  39. onnx/backend/test/case/node/bitwiseand.py +13 -11
  40. onnx/backend/test/case/node/bitwisenot.py +8 -6
  41. onnx/backend/test/case/node/bitwiseor.py +13 -11
  42. onnx/backend/test/case/node/bitwisexor.py +13 -11
  43. onnx/backend/test/case/node/blackmanwindow.py +4 -4
  44. onnx/backend/test/case/node/cast.py +218 -8
  45. onnx/backend/test/case/node/castlike.py +103 -9
  46. onnx/backend/test/case/node/ceil.py +4 -3
  47. onnx/backend/test/case/node/celu.py +4 -3
  48. onnx/backend/test/case/node/center_crop_pad.py +26 -3
  49. onnx/backend/test/case/node/clip.py +4 -3
  50. onnx/backend/test/case/node/col2im.py +5 -4
  51. onnx/backend/test/case/node/compress.py +4 -3
  52. onnx/backend/test/case/node/concat.py +4 -3
  53. onnx/backend/test/case/node/constant.py +4 -3
  54. onnx/backend/test/case/node/constantofshape.py +4 -3
  55. onnx/backend/test/case/node/conv.py +4 -3
  56. onnx/backend/test/case/node/convinteger.py +4 -3
  57. onnx/backend/test/case/node/convtranspose.py +4 -3
  58. onnx/backend/test/case/node/cos.py +4 -3
  59. onnx/backend/test/case/node/cosh.py +4 -3
  60. onnx/backend/test/case/node/cumsum.py +4 -3
  61. onnx/backend/test/case/node/deformconv.py +170 -0
  62. onnx/backend/test/case/node/depthtospace.py +4 -3
  63. onnx/backend/test/case/node/dequantizelinear.py +46 -3
  64. onnx/backend/test/case/node/det.py +4 -3
  65. onnx/backend/test/case/node/dft.py +4 -4
  66. onnx/backend/test/case/node/div.py +4 -3
  67. onnx/backend/test/case/node/dropout.py +4 -3
  68. onnx/backend/test/case/node/dynamicquantizelinear.py +4 -3
  69. onnx/backend/test/case/node/einsum.py +4 -4
  70. onnx/backend/test/case/node/elu.py +4 -3
  71. onnx/backend/test/case/node/equal.py +28 -3
  72. onnx/backend/test/case/node/erf.py +4 -3
  73. onnx/backend/test/case/node/exp.py +4 -3
  74. onnx/backend/test/case/node/expand.py +4 -3
  75. onnx/backend/test/case/node/eyelike.py +4 -3
  76. onnx/backend/test/case/node/flatten.py +4 -3
  77. onnx/backend/test/case/node/floor.py +4 -3
  78. onnx/backend/test/case/node/gather.py +4 -3
  79. onnx/backend/test/case/node/gatherelements.py +4 -3
  80. onnx/backend/test/case/node/gathernd.py +5 -4
  81. onnx/backend/test/case/node/gemm.py +4 -3
  82. onnx/backend/test/case/node/globalaveragepool.py +4 -3
  83. onnx/backend/test/case/node/globalmaxpool.py +4 -3
  84. onnx/backend/test/case/node/greater.py +4 -3
  85. onnx/backend/test/case/node/greater_equal.py +4 -3
  86. onnx/backend/test/case/node/gridsample.py +4 -3
  87. onnx/backend/test/case/node/groupnormalization.py +5 -4
  88. onnx/backend/test/case/node/gru.py +10 -9
  89. onnx/backend/test/case/node/hammingwindow.py +4 -4
  90. onnx/backend/test/case/node/hannwindow.py +4 -4
  91. onnx/backend/test/case/node/hardmax.py +4 -3
  92. onnx/backend/test/case/node/hardsigmoid.py +4 -3
  93. onnx/backend/test/case/node/hardswish.py +4 -3
  94. onnx/backend/test/case/node/identity.py +4 -3
  95. onnx/backend/test/case/node/if.py +4 -3
  96. onnx/backend/test/case/node/instancenorm.py +4 -3
  97. onnx/backend/test/case/node/isinf.py +4 -3
  98. onnx/backend/test/case/node/isnan.py +4 -3
  99. onnx/backend/test/case/node/layernormalization.py +4 -3
  100. onnx/backend/test/case/node/leakyrelu.py +4 -3
  101. onnx/backend/test/case/node/less.py +4 -3
  102. onnx/backend/test/case/node/less_equal.py +4 -3
  103. onnx/backend/test/case/node/log.py +4 -3
  104. onnx/backend/test/case/node/logsoftmax.py +4 -3
  105. onnx/backend/test/case/node/loop.py +4 -3
  106. onnx/backend/test/case/node/lppool.py +279 -0
  107. onnx/backend/test/case/node/lrn.py +4 -3
  108. onnx/backend/test/case/node/lstm.py +10 -9
  109. onnx/backend/test/case/node/matmul.py +4 -3
  110. onnx/backend/test/case/node/matmulinteger.py +4 -3
  111. onnx/backend/test/case/node/max.py +5 -4
  112. onnx/backend/test/case/node/maxpool.py +9 -4
  113. onnx/backend/test/case/node/maxunpool.py +4 -3
  114. onnx/backend/test/case/node/mean.py +4 -3
  115. onnx/backend/test/case/node/meanvariancenormalization.py +4 -3
  116. onnx/backend/test/case/node/melweightmatrix.py +4 -4
  117. onnx/backend/test/case/node/min.py +5 -4
  118. onnx/backend/test/case/node/mish.py +4 -3
  119. onnx/backend/test/case/node/mod.py +4 -3
  120. onnx/backend/test/case/node/momentum.py +4 -3
  121. onnx/backend/test/case/node/mul.py +4 -3
  122. onnx/backend/test/case/node/neg.py +4 -3
  123. onnx/backend/test/case/node/negativeloglikelihoodloss.py +4 -3
  124. onnx/backend/test/case/node/nonmaxsuppression.py +4 -3
  125. onnx/backend/test/case/node/nonzero.py +4 -3
  126. onnx/backend/test/case/node/not.py +4 -3
  127. onnx/backend/test/case/node/onehot.py +5 -4
  128. onnx/backend/test/case/node/optionalgetelement.py +4 -3
  129. onnx/backend/test/case/node/optionalhaselement.py +4 -3
  130. onnx/backend/test/case/node/or.py +4 -3
  131. onnx/backend/test/case/node/pad.py +36 -5
  132. onnx/backend/test/case/node/pool_op_common.py +20 -2
  133. onnx/backend/test/case/node/pow.py +4 -3
  134. onnx/backend/test/case/node/prelu.py +4 -3
  135. onnx/backend/test/case/node/qlinearconv.py +4 -3
  136. onnx/backend/test/case/node/qlinearmatmul.py +4 -3
  137. onnx/backend/test/case/node/quantizelinear.py +50 -3
  138. onnx/backend/test/case/node/rangeop.py +4 -3
  139. onnx/backend/test/case/node/reciprocal.py +4 -3
  140. onnx/backend/test/case/node/reduce_log_sum.py +4 -3
  141. onnx/backend/test/case/node/reduce_log_sum_exp.py +4 -3
  142. onnx/backend/test/case/node/reducel1.py +4 -3
  143. onnx/backend/test/case/node/reducel2.py +4 -3
  144. onnx/backend/test/case/node/reducemax.py +4 -3
  145. onnx/backend/test/case/node/reducemean.py +4 -3
  146. onnx/backend/test/case/node/reducemin.py +4 -3
  147. onnx/backend/test/case/node/reduceprod.py +4 -3
  148. onnx/backend/test/case/node/reducesum.py +4 -3
  149. onnx/backend/test/case/node/reducesumsquare.py +4 -3
  150. onnx/backend/test/case/node/relu.py +4 -3
  151. onnx/backend/test/case/node/reshape.py +4 -3
  152. onnx/backend/test/case/node/resize.py +73 -321
  153. onnx/backend/test/case/node/reversesequence.py +4 -3
  154. onnx/backend/test/case/node/rnn.py +10 -9
  155. onnx/backend/test/case/node/roialign.py +193 -3
  156. onnx/backend/test/case/node/round.py +4 -3
  157. onnx/backend/test/case/node/scan.py +4 -3
  158. onnx/backend/test/case/node/scatter.py +4 -3
  159. onnx/backend/test/case/node/scatterelements.py +4 -3
  160. onnx/backend/test/case/node/scatternd.py +4 -3
  161. onnx/backend/test/case/node/selu.py +4 -3
  162. onnx/backend/test/case/node/sequence_map.py +4 -4
  163. onnx/backend/test/case/node/sequenceinsert.py +4 -3
  164. onnx/backend/test/case/node/shape.py +4 -3
  165. onnx/backend/test/case/node/shrink.py +4 -3
  166. onnx/backend/test/case/node/sigmoid.py +4 -3
  167. onnx/backend/test/case/node/sign.py +4 -3
  168. onnx/backend/test/case/node/sin.py +4 -3
  169. onnx/backend/test/case/node/sinh.py +4 -3
  170. onnx/backend/test/case/node/size.py +4 -3
  171. onnx/backend/test/case/node/slice.py +4 -3
  172. onnx/backend/test/case/node/softmax.py +4 -3
  173. onnx/backend/test/case/node/softmaxcrossentropy.py +4 -3
  174. onnx/backend/test/case/node/softplus.py +4 -3
  175. onnx/backend/test/case/node/softsign.py +4 -3
  176. onnx/backend/test/case/node/spacetodepth.py +6 -3
  177. onnx/backend/test/case/node/split.py +4 -3
  178. onnx/backend/test/case/node/splittosequence.py +79 -0
  179. onnx/backend/test/case/node/sqrt.py +4 -3
  180. onnx/backend/test/case/node/squeeze.py +2 -0
  181. onnx/backend/test/case/node/stft.py +4 -4
  182. onnx/backend/test/case/node/stringnormalizer.py +4 -4
  183. onnx/backend/test/case/node/sub.py +4 -3
  184. onnx/backend/test/case/node/sum.py +4 -3
  185. onnx/backend/test/case/node/tan.py +4 -3
  186. onnx/backend/test/case/node/tanh.py +4 -3
  187. onnx/backend/test/case/node/tfidfvectorizer.py +4 -3
  188. onnx/backend/test/case/node/thresholdedrelu.py +4 -3
  189. onnx/backend/test/case/node/tile.py +4 -3
  190. onnx/backend/test/case/node/topk.py +4 -3
  191. onnx/backend/test/case/node/transpose.py +8 -7
  192. onnx/backend/test/case/node/trilu.py +4 -3
  193. onnx/backend/test/case/node/unique.py +4 -3
  194. onnx/backend/test/case/node/unsqueeze.py +4 -3
  195. onnx/backend/test/case/node/upsample.py +4 -3
  196. onnx/backend/test/case/node/where.py +4 -3
  197. onnx/backend/test/case/node/xor.py +4 -3
  198. onnx/backend/test/case/test_case.py +2 -0
  199. onnx/backend/test/case/utils.py +9 -0
  200. onnx/backend/test/cmd_tools.py +22 -13
  201. onnx/backend/test/data/light/README.md +16 -0
  202. onnx/backend/test/data/light/light_bvlc_alexnet.onnx +0 -0
  203. onnx/backend/test/data/light/light_bvlc_alexnet_output_0.pb +1 -0
  204. onnx/backend/test/data/light/light_densenet121.onnx +0 -0
  205. onnx/backend/test/data/light/light_densenet121_output_0.pb +1 -0
  206. onnx/backend/test/data/light/light_inception_v1.onnx +0 -0
  207. onnx/backend/test/data/light/light_inception_v1_output_0.pb +1 -0
  208. onnx/backend/test/data/light/light_inception_v2.onnx +0 -0
  209. onnx/backend/test/data/light/light_inception_v2_output_0.pb +1 -0
  210. onnx/backend/test/data/light/light_resnet50.onnx +0 -0
  211. onnx/backend/test/data/light/light_resnet50_output_0.pb +1 -0
  212. onnx/backend/test/data/light/light_shufflenet.onnx +0 -0
  213. onnx/backend/test/data/light/light_shufflenet_output_0.pb +1 -0
  214. onnx/backend/test/data/light/light_squeezenet.onnx +0 -0
  215. onnx/backend/test/data/light/light_squeezenet_output_0.pb +1 -0
  216. onnx/backend/test/data/light/light_vgg19.onnx +0 -0
  217. onnx/backend/test/data/light/light_vgg19_output_0.pb +1 -0
  218. onnx/backend/test/data/light/light_zfnet512.onnx +0 -0
  219. onnx/backend/test/data/light/light_zfnet512_output_0.pb +1 -0
  220. onnx/backend/test/data/node/test_acos/test_data_set_0/output_0.pb +0 -0
  221. onnx/backend/test/data/node/test_acosh/test_data_set_0/output_0.pb +1 -1
  222. onnx/backend/test/data/node/test_ai_onnx_ml_array_feature_extractor/model.onnx +19 -0
  223. onnx/backend/test/data/node/test_ai_onnx_ml_array_feature_extractor/test_data_set_0/input_0.pb +0 -0
  224. onnx/backend/test/data/node/test_ai_onnx_ml_array_feature_extractor/test_data_set_0/input_1.pb +0 -0
  225. onnx/backend/test/data/node/test_ai_onnx_ml_array_feature_extractor/test_data_set_0/output_0.pb +0 -0
  226. onnx/backend/test/data/node/test_ai_onnx_ml_binarizer/model.onnx +0 -0
  227. onnx/backend/test/data/node/test_ai_onnx_ml_binarizer/test_data_set_0/input_0.pb +1 -0
  228. onnx/backend/test/data/node/test_ai_onnx_ml_binarizer/test_data_set_0/output_0.pb +0 -0
  229. onnx/backend/test/data/node/test_asin/test_data_set_0/output_0.pb +1 -1
  230. onnx/backend/test/data/node/test_asinh/test_data_set_0/output_0.pb +1 -1
  231. onnx/backend/test/data/node/test_atan/test_data_set_0/output_0.pb +1 -1
  232. onnx/backend/test/data/node/test_atanh/test_data_set_0/output_0.pb +2 -2
  233. onnx/backend/test/data/node/test_averagepool_1d_default/model.onnx +0 -0
  234. onnx/backend/test/data/node/test_averagepool_2d_ceil/model.onnx +0 -0
  235. onnx/backend/test/data/node/test_averagepool_2d_default/model.onnx +0 -0
  236. onnx/backend/test/data/node/test_averagepool_2d_dilations/model.onnx +0 -0
  237. onnx/backend/test/data/node/test_averagepool_2d_dilations/test_data_set_0/input_0.pb +0 -0
  238. onnx/backend/test/data/node/test_averagepool_2d_dilations/test_data_set_0/output_0.pb +0 -0
  239. onnx/backend/test/data/node/test_averagepool_2d_pads/model.onnx +0 -0
  240. onnx/backend/test/data/node/test_averagepool_2d_pads_count_include_pad/model.onnx +0 -0
  241. onnx/backend/test/data/node/test_averagepool_2d_precomputed_pads/model.onnx +0 -0
  242. onnx/backend/test/data/node/test_averagepool_2d_precomputed_pads_count_include_pad/model.onnx +0 -0
  243. onnx/backend/test/data/node/test_averagepool_2d_precomputed_same_upper/model.onnx +0 -0
  244. onnx/backend/test/data/node/test_averagepool_2d_precomputed_strides/model.onnx +0 -0
  245. onnx/backend/test/data/node/test_averagepool_2d_same_lower/model.onnx +0 -0
  246. onnx/backend/test/data/node/test_averagepool_2d_same_upper/model.onnx +0 -0
  247. onnx/backend/test/data/node/test_averagepool_2d_strides/model.onnx +0 -0
  248. onnx/backend/test/data/node/test_averagepool_3d_default/model.onnx +0 -0
  249. onnx/backend/test/data/node/test_basic_deform_conv_with_padding/model.onnx +0 -0
  250. onnx/backend/test/data/node/test_basic_deform_conv_with_padding/test_data_set_0/input_0.pb +0 -0
  251. onnx/backend/test/data/node/test_basic_deform_conv_with_padding/test_data_set_0/input_1.pb +0 -0
  252. onnx/backend/test/data/node/test_basic_deform_conv_with_padding/test_data_set_0/input_2.pb +0 -0
  253. onnx/backend/test/data/node/test_basic_deform_conv_with_padding/test_data_set_0/output_0.pb +0 -0
  254. onnx/backend/test/data/node/test_basic_deform_conv_without_padding/model.onnx +0 -0
  255. onnx/backend/test/data/node/test_basic_deform_conv_without_padding/test_data_set_0/input_0.pb +0 -0
  256. onnx/backend/test/data/node/test_basic_deform_conv_without_padding/test_data_set_0/input_1.pb +0 -0
  257. onnx/backend/test/data/node/test_basic_deform_conv_without_padding/test_data_set_0/input_2.pb +0 -0
  258. onnx/backend/test/data/node/test_basic_deform_conv_without_padding/test_data_set_0/output_0.pb +0 -0
  259. onnx/backend/test/data/node/test_bitwise_and_i16_3d/test_data_set_0/input_0.pb +0 -0
  260. onnx/backend/test/data/node/test_bitwise_and_i16_3d/test_data_set_0/input_1.pb +0 -0
  261. onnx/backend/test/data/node/test_bitwise_and_i16_3d/test_data_set_0/output_0.pb +0 -0
  262. onnx/backend/test/data/node/test_bitwise_and_i32_2d/test_data_set_0/input_0.pb +0 -0
  263. onnx/backend/test/data/node/test_bitwise_and_i32_2d/test_data_set_0/input_1.pb +0 -0
  264. onnx/backend/test/data/node/test_bitwise_and_i32_2d/test_data_set_0/output_0.pb +0 -0
  265. onnx/backend/test/data/node/test_bitwise_and_ui64_bcast_3v1d/test_data_set_0/input_0.pb +0 -0
  266. onnx/backend/test/data/node/test_bitwise_and_ui64_bcast_3v1d/test_data_set_0/input_1.pb +0 -0
  267. onnx/backend/test/data/node/test_bitwise_and_ui64_bcast_3v1d/test_data_set_0/output_0.pb +0 -0
  268. onnx/backend/test/data/node/test_bitwise_and_ui8_bcast_4v3d/test_data_set_0/input_0.pb +0 -0
  269. onnx/backend/test/data/node/test_bitwise_and_ui8_bcast_4v3d/test_data_set_0/input_1.pb +0 -0
  270. onnx/backend/test/data/node/test_bitwise_and_ui8_bcast_4v3d/test_data_set_0/output_0.pb +0 -0
  271. onnx/backend/test/data/node/test_bitwise_not_2d/test_data_set_0/input_0.pb +0 -0
  272. onnx/backend/test/data/node/test_bitwise_not_2d/test_data_set_0/output_0.pb +0 -0
  273. onnx/backend/test/data/node/test_bitwise_not_3d/test_data_set_0/input_0.pb +0 -0
  274. onnx/backend/test/data/node/test_bitwise_not_3d/test_data_set_0/output_0.pb +0 -0
  275. onnx/backend/test/data/node/test_bitwise_not_4d/test_data_set_0/input_0.pb +0 -0
  276. onnx/backend/test/data/node/test_bitwise_not_4d/test_data_set_0/output_0.pb +0 -0
  277. onnx/backend/test/data/node/test_bitwise_or_i16_4d/test_data_set_0/input_0.pb +0 -0
  278. onnx/backend/test/data/node/test_bitwise_or_i16_4d/test_data_set_0/input_1.pb +0 -0
  279. onnx/backend/test/data/node/test_bitwise_or_i16_4d/test_data_set_0/output_0.pb +0 -0
  280. onnx/backend/test/data/node/test_bitwise_or_i32_2d/test_data_set_0/input_0.pb +0 -0
  281. onnx/backend/test/data/node/test_bitwise_or_i32_2d/test_data_set_0/input_1.pb +0 -0
  282. onnx/backend/test/data/node/test_bitwise_or_i32_2d/test_data_set_0/output_0.pb +0 -0
  283. onnx/backend/test/data/node/test_bitwise_or_ui64_bcast_3v1d/test_data_set_0/input_0.pb +0 -0
  284. onnx/backend/test/data/node/test_bitwise_or_ui64_bcast_3v1d/test_data_set_0/input_1.pb +0 -0
  285. onnx/backend/test/data/node/test_bitwise_or_ui64_bcast_3v1d/test_data_set_0/output_0.pb +0 -0
  286. onnx/backend/test/data/node/test_bitwise_or_ui8_bcast_4v3d/test_data_set_0/input_0.pb +0 -0
  287. onnx/backend/test/data/node/test_bitwise_or_ui8_bcast_4v3d/test_data_set_0/input_1.pb +0 -0
  288. onnx/backend/test/data/node/test_bitwise_or_ui8_bcast_4v3d/test_data_set_0/output_0.pb +0 -0
  289. onnx/backend/test/data/node/test_bitwise_xor_i16_3d/test_data_set_0/input_0.pb +0 -0
  290. onnx/backend/test/data/node/test_bitwise_xor_i16_3d/test_data_set_0/input_1.pb +0 -0
  291. onnx/backend/test/data/node/test_bitwise_xor_i16_3d/test_data_set_0/output_0.pb +0 -0
  292. onnx/backend/test/data/node/test_bitwise_xor_i32_2d/test_data_set_0/input_0.pb +0 -0
  293. onnx/backend/test/data/node/test_bitwise_xor_i32_2d/test_data_set_0/input_1.pb +0 -0
  294. onnx/backend/test/data/node/test_bitwise_xor_i32_2d/test_data_set_0/output_0.pb +0 -0
  295. onnx/backend/test/data/node/test_bitwise_xor_ui64_bcast_3v1d/test_data_set_0/input_0.pb +0 -0
  296. onnx/backend/test/data/node/test_bitwise_xor_ui64_bcast_3v1d/test_data_set_0/input_1.pb +0 -0
  297. onnx/backend/test/data/node/test_bitwise_xor_ui64_bcast_3v1d/test_data_set_0/output_0.pb +0 -0
  298. onnx/backend/test/data/node/test_bitwise_xor_ui8_bcast_4v3d/test_data_set_0/input_0.pb +0 -0
  299. onnx/backend/test/data/node/test_bitwise_xor_ui8_bcast_4v3d/test_data_set_0/input_1.pb +0 -0
  300. onnx/backend/test/data/node/test_bitwise_xor_ui8_bcast_4v3d/test_data_set_0/output_0.pb +0 -0
  301. onnx/backend/test/data/node/test_cast_BFLOAT16_to_FLOAT/model.onnx +0 -0
  302. onnx/backend/test/data/node/test_cast_DOUBLE_to_FLOAT/model.onnx +0 -0
  303. onnx/backend/test/data/node/test_cast_DOUBLE_to_FLOAT16/model.onnx +0 -0
  304. onnx/backend/test/data/node/test_cast_FLOAT16_to_DOUBLE/model.onnx +0 -0
  305. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT/model.onnx +0 -0
  306. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FN/model.onnx +0 -0
  307. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FN/test_data_set_0/input_0.pb +2 -0
  308. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FN/test_data_set_0/output_0.pb +0 -0
  309. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
  310. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FNUZ/test_data_set_0/input_0.pb +2 -0
  311. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FNUZ/test_data_set_0/output_0.pb +0 -0
  312. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2/model.onnx +0 -0
  313. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2/test_data_set_0/input_0.pb +2 -0
  314. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2/test_data_set_0/output_0.pb +0 -0
  315. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
  316. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2FNUZ/test_data_set_0/input_0.pb +2 -0
  317. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2FNUZ/test_data_set_0/output_0.pb +0 -0
  318. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT/model.onnx +0 -0
  319. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT/test_data_set_0/input_0.pb +0 -0
  320. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT/test_data_set_0/output_0.pb +0 -0
  321. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT16/model.onnx +0 -0
  322. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT16/test_data_set_0/input_0.pb +0 -0
  323. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
  324. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT/model.onnx +0 -0
  325. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT/test_data_set_0/input_0.pb +0 -0
  326. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT/test_data_set_0/output_0.pb +0 -0
  327. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT16/model.onnx +0 -0
  328. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT16/test_data_set_0/input_0.pb +0 -0
  329. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
  330. onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT/model.onnx +0 -0
  331. onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT/test_data_set_0/input_0.pb +0 -0
  332. onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT/test_data_set_0/output_0.pb +0 -0
  333. onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT16/model.onnx +0 -0
  334. onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT16/test_data_set_0/input_0.pb +0 -0
  335. onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
  336. onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT/model.onnx +0 -0
  337. onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT/test_data_set_0/input_0.pb +0 -0
  338. onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT/test_data_set_0/output_0.pb +0 -0
  339. onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT16/model.onnx +0 -0
  340. onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT16/test_data_set_0/input_0.pb +0 -0
  341. onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
  342. onnx/backend/test/data/node/test_cast_FLOAT_to_BFLOAT16/model.onnx +0 -0
  343. onnx/backend/test/data/node/test_cast_FLOAT_to_DOUBLE/model.onnx +0 -0
  344. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT16/model.onnx +0 -0
  345. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FN/model.onnx +0 -0
  346. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/input_0.pb +0 -0
  347. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/output_0.pb +0 -0
  348. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
  349. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/input_0.pb +0 -0
  350. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/output_0.pb +0 -0
  351. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2/model.onnx +0 -0
  352. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2/test_data_set_0/input_0.pb +0 -0
  353. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2/test_data_set_0/output_0.pb +0 -0
  354. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
  355. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/input_0.pb +0 -0
  356. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/output_0.pb +0 -0
  357. onnx/backend/test/data/node/test_cast_FLOAT_to_STRING/model.onnx +0 -0
  358. onnx/backend/test/data/node/test_cast_STRING_to_FLOAT/model.onnx +0 -0
  359. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FN/model.onnx +0 -0
  360. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FN/test_data_set_0/input_0.pb +2 -0
  361. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FN/test_data_set_0/output_0.pb +0 -0
  362. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
  363. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FNUZ/test_data_set_0/input_0.pb +2 -0
  364. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FNUZ/test_data_set_0/output_0.pb +0 -0
  365. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2/model.onnx +0 -0
  366. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2/test_data_set_0/input_0.pb +2 -0
  367. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2/test_data_set_0/output_0.pb +0 -0
  368. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
  369. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2FNUZ/test_data_set_0/input_0.pb +2 -0
  370. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2FNUZ/test_data_set_0/output_0.pb +0 -0
  371. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FN/model.onnx +0 -0
  372. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/input_0.pb +0 -0
  373. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/output_0.pb +0 -0
  374. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
  375. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/input_0.pb +0 -0
  376. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/output_0.pb +0 -0
  377. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2/model.onnx +0 -0
  378. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2/test_data_set_0/input_0.pb +0 -0
  379. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2/test_data_set_0/output_0.pb +0 -0
  380. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
  381. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/input_0.pb +0 -0
  382. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/output_0.pb +0 -0
  383. onnx/backend/test/data/node/test_castlike_BFLOAT16_to_FLOAT/model.onnx +0 -0
  384. onnx/backend/test/data/node/test_castlike_BFLOAT16_to_FLOAT_expanded/model.onnx +0 -0
  385. onnx/backend/test/data/node/test_castlike_DOUBLE_to_FLOAT/model.onnx +0 -0
  386. onnx/backend/test/data/node/test_castlike_DOUBLE_to_FLOAT16/model.onnx +0 -0
  387. onnx/backend/test/data/node/test_castlike_DOUBLE_to_FLOAT16_expanded/model.onnx +0 -0
  388. onnx/backend/test/data/node/test_castlike_DOUBLE_to_FLOAT_expanded/model.onnx +0 -0
  389. onnx/backend/test/data/node/test_castlike_FLOAT16_to_DOUBLE/model.onnx +0 -0
  390. onnx/backend/test/data/node/test_castlike_FLOAT16_to_DOUBLE_expanded/model.onnx +0 -0
  391. onnx/backend/test/data/node/test_castlike_FLOAT16_to_FLOAT/model.onnx +0 -0
  392. onnx/backend/test/data/node/test_castlike_FLOAT16_to_FLOAT_expanded/model.onnx +0 -0
  393. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT/model.onnx +0 -0
  394. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT/test_data_set_0/input_0.pb +1 -0
  395. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT/test_data_set_0/input_1.pb +0 -0
  396. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT/test_data_set_0/output_0.pb +0 -0
  397. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT_expanded/model.onnx +0 -0
  398. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT_expanded/test_data_set_0/input_0.pb +1 -0
  399. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT_expanded/test_data_set_0/input_1.pb +0 -0
  400. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT_expanded/test_data_set_0/output_0.pb +0 -0
  401. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT/model.onnx +0 -0
  402. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT/test_data_set_0/input_0.pb +1 -0
  403. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT/test_data_set_0/input_1.pb +0 -0
  404. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT/test_data_set_0/output_0.pb +0 -0
  405. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT_expanded/model.onnx +0 -0
  406. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT_expanded/test_data_set_0/input_0.pb +1 -0
  407. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT_expanded/test_data_set_0/input_1.pb +0 -0
  408. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT_expanded/test_data_set_0/output_0.pb +0 -0
  409. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT/model.onnx +0 -0
  410. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT/test_data_set_0/input_0.pb +1 -0
  411. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT/test_data_set_0/input_1.pb +0 -0
  412. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT/test_data_set_0/output_0.pb +0 -0
  413. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT_expanded/model.onnx +0 -0
  414. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT_expanded/test_data_set_0/input_0.pb +1 -0
  415. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT_expanded/test_data_set_0/input_1.pb +0 -0
  416. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT_expanded/test_data_set_0/output_0.pb +0 -0
  417. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT/model.onnx +0 -0
  418. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT/test_data_set_0/input_0.pb +1 -0
  419. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT/test_data_set_0/input_1.pb +0 -0
  420. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT/test_data_set_0/output_0.pb +0 -0
  421. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT_expanded/model.onnx +0 -0
  422. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT_expanded/test_data_set_0/input_0.pb +1 -0
  423. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT_expanded/test_data_set_0/input_1.pb +0 -0
  424. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT_expanded/test_data_set_0/output_0.pb +0 -0
  425. onnx/backend/test/data/node/test_castlike_FLOAT_to_BFLOAT16/model.onnx +0 -0
  426. onnx/backend/test/data/node/test_castlike_FLOAT_to_BFLOAT16_expanded/model.onnx +0 -0
  427. onnx/backend/test/data/node/test_castlike_FLOAT_to_DOUBLE/model.onnx +0 -0
  428. onnx/backend/test/data/node/test_castlike_FLOAT_to_DOUBLE_expanded/model.onnx +0 -0
  429. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT16/model.onnx +0 -0
  430. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT16_expanded/model.onnx +0 -0
  431. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN/model.onnx +0 -0
  432. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/input_0.pb +0 -0
  433. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/input_1.pb +1 -0
  434. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/output_0.pb +1 -0
  435. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
  436. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/input_0.pb +0 -0
  437. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/input_1.pb +1 -0
  438. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/output_0.pb +1 -0
  439. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ_expanded/model.onnx +0 -0
  440. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ_expanded/test_data_set_0/input_0.pb +0 -0
  441. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ_expanded/test_data_set_0/input_1.pb +1 -0
  442. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ_expanded/test_data_set_0/output_0.pb +1 -0
  443. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN_expanded/model.onnx +0 -0
  444. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN_expanded/test_data_set_0/input_0.pb +0 -0
  445. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN_expanded/test_data_set_0/input_1.pb +1 -0
  446. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN_expanded/test_data_set_0/output_0.pb +1 -0
  447. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2/model.onnx +0 -0
  448. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2/test_data_set_0/input_0.pb +0 -0
  449. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2/test_data_set_0/input_1.pb +1 -0
  450. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2/test_data_set_0/output_0.pb +1 -0
  451. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
  452. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/input_0.pb +0 -0
  453. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/input_1.pb +1 -0
  454. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/output_0.pb +1 -0
  455. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ_expanded/model.onnx +0 -0
  456. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ_expanded/test_data_set_0/input_0.pb +0 -0
  457. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ_expanded/test_data_set_0/input_1.pb +1 -0
  458. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ_expanded/test_data_set_0/output_0.pb +1 -0
  459. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2_expanded/model.onnx +0 -0
  460. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2_expanded/test_data_set_0/input_0.pb +0 -0
  461. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2_expanded/test_data_set_0/input_1.pb +1 -0
  462. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2_expanded/test_data_set_0/output_0.pb +1 -0
  463. onnx/backend/test/data/node/test_castlike_FLOAT_to_STRING/model.onnx +0 -0
  464. onnx/backend/test/data/node/test_castlike_FLOAT_to_STRING_expanded/model.onnx +0 -0
  465. onnx/backend/test/data/node/test_castlike_STRING_to_FLOAT/model.onnx +0 -0
  466. onnx/backend/test/data/node/test_castlike_STRING_to_FLOAT_expanded/model.onnx +0 -0
  467. onnx/backend/test/data/node/test_center_crop_pad_crop_negative_axes_hwc/model.onnx +0 -0
  468. onnx/backend/test/data/node/test_center_crop_pad_crop_negative_axes_hwc/test_data_set_0/input_0.pb +0 -0
  469. onnx/backend/test/data/node/test_center_crop_pad_crop_negative_axes_hwc/test_data_set_0/input_1.pb +0 -0
  470. onnx/backend/test/data/node/test_center_crop_pad_crop_negative_axes_hwc/test_data_set_0/output_0.pb +0 -0
  471. onnx/backend/test/data/node/test_center_crop_pad_crop_negative_axes_hwc_expanded/model.onnx +0 -0
  472. onnx/backend/test/data/node/test_center_crop_pad_crop_negative_axes_hwc_expanded/test_data_set_0/input_0.pb +0 -0
  473. onnx/backend/test/data/node/test_center_crop_pad_crop_negative_axes_hwc_expanded/test_data_set_0/input_1.pb +0 -0
  474. onnx/backend/test/data/node/test_center_crop_pad_crop_negative_axes_hwc_expanded/test_data_set_0/output_0.pb +0 -0
  475. onnx/backend/test/data/node/test_col2im_pads/test_data_set_0/output_0.pb +0 -0
  476. onnx/backend/test/data/node/test_constant/model.onnx +0 -0
  477. onnx/backend/test/data/node/test_constant_pad/model.onnx +0 -0
  478. onnx/backend/test/data/node/test_constant_pad_axes/model.onnx +0 -0
  479. onnx/backend/test/data/node/test_constant_pad_negative_axes/model.onnx +0 -0
  480. onnx/backend/test/data/node/test_constant_pad_negative_axes/test_data_set_0/input_0.pb +1 -0
  481. onnx/backend/test/data/node/test_constant_pad_negative_axes/test_data_set_0/input_1.pb +0 -0
  482. onnx/backend/test/data/node/test_constant_pad_negative_axes/test_data_set_0/input_2.pb +1 -0
  483. onnx/backend/test/data/node/test_constant_pad_negative_axes/test_data_set_0/input_3.pb +1 -0
  484. onnx/backend/test/data/node/test_constant_pad_negative_axes/test_data_set_0/output_0.pb +1 -0
  485. onnx/backend/test/data/node/test_cosh/test_data_set_0/output_0.pb +1 -1
  486. onnx/backend/test/data/node/test_cosh_example/test_data_set_0/output_0.pb +0 -0
  487. onnx/backend/test/data/node/test_deform_conv_with_mask_bias/model.onnx +0 -0
  488. onnx/backend/test/data/node/test_deform_conv_with_mask_bias/test_data_set_0/input_0.pb +0 -0
  489. onnx/backend/test/data/node/test_deform_conv_with_mask_bias/test_data_set_0/input_1.pb +0 -0
  490. onnx/backend/test/data/node/test_deform_conv_with_mask_bias/test_data_set_0/input_2.pb +0 -0
  491. onnx/backend/test/data/node/test_deform_conv_with_mask_bias/test_data_set_0/input_3.pb +0 -0
  492. onnx/backend/test/data/node/test_deform_conv_with_mask_bias/test_data_set_0/input_4.pb +0 -0
  493. onnx/backend/test/data/node/test_deform_conv_with_mask_bias/test_data_set_0/output_0.pb +0 -0
  494. onnx/backend/test/data/node/test_deform_conv_with_multiple_offset_groups/model.onnx +0 -0
  495. onnx/backend/test/data/node/test_deform_conv_with_multiple_offset_groups/test_data_set_0/input_0.pb +0 -0
  496. onnx/backend/test/data/node/test_deform_conv_with_multiple_offset_groups/test_data_set_0/input_1.pb +0 -0
  497. onnx/backend/test/data/node/test_deform_conv_with_multiple_offset_groups/test_data_set_0/input_2.pb +0 -0
  498. onnx/backend/test/data/node/test_deform_conv_with_multiple_offset_groups/test_data_set_0/output_0.pb +0 -0
  499. onnx/backend/test/data/node/test_dequantizelinear/model.onnx +0 -0
  500. onnx/backend/test/data/node/test_dequantizelinear_axis/model.onnx +0 -0
  501. onnx/backend/test/data/node/test_dequantizelinear_e4m3fn/model.onnx +0 -0
  502. onnx/backend/test/data/node/test_dequantizelinear_e4m3fn/test_data_set_0/input_0.pb +0 -0
  503. onnx/backend/test/data/node/test_dequantizelinear_e4m3fn/test_data_set_0/input_1.pb +0 -0
  504. onnx/backend/test/data/node/test_dequantizelinear_e4m3fn/test_data_set_0/output_0.pb +0 -0
  505. onnx/backend/test/data/node/test_dequantizelinear_e5m2/model.onnx +0 -0
  506. onnx/backend/test/data/node/test_dequantizelinear_e5m2/test_data_set_0/input_0.pb +0 -0
  507. onnx/backend/test/data/node/test_dequantizelinear_e5m2/test_data_set_0/input_1.pb +0 -0
  508. onnx/backend/test/data/node/test_dequantizelinear_e5m2/test_data_set_0/output_0.pb +0 -0
  509. onnx/backend/test/data/node/test_edge_pad/model.onnx +0 -0
  510. onnx/backend/test/data/node/test_equal/model.onnx +0 -0
  511. onnx/backend/test/data/node/test_equal_bcast/model.onnx +0 -0
  512. onnx/backend/test/data/node/test_equal_string/model.onnx +0 -0
  513. onnx/backend/test/data/node/test_equal_string/test_data_set_0/input_0.pb +1 -0
  514. onnx/backend/test/data/node/test_equal_string/test_data_set_0/input_1.pb +1 -0
  515. onnx/backend/test/data/node/test_equal_string/test_data_set_0/output_0.pb +0 -0
  516. onnx/backend/test/data/node/test_equal_string_broadcast/model.onnx +0 -0
  517. onnx/backend/test/data/node/test_equal_string_broadcast/test_data_set_0/input_0.pb +1 -0
  518. onnx/backend/test/data/node/test_equal_string_broadcast/test_data_set_0/input_1.pb +1 -0
  519. onnx/backend/test/data/node/test_equal_string_broadcast/test_data_set_0/output_0.pb +0 -0
  520. onnx/backend/test/data/node/test_identity/model.onnx +0 -0
  521. onnx/backend/test/data/node/test_identity_sequence/model.onnx +0 -0
  522. onnx/backend/test/data/node/test_lppool_1d_default/model.onnx +0 -0
  523. onnx/backend/test/data/node/test_lppool_1d_default/test_data_set_0/input_0.pb +1 -0
  524. onnx/backend/test/data/node/test_lppool_1d_default/test_data_set_0/output_0.pb +2 -0
  525. onnx/backend/test/data/node/test_lppool_2d_default/model.onnx +0 -0
  526. onnx/backend/test/data/node/test_lppool_2d_default/test_data_set_0/input_0.pb +0 -0
  527. onnx/backend/test/data/node/test_lppool_2d_default/test_data_set_0/output_0.pb +0 -0
  528. onnx/backend/test/data/node/test_lppool_2d_dilations/model.onnx +0 -0
  529. onnx/backend/test/data/node/test_lppool_2d_dilations/test_data_set_0/input_0.pb +0 -0
  530. onnx/backend/test/data/node/test_lppool_2d_dilations/test_data_set_0/output_0.pb +1 -0
  531. onnx/backend/test/data/node/test_lppool_2d_pads/model.onnx +0 -0
  532. onnx/backend/test/data/node/test_lppool_2d_pads/test_data_set_0/input_0.pb +0 -0
  533. onnx/backend/test/data/node/test_lppool_2d_pads/test_data_set_0/output_0.pb +0 -0
  534. onnx/backend/test/data/node/test_lppool_2d_same_lower/model.onnx +0 -0
  535. onnx/backend/test/data/node/test_lppool_2d_same_lower/test_data_set_0/input_0.pb +0 -0
  536. onnx/backend/test/data/node/test_lppool_2d_same_lower/test_data_set_0/output_0.pb +0 -0
  537. onnx/backend/test/data/node/test_lppool_2d_same_upper/model.onnx +0 -0
  538. onnx/backend/test/data/node/test_lppool_2d_same_upper/test_data_set_0/input_0.pb +0 -0
  539. onnx/backend/test/data/node/test_lppool_2d_same_upper/test_data_set_0/output_0.pb +0 -0
  540. onnx/backend/test/data/node/test_lppool_2d_strides/model.onnx +0 -0
  541. onnx/backend/test/data/node/test_lppool_2d_strides/test_data_set_0/input_0.pb +0 -0
  542. onnx/backend/test/data/node/test_lppool_2d_strides/test_data_set_0/output_0.pb +0 -0
  543. onnx/backend/test/data/node/test_lppool_3d_default/model.onnx +0 -0
  544. onnx/backend/test/data/node/test_lppool_3d_default/test_data_set_0/input_0.pb +0 -0
  545. onnx/backend/test/data/node/test_lppool_3d_default/test_data_set_0/output_0.pb +0 -0
  546. onnx/backend/test/data/node/test_mish/test_data_set_0/output_0.pb +0 -0
  547. onnx/backend/test/data/node/test_mish_expanded/test_data_set_0/output_0.pb +0 -0
  548. onnx/backend/test/data/node/test_quantizelinear/model.onnx +0 -0
  549. onnx/backend/test/data/node/test_quantizelinear_axis/model.onnx +0 -0
  550. onnx/backend/test/data/node/test_quantizelinear_e4m3fn/model.onnx +0 -0
  551. onnx/backend/test/data/node/test_quantizelinear_e4m3fn/test_data_set_0/input_0.pb +0 -0
  552. onnx/backend/test/data/node/test_quantizelinear_e4m3fn/test_data_set_0/input_1.pb +0 -0
  553. onnx/backend/test/data/node/test_quantizelinear_e4m3fn/test_data_set_0/input_2.pb +0 -0
  554. onnx/backend/test/data/node/test_quantizelinear_e4m3fn/test_data_set_0/output_0.pb +0 -0
  555. onnx/backend/test/data/node/test_quantizelinear_e5m2/model.onnx +0 -0
  556. onnx/backend/test/data/node/test_quantizelinear_e5m2/test_data_set_0/input_0.pb +0 -0
  557. onnx/backend/test/data/node/test_quantizelinear_e5m2/test_data_set_0/input_1.pb +0 -0
  558. onnx/backend/test/data/node/test_quantizelinear_e5m2/test_data_set_0/input_2.pb +0 -0
  559. onnx/backend/test/data/node/test_quantizelinear_e5m2/test_data_set_0/output_0.pb +0 -0
  560. onnx/backend/test/data/node/test_reflect_pad/model.onnx +0 -0
  561. onnx/backend/test/data/node/test_reshape_allowzero_reordered/model.onnx +0 -0
  562. onnx/backend/test/data/node/test_reshape_extended_dims/model.onnx +0 -0
  563. onnx/backend/test/data/node/test_reshape_negative_dim/model.onnx +0 -0
  564. onnx/backend/test/data/node/test_reshape_negative_extended_dims/model.onnx +0 -0
  565. onnx/backend/test/data/node/test_reshape_one_dim/model.onnx +0 -0
  566. onnx/backend/test/data/node/test_reshape_reduced_dims/model.onnx +0 -0
  567. onnx/backend/test/data/node/test_reshape_reordered_all_dims/model.onnx +0 -0
  568. onnx/backend/test/data/node/test_reshape_reordered_last_dims/model.onnx +0 -0
  569. onnx/backend/test/data/node/test_reshape_zero_and_negative_dim/model.onnx +0 -0
  570. onnx/backend/test/data/node/test_reshape_zero_dim/model.onnx +0 -0
  571. onnx/backend/test/data/node/test_resize_downsample_scales_cubic/model.onnx +0 -0
  572. onnx/backend/test/data/node/test_resize_downsample_scales_cubic_A_n0p5_exclude_outside/model.onnx +0 -0
  573. onnx/backend/test/data/node/test_resize_downsample_scales_cubic_align_corners/model.onnx +0 -0
  574. onnx/backend/test/data/node/test_resize_downsample_scales_cubic_antialias/model.onnx +0 -0
  575. onnx/backend/test/data/node/test_resize_downsample_scales_linear/model.onnx +0 -0
  576. onnx/backend/test/data/node/test_resize_downsample_scales_linear_align_corners/model.onnx +0 -0
  577. onnx/backend/test/data/node/test_resize_downsample_scales_linear_antialias/model.onnx +0 -0
  578. onnx/backend/test/data/node/test_resize_downsample_scales_linear_half_pixel_symmetric/model.onnx +0 -0
  579. onnx/backend/test/data/node/test_resize_downsample_scales_linear_half_pixel_symmetric/test_data_set_0/input_0.pb +0 -0
  580. onnx/backend/test/data/node/test_resize_downsample_scales_linear_half_pixel_symmetric/test_data_set_0/input_1.pb +0 -0
  581. onnx/backend/test/data/node/test_resize_downsample_scales_linear_half_pixel_symmetric/test_data_set_0/output_0.pb +1 -0
  582. onnx/backend/test/data/node/test_resize_downsample_scales_nearest/model.onnx +0 -0
  583. onnx/backend/test/data/node/test_resize_downsample_sizes_cubic/model.onnx +0 -0
  584. onnx/backend/test/data/node/test_resize_downsample_sizes_cubic_antialias/model.onnx +0 -0
  585. onnx/backend/test/data/node/test_resize_downsample_sizes_linear_antialias/model.onnx +0 -0
  586. onnx/backend/test/data/node/test_resize_downsample_sizes_linear_pytorch_half_pixel/model.onnx +0 -0
  587. onnx/backend/test/data/node/test_resize_downsample_sizes_nearest/model.onnx +0 -0
  588. onnx/backend/test/data/node/test_resize_downsample_sizes_nearest_not_larger/model.onnx +0 -0
  589. onnx/backend/test/data/node/test_resize_downsample_sizes_nearest_not_smaller/model.onnx +0 -0
  590. onnx/backend/test/data/node/test_resize_tf_crop_and_resize/model.onnx +0 -0
  591. onnx/backend/test/data/node/test_resize_tf_crop_and_resize_axes_2_3/model.onnx +0 -0
  592. onnx/backend/test/data/node/test_resize_tf_crop_and_resize_axes_3_2/model.onnx +0 -0
  593. onnx/backend/test/data/node/test_resize_upsample_scales_cubic/model.onnx +0 -0
  594. onnx/backend/test/data/node/test_resize_upsample_scales_cubic_A_n0p5_exclude_outside/model.onnx +0 -0
  595. onnx/backend/test/data/node/test_resize_upsample_scales_cubic_align_corners/model.onnx +0 -0
  596. onnx/backend/test/data/node/test_resize_upsample_scales_cubic_asymmetric/model.onnx +0 -0
  597. onnx/backend/test/data/node/test_resize_upsample_scales_linear/model.onnx +0 -0
  598. onnx/backend/test/data/node/test_resize_upsample_scales_linear_align_corners/model.onnx +0 -0
  599. onnx/backend/test/data/node/test_resize_upsample_scales_linear_half_pixel_symmetric/model.onnx +0 -0
  600. onnx/backend/test/data/node/test_resize_upsample_scales_linear_half_pixel_symmetric/test_data_set_0/input_0.pb +0 -0
  601. onnx/backend/test/data/node/test_resize_upsample_scales_linear_half_pixel_symmetric/test_data_set_0/input_1.pb +0 -0
  602. onnx/backend/test/data/node/test_resize_upsample_scales_linear_half_pixel_symmetric/test_data_set_0/output_0.pb +0 -0
  603. onnx/backend/test/data/node/test_resize_upsample_scales_nearest/model.onnx +0 -0
  604. onnx/backend/test/data/node/test_resize_upsample_scales_nearest_axes_2_3/model.onnx +0 -0
  605. onnx/backend/test/data/node/test_resize_upsample_scales_nearest_axes_3_2/model.onnx +0 -0
  606. onnx/backend/test/data/node/test_resize_upsample_sizes_cubic/model.onnx +0 -0
  607. onnx/backend/test/data/node/test_resize_upsample_sizes_nearest/model.onnx +0 -0
  608. onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_axes_2_3/model.onnx +0 -0
  609. onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_axes_3_2/model.onnx +0 -0
  610. onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_ceil_half_pixel/model.onnx +0 -0
  611. onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_floor_align_corners/model.onnx +0 -0
  612. onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_not_larger/model.onnx +0 -0
  613. onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric/model.onnx +0 -0
  614. onnx/backend/test/data/node/test_roialign_mode_max/model.onnx +0 -0
  615. onnx/backend/test/data/node/test_roialign_mode_max/test_data_set_0/input_0.pb +0 -0
  616. onnx/backend/test/data/node/test_roialign_mode_max/test_data_set_0/input_1.pb +0 -0
  617. onnx/backend/test/data/node/test_roialign_mode_max/test_data_set_0/input_2.pb +0 -0
  618. onnx/backend/test/data/node/test_roialign_mode_max/test_data_set_0/output_0.pb +2 -0
  619. onnx/backend/test/data/node/test_shape/model.onnx +0 -0
  620. onnx/backend/test/data/node/test_shape_clip_end/model.onnx +0 -0
  621. onnx/backend/test/data/node/test_shape_clip_start/model.onnx +0 -0
  622. onnx/backend/test/data/node/test_shape_end_1/model.onnx +0 -0
  623. onnx/backend/test/data/node/test_shape_end_negative_1/model.onnx +0 -0
  624. onnx/backend/test/data/node/test_shape_example/model.onnx +0 -0
  625. onnx/backend/test/data/node/test_shape_start_1/model.onnx +0 -0
  626. onnx/backend/test/data/node/test_shape_start_1_end_2/model.onnx +0 -0
  627. onnx/backend/test/data/node/test_shape_start_1_end_negative_1/model.onnx +0 -0
  628. onnx/backend/test/data/node/test_shape_start_negative_1/model.onnx +0 -0
  629. onnx/backend/test/data/node/test_sinh/test_data_set_0/output_0.pb +1 -1
  630. onnx/backend/test/data/node/test_size/model.onnx +0 -0
  631. onnx/backend/test/data/node/test_size_example/model.onnx +0 -0
  632. onnx/backend/test/data/node/test_softplus_example_expanded_ver18/model.onnx +0 -0
  633. onnx/backend/test/data/node/{test_softplus_expanded → test_softplus_expanded_ver18}/model.onnx +0 -0
  634. onnx/backend/test/data/node/test_split_to_sequence_1/model.onnx +0 -0
  635. onnx/backend/test/data/node/test_split_to_sequence_1/test_data_set_0/input_0.pb +0 -0
  636. onnx/backend/test/data/node/test_split_to_sequence_1/test_data_set_0/input_1.pb +0 -0
  637. onnx/backend/test/data/node/test_split_to_sequence_1/test_data_set_0/output_0.pb +0 -0
  638. onnx/backend/test/data/node/test_split_to_sequence_2/model.onnx +0 -0
  639. onnx/backend/test/data/node/test_split_to_sequence_2/test_data_set_0/input_0.pb +0 -0
  640. onnx/backend/test/data/node/test_split_to_sequence_2/test_data_set_0/input_1.pb +0 -0
  641. onnx/backend/test/data/node/test_split_to_sequence_2/test_data_set_0/output_0.pb +0 -0
  642. onnx/backend/test/data/node/test_split_to_sequence_nokeepdims/model.onnx +0 -0
  643. onnx/backend/test/data/node/test_split_to_sequence_nokeepdims/test_data_set_0/input_0.pb +0 -0
  644. onnx/backend/test/data/node/test_split_to_sequence_nokeepdims/test_data_set_0/output_0.pb +0 -0
  645. onnx/backend/test/data/node/test_tan/test_data_set_0/output_0.pb +1 -1
  646. onnx/backend/test/data/node/test_wrap_pad/model.onnx +0 -0
  647. onnx/backend/test/data/node/test_wrap_pad/test_data_set_0/input_0.pb +0 -0
  648. onnx/backend/test/data/node/test_wrap_pad/test_data_set_0/input_1.pb +0 -0
  649. onnx/backend/test/data/node/test_wrap_pad/test_data_set_0/output_0.pb +0 -0
  650. onnx/backend/test/data/real/test_bvlc_alexnet/data.json +1 -1
  651. onnx/backend/test/data/real/test_densenet121/data.json +1 -1
  652. onnx/backend/test/data/real/test_inception_v1/data.json +1 -1
  653. onnx/backend/test/data/real/test_inception_v2/data.json +1 -1
  654. onnx/backend/test/data/real/test_resnet50/data.json +1 -1
  655. onnx/backend/test/data/real/test_shufflenet/data.json +1 -1
  656. onnx/backend/test/data/real/test_squeezenet/data.json +1 -1
  657. onnx/backend/test/data/real/test_vgg19/data.json +1 -1
  658. onnx/backend/test/data/real/test_zfnet512/data.json +1 -1
  659. onnx/backend/test/loader/__init__.py +3 -1
  660. onnx/backend/test/report/__init__.py +3 -1
  661. onnx/backend/test/report/base.py +2 -0
  662. onnx/backend/test/report/coverage.py +8 -14
  663. onnx/backend/test/runner/__init__.py +146 -39
  664. onnx/backend/test/runner/item.py +2 -0
  665. onnx/backend/test/stat_coverage.py +23 -26
  666. onnx/bin/__init__.py +2 -0
  667. onnx/bin/checker.py +2 -0
  668. onnx/checker.cc +26 -9
  669. onnx/checker.h +3 -3
  670. onnx/checker.py +22 -5
  671. onnx/common/array_ref.h +2 -0
  672. onnx/common/assertions.cc +2 -0
  673. onnx/common/assertions.h +2 -0
  674. onnx/common/common.h +2 -0
  675. onnx/common/constants.h +3 -3
  676. onnx/common/file_utils.h +3 -1
  677. onnx/common/graph_node_list.h +2 -0
  678. onnx/common/interned_strings.cc +2 -0
  679. onnx/common/interned_strings.h +2 -0
  680. onnx/common/ir.h +2 -0
  681. onnx/common/ir_pb_converter.cc +7 -1
  682. onnx/common/ir_pb_converter.h +2 -0
  683. onnx/common/model_helpers.cc +3 -3
  684. onnx/common/model_helpers.h +3 -3
  685. onnx/common/path.cc +0 -1
  686. onnx/common/path.h +0 -1
  687. onnx/common/platform_helpers.h +2 -0
  688. onnx/common/status.cc +2 -0
  689. onnx/common/status.h +2 -0
  690. onnx/common/stl_backports.h +3 -3
  691. onnx/common/tensor.h +24 -171
  692. onnx/common/version.h +3 -1
  693. onnx/compose.py +40 -32
  694. onnx/cpp2py_export.cc +268 -89
  695. onnx/defs/__init__.py +9 -7
  696. onnx/defs/attr_proto_util.cc +2 -0
  697. onnx/defs/attr_proto_util.h +2 -0
  698. onnx/defs/controlflow/defs.cc +25 -369
  699. onnx/defs/controlflow/old.cc +444 -0
  700. onnx/defs/controlflow/utils.cc +357 -0
  701. onnx/defs/controlflow/utils.h +21 -0
  702. onnx/defs/data_propagators.h +2 -0
  703. onnx/defs/data_type_utils.cc +6 -2
  704. onnx/defs/gen_doc.py +32 -46
  705. onnx/defs/gen_shape_inference_information.py +2 -0
  706. onnx/defs/generator/defs.cc +21 -19
  707. onnx/defs/generator/old.cc +159 -0
  708. onnx/defs/logical/defs.cc +17 -16
  709. onnx/defs/logical/old.cc +23 -0
  710. onnx/defs/math/defs.cc +155 -131
  711. onnx/defs/math/old.cc +1 -1
  712. onnx/defs/nn/defs.cc +135 -45
  713. onnx/defs/nn/old.cc +142 -9
  714. onnx/defs/operator_sets.h +45 -0
  715. onnx/defs/optional/defs.cc +8 -4
  716. onnx/defs/parser.cc +50 -3
  717. onnx/defs/parser.h +43 -31
  718. onnx/defs/printer.cc +7 -1
  719. onnx/defs/printer.h +1 -1
  720. onnx/defs/quantization/defs.cc +63 -26
  721. onnx/defs/quantization/old.cc +102 -1
  722. onnx/defs/reduction/defs.cc +1 -1
  723. onnx/defs/reduction/utils.cc +5 -4
  724. onnx/defs/rnn/defs.cc +95 -173
  725. onnx/defs/schema.cc +45 -29
  726. onnx/defs/schema.h +125 -15
  727. onnx/defs/sequence/defs.cc +11 -8
  728. onnx/defs/shape_inference.cc +25 -4
  729. onnx/defs/shape_inference.h +29 -1
  730. onnx/defs/tensor/defs.cc +500 -566
  731. onnx/defs/tensor/old.cc +777 -47
  732. onnx/defs/tensor/utils.cc +130 -8
  733. onnx/defs/tensor/utils.h +2 -0
  734. onnx/defs/tensor_proto_util.cc +3 -0
  735. onnx/defs/traditionalml/defs.cc +19 -2
  736. onnx/examples/Protobufs.ipynb +129 -31
  737. onnx/examples/check_model.ipynb +29 -21
  738. onnx/examples/load_model.ipynb +25 -3
  739. onnx/examples/make_model.ipynb +32 -23
  740. onnx/external_data_helper.py +6 -6
  741. onnx/frontend/__init__.py +2 -0
  742. onnx/gen_proto.py +18 -24
  743. onnx/helper.py +393 -108
  744. onnx/hub.py +189 -20
  745. onnx/mapping.py +29 -3
  746. onnx/numpy_helper.py +263 -52
  747. onnx/onnx-ml.proto +28 -6
  748. onnx/onnx-operators-ml.proto +1 -1
  749. onnx/onnx-operators.in.proto +1 -1
  750. onnx/onnx-operators.proto +1 -1
  751. onnx/onnx.in.proto +28 -6
  752. onnx/onnx.proto +28 -6
  753. onnx/onnx_cpp2py_export.cp310-win_amd64.pyd +0 -0
  754. onnx/onnx_data_pb2.pyi +2 -1
  755. onnx/onnx_ml_pb2.py +33 -33
  756. onnx/onnx_ml_pb2.pyi +12 -2
  757. onnx/onnx_operators_ml_pb2.pyi +2 -1
  758. onnx/parser.py +29 -13
  759. onnx/printer.py +6 -4
  760. onnx/proto_utils.h +3 -3
  761. onnx/py_utils.h +3 -3
  762. onnx/reference/__init__.py +2 -0
  763. onnx/reference/custom_element_types.py +11 -0
  764. onnx/reference/op_run.py +84 -8
  765. onnx/reference/ops/__init__.py +5 -1
  766. onnx/reference/ops/_helpers.py +55 -0
  767. onnx/reference/ops/_op.py +19 -12
  768. onnx/reference/ops/_op_common_indices.py +2 -0
  769. onnx/reference/ops/_op_common_pool.py +4 -9
  770. onnx/reference/ops/_op_common_random.py +2 -0
  771. onnx/reference/ops/_op_common_window.py +2 -0
  772. onnx/reference/ops/_op_list.py +208 -214
  773. onnx/reference/ops/aionnx_preview_training/__init__.py +4 -2
  774. onnx/reference/ops/aionnx_preview_training/_op_list.py +15 -38
  775. onnx/reference/ops/aionnx_preview_training/_op_run_training.py +2 -0
  776. onnx/reference/ops/aionnx_preview_training/op_adagrad.py +3 -1
  777. onnx/reference/ops/aionnx_preview_training/op_adam.py +3 -1
  778. onnx/reference/ops/aionnx_preview_training/op_momentum.py +3 -1
  779. onnx/reference/ops/aionnxml/__init__.py +3 -0
  780. onnx/reference/ops/aionnxml/_common_classifier.py +81 -0
  781. onnx/reference/ops/aionnxml/_op_list.py +97 -0
  782. onnx/reference/ops/aionnxml/_op_run_aionnxml.py +8 -0
  783. onnx/reference/ops/aionnxml/op_array_feature_extractor.py +50 -0
  784. onnx/reference/ops/aionnxml/op_binarizer.py +15 -0
  785. onnx/reference/ops/aionnxml/op_dict_vectorizer.py +56 -0
  786. onnx/reference/ops/aionnxml/op_feature_vectorizer.py +30 -0
  787. onnx/reference/ops/aionnxml/op_imputer.py +47 -0
  788. onnx/reference/ops/aionnxml/op_label_encoder.py +52 -0
  789. onnx/reference/ops/aionnxml/op_linear_classifier.py +99 -0
  790. onnx/reference/ops/aionnxml/op_linear_regressor.py +26 -0
  791. onnx/reference/ops/aionnxml/op_normalizer.py +41 -0
  792. onnx/reference/ops/aionnxml/op_one_hot_encoder.py +55 -0
  793. onnx/reference/ops/aionnxml/op_scaler.py +12 -0
  794. onnx/reference/ops/aionnxml/op_svm_classifier.py +334 -0
  795. onnx/reference/ops/aionnxml/op_svm_helper.py +99 -0
  796. onnx/reference/ops/aionnxml/op_svm_regressor.py +45 -0
  797. onnx/reference/ops/aionnxml/op_tree_ensemble_classifier.py +132 -0
  798. onnx/reference/ops/aionnxml/op_tree_ensemble_helper.py +109 -0
  799. onnx/reference/ops/aionnxml/op_tree_ensemble_regressor.py +105 -0
  800. onnx/reference/ops/experimental/__init__.py +3 -1
  801. onnx/reference/ops/experimental/_op_list.py +15 -36
  802. onnx/reference/ops/experimental/_op_run_experimental.py +2 -0
  803. onnx/reference/ops/experimental/op_im2col.py +3 -2
  804. onnx/reference/ops/op_abs.py +3 -1
  805. onnx/reference/ops/op_acos.py +3 -1
  806. onnx/reference/ops/op_acosh.py +3 -1
  807. onnx/reference/ops/op_add.py +3 -1
  808. onnx/reference/ops/op_and.py +3 -1
  809. onnx/reference/ops/op_argmax.py +4 -9
  810. onnx/reference/ops/op_argmin.py +4 -9
  811. onnx/reference/ops/op_asin.py +3 -1
  812. onnx/reference/ops/op_asinh.py +3 -1
  813. onnx/reference/ops/op_atan.py +3 -1
  814. onnx/reference/ops/op_atanh.py +3 -1
  815. onnx/reference/ops/op_attribute_has_value.py +2 -0
  816. onnx/reference/ops/op_average_pool.py +80 -2
  817. onnx/reference/ops/op_batch_normalization.py +14 -11
  818. onnx/reference/ops/op_bernoulli.py +3 -2
  819. onnx/reference/ops/op_bitshift.py +3 -1
  820. onnx/reference/ops/op_bitwise_and.py +3 -1
  821. onnx/reference/ops/op_bitwise_not.py +3 -1
  822. onnx/reference/ops/op_bitwise_or.py +3 -1
  823. onnx/reference/ops/op_bitwise_xor.py +3 -1
  824. onnx/reference/ops/op_blackman_window.py +3 -1
  825. onnx/reference/ops/op_cast.py +91 -10
  826. onnx/reference/ops/op_cast_like.py +32 -7
  827. onnx/reference/ops/op_ceil.py +3 -1
  828. onnx/reference/ops/op_celu.py +3 -1
  829. onnx/reference/ops/op_center_crop_pad.py +7 -3
  830. onnx/reference/ops/op_clip.py +2 -7
  831. onnx/reference/ops/op_col2im.py +3 -2
  832. onnx/reference/ops/op_compress.py +2 -0
  833. onnx/reference/ops/op_concat.py +6 -5
  834. onnx/reference/ops/op_concat_from_sequence.py +2 -0
  835. onnx/reference/ops/op_constant.py +46 -35
  836. onnx/reference/ops/op_constant_of_shape.py +4 -0
  837. onnx/reference/ops/op_conv.py +62 -39
  838. onnx/reference/ops/op_conv_integer.py +3 -2
  839. onnx/reference/ops/op_conv_transpose.py +4 -3
  840. onnx/reference/ops/op_cos.py +3 -1
  841. onnx/reference/ops/op_cosh.py +3 -1
  842. onnx/reference/ops/op_cum_sum.py +2 -0
  843. onnx/reference/ops/op_deform_conv.py +178 -0
  844. onnx/reference/ops/op_depth_to_space.py +2 -0
  845. onnx/reference/ops/op_dequantize_linear.py +72 -21
  846. onnx/reference/ops/op_det.py +3 -4
  847. onnx/reference/ops/op_dft.py +2 -0
  848. onnx/reference/ops/op_div.py +3 -1
  849. onnx/reference/ops/op_dropout.py +2 -7
  850. onnx/reference/ops/op_dynamic_quantize_linear.py +2 -0
  851. onnx/reference/ops/op_einsum.py +2 -0
  852. onnx/reference/ops/op_elu.py +4 -2
  853. onnx/reference/ops/op_equal.py +3 -1
  854. onnx/reference/ops/op_erf.py +3 -1
  855. onnx/reference/ops/op_exp.py +4 -2
  856. onnx/reference/ops/op_expand.py +2 -0
  857. onnx/reference/ops/op_eyelike.py +9 -4
  858. onnx/reference/ops/op_flatten.py +3 -1
  859. onnx/reference/ops/op_floor.py +3 -1
  860. onnx/reference/ops/op_gather.py +2 -0
  861. onnx/reference/ops/op_gather_elements.py +2 -0
  862. onnx/reference/ops/op_gathernd.py +3 -1
  863. onnx/reference/ops/op_gemm.py +5 -10
  864. onnx/reference/ops/op_global_average_pool.py +6 -5
  865. onnx/reference/ops/op_global_max_pool.py +2 -0
  866. onnx/reference/ops/op_greater.py +3 -1
  867. onnx/reference/ops/op_greater_or_equal.py +3 -1
  868. onnx/reference/ops/op_grid_sample.py +3 -1
  869. onnx/reference/ops/op_gru.py +4 -1
  870. onnx/reference/ops/op_hamming_window.py +3 -1
  871. onnx/reference/ops/op_hann_window.py +3 -1
  872. onnx/reference/ops/op_hard_sigmoid.py +3 -1
  873. onnx/reference/ops/op_hardmax.py +3 -1
  874. onnx/reference/ops/op_identity.py +3 -1
  875. onnx/reference/ops/op_if.py +16 -7
  876. onnx/reference/ops/op_instance_normalization.py +2 -0
  877. onnx/reference/ops/op_isinf.py +2 -0
  878. onnx/reference/ops/op_isnan.py +3 -1
  879. onnx/reference/ops/op_layer_normalization.py +2 -0
  880. onnx/reference/ops/op_leaky_relu.py +4 -2
  881. onnx/reference/ops/op_less.py +3 -1
  882. onnx/reference/ops/op_less_or_equal.py +3 -1
  883. onnx/reference/ops/op_log.py +4 -2
  884. onnx/reference/ops/op_log_softmax.py +3 -1
  885. onnx/reference/ops/op_loop.py +4 -2
  886. onnx/reference/ops/op_lp_normalization.py +4 -2
  887. onnx/reference/ops/op_lp_pool.py +41 -0
  888. onnx/reference/ops/op_lrn.py +9 -5
  889. onnx/reference/ops/op_lstm.py +4 -2
  890. onnx/reference/ops/op_matmul.py +3 -1
  891. onnx/reference/ops/op_matmul_integer.py +2 -0
  892. onnx/reference/ops/op_max.py +3 -1
  893. onnx/reference/ops/op_max_pool.py +3 -1
  894. onnx/reference/ops/op_max_unpool.py +2 -0
  895. onnx/reference/ops/op_mean.py +3 -1
  896. onnx/reference/ops/op_mel_weight_matrix.py +2 -0
  897. onnx/reference/ops/op_min.py +3 -1
  898. onnx/reference/ops/op_mod.py +2 -0
  899. onnx/reference/ops/op_mul.py +3 -1
  900. onnx/reference/ops/op_neg.py +3 -1
  901. onnx/reference/ops/op_negative_log_likelihood_loss.py +3 -1
  902. onnx/reference/ops/op_non_max_suppression.py +22 -19
  903. onnx/reference/ops/op_non_zero.py +4 -1
  904. onnx/reference/ops/op_not.py +3 -1
  905. onnx/reference/ops/op_one_hot.py +3 -1
  906. onnx/reference/ops/op_optional.py +2 -0
  907. onnx/reference/ops/op_optional_get_element.py +4 -8
  908. onnx/reference/ops/op_optional_has_element.py +3 -9
  909. onnx/reference/ops/op_or.py +3 -1
  910. onnx/reference/ops/op_pad.py +18 -29
  911. onnx/reference/ops/op_pow.py +2 -0
  912. onnx/reference/ops/op_prelu.py +4 -2
  913. onnx/reference/ops/op_qlinear_conv.py +3 -2
  914. onnx/reference/ops/op_qlinear_matmul.py +2 -0
  915. onnx/reference/ops/op_quantize_linear.py +100 -15
  916. onnx/reference/ops/op_random_normal.py +3 -1
  917. onnx/reference/ops/op_random_normal_like.py +3 -2
  918. onnx/reference/ops/op_random_uniform.py +3 -1
  919. onnx/reference/ops/op_random_uniform_like.py +3 -2
  920. onnx/reference/ops/op_range.py +2 -0
  921. onnx/reference/ops/op_reciprocal.py +4 -2
  922. onnx/reference/ops/op_reduce_l1.py +17 -31
  923. onnx/reference/ops/op_reduce_l2.py +17 -35
  924. onnx/reference/ops/op_reduce_log_sum.py +6 -29
  925. onnx/reference/ops/op_reduce_log_sum_exp.py +6 -29
  926. onnx/reference/ops/op_reduce_max.py +15 -36
  927. onnx/reference/ops/op_reduce_mean.py +15 -33
  928. onnx/reference/ops/op_reduce_min.py +15 -32
  929. onnx/reference/ops/op_reduce_prod.py +15 -29
  930. onnx/reference/ops/op_reduce_sum.py +17 -45
  931. onnx/reference/ops/op_reduce_sum_square.py +15 -29
  932. onnx/reference/ops/op_relu.py +3 -1
  933. onnx/reference/ops/op_reshape.py +2 -7
  934. onnx/reference/ops/op_resize.py +59 -26
  935. onnx/reference/ops/op_reverse_sequence.py +2 -0
  936. onnx/reference/ops/op_rnn.py +3 -7
  937. onnx/reference/ops/op_roi_align.py +7 -5
  938. onnx/reference/ops/op_round.py +4 -2
  939. onnx/reference/ops/op_scan.py +5 -2
  940. onnx/reference/ops/op_scatter_elements.py +17 -4
  941. onnx/reference/ops/op_scatternd.py +2 -0
  942. onnx/reference/ops/op_selu.py +5 -1
  943. onnx/reference/ops/op_sequence_at.py +2 -0
  944. onnx/reference/ops/op_sequence_construct.py +2 -0
  945. onnx/reference/ops/op_sequence_empty.py +2 -0
  946. onnx/reference/ops/op_sequence_erase.py +2 -0
  947. onnx/reference/ops/op_sequence_insert.py +4 -2
  948. onnx/reference/ops/op_sequence_length.py +7 -1
  949. onnx/reference/ops/op_sequence_map.py +4 -2
  950. onnx/reference/ops/op_shape.py +2 -7
  951. onnx/reference/ops/op_shrink.py +3 -1
  952. onnx/reference/ops/op_sigmoid.py +7 -1
  953. onnx/reference/ops/op_sign.py +3 -1
  954. onnx/reference/ops/op_sin.py +3 -1
  955. onnx/reference/ops/op_sinh.py +3 -1
  956. onnx/reference/ops/op_size.py +2 -0
  957. onnx/reference/ops/op_slice.py +3 -9
  958. onnx/reference/ops/op_softmax.py +4 -2
  959. onnx/reference/ops/op_softmax_cross_entropy_loss.py +4 -1
  960. onnx/reference/ops/op_softplus.py +4 -2
  961. onnx/reference/ops/op_softsign.py +3 -1
  962. onnx/reference/ops/op_space_to_depth.py +3 -1
  963. onnx/reference/ops/op_split.py +7 -9
  964. onnx/reference/ops/op_split_to_sequence.py +41 -10
  965. onnx/reference/ops/op_sqrt.py +4 -2
  966. onnx/reference/ops/op_squeeze.py +3 -12
  967. onnx/reference/ops/op_stft.py +8 -7
  968. onnx/reference/ops/op_string_normalizer.py +4 -3
  969. onnx/reference/ops/op_sub.py +3 -1
  970. onnx/reference/ops/op_sum.py +3 -1
  971. onnx/reference/ops/op_tan.py +3 -1
  972. onnx/reference/ops/op_tanh.py +3 -1
  973. onnx/reference/ops/op_tfidf_vectorizer.py +15 -13
  974. onnx/reference/ops/op_thresholded_relu.py +4 -2
  975. onnx/reference/ops/op_tile.py +2 -0
  976. onnx/reference/ops/op_topk.py +12 -19
  977. onnx/reference/ops/op_transpose.py +2 -0
  978. onnx/reference/ops/op_trilu.py +3 -1
  979. onnx/reference/ops/op_unique.py +2 -0
  980. onnx/reference/ops/op_unsqueeze.py +2 -9
  981. onnx/reference/ops/op_upsample.py +9 -8
  982. onnx/reference/ops/op_where.py +7 -1
  983. onnx/reference/ops/op_xor.py +3 -1
  984. onnx/reference/reference_evaluator.py +64 -20
  985. onnx/shape_inference/implementation.cc +204 -43
  986. onnx/shape_inference/implementation.h +33 -13
  987. onnx/shape_inference.py +37 -12
  988. onnx/string_utils.h +3 -3
  989. onnx/test/cpp/common_path_test.cc +2 -0
  990. onnx/test/cpp/data_propagation_test.cc +2 -0
  991. onnx/test/cpp/function_context_test.cc +2 -0
  992. onnx/test/cpp/function_get_test.cc +2 -0
  993. onnx/test/cpp/function_verify_test.cc +176 -0
  994. onnx/test/cpp/op_reg_test.cc +2 -0
  995. onnx/test/cpp/parser_test.cc +37 -1
  996. onnx/test/cpp/schema_registration_test.cc +2 -0
  997. onnx/test/cpp/shape_inference_test.cc +2 -0
  998. onnx/test/cpp/test_main.cc +2 -0
  999. onnx/tools/__init__.py +2 -0
  1000. onnx/tools/net_drawer.py +13 -9
  1001. onnx/tools/replace_constants.py +429 -0
  1002. onnx/tools/update_model_dims.py +7 -9
  1003. onnx/utils.py +16 -6
  1004. onnx/version.py +2 -2
  1005. onnx/version_converter/BaseConverter.h +2 -0
  1006. onnx/version_converter/adapters/adapter.h +2 -0
  1007. onnx/version_converter/adapters/axes_attribute_to_input.h +2 -0
  1008. onnx/version_converter/adapters/axes_input_to_attribute.h +2 -0
  1009. onnx/version_converter/adapters/batch_normalization_13_14.h +2 -0
  1010. onnx/version_converter/adapters/broadcast_backward_compatibility.h +2 -0
  1011. onnx/version_converter/adapters/broadcast_forward_compatibility.h +2 -0
  1012. onnx/version_converter/adapters/cast_9_8.h +2 -0
  1013. onnx/version_converter/adapters/clip_10_11.h +2 -0
  1014. onnx/version_converter/adapters/compatible.h +2 -0
  1015. onnx/version_converter/adapters/dropout_11_12.h +2 -0
  1016. onnx/version_converter/adapters/extend_supported_types.h +2 -0
  1017. onnx/version_converter/adapters/gemm_6_7.h +2 -0
  1018. onnx/version_converter/adapters/gemm_7_6.h +2 -0
  1019. onnx/version_converter/adapters/maxpool_8_7.h +2 -0
  1020. onnx/version_converter/adapters/no_previous_version.h +2 -0
  1021. onnx/version_converter/adapters/pad_10_11.h +4 -0
  1022. onnx/version_converter/adapters/remove_consumed_inputs.h +2 -0
  1023. onnx/version_converter/adapters/reshape_4_5.h +2 -0
  1024. onnx/version_converter/adapters/reshape_5_4.h +2 -0
  1025. onnx/version_converter/adapters/resize_10_11.h +2 -0
  1026. onnx/version_converter/adapters/scan_8_9.h +2 -0
  1027. onnx/version_converter/adapters/scan_9_8.h +2 -0
  1028. onnx/version_converter/adapters/scatter_10_11.h +2 -0
  1029. onnx/version_converter/adapters/slice_9_10.h +2 -0
  1030. onnx/version_converter/adapters/softmax_12_13.h +20 -28
  1031. onnx/version_converter/adapters/split_12_13.h +2 -0
  1032. onnx/version_converter/adapters/split_13_12.h +2 -0
  1033. onnx/version_converter/adapters/split_17_18.h +2 -0
  1034. onnx/version_converter/adapters/sum_8_7.h +2 -0
  1035. onnx/version_converter/adapters/topk_9_10.h +2 -0
  1036. onnx/version_converter/adapters/transformers.h +3 -1
  1037. onnx/version_converter/adapters/type_restriction.h +2 -0
  1038. onnx/version_converter/adapters/upsample_6_7.h +2 -0
  1039. onnx/version_converter/adapters/upsample_8_9.h +2 -0
  1040. onnx/version_converter/adapters/upsample_9_10.h +2 -0
  1041. onnx/version_converter/adapters/upsample_9_8.h +2 -0
  1042. onnx/version_converter/convert.cc +14 -7
  1043. onnx/version_converter/convert.h +20 -0
  1044. onnx/version_converter/helper.cc +3 -3
  1045. onnx/version_converter/helper.h +3 -3
  1046. onnx/version_converter.py +6 -3
  1047. {onnx-1.13.1.dist-info → onnx-1.14.1.dist-info}/METADATA +96 -52
  1048. {onnx-1.13.1.dist-info → onnx-1.14.1.dist-info}/RECORD +1056 -743
  1049. {onnx-1.13.1.dist-info → onnx-1.14.1.dist-info}/WHEEL +1 -1
  1050. onnx/backend/test/data/node/test_softplus_example_expanded/model.onnx +0 -0
  1051. /onnx/backend/test/data/node/{test_softplus_example_expanded → test_softplus_example_expanded_ver18}/test_data_set_0/input_0.pb +0 -0
  1052. /onnx/backend/test/data/node/{test_softplus_example_expanded → test_softplus_example_expanded_ver18}/test_data_set_0/output_0.pb +0 -0
  1053. /onnx/backend/test/data/node/{test_softplus_expanded → test_softplus_expanded_ver18}/test_data_set_0/input_0.pb +0 -0
  1054. /onnx/backend/test/data/node/{test_softplus_expanded → test_softplus_expanded_ver18}/test_data_set_0/output_0.pb +0 -0
  1055. {onnx-1.13.1.dist-info → onnx-1.14.1.dist-info}/LICENSE +0 -0
  1056. {onnx-1.13.1.dist-info → onnx-1.14.1.dist-info}/entry_points.txt +0 -0
  1057. {onnx-1.13.1.dist-info → onnx-1.14.1.dist-info}/top_level.txt +0 -0
onnx/defs/tensor/old.cc CHANGED
@@ -6,10 +6,198 @@
6
6
  #include <cmath>
7
7
  #include <numeric>
8
8
  #include "onnx/defs/data_propagators.h"
9
+ #include "onnx/defs/function.h"
9
10
  #include "onnx/defs/tensor/utils.h"
10
11
 
11
12
  namespace ONNX_NAMESPACE {
12
13
 
14
+ static const char* Cast_ver13_doc = R"DOC(
15
+ The operator casts the elements of a given input tensor to a data type
16
+ specified by the 'to' argument and returns an output tensor of the same size in
17
+ the converted type. The 'to' argument must be one of the data types specified
18
+ in the 'DataType' enum field in the TensorProto message.
19
+
20
+ Casting from string tensor in plain (e.g., "3.14" and "1000") and scientific numeric representations
21
+ (e.g., "1e-5" and "1E8") to float types is supported. For example, converting string "100.5" to an integer may
22
+ yield result 100. There are some string literals reserved for special floating-point values;
23
+ "+INF" (and "INF"), "-INF", and "NaN" are positive infinity, negative infinity, and not-a-number, respectively.
24
+ Any string which can exactly match "+INF" in a case-insensitive way would be mapped to positive infinite. Similarly,
25
+ this case-insensitive rule is applied to "INF" and "NaN". When casting from numeric tensors
26
+ to string tensors, plain floating-point representation (such as "314.15926") would be used.
27
+ Converting non-numerical-literal string such as "Hello World!" is an undefined behavior. Cases
28
+ of converting string representing floating-point arithmetic value, such as "2.718", to INT is an undefined behavior.
29
+
30
+ Conversion from a numerical type to any numerical type is always allowed.
31
+ User must be aware of precision loss and value change caused by range difference between two types.
32
+ For example, a 64-bit float 3.1415926459 may be round to a 32-bit float 3.141592. Similarly, converting
33
+ an integer 36 to Boolean may produce 1 because we truncate bits which can't be stored in the targeted type.
34
+
35
+ In more detail, the conversion among numerical types should follow these rules:
36
+
37
+ * Casting from floating point to:
38
+ * floating point: +/- infinity if OOR (out of range).
39
+ * fixed point: undefined if OOR.
40
+ * bool: +/- 0.0 to False; all else to True.
41
+ * Casting from fixed point to:
42
+ * floating point: +/- infinity if OOR. (+ infinity in the case of uint)
43
+ * fixed point: when OOR, discard higher bits and reinterpret (with respect to two's complement representation for
44
+ signed types). For example, 200 (int16) -> -56 (int8).
45
+ * bool: zero to False; nonzero to True.
46
+ * Casting from bool to:
47
+ * floating point: `{1.0, 0.0}`.
48
+ * fixed point: `{1, 0}`.
49
+ * bool: no change.
50
+ )DOC";
51
+
52
+ ONNX_OPERATOR_SET_SCHEMA(
53
+ Cast,
54
+ 13,
55
+ OpSchema()
56
+ .SetDoc(Cast_ver13_doc)
57
+ .Attr(
58
+ "to",
59
+ "The data type to which the elements of the input tensor are cast. "
60
+ "Strictly must be one of the types from DataType enum in TensorProto",
61
+ AttributeProto::INT)
62
+ .Input(0, "input", "Input tensor to be cast.", "T1", OpSchema::Single, true, 1, OpSchema::Differentiable)
63
+ .Output(
64
+ 0,
65
+ "output",
66
+ "Output tensor with the same shape as input with type "
67
+ "specified by the 'to' argument",
68
+ "T2",
69
+ OpSchema::Single,
70
+ true,
71
+ 1,
72
+ OpSchema::Differentiable)
73
+ .TypeConstraint(
74
+ "T1",
75
+ {"tensor(float16)",
76
+ "tensor(float)",
77
+ "tensor(double)",
78
+ "tensor(int8)",
79
+ "tensor(int16)",
80
+ "tensor(int32)",
81
+ "tensor(int64)",
82
+ "tensor(uint8)",
83
+ "tensor(uint16)",
84
+ "tensor(uint32)",
85
+ "tensor(uint64)",
86
+ "tensor(bool)",
87
+ "tensor(string)",
88
+ "tensor(bfloat16)"},
89
+ "Constrain input types. Casting from complex is not supported.")
90
+ .TypeConstraint(
91
+ "T2",
92
+ {"tensor(float16)",
93
+ "tensor(float)",
94
+ "tensor(double)",
95
+ "tensor(int8)",
96
+ "tensor(int16)",
97
+ "tensor(int32)",
98
+ "tensor(int64)",
99
+ "tensor(uint8)",
100
+ "tensor(uint16)",
101
+ "tensor(uint32)",
102
+ "tensor(uint64)",
103
+ "tensor(bool)",
104
+ "tensor(string)",
105
+ "tensor(bfloat16)"},
106
+ "Constrain output types. Casting to complex is not supported.")
107
+ .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
108
+ propagateElemTypeFromAttributeToOutput(ctx, "to", 0);
109
+ if (hasNInputShapes(ctx, 1)) {
110
+ propagateShapeFromInputToOutput(ctx, 0, 0);
111
+ }
112
+ })
113
+ .PartialDataPropagationFunction([](DataPropagationContext& ctx) {
114
+ PropagateShapeDataFromInputToOutput(ctx, 0);
115
+ }));
116
+
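The casting rules spelled out in Cast_ver13_doc above are ordinary saturation/truncation semantics; as a minimal illustration (NumPy only, not the ONNX C++ implementation), the fixed-point, bool and string cases behave like this:

```
import numpy as np

# Fixed point -> narrower fixed point: out-of-range values discard the higher
# bits and are reinterpreted in two's complement, e.g. 200 (int16) -> -56 (int8).
print(np.int16(200).astype(np.int8))             # -56

# Floating point -> bool: +/-0.0 maps to False, everything else to True.
print(np.array([0.0, -0.0, 0.1]).astype(bool))   # [False False  True]

# String -> float: plain and scientific notations are both accepted.
print(np.array(["3.14", "1e-5", "1E8"]).astype(np.float32))
```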
117
+ static const char* CastLike_ver15_doc = R"DOC(
118
+ The operator casts the elements of a given input tensor (the first input) to
119
+ the same data type as the elements of the second input tensor.
120
+ See documentation of the Cast operator for further details.
121
+ )DOC";
122
+
123
+ ONNX_OPERATOR_SET_SCHEMA(
124
+ CastLike,
125
+ 15,
126
+ OpSchema()
127
+ .SetDoc(CastLike_ver15_doc)
128
+ .Input(0, "input", "Input tensor to be cast.", "T1", OpSchema::Single, true, 1, OpSchema::Differentiable)
129
+ .Input(
130
+ 1,
131
+ "target_type",
132
+ "The (first) input tensor will be cast to produce a tensor of the same type as this (second input) tensor.",
133
+ "T2",
134
+ OpSchema::Single,
135
+ true,
136
+ 1,
137
+ OpSchema::NonDifferentiable)
138
+ .Output(
139
+ 0,
140
+ "output",
141
+ "Output tensor produced by casting the first input tensor to have the same type as the second input tensor.",
142
+ "T2",
143
+ OpSchema::Single,
144
+ true,
145
+ 1,
146
+ OpSchema::Differentiable)
147
+ .TypeConstraint(
148
+ "T1",
149
+ {"tensor(float16)",
150
+ "tensor(float)",
151
+ "tensor(double)",
152
+ "tensor(int8)",
153
+ "tensor(int16)",
154
+ "tensor(int32)",
155
+ "tensor(int64)",
156
+ "tensor(uint8)",
157
+ "tensor(uint16)",
158
+ "tensor(uint32)",
159
+ "tensor(uint64)",
160
+ "tensor(bool)",
161
+ "tensor(string)",
162
+ "tensor(bfloat16)"},
163
+ "Constrain input types. Casting from complex is not supported.")
164
+ .TypeConstraint(
165
+ "T2",
166
+ {"tensor(float16)",
167
+ "tensor(float)",
168
+ "tensor(double)",
169
+ "tensor(int8)",
170
+ "tensor(int16)",
171
+ "tensor(int32)",
172
+ "tensor(int64)",
173
+ "tensor(uint8)",
174
+ "tensor(uint16)",
175
+ "tensor(uint32)",
176
+ "tensor(uint64)",
177
+ "tensor(bool)",
178
+ "tensor(string)",
179
+ "tensor(bfloat16)"},
180
+ "Constrain output types. Casting to complex is not supported.")
181
+ .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
182
+ propagateElemTypeFromInputToOutput(ctx, 1, 0);
183
+ if (hasNInputShapes(ctx, 1)) {
184
+ propagateShapeFromInputToOutput(ctx, 0, 0);
185
+ }
186
+ })
187
+ .SetContextDependentFunctionBodyBuilder(
188
+ [](const FunctionBodyBuildContext& ctx, const OpSchema& schema, FunctionProto& functionProto) -> bool {
189
+ auto target_type = ctx.getInputType(1);
190
+ if ((target_type == nullptr) || (!target_type->has_tensor_type())) {
191
+ // we cannot create a correct function body without knowing the target element type
192
+ return false;
193
+ }
194
+ auto target_elt_type = target_type->tensor_type().elem_type();
195
+ FunctionBuilder builder(functionProto);
196
+ builder.Add("output = Cast (input)", "to", (int64_t)(target_elt_type));
197
+ schema.BuildFunction(functionProto);
198
+ return true;
199
+ }));
200
+
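As the function-body builder above shows, CastLike-15 is expanded into a single Cast node whose `to` attribute is the element type of the second input. A rough Python sketch of the resulting node (hypothetical tensor names, target element type assumed to be FLOAT):

```
from onnx import TensorProto, helper

# Hypothetical expansion of CastLike(input, target_type) when the target
# element type is known (here assumed to be FLOAT) at function-building time.
cast_node = helper.make_node("Cast", inputs=["input"], outputs=["output"],
                             to=TensorProto.FLOAT)
print(cast_node)
```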
13
201
  static const char* Cast_ver9_doc = R"DOC(
14
202
  The operator casts the elements of a given input tensor to a data type
15
203
  specified by the 'to' argument and returns an output tensor of the same size in
@@ -18,7 +206,7 @@ in the 'DataType' enum field in the TensorProto message.
 
  Casting from string tensor in plain (e.g., "3.14" and "1000") and scientific numeric representations
  (e.g., "1e-5" and "1E8") to float types is supported. For example, converting string "100.5" to an integer may
- result 100. There are some string literals reserved for special floating-point values;
+ yield result 100. There are some string literals reserved for special floating-point values;
  "+INF" (and "INF"), "-INF", and "NaN" are positive infinity, negative infinity, and not-a-number, respectively.
  Any string which can exactly match "+INF" in a case-insensitive way would be mapped to positive infinite. Similarly,
  this case-insensitive rule is applied to "INF" and "NaN". When casting from numeric tensors
@@ -113,10 +301,7 @@ ONNX_OPERATOR_SET_SCHEMA(
  1,
  OpSchema::NonDifferentiable)
  .Output(0, "reshaped", "Reshaped data.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
- .TypeConstraint(
- "T",
- OpSchema::all_tensor_types_with_bfloat(),
- "Constrain input and output types to all tensor types.")
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
  // Type inference
  propagateElemTypeFromInputToOutput(ctx, 0, 0);
@@ -361,7 +546,7 @@ ONNX_OPERATOR_SET_SCHEMA(
  .SetDoc(Shape_ver13_doc)
  .Input(0, "data", "An input tensor.", "T", OpSchema::Single, true, 1, OpSchema::NonDifferentiable)
  .Output(0, "shape", "Shape of the input tensor", "T1", OpSchema::Single, true, 1, OpSchema::NonDifferentiable)
- .TypeConstraint("T", OpSchema::all_tensor_types_with_bfloat(), "Input tensor can be of arbitrary type.")
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Input tensor can be of arbitrary type.")
  .TypeConstraint("T1", {"tensor(int64)"}, "Constrain output to int64 tensor.")
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
  ctx.getOutputType(0)->mutable_tensor_type()->set_elem_type(TensorProto::INT64);
@@ -613,10 +798,7 @@ ONNX_OPERATOR_SET_SCHEMA(
  true,
  1,
  OpSchema::Differentiable)
- .TypeConstraint(
- "T",
- OpSchema::all_tensor_types_with_bfloat(),
- "Constrain input and output types to all tensor types.")
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
  .Attr(
  "axis",
  "Which axis to split on. "
@@ -1084,10 +1266,7 @@ ONNX_OPERATOR_SET_SCHEMA(
  1,
  OpSchema::Differentiable)
  .Output(0, "output", "Tensor of rank r >= 1.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
- .TypeConstraint(
- "T",
- OpSchema::all_tensor_types_with_bfloat(),
- "Constrain input and output types to any tensor type.")
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to any tensor type.")
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
  propagateElemTypeFromInputToOutput(ctx, 0, 0);
  if (hasNInputShapes(ctx, 1)) {
@@ -1180,10 +1359,7 @@ ONNX_OPERATOR_SET_SCHEMA(
  1,
  OpSchema::Differentiable)
  .Output(0, "output", "Tensor of rank r >= 1.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
- .TypeConstraint(
- "T",
- OpSchema::all_tensor_types_with_bfloat(),
- "Constrain input and output types to any tensor type.")
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to any tensor type.")
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
  propagateElemTypeFromInputToOutput(ctx, 0, 0);
  if (hasNInputShapes(ctx, 1)) {
@@ -1379,10 +1555,7 @@ ONNX_OPERATOR_SET_SCHEMA(
  true,
  1,
  OpSchema::Differentiable)
- .TypeConstraint(
- "T",
- OpSchema::all_tensor_types_with_bfloat(),
- "Input and output types can be of any tensor type.")
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Input and output types can be of any tensor type.")
  .TypeConstraint("Tind", {"tensor(int32)", "tensor(int64)"}, "Constrain indices to integer types")
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
  propagateElemTypeFromInputToOutput(ctx, 0, 0);
@@ -1485,10 +1658,7 @@ ONNX_OPERATOR_SET_SCHEMA(
  true,
  1,
  OpSchema::Differentiable)
- .TypeConstraint(
- "T",
- OpSchema::all_tensor_types_with_bfloat(),
- "Input and output types can be of any tensor type.")
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Input and output types can be of any tensor type.")
  .TypeConstraint("Tind", {"tensor(int32)", "tensor(int64)"}, "Constrain indices to integer types")
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
  propagateElemTypeFromInputToOutput(ctx, 0, 0);
@@ -1618,7 +1788,7 @@ axis = 1 :
  Let
  k = indices[i_{0}, ..., i_{q-1}]
  Then
- output[i_{0}, ..., i_{q-1}, j_{0}, ..., j_{r-2}] = input[j_{0}, k, j_{1}, ..., j_{r-2}]
+ output[j_{0}, i_{0}, ..., i_{q-1}, j_{1}, ..., j_{r-2}] = input[j_{0}, k, j_{1}, ..., j_{r-2}]
 
  ```
  data = [
@@ -1631,11 +1801,9 @@ output[i_{0}, ..., i_{q-1}, j_{0}, ..., j_{r-2}] = input[j_{0}, k, j_{1}, ..., j
  ]
  axis = 1,
  output = [
- [
- [1.0, 1.9],
- [2.3, 3.9],
- [4.5, 5.9],
- ],
+ [[1.0, 1.9]],
+ [[2.3, 3.9]],
+ [[4.5, 5.9]],
  ]
  ```
  )DOC";
@@ -2118,6 +2286,161 @@ ONNX_OPERATOR_SET_SCHEMA(
2118
2286
  return;
2119
2287
  }));
2120
2288
 
2289
+ static const char* Resize_ver18_doc = R"DOC(
2290
+ Resize the input tensor. In general, it calculates every value in the output tensor as a weighted average of neighborhood (a.k.a. sampling locations) in the input tensor.
2291
+ Each dimension value of the output tensor is: <br/>
2292
+ `output_dimension = floor(input_dimension * (roi_end - roi_start) * scale)` <br/>
2293
+ if input \"sizes\" is not specified.
2294
+ )DOC";
2295
+
2296
+ static const char* Resize_ver18_attr_coordinate_transformation_mode_doc = R"DOC(
2297
+ This attribute describes how to transform the coordinate in the resized tensor to the coordinate in the original tensor. <br/>
2298
+
2299
+ The coordinate of each dimension is transformed individually. Let's describe a case using axis x as an example.
2300
+ Denote x_resized as the coordinate of axis x in the resized tensor, x_original as the coordinate of axis x in the original tensor, `length_original` as the length of the original tensor in axis x, length_resized as the length of the resized tensor in axis x, roi_x = (start_x, end_x) of the axis x in input "roi", `scale = length_resized / length_original`, <br/>
2301
+
2302
+ if coordinate_transformation_mode is `"half_pixel"`, <br/>
2303
+ `x_original = (x_resized + 0.5) / scale - 0.5` <br/>
2304
+
2305
+ if coordinate_transformation_mode is `"pytorch_half_pixel"`, <br/>
2306
+ `x_original = length_resized > 1 ? (x_resized + 0.5) / scale - 0.5 : 0` <br/>
2307
+
2308
+ if coordinate_transformation_mode is `"align_corners"`, <br/>
2309
+ `x_original = x_resized * (length_original - 1) / (length_resized - 1)` <br/>
2310
+
2311
+ if coordinate_transformation_mode is `"asymmetric"`, <br/>
2312
+ `x_original = x_resized / scale` <br/>
2313
+
2314
+ if coordinate_transformation_mode is `"tf_crop_and_resize"`, <br/>
2315
+ `x_original = length_resized > 1 ? start_x * (length_original - 1) + x_resized * (end_x - start_x) * (length_original - 1) / (length_resized - 1) : 0.5 * (start_x + end_x) * (length_original - 1)`
2316
+ .)DOC";
2317
+
2318
+ static const char* Resize_ver18_attr_keep_aspect_ratio_policy_doc = R"DOC(
2319
+ This attribute describes how to interpret the `sizes` input with regard to keeping the original aspect ratio of the input, and it is not applicable when
2320
+ the `scales` input is used. <br/>
2321
+
2322
+ Given a set of `sizes`, associated with a subset of `axes` (explicitly provided or default), and assuming `d = axes[i]`, with `i` being the index of the provided `sizes`. <br/>
2323
+
2324
+ If `keep_aspect_ratio_policy` is `"stretch"`, the original aspect ratio is disregarded, and the input is resized to the specified size: <br/>
2325
+ `out_size[d] = sizes[i]` <br/>
2326
+
2327
+ If `keep_aspect_ratio_policy` is `"not_larger"`, the sizes are adjusted so that no extent of the output is larger than the specified size, while keeping the original aspect ratio: <br/>
2328
+ `scale = Min(sizes[i] / in_size[d])` <br/>
2329
+ `out_size[d] = round_int(scale * in_size[i])` <br/>
2330
+
2331
+ If `keep_aspect_ratio_policy` is `"not_smaller"`, the sizes are adjusted so that no extent of the output is smaller than the specified size, while keeping the original aspect ratio: <br/>
2332
+ `scale = Max(sizes[i] / in_size[d])` <br/>
2333
+ `out_size[d] = round_int(scale * in_size[i])` <br/>
2334
+
2335
+ For non-resizable axes (those not specified in `axes`), the output size will be equal to the input size.
2336
+
2337
+ Note: `round_int` stands for computing the nearest integer value, rounding halfway cases up.)DOC";
2338
+
2339
+ ONNX_OPERATOR_SET_SCHEMA(
2340
+ Resize,
2341
+ 18,
2342
+ OpSchema()
2343
+ .Attr(
2344
+ "mode",
2345
+ "Three interpolation modes: \"nearest\" (default), \"linear\" and \"cubic\". "
2346
+ "The \"linear\" mode includes linear interpolation for 1D tensor and N-linear interpolation for N-D tensor (for example, bilinear interpolation for 2D tensor). "
2347
+ "The \"cubic\" mode includes cubic interpolation for 1D tensor and N-cubic interpolation for N-D tensor (for example, bicubic interpolation for 2D tensor).",
2348
+ AttributeProto::STRING,
2349
+ std::string("nearest"))
2350
+ .Attr(
2351
+ "cubic_coeff_a",
2352
+ "The coefficient 'a' used in cubic interpolation. Two common choice are -0.5 (in some cases of TensorFlow) and -0.75"
2353
+ " (in PyTorch). Check out Equation (4) in https://ieeexplore.ieee.org/document/1163711 for the details. "
2354
+ "This attribute is valid only if mode is \"cubic\".",
2355
+ AttributeProto::FLOAT,
2356
+ static_cast<float>(-0.75))
2357
+ .Attr(
2358
+ "exclude_outside",
2359
+ "If set to 1, the weight of sampling locations outside the tensor will be set to 0"
2360
+ " and the weight will be renormalized so that their sum is 1.0. The default value is 0.",
2361
+ AttributeProto::INT,
2362
+ static_cast<int64_t>(0))
2363
+ .Attr(
2364
+ "coordinate_transformation_mode",
2365
+ Resize_ver18_attr_coordinate_transformation_mode_doc,
2366
+ AttributeProto::STRING,
2367
+ std::string("half_pixel"))
2368
+ .Attr(
2369
+ "nearest_mode",
2370
+ "Four modes: \"round_prefer_floor\" (default, as known as round half down), \"round_prefer_ceil\" (as known as round half up), \"floor\", \"ceil\". Only used by nearest interpolation. It indicates how to get \"nearest\" pixel in input tensor from x_original, so this attribute is valid only if \"mode\" is \"nearest\".",
2371
+ AttributeProto::STRING,
2372
+ std::string("round_prefer_floor"))
2373
+ .Attr(
2374
+ "extrapolation_value",
2375
+ "When coordinate_transformation_mode is \"tf_crop_and_resize\" and x_original is outside the range [0, length_original - 1], this value is used as the corresponding output value. Default is 0.0f.",
2376
+ AttributeProto::FLOAT,
2377
+ static_cast<float>(0))
2378
+ .Attr(
2379
+ "antialias",
2380
+ "If set to 1, \"linear\" and \"cubic\" interpolation modes will use an antialiasing filter when downscaling. "
2381
+ "Antialiasing is achieved by stretching the resampling filter by a factor max(1, 1 / scale), which means that when downsampling, more input pixels contribute to an output pixel.",
2382
+ AttributeProto::INT,
2383
+ static_cast<int64_t>(0))
2384
+ .Attr(
2385
+ "axes",
2386
+ "If provided, it specifies a subset of axes that 'roi', 'scales' and 'sizes' refer to. "
2387
+ "If not provided, all axes are assumed [0, 1, ..., r-1], where r = rank(data). "
2388
+ "Non-specified dimensions are interpreted as non-resizable. "
2389
+ "Negative value means counting dimensions from the back. Accepted range is [-r, r-1], where r = rank(data). "
2390
+ "Behavior is undefined if an axis is repeated.",
2391
+ AttributeProto::INTS,
2392
+ false)
2393
+ .Attr(
2394
+ "keep_aspect_ratio_policy",
2395
+ Resize_ver18_attr_keep_aspect_ratio_policy_doc,
2396
+ AttributeProto::STRING,
2397
+ std::string("stretch"))
2398
+ .Input(0, "X", "N-D tensor", "T1", OpSchema::Single, true, 1, OpSchema::Differentiable)
2399
+ .Input(
2400
+ 1,
2401
+ "roi",
2402
+ "1-D tensor given as [start1, ..., startN, end1, ..., endN], where N is the rank of X or the length of axes, if provided. "
2403
+ "The RoIs' coordinates are normalized in the coordinate system of the input image. It only takes effect when coordinate_transformation_mode is \"tf_crop_and_resize\"",
2404
+ "T2",
2405
+ OpSchema::Optional,
2406
+ true,
2407
+ 1,
2408
+ OpSchema::NonDifferentiable)
2409
+ .Input(
2410
+ 2,
2411
+ "scales",
2412
+ "The scale array along each dimension. It takes value greater than 0. If it's less than 1,"
2413
+ " it's sampling down, otherwise, it's upsampling. The number of elements of 'scales' should"
2414
+ " be the same as the rank of input 'X' or the length of 'axes', if provided. "
2415
+ "One of 'scales' and 'sizes' MUST be specified and it is an error if both are specified. If 'sizes' is needed, the user can use an empty string as the name of 'scales' in this operator's input list.",
2416
+ "tensor(float)",
2417
+ OpSchema::Optional,
2418
+ true,
2419
+ 1,
2420
+ OpSchema::NonDifferentiable)
2421
+ .Input(
2422
+ 3,
2423
+ "sizes",
2424
+ "Target size of the output tensor. Its interpretation depends on the 'keep_aspect_ratio_policy' value."
2425
+ "The number of elements of 'sizes' should be the same as the"
2426
+ " rank of input 'X', or the length of 'axes', if provided. Only one of 'scales' and 'sizes' can be specified. ",
2427
+ "tensor(int64)",
2428
+ OpSchema::Optional,
2429
+ true,
2430
+ 1,
2431
+ OpSchema::NonDifferentiable)
2432
+ .Output(0, "Y", "N-D tensor after resizing", "T1", OpSchema::Single, true, 1, OpSchema::Differentiable)
2433
+ .TypeConstraint(
2434
+ "T1",
2435
+ OpSchema::all_tensor_types_ir4(),
2436
+ "Constrain input 'X' and output 'Y' to all tensor types.")
2437
+ .TypeConstraint(
2438
+ "T2",
2439
+ {"tensor(float16)", "tensor(float)", "tensor(double)"},
2440
+ "Constrain roi type to float or double.")
2441
+ .SetDoc(Resize_ver18_doc)
2442
+ .TypeAndShapeInferenceFunction([](InferenceContext& ctx) { resizeShapeInference_opset13_to_18(ctx); }));
2443
+
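The coordinate-transformation and keep_aspect_ratio_policy formulas above are plain arithmetic; a brief sketch of the `half_pixel` mapping and the `not_larger` policy (hypothetical helper names, not part of the onnx API):

```
import math

def half_pixel_to_original(x_resized, scale):
    # coordinate_transformation_mode = "half_pixel"
    return (x_resized + 0.5) / scale - 0.5

def not_larger_sizes(in_size, sizes):
    # keep_aspect_ratio_policy = "not_larger": pick one scale so no output
    # extent exceeds the requested size; round_int rounds halfway cases up.
    scale = min(s / d for s, d in zip(sizes, in_size))
    return [math.floor(scale * d + 0.5) for d in in_size]

print(half_pixel_to_original(0, 2.0))         # -0.25
print(not_larger_sizes([100, 60], [50, 50]))  # [50, 30]
```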
2121
2444
  static const char* Resize_ver13_doc = R"DOC(
2122
2445
  Resize the input tensor. In general, it calculates every value in the output tensor as a weighted average of neighborhood (a.k.a. sampling locations) in the input tensor.
2123
2446
  Each dimension value of the output tensor is:
@@ -2218,7 +2541,7 @@ ONNX_OPERATOR_SET_SCHEMA(
  .Output(0, "Y", "N-D tensor after resizing", "T1", OpSchema::Single, true, 1, OpSchema::Differentiable)
  .TypeConstraint(
  "T1",
- OpSchema::all_tensor_types_with_bfloat(),
+ OpSchema::all_tensor_types_ir4(),
  "Constrain input 'X' and output 'Y' to all tensor types.")
  .TypeConstraint(
  "T2",
@@ -2332,10 +2655,7 @@ ONNX_OPERATOR_SET_SCHEMA(
  .SetDoc("Identity operator")
  .Input(0, "input", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
  .Output(0, "output", "Tensor to copy input into.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
- .TypeConstraint(
- "T",
- OpSchema::all_tensor_types_with_bfloat(),
- "Constrain input and output types to all tensor types.")
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
  .TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));
 
  ONNX_OPERATOR_SET_SCHEMA(
@@ -3633,11 +3953,9 @@ Example 2:
  ]
  axis = 1,
  output = [
- [
- [1.0, 1.9],
- [2.3, 3.9],
- [4.5, 5.9],
- ],
+ [[1.0, 1.9]],
+ [[2.3, 3.9]],
+ [[4.5, 5.9]],
  ]
  ```
  )DOC";
@@ -4252,7 +4570,7 @@ ONNX_OPERATOR_SET_SCHEMA(
  .TypeConstraint(
  "V",
  []() {
- auto t = OpSchema::all_tensor_types_with_bfloat();
+ auto t = OpSchema::all_tensor_types_ir4();
  auto s = OpSchema::all_tensor_sequence_types();
  t.insert(t.end(), s.begin(), s.end());
  return t;
@@ -4437,10 +4755,7 @@ ONNX_OPERATOR_SET_SCHEMA(
  1,
  OpSchema::NonDifferentiable)
  .Output(0, "output", "Tensor after padding.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
- .TypeConstraint(
- "T",
- OpSchema::all_tensor_types_with_bfloat(),
- "Constrain input and output types to all tensor types.")
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
  // Type inference
  propagateElemTypeFromInputToOutput(ctx, 0, 0);
@@ -4483,4 +4798,419 @@ ONNX_OPERATOR_SET_SCHEMA(
4483
4798
  return;
4484
4799
  }));
4485
4800
 
4801
+ static const char* Pad_ver18_doc = R"DOC(
4802
+ Given a tensor containing the data to be padded (`data`), a tensor containing the number of start and end pad values for axis (`pads`), (optionally) a `mode`, and (optionally) `constant_value`,
4803
+ a padded tensor (`output`) is generated.
4804
+
4805
+ The three supported `modes` are (similar to corresponding modes supported by `numpy.pad`):
4806
+
4807
+ 1) `constant`(default) - pads with a given constant value as specified by `constant_value` (which defaults to 0, empty string, or False)
4808
+
4809
+ 2) `reflect` - pads with the reflection of the vector mirrored on the first and last values of the vector along each axis
4810
+
4811
+ 3) `edge` - pads with the edge values of array
4812
+
4813
+
4814
+ Example 1 (`constant` mode):
4815
+
4816
+ Insert 0 pads to the beginning of the second dimension.
4817
+
4818
+ ```
4819
+ data = [
4820
+ [1.0, 1.2],
4821
+ [2.3, 3.4],
4822
+ [4.5, 5.7],
4823
+ ]
4824
+
4825
+ pads = [0, 2, 0, 0]
4826
+
4827
+ mode = 'constant'
4828
+
4829
+ constant_value = 0.0
4830
+
4831
+ output = [
4832
+ [0.0, 0.0, 1.0, 1.2],
4833
+ [0.0, 0.0, 2.3, 3.4],
4834
+ [0.0, 0.0, 4.5, 5.7],
4835
+ ]
4836
+ ```
4837
+
4838
+ Example 2 (`reflect` mode):
4839
+
4840
+ ```
4841
+ data = [
4842
+ [1.0, 1.2],
4843
+ [2.3, 3.4],
4844
+ [4.5, 5.7],
4845
+ ]
4846
+
4847
+ pads = [0, 2, 0, 0]
4848
+
4849
+ mode = 'reflect'
4850
+
4851
+ output = [
4852
+ [1.0, 1.2, 1.0, 1.2],
4853
+ [2.3, 3.4, 2.3, 3.4],
4854
+ [4.5, 5.7, 4.5, 5.7],
4855
+ ]
4856
+ ```
4857
+
4858
+ Example 3 (`edge` mode):
4859
+
4860
+ ```
4861
+ data = [
4862
+ [1.0, 1.2],
4863
+ [2.3, 3.4],
4864
+ [4.5, 5.7],
4865
+ ]
4866
+
4867
+ pads = [0, 2, 0, 0]
4868
+
4869
+ mode = 'edge'
4870
+
4871
+ output = [
4872
+ [1.0, 1.0, 1.0, 1.2],
4873
+ [2.3, 2.3, 2.3, 3.4],
4874
+ [4.5, 4.5, 4.5, 5.7],
4875
+ ]
4876
+ ```
4877
+ )DOC";
4878
+
4879
+ ONNX_OPERATOR_SET_SCHEMA(
4880
+ Pad,
4881
+ 18,
4882
+ OpSchema().FillUsing(PadDocGenerator(Pad_ver18_doc, "Supported modes: `constant`(default), `reflect`, `edge`")));
4883
+
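The three Pad modes documented above mirror `numpy.pad`. A short sketch reproducing Examples 1-3; note that ONNX `pads = [0, 2, 0, 0]` lists all begins then all ends, which corresponds to NumPy's per-axis pad_width `((0, 0), (2, 0))`:

```
import numpy as np

data = np.array([[1.0, 1.2],
                 [2.3, 3.4],
                 [4.5, 5.7]])
pad_width = ((0, 0), (2, 0))   # ONNX pads [0, 2, 0, 0] -> 2 values before axis 1

print(np.pad(data, pad_width, mode="constant", constant_values=0.0))
print(np.pad(data, pad_width, mode="reflect"))
print(np.pad(data, pad_width, mode="edge"))
```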
4884
+ ONNX_OPERATOR_SET_SCHEMA(
4885
+ Identity,
4886
+ 16,
4887
+ OpSchema()
4888
+ .SetDoc("Identity operator")
4889
+ .Input(0, "input", "Input tensor", "V", OpSchema::Single, true, 1, OpSchema::Differentiable)
4890
+ .Output(0, "output", "Tensor to copy input into.", "V", OpSchema::Single, true, 1, OpSchema::Differentiable)
4891
+ .TypeConstraint(
4892
+ "V",
4893
+ []() {
4894
+ auto t = OpSchema::all_tensor_types_ir4();
4895
+ auto s = OpSchema::all_tensor_sequence_types();
4896
+ auto o = OpSchema::all_optional_types();
4897
+ t.insert(t.end(), s.begin(), s.end());
4898
+ t.insert(t.end(), o.begin(), o.end());
4899
+ return t;
4900
+ }(),
4901
+ "Constrain input and output types to all tensor, sequence, and optional types.")
4902
+ .TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));
4903
+
4904
+ static const char* Reshape_ver14_doc = R"DOC(
4905
+ Reshape the input tensor similar to numpy.reshape.
4906
+ First input is the data tensor, second input is a shape tensor which specifies the output shape. It outputs the reshaped tensor.
4907
+ At most one dimension of the new shape can be -1. In this case, the value is
4908
+ inferred from the size of the tensor and the remaining dimensions. A dimension
4909
+ could also be 0, in which case the actual dimension value is unchanged (i.e. taken
4910
+ from the input tensor). If 'allowzero' is set, and the new shape includes 0, the
4911
+ dimension will be set explicitly to zero (i.e. not taken from input tensor).
4912
+ Shape (second input) could be an empty shape, which means converting to a scalar.
4913
+ The input tensor's shape and the output tensor's shape are required to have the same number of elements.
4914
+
4915
+ If the attribute 'allowzero' is set, it is invalid for the specified shape to
4916
+ contain both a zero value and -1, as the value of the dimension corresponding
4917
+ to -1 cannot be determined uniquely.
4918
+ )DOC";
4919
+
4920
+ ONNX_OPERATOR_SET_SCHEMA(
4921
+ Reshape,
4922
+ 14,
4923
+ OpSchema()
4924
+ .SetDoc(Reshape_ver14_doc)
4925
+ .Attr(
4926
+ "allowzero",
4927
+ "(Optional) By default, when any value in the 'shape' input is equal to zero "
4928
+ "the corresponding dimension value is copied from the input tensor dynamically. "
4929
+ "allowzero=1 indicates that if any value in the 'shape' input is set to zero, "
4930
+ "the zero value is honored, similar to NumPy.",
4931
+ AttributeProto::INT,
4932
+ static_cast<int64_t>(0))
4933
+ .Input(0, "data", "An input tensor.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
4934
+ .Input(
4935
+ 1,
4936
+ "shape",
4937
+ "Specified shape for output.",
4938
+ "tensor(int64)",
4939
+ OpSchema::Single,
4940
+ true,
4941
+ 1,
4942
+ OpSchema::NonDifferentiable)
4943
+ .Output(0, "reshaped", "Reshaped data.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
4944
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
4945
+ .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
4946
+ // Type inference
4947
+ propagateElemTypeFromInputToOutput(ctx, 0, 0);
4948
+ // Shape Inference if 2nd input data (the target shape) is available
4949
+ // or the target shape is generated via partial data propagation
4950
+ const TensorProto* targetShapeInitializer = ctx.getInputData(1);
4951
+ const auto* shapeInput = ctx.getSymbolicInput(1);
4952
+ // The targetShapeProto represents the specified shape for output.
4953
+ TensorShapeProto targetShapeProto;
4954
+ if (targetShapeInitializer) {
4955
+ auto targetShape = ParseData<int64_t>(targetShapeInitializer);
4956
+ for (auto val : targetShape) {
4957
+ targetShapeProto.add_dim()->set_dim_value(val);
4958
+ }
4959
+ } else if (shapeInput) {
4960
+ targetShapeProto.CopyFrom(*shapeInput);
4961
+ } else {
4962
+ return;
4963
+ }
4964
+
4965
+ int allowzero = static_cast<int>(getAttribute(ctx, "allowzero", 0));
4966
+
4967
+ // Iterate through targetShape, adding dimensions in the outputShape
4968
+ // TensorProto. If the targetShape dimension is -1, we do not set the
4969
+ // dimension value in this iteration, but we record the Dimension. If
4970
+ // targetShape dimension is 0, we attempt to propagate the dimension
4971
+ // value/param. If the value cannot be inferred, we set the flag in
4972
+ // the unresolveZeros vector. If targetShape dimension is positive, we
4973
+ // set the dimension value in the outputShape. We track the product of
4974
+ // the dimensions we are setting outputShape in the outputProduct
4975
+ // variable. The outputProduct will potentially be used for inferring
4976
+ // a dimension marked -1.
4977
+ auto* outputShape = ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape();
4978
+ TensorShapeProto::Dimension* negativeOneDim = nullptr;
4979
+ const auto& dataInputTensorType = ctx.getInputType(0)->tensor_type();
4980
+ std::vector<bool> unresolvedZeros(targetShapeProto.dim_size(), false);
4981
+ int64_t outputProduct = 1;
4982
+ bool outputProductValid = true;
4983
+ for (int i = 0; i < static_cast<int>(targetShapeProto.dim_size()); ++i) {
4984
+ // Add a new dimension to outputShape
4985
+ auto* new_dim = outputShape->add_dim();
4986
+ if (targetShapeProto.dim(i).has_dim_param()) {
4987
+ // There is a tricky edge case here. It is possible that the value of
4988
+ // symbolic dim can be -1 or 0 at runtime. In that case simply propgating this
4989
+ // symbol can be erroneous. This should be a very rare scenario and in such a
4990
+ // case an option is to turn off data propagation during shape inference.
4991
+ new_dim->set_dim_param(targetShapeProto.dim(i).dim_param());
4992
+ outputProductValid = false;
4993
+ } else {
4994
+ if (!targetShapeProto.dim(i).has_dim_value()) {
4995
+ outputProductValid = false;
4996
+ // treat this dim as unknown dim
4997
+ continue;
4998
+ }
4999
+
5000
+ const auto dim_value = targetShapeProto.dim(i).dim_value();
5001
+
5002
+ if (dim_value == -1) {
5003
+ // Check if multiple -1's. If not, set negativeOneDim, marking
5004
+ // this dimension to potentially be filled in later.
5005
+ if (negativeOneDim) {
5006
+ fail_shape_inference("Target shape may not have multiple -1 dimensions.");
5007
+ }
5008
+ negativeOneDim = new_dim;
5009
+ } else if (dim_value == 0) {
5010
+ // Check if data input has a shape and if the index i is within
5011
+ // its bounds. If these conditions are satisfied, any dimension
5012
+ // value/param should be propogated. If dimension value cannot be
5013
+ // inferred, set the corresponding unresolvedZeros flag to true.
5014
+ // If allowzero is set however, do not propagate values, since output
5015
+ // dimension is explicitly zero.
5016
+ if (allowzero == 0) {
5017
+ unresolvedZeros[i] = true;
5018
+ if (dataInputTensorType.has_shape()) {
5019
+ if (i >= dataInputTensorType.shape().dim_size()) {
5020
+ fail_shape_inference("Invalid position of 0.");
5021
+ }
5022
+ if (dataInputTensorType.shape().dim(i).has_dim_value()) {
5023
+ const auto& input_dim_value = dataInputTensorType.shape().dim(i).dim_value();
5024
+ new_dim->set_dim_value(input_dim_value);
5025
+ outputProduct *= input_dim_value;
5026
+ unresolvedZeros[i] = false;
5027
+ } else if (dataInputTensorType.shape().dim(i).has_dim_param()) {
5028
+ new_dim->set_dim_param(dataInputTensorType.shape().dim(i).dim_param());
5029
+ }
5030
+ }
5031
+ } else {
5032
+ new_dim->set_dim_value(dim_value);
5033
+ outputProduct *= dim_value;
5034
+ }
5035
+ } else if (dim_value > 0) {
5036
+ // Set the dimension value to dim_value
5037
+ new_dim->set_dim_value(dim_value);
5038
+ outputProduct *= dim_value;
5039
+ } else {
5040
+ // Check if value is less than -1; fail if so
5041
+ fail_shape_inference("Invalid dimension value: ", dim_value);
5042
+ }
5043
+ }
5044
+ }
5045
+ // If negativeOneDim has been set, we attempt to infer its value. This
5046
+ // can be done if all dimension values for the data input tensor shape
5047
+ // are known other than the ones corresponding to unresolvedZeros
5048
+ // flags.
5049
+ if (negativeOneDim && outputProductValid) {
5050
+ // First, attempt to compute product of data input shape dimensions
5051
+ // that are not marked by unresolvedZeros. If not possible, set the
5052
+ // inputProductValid flag to false.
5053
+ if (!outputProduct) {
5054
+ fail_shape_inference("Invalid Target shape product of 0. Product cannot be 0 in combination with -1");
5055
+ }
5056
+ int64_t inputProduct = 1;
5057
+ bool inputProductValid = true;
5058
+ if (!dataInputTensorType.has_shape()) {
5059
+ inputProductValid = false;
5060
+ } else {
5061
+ for (int i = 0; i < dataInputTensorType.shape().dim_size(); ++i) {
5062
+ if (dataInputTensorType.shape().dim(i).has_dim_value()) {
5063
+ inputProduct *= dataInputTensorType.shape().dim(i).dim_value();
5064
+ } else if (i >= static_cast<int>(unresolvedZeros.size()) || !unresolvedZeros[i]) {
5065
+ inputProductValid = false;
5066
+ break;
5067
+ }
5068
+ }
5069
+ }
5070
+ if (inputProductValid) {
5071
+ if (inputProduct % outputProduct != 0) {
5072
+ fail_shape_inference("Dimension could not be inferred: incompatible shapes");
5073
+ }
5074
+ negativeOneDim->set_dim_value(inputProduct / outputProduct);
5075
+ }
5076
+ }
5077
+ }));
5078
+
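The Reshape-14 conventions above differ from `numpy.reshape` only in the treatment of 0 (which copies the input dimension unless allowzero=1); a minimal sketch of that rule with a hypothetical helper:

```
import numpy as np

def onnx_like_reshape(x, shape, allowzero=0):
    # 0 copies the corresponding input dimension unless allowzero=1;
    # a single -1 is then inferred by np.reshape from the remaining size.
    resolved = [d if (d != 0 or allowzero) else x.shape[i]
                for i, d in enumerate(shape)]
    return x.reshape(resolved)

x = np.arange(12).reshape(3, 4)
print(onnx_like_reshape(x, [0, -1]).shape)  # (3, 4): 0 keeps dim 0, -1 infers 4
print(onnx_like_reshape(x, [2, -1]).shape)  # (2, 6)
```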
5079
+ static const char* Shape_ver15_doc = R"DOC(
5080
+ Takes a tensor as input and outputs an 1D int64 tensor containing the shape of the input tensor.
5081
+ Optional attributes start and end can be used to compute a slice of the input tensor's shape.
5082
+ If start axis is omitted, the slice starts from axis 0.
5083
+ The end axis, if specified, is exclusive (and the returned value will not include the size of that axis).
5084
+ If the end axis is omitted, the axes upto the last one will be included.
5085
+ Negative axes indicate counting back from the last axis.
5086
+ Note that axes will be clamped to the range [0, r-1], where r is the
5087
+ rank of the input tensor if they are out-of-range (after adding r in the case of
5088
+ negative axis). Thus, specifying any end value > r is equivalent to specifying an end
5089
+ value of r, and specifying any start value < -r is equivalent to specifying a start
5090
+ value of 0.
5091
+
5092
+ Examples:
5093
+
5094
+ ```
5095
+ Input tensor with shape: [2, 3, 4]
5096
+ No attributes specified.
5097
+ Output: [2, 3, 4]
5098
+ ```
5099
+
5100
+ ```
5101
+ Input tensor with shape: [2, 3, 4]
5102
+ start: -1
5103
+ Output: [4]
5104
+ ```
5105
+
5106
+ ```
5107
+ Input tensor with shape: [2, 3, 4]
5108
+ end: -1
5109
+ Output: [2, 3]
5110
+ ```
5111
+
5112
+ ```
5113
+ Input tensor with shape: [2, 3, 4]
5114
+ start: 1
5115
+ end: 2
5116
+ Output: [3]
5117
+ ```
5118
+ )DOC";
5119
+
5120
+ ONNX_OPERATOR_SET_SCHEMA(
5121
+ Shape,
5122
+ 15,
5123
+ OpSchema()
5124
+ .SetDoc(Shape_ver15_doc)
5125
+ .Input(0, "data", "An input tensor.", "T", OpSchema::Single, true, 1, OpSchema::NonDifferentiable)
5126
+ .Output(0, "shape", "Shape of the input tensor", "T1", OpSchema::Single, true, 1, OpSchema::NonDifferentiable)
5127
+ .Attr(
5128
+ "start",
5129
+ "(Optional) Starting axis for slicing the shape. Default value is 0."
5130
+ "Negative value means counting dimensions from the back.",
5131
+ AttributeProto::INT,
5132
+ static_cast<int64_t>(0))
5133
+ .Attr(
5134
+ "end",
5135
+ "(Optional) Ending axis for slicing the shape. "
5136
+ "Negative value means counting dimensions from the back. "
5137
+ "If omitted, sizes of all axes upto (including) the last one will be included.",
5138
+ AttributeProto::INT,
5139
+ OPTIONAL_VALUE)
5140
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Input tensor can be of arbitrary type.")
5141
+ .TypeConstraint("T1", {"tensor(int64)"}, "Constrain output to int64 tensor.")
5142
+ .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
5143
+ ctx.getOutputType(0)->mutable_tensor_type()->set_elem_type(TensorProto::INT64);
5144
+ auto* output_shape = ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape();
5145
+ auto* output_length = output_shape->add_dim();
5146
+
5147
+ if (!hasNInputShapes(ctx, 1)) {
5148
+ return;
5149
+ }
5150
+
5151
+ int64_t rank = static_cast<int64_t>(ctx.getInputType(0)->tensor_type().shape().dim_size());
5152
+ int64_t start = getAttribute(ctx, "start", 0);
5153
+ if (start < 0)
5154
+ start += rank;
5155
+ start = (start < 0) ? 0 : (start > rank) ? rank : start;
5156
+ int64_t end = getAttribute(ctx, "end", rank);
5157
+ if (end < 0)
5158
+ end += rank;
5159
+ end = (end < 0) ? 0 : (end > rank) ? rank : end;
5160
+ output_length->set_dim_value((end - start) < 0 ? 0 : (end - start));
5161
+ })
5162
+ .PartialDataPropagationFunction([](DataPropagationContext& ctx) {
5163
+ if (hasInputShape(ctx, 0)) {
5164
+ auto& input_shape = ctx.getInputType(0)->tensor_type().shape();
5165
+ int64_t rank = static_cast<int64_t>(input_shape.dim_size());
5166
+ int64_t start = getAttribute(ctx, "start", 0);
5167
+ if (start < 0)
5168
+ start += rank;
5169
+ start = (start < 0) ? 0 : (start > rank) ? rank : start;
5170
+ int64_t end = getAttribute(ctx, "end", rank);
5171
+ if (end < 0)
5172
+ end += rank;
5173
+ end = (end < 0) ? 0 : (end > rank) ? rank : end;
5174
+ TensorShapeProto output_shape;
5175
+ for (int64_t d = start; d < end; ++d) {
5176
+ *output_shape.add_dim() = input_shape.dim(static_cast<int>(d));
5177
+ }
5178
+ ctx.addOutputData(0, std::move(output_shape));
5179
+ }
5180
+ }));
5181
+
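The start/end clamping implemented by the inference and data-propagation functions above can be mirrored in a few lines of Python; this is only an illustration of the slicing rules (hypothetical helper name), reproducing the four documented examples:

```
def shape_slice(shape, start=0, end=None):
    # Mirrors Shape-15: negative axes count from the back, then both ends are
    # clamped to [0, rank]; the end axis is exclusive.
    rank = len(shape)
    end = rank if end is None else end
    start, end = (v + rank if v < 0 else v for v in (start, end))
    start, end = max(0, min(start, rank)), max(0, min(end, rank))
    return list(shape[start:end])

print(shape_slice((2, 3, 4)))                  # [2, 3, 4]
print(shape_slice((2, 3, 4), start=-1))        # [4]
print(shape_slice((2, 3, 4), end=-1))          # [2, 3]
print(shape_slice((2, 3, 4), start=1, end=2))  # [3]
```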
5182
+ static const char* Size_ver13_doc = R"DOC(
5183
+ Takes a tensor as input and outputs a int64 scalar that equals to the total number of elements of the input tensor.
5184
+ )DOC";
5185
+
5186
+ ONNX_OPERATOR_SET_SCHEMA(
5187
+ Size,
5188
+ 13,
5189
+ OpSchema()
5190
+ .SetDoc(Size_ver13_doc)
5191
+ .Input(0, "data", "An input tensor.", "T", OpSchema::Single, true, 1, OpSchema::NonDifferentiable)
5192
+ .Output(
5193
+ 0,
5194
+ "size",
5195
+ "Total number of elements of the input tensor",
5196
+ "T1",
5197
+ OpSchema::Single,
5198
+ true,
5199
+ 1,
5200
+ OpSchema::NonDifferentiable)
5201
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Input tensor can be of arbitrary type.")
5202
+ .TypeConstraint("T1", {"tensor(int64)"}, "Constrain output to int64 tensor, which should be a scalar though.")
5203
+ .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
5204
+ ctx.getOutputType(0)->mutable_tensor_type()->set_elem_type(TensorProto::INT64);
5205
+ ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape();
5206
+ })
5207
+ .PartialDataPropagationFunction([](DataPropagationContext& ctx) {
5208
+ const auto input_data = ctx.getInputData(0);
5209
+ if (input_data != nullptr) {
5210
+ TensorShapeProto tsp;
5211
+ tsp.mutable_dim()->Add()->set_dim_value(input_data->dim_size());
5212
+ ctx.addOutputData(0, std::move(tsp));
5213
+ }
5214
+ }));
5215
+
4486
5216
  } // namespace ONNX_NAMESPACE