onnx 1.13.1__cp310-cp310-macosx_10_12_universal2.whl → 1.14.0__cp310-cp310-macosx_10_12_universal2.whl

This diff shows the contents of two publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
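For readers who want to approximate this comparison locally, the sketch below downloads both onnx wheels with pip and lists archive members that were added, removed, or changed. This is a minimal illustration only, assuming network access and a working `python -m pip`; note that pip fetches the wheel matching the local platform, which may differ from the macOS universal2 wheel named above, and the diff site's own tooling is not public, so the output will not match this page exactly.

```python
# Sketch: approximate a "files changed" listing by downloading two onnx wheels
# and comparing their archive members. Assumes network access and `python -m pip`.
import subprocess
import sys
import zipfile
from pathlib import Path


def fetch_wheel(version: str, dest: Path) -> Path:
    """Download the onnx wheel for `version` into `dest` and return its path."""
    dest.mkdir(parents=True, exist_ok=True)
    subprocess.run(
        [sys.executable, "-m", "pip", "download", f"onnx=={version}",
         "--no-deps", "--only-binary", ":all:", "-d", str(dest)],
        check=True,
    )
    return next(dest.glob("onnx-*.whl"))


def members(wheel: Path) -> dict[str, int]:
    """Map archive member name -> CRC32, used as a cheap content fingerprint."""
    with zipfile.ZipFile(wheel) as zf:
        return {info.filename: info.CRC for info in zf.infolist()}


old = members(fetch_wheel("1.13.1", Path("wheels/old")))
new = members(fetch_wheel("1.14.0", Path("wheels/new")))

added = sorted(set(new) - set(old))
removed = sorted(set(old) - set(new))
changed = sorted(name for name in set(old) & set(new) if old[name] != new[name])

print(f"added: {len(added)}, removed: {len(removed)}, changed: {len(changed)}")
for name in changed[:20]:  # print a small sample
    print("  ", name)
```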

Potentially problematic release: this version of onnx might be problematic.

Files changed (1057)
  1. onnx/__init__.py +116 -70
  2. onnx/backend/__init__.py +2 -0
  3. onnx/backend/base.py +3 -0
  4. onnx/backend/sample/__init__.py +2 -0
  5. onnx/backend/sample/ops/__init__.py +8 -6
  6. onnx/backend/sample/ops/abs.py +1 -1
  7. onnx/backend/test/__init__.py +4 -1
  8. onnx/backend/test/case/__init__.py +4 -2
  9. onnx/backend/test/case/base.py +2 -0
  10. onnx/backend/test/case/model/__init__.py +8 -6
  11. onnx/backend/test/case/model/expand.py +4 -3
  12. onnx/backend/test/case/model/gradient.py +4 -3
  13. onnx/backend/test/case/model/sequence.py +4 -3
  14. onnx/backend/test/case/model/shrink.py +4 -3
  15. onnx/backend/test/case/model/sign.py +4 -3
  16. onnx/backend/test/case/model/single-relu.py +4 -3
  17. onnx/backend/test/case/model/stringnormalizer.py +4 -3
  18. onnx/backend/test/case/node/__init__.py +18 -12
  19. onnx/backend/test/case/node/abs.py +4 -3
  20. onnx/backend/test/case/node/acos.py +4 -3
  21. onnx/backend/test/case/node/acosh.py +4 -3
  22. onnx/backend/test/case/node/adagrad.py +4 -3
  23. onnx/backend/test/case/node/adam.py +4 -3
  24. onnx/backend/test/case/node/add.py +4 -3
  25. onnx/backend/test/case/node/ai_onnx_ml/__init__.py +0 -0
  26. onnx/backend/test/case/node/ai_onnx_ml/array_feature_extractor.py +30 -0
  27. onnx/backend/test/case/node/ai_onnx_ml/binarizer.py +27 -0
  28. onnx/backend/test/case/node/and.py +4 -3
  29. onnx/backend/test/case/node/argmax.py +4 -3
  30. onnx/backend/test/case/node/argmin.py +4 -3
  31. onnx/backend/test/case/node/asin.py +4 -3
  32. onnx/backend/test/case/node/asinh.py +4 -3
  33. onnx/backend/test/case/node/atan.py +4 -3
  34. onnx/backend/test/case/node/atanh.py +4 -3
  35. onnx/backend/test/case/node/averagepool.py +43 -4
  36. onnx/backend/test/case/node/batchnorm.py +4 -3
  37. onnx/backend/test/case/node/bernoulli.py +4 -3
  38. onnx/backend/test/case/node/bitshift.py +4 -3
  39. onnx/backend/test/case/node/bitwiseand.py +13 -11
  40. onnx/backend/test/case/node/bitwisenot.py +8 -6
  41. onnx/backend/test/case/node/bitwiseor.py +13 -11
  42. onnx/backend/test/case/node/bitwisexor.py +13 -11
  43. onnx/backend/test/case/node/blackmanwindow.py +4 -4
  44. onnx/backend/test/case/node/cast.py +218 -8
  45. onnx/backend/test/case/node/castlike.py +103 -9
  46. onnx/backend/test/case/node/ceil.py +4 -3
  47. onnx/backend/test/case/node/celu.py +4 -3
  48. onnx/backend/test/case/node/center_crop_pad.py +26 -3
  49. onnx/backend/test/case/node/clip.py +4 -3
  50. onnx/backend/test/case/node/col2im.py +5 -4
  51. onnx/backend/test/case/node/compress.py +4 -3
  52. onnx/backend/test/case/node/concat.py +4 -3
  53. onnx/backend/test/case/node/constant.py +4 -3
  54. onnx/backend/test/case/node/constantofshape.py +4 -3
  55. onnx/backend/test/case/node/conv.py +4 -3
  56. onnx/backend/test/case/node/convinteger.py +4 -3
  57. onnx/backend/test/case/node/convtranspose.py +4 -3
  58. onnx/backend/test/case/node/cos.py +4 -3
  59. onnx/backend/test/case/node/cosh.py +4 -3
  60. onnx/backend/test/case/node/cumsum.py +4 -3
  61. onnx/backend/test/case/node/deformconv.py +170 -0
  62. onnx/backend/test/case/node/depthtospace.py +4 -3
  63. onnx/backend/test/case/node/dequantizelinear.py +46 -3
  64. onnx/backend/test/case/node/det.py +4 -3
  65. onnx/backend/test/case/node/dft.py +4 -4
  66. onnx/backend/test/case/node/div.py +4 -3
  67. onnx/backend/test/case/node/dropout.py +4 -3
  68. onnx/backend/test/case/node/dynamicquantizelinear.py +4 -3
  69. onnx/backend/test/case/node/einsum.py +4 -4
  70. onnx/backend/test/case/node/elu.py +4 -3
  71. onnx/backend/test/case/node/equal.py +28 -3
  72. onnx/backend/test/case/node/erf.py +4 -3
  73. onnx/backend/test/case/node/exp.py +4 -3
  74. onnx/backend/test/case/node/expand.py +4 -3
  75. onnx/backend/test/case/node/eyelike.py +4 -3
  76. onnx/backend/test/case/node/flatten.py +4 -3
  77. onnx/backend/test/case/node/floor.py +4 -3
  78. onnx/backend/test/case/node/gather.py +4 -3
  79. onnx/backend/test/case/node/gatherelements.py +4 -3
  80. onnx/backend/test/case/node/gathernd.py +5 -4
  81. onnx/backend/test/case/node/gemm.py +4 -3
  82. onnx/backend/test/case/node/globalaveragepool.py +4 -3
  83. onnx/backend/test/case/node/globalmaxpool.py +4 -3
  84. onnx/backend/test/case/node/greater.py +4 -3
  85. onnx/backend/test/case/node/greater_equal.py +4 -3
  86. onnx/backend/test/case/node/gridsample.py +4 -3
  87. onnx/backend/test/case/node/groupnormalization.py +5 -4
  88. onnx/backend/test/case/node/gru.py +10 -9
  89. onnx/backend/test/case/node/hammingwindow.py +4 -4
  90. onnx/backend/test/case/node/hannwindow.py +4 -4
  91. onnx/backend/test/case/node/hardmax.py +4 -3
  92. onnx/backend/test/case/node/hardsigmoid.py +4 -3
  93. onnx/backend/test/case/node/hardswish.py +4 -3
  94. onnx/backend/test/case/node/identity.py +4 -3
  95. onnx/backend/test/case/node/if.py +4 -3
  96. onnx/backend/test/case/node/instancenorm.py +4 -3
  97. onnx/backend/test/case/node/isinf.py +4 -3
  98. onnx/backend/test/case/node/isnan.py +4 -3
  99. onnx/backend/test/case/node/layernormalization.py +4 -3
  100. onnx/backend/test/case/node/leakyrelu.py +4 -3
  101. onnx/backend/test/case/node/less.py +4 -3
  102. onnx/backend/test/case/node/less_equal.py +4 -3
  103. onnx/backend/test/case/node/log.py +4 -3
  104. onnx/backend/test/case/node/logsoftmax.py +4 -3
  105. onnx/backend/test/case/node/loop.py +4 -3
  106. onnx/backend/test/case/node/lppool.py +279 -0
  107. onnx/backend/test/case/node/lrn.py +4 -3
  108. onnx/backend/test/case/node/lstm.py +10 -9
  109. onnx/backend/test/case/node/matmul.py +4 -3
  110. onnx/backend/test/case/node/matmulinteger.py +4 -3
  111. onnx/backend/test/case/node/max.py +5 -4
  112. onnx/backend/test/case/node/maxpool.py +9 -4
  113. onnx/backend/test/case/node/maxunpool.py +4 -3
  114. onnx/backend/test/case/node/mean.py +4 -3
  115. onnx/backend/test/case/node/meanvariancenormalization.py +4 -3
  116. onnx/backend/test/case/node/melweightmatrix.py +4 -4
  117. onnx/backend/test/case/node/min.py +5 -4
  118. onnx/backend/test/case/node/mish.py +4 -3
  119. onnx/backend/test/case/node/mod.py +4 -3
  120. onnx/backend/test/case/node/momentum.py +4 -3
  121. onnx/backend/test/case/node/mul.py +4 -3
  122. onnx/backend/test/case/node/neg.py +4 -3
  123. onnx/backend/test/case/node/negativeloglikelihoodloss.py +4 -3
  124. onnx/backend/test/case/node/nonmaxsuppression.py +4 -3
  125. onnx/backend/test/case/node/nonzero.py +4 -3
  126. onnx/backend/test/case/node/not.py +4 -3
  127. onnx/backend/test/case/node/onehot.py +5 -4
  128. onnx/backend/test/case/node/optionalgetelement.py +4 -3
  129. onnx/backend/test/case/node/optionalhaselement.py +4 -3
  130. onnx/backend/test/case/node/or.py +4 -3
  131. onnx/backend/test/case/node/pad.py +36 -5
  132. onnx/backend/test/case/node/pool_op_common.py +20 -2
  133. onnx/backend/test/case/node/pow.py +4 -3
  134. onnx/backend/test/case/node/prelu.py +4 -3
  135. onnx/backend/test/case/node/qlinearconv.py +4 -3
  136. onnx/backend/test/case/node/qlinearmatmul.py +4 -3
  137. onnx/backend/test/case/node/quantizelinear.py +50 -3
  138. onnx/backend/test/case/node/rangeop.py +4 -3
  139. onnx/backend/test/case/node/reciprocal.py +4 -3
  140. onnx/backend/test/case/node/reduce_log_sum.py +4 -3
  141. onnx/backend/test/case/node/reduce_log_sum_exp.py +4 -3
  142. onnx/backend/test/case/node/reducel1.py +4 -3
  143. onnx/backend/test/case/node/reducel2.py +4 -3
  144. onnx/backend/test/case/node/reducemax.py +4 -3
  145. onnx/backend/test/case/node/reducemean.py +4 -3
  146. onnx/backend/test/case/node/reducemin.py +4 -3
  147. onnx/backend/test/case/node/reduceprod.py +4 -3
  148. onnx/backend/test/case/node/reducesum.py +4 -3
  149. onnx/backend/test/case/node/reducesumsquare.py +4 -3
  150. onnx/backend/test/case/node/relu.py +4 -3
  151. onnx/backend/test/case/node/reshape.py +4 -3
  152. onnx/backend/test/case/node/resize.py +73 -321
  153. onnx/backend/test/case/node/reversesequence.py +4 -3
  154. onnx/backend/test/case/node/rnn.py +10 -9
  155. onnx/backend/test/case/node/roialign.py +193 -3
  156. onnx/backend/test/case/node/round.py +4 -3
  157. onnx/backend/test/case/node/scan.py +4 -3
  158. onnx/backend/test/case/node/scatter.py +4 -3
  159. onnx/backend/test/case/node/scatterelements.py +4 -3
  160. onnx/backend/test/case/node/scatternd.py +4 -3
  161. onnx/backend/test/case/node/selu.py +4 -3
  162. onnx/backend/test/case/node/sequence_map.py +4 -4
  163. onnx/backend/test/case/node/sequenceinsert.py +4 -3
  164. onnx/backend/test/case/node/shape.py +4 -3
  165. onnx/backend/test/case/node/shrink.py +4 -3
  166. onnx/backend/test/case/node/sigmoid.py +4 -3
  167. onnx/backend/test/case/node/sign.py +4 -3
  168. onnx/backend/test/case/node/sin.py +4 -3
  169. onnx/backend/test/case/node/sinh.py +4 -3
  170. onnx/backend/test/case/node/size.py +4 -3
  171. onnx/backend/test/case/node/slice.py +4 -3
  172. onnx/backend/test/case/node/softmax.py +4 -3
  173. onnx/backend/test/case/node/softmaxcrossentropy.py +4 -3
  174. onnx/backend/test/case/node/softplus.py +4 -3
  175. onnx/backend/test/case/node/softsign.py +4 -3
  176. onnx/backend/test/case/node/spacetodepth.py +6 -3
  177. onnx/backend/test/case/node/split.py +4 -3
  178. onnx/backend/test/case/node/splittosequence.py +79 -0
  179. onnx/backend/test/case/node/sqrt.py +4 -3
  180. onnx/backend/test/case/node/squeeze.py +2 -0
  181. onnx/backend/test/case/node/stft.py +4 -4
  182. onnx/backend/test/case/node/stringnormalizer.py +4 -4
  183. onnx/backend/test/case/node/sub.py +4 -3
  184. onnx/backend/test/case/node/sum.py +4 -3
  185. onnx/backend/test/case/node/tan.py +4 -3
  186. onnx/backend/test/case/node/tanh.py +4 -3
  187. onnx/backend/test/case/node/tfidfvectorizer.py +4 -3
  188. onnx/backend/test/case/node/thresholdedrelu.py +4 -3
  189. onnx/backend/test/case/node/tile.py +4 -3
  190. onnx/backend/test/case/node/topk.py +4 -3
  191. onnx/backend/test/case/node/transpose.py +8 -7
  192. onnx/backend/test/case/node/trilu.py +4 -3
  193. onnx/backend/test/case/node/unique.py +4 -3
  194. onnx/backend/test/case/node/unsqueeze.py +4 -3
  195. onnx/backend/test/case/node/upsample.py +4 -3
  196. onnx/backend/test/case/node/where.py +4 -3
  197. onnx/backend/test/case/node/xor.py +4 -3
  198. onnx/backend/test/case/test_case.py +2 -0
  199. onnx/backend/test/case/utils.py +9 -0
  200. onnx/backend/test/cmd_tools.py +22 -13
  201. onnx/backend/test/data/light/README.md +16 -0
  202. onnx/backend/test/data/light/light_bvlc_alexnet.onnx +0 -0
  203. onnx/backend/test/data/light/light_bvlc_alexnet_output_0.pb +1 -0
  204. onnx/backend/test/data/light/light_densenet121.onnx +0 -0
  205. onnx/backend/test/data/light/light_densenet121_output_0.pb +1 -0
  206. onnx/backend/test/data/light/light_inception_v1.onnx +0 -0
  207. onnx/backend/test/data/light/light_inception_v1_output_0.pb +1 -0
  208. onnx/backend/test/data/light/light_inception_v2.onnx +0 -0
  209. onnx/backend/test/data/light/light_inception_v2_output_0.pb +1 -0
  210. onnx/backend/test/data/light/light_resnet50.onnx +0 -0
  211. onnx/backend/test/data/light/light_resnet50_output_0.pb +1 -0
  212. onnx/backend/test/data/light/light_shufflenet.onnx +0 -0
  213. onnx/backend/test/data/light/light_shufflenet_output_0.pb +1 -0
  214. onnx/backend/test/data/light/light_squeezenet.onnx +0 -0
  215. onnx/backend/test/data/light/light_squeezenet_output_0.pb +1 -0
  216. onnx/backend/test/data/light/light_vgg19.onnx +0 -0
  217. onnx/backend/test/data/light/light_vgg19_output_0.pb +1 -0
  218. onnx/backend/test/data/light/light_zfnet512.onnx +0 -0
  219. onnx/backend/test/data/light/light_zfnet512_output_0.pb +1 -0
  220. onnx/backend/test/data/node/test_acos/test_data_set_0/output_0.pb +0 -0
  221. onnx/backend/test/data/node/test_acosh/test_data_set_0/output_0.pb +1 -1
  222. onnx/backend/test/data/node/test_ai_onnx_ml_array_feature_extractor/model.onnx +19 -0
  223. onnx/backend/test/data/node/test_ai_onnx_ml_array_feature_extractor/test_data_set_0/input_0.pb +0 -0
  224. onnx/backend/test/data/node/test_ai_onnx_ml_array_feature_extractor/test_data_set_0/input_1.pb +0 -0
  225. onnx/backend/test/data/node/test_ai_onnx_ml_array_feature_extractor/test_data_set_0/output_0.pb +0 -0
  226. onnx/backend/test/data/node/test_ai_onnx_ml_binarizer/model.onnx +0 -0
  227. onnx/backend/test/data/node/test_ai_onnx_ml_binarizer/test_data_set_0/input_0.pb +1 -0
  228. onnx/backend/test/data/node/test_ai_onnx_ml_binarizer/test_data_set_0/output_0.pb +0 -0
  229. onnx/backend/test/data/node/test_asin/test_data_set_0/output_0.pb +1 -1
  230. onnx/backend/test/data/node/test_asinh/test_data_set_0/output_0.pb +1 -1
  231. onnx/backend/test/data/node/test_atan/test_data_set_0/output_0.pb +1 -1
  232. onnx/backend/test/data/node/test_atanh/test_data_set_0/output_0.pb +2 -2
  233. onnx/backend/test/data/node/test_averagepool_1d_default/model.onnx +0 -0
  234. onnx/backend/test/data/node/test_averagepool_2d_ceil/model.onnx +0 -0
  235. onnx/backend/test/data/node/test_averagepool_2d_default/model.onnx +0 -0
  236. onnx/backend/test/data/node/test_averagepool_2d_dilations/model.onnx +0 -0
  237. onnx/backend/test/data/node/test_averagepool_2d_dilations/test_data_set_0/input_0.pb +0 -0
  238. onnx/backend/test/data/node/test_averagepool_2d_dilations/test_data_set_0/output_0.pb +0 -0
  239. onnx/backend/test/data/node/test_averagepool_2d_pads/model.onnx +0 -0
  240. onnx/backend/test/data/node/test_averagepool_2d_pads_count_include_pad/model.onnx +0 -0
  241. onnx/backend/test/data/node/test_averagepool_2d_precomputed_pads/model.onnx +0 -0
  242. onnx/backend/test/data/node/test_averagepool_2d_precomputed_pads_count_include_pad/model.onnx +0 -0
  243. onnx/backend/test/data/node/test_averagepool_2d_precomputed_same_upper/model.onnx +0 -0
  244. onnx/backend/test/data/node/test_averagepool_2d_precomputed_strides/model.onnx +0 -0
  245. onnx/backend/test/data/node/test_averagepool_2d_same_lower/model.onnx +0 -0
  246. onnx/backend/test/data/node/test_averagepool_2d_same_upper/model.onnx +0 -0
  247. onnx/backend/test/data/node/test_averagepool_2d_strides/model.onnx +0 -0
  248. onnx/backend/test/data/node/test_averagepool_3d_default/model.onnx +0 -0
  249. onnx/backend/test/data/node/test_basic_deform_conv_with_padding/model.onnx +0 -0
  250. onnx/backend/test/data/node/test_basic_deform_conv_with_padding/test_data_set_0/input_0.pb +0 -0
  251. onnx/backend/test/data/node/test_basic_deform_conv_with_padding/test_data_set_0/input_1.pb +0 -0
  252. onnx/backend/test/data/node/test_basic_deform_conv_with_padding/test_data_set_0/input_2.pb +0 -0
  253. onnx/backend/test/data/node/test_basic_deform_conv_with_padding/test_data_set_0/output_0.pb +0 -0
  254. onnx/backend/test/data/node/test_basic_deform_conv_without_padding/model.onnx +0 -0
  255. onnx/backend/test/data/node/test_basic_deform_conv_without_padding/test_data_set_0/input_0.pb +0 -0
  256. onnx/backend/test/data/node/test_basic_deform_conv_without_padding/test_data_set_0/input_1.pb +0 -0
  257. onnx/backend/test/data/node/test_basic_deform_conv_without_padding/test_data_set_0/input_2.pb +0 -0
  258. onnx/backend/test/data/node/test_basic_deform_conv_without_padding/test_data_set_0/output_0.pb +0 -0
  259. onnx/backend/test/data/node/test_bitwise_and_i16_3d/test_data_set_0/input_0.pb +0 -0
  260. onnx/backend/test/data/node/test_bitwise_and_i16_3d/test_data_set_0/input_1.pb +0 -0
  261. onnx/backend/test/data/node/test_bitwise_and_i16_3d/test_data_set_0/output_0.pb +0 -0
  262. onnx/backend/test/data/node/test_bitwise_and_i32_2d/test_data_set_0/input_0.pb +0 -0
  263. onnx/backend/test/data/node/test_bitwise_and_i32_2d/test_data_set_0/input_1.pb +0 -0
  264. onnx/backend/test/data/node/test_bitwise_and_i32_2d/test_data_set_0/output_0.pb +0 -0
  265. onnx/backend/test/data/node/test_bitwise_and_ui64_bcast_3v1d/test_data_set_0/input_0.pb +0 -0
  266. onnx/backend/test/data/node/test_bitwise_and_ui64_bcast_3v1d/test_data_set_0/input_1.pb +0 -0
  267. onnx/backend/test/data/node/test_bitwise_and_ui64_bcast_3v1d/test_data_set_0/output_0.pb +0 -0
  268. onnx/backend/test/data/node/test_bitwise_and_ui8_bcast_4v3d/test_data_set_0/input_0.pb +0 -0
  269. onnx/backend/test/data/node/test_bitwise_and_ui8_bcast_4v3d/test_data_set_0/input_1.pb +0 -0
  270. onnx/backend/test/data/node/test_bitwise_and_ui8_bcast_4v3d/test_data_set_0/output_0.pb +0 -0
  271. onnx/backend/test/data/node/test_bitwise_not_2d/test_data_set_0/input_0.pb +0 -0
  272. onnx/backend/test/data/node/test_bitwise_not_2d/test_data_set_0/output_0.pb +0 -0
  273. onnx/backend/test/data/node/test_bitwise_not_3d/test_data_set_0/input_0.pb +0 -0
  274. onnx/backend/test/data/node/test_bitwise_not_3d/test_data_set_0/output_0.pb +0 -0
  275. onnx/backend/test/data/node/test_bitwise_not_4d/test_data_set_0/input_0.pb +0 -0
  276. onnx/backend/test/data/node/test_bitwise_not_4d/test_data_set_0/output_0.pb +0 -0
  277. onnx/backend/test/data/node/test_bitwise_or_i16_4d/test_data_set_0/input_0.pb +0 -0
  278. onnx/backend/test/data/node/test_bitwise_or_i16_4d/test_data_set_0/input_1.pb +0 -0
  279. onnx/backend/test/data/node/test_bitwise_or_i16_4d/test_data_set_0/output_0.pb +0 -0
  280. onnx/backend/test/data/node/test_bitwise_or_i32_2d/test_data_set_0/input_0.pb +0 -0
  281. onnx/backend/test/data/node/test_bitwise_or_i32_2d/test_data_set_0/input_1.pb +0 -0
  282. onnx/backend/test/data/node/test_bitwise_or_i32_2d/test_data_set_0/output_0.pb +0 -0
  283. onnx/backend/test/data/node/test_bitwise_or_ui64_bcast_3v1d/test_data_set_0/input_0.pb +0 -0
  284. onnx/backend/test/data/node/test_bitwise_or_ui64_bcast_3v1d/test_data_set_0/input_1.pb +0 -0
  285. onnx/backend/test/data/node/test_bitwise_or_ui64_bcast_3v1d/test_data_set_0/output_0.pb +0 -0
  286. onnx/backend/test/data/node/test_bitwise_or_ui8_bcast_4v3d/test_data_set_0/input_0.pb +0 -0
  287. onnx/backend/test/data/node/test_bitwise_or_ui8_bcast_4v3d/test_data_set_0/input_1.pb +0 -0
  288. onnx/backend/test/data/node/test_bitwise_or_ui8_bcast_4v3d/test_data_set_0/output_0.pb +0 -0
  289. onnx/backend/test/data/node/test_bitwise_xor_i16_3d/test_data_set_0/input_0.pb +0 -0
  290. onnx/backend/test/data/node/test_bitwise_xor_i16_3d/test_data_set_0/input_1.pb +0 -0
  291. onnx/backend/test/data/node/test_bitwise_xor_i16_3d/test_data_set_0/output_0.pb +0 -0
  292. onnx/backend/test/data/node/test_bitwise_xor_i32_2d/test_data_set_0/input_0.pb +0 -0
  293. onnx/backend/test/data/node/test_bitwise_xor_i32_2d/test_data_set_0/input_1.pb +0 -0
  294. onnx/backend/test/data/node/test_bitwise_xor_i32_2d/test_data_set_0/output_0.pb +0 -0
  295. onnx/backend/test/data/node/test_bitwise_xor_ui64_bcast_3v1d/test_data_set_0/input_0.pb +0 -0
  296. onnx/backend/test/data/node/test_bitwise_xor_ui64_bcast_3v1d/test_data_set_0/input_1.pb +0 -0
  297. onnx/backend/test/data/node/test_bitwise_xor_ui64_bcast_3v1d/test_data_set_0/output_0.pb +0 -0
  298. onnx/backend/test/data/node/test_bitwise_xor_ui8_bcast_4v3d/test_data_set_0/input_0.pb +0 -0
  299. onnx/backend/test/data/node/test_bitwise_xor_ui8_bcast_4v3d/test_data_set_0/input_1.pb +0 -0
  300. onnx/backend/test/data/node/test_bitwise_xor_ui8_bcast_4v3d/test_data_set_0/output_0.pb +0 -0
  301. onnx/backend/test/data/node/test_cast_BFLOAT16_to_FLOAT/model.onnx +0 -0
  302. onnx/backend/test/data/node/test_cast_DOUBLE_to_FLOAT/model.onnx +0 -0
  303. onnx/backend/test/data/node/test_cast_DOUBLE_to_FLOAT16/model.onnx +0 -0
  304. onnx/backend/test/data/node/test_cast_FLOAT16_to_DOUBLE/model.onnx +0 -0
  305. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT/model.onnx +0 -0
  306. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FN/model.onnx +0 -0
  307. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FN/test_data_set_0/input_0.pb +2 -0
  308. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FN/test_data_set_0/output_0.pb +0 -0
  309. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
  310. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FNUZ/test_data_set_0/input_0.pb +2 -0
  311. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FNUZ/test_data_set_0/output_0.pb +0 -0
  312. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2/model.onnx +0 -0
  313. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2/test_data_set_0/input_0.pb +2 -0
  314. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2/test_data_set_0/output_0.pb +0 -0
  315. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
  316. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2FNUZ/test_data_set_0/input_0.pb +2 -0
  317. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2FNUZ/test_data_set_0/output_0.pb +0 -0
  318. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT/model.onnx +0 -0
  319. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT/test_data_set_0/input_0.pb +0 -0
  320. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT/test_data_set_0/output_0.pb +0 -0
  321. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT16/model.onnx +0 -0
  322. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT16/test_data_set_0/input_0.pb +0 -0
  323. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
  324. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT/model.onnx +0 -0
  325. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT/test_data_set_0/input_0.pb +0 -0
  326. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT/test_data_set_0/output_0.pb +0 -0
  327. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT16/model.onnx +0 -0
  328. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT16/test_data_set_0/input_0.pb +0 -0
  329. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
  330. onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT/model.onnx +0 -0
  331. onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT/test_data_set_0/input_0.pb +0 -0
  332. onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT/test_data_set_0/output_0.pb +0 -0
  333. onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT16/model.onnx +0 -0
  334. onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT16/test_data_set_0/input_0.pb +0 -0
  335. onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
  336. onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT/model.onnx +0 -0
  337. onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT/test_data_set_0/input_0.pb +0 -0
  338. onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT/test_data_set_0/output_0.pb +0 -0
  339. onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT16/model.onnx +0 -0
  340. onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT16/test_data_set_0/input_0.pb +0 -0
  341. onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
  342. onnx/backend/test/data/node/test_cast_FLOAT_to_BFLOAT16/model.onnx +0 -0
  343. onnx/backend/test/data/node/test_cast_FLOAT_to_DOUBLE/model.onnx +0 -0
  344. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT16/model.onnx +0 -0
  345. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FN/model.onnx +0 -0
  346. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/input_0.pb +0 -0
  347. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/output_0.pb +0 -0
  348. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
  349. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/input_0.pb +0 -0
  350. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/output_0.pb +0 -0
  351. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2/model.onnx +0 -0
  352. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2/test_data_set_0/input_0.pb +0 -0
  353. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2/test_data_set_0/output_0.pb +0 -0
  354. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
  355. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/input_0.pb +0 -0
  356. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/output_0.pb +0 -0
  357. onnx/backend/test/data/node/test_cast_FLOAT_to_STRING/model.onnx +0 -0
  358. onnx/backend/test/data/node/test_cast_STRING_to_FLOAT/model.onnx +0 -0
  359. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FN/model.onnx +0 -0
  360. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FN/test_data_set_0/input_0.pb +2 -0
  361. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FN/test_data_set_0/output_0.pb +0 -0
  362. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
  363. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FNUZ/test_data_set_0/input_0.pb +2 -0
  364. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FNUZ/test_data_set_0/output_0.pb +0 -0
  365. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2/model.onnx +0 -0
  366. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2/test_data_set_0/input_0.pb +2 -0
  367. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2/test_data_set_0/output_0.pb +0 -0
  368. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
  369. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2FNUZ/test_data_set_0/input_0.pb +2 -0
  370. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2FNUZ/test_data_set_0/output_0.pb +0 -0
  371. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FN/model.onnx +0 -0
  372. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/input_0.pb +0 -0
  373. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/output_0.pb +0 -0
  374. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
  375. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/input_0.pb +0 -0
  376. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/output_0.pb +0 -0
  377. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2/model.onnx +0 -0
  378. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2/test_data_set_0/input_0.pb +0 -0
  379. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2/test_data_set_0/output_0.pb +0 -0
  380. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
  381. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/input_0.pb +0 -0
  382. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/output_0.pb +0 -0
  383. onnx/backend/test/data/node/test_castlike_BFLOAT16_to_FLOAT/model.onnx +0 -0
  384. onnx/backend/test/data/node/test_castlike_BFLOAT16_to_FLOAT_expanded/model.onnx +0 -0
  385. onnx/backend/test/data/node/test_castlike_DOUBLE_to_FLOAT/model.onnx +0 -0
  386. onnx/backend/test/data/node/test_castlike_DOUBLE_to_FLOAT16/model.onnx +0 -0
  387. onnx/backend/test/data/node/test_castlike_DOUBLE_to_FLOAT16_expanded/model.onnx +0 -0
  388. onnx/backend/test/data/node/test_castlike_DOUBLE_to_FLOAT_expanded/model.onnx +0 -0
  389. onnx/backend/test/data/node/test_castlike_FLOAT16_to_DOUBLE/model.onnx +0 -0
  390. onnx/backend/test/data/node/test_castlike_FLOAT16_to_DOUBLE_expanded/model.onnx +0 -0
  391. onnx/backend/test/data/node/test_castlike_FLOAT16_to_FLOAT/model.onnx +0 -0
  392. onnx/backend/test/data/node/test_castlike_FLOAT16_to_FLOAT_expanded/model.onnx +0 -0
  393. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT/model.onnx +0 -0
  394. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT/test_data_set_0/input_0.pb +1 -0
  395. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT/test_data_set_0/input_1.pb +0 -0
  396. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT/test_data_set_0/output_0.pb +0 -0
  397. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT_expanded/model.onnx +0 -0
  398. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT_expanded/test_data_set_0/input_0.pb +1 -0
  399. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT_expanded/test_data_set_0/input_1.pb +0 -0
  400. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT_expanded/test_data_set_0/output_0.pb +0 -0
  401. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT/model.onnx +0 -0
  402. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT/test_data_set_0/input_0.pb +1 -0
  403. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT/test_data_set_0/input_1.pb +0 -0
  404. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT/test_data_set_0/output_0.pb +0 -0
  405. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT_expanded/model.onnx +0 -0
  406. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT_expanded/test_data_set_0/input_0.pb +1 -0
  407. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT_expanded/test_data_set_0/input_1.pb +0 -0
  408. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT_expanded/test_data_set_0/output_0.pb +0 -0
  409. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT/model.onnx +0 -0
  410. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT/test_data_set_0/input_0.pb +1 -0
  411. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT/test_data_set_0/input_1.pb +0 -0
  412. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT/test_data_set_0/output_0.pb +0 -0
  413. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT_expanded/model.onnx +0 -0
  414. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT_expanded/test_data_set_0/input_0.pb +1 -0
  415. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT_expanded/test_data_set_0/input_1.pb +0 -0
  416. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT_expanded/test_data_set_0/output_0.pb +0 -0
  417. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT/model.onnx +0 -0
  418. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT/test_data_set_0/input_0.pb +1 -0
  419. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT/test_data_set_0/input_1.pb +0 -0
  420. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT/test_data_set_0/output_0.pb +0 -0
  421. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT_expanded/model.onnx +0 -0
  422. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT_expanded/test_data_set_0/input_0.pb +1 -0
  423. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT_expanded/test_data_set_0/input_1.pb +0 -0
  424. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT_expanded/test_data_set_0/output_0.pb +0 -0
  425. onnx/backend/test/data/node/test_castlike_FLOAT_to_BFLOAT16/model.onnx +0 -0
  426. onnx/backend/test/data/node/test_castlike_FLOAT_to_BFLOAT16_expanded/model.onnx +0 -0
  427. onnx/backend/test/data/node/test_castlike_FLOAT_to_DOUBLE/model.onnx +0 -0
  428. onnx/backend/test/data/node/test_castlike_FLOAT_to_DOUBLE_expanded/model.onnx +0 -0
  429. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT16/model.onnx +0 -0
  430. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT16_expanded/model.onnx +0 -0
  431. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN/model.onnx +0 -0
  432. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/input_0.pb +0 -0
  433. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/input_1.pb +1 -0
  434. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/output_0.pb +1 -0
  435. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
  436. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/input_0.pb +0 -0
  437. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/input_1.pb +1 -0
  438. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/output_0.pb +1 -0
  439. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ_expanded/model.onnx +0 -0
  440. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ_expanded/test_data_set_0/input_0.pb +0 -0
  441. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ_expanded/test_data_set_0/input_1.pb +1 -0
  442. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ_expanded/test_data_set_0/output_0.pb +1 -0
  443. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN_expanded/model.onnx +0 -0
  444. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN_expanded/test_data_set_0/input_0.pb +0 -0
  445. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN_expanded/test_data_set_0/input_1.pb +1 -0
  446. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN_expanded/test_data_set_0/output_0.pb +1 -0
  447. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2/model.onnx +0 -0
  448. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2/test_data_set_0/input_0.pb +0 -0
  449. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2/test_data_set_0/input_1.pb +1 -0
  450. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2/test_data_set_0/output_0.pb +1 -0
  451. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
  452. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/input_0.pb +0 -0
  453. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/input_1.pb +1 -0
  454. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/output_0.pb +1 -0
  455. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ_expanded/model.onnx +0 -0
  456. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ_expanded/test_data_set_0/input_0.pb +0 -0
  457. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ_expanded/test_data_set_0/input_1.pb +1 -0
  458. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ_expanded/test_data_set_0/output_0.pb +1 -0
  459. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2_expanded/model.onnx +0 -0
  460. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2_expanded/test_data_set_0/input_0.pb +0 -0
  461. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2_expanded/test_data_set_0/input_1.pb +1 -0
  462. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2_expanded/test_data_set_0/output_0.pb +1 -0
  463. onnx/backend/test/data/node/test_castlike_FLOAT_to_STRING/model.onnx +0 -0
  464. onnx/backend/test/data/node/test_castlike_FLOAT_to_STRING_expanded/model.onnx +0 -0
  465. onnx/backend/test/data/node/test_castlike_STRING_to_FLOAT/model.onnx +0 -0
  466. onnx/backend/test/data/node/test_castlike_STRING_to_FLOAT_expanded/model.onnx +0 -0
  467. onnx/backend/test/data/node/test_center_crop_pad_crop_negative_axes_hwc/model.onnx +0 -0
  468. onnx/backend/test/data/node/test_center_crop_pad_crop_negative_axes_hwc/test_data_set_0/input_0.pb +0 -0
  469. onnx/backend/test/data/node/test_center_crop_pad_crop_negative_axes_hwc/test_data_set_0/input_1.pb +0 -0
  470. onnx/backend/test/data/node/test_center_crop_pad_crop_negative_axes_hwc/test_data_set_0/output_0.pb +0 -0
  471. onnx/backend/test/data/node/test_center_crop_pad_crop_negative_axes_hwc_expanded/model.onnx +0 -0
  472. onnx/backend/test/data/node/test_center_crop_pad_crop_negative_axes_hwc_expanded/test_data_set_0/input_0.pb +0 -0
  473. onnx/backend/test/data/node/test_center_crop_pad_crop_negative_axes_hwc_expanded/test_data_set_0/input_1.pb +0 -0
  474. onnx/backend/test/data/node/test_center_crop_pad_crop_negative_axes_hwc_expanded/test_data_set_0/output_0.pb +0 -0
  475. onnx/backend/test/data/node/test_col2im_pads/test_data_set_0/output_0.pb +0 -0
  476. onnx/backend/test/data/node/test_constant/model.onnx +0 -0
  477. onnx/backend/test/data/node/test_constant_pad/model.onnx +0 -0
  478. onnx/backend/test/data/node/test_constant_pad_axes/model.onnx +0 -0
  479. onnx/backend/test/data/node/test_constant_pad_negative_axes/model.onnx +0 -0
  480. onnx/backend/test/data/node/test_constant_pad_negative_axes/test_data_set_0/input_0.pb +1 -0
  481. onnx/backend/test/data/node/test_constant_pad_negative_axes/test_data_set_0/input_1.pb +0 -0
  482. onnx/backend/test/data/node/test_constant_pad_negative_axes/test_data_set_0/input_2.pb +1 -0
  483. onnx/backend/test/data/node/test_constant_pad_negative_axes/test_data_set_0/input_3.pb +1 -0
  484. onnx/backend/test/data/node/test_constant_pad_negative_axes/test_data_set_0/output_0.pb +1 -0
  485. onnx/backend/test/data/node/test_cosh/test_data_set_0/output_0.pb +1 -1
  486. onnx/backend/test/data/node/test_cosh_example/test_data_set_0/output_0.pb +0 -0
  487. onnx/backend/test/data/node/test_deform_conv_with_mask_bias/model.onnx +0 -0
  488. onnx/backend/test/data/node/test_deform_conv_with_mask_bias/test_data_set_0/input_0.pb +0 -0
  489. onnx/backend/test/data/node/test_deform_conv_with_mask_bias/test_data_set_0/input_1.pb +0 -0
  490. onnx/backend/test/data/node/test_deform_conv_with_mask_bias/test_data_set_0/input_2.pb +0 -0
  491. onnx/backend/test/data/node/test_deform_conv_with_mask_bias/test_data_set_0/input_3.pb +0 -0
  492. onnx/backend/test/data/node/test_deform_conv_with_mask_bias/test_data_set_0/input_4.pb +0 -0
  493. onnx/backend/test/data/node/test_deform_conv_with_mask_bias/test_data_set_0/output_0.pb +0 -0
  494. onnx/backend/test/data/node/test_deform_conv_with_multiple_offset_groups/model.onnx +0 -0
  495. onnx/backend/test/data/node/test_deform_conv_with_multiple_offset_groups/test_data_set_0/input_0.pb +0 -0
  496. onnx/backend/test/data/node/test_deform_conv_with_multiple_offset_groups/test_data_set_0/input_1.pb +0 -0
  497. onnx/backend/test/data/node/test_deform_conv_with_multiple_offset_groups/test_data_set_0/input_2.pb +0 -0
  498. onnx/backend/test/data/node/test_deform_conv_with_multiple_offset_groups/test_data_set_0/output_0.pb +0 -0
  499. onnx/backend/test/data/node/test_dequantizelinear/model.onnx +0 -0
  500. onnx/backend/test/data/node/test_dequantizelinear_axis/model.onnx +0 -0
  501. onnx/backend/test/data/node/test_dequantizelinear_e4m3fn/model.onnx +0 -0
  502. onnx/backend/test/data/node/test_dequantizelinear_e4m3fn/test_data_set_0/input_0.pb +0 -0
  503. onnx/backend/test/data/node/test_dequantizelinear_e4m3fn/test_data_set_0/input_1.pb +0 -0
  504. onnx/backend/test/data/node/test_dequantizelinear_e4m3fn/test_data_set_0/output_0.pb +0 -0
  505. onnx/backend/test/data/node/test_dequantizelinear_e5m2/model.onnx +0 -0
  506. onnx/backend/test/data/node/test_dequantizelinear_e5m2/test_data_set_0/input_0.pb +0 -0
  507. onnx/backend/test/data/node/test_dequantizelinear_e5m2/test_data_set_0/input_1.pb +0 -0
  508. onnx/backend/test/data/node/test_dequantizelinear_e5m2/test_data_set_0/output_0.pb +0 -0
  509. onnx/backend/test/data/node/test_edge_pad/model.onnx +0 -0
  510. onnx/backend/test/data/node/test_equal/model.onnx +0 -0
  511. onnx/backend/test/data/node/test_equal_bcast/model.onnx +0 -0
  512. onnx/backend/test/data/node/test_equal_string/model.onnx +0 -0
  513. onnx/backend/test/data/node/test_equal_string/test_data_set_0/input_0.pb +1 -0
  514. onnx/backend/test/data/node/test_equal_string/test_data_set_0/input_1.pb +1 -0
  515. onnx/backend/test/data/node/test_equal_string/test_data_set_0/output_0.pb +0 -0
  516. onnx/backend/test/data/node/test_equal_string_broadcast/model.onnx +0 -0
  517. onnx/backend/test/data/node/test_equal_string_broadcast/test_data_set_0/input_0.pb +1 -0
  518. onnx/backend/test/data/node/test_equal_string_broadcast/test_data_set_0/input_1.pb +1 -0
  519. onnx/backend/test/data/node/test_equal_string_broadcast/test_data_set_0/output_0.pb +0 -0
  520. onnx/backend/test/data/node/test_identity/model.onnx +0 -0
  521. onnx/backend/test/data/node/test_identity_sequence/model.onnx +0 -0
  522. onnx/backend/test/data/node/test_lppool_1d_default/model.onnx +0 -0
  523. onnx/backend/test/data/node/test_lppool_1d_default/test_data_set_0/input_0.pb +1 -0
  524. onnx/backend/test/data/node/test_lppool_1d_default/test_data_set_0/output_0.pb +2 -0
  525. onnx/backend/test/data/node/test_lppool_2d_default/model.onnx +0 -0
  526. onnx/backend/test/data/node/test_lppool_2d_default/test_data_set_0/input_0.pb +0 -0
  527. onnx/backend/test/data/node/test_lppool_2d_default/test_data_set_0/output_0.pb +0 -0
  528. onnx/backend/test/data/node/test_lppool_2d_dilations/model.onnx +0 -0
  529. onnx/backend/test/data/node/test_lppool_2d_dilations/test_data_set_0/input_0.pb +0 -0
  530. onnx/backend/test/data/node/test_lppool_2d_dilations/test_data_set_0/output_0.pb +1 -0
  531. onnx/backend/test/data/node/test_lppool_2d_pads/model.onnx +0 -0
  532. onnx/backend/test/data/node/test_lppool_2d_pads/test_data_set_0/input_0.pb +0 -0
  533. onnx/backend/test/data/node/test_lppool_2d_pads/test_data_set_0/output_0.pb +0 -0
  534. onnx/backend/test/data/node/test_lppool_2d_same_lower/model.onnx +0 -0
  535. onnx/backend/test/data/node/test_lppool_2d_same_lower/test_data_set_0/input_0.pb +0 -0
  536. onnx/backend/test/data/node/test_lppool_2d_same_lower/test_data_set_0/output_0.pb +0 -0
  537. onnx/backend/test/data/node/test_lppool_2d_same_upper/model.onnx +0 -0
  538. onnx/backend/test/data/node/test_lppool_2d_same_upper/test_data_set_0/input_0.pb +0 -0
  539. onnx/backend/test/data/node/test_lppool_2d_same_upper/test_data_set_0/output_0.pb +0 -0
  540. onnx/backend/test/data/node/test_lppool_2d_strides/model.onnx +0 -0
  541. onnx/backend/test/data/node/test_lppool_2d_strides/test_data_set_0/input_0.pb +0 -0
  542. onnx/backend/test/data/node/test_lppool_2d_strides/test_data_set_0/output_0.pb +0 -0
  543. onnx/backend/test/data/node/test_lppool_3d_default/model.onnx +0 -0
  544. onnx/backend/test/data/node/test_lppool_3d_default/test_data_set_0/input_0.pb +0 -0
  545. onnx/backend/test/data/node/test_lppool_3d_default/test_data_set_0/output_0.pb +0 -0
  546. onnx/backend/test/data/node/test_mish/test_data_set_0/output_0.pb +0 -0
  547. onnx/backend/test/data/node/test_mish_expanded/test_data_set_0/output_0.pb +0 -0
  548. onnx/backend/test/data/node/test_quantizelinear/model.onnx +0 -0
  549. onnx/backend/test/data/node/test_quantizelinear_axis/model.onnx +0 -0
  550. onnx/backend/test/data/node/test_quantizelinear_e4m3fn/model.onnx +0 -0
  551. onnx/backend/test/data/node/test_quantizelinear_e4m3fn/test_data_set_0/input_0.pb +0 -0
  552. onnx/backend/test/data/node/test_quantizelinear_e4m3fn/test_data_set_0/input_1.pb +0 -0
  553. onnx/backend/test/data/node/test_quantizelinear_e4m3fn/test_data_set_0/input_2.pb +0 -0
  554. onnx/backend/test/data/node/test_quantizelinear_e4m3fn/test_data_set_0/output_0.pb +0 -0
  555. onnx/backend/test/data/node/test_quantizelinear_e5m2/model.onnx +0 -0
  556. onnx/backend/test/data/node/test_quantizelinear_e5m2/test_data_set_0/input_0.pb +0 -0
  557. onnx/backend/test/data/node/test_quantizelinear_e5m2/test_data_set_0/input_1.pb +0 -0
  558. onnx/backend/test/data/node/test_quantizelinear_e5m2/test_data_set_0/input_2.pb +0 -0
  559. onnx/backend/test/data/node/test_quantizelinear_e5m2/test_data_set_0/output_0.pb +0 -0
  560. onnx/backend/test/data/node/test_reflect_pad/model.onnx +0 -0
  561. onnx/backend/test/data/node/test_reshape_allowzero_reordered/model.onnx +0 -0
  562. onnx/backend/test/data/node/test_reshape_extended_dims/model.onnx +0 -0
  563. onnx/backend/test/data/node/test_reshape_negative_dim/model.onnx +0 -0
  564. onnx/backend/test/data/node/test_reshape_negative_extended_dims/model.onnx +0 -0
  565. onnx/backend/test/data/node/test_reshape_one_dim/model.onnx +0 -0
  566. onnx/backend/test/data/node/test_reshape_reduced_dims/model.onnx +0 -0
  567. onnx/backend/test/data/node/test_reshape_reordered_all_dims/model.onnx +0 -0
  568. onnx/backend/test/data/node/test_reshape_reordered_last_dims/model.onnx +0 -0
  569. onnx/backend/test/data/node/test_reshape_zero_and_negative_dim/model.onnx +0 -0
  570. onnx/backend/test/data/node/test_reshape_zero_dim/model.onnx +0 -0
  571. onnx/backend/test/data/node/test_resize_downsample_scales_cubic/model.onnx +0 -0
  572. onnx/backend/test/data/node/test_resize_downsample_scales_cubic_A_n0p5_exclude_outside/model.onnx +0 -0
  573. onnx/backend/test/data/node/test_resize_downsample_scales_cubic_align_corners/model.onnx +0 -0
  574. onnx/backend/test/data/node/test_resize_downsample_scales_cubic_antialias/model.onnx +0 -0
  575. onnx/backend/test/data/node/test_resize_downsample_scales_linear/model.onnx +0 -0
  576. onnx/backend/test/data/node/test_resize_downsample_scales_linear_align_corners/model.onnx +0 -0
  577. onnx/backend/test/data/node/test_resize_downsample_scales_linear_antialias/model.onnx +0 -0
  578. onnx/backend/test/data/node/test_resize_downsample_scales_linear_half_pixel_symmetric/model.onnx +0 -0
  579. onnx/backend/test/data/node/test_resize_downsample_scales_linear_half_pixel_symmetric/test_data_set_0/input_0.pb +0 -0
  580. onnx/backend/test/data/node/test_resize_downsample_scales_linear_half_pixel_symmetric/test_data_set_0/input_1.pb +0 -0
  581. onnx/backend/test/data/node/test_resize_downsample_scales_linear_half_pixel_symmetric/test_data_set_0/output_0.pb +1 -0
  582. onnx/backend/test/data/node/test_resize_downsample_scales_nearest/model.onnx +0 -0
  583. onnx/backend/test/data/node/test_resize_downsample_sizes_cubic/model.onnx +0 -0
  584. onnx/backend/test/data/node/test_resize_downsample_sizes_cubic_antialias/model.onnx +0 -0
  585. onnx/backend/test/data/node/test_resize_downsample_sizes_linear_antialias/model.onnx +0 -0
  586. onnx/backend/test/data/node/test_resize_downsample_sizes_linear_pytorch_half_pixel/model.onnx +0 -0
  587. onnx/backend/test/data/node/test_resize_downsample_sizes_nearest/model.onnx +0 -0
  588. onnx/backend/test/data/node/test_resize_downsample_sizes_nearest_not_larger/model.onnx +0 -0
  589. onnx/backend/test/data/node/test_resize_downsample_sizes_nearest_not_smaller/model.onnx +0 -0
  590. onnx/backend/test/data/node/test_resize_tf_crop_and_resize/model.onnx +0 -0
  591. onnx/backend/test/data/node/test_resize_tf_crop_and_resize_axes_2_3/model.onnx +0 -0
  592. onnx/backend/test/data/node/test_resize_tf_crop_and_resize_axes_3_2/model.onnx +0 -0
  593. onnx/backend/test/data/node/test_resize_upsample_scales_cubic/model.onnx +0 -0
  594. onnx/backend/test/data/node/test_resize_upsample_scales_cubic_A_n0p5_exclude_outside/model.onnx +0 -0
  595. onnx/backend/test/data/node/test_resize_upsample_scales_cubic_align_corners/model.onnx +0 -0
  596. onnx/backend/test/data/node/test_resize_upsample_scales_cubic_asymmetric/model.onnx +0 -0
  597. onnx/backend/test/data/node/test_resize_upsample_scales_linear/model.onnx +0 -0
  598. onnx/backend/test/data/node/test_resize_upsample_scales_linear_align_corners/model.onnx +0 -0
  599. onnx/backend/test/data/node/test_resize_upsample_scales_linear_half_pixel_symmetric/model.onnx +0 -0
  600. onnx/backend/test/data/node/test_resize_upsample_scales_linear_half_pixel_symmetric/test_data_set_0/input_0.pb +0 -0
  601. onnx/backend/test/data/node/test_resize_upsample_scales_linear_half_pixel_symmetric/test_data_set_0/input_1.pb +0 -0
  602. onnx/backend/test/data/node/test_resize_upsample_scales_linear_half_pixel_symmetric/test_data_set_0/output_0.pb +0 -0
  603. onnx/backend/test/data/node/test_resize_upsample_scales_nearest/model.onnx +0 -0
  604. onnx/backend/test/data/node/test_resize_upsample_scales_nearest_axes_2_3/model.onnx +0 -0
  605. onnx/backend/test/data/node/test_resize_upsample_scales_nearest_axes_3_2/model.onnx +0 -0
  606. onnx/backend/test/data/node/test_resize_upsample_sizes_cubic/model.onnx +0 -0
  607. onnx/backend/test/data/node/test_resize_upsample_sizes_nearest/model.onnx +0 -0
  608. onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_axes_2_3/model.onnx +0 -0
  609. onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_axes_3_2/model.onnx +0 -0
  610. onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_ceil_half_pixel/model.onnx +0 -0
  611. onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_floor_align_corners/model.onnx +0 -0
  612. onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_not_larger/model.onnx +0 -0
  613. onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric/model.onnx +0 -0
  614. onnx/backend/test/data/node/test_roialign_mode_max/model.onnx +0 -0
  615. onnx/backend/test/data/node/test_roialign_mode_max/test_data_set_0/input_0.pb +0 -0
  616. onnx/backend/test/data/node/test_roialign_mode_max/test_data_set_0/input_1.pb +0 -0
  617. onnx/backend/test/data/node/test_roialign_mode_max/test_data_set_0/input_2.pb +0 -0
  618. onnx/backend/test/data/node/test_roialign_mode_max/test_data_set_0/output_0.pb +2 -0
  619. onnx/backend/test/data/node/test_shape/model.onnx +0 -0
  620. onnx/backend/test/data/node/test_shape_clip_end/model.onnx +0 -0
  621. onnx/backend/test/data/node/test_shape_clip_start/model.onnx +0 -0
  622. onnx/backend/test/data/node/test_shape_end_1/model.onnx +0 -0
  623. onnx/backend/test/data/node/test_shape_end_negative_1/model.onnx +0 -0
  624. onnx/backend/test/data/node/test_shape_example/model.onnx +0 -0
  625. onnx/backend/test/data/node/test_shape_start_1/model.onnx +0 -0
  626. onnx/backend/test/data/node/test_shape_start_1_end_2/model.onnx +0 -0
  627. onnx/backend/test/data/node/test_shape_start_1_end_negative_1/model.onnx +0 -0
  628. onnx/backend/test/data/node/test_shape_start_negative_1/model.onnx +0 -0
  629. onnx/backend/test/data/node/test_sinh/test_data_set_0/output_0.pb +1 -1
  630. onnx/backend/test/data/node/test_size/model.onnx +0 -0
  631. onnx/backend/test/data/node/test_size_example/model.onnx +0 -0
  632. onnx/backend/test/data/node/test_softplus_example_expanded_ver18/model.onnx +0 -0
  633. onnx/backend/test/data/node/{test_softplus_expanded → test_softplus_expanded_ver18}/model.onnx +0 -0
  634. onnx/backend/test/data/node/test_split_to_sequence_1/model.onnx +0 -0
  635. onnx/backend/test/data/node/test_split_to_sequence_1/test_data_set_0/input_0.pb +0 -0
  636. onnx/backend/test/data/node/test_split_to_sequence_1/test_data_set_0/input_1.pb +0 -0
  637. onnx/backend/test/data/node/test_split_to_sequence_1/test_data_set_0/output_0.pb +0 -0
  638. onnx/backend/test/data/node/test_split_to_sequence_2/model.onnx +0 -0
  639. onnx/backend/test/data/node/test_split_to_sequence_2/test_data_set_0/input_0.pb +0 -0
  640. onnx/backend/test/data/node/test_split_to_sequence_2/test_data_set_0/input_1.pb +0 -0
  641. onnx/backend/test/data/node/test_split_to_sequence_2/test_data_set_0/output_0.pb +0 -0
  642. onnx/backend/test/data/node/test_split_to_sequence_nokeepdims/model.onnx +0 -0
  643. onnx/backend/test/data/node/test_split_to_sequence_nokeepdims/test_data_set_0/input_0.pb +0 -0
  644. onnx/backend/test/data/node/test_split_to_sequence_nokeepdims/test_data_set_0/output_0.pb +0 -0
  645. onnx/backend/test/data/node/test_tan/test_data_set_0/output_0.pb +1 -1
  646. onnx/backend/test/data/node/test_wrap_pad/model.onnx +0 -0
  647. onnx/backend/test/data/node/test_wrap_pad/test_data_set_0/input_0.pb +0 -0
  648. onnx/backend/test/data/node/test_wrap_pad/test_data_set_0/input_1.pb +0 -0
  649. onnx/backend/test/data/node/test_wrap_pad/test_data_set_0/output_0.pb +0 -0
  650. onnx/backend/test/data/real/test_bvlc_alexnet/data.json +1 -1
  651. onnx/backend/test/data/real/test_densenet121/data.json +1 -1
  652. onnx/backend/test/data/real/test_inception_v1/data.json +1 -1
  653. onnx/backend/test/data/real/test_inception_v2/data.json +1 -1
  654. onnx/backend/test/data/real/test_resnet50/data.json +1 -1
  655. onnx/backend/test/data/real/test_shufflenet/data.json +1 -1
  656. onnx/backend/test/data/real/test_squeezenet/data.json +1 -1
  657. onnx/backend/test/data/real/test_vgg19/data.json +1 -1
  658. onnx/backend/test/data/real/test_zfnet512/data.json +1 -1
  659. onnx/backend/test/loader/__init__.py +3 -1
  660. onnx/backend/test/report/__init__.py +3 -1
  661. onnx/backend/test/report/base.py +2 -0
  662. onnx/backend/test/report/coverage.py +8 -14
  663. onnx/backend/test/runner/__init__.py +146 -39
  664. onnx/backend/test/runner/item.py +2 -0
  665. onnx/backend/test/stat_coverage.py +23 -26
  666. onnx/bin/__init__.py +2 -0
  667. onnx/bin/checker.py +2 -0
  668. onnx/checker.cc +17 -5
  669. onnx/checker.h +3 -3
  670. onnx/checker.py +22 -5
  671. onnx/common/array_ref.h +2 -0
  672. onnx/common/assertions.cc +2 -0
  673. onnx/common/assertions.h +2 -0
  674. onnx/common/common.h +2 -0
  675. onnx/common/constants.h +3 -3
  676. onnx/common/file_utils.h +3 -1
  677. onnx/common/graph_node_list.h +2 -0
  678. onnx/common/interned_strings.cc +2 -0
  679. onnx/common/interned_strings.h +2 -0
  680. onnx/common/ir.h +2 -0
  681. onnx/common/ir_pb_converter.cc +7 -1
  682. onnx/common/ir_pb_converter.h +2 -0
  683. onnx/common/model_helpers.cc +3 -3
  684. onnx/common/model_helpers.h +3 -3
  685. onnx/common/path.cc +0 -1
  686. onnx/common/path.h +0 -1
  687. onnx/common/platform_helpers.h +2 -0
  688. onnx/common/status.cc +2 -0
  689. onnx/common/status.h +2 -0
  690. onnx/common/stl_backports.h +3 -3
  691. onnx/common/tensor.h +24 -171
  692. onnx/common/version.h +3 -1
  693. onnx/compose.py +40 -32
  694. onnx/cpp2py_export.cc +268 -89
  695. onnx/defs/__init__.py +9 -7
  696. onnx/defs/attr_proto_util.cc +2 -0
  697. onnx/defs/attr_proto_util.h +2 -0
  698. onnx/defs/controlflow/defs.cc +25 -369
  699. onnx/defs/controlflow/old.cc +444 -0
  700. onnx/defs/controlflow/utils.cc +357 -0
  701. onnx/defs/controlflow/utils.h +21 -0
  702. onnx/defs/data_propagators.h +2 -0
  703. onnx/defs/data_type_utils.cc +6 -2
  704. onnx/defs/gen_doc.py +32 -46
  705. onnx/defs/gen_shape_inference_information.py +2 -0
  706. onnx/defs/generator/defs.cc +21 -19
  707. onnx/defs/generator/old.cc +157 -0
  708. onnx/defs/logical/defs.cc +17 -16
  709. onnx/defs/logical/old.cc +23 -0
  710. onnx/defs/math/defs.cc +155 -131
  711. onnx/defs/math/old.cc +1 -1
  712. onnx/defs/nn/defs.cc +135 -45
  713. onnx/defs/nn/old.cc +142 -9
  714. onnx/defs/operator_sets.h +45 -0
  715. onnx/defs/optional/defs.cc +8 -4
  716. onnx/defs/parser.cc +50 -3
  717. onnx/defs/parser.h +43 -31
  718. onnx/defs/printer.cc +7 -1
  719. onnx/defs/printer.h +1 -1
  720. onnx/defs/quantization/defs.cc +63 -26
  721. onnx/defs/quantization/old.cc +102 -1
  722. onnx/defs/reduction/defs.cc +1 -1
  723. onnx/defs/reduction/utils.cc +5 -4
  724. onnx/defs/rnn/defs.cc +95 -173
  725. onnx/defs/schema.cc +45 -29
  726. onnx/defs/schema.h +125 -15
  727. onnx/defs/sequence/defs.cc +11 -8
  728. onnx/defs/shape_inference.cc +25 -4
  729. onnx/defs/shape_inference.h +29 -1
  730. onnx/defs/tensor/defs.cc +499 -565
  731. onnx/defs/tensor/old.cc +777 -47
  732. onnx/defs/tensor/utils.cc +130 -8
  733. onnx/defs/tensor/utils.h +2 -0
  734. onnx/defs/tensor_proto_util.cc +3 -0
  735. onnx/defs/traditionalml/defs.cc +19 -2
  736. onnx/examples/Protobufs.ipynb +129 -31
  737. onnx/examples/check_model.ipynb +29 -21
  738. onnx/examples/load_model.ipynb +25 -3
  739. onnx/examples/make_model.ipynb +32 -23
  740. onnx/external_data_helper.py +6 -6
  741. onnx/frontend/__init__.py +2 -0
  742. onnx/gen_proto.py +18 -24
  743. onnx/helper.py +392 -108
  744. onnx/hub.py +189 -20
  745. onnx/mapping.py +29 -3
  746. onnx/numpy_helper.py +263 -52
  747. onnx/onnx-ml.proto +28 -6
  748. onnx/onnx-operators-ml.proto +1 -1
  749. onnx/onnx-operators.in.proto +1 -1
  750. onnx/onnx-operators.proto +1 -1
  751. onnx/onnx.in.proto +28 -6
  752. onnx/onnx.proto +28 -6
  753. onnx/onnx_cpp2py_export.cpython-310-darwin.so +0 -0
  754. onnx/onnx_data_pb2.pyi +2 -1
  755. onnx/onnx_ml_pb2.py +33 -33
  756. onnx/onnx_ml_pb2.pyi +12 -2
  757. onnx/onnx_operators_ml_pb2.pyi +2 -1
  758. onnx/parser.py +29 -13
  759. onnx/printer.py +6 -4
  760. onnx/proto_utils.h +3 -3
  761. onnx/py_utils.h +3 -3
  762. onnx/reference/__init__.py +2 -0
  763. onnx/reference/custom_element_types.py +11 -0
  764. onnx/reference/op_run.py +84 -8
  765. onnx/reference/ops/__init__.py +5 -1
  766. onnx/reference/ops/_helpers.py +55 -0
  767. onnx/reference/ops/_op.py +19 -12
  768. onnx/reference/ops/_op_common_indices.py +2 -0
  769. onnx/reference/ops/_op_common_pool.py +4 -9
  770. onnx/reference/ops/_op_common_random.py +2 -0
  771. onnx/reference/ops/_op_common_window.py +2 -0
  772. onnx/reference/ops/_op_list.py +208 -214
  773. onnx/reference/ops/aionnx_preview_training/__init__.py +4 -2
  774. onnx/reference/ops/aionnx_preview_training/_op_list.py +15 -38
  775. onnx/reference/ops/aionnx_preview_training/_op_run_training.py +2 -0
  776. onnx/reference/ops/aionnx_preview_training/op_adagrad.py +3 -1
  777. onnx/reference/ops/aionnx_preview_training/op_adam.py +3 -1
  778. onnx/reference/ops/aionnx_preview_training/op_momentum.py +3 -1
  779. onnx/reference/ops/aionnxml/__init__.py +3 -0
  780. onnx/reference/ops/aionnxml/_common_classifier.py +81 -0
  781. onnx/reference/ops/aionnxml/_op_list.py +97 -0
  782. onnx/reference/ops/aionnxml/_op_run_aionnxml.py +8 -0
  783. onnx/reference/ops/aionnxml/op_array_feature_extractor.py +50 -0
  784. onnx/reference/ops/aionnxml/op_binarizer.py +15 -0
  785. onnx/reference/ops/aionnxml/op_dict_vectorizer.py +56 -0
  786. onnx/reference/ops/aionnxml/op_feature_vectorizer.py +30 -0
  787. onnx/reference/ops/aionnxml/op_imputer.py +47 -0
  788. onnx/reference/ops/aionnxml/op_label_encoder.py +52 -0
  789. onnx/reference/ops/aionnxml/op_linear_classifier.py +99 -0
  790. onnx/reference/ops/aionnxml/op_linear_regressor.py +26 -0
  791. onnx/reference/ops/aionnxml/op_normalizer.py +41 -0
  792. onnx/reference/ops/aionnxml/op_one_hot_encoder.py +55 -0
  793. onnx/reference/ops/aionnxml/op_scaler.py +12 -0
  794. onnx/reference/ops/aionnxml/op_svm_classifier.py +334 -0
  795. onnx/reference/ops/aionnxml/op_svm_helper.py +99 -0
  796. onnx/reference/ops/aionnxml/op_svm_regressor.py +45 -0
  797. onnx/reference/ops/aionnxml/op_tree_ensemble_classifier.py +132 -0
  798. onnx/reference/ops/aionnxml/op_tree_ensemble_helper.py +109 -0
  799. onnx/reference/ops/aionnxml/op_tree_ensemble_regressor.py +105 -0
  800. onnx/reference/ops/experimental/__init__.py +3 -1
  801. onnx/reference/ops/experimental/_op_list.py +15 -36
  802. onnx/reference/ops/experimental/_op_run_experimental.py +2 -0
  803. onnx/reference/ops/experimental/op_im2col.py +3 -2
  804. onnx/reference/ops/op_abs.py +3 -1
  805. onnx/reference/ops/op_acos.py +3 -1
  806. onnx/reference/ops/op_acosh.py +3 -1
  807. onnx/reference/ops/op_add.py +3 -1
  808. onnx/reference/ops/op_and.py +3 -1
  809. onnx/reference/ops/op_argmax.py +4 -9
  810. onnx/reference/ops/op_argmin.py +4 -9
  811. onnx/reference/ops/op_asin.py +3 -1
  812. onnx/reference/ops/op_asinh.py +3 -1
  813. onnx/reference/ops/op_atan.py +3 -1
  814. onnx/reference/ops/op_atanh.py +3 -1
  815. onnx/reference/ops/op_attribute_has_value.py +2 -0
  816. onnx/reference/ops/op_average_pool.py +80 -2
  817. onnx/reference/ops/op_batch_normalization.py +14 -11
  818. onnx/reference/ops/op_bernoulli.py +3 -2
  819. onnx/reference/ops/op_bitshift.py +3 -1
  820. onnx/reference/ops/op_bitwise_and.py +3 -1
  821. onnx/reference/ops/op_bitwise_not.py +3 -1
  822. onnx/reference/ops/op_bitwise_or.py +3 -1
  823. onnx/reference/ops/op_bitwise_xor.py +3 -1
  824. onnx/reference/ops/op_blackman_window.py +3 -1
  825. onnx/reference/ops/op_cast.py +91 -10
  826. onnx/reference/ops/op_cast_like.py +32 -7
  827. onnx/reference/ops/op_ceil.py +3 -1
  828. onnx/reference/ops/op_celu.py +3 -1
  829. onnx/reference/ops/op_center_crop_pad.py +7 -3
  830. onnx/reference/ops/op_clip.py +2 -7
  831. onnx/reference/ops/op_col2im.py +3 -2
  832. onnx/reference/ops/op_compress.py +2 -0
  833. onnx/reference/ops/op_concat.py +6 -5
  834. onnx/reference/ops/op_concat_from_sequence.py +2 -0
  835. onnx/reference/ops/op_constant.py +46 -35
  836. onnx/reference/ops/op_constant_of_shape.py +4 -0
  837. onnx/reference/ops/op_conv.py +62 -39
  838. onnx/reference/ops/op_conv_integer.py +3 -2
  839. onnx/reference/ops/op_conv_transpose.py +4 -3
  840. onnx/reference/ops/op_cos.py +3 -1
  841. onnx/reference/ops/op_cosh.py +3 -1
  842. onnx/reference/ops/op_cum_sum.py +2 -0
  843. onnx/reference/ops/op_deform_conv.py +178 -0
  844. onnx/reference/ops/op_depth_to_space.py +2 -0
  845. onnx/reference/ops/op_dequantize_linear.py +72 -21
  846. onnx/reference/ops/op_det.py +3 -4
  847. onnx/reference/ops/op_dft.py +2 -0
  848. onnx/reference/ops/op_div.py +3 -1
  849. onnx/reference/ops/op_dropout.py +2 -7
  850. onnx/reference/ops/op_dynamic_quantize_linear.py +2 -0
  851. onnx/reference/ops/op_einsum.py +2 -0
  852. onnx/reference/ops/op_elu.py +4 -2
  853. onnx/reference/ops/op_equal.py +3 -1
  854. onnx/reference/ops/op_erf.py +3 -1
  855. onnx/reference/ops/op_exp.py +4 -2
  856. onnx/reference/ops/op_expand.py +2 -0
  857. onnx/reference/ops/op_eyelike.py +9 -4
  858. onnx/reference/ops/op_flatten.py +3 -1
  859. onnx/reference/ops/op_floor.py +3 -1
  860. onnx/reference/ops/op_gather.py +2 -0
  861. onnx/reference/ops/op_gather_elements.py +2 -0
  862. onnx/reference/ops/op_gathernd.py +3 -1
  863. onnx/reference/ops/op_gemm.py +5 -10
  864. onnx/reference/ops/op_global_average_pool.py +6 -5
  865. onnx/reference/ops/op_global_max_pool.py +2 -0
  866. onnx/reference/ops/op_greater.py +3 -1
  867. onnx/reference/ops/op_greater_or_equal.py +3 -1
  868. onnx/reference/ops/op_grid_sample.py +3 -1
  869. onnx/reference/ops/op_gru.py +4 -1
  870. onnx/reference/ops/op_hamming_window.py +3 -1
  871. onnx/reference/ops/op_hann_window.py +3 -1
  872. onnx/reference/ops/op_hard_sigmoid.py +3 -1
  873. onnx/reference/ops/op_hardmax.py +3 -1
  874. onnx/reference/ops/op_identity.py +3 -1
  875. onnx/reference/ops/op_if.py +16 -7
  876. onnx/reference/ops/op_instance_normalization.py +2 -0
  877. onnx/reference/ops/op_isinf.py +2 -0
  878. onnx/reference/ops/op_isnan.py +3 -1
  879. onnx/reference/ops/op_layer_normalization.py +2 -0
  880. onnx/reference/ops/op_leaky_relu.py +4 -2
  881. onnx/reference/ops/op_less.py +3 -1
  882. onnx/reference/ops/op_less_or_equal.py +3 -1
  883. onnx/reference/ops/op_log.py +4 -2
  884. onnx/reference/ops/op_log_softmax.py +3 -1
  885. onnx/reference/ops/op_loop.py +4 -2
  886. onnx/reference/ops/op_lp_normalization.py +4 -2
  887. onnx/reference/ops/op_lp_pool.py +41 -0
  888. onnx/reference/ops/op_lrn.py +9 -5
  889. onnx/reference/ops/op_lstm.py +4 -2
  890. onnx/reference/ops/op_matmul.py +3 -1
  891. onnx/reference/ops/op_matmul_integer.py +2 -0
  892. onnx/reference/ops/op_max.py +3 -1
  893. onnx/reference/ops/op_max_pool.py +3 -1
  894. onnx/reference/ops/op_max_unpool.py +2 -0
  895. onnx/reference/ops/op_mean.py +3 -1
  896. onnx/reference/ops/op_mel_weight_matrix.py +2 -0
  897. onnx/reference/ops/op_min.py +3 -1
  898. onnx/reference/ops/op_mod.py +2 -0
  899. onnx/reference/ops/op_mul.py +3 -1
  900. onnx/reference/ops/op_neg.py +3 -1
  901. onnx/reference/ops/op_negative_log_likelihood_loss.py +3 -1
  902. onnx/reference/ops/op_non_max_suppression.py +22 -19
  903. onnx/reference/ops/op_non_zero.py +4 -1
  904. onnx/reference/ops/op_not.py +3 -1
  905. onnx/reference/ops/op_one_hot.py +3 -1
  906. onnx/reference/ops/op_optional.py +2 -0
  907. onnx/reference/ops/op_optional_get_element.py +4 -8
  908. onnx/reference/ops/op_optional_has_element.py +3 -9
  909. onnx/reference/ops/op_or.py +3 -1
  910. onnx/reference/ops/op_pad.py +18 -29
  911. onnx/reference/ops/op_pow.py +2 -0
  912. onnx/reference/ops/op_prelu.py +4 -2
  913. onnx/reference/ops/op_qlinear_conv.py +3 -2
  914. onnx/reference/ops/op_qlinear_matmul.py +2 -0
  915. onnx/reference/ops/op_quantize_linear.py +100 -15
  916. onnx/reference/ops/op_random_normal.py +3 -1
  917. onnx/reference/ops/op_random_normal_like.py +3 -2
  918. onnx/reference/ops/op_random_uniform.py +3 -1
  919. onnx/reference/ops/op_random_uniform_like.py +3 -2
  920. onnx/reference/ops/op_range.py +2 -0
  921. onnx/reference/ops/op_reciprocal.py +4 -2
  922. onnx/reference/ops/op_reduce_l1.py +17 -31
  923. onnx/reference/ops/op_reduce_l2.py +17 -35
  924. onnx/reference/ops/op_reduce_log_sum.py +6 -29
  925. onnx/reference/ops/op_reduce_log_sum_exp.py +6 -29
  926. onnx/reference/ops/op_reduce_max.py +15 -36
  927. onnx/reference/ops/op_reduce_mean.py +15 -33
  928. onnx/reference/ops/op_reduce_min.py +15 -32
  929. onnx/reference/ops/op_reduce_prod.py +15 -29
  930. onnx/reference/ops/op_reduce_sum.py +17 -45
  931. onnx/reference/ops/op_reduce_sum_square.py +15 -29
  932. onnx/reference/ops/op_relu.py +3 -1
  933. onnx/reference/ops/op_reshape.py +2 -7
  934. onnx/reference/ops/op_resize.py +59 -26
  935. onnx/reference/ops/op_reverse_sequence.py +2 -0
  936. onnx/reference/ops/op_rnn.py +3 -7
  937. onnx/reference/ops/op_roi_align.py +7 -5
  938. onnx/reference/ops/op_round.py +4 -2
  939. onnx/reference/ops/op_scan.py +5 -2
  940. onnx/reference/ops/op_scatter_elements.py +17 -4
  941. onnx/reference/ops/op_scatternd.py +2 -0
  942. onnx/reference/ops/op_selu.py +5 -1
  943. onnx/reference/ops/op_sequence_at.py +2 -0
  944. onnx/reference/ops/op_sequence_construct.py +2 -0
  945. onnx/reference/ops/op_sequence_empty.py +2 -0
  946. onnx/reference/ops/op_sequence_erase.py +2 -0
  947. onnx/reference/ops/op_sequence_insert.py +4 -2
  948. onnx/reference/ops/op_sequence_length.py +7 -1
  949. onnx/reference/ops/op_sequence_map.py +4 -2
  950. onnx/reference/ops/op_shape.py +2 -7
  951. onnx/reference/ops/op_shrink.py +3 -1
  952. onnx/reference/ops/op_sigmoid.py +7 -1
  953. onnx/reference/ops/op_sign.py +3 -1
  954. onnx/reference/ops/op_sin.py +3 -1
  955. onnx/reference/ops/op_sinh.py +3 -1
  956. onnx/reference/ops/op_size.py +2 -0
  957. onnx/reference/ops/op_slice.py +3 -9
  958. onnx/reference/ops/op_softmax.py +4 -2
  959. onnx/reference/ops/op_softmax_cross_entropy_loss.py +4 -1
  960. onnx/reference/ops/op_softplus.py +4 -2
  961. onnx/reference/ops/op_softsign.py +3 -1
  962. onnx/reference/ops/op_space_to_depth.py +3 -1
  963. onnx/reference/ops/op_split.py +7 -9
  964. onnx/reference/ops/op_split_to_sequence.py +41 -10
  965. onnx/reference/ops/op_sqrt.py +4 -2
  966. onnx/reference/ops/op_squeeze.py +3 -12
  967. onnx/reference/ops/op_stft.py +8 -7
  968. onnx/reference/ops/op_string_normalizer.py +4 -3
  969. onnx/reference/ops/op_sub.py +3 -1
  970. onnx/reference/ops/op_sum.py +3 -1
  971. onnx/reference/ops/op_tan.py +3 -1
  972. onnx/reference/ops/op_tanh.py +3 -1
  973. onnx/reference/ops/op_tfidf_vectorizer.py +15 -13
  974. onnx/reference/ops/op_thresholded_relu.py +4 -2
  975. onnx/reference/ops/op_tile.py +2 -0
  976. onnx/reference/ops/op_topk.py +12 -19
  977. onnx/reference/ops/op_transpose.py +2 -0
  978. onnx/reference/ops/op_trilu.py +3 -1
  979. onnx/reference/ops/op_unique.py +2 -0
  980. onnx/reference/ops/op_unsqueeze.py +2 -9
  981. onnx/reference/ops/op_upsample.py +9 -8
  982. onnx/reference/ops/op_where.py +7 -1
  983. onnx/reference/ops/op_xor.py +3 -1
  984. onnx/reference/reference_evaluator.py +64 -20
  985. onnx/shape_inference/implementation.cc +124 -15
  986. onnx/shape_inference/implementation.h +15 -4
  987. onnx/shape_inference.py +37 -12
  988. onnx/string_utils.h +3 -3
  989. onnx/test/cpp/common_path_test.cc +2 -0
  990. onnx/test/cpp/data_propagation_test.cc +2 -0
  991. onnx/test/cpp/function_context_test.cc +2 -0
  992. onnx/test/cpp/function_get_test.cc +2 -0
  993. onnx/test/cpp/function_verify_test.cc +176 -0
  994. onnx/test/cpp/op_reg_test.cc +2 -0
  995. onnx/test/cpp/parser_test.cc +37 -1
  996. onnx/test/cpp/schema_registration_test.cc +2 -0
  997. onnx/test/cpp/shape_inference_test.cc +2 -0
  998. onnx/test/cpp/test_main.cc +2 -0
  999. onnx/tools/__init__.py +2 -0
  1000. onnx/tools/net_drawer.py +13 -9
  1001. onnx/tools/replace_constants.py +429 -0
  1002. onnx/tools/update_model_dims.py +7 -9
  1003. onnx/utils.py +16 -6
  1004. onnx/version.py +2 -2
  1005. onnx/version_converter/BaseConverter.h +2 -0
  1006. onnx/version_converter/adapters/adapter.h +2 -0
  1007. onnx/version_converter/adapters/axes_attribute_to_input.h +2 -0
  1008. onnx/version_converter/adapters/axes_input_to_attribute.h +2 -0
  1009. onnx/version_converter/adapters/batch_normalization_13_14.h +2 -0
  1010. onnx/version_converter/adapters/broadcast_backward_compatibility.h +2 -0
  1011. onnx/version_converter/adapters/broadcast_forward_compatibility.h +2 -0
  1012. onnx/version_converter/adapters/cast_9_8.h +2 -0
  1013. onnx/version_converter/adapters/clip_10_11.h +2 -0
  1014. onnx/version_converter/adapters/compatible.h +2 -0
  1015. onnx/version_converter/adapters/dropout_11_12.h +2 -0
  1016. onnx/version_converter/adapters/extend_supported_types.h +2 -0
  1017. onnx/version_converter/adapters/gemm_6_7.h +2 -0
  1018. onnx/version_converter/adapters/gemm_7_6.h +2 -0
  1019. onnx/version_converter/adapters/maxpool_8_7.h +2 -0
  1020. onnx/version_converter/adapters/no_previous_version.h +2 -0
  1021. onnx/version_converter/adapters/pad_10_11.h +4 -0
  1022. onnx/version_converter/adapters/remove_consumed_inputs.h +2 -0
  1023. onnx/version_converter/adapters/reshape_4_5.h +2 -0
  1024. onnx/version_converter/adapters/reshape_5_4.h +2 -0
  1025. onnx/version_converter/adapters/resize_10_11.h +2 -0
  1026. onnx/version_converter/adapters/scan_8_9.h +2 -0
  1027. onnx/version_converter/adapters/scan_9_8.h +2 -0
  1028. onnx/version_converter/adapters/scatter_10_11.h +2 -0
  1029. onnx/version_converter/adapters/slice_9_10.h +2 -0
  1030. onnx/version_converter/adapters/softmax_12_13.h +20 -28
  1031. onnx/version_converter/adapters/split_12_13.h +2 -0
  1032. onnx/version_converter/adapters/split_13_12.h +2 -0
  1033. onnx/version_converter/adapters/split_17_18.h +2 -0
  1034. onnx/version_converter/adapters/sum_8_7.h +2 -0
  1035. onnx/version_converter/adapters/topk_9_10.h +2 -0
  1036. onnx/version_converter/adapters/transformers.h +3 -1
  1037. onnx/version_converter/adapters/type_restriction.h +2 -0
  1038. onnx/version_converter/adapters/upsample_6_7.h +2 -0
  1039. onnx/version_converter/adapters/upsample_8_9.h +2 -0
  1040. onnx/version_converter/adapters/upsample_9_10.h +2 -0
  1041. onnx/version_converter/adapters/upsample_9_8.h +2 -0
  1042. onnx/version_converter/convert.cc +14 -7
  1043. onnx/version_converter/convert.h +20 -0
  1044. onnx/version_converter/helper.cc +3 -3
  1045. onnx/version_converter/helper.h +3 -3
  1046. onnx/version_converter.py +6 -3
  1047. {onnx-1.13.1.dist-info → onnx-1.14.0.dist-info}/METADATA +95 -51
  1048. {onnx-1.13.1.dist-info → onnx-1.14.0.dist-info}/RECORD +1056 -743
  1049. {onnx-1.13.1.dist-info → onnx-1.14.0.dist-info}/WHEEL +1 -1
  1050. onnx/backend/test/data/node/test_softplus_example_expanded/model.onnx +0 -0
  1051. /onnx/backend/test/data/node/{test_softplus_example_expanded → test_softplus_example_expanded_ver18}/test_data_set_0/input_0.pb +0 -0
  1052. /onnx/backend/test/data/node/{test_softplus_example_expanded → test_softplus_example_expanded_ver18}/test_data_set_0/output_0.pb +0 -0
  1053. /onnx/backend/test/data/node/{test_softplus_expanded → test_softplus_expanded_ver18}/test_data_set_0/input_0.pb +0 -0
  1054. /onnx/backend/test/data/node/{test_softplus_expanded → test_softplus_expanded_ver18}/test_data_set_0/output_0.pb +0 -0
  1055. {onnx-1.13.1.dist-info → onnx-1.14.0.dist-info}/LICENSE +0 -0
  1056. {onnx-1.13.1.dist-info → onnx-1.14.0.dist-info}/entry_points.txt +0 -0
  1057. {onnx-1.13.1.dist-info → onnx-1.14.0.dist-info}/top_level.txt +0 -0
onnx/defs/tensor/defs.cc CHANGED
@@ -13,7 +13,7 @@
13
13
 
14
14
  namespace ONNX_NAMESPACE {
15
15
 
16
- static const char* Cast_ver13_doc = R"DOC(
16
+ static const char* Cast_ver19_doc = R"DOC(
17
17
  The operator casts the elements of a given input tensor to a data type
18
18
  specified by the 'to' argument and returns an output tensor of the same size in
19
19
  the converted type. The 'to' argument must be one of the data types specified
@@ -21,7 +21,7 @@ in the 'DataType' enum field in the TensorProto message.
21
21
 
22
22
  Casting from string tensor in plain (e.g., "3.14" and "1000") and scientific numeric representations
23
23
  (e.g., "1e-5" and "1E8") to float types is supported. For example, converting string "100.5" to an integer may
24
- result 100. There are some string literals reserved for special floating-point values;
24
+ yield the result 100. There are some string literals reserved for special floating-point values;
25
25
  "+INF" (and "INF"), "-INF", and "NaN" are positive infinity, negative infinity, and not-a-number, respectively.
26
26
  Any string which can exactly match "+INF" in a case-insensitive way would be mapped to positive infinity. Similarly,
27
27
  this case-insensitive rule is applied to "INF" and "NaN". When casting from numeric tensors
@@ -34,7 +34,8 @@ User must be aware of precision loss and value change caused by range difference
34
34
  For example, a 64-bit float 3.1415926459 may be rounded to a 32-bit float 3.141592. Similarly, converting
35
35
  an integer 36 to Boolean may produce 1 because we truncate bits which can't be stored in the targeted type.
36
36
 
37
- In more detail, the conversion among numerical types should follow these rules:
37
+ In more detail, the conversion among numerical types should follow these rules
38
+ if the destination type is not a float 8 type.
38
39
 
39
40
  * Casting from floating point to:
40
41
  * floating point: +/- infinity if OOR (out of range).
@@ -43,24 +44,60 @@ In more detail, the conversion among numerical types should follow these rules:
43
44
  * Casting from fixed point to:
44
45
  * floating point: +/- infinity if OOR. (+ infinity in the case of uint)
45
46
  * fixed point: when OOR, discard higher bits and reinterpret (with respect to two's complement representation for
46
- signed types). For example, 200 (int16) -> -56 (int8).
47
+ signed types). For example, 200 (int16) -> -56 (int8).
47
48
  * bool: zero to False; nonzero to True.
48
49
  * Casting from bool to:
49
50
  * floating point: `{1.0, 0.0}`.
50
51
  * fixed point: `{1, 0}`.
51
52
  * bool: no change.
53
+
54
+ Float 8 types were introduced to speed up the training of
55
+ deep models. By default, the conversion of a float *x* obeys
56
+ the following rules. `[x]` means the value rounded to
57
+ the target mantissa width.
58
+
59
+ | x | E4M3FN | E4M3FNUZ | E5M2 | E5M2FNUZ |
60
+ |------|----|----|----|----|
61
+ | 0 | 0 | 0 | 0 | 0 |
62
+ | -0 | -0 | 0 | -0 | 0 |
63
+ | NaN | NaN | NaN | NaN | NaN |
64
+ | +/- Inf | +/- FLT_MAX | NaN | FLT_MAX | NaN |
65
+ | [x] > FLT_MAX | FLT_MAX | FLT_MAX | FLT_MAX | FLT_MAX |
66
+ | [x] < -FLT_MAX | -FLT_MAX | -FLT_MAX | -FLT_MAX | -FLT_MAX |
67
+ | else | RNE | RNE | RNE | RNE |
68
+
69
+ The behavior changes if the parameter 'saturate' is set to False.
70
+ The rules then become:
71
+
72
+ | x | E4M3FN | E4M3FNUZ | E5M2 | E5M2FNUZ |
73
+ |------|----|----|----|----|
74
+ | 0 | 0 | 0 | 0 | 0 |
75
+ | -0 | -0 | 0 | -0 | 0 |
76
+ | NaN | NaN | NaN | NaN | NaN |
77
+ | +/- Inf | NaN | NaN | +/- Inf | NaN |
78
+ | [x] > FLT_MAX | NaN | NaN | Inf | NaN |
79
+ | [x] < -FLT_MAX | NaN | NaN | -Inf | NaN |
80
+ | else | RNE | RNE | RNE | RNE |
52
81
  )DOC";
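The tables above fully specify the new float 8 behavior, and the `saturate` attribute added below toggles between them. A minimal sketch of attaching that attribute to a Cast-19 node with `onnx.helper` (the tensor names `X` and `Y` are placeholders, not part of this diff):

```
from onnx import TensorProto, helper

# Cast float32 -> float8e4m3fn with the non-saturating behavior (second table):
# out-of-range values map to NaN instead of being clamped to +/- FLT_MAX.
cast_node = helper.make_node(
    "Cast",
    inputs=["X"],
    outputs=["Y"],
    to=TensorProto.FLOAT8E4M3FN,
    saturate=0,  # the default is 1, i.e. the saturating table
)
```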
53
82
 
54
83
  ONNX_OPERATOR_SET_SCHEMA(
55
84
  Cast,
56
- 13,
85
+ 19,
57
86
  OpSchema()
58
- .SetDoc(Cast_ver13_doc)
87
+ .SetDoc(Cast_ver19_doc)
59
88
  .Attr(
60
89
  "to",
61
90
  "The data type to which the elements of the input tensor are cast. "
62
91
  "Strictly must be one of the types from DataType enum in TensorProto",
63
92
  AttributeProto::INT)
93
+ .Attr(
94
+ "saturate",
95
+ "The parameter defines how the conversion behaves if an input value is out of "
96
+ "range of the destination type. It only applies for float 8 conversion "
97
+ "(float8e4m3fn, float8e4m3fnuz, float8e5m2, float8e5m2fnuz). It is true by default. "
98
+ "All cases are fully described in two tables inserted in the operator description.",
99
+ AttributeProto::INT,
100
+ static_cast<int64_t>(1))
64
101
  .Input(0, "input", "Input tensor to be cast.", "T1", OpSchema::Single, true, 1, OpSchema::Differentiable)
65
102
  .Output(
66
103
  0,
@@ -87,7 +124,11 @@ ONNX_OPERATOR_SET_SCHEMA(
87
124
  "tensor(uint64)",
88
125
  "tensor(bool)",
89
126
  "tensor(string)",
90
- "tensor(bfloat16)"},
127
+ "tensor(bfloat16)",
128
+ "tensor(float8e4m3fn)",
129
+ "tensor(float8e4m3fnuz)",
130
+ "tensor(float8e5m2)",
131
+ "tensor(float8e5m2fnuz)"},
91
132
  "Constrain input types. Casting from complex is not supported.")
92
133
  .TypeConstraint(
93
134
  "T2",
@@ -104,7 +145,11 @@ ONNX_OPERATOR_SET_SCHEMA(
104
145
  "tensor(uint64)",
105
146
  "tensor(bool)",
106
147
  "tensor(string)",
107
- "tensor(bfloat16)"},
148
+ "tensor(bfloat16)",
149
+ "tensor(float8e4m3fn)",
150
+ "tensor(float8e4m3fnuz)",
151
+ "tensor(float8e5m2)",
152
+ "tensor(float8e5m2fnuz)"},
108
153
  "Constrain output types. Casting to complex is not supported.")
109
154
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
110
155
  propagateElemTypeFromAttributeToOutput(ctx, "to", 0);
@@ -116,7 +161,7 @@ ONNX_OPERATOR_SET_SCHEMA(
116
161
  PropagateShapeDataFromInputToOutput(ctx, 0);
117
162
  }));
118
163
 
119
- static const char* CastLike_ver15_doc = R"DOC(
164
+ static const char* CastLike_ver19_doc = R"DOC(
120
165
  The operator casts the elements of a given input tensor (the first input) to
121
166
  the same data type as the elements of the second input tensor.
122
167
  See documentation of the Cast operator for further details.
@@ -124,9 +169,17 @@ See documentation of the Cast operator for further details.
124
169
 
125
170
  ONNX_OPERATOR_SET_SCHEMA(
126
171
  CastLike,
127
- 15,
172
+ 19,
128
173
  OpSchema()
129
- .SetDoc(CastLike_ver15_doc)
174
+ .SetDoc(CastLike_ver19_doc)
175
+ .Attr(
176
+ "saturate",
177
+ "The parameter defines how the conversion behaves if an input value is out of "
178
+ "range of the destination type. It only applies for float 8 conversion "
179
+ "(float8e4m3fn, float8e4m3fnuz, float8e5m2, float8e5m2fnuz). It is true by default. "
180
+ "Please refer to operator Cast description for further details.",
181
+ AttributeProto::INT,
182
+ static_cast<int64_t>(1))
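A hypothetical usage sketch for the CastLike-19 schema registered here: the output element type is taken from the second input, so only `saturate` is set explicitly (the names `input`, `target_type` and `output` are placeholders):

```
from onnx import helper

# CastLike casts `input` to the element type of `target_type`.
castlike_node = helper.make_node(
    "CastLike",
    inputs=["input", "target_type"],
    outputs=["output"],
    saturate=1,  # keep the default saturating float 8 behavior
)
```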
130
183
  .Input(0, "input", "Input tensor to be cast.", "T1", OpSchema::Single, true, 1, OpSchema::Differentiable)
131
184
  .Input(
132
185
  1,
@@ -161,7 +214,11 @@ ONNX_OPERATOR_SET_SCHEMA(
161
214
  "tensor(uint64)",
162
215
  "tensor(bool)",
163
216
  "tensor(string)",
164
- "tensor(bfloat16)"},
217
+ "tensor(bfloat16)",
218
+ "tensor(float8e4m3fn)",
219
+ "tensor(float8e4m3fnuz)",
220
+ "tensor(float8e5m2)",
221
+ "tensor(float8e5m2fnuz)"},
165
222
  "Constrain input types. Casting from complex is not supported.")
166
223
  .TypeConstraint(
167
224
  "T2",
@@ -178,7 +235,11 @@ ONNX_OPERATOR_SET_SCHEMA(
178
235
  "tensor(uint64)",
179
236
  "tensor(bool)",
180
237
  "tensor(string)",
181
- "tensor(bfloat16)"},
238
+ "tensor(bfloat16)",
239
+ "tensor(float8e4m3fn)",
240
+ "tensor(float8e4m3fnuz)",
241
+ "tensor(float8e5m2)",
242
+ "tensor(float8e5m2fnuz)"},
182
243
  "Constrain output types. Casting to complex is not supported.")
183
244
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
184
245
  propagateElemTypeFromInputToOutput(ctx, 1, 0);
@@ -200,7 +261,7 @@ ONNX_OPERATOR_SET_SCHEMA(
200
261
  return true;
201
262
  }));
202
263
 
203
- static const char* Reshape_ver14_doc = R"DOC(
264
+ static const char* Reshape_ver19_doc = R"DOC(
204
265
  Reshape the input tensor similar to numpy.reshape.
205
266
  First input is the data tensor, second input is a shape tensor which specifies the output shape. It outputs the reshaped tensor.
206
267
  At most one dimension of the new shape can be -1. In this case, the value is
@@ -218,9 +279,9 @@ to -1 cannot be determined uniquely.
218
279
 
219
280
  ONNX_OPERATOR_SET_SCHEMA(
220
281
  Reshape,
221
- 14,
282
+ 19,
222
283
  OpSchema()
223
- .SetDoc(Reshape_ver14_doc)
284
+ .SetDoc(Reshape_ver19_doc)
224
285
  .Attr(
225
286
  "allowzero",
226
287
  "(Optional) By default, when any value in the 'shape' input is equal to zero "
@@ -240,10 +301,7 @@ ONNX_OPERATOR_SET_SCHEMA(
240
301
  1,
241
302
  OpSchema::NonDifferentiable)
242
303
  .Output(0, "reshaped", "Reshaped data.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
243
- .TypeConstraint(
244
- "T",
245
- OpSchema::all_tensor_types_with_bfloat(),
246
- "Constrain input and output types to all tensor types.")
304
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir9(), "Constrain input and output types to all tensor types.")
247
305
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
248
306
  // Type inference
249
307
  propagateElemTypeFromInputToOutput(ctx, 0, 0);
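The Reshape hunks above bump the operator to opset 19 and switch `T` to the IR-9 tensor type list; the -1 shape semantics are unchanged. A minimal numpy sketch of that inference rule, not the ONNX implementation:

```
import numpy as np

data = np.arange(24, dtype=np.float32).reshape(2, 3, 4)
# A single -1 is inferred from the remaining dimensions, mirroring numpy.reshape:
# 24 elements / 4 rows -> 6 columns.
reshaped = data.reshape(4, -1)
assert reshaped.shape == (4, 6)
```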
@@ -391,28 +449,37 @@ negative axis). Thus, specifying any end value > r is equivalent to specifying a
391
449
  value of r, and specifying any start value < -r is equivalent to specifying a start
392
450
  value of 0.
393
451
 
394
- For example:
452
+ Examples:
453
+
454
+ ```
395
455
  Input tensor with shape: [2, 3, 4]
396
456
  No attributes specified.
397
457
  Output: [2, 3, 4]
458
+ ```
398
459
 
460
+ ```
399
461
  Input tensor with shape: [2, 3, 4]
400
462
  start: -1
401
463
  Output: [4]
464
+ ```
402
465
 
466
+ ```
403
467
  Input tensor with shape: [2, 3, 4]
404
468
  end: -1
405
469
  Output: [2, 3]
470
+ ```
406
471
 
472
+ ```
407
473
  Input tensor with shape: [2, 3, 4]
408
474
  start: 1
409
475
  end: 2
410
476
  Output: [3]
477
+ ```
411
478
  )DOC";
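The four fenced Shape examples above reduce to slicing the shape tuple; a small sketch (numpy is used only to build a tensor of shape [2, 3, 4]):

```
import numpy as np

x = np.zeros((2, 3, 4))
shape = list(x.shape)
assert shape == [2, 3, 4]      # no start/end specified
assert shape[-1:] == [4]       # start = -1
assert shape[:-1] == [2, 3]    # end = -1
assert shape[1:2] == [3]       # start = 1, end = 2
```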
412
479
 
413
480
  ONNX_OPERATOR_SET_SCHEMA(
414
481
  Shape,
415
- 15,
482
+ 19,
416
483
  OpSchema()
417
484
  .SetDoc(Shape_ver15_doc)
418
485
  .Input(0, "data", "An input tensor.", "T", OpSchema::Single, true, 1, OpSchema::NonDifferentiable)
@@ -430,7 +497,7 @@ ONNX_OPERATOR_SET_SCHEMA(
430
497
  "If omitted, sizes of all axes upto (including) the last one will be included.",
431
498
  AttributeProto::INT,
432
499
  OPTIONAL_VALUE)
433
- .TypeConstraint("T", OpSchema::all_tensor_types_with_bfloat(), "Input tensor can be of arbitrary type.")
500
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir9(), "Input tensor can be of arbitrary type.")
434
501
  .TypeConstraint("T1", {"tensor(int64)"}, "Constrain output to int64 tensor.")
435
502
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
436
503
  ctx.getOutputType(0)->mutable_tensor_type()->set_elem_type(TensorProto::INT64);
@@ -478,7 +545,7 @@ Takes a tensor as input and outputs a int64 scalar that equals to the total numb
478
545
 
479
546
  ONNX_OPERATOR_SET_SCHEMA(
480
547
  Size,
481
- 13,
548
+ 19,
482
549
  OpSchema()
483
550
  .SetDoc(Size_ver13_doc)
484
551
  .Input(0, "data", "An input tensor.", "T", OpSchema::Single, true, 1, OpSchema::NonDifferentiable)
@@ -491,7 +558,7 @@ ONNX_OPERATOR_SET_SCHEMA(
491
558
  true,
492
559
  1,
493
560
  OpSchema::NonDifferentiable)
494
- .TypeConstraint("T", OpSchema::all_tensor_types_with_bfloat(), "Input tensor can be of arbitrary type.")
561
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir9(), "Input tensor can be of arbitrary type.")
495
562
  .TypeConstraint("T1", {"tensor(int64)"}, "Constrain output to int64 tensor, which should be a scalar though.")
496
563
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
497
564
  ctx.getOutputType(0)->mutable_tensor_type()->set_elem_type(TensorProto::INT64);
@@ -528,7 +595,7 @@ ONNX_OPERATOR_SET_SCHEMA(
528
595
  1,
529
596
  OpSchema::Differentiable)
530
597
  .Output(0, "concat_result", "Concatenated tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
531
- .TypeConstraint("T", OpSchema::all_tensor_types_with_bfloat(), "Constrain output types to any tensor type.")
598
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain output types to any tensor type.")
532
599
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
533
600
  propagateElemTypeFromInputToOutput(ctx, 0, 0);
534
601
  auto numInputs = ctx.getNumInputs();
@@ -640,10 +707,7 @@ ONNX_OPERATOR_SET_SCHEMA(
640
707
  true,
641
708
  1,
642
709
  OpSchema::Differentiable)
643
- .TypeConstraint(
644
- "T",
645
- OpSchema::all_tensor_types_with_bfloat(),
646
- "Constrain input and output types to all tensor types.")
710
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
647
711
  .Attr(
648
712
  "axis",
649
713
  "Which axis to split on. "
@@ -772,27 +836,34 @@ For slicing to the end of a dimension with unknown size, it is recommended to pa
772
836
  in `INT_MAX` when slicing forward and 'INT_MIN' when slicing backward.
773
837
 
774
838
  Example 1:
775
- data = [
776
- [1, 2, 3, 4],
777
- [5, 6, 7, 8],
778
- ]
779
- axes = [0, 1]
780
- starts = [1, 0]
781
- ends = [2, 3]
782
- steps = [1, 2]
783
- result = [
784
- [5, 7],
785
- ]
839
+
840
+ ```
841
+ data = [
842
+ [1, 2, 3, 4],
843
+ [5, 6, 7, 8],
844
+ ]
845
+ axes = [0, 1]
846
+ starts = [1, 0]
847
+ ends = [2, 3]
848
+ steps = [1, 2]
849
+ result = [
850
+ [5, 7],
851
+ ]
852
+ ```
853
+
786
854
  Example 2:
787
- data = [
788
- [1, 2, 3, 4],
789
- [5, 6, 7, 8],
790
- ]
791
- starts = [0, 1]
792
- ends = [-1, 1000]
793
- result = [
794
- [2, 3, 4],
795
- ]
855
+
856
+ ```
857
+ data = [
858
+ [1, 2, 3, 4],
859
+ [5, 6, 7, 8],
860
+ ]
861
+ starts = [0, 1]
862
+ ends = [-1, 1000]
863
+ result = [
864
+ [2, 3, 4],
865
+ ]
866
+ ```
796
867
  )DOC";
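Example 1 above corresponds directly to numpy basic slicing; a minimal sketch of that correspondence, not the reference implementation:

```
import numpy as np

data = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
# axes = [0, 1], starts = [1, 0], ends = [2, 3], steps = [1, 2]
result = data[1:2, 0:3:2]
assert result.tolist() == [[5, 7]]
```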
797
868
 
798
869
  inline void processSliceInputs(const int64_t input_rank, int64_t& start, int64_t& end, int64_t& step) {
@@ -874,10 +945,7 @@ ONNX_OPERATOR_SET_SCHEMA(
874
945
  1,
875
946
  OpSchema::NonDifferentiable)
876
947
  .Output(0, "output", "Sliced data tensor.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
877
- .TypeConstraint(
878
- "T",
879
- OpSchema::all_tensor_types_with_bfloat(),
880
- "Constrain input and output types to all tensor types.")
948
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
881
949
  .TypeConstraint("Tind", {"tensor(int32)", "tensor(int64)"}, "Constrain indices to integer types")
882
950
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
883
951
  size_t num_inputs = ctx.getNumInputs();
@@ -939,7 +1007,9 @@ ONNX_OPERATOR_SET_SCHEMA(
939
1007
  fail_shape_inference("Input axes has incorrect length");
940
1008
  }
941
1009
  }
942
-
1010
+ checkAxesRange(axes, input_rank);
1011
+ adjustNegativeAxes(axes, input_rank);
1012
+ checkDuplicateAxes(axes, input_rank);
943
1013
  std::vector<int64_t> steps;
944
1014
  if (!stepsInitializer) {
945
1015
  steps = std::vector<int64_t>(starts.size(), 1);
@@ -961,21 +1031,10 @@ ONNX_OPERATOR_SET_SCHEMA(
961
1031
  }
962
1032
  }
963
1033
 
964
- std::unordered_set<int64_t> unique_axes;
965
1034
  size_t axes_size = axes.size();
966
1035
  for (size_t axis_index = 0; axis_index < axes_size; ++axis_index) {
967
1036
  auto axis = axes[axis_index] < 0 ? axes[axis_index] + static_cast<int64_t>(input_rank) : axes[axis_index];
968
1037
 
969
- if (axis >= static_cast<int64_t>(input_rank) || axis < 0) {
970
- fail_shape_inference("Input axes has invalid data");
971
- }
972
-
973
- if (unique_axes.find(axis) != unique_axes.end()) {
974
- fail_shape_inference("'axes' has duplicates");
975
- }
976
-
977
- unique_axes.insert(axis);
978
-
979
1038
  auto input_dim = ctx.getInputType(0)->tensor_type().shape().dim((int)axis);
980
1039
 
981
1040
  // input dim value is missing - cannot perform shape inference for
@@ -1089,10 +1148,7 @@ ONNX_OPERATOR_SET_SCHEMA(
1089
1148
  OPTIONAL_VALUE)
1090
1149
  .Input(0, "data", "An input tensor.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
1091
1150
  .Output(0, "transposed", "Transposed output.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
1092
- .TypeConstraint(
1093
- "T",
1094
- OpSchema::all_tensor_types_with_bfloat(),
1095
- "Constrain input and output types to all tensor types.")
1151
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
1096
1152
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
1097
1153
  propagateElemTypeFromInputToOutput(ctx, 0, 0);
1098
1154
  if (!hasNInputShapes(ctx, 1)) {
@@ -1255,7 +1311,7 @@ specified by `updates` at specific index positions specified by `indices`. Its o
1255
1311
  is the same as the shape of `data`.
1256
1312
 
1257
1313
  `indices` is an integer tensor. Let k denote indices.shape[-1], the last dimension in the shape of `indices`.
1258
- `indices` is treated as a (q-1)-dimensional tensor of k-tuples, where each k-tuple is a partial-index into `data`.
1314
+ `indices` is treated as a (q-1)-dimensional tensor of k-tuples, where each k-tuple is a partial-index into `data`.
1259
1315
  Hence, k can be a value at most the rank of `data`. When k equals rank(data), each update entry specifies an
1260
1316
  update to a single element of the tensor. When k is less than rank(data) each update entry specifies an
1261
1317
  update to a slice of the tensor. Index values are allowed to be negative, as per the usual
@@ -1271,10 +1327,12 @@ of shapes.
1271
1327
 
1272
1328
  The `output` is calculated via the following equation:
1273
1329
 
1274
- output = np.copy(data)
1275
- update_indices = indices.shape[:-1]
1276
- for idx in np.ndindex(update_indices):
1277
- output[indices[idx]] = updates[idx]
1330
+ ```
1331
+ output = np.copy(data)
1332
+ update_indices = indices.shape[:-1]
1333
+ for idx in np.ndindex(update_indices):
1334
+ output[indices[idx]] = updates[idx]
1335
+ ```
1278
1336
 
1279
1337
  The order of iteration in the above loop is not specified.
1280
1338
  In particular, indices should not have duplicate entries: that is, if idx1 != idx2, then indices[idx1] != indices[idx2].
@@ -1286,12 +1344,14 @@ In cases where `reduction` is set to "none", indices should not have duplicate e
1286
1344
  then indices[idx1] != indices[idx2]. This ensures that the output value does not depend on the iteration order.
1287
1345
  When `reduction` is set to some reduction function `f`, `output` is calculated as follows:
1288
1346
 
1289
- output = np.copy(data)
1290
- update_indices = indices.shape[:-1]
1291
- for idx in np.ndindex(update_indices):
1292
- output[indices[idx]] = f(output[indices[idx]], updates[idx])
1347
+ ```
1348
+ output = np.copy(data)
1349
+ update_indices = indices.shape[:-1]
1350
+ for idx in np.ndindex(update_indices):
1351
+ output[indices[idx]] = f(output[indices[idx]], updates[idx])
1352
+ ```
1293
1353
 
1294
- where the `f` is +/*/max/min as specified.
1354
+ where the `f` is `+`, `*`, `max` or `min` as specified.
1295
1355
 
1296
1356
  This operator is the inverse of GatherND.
1297
1357
 
@@ -1299,25 +1359,25 @@ This operator is the inverse of GatherND.
1299
1359
 
1300
1360
  Example 1:
1301
1361
  ```
1302
- data = [1, 2, 3, 4, 5, 6, 7, 8]
1303
- indices = [[4], [3], [1], [7]]
1304
- updates = [9, 10, 11, 12]
1305
- output = [1, 11, 3, 10, 9, 6, 7, 12]
1362
+ data = [1, 2, 3, 4, 5, 6, 7, 8]
1363
+ indices = [[4], [3], [1], [7]]
1364
+ updates = [9, 10, 11, 12]
1365
+ output = [1, 11, 3, 10, 9, 6, 7, 12]
1306
1366
  ```
1307
1367
 
1308
1368
  Example 2:
1309
1369
  ```
1310
- data = [[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
1311
- [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
1312
- [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],
1313
- [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]
1314
- indices = [[0], [2]]
1315
- updates = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
1316
- [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]]
1317
- output = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
1318
- [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
1319
- [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
1320
- [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]
1370
+ data = [[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
1371
+ [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
1372
+ [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],
1373
+ [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]
1374
+ indices = [[0], [2]]
1375
+ updates = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
1376
+ [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]]
1377
+ output = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
1378
+ [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
1379
+ [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
1380
+ [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]
1321
1381
  ```
1322
1382
  )DOC";
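The update loop from the ScatterND description, run on Example 1 above as a quick check (a sketch only, not the onnx reference evaluator):

```
import numpy as np

data = np.array([1, 2, 3, 4, 5, 6, 7, 8])
indices = np.array([[4], [3], [1], [7]])
updates = np.array([9, 10, 11, 12])

output = np.copy(data)
for idx in np.ndindex(indices.shape[:-1]):
    # each k-tuple indices[idx] selects the slice of `output` to overwrite
    output[tuple(indices[idx])] = updates[idx]
assert output.tolist() == [1, 11, 3, 10, 9, 6, 7, 12]
```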
1323
1383
 
@@ -1356,10 +1416,7 @@ ONNX_OPERATOR_SET_SCHEMA(
1356
1416
  1,
1357
1417
  OpSchema::Differentiable)
1358
1418
  .Output(0, "output", "Tensor of rank r >= 1.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
1359
- .TypeConstraint(
1360
- "T",
1361
- OpSchema::all_tensor_types_with_bfloat(),
1362
- "Constrain input and output types to any tensor type.")
1419
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to any tensor type.")
1363
1420
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
1364
1421
  propagateElemTypeFromInputToOutput(ctx, 0, 0);
1365
1422
  if (hasNInputShapes(ctx, 1)) {
@@ -1387,16 +1444,15 @@ In cases where `reduction` is set to "none", indices should not have duplicate e
1387
1444
  then indices[idx1] != indices[idx2]. For instance, in a 2-D tensor case, the update
1388
1445
  corresponding to the [i][j] entry is performed as below:
1389
1446
  ```
1390
- output[indices[i][j]][j] = updates[i][j] if axis = 0,
1391
- output[i][indices[i][j]] = updates[i][j] if axis = 1,
1447
+ output[indices[i][j]][j] = updates[i][j] if axis = 0,
1448
+ output[i][indices[i][j]] = updates[i][j] if axis = 1,
1392
1449
  ```
1393
1450
  When `reduction` is set to some reduction function `f`, the update corresponding to the [i][j] entry is performed as below:
1394
1451
  ```
1395
- output[indices[i][j]][j] += f(output[indices[i][j]][j], updates[i][j]) if axis = 0,
1396
- output[i][indices[i][j]] += f(output[i][indices[i][j]], updates[i][j]) if axis = 1,
1452
+ output[indices[i][j]][j] = f(output[indices[i][j]][j], updates[i][j]) if axis = 0,
1453
+ output[i][indices[i][j]] = f(output[i][indices[i][j]], updates[i][j]) if axis = 1,
1397
1454
  ```
1398
- where the `f` is +/*/max/min as specified.
1399
-
1455
+ where the `f` is `+`, `*`, `max` or `min` as specified.
1400
1456
 
1401
1457
  This operator is the inverse of GatherElements. It is similar to Torch's Scatter operation.
1402
1458
 
@@ -1404,32 +1460,32 @@ This operator is the inverse of GatherElements. It is similar to Torch's Scatter
1404
1460
 
1405
1461
  Example 1:
1406
1462
  ```
1407
- data = [
1408
- [0.0, 0.0, 0.0],
1409
- [0.0, 0.0, 0.0],
1410
- [0.0, 0.0, 0.0],
1411
- ]
1412
- indices = [
1413
- [1, 0, 2],
1414
- [0, 2, 1],
1415
- ]
1416
- updates = [
1417
- [1.0, 1.1, 1.2],
1418
- [2.0, 2.1, 2.2],
1419
- ]
1420
- output = [
1421
- [2.0, 1.1, 0.0]
1422
- [1.0, 0.0, 2.2]
1423
- [0.0, 2.1, 1.2]
1424
- ]
1463
+ data = [
1464
+ [0.0, 0.0, 0.0],
1465
+ [0.0, 0.0, 0.0],
1466
+ [0.0, 0.0, 0.0],
1467
+ ]
1468
+ indices = [
1469
+ [1, 0, 2],
1470
+ [0, 2, 1],
1471
+ ]
1472
+ updates = [
1473
+ [1.0, 1.1, 1.2],
1474
+ [2.0, 2.1, 2.2],
1475
+ ]
1476
+ output = [
1477
+ [2.0, 1.1, 0.0]
1478
+ [1.0, 0.0, 2.2]
1479
+ [0.0, 2.1, 1.2]
1480
+ ]
1425
1481
  ```
1426
1482
  Example 2:
1427
1483
  ```
1428
- data = [[1.0, 2.0, 3.0, 4.0, 5.0]]
1429
- indices = [[1, 3]]
1430
- updates = [[1.1, 2.1]]
1431
- axis = 1
1432
- output = [[1.0, 1.1, 3.0, 2.1, 5.0]]
1484
+ data = [[1.0, 2.0, 3.0, 4.0, 5.0]]
1485
+ indices = [[1, 3]]
1486
+ updates = [[1.1, 2.1]]
1487
+ axis = 1
1488
+ output = [[1.0, 1.1, 3.0, 2.1, 5.0]]
1433
1489
  ```
1434
1490
  )DOC";
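A runnable sketch of ScatterElements Example 2 above (axis = 1), spelling out the per-entry update rule with numpy; this is an illustration, not the reference implementation:

```
import numpy as np

data = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]])
indices = np.array([[1, 3]])
updates = np.array([[1.1, 2.1]])

output = np.copy(data)
for i in range(indices.shape[0]):
    for j in range(indices.shape[1]):
        output[i][indices[i][j]] = updates[i][j]  # axis = 1 rule from the text
assert output.tolist() == [[1.0, 1.1, 3.0, 2.1, 5.0]]
```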
1435
1491
 
@@ -1483,10 +1539,7 @@ ONNX_OPERATOR_SET_SCHEMA(
1483
1539
  true,
1484
1540
  1,
1485
1541
  OpSchema::Differentiable)
1486
- .TypeConstraint(
1487
- "T",
1488
- OpSchema::all_tensor_types_with_bfloat(),
1489
- "Input and output types can be of any tensor type.")
1542
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Input and output types can be of any tensor type.")
1490
1543
  .TypeConstraint("Tind", {"tensor(int32)", "tensor(int64)"}, "Constrain indices to integer types")
1491
1544
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
1492
1545
  propagateElemTypeFromInputToOutput(ctx, 0, 0);
@@ -1500,56 +1553,49 @@ Given `data` tensor of rank r >= 1, and `indices` tensor of rank q, gather
1500
1553
  entries of the axis dimension of `data` (by default outer-most one as axis=0) indexed by `indices`, and concatenates
1501
1554
  them in an output tensor of rank q + (r - 1).
1502
1555
 
1503
- axis = 0 :
1504
-
1505
- Let
1506
- k = indices[i_{0}, ..., i_{q-1}]
1507
- Then
1508
- output[i_{0}, ..., i_{q-1}, j_{0}, ..., j_{r-2}] = input[k , j_{0}, ..., j_{r-2}]
1556
+ If `axis = 0`, let `k = indices[i_{0}, ..., i_{q-1}]`
1557
+ then `output[i_{0}, ..., i_{q-1}, j_{0}, ..., j_{r-2}] = input[k , j_{0}, ..., j_{r-2}]`:
1509
1558
 
1510
1559
  ```
1511
- data = [
1512
- [1.0, 1.2],
1513
- [2.3, 3.4],
1514
- [4.5, 5.7],
1515
- ]
1516
- indices = [
1517
- [0, 1],
1518
- [1, 2],
1519
- ]
1520
- output = [
1521
- [
1522
- [1.0, 1.2],
1523
- [2.3, 3.4],
1524
- ],
1525
- [
1526
- [2.3, 3.4],
1527
- [4.5, 5.7],
1528
- ],
1529
- ]
1560
+ data = [
1561
+ [1.0, 1.2],
1562
+ [2.3, 3.4],
1563
+ [4.5, 5.7],
1564
+ ]
1565
+ indices = [
1566
+ [0, 1],
1567
+ [1, 2],
1568
+ ]
1569
+ output = [
1570
+ [
1571
+ [1.0, 1.2],
1572
+ [2.3, 3.4],
1573
+ ],
1574
+ [
1575
+ [2.3, 3.4],
1576
+ [4.5, 5.7],
1577
+ ],
1578
+ ]
1530
1579
  ```
1531
- axis = 1 :
1532
1580
 
1533
- Let
1534
- k = indices[i_{0}, ..., i_{q-1}]
1535
- Then
1536
- output[j_{0}, i_{0}, ..., i_{q-1}, j_{1}, ..., j_{r-2}] = input[j_{0}, k, j_{1}, ..., j_{r-2}]
1581
+ If `axis = 1`, let `k = indices[i_{0}, ..., i_{q-1}]`
1582
+ then `output[j_{0}, i_{0}, ..., i_{q-1}, j_{1}, ..., j_{r-2}] = input[j_{0}, k, j_{1}, ..., j_{r-2}]`:
1537
1583
 
1538
1584
  ```
1539
- data = [
1540
- [1.0, 1.2, 1.9],
1541
- [2.3, 3.4, 3.9],
1542
- [4.5, 5.7, 5.9],
1543
- ]
1544
- indices = [
1545
- [0, 2],
1546
- ]
1547
- axis = 1,
1548
- output = [
1549
- [[1.0, 1.9]],
1550
- [[2.3, 3.9]],
1551
- [[4.5, 5.9]],
1552
- ]
1585
+ data = [
1586
+ [1.0, 1.2, 1.9],
1587
+ [2.3, 3.4, 3.9],
1588
+ [4.5, 5.7, 5.9],
1589
+ ]
1590
+ indices = [
1591
+ [0, 2],
1592
+ ]
1593
+ axis = 1,
1594
+ output = [
1595
+ [[1.0, 1.9]],
1596
+ [[2.3, 3.9]],
1597
+ [[4.5, 5.9]],
1598
+ ]
1553
1599
  ```
1554
1600
  )DOC";
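The axis = 0 Gather example above behaves like `numpy.take`; a short sketch under that assumption:

```
import numpy as np

data = np.array([[1.0, 1.2], [2.3, 3.4], [4.5, 5.7]])
indices = np.array([[0, 1], [1, 2]])
output = np.take(data, indices, axis=0)  # rank q + (r - 1) = 2 + (2 - 1) = 3
assert output.shape == (2, 2, 2)
assert output[1].tolist() == [[2.3, 3.4], [4.5, 5.7]]
```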
1555
1601
 
@@ -1576,10 +1622,7 @@ ONNX_OPERATOR_SET_SCHEMA(
1576
1622
  1,
1577
1623
  OpSchema::NonDifferentiable)
1578
1624
  .Output(0, "output", "Tensor of rank q + (r - 1).", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
1579
- .TypeConstraint(
1580
- "T",
1581
- OpSchema::all_tensor_types_with_bfloat(),
1582
- "Constrain input and output types to any tensor type.")
1625
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to any tensor type.")
1583
1626
  .TypeConstraint("Tind", {"tensor(int32)", "tensor(int64)"}, "Constrain indices to integer types")
1584
1627
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
1585
1628
  propagateElemTypeFromInputToOutput(ctx, 0, 0);
@@ -1627,45 +1670,45 @@ Its output shape is the same as the shape of `indices` and consists of one value
1627
1670
  For instance, in the 3-D case (r = 3), the output produced is determined
1628
1671
  by the following equations:
1629
1672
  ```
1630
- out[i][j][k] = input[index[i][j][k]][j][k] if axis = 0,
1631
- out[i][j][k] = input[i][index[i][j][k]][k] if axis = 1,
1632
- out[i][j][k] = input[i][j][index[i][j][k]] if axis = 2,
1673
+ out[i][j][k] = input[index[i][j][k]][j][k] if axis = 0,
1674
+ out[i][j][k] = input[i][index[i][j][k]][k] if axis = 1,
1675
+ out[i][j][k] = input[i][j][index[i][j][k]] if axis = 2,
1633
1676
  ```
1634
1677
 
1635
1678
  This operator is also the inverse of ScatterElements. It is similar to Torch's gather operation.
1636
1679
 
1637
1680
  Example 1:
1638
1681
  ```
1639
- data = [
1640
- [1, 2],
1641
- [3, 4],
1642
- ]
1643
- indices = [
1644
- [0, 0],
1645
- [1, 0],
1646
- ]
1647
- axis = 1
1648
- output = [
1649
- [1, 1],
1650
- [4, 3],
1651
- ]
1682
+ data = [
1683
+ [1, 2],
1684
+ [3, 4],
1685
+ ]
1686
+ indices = [
1687
+ [0, 0],
1688
+ [1, 0],
1689
+ ]
1690
+ axis = 1
1691
+ output = [
1692
+ [1, 1],
1693
+ [4, 3],
1694
+ ]
1652
1695
  ```
1653
1696
  Example 2:
1654
1697
  ```
1655
- data = [
1656
- [1, 2, 3],
1657
- [4, 5, 6],
1658
- [7, 8, 9],
1659
- ]
1660
- indices = [
1661
- [1, 2, 0],
1662
- [2, 0, 0],
1663
- ]
1664
- axis = 0
1665
- output = [
1666
- [4, 8, 3],
1667
- [7, 2, 3],
1668
- ]
1698
+ data = [
1699
+ [1, 2, 3],
1700
+ [4, 5, 6],
1701
+ [7, 8, 9],
1702
+ ]
1703
+ indices = [
1704
+ [1, 2, 0],
1705
+ [2, 0, 0],
1706
+ ]
1707
+ axis = 0
1708
+ output = [
1709
+ [4, 8, 3],
1710
+ [7, 2, 3],
1711
+ ]
1669
1712
  ```
1670
1713
  )DOC";
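GatherElements Example 1 above matches `numpy.take_along_axis`; a minimal sketch of that correspondence:

```
import numpy as np

data = np.array([[1, 2], [3, 4]])
indices = np.array([[0, 0], [1, 0]])
# out[i][j] = data[i][indices[i][j]] for axis = 1
output = np.take_along_axis(data, indices, axis=1)
assert output.tolist() == [[1, 1], [4, 3]]
```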
1671
1714
 
@@ -1700,10 +1743,7 @@ ONNX_OPERATOR_SET_SCHEMA(
1700
1743
  true,
1701
1744
  1,
1702
1745
  OpSchema::Differentiable)
1703
- .TypeConstraint(
1704
- "T",
1705
- OpSchema::all_tensor_types_with_bfloat(),
1706
- "Constrain input and output types to any tensor type.")
1746
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to any tensor type.")
1707
1747
  .TypeConstraint("Tind", {"tensor(int32)", "tensor(int64)"}, "Constrain indices to integer types")
1708
1748
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
1709
1749
  propagateElemTypeFromInputToOutput(ctx, 0, 0);
@@ -1753,10 +1793,7 @@ ONNX_OPERATOR_SET_SCHEMA(
1753
1793
  true,
1754
1794
  1,
1755
1795
  OpSchema::Differentiable)
1756
- .TypeConstraint(
1757
- "T",
1758
- OpSchema::all_tensor_types_with_bfloat(),
1759
- "Constrain input and output types to all tensor types.")
1796
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
1760
1797
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
1761
1798
  propagateElemTypeFromInputToOutput(ctx, 0, 0);
1762
1799
  if (!hasNInputShapes(ctx, 1)) {
@@ -1781,9 +1818,8 @@ ONNX_OPERATOR_SET_SCHEMA(
1781
1818
 
1782
1819
  const auto& input_shape = ctx.getInputType(0)->tensor_type().shape();
1783
1820
  const auto input_ndim = input_shape.dim_size();
1784
- std::transform(axes.begin(), axes.end(), axes.begin(), [&](int64_t axis) -> int64_t {
1785
- return axis < 0 ? axis + input_ndim : axis;
1786
- });
1821
+ checkAxesRange(axes, input_ndim);
1822
+ adjustNegativeAxes(axes, input_ndim);
1787
1823
 
1788
1824
  for (int i = 0; i < input_ndim; ++i) {
1789
1825
  if (!input_shape.dim(i).has_dim_value() && axes_not_specified) {
@@ -1821,15 +1857,13 @@ static const char* Unsqueeze_ver13_doc = R"DOC(
1821
1857
  Insert single-dimensional entries to the shape of an input tensor (`data`).
1822
1858
  Takes one required input `axes` - which contains a list of dimension indices and this operator will insert a dimension of value `1` into the corresponding index of the output tensor (`expanded`).
1823
1859
 
1824
- For example:
1825
- Given an input tensor (`data`) of shape [3, 4, 5], then
1826
- Unsqueeze(data, axes=[0, 4]) outputs a tensor (`expanded`) containing same data as `data` but with shape [1, 3, 4, 5, 1].
1860
+ For example, given an input tensor (`data`) of shape [3, 4, 5], then
1861
+ Unsqueeze(data, axes=[0, 4]) outputs a tensor (`expanded`) containing same data as `data` but with shape [1, 3, 4, 5, 1].
1827
1862
 
1828
1863
  The input `axes` should not contain any duplicate entries. It is an error if it contains duplicates.
1829
1864
  The rank of the output tensor (`output_rank`) is the rank of the input tensor (`data`) plus the number of values in `axes`.
1830
1865
  Each value in `axes` should be within the (inclusive) range [-output_rank , output_rank - 1].
1831
1866
  The order of values in `axes` does not matter and can come in any order.
1832
-
1833
1867
  )DOC";
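The Unsqueeze example above ([3, 4, 5] with axes = [0, 4] giving [1, 3, 4, 5, 1]) can be checked with numpy's `expand_dims`, which accepts a tuple of axes in recent numpy releases (a sketch, not the ONNX implementation):

```
import numpy as np

data = np.zeros((3, 4, 5))
expanded = np.expand_dims(data, axis=(0, 4))  # insert length-1 dims at 0 and 4
assert expanded.shape == (1, 3, 4, 5, 1)
```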
1834
1868
 
1835
1869
  ONNX_OPERATOR_SET_SCHEMA(
@@ -1857,10 +1891,7 @@ ONNX_OPERATOR_SET_SCHEMA(
1857
1891
  true,
1858
1892
  1,
1859
1893
  OpSchema::Differentiable)
1860
- .TypeConstraint(
1861
- "T",
1862
- OpSchema::all_tensor_types_with_bfloat(),
1863
- "Constrain input and output types to all tensor types.")
1894
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
1864
1895
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
1865
1896
  propagateElemTypeFromInputToOutput(ctx, 0, 0);
1866
1897
  if (!hasNInputShapes(ctx, 1)) {
@@ -1873,31 +1904,14 @@ ONNX_OPERATOR_SET_SCHEMA(
1873
1904
  return;
1874
1905
  }
1875
1906
  axes = ParseData<int64_t>(axes_proto);
1876
-
1877
- // validate 'axes' for duplicate entries
1878
- std::unordered_set<int64_t> unique_values;
1879
- for (const auto val : axes) {
1880
- if (unique_values.find(val) != unique_values.end()) {
1881
- fail_shape_inference("'axes' attribute must not contain any duplicates");
1882
- }
1883
- unique_values.insert(val);
1884
- }
1885
-
1886
1907
  ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape();
1887
1908
  const auto& input_shape = ctx.getInputType(0)->tensor_type().shape();
1888
1909
  const auto input_ndim = input_shape.dim_size();
1889
1910
  const auto output_ndim = input_ndim + static_cast<int>(axes.size());
1890
- for (auto& axe : axes) {
1891
- if (axe < -output_ndim || axe >= output_ndim) {
1892
- fail_shape_inference("values in 'axes' are beyond the bounds of the computed output shape");
1893
- }
1894
- if (axe < 0) {
1895
- axe += output_ndim;
1896
- }
1897
- }
1898
-
1899
- // sort after correcting negative axes values (if any) in the previous
1900
- // step
1911
+ checkAxesRange(axes, output_ndim);
1912
+ adjustNegativeAxes(axes, output_ndim);
1913
+ checkDuplicateAxes(axes, output_ndim);
1914
+ // sort after correcting negative axes values (if any)
1901
1915
  std::sort(axes.begin(), axes.end());
1902
1916
 
1903
1917
  int j = 0;
@@ -1951,10 +1965,7 @@ ONNX_OPERATOR_SET_SCHEMA(
1951
1965
  true,
1952
1966
  1,
1953
1967
  OpSchema::Differentiable)
1954
- .TypeConstraint(
1955
- "T",
1956
- OpSchema::all_tensor_types_with_bfloat(),
1957
- "Constrain input and output types to all tensor types.")
1968
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
1958
1969
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
1959
1970
  propagateElemTypeFromInputToOutput(ctx, 0, 0);
1960
1971
  auto blocksize = getAttribute(ctx, "blocksize", 0);
@@ -1987,26 +1998,22 @@ and width dimensions. By default, `mode` = `DCR`.
1987
1998
  In the DCR mode, elements along the depth dimension from the input tensor are rearranged in the
1988
1999
  following order: depth, column, and then row. The output y is computed from the input x as below:
1989
2000
 
2001
+ ```
1990
2002
  b, c, h, w = x.shape
1991
-
1992
2003
  tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
1993
-
1994
2004
  tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
1995
-
1996
2005
  y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])
1997
-
2006
+ ```
1998
2007
 
1999
2008
  In the CRD mode, elements along the depth dimension from the input tensor are rearranged in the
2000
2009
  following order: column, row, and the depth. The output y is computed from the input x as below:
2001
2010
 
2011
+ ```
2002
2012
  b, c, h, w = x.shape
2003
-
2004
2013
  tmp = np.reshape(x, [b, c // (blocksize ** 2), blocksize, blocksize, h, w])
2005
-
2006
2014
  tmp = np.transpose(tmp, [0, 1, 4, 2, 5, 3])
2007
-
2008
2015
  y = np.reshape(tmp, [b, c // (blocksize ** 2), h * blocksize, w * blocksize])
2009
-
2016
+ ```
2010
2017
  )DOC";
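The DCR-mode pseudocode above is valid numpy once `x` and `blocksize` are bound; a runnable sketch with an assumed input of shape (1, 8, 2, 3) and blocksize 2:

```
import numpy as np

blocksize = 2
x = np.arange(1 * 8 * 2 * 3).reshape(1, 8, 2, 3)  # b, c, h, w

b, c, h, w = x.shape
tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])
assert y.shape == (1, 2, 4, 6)
```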
2011
2018
 
2012
2019
  ONNX_OPERATOR_SET_SCHEMA(
@@ -2039,10 +2046,7 @@ ONNX_OPERATOR_SET_SCHEMA(
2039
2046
  true,
2040
2047
  1,
2041
2048
  OpSchema::Differentiable)
2042
- .TypeConstraint(
2043
- "T",
2044
- OpSchema::all_tensor_types_with_bfloat(),
2045
- "Constrain input and output types to all tensor types.")
2049
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
2046
2050
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
2047
2051
  propagateElemTypeFromInputToOutput(ctx, 0, 0);
2048
2052
  auto blocksize = getAttribute(ctx, "blocksize", 0);
@@ -2099,10 +2103,7 @@ ONNX_OPERATOR_SET_SCHEMA(
2099
2103
  true,
2100
2104
  1,
2101
2105
  OpSchema::Differentiable)
2102
- .TypeConstraint(
2103
- "T",
2104
- OpSchema::all_tensor_types_with_bfloat(),
2105
- "Constrain input and output types to all tensor types.")
2106
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
2106
2107
  .TypeConstraint("T1", {"tensor(int64)"}, "Constrain repeat's type to int64 tensors.")
2107
2108
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
2108
2109
  // Type inference
@@ -2184,51 +2185,81 @@ ONNX_OPERATOR_SET_SCHEMA(
2184
2185
  .SetDoc(Upsample_ver10_doc)
2185
2186
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) { resizeShapeInference_opset7_to_10(ctx); }));
2186
2187
 
2187
- static const char* Resize_ver18_doc = R"DOC(
2188
+ static const char* Resize_ver19_doc = R"DOC(
2188
2189
  Resize the input tensor. In general, it calculates every value in the output tensor as a weighted average of neighborhood (a.k.a. sampling locations) in the input tensor.
2189
- Each dimension value of the output tensor is: <br/>
2190
- `output_dimension = floor(input_dimension * (roi_end - roi_start) * scale)` <br/>
2190
+ Each dimension value of the output tensor is:
2191
+ ```
2192
+ output_dimension = floor(input_dimension * (roi_end - roi_start) * scale)
2193
+ ```
2191
2194
  if input \"sizes\" is not specified.
2192
2195
  )DOC";
2193
2196
 
2194
- static const char* Resize_attr_coordinate_transformation_mode_doc = R"DOC(
2195
- This attribute describes how to transform the coordinate in the resized tensor to the coordinate in the original tensor. <br/>
2197
+ static const char* Resize_ver19_attr_coordinate_transformation_mode_doc = R"DOC(
2198
+ This attribute describes how to transform the coordinate in the resized tensor to the coordinate in the original tensor.
2196
2199
 
2197
2200
  The coordinate of each dimension is transformed individually. Let's describe a case using axis x as an example.
2198
- Denote x_resized as the coordinate of axis x in the resized tensor, x_original as the coordinate of axis x in the original tensor, `length_original` as the length of the original tensor in axis x, length_resized as the length of the resized tensor in axis x, roi_x = (start_x, end_x) of the axis x in input "roi", `scale = length_resized / length_original`, <br/>
2201
+ Denote `x_resized` as the coordinate of axis x in the resized tensor,
2202
+ `x_original` as the coordinate of axis x in the original tensor,
2203
+ `length_original` as the length of the original tensor in axis x,
2204
+ `length_resized` as the length of the resized tensor in axis x,
2205
+ `scale = length_resized / length_original`,
2206
+ `output_width` the target length on the axis x which can be a fractional number when it is calculated out of a scale factor,
2207
+ and `output_width_int` the effective output width as an integer.
2208
+
2209
+ if coordinate_transformation_mode is `"half_pixel"`,
2210
+ ```
2211
+ x_original = (x_resized + 0.5) / scale - 0.5
2212
+ ```
2199
2213
 
2200
- if coordinate_transformation_mode is `"half_pixel"`, <br/>
2201
- `x_original = (x_resized + 0.5) / scale - 0.5` <br/>
2214
+ if coordinate_transformation_mode is `"half_pixel_symmetric"`,
2215
+ ```
2216
+ adjustment = output_width_int / output_width
2217
+ center = length_original / 2
2218
+ offset = center * (1 - adjustment)
2219
+ x_original = offset + (x_resized + 0.5) / scale - 0.5
2220
+ ```
2202
2221
 
2203
- if coordinate_transformation_mode is `"pytorch_half_pixel"`, <br/>
2204
- `x_original = length_resized > 1 ? (x_resized + 0.5) / scale - 0.5 : 0` <br/>
2222
+ if coordinate_transformation_mode is `"pytorch_half_pixel"`,
2223
+ ```
2224
+ x_original = length_resized > 1 ? (x_resized + 0.5) / scale - 0.5 : 0
2225
+ ```
2205
2226
 
2206
- if coordinate_transformation_mode is `"align_corners"`, <br/>
2207
- `x_original = x_resized * (length_original - 1) / (length_resized - 1)` <br/>
2227
+ if coordinate_transformation_mode is `"align_corners"`,
2228
+ ```
2229
+ x_original = x_resized * (length_original - 1) / (length_resized - 1)
2230
+ ```
2208
2231
 
2209
- if coordinate_transformation_mode is `"asymmetric"`, <br/>
2210
- `x_original = x_resized / scale` <br/>
2232
+ if coordinate_transformation_mode is `"asymmetric"`,
2233
+ ```
2234
+ x_original = x_resized / scale
2235
+ ```
2211
2236
 
2212
- if coordinate_transformation_mode is `"tf_crop_and_resize"`, <br/>
2213
- `x_original = length_resized > 1 ? start_x * (length_original - 1) + x_resized * (end_x - start_x) * (length_original - 1) / (length_resized - 1) : 0.5 * (start_x + end_x) * (length_original - 1)`
2237
+ if coordinate_transformation_mode is `"tf_crop_and_resize"`,
2238
+ ```
2239
+ x_original = length_resized > 1 ? start_x * (length_original - 1) + x_resized * (end_x - start_x) * (length_original - 1) / (length_resized - 1) : 0.5 * (start_x + end_x) * (length_original - 1)
2240
+ ```
2214
2241
  .)DOC";
2215
2242
 
2216
- static const char* Resize_attr_keep_aspect_ratio_policy_doc = R"DOC(
2243
+ static const char* Resize_ver19_attr_keep_aspect_ratio_policy_doc = R"DOC(
2217
2244
  This attribute describes how to interpret the `sizes` input with regard to keeping the original aspect ratio of the input, and it is not applicable when
2218
- the `scales` input is used. <br/>
2245
+ the `scales` input is used.
2219
2246
 
2220
- Given a set of `sizes`, associated with a subset of `axes` (explicitly provided or default), and assuming `d = axes[i]`, with `i` being the index of the provided `sizes`. <br/>
2247
+ Given a set of `sizes`, associated with a subset of `axes` (explicitly provided or default), let `d = axes[i]`, where `i` is the index into the provided `sizes`.
2221
2248
 
2222
- If `keep_aspect_ratio_policy` is `"stretch"`, the original aspect ratio is disregarded, and the input is resized to the specified size: <br/>
2223
- `out_size[d] = sizes[i]` <br/>
2249
+ If `keep_aspect_ratio_policy` is `"stretch"`, the original aspect ratio is disregarded, and the input is resized to the specified size:
2250
+ `out_size[d] = sizes[i]`
2224
2251
 
2225
- If `keep_aspect_ratio_policy` is `"not_larger"`, the sizes are adjusted so that no extent of the output is larger than the specified size, while keeping the original aspect ratio: <br/>
2226
- `scale = Min(sizes[i] / in_size[d])` <br/>
2227
- `out_size[d] = round_int(scale * in_size[i])` <br/>
2252
+ If `keep_aspect_ratio_policy` is `"not_larger"`, the sizes are adjusted so that no extent of the output is larger than the specified size, while keeping the original aspect ratio:
2253
+ ```
2254
+ scale = Min(sizes[i] / in_size[d])
2255
+ out_size[d] = round_int(scale * in_size[d])
2256
+ ```
2228
2257
 
2229
- If `keep_aspect_ratio_policy` is `"not_smaller"`, the sizes are adjusted so that no extent of the output is smaller than the specified size, while keeping the original aspect ratio: <br/>
2230
- `scale = Max(sizes[i] / in_size[d])` <br/>
2231
- `out_size[d] = round_int(scale * in_size[i])` <br/>
2258
+ If `keep_aspect_ratio_policy` is `"not_smaller"`, the sizes are adjusted so that no extent of the output is smaller than the specified size, while keeping the original aspect ratio:
2259
+ ```
2260
+ scale = Max(sizes[i] / in_size[d])
2261
+ out_size[d] = round_int(scale * in_size[d])
2262
+ ```
2232
2263
 
2233
2264
  For non-resizable axes (those not specified in `axes`), the output size will be equal to the input size.
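A minimal Python sketch of the three `keep_aspect_ratio_policy` rules above (names are illustrative; `round_int` rounds halfway cases up, matching the `round_int` note in this doc):

```python
import math

def resolve_sizes(in_size, sizes, axes, policy="stretch"):
    round_int = lambda v: int(math.floor(v + 0.5))
    out_size = list(in_size)
    if policy == "stretch":
        for i, d in enumerate(axes):
            out_size[d] = sizes[i]
        return out_size
    ratios = [sizes[i] / in_size[d] for i, d in enumerate(axes)]
    scale = min(ratios) if policy == "not_larger" else max(ratios)
    for d in axes:
        out_size[d] = round_int(scale * in_size[d])
    return out_size

# A 30x60 input with target sizes 40x40:
print(resolve_sizes([30, 60], [40, 40], [0, 1], "not_larger"))   # [20, 40]
print(resolve_sizes([30, 60], [40, 40], [0, 1], "not_smaller"))  # [40, 80]
```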
2234
2265
 
@@ -2236,7 +2267,7 @@ Note: `round_int` stands for computing the nearest integer value, rounding halfw
2236
2267
 
2237
2268
  ONNX_OPERATOR_SET_SCHEMA(
2238
2269
  Resize,
2239
- 18,
2270
+ 19,
2240
2271
  OpSchema()
2241
2272
  .Attr(
2242
2273
  "mode",
@@ -2260,7 +2291,7 @@ ONNX_OPERATOR_SET_SCHEMA(
2260
2291
  static_cast<int64_t>(0))
2261
2292
  .Attr(
2262
2293
  "coordinate_transformation_mode",
2263
- Resize_attr_coordinate_transformation_mode_doc,
2294
+ Resize_ver19_attr_coordinate_transformation_mode_doc,
2264
2295
  AttributeProto::STRING,
2265
2296
  std::string("half_pixel"))
2266
2297
  .Attr(
@@ -2290,7 +2321,7 @@ ONNX_OPERATOR_SET_SCHEMA(
2290
2321
  false)
2291
2322
  .Attr(
2292
2323
  "keep_aspect_ratio_policy",
2293
- Resize_attr_keep_aspect_ratio_policy_doc,
2324
+ Resize_ver19_attr_keep_aspect_ratio_policy_doc,
2294
2325
  AttributeProto::STRING,
2295
2326
  std::string("stretch"))
2296
2327
  .Input(0, "X", "N-D tensor", "T1", OpSchema::Single, true, 1, OpSchema::Differentiable)
@@ -2330,14 +2361,14 @@ ONNX_OPERATOR_SET_SCHEMA(
2330
2361
  .Output(0, "Y", "N-D tensor after resizing", "T1", OpSchema::Single, true, 1, OpSchema::Differentiable)
2331
2362
  .TypeConstraint(
2332
2363
  "T1",
2333
- OpSchema::all_tensor_types_with_bfloat(),
2364
+ OpSchema::all_tensor_types_ir4(),
2334
2365
  "Constrain input 'X' and output 'Y' to all tensor types.")
2335
2366
  .TypeConstraint(
2336
2367
  "T2",
2337
2368
  {"tensor(float16)", "tensor(float)", "tensor(double)"},
2338
2369
  "Constrain roi type to float or double.")
2339
- .SetDoc(Resize_ver18_doc)
2340
- .TypeAndShapeInferenceFunction([](InferenceContext& ctx) { resizeShapeInference_opset13_to_18(ctx); }));
2370
+ .SetDoc(Resize_ver19_doc)
2371
+ .TypeAndShapeInferenceFunction([](InferenceContext& ctx) { resizeShapeInference_opset18_to_19(ctx); }));
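For orientation, the updated Resize-19 schema can be exercised end-to-end through the Python API's shape inference; a small sketch assuming the standard `onnx.helper` utilities (shapes and values here are illustrative):

```python
import onnx
from onnx import TensorProto, helper

node = helper.make_node(
    "Resize", inputs=["X", "", "scales"], outputs=["Y"],
    coordinate_transformation_mode="half_pixel_symmetric",  # new mode in opset 19
)
graph = helper.make_graph(
    [node], "resize_example",
    inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 3, 4, 4])],
    outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, None)],
    initializer=[helper.make_tensor("scales", TensorProto.FLOAT, [4],
                                    [1.0, 1.0, 2.0, 2.0])],
)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 19)])
inferred = onnx.shape_inference.infer_shapes(model)
print(inferred.graph.output[0])  # expected: a float tensor of shape [1, 3, 8, 8]
```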
2341
2372
 
2342
2373
  static const char* GridSample_ver16_doc = R"DOC(
2343
2374
  Given an input `X` and a flow-field `grid`, computes the output `Y` using `X` values and pixel locations from `grid`.
@@ -2451,7 +2482,7 @@ ONNX_OPERATOR_SET_SCHEMA(
2451
2482
 
2452
2483
  ONNX_OPERATOR_SET_SCHEMA(
2453
2484
  Identity,
2454
- 16,
2485
+ 19,
2455
2486
  OpSchema()
2456
2487
  .SetDoc("Identity operator")
2457
2488
  .Input(0, "input", "Input tensor", "V", OpSchema::Single, true, 1, OpSchema::Differentiable)
@@ -2459,7 +2490,7 @@ ONNX_OPERATOR_SET_SCHEMA(
2459
2490
  .TypeConstraint(
2460
2491
  "V",
2461
2492
  []() {
2462
- auto t = OpSchema::all_tensor_types_with_bfloat();
2493
+ auto t = OpSchema::all_tensor_types_ir9();
2463
2494
  auto s = OpSchema::all_tensor_sequence_types();
2464
2495
  auto o = OpSchema::all_optional_types();
2465
2496
  t.insert(t.end(), s.begin(), s.end());
@@ -2513,13 +2544,13 @@ ONNX_OPERATOR_SET_SCHEMA(
2513
2544
  .TypeConstraint("T1", {"tensor(bool)"}, "Constrain to boolean tensors.")
2514
2545
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
2515
2546
  propagateElemTypeFromInputToOutput(ctx, 0, 0);
2547
+ auto axisAttr = ctx.getAttribute("axis");
2516
2548
  if (hasInputShape(ctx, 0)) {
2517
2549
  const TensorShapeProto& indices_shape = ctx.getInputType(0)->tensor_type().shape();
2518
2550
  int r = indices_shape.dim_size();
2519
2551
  if (r < 1) {
2520
2552
  fail_shape_inference("Indices tensor must have rank >= 1");
2521
2553
  }
2522
- auto axisAttr = ctx.getAttribute("axis");
2523
2554
  if (axisAttr) {
2524
2555
  int axis = static_cast<int>(axisAttr->i());
2525
2556
  if (axis < -r || axis >= r) {
@@ -2528,8 +2559,18 @@ ONNX_OPERATOR_SET_SCHEMA(
2528
2559
  if (axis < 0) {
2529
2560
  axis += r;
2530
2561
  }
2562
+ TensorShapeProto* shape = ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape();
2563
+ for (int i = 0; i < indices_shape.dim_size(); i++) {
2564
+ auto* dim = shape->add_dim();
2565
+ if (i != axis) {
2566
+ *dim = indices_shape.dim(i);
2567
+ }
2568
+ }
2531
2569
  }
2532
2570
  }
2571
+ if (!axisAttr) {
2572
+ updateOutputShape(ctx, 0, {Dim()});
2573
+ }
2533
2574
  }));
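Restated in Python for readability, the inference added above keeps every input dimension except the one selected by `axis`, which becomes unknown, and falls back to a 1-D output of unknown length when `axis` is absent; a hedged sketch with illustrative names (`None` standing in for an unknown dimension):

```python
def infer_output_dims(input_dims, axis=None):
    if axis is None:
        return [None]                      # rank-1 output of unknown length
    r = len(input_dims)
    if not (-r <= axis < r):
        raise ValueError(f"axis {axis} out of range for rank {r}")
    axis %= r                              # normalize a negative axis
    return [None if i == axis else d for i, d in enumerate(input_dims)]

print(infer_output_dims([2, 3, 4], axis=1))  # [2, None, 4]
print(infer_output_dims([2, 3, 4]))          # [None]
```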
2534
2575
 
2535
2576
  static const char* OneHot_ver11_doc = R"DOC(
@@ -2745,19 +2786,11 @@ with three parameters.
2745
2786
 
2746
2787
  )DOC";
2747
2788
 
2748
- static const char* Where_ver16_history = R"DOC(
2749
-
2750
- **History**
2751
- - Version 16 adds bfloat16 to the types allowed (for the second and third parameter).
2752
- )DOC";
2753
-
2754
2789
  ONNX_OPERATOR_SET_SCHEMA(
2755
2790
  Where,
2756
2791
  16,
2757
2792
  OpSchema()
2758
- .SetDoc(
2759
- GET_OP_DOC_STR(std::string(Where_ver16_doc) + GenerateBroadcastingDocMul()) +
2760
- std::string(Where_ver16_history))
2793
+ .SetDoc(GET_OP_DOC_STR(std::string(Where_ver16_doc) + GenerateBroadcastingDocMul()))
2761
2794
  .Input(
2762
2795
  0,
2763
2796
  "condition",
@@ -2797,7 +2830,7 @@ ONNX_OPERATOR_SET_SCHEMA(
2797
2830
  .TypeConstraint("B", {"tensor(bool)"}, "Constrain to boolean tensors.")
2798
2831
  .TypeConstraint(
2799
2832
  "T",
2800
- OpSchema::all_tensor_types_with_bfloat(),
2833
+ OpSchema::all_tensor_types_ir4(),
2801
2834
  "Constrain input and output types to all tensor types (including bfloat).")
2802
2835
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
2803
2836
  propagateElemTypeFromInputToOutput(ctx, 1, 0);
@@ -2818,7 +2851,7 @@ ONNX_OPERATOR_SET_SCHEMA(
2818
2851
  .SetDoc(NonZero_ver9_doc)
2819
2852
  .Input(0, "X", "input", "T", OpSchema::Single, true, 1, OpSchema::NonDifferentiable)
2820
2853
  .Output(0, "Y", "output", "tensor(int64)", OpSchema::Single, true, 1, OpSchema::NonDifferentiable)
2821
- .TypeConstraint("T", OpSchema::all_tensor_types_with_bfloat(), "Constrain to all tensor types.")
2854
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain to all tensor types.")
2822
2855
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
2823
2856
  updateOutputElemType(ctx, 0, TensorProto::INT64);
2824
2857
  TensorShapeProto output_shape;
@@ -2924,67 +2957,89 @@ Outputs are either sorted in ascending order or optionally in the order of the f
2924
2957
  https://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html
2925
2958
 
2926
2959
  Example 1:
2927
- input_X = [2, 1, 1, 3, 4, 3]
2928
- attribute_sorted = 0
2929
- attribute_axis = None
2930
- output_Y = [2, 1, 3, 4]
2931
- output_indices = [0, 1, 3, 4]
2932
- output_inverse_indices = [0, 1, 1, 2, 3, 2]
2933
- output_counts = [1, 2, 2, 1]
2960
+ ```
2961
+ input_X = [2, 1, 1, 3, 4, 3]
2962
+ attribute_sorted = 0
2963
+ attribute_axis = None
2964
+ output_Y = [2, 1, 3, 4]
2965
+ output_indices = [0, 1, 3, 4]
2966
+ output_inverse_indices = [0, 1, 1, 2, 3, 2]
2967
+ output_counts = [1, 2, 2, 1]
2968
+ ```
2934
2969
 
2935
2970
  Example 2:
2936
- input_X = [[1, 3], [2, 3]]
2937
- attribute_sorted = 1
2938
- attribute_axis = None
2939
- output_Y = [1, 2, 3]
2940
- output_indices = [0, 2, 1]
2941
- output_inverse_indices = [0, 2, 1, 2]
2942
- output_counts = [1, 1, 2]
2971
+ ```
2972
+ input_X = [[1, 3], [2, 3]]
2973
+ attribute_sorted = 1
2974
+ attribute_axis = None
2975
+ output_Y = [1, 2, 3]
2976
+ output_indices = [0, 2, 1]
2977
+ output_inverse_indices = [0, 2, 1, 2]
2978
+ output_counts = [1, 1, 2]
2979
+ ```
2943
2980
 
2944
2981
  Example 3:
2945
- input_X = [[1, 0, 0], [1, 0, 0], [2, 3, 4]]
2946
- attribute_sorted = 1
2947
- attribute_axis = 0
2948
- output_Y = [[1, 0, 0], [2, 3, 4]]
2949
- output_indices = [0, 2]
2950
- output_inverse_indices = [0, 0, 1]
2951
- output_counts = [2, 1]
2982
+ ```
2983
+ input_X = [[1, 0, 0], [1, 0, 0], [2, 3, 4]]
2984
+ attribute_sorted = 1
2985
+ attribute_axis = 0
2986
+ output_Y = [[1, 0, 0], [2, 3, 4]]
2987
+ output_indices = [0, 2]
2988
+ output_inverse_indices = [0, 0, 1]
2989
+ output_counts = [2, 1]
2990
+ ```
2952
2991
 
2953
2992
  Example 4:
2954
- input_x = [[[1., 1.], [0., 1.], [2., 1.], [0., 1.]],
2955
- [[1., 1.], [0., 1.], [2., 1.], [0., 1.]]]
2956
- attribute_sorted = 1
2957
- attribute_axis = 1
2958
-
2959
- intermediate data are presented below for better understanding:
2993
+ ```
2994
+ input_x = [[[1., 1.], [0., 1.], [2., 1.], [0., 1.]],
2995
+ [[1., 1.], [0., 1.], [2., 1.], [0., 1.]]]
2996
+ attribute_sorted = 1
2997
+ attribute_axis = 1
2998
+ ```
2960
2999
 
2961
- there are 4 subtensors sliced along axis 1 of input_x (shape = (2, 4, 2)):
2962
- A: [[1, 1], [1, 1]],
2963
- [[0, 1], [0, 1]],
2964
- [[2, 1], [2, 1]],
2965
- [[0, 1], [0, 1]].
3000
+ intermediate data are presented below for better understanding:
3001
+ there are 4 subtensors sliced along axis 1 of input_x (shape = (2, 4, 2)):
3002
+ ```
3003
+ A: [[1, 1], [1, 1]],
3004
+ [[0, 1], [0, 1]],
3005
+ [[2, 1], [2, 1]],
3006
+ [[0, 1], [0, 1]].
3007
+ ```
2966
3008
 
2967
- there are 3 unique subtensors:
2968
- [[1, 1], [1, 1]],
2969
- [[0, 1], [0, 1]],
2970
- [[2, 1], [2, 1]].
3009
+ there are 3 unique subtensors:
3010
+ ```
3011
+ [[1, 1], [1, 1]],
3012
+ [[0, 1], [0, 1]],
3013
+ [[2, 1], [2, 1]].
3014
+ ```
2971
3015
 
2972
- sorted unique subtensors:
2973
- B: [[0, 1], [0, 1]],
2974
- [[1, 1], [1, 1]],
2975
- [[2, 1], [2, 1]].
3016
+ sorted unique subtensors:
3017
+ ```
3018
+ B: [[0, 1], [0, 1]],
3019
+ [[1, 1], [1, 1]],
3020
+ [[2, 1], [2, 1]].
3021
+ ```
2976
3022
 
2977
- output_Y is constructed from B:
2978
- [[[0. 1.], [1. 1.], [2. 1.]],
2979
- [[0. 1.], [1. 1.], [2. 1.]]]
3023
+ output_Y is constructed from B:
3024
+ ```
3025
+ [[[0. 1.], [1. 1.], [2. 1.]],
3026
+ [[0. 1.], [1. 1.], [2. 1.]]]
3027
+ ```
2980
3028
 
2981
- output_indices is to map from B to A:
2982
- [1, 0, 2]
3029
+ output_indices is to map from B to A:
3030
+ ```
3031
+ [1, 0, 2]
3032
+ ```
2983
3033
 
2984
- output_inverse_indices is to map from A to B:
2985
- [1, 0, 2, 0]
3034
+ output_inverse_indices is to map from A to B:
3035
+ ```
3036
+ [1, 0, 2, 0]
3037
+ ```
2986
3038
 
2987
- output_counts = [2 1 1]
3039
+ output_counts:
3040
+ ```
3041
+ [2, 1, 1]
3042
+ ```
2988
3043
  )DOC";
2989
3044
 
2990
3045
  ONNX_OPERATOR_SET_SCHEMA(
@@ -3246,10 +3301,7 @@ ONNX_OPERATOR_SET_SCHEMA(
3246
3301
  true,
3247
3302
  1,
3248
3303
  OpSchema::Differentiable)
3249
- .TypeConstraint(
3250
- "T",
3251
- OpSchema::all_tensor_types_with_bfloat(),
3252
- "Constrain input and output types to any tensor type.")
3304
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to any tensor type.")
3253
3305
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
3254
3306
  // Type inference
3255
3307
  propagateElemTypeFromInputToOutput(ctx, 0, 0);
@@ -3296,7 +3348,7 @@ ONNX_OPERATOR_SET_SCHEMA(
3296
3348
  }
3297
3349
  }));
3298
3350
 
3299
- static const char* Pad_ver18_doc = R"DOC(
3351
+ static const char* Pad_ver19_doc = R"DOC(
3300
3352
  Given a tensor containing the data to be padded (`data`), a tensor containing the number of start and end pad values for axis (`pads`), (optionally) a `mode`, and (optionally) `constant_value`,
3301
3353
  a padded tensor (`output`) is generated.
3302
3354
 
@@ -3308,209 +3360,102 @@ The three supported `modes` are (similar to corresponding modes supported by `nu
3308
3360
 
3309
3361
  3) `edge` - pads with the edge values of array
3310
3362
 
3363
+ 4) `wrap` - wrap-around padding as if the data tensor forms a torus
3364
+
3311
3365
 
3312
3366
  Example 1 (`constant` mode):
3313
- Insert 0 pads to the beginning of the second dimension.
3314
3367
 
3315
- data =
3316
- [
3317
- [1.0, 1.2],
3318
- [2.3, 3.4],
3319
- [4.5, 5.7],
3320
- ]
3368
+ Insert 0 pads to the beginning of the second dimension.
3321
3369
 
3322
- pads = [0, 2, 0, 0]
3370
+ ```
3371
+ data = [
3372
+ [1.0, 1.2],
3373
+ [2.3, 3.4],
3374
+ [4.5, 5.7],
3375
+ ]
3323
3376
 
3324
- mode = 'constant'
3377
+ pads = [0, 2, 0, 0]
3325
3378
 
3326
- constant_value = 0.0
3379
+ mode = 'constant'
3327
3380
 
3328
- output =
3329
- [
3330
- [0.0, 0.0, 1.0, 1.2],
3331
- [0.0, 0.0, 2.3, 3.4],
3332
- [0.0, 0.0, 4.5, 5.7],
3333
- ]
3381
+ constant_value = 0.0
3334
3382
 
3383
+ output = [
3384
+ [0.0, 0.0, 1.0, 1.2],
3385
+ [0.0, 0.0, 2.3, 3.4],
3386
+ [0.0, 0.0, 4.5, 5.7],
3387
+ ]
3388
+ ```
3335
3389
 
3336
3390
  Example 2 (`reflect` mode):
3337
- data =
3338
- [
3339
- [1.0, 1.2],
3340
- [2.3, 3.4],
3341
- [4.5, 5.7],
3342
- ]
3343
3391
 
3344
- pads = [0, 2, 0, 0]
3392
+ ```
3393
+ data = [
3394
+ [1.0, 1.2],
3395
+ [2.3, 3.4],
3396
+ [4.5, 5.7],
3397
+ ]
3345
3398
 
3346
- mode = 'reflect'
3399
+ pads = [0, 2, 0, 0]
3347
3400
 
3348
- output =
3349
- [
3350
- [1.0, 1.2, 1.0, 1.2],
3351
- [2.3, 3.4, 2.3, 3.4],
3352
- [4.5, 5.7, 4.5, 5.7],
3353
- ]
3401
+ mode = 'reflect'
3354
3402
 
3403
+ output = [
3404
+ [1.0, 1.2, 1.0, 1.2],
3405
+ [2.3, 3.4, 2.3, 3.4],
3406
+ [4.5, 5.7, 4.5, 5.7],
3407
+ ]
3408
+ ```
3355
3409
 
3356
3410
  Example 3 (`edge` mode):
3357
- data =
3358
- [
3359
- [1.0, 1.2],
3360
- [2.3, 3.4],
3361
- [4.5, 5.7],
3362
- ]
3363
3411
 
3364
- pads = [0, 2, 0, 0]
3412
+ ```
3413
+ data = [
3414
+ [1.0, 1.2],
3415
+ [2.3, 3.4],
3416
+ [4.5, 5.7],
3417
+ ]
3365
3418
 
3366
- mode = 'edge'
3419
+ pads = [0, 2, 0, 0]
3367
3420
 
3368
- output =
3369
- [
3370
- [1.0, 1.0, 1.0, 1.2],
3371
- [2.3, 2.3, 2.3, 3.4],
3372
- [4.5, 4.5, 4.5, 5.7],
3373
- ]
3421
+ mode = 'edge'
3422
+
3423
+ output = [
3424
+ [1.0, 1.0, 1.0, 1.2],
3425
+ [2.3, 2.3, 2.3, 3.4],
3426
+ [4.5, 4.5, 4.5, 5.7],
3427
+ ]
3428
+ ```
3374
3429
 
3430
+ Example 4 (`wrap` mode):
3431
+
3432
+ ```
3433
+ data = [
3434
+ [1.0, 1.2],
3435
+ [2.3, 3.4],
3436
+ [4.5, 5.7],
3437
+ ]
3438
+
3439
+ pads = [2, 1, 1, 1]
3440
+
3441
+ mode = 'wrap'
3442
+
3443
+ output = [
3444
+ [3.4, 2.3, 3.4, 2.3],
3445
+ [5.7, 4.5, 5.7, 4.5],
3446
+ [1.2, 1.0, 1.2, 1.0],
3447
+ [3.4, 2.3, 3.4, 2.3],
3448
+ [5.7, 4.5, 5.7, 4.5],
3449
+ [1.2, 1.0, 1.2, 1.0],
3450
+ ]
3451
+ ```
3375
3452
  )DOC";
3376
3453
 
3377
3454
  ONNX_OPERATOR_SET_SCHEMA(
3378
3455
  Pad,
3379
- 18,
3380
- OpSchema()
3381
- .Attr(
3382
- "mode",
3383
- "Supported modes: `constant`(default), `reflect`, `edge`",
3384
- AttributeProto::STRING,
3385
- std::string("constant"))
3386
- .SetDoc(Pad_ver18_doc)
3387
- .Input(0, "data", "Input tensor.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
3388
- .Input(
3389
- 1,
3390
- "pads",
3391
- "Tensor of integers indicating the number of padding elements to add or remove (if negative) "
3392
- "at the beginning and end of each axis. For 2D input tensor, it is the number of pixels. "
3393
- "`pads` should be a 1D tensor of shape [2 * num_axes] where `num_axes` refers to the number "
3394
- "of elements in the `axes` input or the input rank if `axes` are not provided explicitly. "
3395
- "`pads` format should be: [x1_begin, x2_begin, ..., x1_end, x2_end,...], "
3396
- "where xi_begin is the number of pad values added at the beginning of axis `axes[i]` and "
3397
- "xi_end, the number of pad values added at the end of axis `axes[i]`.",
3398
- "tensor(int64)",
3399
- OpSchema::Single,
3400
- true,
3401
- 1,
3402
- OpSchema::NonDifferentiable)
3403
- .Input(
3404
- 2,
3405
- "constant_value",
3406
- "(Optional) A scalar value to be used if the mode chosen is `constant` (by default it is 0, "
3407
- "empty string or False).",
3408
- "T",
3409
- OpSchema::Optional,
3410
- true,
3411
- 1,
3412
- OpSchema::NonDifferentiable)
3413
- .Input(
3414
- 3,
3415
- "axes",
3416
- "1-D tensor of axes that `pads` apply to. Negative value means counting dimensions "
3417
- "from the back. Accepted range is [-r, r-1] where r = rank(data). Behavior is undefined if an "
3418
- "axis is repeated. If not provided, all axes are assumed (`[0, 1, ..., input_rank-1]`).",
3419
- "Tind",
3420
- OpSchema::Optional,
3421
- true,
3422
- 1,
3423
- OpSchema::NonDifferentiable)
3424
-
3425
- .Output(0, "output", "Tensor after padding.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
3426
- .TypeConstraint(
3427
- "T",
3428
- OpSchema::all_tensor_types_with_bfloat(),
3429
- "Constrain input and output types to all tensor types.")
3430
- .TypeConstraint("Tind", {"tensor(int32)", "tensor(int64)"}, "Constrain indices to integer types")
3431
- .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
3432
- // Type inference
3433
- propagateElemTypeFromInputToOutput(ctx, 0, 0);
3434
- // Shape inference needs the input data shape
3435
- if (!hasNInputShapes(ctx, 1)) {
3436
- return;
3437
- }
3438
- const auto& input_shape = ctx.getInputType(0)->tensor_type().shape();
3439
- const auto input_rank = input_shape.dim_size();
3440
-
3441
- std::vector<int64_t> axes;
3442
- if (hasInputShape(ctx, 3)) { //'axes' input
3443
- auto axes_initializer = ctx.getInputData(3);
3444
- if (axes_initializer == nullptr)
3445
- return; // can't do shape inference then
3446
-
3447
- axes = ParseData<int64_t>(axes_initializer);
3448
-
3449
- std::vector<bool> tmp(input_rank, false);
3450
- for (auto axis : axes) {
3451
- if (tmp[axis]) {
3452
- fail_shape_inference("Repeated axis: ", axis);
3453
- }
3454
- tmp[axis] = true;
3455
- }
3456
- } else {
3457
- axes.resize(input_rank);
3458
- std::iota(axes.begin(), axes.end(), 0);
3459
- }
3460
-
3461
- int num_axes = axes.size();
3462
- if (num_axes > input_rank) {
3463
- fail_shape_inference("Too many axes provided");
3464
- }
3465
-
3466
- auto* output_shape = ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape();
3467
-
3468
- // Populating default dims
3469
- std::vector<TensorShapeProto_Dimension*> out_dims(input_rank);
3470
- for (int i = 0; i < input_rank; ++i) {
3471
- out_dims[i] = output_shape->add_dim();
3472
- }
3473
-
3474
- // Shape Inference if
3475
- // 1. 'pads' are available.
3476
- // and 2. 'axes' are available, or default.
3477
- const TensorProto* pads_initializer = ctx.getInputData(1);
3478
- if (nullptr != pads_initializer && !axes.empty()) {
3479
- if (pads_initializer->dims_size() != 1 || pads_initializer->data_type() != TensorProto::INT64) {
3480
- fail_shape_inference("'pads' input must be a 1D (shape: [2 * num_axes]) tensor of type int64");
3481
- }
3482
-
3483
- const auto& pads_data = ParseData<int64_t>(pads_initializer);
3484
- if (pads_data.size() != static_cast<size_t>(2 * num_axes)) {
3485
- fail_shape_inference(
3486
- "Pads has incorrect number of values. Expected 2 * ",
3487
- num_axes,
3488
- " values. Got ",
3489
- pads_data.size(),
3490
- " values.");
3491
- }
3492
-
3493
- // Set default dim values
3494
- for (int i = 0; i < input_rank; ++i) {
3495
- const auto& input_dim = input_shape.dim(i);
3496
- if (input_dim.has_dim_value()) {
3497
- out_dims[i]->set_dim_value(input_dim.dim_value());
3498
- }
3499
- }
3500
-
3501
- for (int i = 0; i < num_axes; ++i) {
3502
- auto axis = axes[i];
3503
- const auto& input_dim = input_shape.dim(axis);
3504
- auto& out_dim = *out_dims[axis];
3505
- auto total_pad = pads_data[i] + pads_data[num_axes + i];
3506
- if (input_dim.has_dim_value()) {
3507
- out_dim.set_dim_value(input_dim.dim_value() + total_pad);
3508
- } else if (total_pad == 0) {
3509
- out_dim = input_dim;
3510
- }
3511
- }
3512
- }
3513
- }));
3456
+ 19,
3457
+ OpSchema().FillUsing(
3458
+ PadDocGenerator(Pad_ver19_doc, "Supported modes: `constant`(default), `reflect`, `edge`, `wrap`")));
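The output-shape rule carried by the removed inline inference (and presumably preserved by the shared `PadDocGenerator`) is simply that each padded axis grows by its begin and end pad counts; a minimal sketch with an invented helper name:

```python
def pad_output_shape(input_shape, pads, axes=None):
    # out[d] = in[d] + pads[i] + pads[num_axes + i], where d = axes[i]
    axes = list(range(len(input_shape))) if axes is None else list(axes)
    num_axes = len(axes)
    assert len(pads) == 2 * num_axes, "pads must hold 2 * num_axes values"
    out = list(input_shape)
    for i, d in enumerate(axes):
        out[d] += pads[i] + pads[num_axes + i]
    return out

print(pad_output_shape([3, 2], [2, 1, 1, 1]))  # [6, 4], matching Example 4 above
```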
3514
3459
 
3515
3460
  static const char* Trilu_ver14_doc = R"DOC(
3516
3461
  Given a 2-D matrix or batches of 2-D matrices, returns the upper or lower triangular part of the tensor(s).
@@ -3565,10 +3510,7 @@ ONNX_OPERATOR_SET_SCHEMA(
3565
3510
  true,
3566
3511
  1,
3567
3512
  OpSchema::Differentiable)
3568
- .TypeConstraint(
3569
- "T",
3570
- OpSchema::all_tensor_types_with_bfloat(),
3571
- "Constrain input and output types to all tensor types.")
3513
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
3572
3514
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
3573
3515
  // Type inference
3574
3516
  propagateElemTypeFromInputToOutput(ctx, 0, 0);
@@ -3626,10 +3568,7 @@ ONNX_OPERATOR_SET_SCHEMA(
3626
3568
  "Behavior is undefined if an axis is repeated.",
3627
3569
  AttributeProto::INTS,
3628
3570
  OPTIONAL_VALUE)
3629
- .TypeConstraint(
3630
- "T",
3631
- OpSchema::all_tensor_types_with_bfloat(),
3632
- "Constrain input and output types to all tensor types.")
3571
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
3633
3572
  .TypeConstraint("Tind", {"tensor(int32)", "tensor(int64)"}, "Constrain indices to integer types")
3634
3573
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
3635
3574
  if (ctx.getNumInputs() != 2) {
@@ -3668,14 +3607,9 @@ ONNX_OPERATOR_SET_SCHEMA(
3668
3607
  std::vector<int64_t> axes;
3669
3608
  if (axes_attr) {
3670
3609
  axes = RetrieveValues<int64_t>(*axes_attr);
3671
-
3672
- std::vector<bool> tmp(input_rank, false);
3673
- for (auto axis : axes) {
3674
- if (tmp[axis]) {
3675
- fail_shape_inference("Repeated axis: ", axis);
3676
- }
3677
- tmp[axis] = true;
3678
- }
3610
+ checkAxesRange(axes, input_rank);
3611
+ adjustNegativeAxes(axes, input_rank);
3612
+ checkDuplicateAxes(axes, input_rank);
3679
3613
  } else {
3680
3614
  axes.resize(input_rank);
3681
3615
  std::iota(axes.begin(), axes.end(), 0);