onnx-1.16.1-cp39-cp39-win_amd64.whl → onnx-1.17.0-cp39-cp39-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of onnx might be problematic; see the release's advisory details for more information.

Files changed (843)
  1. onnx/__init__.py +3 -1
  2. onnx/_custom_element_types.py +63 -0
  3. onnx/backend/base.py +17 -15
  4. onnx/backend/sample/ops/__init__.py +4 -4
  5. onnx/backend/sample/ops/abs.py +1 -0
  6. onnx/backend/test/__init__.py +1 -0
  7. onnx/backend/test/case/__init__.py +2 -2
  8. onnx/backend/test/case/base.py +6 -5
  9. onnx/backend/test/case/model/__init__.py +4 -3
  10. onnx/backend/test/case/model/expand.py +1 -0
  11. onnx/backend/test/case/model/gradient.py +1 -0
  12. onnx/backend/test/case/model/sequence.py +3 -1
  13. onnx/backend/test/case/model/shrink.py +1 -0
  14. onnx/backend/test/case/model/sign.py +1 -0
  15. onnx/backend/test/case/model/single-relu.py +1 -0
  16. onnx/backend/test/case/model/stringnormalizer.py +1 -1
  17. onnx/backend/test/case/node/__init__.py +31 -22
  18. onnx/backend/test/case/node/_image_decoder_data.py +1 -0
  19. onnx/backend/test/case/node/abs.py +1 -0
  20. onnx/backend/test/case/node/acos.py +1 -0
  21. onnx/backend/test/case/node/acosh.py +1 -0
  22. onnx/backend/test/case/node/adagrad.py +2 -1
  23. onnx/backend/test/case/node/adam.py +4 -1
  24. onnx/backend/test/case/node/add.py +1 -0
  25. onnx/backend/test/case/node/affinegrid.py +1 -0
  26. onnx/backend/test/case/node/ai_onnx_ml/array_feature_extractor.py +1 -0
  27. onnx/backend/test/case/node/ai_onnx_ml/binarizer.py +1 -0
  28. onnx/backend/test/case/node/ai_onnx_ml/label_encoder.py +1 -0
  29. onnx/backend/test/case/node/ai_onnx_ml/tree_ensemble.py +1 -0
  30. onnx/backend/test/case/node/and.py +1 -0
  31. onnx/backend/test/case/node/argmax.py +1 -0
  32. onnx/backend/test/case/node/argmin.py +1 -0
  33. onnx/backend/test/case/node/asin.py +1 -0
  34. onnx/backend/test/case/node/asinh.py +1 -0
  35. onnx/backend/test/case/node/atan.py +1 -0
  36. onnx/backend/test/case/node/atanh.py +1 -0
  37. onnx/backend/test/case/node/averagepool.py +1 -0
  38. onnx/backend/test/case/node/batchnorm.py +1 -0
  39. onnx/backend/test/case/node/bernoulli.py +1 -0
  40. onnx/backend/test/case/node/bitshift.py +1 -0
  41. onnx/backend/test/case/node/bitwiseand.py +1 -0
  42. onnx/backend/test/case/node/bitwisenot.py +1 -0
  43. onnx/backend/test/case/node/bitwiseor.py +1 -0
  44. onnx/backend/test/case/node/bitwisexor.py +1 -0
  45. onnx/backend/test/case/node/blackmanwindow.py +13 -3
  46. onnx/backend/test/case/node/cast.py +2 -1
  47. onnx/backend/test/case/node/castlike.py +1 -0
  48. onnx/backend/test/case/node/ceil.py +1 -0
  49. onnx/backend/test/case/node/celu.py +1 -0
  50. onnx/backend/test/case/node/center_crop_pad.py +1 -0
  51. onnx/backend/test/case/node/clip.py +1 -0
  52. onnx/backend/test/case/node/col2im.py +1 -1
  53. onnx/backend/test/case/node/compress.py +1 -0
  54. onnx/backend/test/case/node/concat.py +3 -2
  55. onnx/backend/test/case/node/constant.py +1 -0
  56. onnx/backend/test/case/node/constantofshape.py +1 -0
  57. onnx/backend/test/case/node/conv.py +1 -0
  58. onnx/backend/test/case/node/convinteger.py +1 -0
  59. onnx/backend/test/case/node/convtranspose.py +135 -0
  60. onnx/backend/test/case/node/cos.py +1 -0
  61. onnx/backend/test/case/node/cosh.py +1 -0
  62. onnx/backend/test/case/node/cumsum.py +1 -0
  63. onnx/backend/test/case/node/deformconv.py +17 -26
  64. onnx/backend/test/case/node/depthtospace.py +1 -0
  65. onnx/backend/test/case/node/dequantizelinear.py +1 -0
  66. onnx/backend/test/case/node/det.py +1 -0
  67. onnx/backend/test/case/node/dft.py +1 -0
  68. onnx/backend/test/case/node/div.py +1 -0
  69. onnx/backend/test/case/node/dropout.py +1 -0
  70. onnx/backend/test/case/node/dynamicquantizelinear.py +1 -0
  71. onnx/backend/test/case/node/einsum.py +2 -3
  72. onnx/backend/test/case/node/elu.py +1 -0
  73. onnx/backend/test/case/node/equal.py +1 -0
  74. onnx/backend/test/case/node/erf.py +1 -0
  75. onnx/backend/test/case/node/exp.py +1 -0
  76. onnx/backend/test/case/node/expand.py +1 -0
  77. onnx/backend/test/case/node/eyelike.py +1 -0
  78. onnx/backend/test/case/node/flatten.py +1 -0
  79. onnx/backend/test/case/node/floor.py +1 -0
  80. onnx/backend/test/case/node/gather.py +1 -0
  81. onnx/backend/test/case/node/gatherelements.py +1 -0
  82. onnx/backend/test/case/node/gathernd.py +1 -0
  83. onnx/backend/test/case/node/gelu.py +1 -0
  84. onnx/backend/test/case/node/gemm.py +3 -4
  85. onnx/backend/test/case/node/globalaveragepool.py +1 -0
  86. onnx/backend/test/case/node/globalmaxpool.py +1 -0
  87. onnx/backend/test/case/node/greater.py +1 -0
  88. onnx/backend/test/case/node/greater_equal.py +1 -0
  89. onnx/backend/test/case/node/gridsample.py +1 -0
  90. onnx/backend/test/case/node/groupnormalization.py +1 -0
  91. onnx/backend/test/case/node/gru.py +3 -2
  92. onnx/backend/test/case/node/hammingwindow.py +13 -2
  93. onnx/backend/test/case/node/hannwindow.py +10 -2
  94. onnx/backend/test/case/node/hardmax.py +1 -0
  95. onnx/backend/test/case/node/hardsigmoid.py +1 -0
  96. onnx/backend/test/case/node/hardswish.py +1 -0
  97. onnx/backend/test/case/node/identity.py +1 -0
  98. onnx/backend/test/case/node/if.py +1 -0
  99. onnx/backend/test/case/node/instancenorm.py +1 -0
  100. onnx/backend/test/case/node/isinf.py +1 -0
  101. onnx/backend/test/case/node/isnan.py +1 -0
  102. onnx/backend/test/case/node/layernormalization.py +1 -0
  103. onnx/backend/test/case/node/leakyrelu.py +1 -0
  104. onnx/backend/test/case/node/less.py +1 -0
  105. onnx/backend/test/case/node/less_equal.py +1 -0
  106. onnx/backend/test/case/node/log.py +1 -0
  107. onnx/backend/test/case/node/logsoftmax.py +1 -0
  108. onnx/backend/test/case/node/loop.py +4 -3
  109. onnx/backend/test/case/node/lppool.py +1 -0
  110. onnx/backend/test/case/node/lrn.py +1 -0
  111. onnx/backend/test/case/node/lstm.py +3 -2
  112. onnx/backend/test/case/node/matmul.py +1 -0
  113. onnx/backend/test/case/node/matmulinteger.py +1 -0
  114. onnx/backend/test/case/node/max.py +1 -0
  115. onnx/backend/test/case/node/maxpool.py +1 -0
  116. onnx/backend/test/case/node/maxunpool.py +1 -0
  117. onnx/backend/test/case/node/mean.py +1 -0
  118. onnx/backend/test/case/node/meanvariancenormalization.py +1 -0
  119. onnx/backend/test/case/node/melweightmatrix.py +1 -0
  120. onnx/backend/test/case/node/min.py +1 -0
  121. onnx/backend/test/case/node/mish.py +1 -0
  122. onnx/backend/test/case/node/mod.py +1 -0
  123. onnx/backend/test/case/node/momentum.py +1 -0
  124. onnx/backend/test/case/node/mul.py +1 -0
  125. onnx/backend/test/case/node/neg.py +1 -0
  126. onnx/backend/test/case/node/negativeloglikelihoodloss.py +4 -1
  127. onnx/backend/test/case/node/nonmaxsuppression.py +1 -0
  128. onnx/backend/test/case/node/nonzero.py +1 -0
  129. onnx/backend/test/case/node/not.py +1 -0
  130. onnx/backend/test/case/node/onehot.py +1 -0
  131. onnx/backend/test/case/node/optionalgetelement.py +3 -2
  132. onnx/backend/test/case/node/optionalhaselement.py +2 -3
  133. onnx/backend/test/case/node/or.py +1 -0
  134. onnx/backend/test/case/node/pad.py +2 -1
  135. onnx/backend/test/case/node/pow.py +1 -0
  136. onnx/backend/test/case/node/prelu.py +1 -0
  137. onnx/backend/test/case/node/qlinearconv.py +1 -0
  138. onnx/backend/test/case/node/qlinearmatmul.py +1 -0
  139. onnx/backend/test/case/node/quantizelinear.py +1 -0
  140. onnx/backend/test/case/node/rangeop.py +1 -0
  141. onnx/backend/test/case/node/reciprocal.py +1 -0
  142. onnx/backend/test/case/node/reduce_log_sum.py +1 -0
  143. onnx/backend/test/case/node/reduce_log_sum_exp.py +1 -0
  144. onnx/backend/test/case/node/reducel1.py +1 -0
  145. onnx/backend/test/case/node/reducel2.py +1 -0
  146. onnx/backend/test/case/node/reducemax.py +2 -1
  147. onnx/backend/test/case/node/reducemean.py +1 -0
  148. onnx/backend/test/case/node/reducemin.py +1 -0
  149. onnx/backend/test/case/node/reduceprod.py +1 -0
  150. onnx/backend/test/case/node/reducesum.py +2 -1
  151. onnx/backend/test/case/node/reducesumsquare.py +1 -0
  152. onnx/backend/test/case/node/regex_full_match.py +1 -0
  153. onnx/backend/test/case/node/relu.py +1 -0
  154. onnx/backend/test/case/node/reshape.py +1 -0
  155. onnx/backend/test/case/node/resize.py +3 -2
  156. onnx/backend/test/case/node/reversesequence.py +1 -0
  157. onnx/backend/test/case/node/rnn.py +3 -2
  158. onnx/backend/test/case/node/roialign.py +1 -0
  159. onnx/backend/test/case/node/round.py +4 -3
  160. onnx/backend/test/case/node/scan.py +1 -0
  161. onnx/backend/test/case/node/scatter.py +1 -0
  162. onnx/backend/test/case/node/scatterelements.py +7 -3
  163. onnx/backend/test/case/node/scatternd.py +1 -0
  164. onnx/backend/test/case/node/selu.py +1 -0
  165. onnx/backend/test/case/node/sequence_map.py +1 -0
  166. onnx/backend/test/case/node/sequenceinsert.py +4 -3
  167. onnx/backend/test/case/node/shape.py +1 -0
  168. onnx/backend/test/case/node/shrink.py +1 -0
  169. onnx/backend/test/case/node/sigmoid.py +1 -0
  170. onnx/backend/test/case/node/sign.py +1 -0
  171. onnx/backend/test/case/node/sin.py +1 -0
  172. onnx/backend/test/case/node/sinh.py +1 -0
  173. onnx/backend/test/case/node/size.py +1 -0
  174. onnx/backend/test/case/node/slice.py +1 -0
  175. onnx/backend/test/case/node/softmax.py +1 -0
  176. onnx/backend/test/case/node/softmaxcrossentropy.py +4 -1
  177. onnx/backend/test/case/node/softplus.py +1 -0
  178. onnx/backend/test/case/node/softsign.py +1 -0
  179. onnx/backend/test/case/node/spacetodepth.py +1 -0
  180. onnx/backend/test/case/node/split.py +1 -0
  181. onnx/backend/test/case/node/splittosequence.py +1 -0
  182. onnx/backend/test/case/node/sqrt.py +1 -0
  183. onnx/backend/test/case/node/squeeze.py +1 -0
  184. onnx/backend/test/case/node/stft.py +4 -1
  185. onnx/backend/test/case/node/string_concat.py +1 -0
  186. onnx/backend/test/case/node/string_split.py +1 -0
  187. onnx/backend/test/case/node/stringnormalizer.py +1 -0
  188. onnx/backend/test/case/node/sub.py +1 -0
  189. onnx/backend/test/case/node/sum.py +1 -0
  190. onnx/backend/test/case/node/tan.py +1 -0
  191. onnx/backend/test/case/node/tanh.py +1 -0
  192. onnx/backend/test/case/node/tfidfvectorizer.py +1 -0
  193. onnx/backend/test/case/node/thresholdedrelu.py +1 -0
  194. onnx/backend/test/case/node/tile.py +1 -0
  195. onnx/backend/test/case/node/topk.py +1 -0
  196. onnx/backend/test/case/node/transpose.py +1 -0
  197. onnx/backend/test/case/node/trilu.py +1 -0
  198. onnx/backend/test/case/node/unique.py +7 -0
  199. onnx/backend/test/case/node/unsqueeze.py +1 -0
  200. onnx/backend/test/case/node/upsample.py +1 -0
  201. onnx/backend/test/case/node/where.py +1 -0
  202. onnx/backend/test/case/node/xor.py +1 -0
  203. onnx/backend/test/case/test_case.py +6 -5
  204. onnx/backend/test/case/utils.py +2 -2
  205. onnx/backend/test/cmd_tools.py +1 -0
  206. onnx/backend/test/data/node/test_acos/model.onnx +0 -0
  207. onnx/backend/test/data/node/test_acos/test_data_set_0/output_0.pb +0 -0
  208. onnx/backend/test/data/node/test_acos_example/model.onnx +0 -0
  209. onnx/backend/test/data/node/test_acosh/model.onnx +0 -0
  210. onnx/backend/test/data/node/test_acosh/test_data_set_0/output_0.pb +1 -1
  211. onnx/backend/test/data/node/test_acosh_example/model.onnx +0 -0
  212. onnx/backend/test/data/node/test_asin/model.onnx +0 -0
  213. onnx/backend/test/data/node/test_asin/test_data_set_0/output_0.pb +1 -1
  214. onnx/backend/test/data/node/test_asin_example/model.onnx +0 -0
  215. onnx/backend/test/data/node/test_asinh/model.onnx +0 -0
  216. onnx/backend/test/data/node/test_asinh/test_data_set_0/output_0.pb +1 -1
  217. onnx/backend/test/data/node/test_asinh_example/model.onnx +0 -0
  218. onnx/backend/test/data/node/test_atan/model.onnx +0 -0
  219. onnx/backend/test/data/node/test_atan/test_data_set_0/output_0.pb +1 -1
  220. onnx/backend/test/data/node/test_atan_example/model.onnx +0 -0
  221. onnx/backend/test/data/node/test_atanh/model.onnx +0 -0
  222. onnx/backend/test/data/node/test_atanh/test_data_set_0/output_0.pb +2 -2
  223. onnx/backend/test/data/node/test_atanh_example/model.onnx +0 -0
  224. onnx/backend/test/data/node/test_averagepool_1d_default/model.onnx +0 -0
  225. onnx/backend/test/data/node/test_averagepool_2d_ceil/model.onnx +0 -0
  226. onnx/backend/test/data/node/test_averagepool_2d_default/model.onnx +0 -0
  227. onnx/backend/test/data/node/test_averagepool_2d_dilations/model.onnx +0 -0
  228. onnx/backend/test/data/node/test_averagepool_2d_pads/model.onnx +0 -0
  229. onnx/backend/test/data/node/test_averagepool_2d_pads_count_include_pad/model.onnx +0 -0
  230. onnx/backend/test/data/node/test_averagepool_2d_precomputed_pads/model.onnx +0 -0
  231. onnx/backend/test/data/node/test_averagepool_2d_precomputed_pads_count_include_pad/model.onnx +0 -0
  232. onnx/backend/test/data/node/test_averagepool_2d_precomputed_same_upper/model.onnx +0 -0
  233. onnx/backend/test/data/node/test_averagepool_2d_precomputed_strides/model.onnx +0 -0
  234. onnx/backend/test/data/node/test_averagepool_2d_same_lower/model.onnx +0 -0
  235. onnx/backend/test/data/node/test_averagepool_2d_same_upper/model.onnx +0 -0
  236. onnx/backend/test/data/node/test_averagepool_2d_strides/model.onnx +0 -0
  237. onnx/backend/test/data/node/test_averagepool_3d_default/model.onnx +0 -0
  238. onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_0_ceil_mode_is_False/model.onnx +0 -0
  239. onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_0_ceil_mode_is_True/model.onnx +0 -0
  240. onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_1_ceil_mode_is_False/model.onnx +0 -0
  241. onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_1_ceil_mode_is_True/model.onnx +0 -0
  242. onnx/backend/test/data/node/test_averagepool_3d_dilations_small/model.onnx +0 -0
  243. onnx/backend/test/data/node/test_basic_conv_with_padding/model.onnx +0 -0
  244. onnx/backend/test/data/node/test_basic_conv_without_padding/model.onnx +0 -0
  245. onnx/backend/test/data/node/test_basic_deform_conv_with_padding/model.onnx +0 -0
  246. onnx/backend/test/data/node/test_basic_deform_conv_without_padding/model.onnx +0 -0
  247. onnx/backend/test/data/node/test_bernoulli/model.onnx +0 -0
  248. onnx/backend/test/data/node/test_bernoulli_double/model.onnx +0 -0
  249. onnx/backend/test/data/node/test_bernoulli_double_expanded/model.onnx +0 -0
  250. onnx/backend/test/data/node/test_bernoulli_expanded/model.onnx +0 -0
  251. onnx/backend/test/data/node/test_bernoulli_seed/model.onnx +0 -0
  252. onnx/backend/test/data/node/test_bernoulli_seed_expanded/model.onnx +0 -0
  253. onnx/backend/test/data/node/test_blackmanwindow/test_data_set_0/output_0.pb +0 -0
  254. onnx/backend/test/data/node/test_blackmanwindow_expanded/test_data_set_0/output_0.pb +0 -0
  255. onnx/backend/test/data/node/test_blackmanwindow_symmetric/test_data_set_0/output_0.pb +0 -0
  256. onnx/backend/test/data/node/test_blackmanwindow_symmetric_expanded/test_data_set_0/output_0.pb +0 -0
  257. onnx/backend/test/data/node/test_cast_FLOAT16_to_INT4/test_data_set_0/output_0.pb +1 -1
  258. onnx/backend/test/data/node/test_cast_FLOAT_to_INT4/test_data_set_0/output_0.pb +1 -1
  259. onnx/backend/test/data/node/test_cast_INT4_to_FLOAT/test_data_set_0/input_0.pb +1 -1
  260. onnx/backend/test/data/node/test_cast_INT4_to_FLOAT16/test_data_set_0/input_0.pb +1 -1
  261. onnx/backend/test/data/node/test_cast_INT4_to_INT8/test_data_set_0/input_0.pb +1 -1
  262. onnx/backend/test/data/node/test_conv_with_autopad_same/model.onnx +0 -0
  263. onnx/backend/test/data/node/test_conv_with_strides_and_asymmetric_padding/model.onnx +0 -0
  264. onnx/backend/test/data/node/test_conv_with_strides_no_padding/model.onnx +0 -0
  265. onnx/backend/test/data/node/test_conv_with_strides_padding/model.onnx +0 -0
  266. onnx/backend/test/data/node/test_convtranspose/model.onnx +0 -0
  267. onnx/backend/test/data/node/test_convtranspose_1d/model.onnx +0 -0
  268. onnx/backend/test/data/node/test_convtranspose_3d/model.onnx +0 -0
  269. onnx/backend/test/data/node/test_convtranspose_autopad_same/model.onnx +0 -0
  270. onnx/backend/test/data/node/test_convtranspose_dilations/model.onnx +0 -0
  271. onnx/backend/test/data/node/test_convtranspose_group_2/model.onnx +0 -0
  272. onnx/backend/test/data/node/test_convtranspose_group_2/test_data_set_0/input_0.pb +0 -0
  273. onnx/backend/test/data/node/test_convtranspose_group_2/test_data_set_0/input_1.pb +0 -0
  274. onnx/backend/test/data/node/test_convtranspose_group_2/test_data_set_0/output_0.pb +0 -0
  275. onnx/backend/test/data/node/test_convtranspose_group_2_image_3/model.onnx +0 -0
  276. onnx/backend/test/data/node/test_convtranspose_group_2_image_3/test_data_set_0/input_0.pb +0 -0
  277. onnx/backend/test/data/node/test_convtranspose_group_2_image_3/test_data_set_0/input_1.pb +0 -0
  278. onnx/backend/test/data/node/test_convtranspose_group_2_image_3/test_data_set_0/output_0.pb +0 -0
  279. onnx/backend/test/data/node/test_convtranspose_kernel_shape/model.onnx +0 -0
  280. onnx/backend/test/data/node/test_convtranspose_output_shape/model.onnx +0 -0
  281. onnx/backend/test/data/node/test_convtranspose_pad/model.onnx +0 -0
  282. onnx/backend/test/data/node/test_convtranspose_pads/model.onnx +0 -0
  283. onnx/backend/test/data/node/test_cos/model.onnx +0 -0
  284. onnx/backend/test/data/node/test_cos_example/model.onnx +0 -0
  285. onnx/backend/test/data/node/test_cosh/model.onnx +0 -0
  286. onnx/backend/test/data/node/test_cosh/test_data_set_0/output_0.pb +1 -1
  287. onnx/backend/test/data/node/test_cosh_example/model.onnx +0 -0
  288. onnx/backend/test/data/node/test_cosh_example/test_data_set_0/output_0.pb +0 -0
  289. onnx/backend/test/data/node/test_deform_conv_with_mask_bias/model.onnx +0 -0
  290. onnx/backend/test/data/node/test_deform_conv_with_multiple_offset_groups/model.onnx +0 -0
  291. onnx/backend/test/data/node/test_dequantizelinear_int4/test_data_set_0/input_0.pb +1 -1
  292. onnx/backend/test/data/node/test_det_2d/model.onnx +0 -0
  293. onnx/backend/test/data/node/test_det_nd/model.onnx +0 -0
  294. onnx/backend/test/data/node/test_dft/test_data_set_0/output_0.pb +0 -0
  295. onnx/backend/test/data/node/test_dft_axis/test_data_set_0/output_0.pb +0 -0
  296. onnx/backend/test/data/node/test_dft_axis_opset19/test_data_set_0/output_0.pb +0 -0
  297. onnx/backend/test/data/node/test_dft_inverse/test_data_set_0/output_0.pb +0 -0
  298. onnx/backend/test/data/node/test_dft_inverse_opset19/test_data_set_0/output_0.pb +0 -0
  299. onnx/backend/test/data/node/test_dft_opset19/test_data_set_0/output_0.pb +0 -0
  300. onnx/backend/test/data/node/test_dropout_default/model.onnx +0 -0
  301. onnx/backend/test/data/node/test_dropout_default_mask/model.onnx +0 -0
  302. onnx/backend/test/data/node/test_dropout_default_mask_ratio/model.onnx +0 -0
  303. onnx/backend/test/data/node/test_dropout_default_ratio/model.onnx +0 -0
  304. onnx/backend/test/data/node/test_elu/model.onnx +0 -0
  305. onnx/backend/test/data/node/test_elu_default/model.onnx +0 -0
  306. onnx/backend/test/data/node/test_elu_example/model.onnx +0 -0
  307. onnx/backend/test/data/node/test_eyelike_populate_off_main_diagonal/model.onnx +0 -0
  308. onnx/backend/test/data/node/test_eyelike_with_dtype/model.onnx +0 -0
  309. onnx/backend/test/data/node/test_eyelike_without_dtype/model.onnx +0 -0
  310. onnx/backend/test/data/node/test_gelu_default_1/test_data_set_0/output_0.pb +0 -0
  311. onnx/backend/test/data/node/test_gelu_default_1_expanded/test_data_set_0/output_0.pb +0 -0
  312. onnx/backend/test/data/node/test_gelu_default_2/test_data_set_0/output_0.pb +4 -3
  313. onnx/backend/test/data/node/test_gelu_default_2_expanded/test_data_set_0/output_0.pb +4 -3
  314. onnx/backend/test/data/node/test_gelu_tanh_2/test_data_set_0/output_0.pb +0 -0
  315. onnx/backend/test/data/node/test_gelu_tanh_2_expanded/test_data_set_0/output_0.pb +0 -0
  316. onnx/backend/test/data/node/test_globalaveragepool/model.onnx +0 -0
  317. onnx/backend/test/data/node/test_globalaveragepool_precomputed/model.onnx +0 -0
  318. onnx/backend/test/data/node/test_globalmaxpool/model.onnx +0 -0
  319. onnx/backend/test/data/node/test_globalmaxpool_precomputed/model.onnx +0 -0
  320. onnx/backend/test/data/node/test_gridsample/model.onnx +0 -0
  321. onnx/backend/test/data/node/test_gridsample_aligncorners_true/model.onnx +0 -0
  322. onnx/backend/test/data/node/test_gridsample_bicubic/model.onnx +0 -0
  323. onnx/backend/test/data/node/test_gridsample_bicubic_align_corners_0_additional_1/model.onnx +0 -0
  324. onnx/backend/test/data/node/test_gridsample_bicubic_align_corners_1_additional_1/model.onnx +0 -0
  325. onnx/backend/test/data/node/test_gridsample_bilinear/model.onnx +0 -0
  326. onnx/backend/test/data/node/test_gridsample_bilinear_align_corners_0_additional_1/model.onnx +0 -0
  327. onnx/backend/test/data/node/test_gridsample_bilinear_align_corners_1_additional_1/model.onnx +0 -0
  328. onnx/backend/test/data/node/test_gridsample_border_padding/model.onnx +0 -0
  329. onnx/backend/test/data/node/test_gridsample_nearest/model.onnx +0 -0
  330. onnx/backend/test/data/node/test_gridsample_nearest_align_corners_0_additional_1/model.onnx +0 -0
  331. onnx/backend/test/data/node/test_gridsample_nearest_align_corners_1_additional_1/model.onnx +0 -0
  332. onnx/backend/test/data/node/test_gridsample_reflection_padding/model.onnx +0 -0
  333. onnx/backend/test/data/node/test_gridsample_volumetric_bilinear_align_corners_0/model.onnx +0 -0
  334. onnx/backend/test/data/node/test_gridsample_volumetric_bilinear_align_corners_1/model.onnx +0 -0
  335. onnx/backend/test/data/node/test_gridsample_volumetric_nearest_align_corners_0/model.onnx +0 -0
  336. onnx/backend/test/data/node/test_gridsample_volumetric_nearest_align_corners_1/model.onnx +0 -0
  337. onnx/backend/test/data/node/test_gridsample_zeros_padding/model.onnx +0 -0
  338. onnx/backend/test/data/node/test_gru_batchwise/model.onnx +0 -0
  339. onnx/backend/test/data/node/test_gru_defaults/model.onnx +0 -0
  340. onnx/backend/test/data/node/test_gru_seq_length/model.onnx +0 -0
  341. onnx/backend/test/data/node/test_gru_with_initial_bias/model.onnx +0 -0
  342. onnx/backend/test/data/node/test_hammingwindow/test_data_set_0/output_0.pb +0 -0
  343. onnx/backend/test/data/node/test_hammingwindow_expanded/test_data_set_0/output_0.pb +0 -0
  344. onnx/backend/test/data/node/test_hammingwindow_symmetric/test_data_set_0/output_0.pb +1 -1
  345. onnx/backend/test/data/node/test_hammingwindow_symmetric_expanded/test_data_set_0/output_0.pb +1 -1
  346. onnx/backend/test/data/node/test_hannwindow/test_data_set_0/output_0.pb +0 -0
  347. onnx/backend/test/data/node/test_hannwindow_expanded/test_data_set_0/output_0.pb +0 -0
  348. onnx/backend/test/data/node/test_hannwindow_symmetric/test_data_set_0/output_0.pb +0 -0
  349. onnx/backend/test/data/node/test_hannwindow_symmetric_expanded/test_data_set_0/output_0.pb +0 -0
  350. onnx/backend/test/data/node/test_hardsigmoid/model.onnx +0 -0
  351. onnx/backend/test/data/node/test_hardsigmoid_default/model.onnx +0 -0
  352. onnx/backend/test/data/node/test_hardsigmoid_example/model.onnx +0 -0
  353. onnx/backend/test/data/node/test_hardswish/model.onnx +0 -0
  354. onnx/backend/test/data/node/test_hardswish_expanded/model.onnx +0 -0
  355. onnx/backend/test/data/node/test_image_decoder_decode_jpeg2k_rgb/test_data_set_0/input_0.pb +0 -0
  356. onnx/backend/test/data/node/test_instancenorm_epsilon/model.onnx +0 -0
  357. onnx/backend/test/data/node/test_instancenorm_example/model.onnx +0 -0
  358. onnx/backend/test/data/node/test_lppool_1d_default/model.onnx +0 -0
  359. onnx/backend/test/data/node/test_lppool_1d_default/test_data_set_0/output_0.pb +2 -2
  360. onnx/backend/test/data/node/test_lppool_2d_default/model.onnx +0 -0
  361. onnx/backend/test/data/node/test_lppool_2d_default/test_data_set_0/output_0.pb +0 -0
  362. onnx/backend/test/data/node/test_lppool_2d_dilations/model.onnx +0 -0
  363. onnx/backend/test/data/node/test_lppool_2d_pads/model.onnx +0 -0
  364. onnx/backend/test/data/node/test_lppool_2d_pads/test_data_set_0/output_0.pb +0 -0
  365. onnx/backend/test/data/node/test_lppool_2d_same_lower/model.onnx +0 -0
  366. onnx/backend/test/data/node/test_lppool_2d_same_lower/test_data_set_0/output_0.pb +0 -0
  367. onnx/backend/test/data/node/test_lppool_2d_same_upper/model.onnx +0 -0
  368. onnx/backend/test/data/node/test_lppool_2d_same_upper/test_data_set_0/output_0.pb +0 -0
  369. onnx/backend/test/data/node/test_lppool_2d_strides/model.onnx +0 -0
  370. onnx/backend/test/data/node/test_lppool_2d_strides/test_data_set_0/output_0.pb +0 -0
  371. onnx/backend/test/data/node/test_lppool_3d_default/model.onnx +0 -0
  372. onnx/backend/test/data/node/test_lppool_3d_default/test_data_set_0/output_0.pb +0 -0
  373. onnx/backend/test/data/node/test_lstm_batchwise/model.onnx +0 -0
  374. onnx/backend/test/data/node/test_lstm_defaults/model.onnx +0 -0
  375. onnx/backend/test/data/node/test_lstm_with_initial_bias/model.onnx +0 -0
  376. onnx/backend/test/data/node/test_lstm_with_peepholes/model.onnx +0 -0
  377. onnx/backend/test/data/node/test_maxpool_1d_default/model.onnx +0 -0
  378. onnx/backend/test/data/node/test_maxpool_2d_ceil/model.onnx +0 -0
  379. onnx/backend/test/data/node/test_maxpool_2d_ceil_output_size_reduce_by_one/model.onnx +0 -0
  380. onnx/backend/test/data/node/test_maxpool_2d_default/model.onnx +0 -0
  381. onnx/backend/test/data/node/test_maxpool_2d_dilations/model.onnx +0 -0
  382. onnx/backend/test/data/node/test_maxpool_2d_pads/model.onnx +0 -0
  383. onnx/backend/test/data/node/test_maxpool_2d_precomputed_pads/model.onnx +0 -0
  384. onnx/backend/test/data/node/test_maxpool_2d_precomputed_same_upper/model.onnx +0 -0
  385. onnx/backend/test/data/node/test_maxpool_2d_precomputed_strides/model.onnx +0 -0
  386. onnx/backend/test/data/node/test_maxpool_2d_same_lower/model.onnx +0 -0
  387. onnx/backend/test/data/node/test_maxpool_2d_same_upper/model.onnx +0 -0
  388. onnx/backend/test/data/node/test_maxpool_2d_strides/model.onnx +0 -0
  389. onnx/backend/test/data/node/test_maxpool_2d_uint8/model.onnx +0 -0
  390. onnx/backend/test/data/node/test_maxpool_3d_default/model.onnx +0 -0
  391. onnx/backend/test/data/node/test_maxpool_3d_dilations/model.onnx +0 -0
  392. onnx/backend/test/data/node/test_maxpool_3d_dilations_use_ref_impl/model.onnx +0 -0
  393. onnx/backend/test/data/node/test_maxpool_3d_dilations_use_ref_impl_large/model.onnx +0 -0
  394. onnx/backend/test/data/node/test_maxpool_with_argmax_2d_precomputed_pads/model.onnx +0 -0
  395. onnx/backend/test/data/node/test_maxpool_with_argmax_2d_precomputed_strides/model.onnx +0 -0
  396. onnx/backend/test/data/node/test_maxunpool_export_with_output_shape/model.onnx +0 -0
  397. onnx/backend/test/data/node/test_maxunpool_export_without_output_shape/model.onnx +0 -0
  398. onnx/backend/test/data/node/test_mish/model.onnx +0 -0
  399. onnx/backend/test/data/node/test_mish/test_data_set_0/output_0.pb +0 -0
  400. onnx/backend/test/data/node/test_mish_expanded/model.onnx +0 -0
  401. onnx/backend/test/data/node/test_mish_expanded/test_data_set_0/output_0.pb +0 -0
  402. onnx/backend/test/data/node/test_nllloss_NC/model.onnx +0 -0
  403. onnx/backend/test/data/node/test_nllloss_NC_expanded/model.onnx +0 -0
  404. onnx/backend/test/data/node/test_nllloss_NCd1/model.onnx +0 -0
  405. onnx/backend/test/data/node/test_nllloss_NCd1_expanded/model.onnx +0 -0
  406. onnx/backend/test/data/node/test_nllloss_NCd1_ii/model.onnx +0 -0
  407. onnx/backend/test/data/node/test_nllloss_NCd1_ii_expanded/model.onnx +0 -0
  408. onnx/backend/test/data/node/test_nllloss_NCd1_mean_weight_negative_ii/model.onnx +0 -0
  409. onnx/backend/test/data/node/test_nllloss_NCd1_mean_weight_negative_ii_expanded/model.onnx +0 -0
  410. onnx/backend/test/data/node/test_nllloss_NCd1_weight/model.onnx +0 -0
  411. onnx/backend/test/data/node/test_nllloss_NCd1_weight_expanded/model.onnx +0 -0
  412. onnx/backend/test/data/node/test_nllloss_NCd1_weight_ii/model.onnx +0 -0
  413. onnx/backend/test/data/node/test_nllloss_NCd1_weight_ii_expanded/model.onnx +0 -0
  414. onnx/backend/test/data/node/test_nllloss_NCd1d2/model.onnx +0 -0
  415. onnx/backend/test/data/node/test_nllloss_NCd1d2_expanded/model.onnx +0 -0
  416. onnx/backend/test/data/node/test_nllloss_NCd1d2_no_weight_reduction_mean_ii/model.onnx +0 -0
  417. onnx/backend/test/data/node/test_nllloss_NCd1d2_no_weight_reduction_mean_ii_expanded/model.onnx +0 -0
  418. onnx/backend/test/data/node/test_nllloss_NCd1d2_reduction_mean/model.onnx +0 -0
  419. onnx/backend/test/data/node/test_nllloss_NCd1d2_reduction_mean_expanded/model.onnx +0 -0
  420. onnx/backend/test/data/node/test_nllloss_NCd1d2_reduction_sum/model.onnx +0 -0
  421. onnx/backend/test/data/node/test_nllloss_NCd1d2_reduction_sum_expanded/model.onnx +0 -0
  422. onnx/backend/test/data/node/test_nllloss_NCd1d2_with_weight/model.onnx +0 -0
  423. onnx/backend/test/data/node/test_nllloss_NCd1d2_with_weight_expanded/model.onnx +0 -0
  424. onnx/backend/test/data/node/test_nllloss_NCd1d2_with_weight_reduction_mean/model.onnx +0 -0
  425. onnx/backend/test/data/node/test_nllloss_NCd1d2_with_weight_reduction_mean_expanded/model.onnx +0 -0
  426. onnx/backend/test/data/node/test_nllloss_NCd1d2_with_weight_reduction_sum/model.onnx +0 -0
  427. onnx/backend/test/data/node/test_nllloss_NCd1d2_with_weight_reduction_sum_expanded/model.onnx +0 -0
  428. onnx/backend/test/data/node/test_nllloss_NCd1d2_with_weight_reduction_sum_ii/model.onnx +0 -0
  429. onnx/backend/test/data/node/test_nllloss_NCd1d2_with_weight_reduction_sum_ii_expanded/model.onnx +0 -0
  430. onnx/backend/test/data/node/test_nllloss_NCd1d2d3_none_no_weight_negative_ii/model.onnx +0 -0
  431. onnx/backend/test/data/node/test_nllloss_NCd1d2d3_none_no_weight_negative_ii_expanded/model.onnx +0 -0
  432. onnx/backend/test/data/node/test_nllloss_NCd1d2d3_sum_weight_high_ii/model.onnx +0 -0
  433. onnx/backend/test/data/node/test_nllloss_NCd1d2d3_sum_weight_high_ii_expanded/model.onnx +0 -0
  434. onnx/backend/test/data/node/test_nllloss_NCd1d2d3d4d5_mean_weight/model.onnx +0 -0
  435. onnx/backend/test/data/node/test_nllloss_NCd1d2d3d4d5_mean_weight_expanded/model.onnx +0 -0
  436. onnx/backend/test/data/node/test_nllloss_NCd1d2d3d4d5_none_no_weight/model.onnx +0 -0
  437. onnx/backend/test/data/node/test_nllloss_NCd1d2d3d4d5_none_no_weight_expanded/model.onnx +0 -0
  438. onnx/backend/test/data/node/test_quantizelinear_int4/test_data_set_0/output_0.pb +1 -1
  439. onnx/backend/test/data/node/test_reduce_log_sum_exp_do_not_keepdims_random/test_data_set_0/output_0.pb +1 -1
  440. onnx/backend/test/data/node/test_reduce_log_sum_exp_do_not_keepdims_random_expanded/test_data_set_0/output_0.pb +1 -1
  441. onnx/backend/test/data/node/test_reduce_log_sum_exp_keepdims_random/test_data_set_0/output_0.pb +1 -1
  442. onnx/backend/test/data/node/test_reduce_log_sum_exp_keepdims_random_expanded/test_data_set_0/output_0.pb +1 -1
  443. onnx/backend/test/data/node/test_reduce_log_sum_exp_negative_axes_keepdims_random/test_data_set_0/output_0.pb +1 -1
  444. onnx/backend/test/data/node/test_reduce_log_sum_exp_negative_axes_keepdims_random_expanded/test_data_set_0/output_0.pb +1 -1
  445. onnx/backend/test/data/node/test_reduce_max_empty_set/model.onnx +0 -0
  446. onnx/backend/test/data/node/test_reduce_max_empty_set/test_data_set_0/input_0.pb +0 -0
  447. onnx/backend/test/data/node/test_reduce_max_empty_set/test_data_set_0/input_1.pb +0 -0
  448. onnx/backend/test/data/node/test_reduce_max_empty_set/test_data_set_0/output_0.pb +0 -0
  449. onnx/backend/test/data/node/test_reduce_sum_empty_axes_input_noop/model.onnx +0 -0
  450. onnx/backend/test/data/node/test_reduce_sum_empty_axes_input_noop/test_data_set_0/input_0.pb +1 -0
  451. onnx/backend/test/data/node/test_reduce_sum_empty_axes_input_noop/test_data_set_0/input_1.pb +0 -0
  452. onnx/backend/test/data/node/test_reduce_sum_empty_axes_input_noop/test_data_set_0/output_0.pb +1 -0
  453. onnx/backend/test/data/node/test_reduce_sum_negative_axes_keepdims_random/model.onnx +0 -0
  454. onnx/backend/test/data/node/test_reduce_sum_negative_axes_keepdims_random/test_data_set_0/input_1.pb +0 -0
  455. onnx/backend/test/data/node/test_reduce_sum_negative_axes_keepdims_random/test_data_set_0/output_0.pb +1 -1
  456. onnx/backend/test/data/node/test_resize_tf_crop_and_resize/model.onnx +0 -0
  457. onnx/backend/test/data/node/test_resize_tf_crop_and_resize/test_data_set_0/input_1.pb +0 -0
  458. onnx/backend/test/data/node/test_resize_tf_crop_and_resize/test_data_set_0/output_0.pb +0 -0
  459. onnx/backend/test/data/node/test_resize_tf_crop_and_resize_extrapolation_value/model.onnx +0 -0
  460. onnx/backend/test/data/node/test_resize_tf_crop_and_resize_extrapolation_value/test_data_set_0/input_0.pb +0 -0
  461. onnx/backend/test/data/node/test_resize_tf_crop_and_resize_extrapolation_value/test_data_set_0/input_1.pb +0 -0
  462. onnx/backend/test/data/node/test_resize_tf_crop_and_resize_extrapolation_value/test_data_set_0/input_2.pb +0 -0
  463. onnx/backend/test/data/node/test_resize_tf_crop_and_resize_extrapolation_value/test_data_set_0/output_0.pb +0 -0
  464. onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_not_larger/model.onnx +0 -0
  465. onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_not_larger/test_data_set_0/output_0.pb +0 -0
  466. onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_not_smaller/model.onnx +0 -0
  467. onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_not_smaller/test_data_set_0/input_0.pb +0 -0
  468. onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_not_smaller/test_data_set_0/input_1.pb +0 -0
  469. onnx/backend/test/data/node/test_resize_upsample_sizes_nearest_not_smaller/test_data_set_0/output_0.pb +0 -0
  470. onnx/backend/test/data/node/test_rnn_seq_length/model.onnx +0 -0
  471. onnx/backend/test/data/node/test_roialign_aligned_false/model.onnx +0 -0
  472. onnx/backend/test/data/node/test_roialign_aligned_true/model.onnx +0 -0
  473. onnx/backend/test/data/node/test_roialign_mode_max/model.onnx +0 -0
  474. onnx/backend/test/data/node/test_round/model.onnx +0 -0
  475. onnx/backend/test/data/node/test_selu/model.onnx +0 -0
  476. onnx/backend/test/data/node/test_selu_default/model.onnx +0 -0
  477. onnx/backend/test/data/node/test_selu_example/model.onnx +0 -0
  478. onnx/backend/test/data/node/test_simple_rnn_batchwise/model.onnx +0 -0
  479. onnx/backend/test/data/node/test_simple_rnn_defaults/model.onnx +0 -0
  480. onnx/backend/test/data/node/test_simple_rnn_with_initial_bias/model.onnx +0 -0
  481. onnx/backend/test/data/node/test_sin/model.onnx +0 -0
  482. onnx/backend/test/data/node/test_sin_example/model.onnx +0 -0
  483. onnx/backend/test/data/node/test_sinh/model.onnx +0 -0
  484. onnx/backend/test/data/node/test_sinh/test_data_set_0/output_0.pb +1 -1
  485. onnx/backend/test/data/node/test_sinh_example/model.onnx +0 -0
  486. onnx/backend/test/data/node/test_softplus/model.onnx +0 -0
  487. onnx/backend/test/data/node/test_softplus_example/model.onnx +0 -0
  488. onnx/backend/test/data/node/test_softsign/model.onnx +0 -0
  489. onnx/backend/test/data/node/test_softsign_example/model.onnx +0 -0
  490. onnx/backend/test/data/node/test_stft_with_window/test_data_set_0/input_2.pb +0 -0
  491. onnx/backend/test/data/node/test_stft_with_window/test_data_set_0/output_0.pb +0 -0
  492. onnx/backend/test/data/node/test_tan/model.onnx +0 -0
  493. onnx/backend/test/data/node/test_tan/test_data_set_0/output_0.pb +1 -1
  494. onnx/backend/test/data/node/test_tan_example/model.onnx +0 -0
  495. onnx/backend/test/data/node/test_thresholdedrelu/model.onnx +0 -0
  496. onnx/backend/test/data/node/test_thresholdedrelu_default/model.onnx +0 -0
  497. onnx/backend/test/data/node/test_thresholdedrelu_example/model.onnx +0 -0
  498. onnx/backend/test/data/node/test_training_dropout/model.onnx +0 -0
  499. onnx/backend/test/data/node/test_training_dropout_default/model.onnx +0 -0
  500. onnx/backend/test/data/node/test_training_dropout_default_mask/model.onnx +0 -0
  501. onnx/backend/test/data/node/test_training_dropout_mask/model.onnx +0 -0
  502. onnx/backend/test/data/node/test_training_dropout_zero_ratio/model.onnx +0 -0
  503. onnx/backend/test/data/node/test_training_dropout_zero_ratio_mask/model.onnx +0 -0
  504. onnx/backend/test/loader/__init__.py +11 -6
  505. onnx/backend/test/report/__init__.py +4 -3
  506. onnx/backend/test/report/base.py +1 -0
  507. onnx/backend/test/report/coverage.py +21 -20
  508. onnx/backend/test/runner/__init__.py +13 -11
  509. onnx/backend/test/runner/item.py +3 -2
  510. onnx/backend/test/stat_coverage.py +6 -5
  511. onnx/bin/checker.py +1 -0
  512. onnx/checker.cc +6 -1
  513. onnx/common/version.h +1 -1
  514. onnx/compose.py +66 -50
  515. onnx/cpp2py_export.cc +4 -0
  516. onnx/defs/__init__.py +2 -2
  517. onnx/defs/data_type_utils.cc +0 -1
  518. onnx/defs/gen_doc.py +9 -8
  519. onnx/defs/gen_shape_inference_information.py +1 -0
  520. onnx/defs/generator/defs.cc +32 -84
  521. onnx/defs/generator/old.cc +389 -0
  522. onnx/defs/math/defs.cc +308 -313
  523. onnx/defs/math/old.cc +996 -9
  524. onnx/defs/math/utils.cc +12 -1
  525. onnx/defs/math/utils.h +2 -0
  526. onnx/defs/nn/defs.cc +57 -75
  527. onnx/defs/nn/old.cc +1536 -2
  528. onnx/defs/object_detection/defs.cc +4 -7
  529. onnx/defs/object_detection/old.cc +117 -0
  530. onnx/defs/operator_sets.h +108 -1
  531. onnx/defs/parser.cc +10 -1
  532. onnx/defs/quantization/defs.cc +3 -2
  533. onnx/defs/quantization/old.cc +4 -1
  534. onnx/defs/rnn/defs.cc +10 -13
  535. onnx/defs/rnn/old.cc +517 -2
  536. onnx/defs/schema.cc +53 -59
  537. onnx/defs/schema.h +58 -2
  538. onnx/defs/shape_inference.h +67 -18
  539. onnx/defs/tensor/defs.cc +22 -20
  540. onnx/defs/tensor/old.cc +114 -3
  541. onnx/external_data_helper.py +27 -14
  542. onnx/gen_proto.py +3 -2
  543. onnx/helper.py +86 -61
  544. onnx/hub.py +39 -35
  545. onnx/inliner/inliner.cc +0 -1
  546. onnx/mapping.py +3 -2
  547. onnx/numpy_helper.py +159 -23
  548. onnx/onnx-ml.proto +1 -1
  549. onnx/onnx.in.proto +1 -1
  550. onnx/onnx.proto +1 -1
  551. onnx/onnx_cpp2py_export/defs.pyi +0 -2
  552. onnx/onnx_cpp2py_export/inliner.pyi +0 -4
  553. onnx/onnx_cpp2py_export/parser.pyi +0 -4
  554. onnx/onnx_cpp2py_export.cp39-win_amd64.pyd +0 -0
  555. onnx/parser.py +1 -0
  556. onnx/printer.py +2 -3
  557. onnx/reference/__init__.py +1 -0
  558. onnx/reference/custom_element_types.py +73 -8
  559. onnx/reference/op_run.py +13 -58
  560. onnx/reference/ops/__init__.py +1 -0
  561. onnx/reference/ops/_helpers.py +6 -4
  562. onnx/reference/ops/_op.py +16 -5
  563. onnx/reference/ops/_op_common_indices.py +1 -1
  564. onnx/reference/ops/_op_common_pool.py +38 -29
  565. onnx/reference/ops/_op_common_random.py +1 -1
  566. onnx/reference/ops/_op_common_window.py +2 -2
  567. onnx/reference/ops/_op_list.py +9 -6
  568. onnx/reference/ops/aionnx_preview_training/__init__.py +1 -0
  569. onnx/reference/ops/aionnx_preview_training/_op_list.py +5 -7
  570. onnx/reference/ops/aionnx_preview_training/_op_run_training.py +1 -1
  571. onnx/reference/ops/aionnx_preview_training/op_adagrad.py +14 -5
  572. onnx/reference/ops/aionnx_preview_training/op_adam.py +2 -2
  573. onnx/reference/ops/aionnx_preview_training/op_momentum.py +14 -2
  574. onnx/reference/ops/aionnxml/__init__.py +1 -0
  575. onnx/reference/ops/aionnxml/_common_classifier.py +1 -0
  576. onnx/reference/ops/aionnxml/_op_list.py +5 -6
  577. onnx/reference/ops/aionnxml/_op_run_aionnxml.py +1 -1
  578. onnx/reference/ops/aionnxml/op_array_feature_extractor.py +1 -1
  579. onnx/reference/ops/aionnxml/op_binarizer.py +1 -1
  580. onnx/reference/ops/aionnxml/op_dict_vectorizer.py +2 -2
  581. onnx/reference/ops/aionnxml/op_feature_vectorizer.py +1 -1
  582. onnx/reference/ops/aionnxml/op_imputer.py +3 -3
  583. onnx/reference/ops/aionnxml/op_label_encoder.py +1 -1
  584. onnx/reference/ops/aionnxml/op_linear_classifier.py +2 -2
  585. onnx/reference/ops/aionnxml/op_linear_regressor.py +1 -1
  586. onnx/reference/ops/aionnxml/op_normalizer.py +1 -1
  587. onnx/reference/ops/aionnxml/op_one_hot_encoder.py +1 -1
  588. onnx/reference/ops/aionnxml/op_scaler.py +1 -1
  589. onnx/reference/ops/aionnxml/op_svm_classifier.py +10 -7
  590. onnx/reference/ops/aionnxml/op_svm_helper.py +2 -2
  591. onnx/reference/ops/aionnxml/op_svm_regressor.py +1 -1
  592. onnx/reference/ops/aionnxml/op_tree_ensemble.py +3 -3
  593. onnx/reference/ops/aionnxml/op_tree_ensemble_classifier.py +1 -1
  594. onnx/reference/ops/aionnxml/op_tree_ensemble_helper.py +2 -2
  595. onnx/reference/ops/aionnxml/op_tree_ensemble_regressor.py +5 -3
  596. onnx/reference/ops/experimental/__init__.py +1 -0
  597. onnx/reference/ops/experimental/_op_list.py +6 -12
  598. onnx/reference/ops/experimental/_op_run_experimental.py +1 -1
  599. onnx/reference/ops/experimental/op_im2col.py +1 -1
  600. onnx/reference/ops/op_abs.py +1 -1
  601. onnx/reference/ops/op_acos.py +1 -1
  602. onnx/reference/ops/op_acosh.py +1 -1
  603. onnx/reference/ops/op_add.py +1 -1
  604. onnx/reference/ops/op_affine_grid.py +1 -1
  605. onnx/reference/ops/op_and.py +1 -1
  606. onnx/reference/ops/op_argmax.py +1 -1
  607. onnx/reference/ops/op_argmin.py +1 -1
  608. onnx/reference/ops/op_asin.py +1 -1
  609. onnx/reference/ops/op_asinh.py +1 -1
  610. onnx/reference/ops/op_atan.py +1 -1
  611. onnx/reference/ops/op_atanh.py +1 -1
  612. onnx/reference/ops/op_attribute_has_value.py +15 -15
  613. onnx/reference/ops/op_average_pool.py +1 -1
  614. onnx/reference/ops/op_batch_normalization.py +13 -2
  615. onnx/reference/ops/op_bernoulli.py +1 -1
  616. onnx/reference/ops/op_bitshift.py +1 -1
  617. onnx/reference/ops/op_bitwise_and.py +1 -1
  618. onnx/reference/ops/op_bitwise_not.py +1 -1
  619. onnx/reference/ops/op_bitwise_or.py +1 -1
  620. onnx/reference/ops/op_bitwise_xor.py +1 -1
  621. onnx/reference/ops/op_blackman_window.py +1 -1
  622. onnx/reference/ops/op_cast.py +11 -10
  623. onnx/reference/ops/op_cast_like.py +1 -1
  624. onnx/reference/ops/op_ceil.py +1 -1
  625. onnx/reference/ops/op_celu.py +1 -1
  626. onnx/reference/ops/op_center_crop_pad.py +1 -1
  627. onnx/reference/ops/op_clip.py +1 -1
  628. onnx/reference/ops/op_col2im.py +10 -4
  629. onnx/reference/ops/op_compress.py +1 -1
  630. onnx/reference/ops/op_concat.py +1 -1
  631. onnx/reference/ops/op_concat_from_sequence.py +3 -3
  632. onnx/reference/ops/op_constant.py +2 -2
  633. onnx/reference/ops/op_constant_of_shape.py +1 -1
  634. onnx/reference/ops/op_conv.py +22 -17
  635. onnx/reference/ops/op_conv_integer.py +1 -1
  636. onnx/reference/ops/op_conv_transpose.py +37 -6
  637. onnx/reference/ops/op_cos.py +1 -1
  638. onnx/reference/ops/op_cosh.py +1 -1
  639. onnx/reference/ops/op_cum_sum.py +1 -1
  640. onnx/reference/ops/op_deform_conv.py +1 -1
  641. onnx/reference/ops/op_depth_to_space.py +1 -1
  642. onnx/reference/ops/op_dequantize_linear.py +7 -9
  643. onnx/reference/ops/op_det.py +1 -1
  644. onnx/reference/ops/op_dft.py +16 -2
  645. onnx/reference/ops/op_div.py +1 -1
  646. onnx/reference/ops/op_dropout.py +9 -8
  647. onnx/reference/ops/op_dynamic_quantize_linear.py +1 -1
  648. onnx/reference/ops/op_einsum.py +1 -1
  649. onnx/reference/ops/op_elu.py +1 -1
  650. onnx/reference/ops/op_equal.py +1 -1
  651. onnx/reference/ops/op_erf.py +1 -1
  652. onnx/reference/ops/op_exp.py +1 -1
  653. onnx/reference/ops/op_expand.py +1 -1
  654. onnx/reference/ops/op_eyelike.py +2 -2
  655. onnx/reference/ops/op_flatten.py +1 -1
  656. onnx/reference/ops/op_floor.py +1 -1
  657. onnx/reference/ops/op_gather.py +1 -1
  658. onnx/reference/ops/op_gather_elements.py +3 -3
  659. onnx/reference/ops/op_gathernd.py +2 -4
  660. onnx/reference/ops/op_gemm.py +12 -2
  661. onnx/reference/ops/op_global_average_pool.py +1 -1
  662. onnx/reference/ops/op_global_max_pool.py +1 -1
  663. onnx/reference/ops/op_greater.py +1 -1
  664. onnx/reference/ops/op_greater_or_equal.py +1 -1
  665. onnx/reference/ops/op_grid_sample.py +2 -3
  666. onnx/reference/ops/op_gru.py +7 -7
  667. onnx/reference/ops/op_hamming_window.py +1 -1
  668. onnx/reference/ops/op_hann_window.py +1 -1
  669. onnx/reference/ops/op_hard_sigmoid.py +1 -1
  670. onnx/reference/ops/op_hardmax.py +5 -2
  671. onnx/reference/ops/op_identity.py +3 -3
  672. onnx/reference/ops/op_if.py +2 -2
  673. onnx/reference/ops/op_instance_normalization.py +1 -1
  674. onnx/reference/ops/op_isinf.py +1 -1
  675. onnx/reference/ops/op_isnan.py +1 -1
  676. onnx/reference/ops/op_layer_normalization.py +2 -4
  677. onnx/reference/ops/op_leaky_relu.py +1 -1
  678. onnx/reference/ops/op_less.py +1 -1
  679. onnx/reference/ops/op_less_or_equal.py +1 -1
  680. onnx/reference/ops/op_log.py +1 -1
  681. onnx/reference/ops/op_log_softmax.py +1 -1
  682. onnx/reference/ops/op_loop.py +4 -2
  683. onnx/reference/ops/op_lp_normalization.py +1 -1
  684. onnx/reference/ops/op_lp_pool.py +4 -2
  685. onnx/reference/ops/op_lrn.py +1 -1
  686. onnx/reference/ops/op_lstm.py +9 -11
  687. onnx/reference/ops/op_matmul.py +1 -1
  688. onnx/reference/ops/op_matmul_integer.py +1 -1
  689. onnx/reference/ops/op_max.py +1 -1
  690. onnx/reference/ops/op_max_pool.py +8 -8
  691. onnx/reference/ops/op_max_unpool.py +5 -3
  692. onnx/reference/ops/op_mean.py +1 -1
  693. onnx/reference/ops/op_mel_weight_matrix.py +1 -1
  694. onnx/reference/ops/op_min.py +1 -1
  695. onnx/reference/ops/op_mod.py +1 -1
  696. onnx/reference/ops/op_mul.py +1 -1
  697. onnx/reference/ops/op_neg.py +1 -1
  698. onnx/reference/ops/op_negative_log_likelihood_loss.py +4 -2
  699. onnx/reference/ops/op_non_max_suppression.py +10 -11
  700. onnx/reference/ops/op_non_zero.py +1 -1
  701. onnx/reference/ops/op_not.py +1 -1
  702. onnx/reference/ops/op_one_hot.py +1 -1
  703. onnx/reference/ops/op_optional.py +1 -1
  704. onnx/reference/ops/op_optional_get_element.py +1 -1
  705. onnx/reference/ops/op_optional_has_element.py +1 -1
  706. onnx/reference/ops/op_or.py +1 -1
  707. onnx/reference/ops/op_pad.py +1 -1
  708. onnx/reference/ops/op_pool_common.py +7 -6
  709. onnx/reference/ops/op_pow.py +1 -1
  710. onnx/reference/ops/op_prelu.py +3 -3
  711. onnx/reference/ops/op_qlinear_conv.py +1 -1
  712. onnx/reference/ops/op_qlinear_matmul.py +1 -1
  713. onnx/reference/ops/op_quantize_linear.py +15 -9
  714. onnx/reference/ops/op_random_normal.py +1 -1
  715. onnx/reference/ops/op_random_normal_like.py +1 -1
  716. onnx/reference/ops/op_random_uniform.py +1 -1
  717. onnx/reference/ops/op_random_uniform_like.py +1 -1
  718. onnx/reference/ops/op_range.py +1 -1
  719. onnx/reference/ops/op_reciprocal.py +1 -1
  720. onnx/reference/ops/op_reduce_l1.py +1 -1
  721. onnx/reference/ops/op_reduce_l2.py +1 -1
  722. onnx/reference/ops/op_reduce_log_sum.py +1 -1
  723. onnx/reference/ops/op_reduce_log_sum_exp.py +1 -1
  724. onnx/reference/ops/op_reduce_max.py +1 -1
  725. onnx/reference/ops/op_reduce_mean.py +2 -2
  726. onnx/reference/ops/op_reduce_min.py +1 -1
  727. onnx/reference/ops/op_reduce_prod.py +1 -1
  728. onnx/reference/ops/op_reduce_sum.py +2 -2
  729. onnx/reference/ops/op_reduce_sum_square.py +1 -1
  730. onnx/reference/ops/op_regex_full_match.py +1 -1
  731. onnx/reference/ops/op_relu.py +1 -1
  732. onnx/reference/ops/op_reshape.py +1 -1
  733. onnx/reference/ops/op_reverse_sequence.py +1 -1
  734. onnx/reference/ops/op_rnn.py +10 -8
  735. onnx/reference/ops/op_roi_align.py +5 -5
  736. onnx/reference/ops/op_round.py +1 -1
  737. onnx/reference/ops/op_scan.py +8 -8
  738. onnx/reference/ops/op_scatter_elements.py +19 -50
  739. onnx/reference/ops/op_scatternd.py +1 -1
  740. onnx/reference/ops/op_selu.py +1 -1
  741. onnx/reference/ops/op_sequence_at.py +1 -1
  742. onnx/reference/ops/op_sequence_construct.py +1 -1
  743. onnx/reference/ops/op_sequence_empty.py +2 -2
  744. onnx/reference/ops/op_sequence_erase.py +1 -1
  745. onnx/reference/ops/op_sequence_insert.py +6 -6
  746. onnx/reference/ops/op_sequence_length.py +1 -1
  747. onnx/reference/ops/op_sequence_map.py +1 -1
  748. onnx/reference/ops/op_shape.py +2 -6
  749. onnx/reference/ops/op_shrink.py +1 -1
  750. onnx/reference/ops/op_sigmoid.py +1 -1
  751. onnx/reference/ops/op_sign.py +1 -1
  752. onnx/reference/ops/op_sin.py +1 -1
  753. onnx/reference/ops/op_sinh.py +1 -1
  754. onnx/reference/ops/op_size.py +1 -1
  755. onnx/reference/ops/op_slice.py +3 -5
  756. onnx/reference/ops/op_softmax.py +1 -1
  757. onnx/reference/ops/op_softmax_cross_entropy_loss.py +1 -1
  758. onnx/reference/ops/op_softplus.py +1 -1
  759. onnx/reference/ops/op_softsign.py +1 -1
  760. onnx/reference/ops/op_space_to_depth.py +1 -1
  761. onnx/reference/ops/op_split.py +1 -1
  762. onnx/reference/ops/op_split_to_sequence.py +5 -7
  763. onnx/reference/ops/op_sqrt.py +1 -1
  764. onnx/reference/ops/op_squeeze.py +1 -1
  765. onnx/reference/ops/op_stft.py +3 -2
  766. onnx/reference/ops/op_string_concat.py +1 -1
  767. onnx/reference/ops/op_string_normalizer.py +8 -8
  768. onnx/reference/ops/op_string_split.py +2 -4
  769. onnx/reference/ops/op_sub.py +1 -1
  770. onnx/reference/ops/op_sum.py +1 -1
  771. onnx/reference/ops/op_tan.py +1 -1
  772. onnx/reference/ops/op_tanh.py +1 -1
  773. onnx/reference/ops/op_tfidf_vectorizer.py +11 -12
  774. onnx/reference/ops/op_thresholded_relu.py +1 -1
  775. onnx/reference/ops/op_tile.py +1 -1
  776. onnx/reference/ops/op_topk.py +7 -2
  777. onnx/reference/ops/op_transpose.py +1 -1
  778. onnx/reference/ops/op_trilu.py +1 -1
  779. onnx/reference/ops/op_unique.py +3 -1
  780. onnx/reference/ops/op_unsqueeze.py +2 -2
  781. onnx/reference/ops/op_upsample.py +1 -1
  782. onnx/reference/ops/op_where.py +1 -1
  783. onnx/reference/ops/op_xor.py +1 -1
  784. onnx/reference/ops_optimized/__init__.py +1 -0
  785. onnx/reference/ops_optimized/op_conv_optimized.py +1 -1
  786. onnx/reference/reference_evaluator.py +27 -13
  787. onnx/serialization.py +1 -1
  788. onnx/shape_inference/implementation.cc +15 -1
  789. onnx/shape_inference/implementation.h +15 -1
  790. onnx/shape_inference.py +1 -1
  791. onnx/subbyte.py +6 -6
  792. onnx/test/basic_test.py +1 -0
  793. onnx/test/checker_test.py +37 -2
  794. onnx/test/compose_test.py +12 -11
  795. onnx/test/cpp/schema_registration_test.cc +3 -3
  796. onnx/test/cpp/shape_inference_test.cc +38 -2
  797. onnx/test/elu_test.py +2 -0
  798. onnx/test/function_inference_test.py +2 -0
  799. onnx/test/function_test.py +1 -0
  800. onnx/test/helper_test.py +77 -16
  801. onnx/test/hub_test.py +1 -1
  802. onnx/test/inference_function_test.py +25 -8
  803. onnx/test/inliner_test.py +2 -0
  804. onnx/test/model_container_refeval_test.py +2 -1
  805. onnx/test/model_container_test.py +1 -0
  806. onnx/test/model_inference_test.py +2 -0
  807. onnx/test/numpy_helper_test.py +56 -1
  808. onnx/test/parser_test.py +48 -2
  809. onnx/test/printer_test.py +2 -0
  810. onnx/test/reference_evaluator_ml_test.py +2 -3
  811. onnx/test/reference_evaluator_model_test.py +2 -0
  812. onnx/test/reference_evaluator_test.py +173 -19
  813. onnx/test/relu_test.py +2 -0
  814. onnx/test/schema_test.py +4 -2
  815. onnx/test/serialization_test.py +2 -0
  816. onnx/test/shape_inference_test.py +349 -19
  817. onnx/test/symbolic_shape_test.py +3 -3
  818. onnx/test/test_backend_onnxruntime.py +272 -1
  819. onnx/test/test_backend_reference.py +24 -3
  820. onnx/test/test_backend_test.py +6 -5
  821. onnx/test/test_external_data.py +91 -2
  822. onnx/test/test_with_ort.py +1 -0
  823. onnx/test/tools_test.py +15 -14
  824. onnx/test/training_tool_test.py +1 -0
  825. onnx/test/utils_test.py +1 -0
  826. onnx/test/version_converter/automatic_downgrade_test.py +2 -0
  827. onnx/test/version_converter/automatic_upgrade_test.py +2 -0
  828. onnx/test/version_converter_test.py +26 -7
  829. onnx/test/version_utils.py +8 -0
  830. onnx/tools/net_drawer.py +7 -6
  831. onnx/tools/replace_constants.py +11 -11
  832. onnx/tools/update_model_dims.py +7 -6
  833. onnx/utils.py +104 -21
  834. onnx/version.py +2 -2
  835. onnx/version_converter/adapters/split_17_18.h +1 -1
  836. onnx/version_converter/convert.h +107 -2
  837. onnx/version_converter.py +3 -2
  838. {onnx-1.16.1.dist-info → onnx-1.17.0.dist-info}/METADATA +8 -11
  839. {onnx-1.16.1.dist-info → onnx-1.17.0.dist-info}/RECORD +843 -817
  840. {onnx-1.16.1.dist-info → onnx-1.17.0.dist-info}/WHEEL +1 -1
  841. {onnx-1.16.1.dist-info → onnx-1.17.0.dist-info}/LICENSE +0 -0
  842. {onnx-1.16.1.dist-info → onnx-1.17.0.dist-info}/entry_points.txt +0 -0
  843. {onnx-1.16.1.dist-info → onnx-1.17.0.dist-info}/top_level.txt +0 -0
onnx/defs/math/old.cc CHANGED
@@ -4,6 +4,7 @@
4
4
 
5
5
  #include <functional>
6
6
 
7
+ #include "onnx/defs/data_type_utils.h"
7
8
  #include "onnx/defs/function.h"
8
9
  #include "onnx/defs/math/utils.h"
9
10
  #include "onnx/defs/schema.h"
@@ -11,6 +12,978 @@
11
12
 
12
13
  namespace ONNX_NAMESPACE {
13
14
 
15
+ bool BuildContextDependentFunctionBody_opset13(
16
+ const FunctionBodyBuildContext& ctx,
17
+ const OpSchema& schema,
18
+ FunctionProto& functionProto) {
19
+ if (ctx.getInputType(0) == nullptr) {
20
+ // we cannot create a correct function body without knowing the input type
21
+ return false;
22
+ }
23
+ auto input_type = ctx.getInputType(0)->tensor_type().elem_type();
24
+ bool float_input = input_type == TensorProto_DataType_FLOAT;
25
+ auto reduction_attr_proto = ctx.getAttribute("reduction");
26
+ std::string reduction_attr =
27
+ reduction_attr_proto != nullptr && reduction_attr_proto->has_s() ? reduction_attr_proto->s() : "mean";
28
+
29
+ FunctionBuilder builder(functionProto);
30
+ builder.Const1D("const_zero", int64_t(0))
31
+ .Const1D("const_one", int64_t(1))
32
+ .Const1D("axes", int64_t(1))
33
+ .Add("expanded_target = Unsqueeze (target, axes)");
34
+
35
+ if (ctx.getAttribute("ignore_index") == nullptr) {
36
+ builder.Add(R"(
37
+ input_gather_element = GatherElements <axis = 1> (input, expanded_target)
38
+ loss_NCdd = Neg (input_gather_element)
39
+ loss_N1dd = Slice (loss_NCdd, const_zero, const_one, const_one)
40
+ )");
41
+
42
+ if (!ctx.hasInput(2)) {
43
+ if (reduction_attr == "none") {
44
+ builder.Add("loss = Squeeze (loss_N1dd, axes)");
45
+ } else {
46
+ builder.Add("loss_Ndd = Squeeze (loss_N1dd, axes)");
47
+ if (reduction_attr == "mean") {
48
+ builder.Add("loss = ReduceMean <keepdims = 0> (loss_Ndd)");
49
+ } else {
50
+ builder.Add("loss = ReduceSum <keepdims = 0> (loss_Ndd)");
51
+ }
52
+ }
53
+ } else {
54
+ builder.Add("weight_gather = Gather (weight, target)");
55
+ builder.Add("loss_unweighted = Squeeze (loss_N1dd, axes)");
56
+ if (reduction_attr == "none") {
57
+ builder.Add("loss = Mul (loss_unweighted, weight_gather)");
58
+ } else {
59
+ builder.Add("loss_Ndd = Mul (loss_unweighted, weight_gather)");
60
+ if (reduction_attr == "mean") {
61
+ builder.Add(R"(
62
+ loss_sum = ReduceSum <keepdims = 0> (loss_Ndd)
63
+ weight_gather_sum = ReduceSum <keepdims = 0> (weight_gather)
64
+ loss = Div (loss_sum, weight_gather_sum)
65
+ )");
66
+ } else {
67
+ builder.Add("loss = ReduceSum <keepdims = 0> (loss_Ndd)");
68
+ }
69
+ }
70
+ }
71
+ } else {
72
+ builder.Const1D("const_ignore_index", ctx.getAttribute("ignore_index")->i());
73
+ builder.Add(R"(
74
+ const_zero_target_typed = Sub (expanded_target, expanded_target)
75
+ expanded_target_int64 = Cast <to = 7> (expanded_target)
76
+ mask = Equal (expanded_target_int64, const_ignore_index)
77
+ transform_targets = Where (mask, const_zero_target_typed, expanded_target)
78
+ )");
79
+ builder.Add("input_gather_element = GatherElements <axis = 1> (input, transform_targets)");
80
+ builder.Const1D("const_zero_float", 0.0f);
81
+ if (!float_input) {
82
+ builder.Add("const_zero_casted = Cast (const_zero_float)", "to", static_cast<int64_t>(input_type))
83
+ .Add("input_gather_element_transform = Where (mask, const_zero_casted, input_gather_element)");
84
+ } else
85
+ builder.Add("input_gather_element_transform = Where (mask, const_zero_float, input_gather_element)");
86
+ builder.Add("loss_NCdd = Neg (input_gather_element_transform)");
87
+ builder.Add("loss_N1dd = Slice (loss_NCdd, const_zero, const_one, const_one)");
88
+
89
+ if (!ctx.hasInput(2)) {
90
+ builder.Add("squeeze_mask = Squeeze (mask, axes)");
91
+ builder.Const1D("const_one_float", 1.0f);
92
+ if (!float_input) {
93
+ builder.Add("const_one_casted = Cast (const_one_float)", "to", static_cast<int64_t>(input_type))
94
+ .Add("weight_gather = Where (squeeze_mask, const_zero_casted, const_one_casted)");
95
+ } else
96
+ builder.Add("weight_gather = Where (squeeze_mask, const_zero_float, const_one_float)");
97
+
98
+ } else {
99
+ builder.Add("weight_gather_temp = Gather (weight, transform_targets)");
100
+ builder.Add(
101
+ float_input ? "weight_gather_temp_1 = Where (mask, const_zero_float, weight_gather_temp)"
102
+ : "weight_gather_temp_1 = Where (mask, const_zero_casted, weight_gather_temp)");
103
+ builder.Add("weight_gather = Squeeze (weight_gather_temp_1, axes)");
104
+ }
105
+
106
+ builder.Add("loss_unweighted = Squeeze (loss_N1dd, axes)");
107
+ if (reduction_attr == "none") {
108
+ builder.Add("loss = Mul (loss_unweighted, weight_gather)");
109
+ } else {
110
+ builder.Add("loss_Ndd = Mul (loss_unweighted, weight_gather)");
111
+ if (reduction_attr == "mean") {
112
+ builder.Add(R"(
113
+ loss_sum = ReduceSum <keepdims = 0> (loss_Ndd)
114
+ weight_gather_sum = ReduceSum <keepdims = 0> (weight_gather)
115
+ loss = Div (loss_sum, weight_gather_sum)
116
+ )");
117
+ } else {
118
+ builder.Add("loss = ReduceSum <keepdims = 0> (loss_Ndd)");
119
+ }
120
+ }
121
+ }
122
+
123
+ schema.BuildFunction(functionProto);
124
+ return true;
125
+ }
126
+
127
+ static const char* NegativeLogLikelihoodLoss_ver13_doc = R"DOC(
128
+ A NegativeLogLikelihoodLoss operator computes (weighted) negative log likelihood loss.
129
+ Its "input" tensor has the shape of (N, C, d1, d2, ..., dk) where k >= 0.
130
+ The "input" tensor contains log-probabilities for input[n, :, d_1, d_2,..., d_k] being in a class of [0, C).
131
+ The operator's "target" input tensor has the shape of (N, d1, d2, ..., dk). It encodes class labels (one of C classes)
132
+ or it may contain a special value (indicated by an attribute ignore_index) for N x d1 x d2 x ... x dk samples.
133
+ The loss value for input[n, :, d_1, d_2,...d_k] being classified as class c = target[n][d_1][d_2]...[d_k] is computed as:
134
+
135
+ ```
136
+ loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k].
137
+ ```
138
+
139
+ When an optional "weight" is provided, the sample loss is calculated as:
140
+
141
+ ```
142
+ loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k] * weight[c].
143
+ ```
144
+
145
+ loss is zero for the case when target-value equals ignore_index.
146
+
147
+ ```
148
+ loss[n][d_1][d_2]...[d_k] = 0, when target[n][d_1][d_2]...[d_k] = ignore_index
149
+ ```
150
+
151
+ If "reduction" attribute is set to "none", the operator's output will be the above loss with shape (N, d1, d2, ..., dk).
152
+ If "reduction" attribute is set to "mean" (the default attribute value), the output loss is (weight) averaged:
153
+
154
+ ```
155
+ mean(loss), if "weight" is not provided,
156
+ ```
157
+
158
+ or if weight is provided,
159
+
160
+ ```
161
+ sum(loss) / sum(weight[target[n][d_1][d_2]...[d_k]]]), for all samples.
162
+ ```
163
+
164
+ If "reduction" attribute is set to "sum", the output is a scalar: `sum(loss)`.
165
+
166
+ See also https://pytorch.org/docs/stable/nn.html#torch.nn.NLLLoss.
167
+
168
+ Example 1:
169
+
170
+ ```
171
+ // negative log likelihood loss, "none" reduction
172
+ N, C, d1 = 2, 3, 2
173
+ input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],
174
+ [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]
175
+ target = [[2, 1], [0, 2]]
176
+
177
+ loss = np.zeros((N, d1))
178
+ for n in range(N):
179
+ for d_1 in range(d1):
180
+ c = target[n][d_1]
181
+ loss[n][d_1] = -input[n][c][d_1]
182
+
183
+ // print(loss)
184
+ // [[-3. -2.]
185
+ // [-0. -2.]]
186
+ ```
187
+
188
+ Example 2:
189
+
190
+ ```
191
+ // weighted negative log likelihood loss, sum reduction
192
+ N, C, d1 = 2, 3, 2
193
+ input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],
194
+ [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]
195
+ target = [[2, 1], [0, 2]]
196
+ weight = [0.2, 0.3, 0.1]
197
+ loss = np.zeros((N, d1))
198
+ for n in range(N):
199
+ for d_1 in range(d1):
200
+ c = target[n][d_1]
201
+ loss[n][d_1] = -input[n][c][d_1] * weight[c]
202
+
203
+ loss = np.sum(loss)
204
+ // print(loss)
205
+ // -1.1
206
+ ```
207
+
208
+ Example 3:
209
+
210
+ ```
211
+ // weighted negative log likelihood loss, mean reduction
212
+ N, C, d1 = 2, 3, 2
213
+ input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],
214
+ [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]
215
+ target = [[2, 1], [0, 2]]
216
+ weight = [0.2, 0.3, 0.1]
217
+ loss = np.zeros((N, d1))
218
+ weight_total = 0
219
+ for n in range(N):
220
+ for d_1 in range(d1):
221
+ c = target[n][d_1]
222
+ loss[n][d_1] = -input[n][c][d_1] * weight[c]
223
+ weight_total = weight_total + weight[c]
224
+
225
+ loss = np.sum(loss) / weight_total
226
+ // print(loss)
227
+ // -1.57
228
+ ```
229
+ )DOC";
230
+
231
+ ONNX_OPERATOR_SET_SCHEMA(
232
+ NegativeLogLikelihoodLoss,
233
+ 13,
234
+ OpSchema()
235
+ .SetDoc(NegativeLogLikelihoodLoss_ver13_doc)
236
+ .Input(
237
+ 0,
238
+ "input",
239
+ "Input tensor of shape (N, C) or (N, C, d1, d2, ..., dk).",
240
+ "T",
241
+ OpSchema::Single,
242
+ true,
243
+ 1,
244
+ OpSchema::Differentiable)
245
+ .Input(
246
+ 1,
247
+ "target",
248
+ "Target tensor of shape (N) or (N, d1, d2, ..., dk). Target element value shall be in range of [0, C). "
249
+ "If ignore_index is specified, it may have a value outside [0, C) and the target values should either be "
250
+ "in the range [0, C) or have the value ignore_index.",
251
+ "Tind",
252
+ OpSchema::Single,
253
+ true,
254
+ 1,
255
+ OpSchema::NonDifferentiable)
256
+ .Input(
257
+ 2,
258
+ "weight",
259
+ "Optional rescaling weight tensor. "
260
+ "If given, it has to be a tensor of size C. Otherwise, it is treated as if having all ones.",
261
+ "T",
262
+ OpSchema::Optional,
263
+ true,
264
+ 1,
265
+ OpSchema::NonDifferentiable)
266
+ .Output(0, "loss", "The negative log likelihood loss", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
267
+ .Attr(
268
+ "reduction",
269
+ "Type of reduction to apply to loss: none, sum, mean (default). "
270
+ "'none': the output is the loss for each sample. "
271
+ "'sum': the output will be summed. "
272
+ "'mean': the sum of the output will be divided by the sum of applied weights.",
273
+ AttributeProto::STRING,
274
+ std::string("mean"))
275
+ .Attr(
276
+ "ignore_index",
277
+ "Specifies a target value that is ignored and does not contribute to the input gradient. It's an optional value.",
278
+ AttributeProto::INT,
279
+ false)
280
+ .TypeConstraint(
281
+ "T",
282
+ {"tensor(float16)", "tensor(float)", "tensor(double)"},
283
+ "Constrain input, weight, and output types to floating-point tensors.")
284
+ .TypeConstraint("Tind", {"tensor(int32)", "tensor(int64)"}, "Constrain target to integer types")
285
+ .SetContextDependentFunctionBodyBuilder(BuildContextDependentFunctionBody_opset13)
286
+ .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
287
+ // Type inference
288
+ propagateElemTypeFromInputToOutput(ctx, 0, 0);
289
+
290
+ // Shape inference
291
+ if (hasNInputShapes(ctx, 2)) {
292
+ const TensorShapeProto& input_shape = ctx.getInputType(0)->tensor_type().shape();
293
+ const TensorShapeProto& target_shape = ctx.getInputType(1)->tensor_type().shape();
294
+
295
+ const int input_rank = static_cast<int>(input_shape.dim_size());
296
+ const int target_rank = static_cast<int>(target_shape.dim_size());
297
+
298
+ if (input_rank < 2) {
299
+ fail_shape_inference("Input rank must be >= 2.")
300
+ }
301
+ if (target_rank != input_rank - 1) {
302
+ fail_shape_inference("Target rank must be 1 less than the input rank.");
303
+ }
304
+
305
+ // match input dimensions (N, C, d1, ..., dk) with target
306
+ // dimensions of (C, d1, ..., dk)
307
+ for (int dim = 0; dim < target_rank; dim++) {
308
+ const auto input_dim = dim == 0 ? input_shape.dim(dim) : input_shape.dim(dim + 1);
309
+ const auto target_dim = target_shape.dim(dim);
310
+ if (input_dim.has_dim_value() && target_dim.has_dim_value() &&
311
+ input_dim.dim_value() != target_dim.dim_value())
312
+ fail_shape_inference("Input and target dimension value mismatch.");
313
+ }
314
+
315
+ if (ctx.getNumInputs() == 3 && hasInputShape(ctx, 2)) {
316
+ const TensorShapeProto& weight_shape = ctx.getInputType(2)->tensor_type().shape();
317
+ if (weight_shape.dim_size() != 1) {
318
+ fail_shape_inference("Weight rank must be 1.");
319
+ }
320
+ }
321
+
322
+ TensorShapeProto* output_shape = ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape();
323
+
324
+ if (getAttribute(ctx, "reduction", "mean") == "none") {
325
+ // output tensor is of shape (N, d1, d2, ..., dk) if
326
+ // reduction attribute is "none".
327
+ for (int i = 0; i < input_rank - 1; i++) {
328
+ auto* dim = output_shape->add_dim();
329
+ if (i == 0)
330
+ *dim = input_shape.dim(i);
331
+ else
332
+ *dim = input_shape.dim(i + 1);
333
+ }
334
+ }
335
+ // otherwise output is a scalar.
336
+ }
337
+ }));
338
+
339
+ static const char* Det_ver11_doc = R"DOC(
340
+ Det calculates determinant of a square matrix or batches of square matrices.
341
+ Det takes one input tensor of shape `[*, M, M]`, where `*` is zero or more batch dimensions,
342
+ and the inner-most 2 dimensions form square matrices.
343
+ The output is a tensor of shape `[*]`, containing the determinants of all input submatrices.
344
+ e.g., When the input is 2-D, the output is a scalar(shape is empty: `[]`).
345
+ )DOC";
346
+
347
+ ONNX_OPERATOR_SET_SCHEMA(
348
+ Det,
349
+ 11,
350
+ OpSchema()
351
+ .SetDoc(Det_ver11_doc)
352
+ .Input(0, "X", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
353
+ .Output(0, "Y", "Output tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
354
+ .TypeConstraint(
355
+ "T",
356
+ {"tensor(float16)", "tensor(float)", "tensor(double)"},
357
+ "Constrain input and output types to floating-point tensors.")
358
+ .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
359
+ // Type inference
360
+ propagateElemTypeFromInputToOutput(ctx, 0, 0);
361
+
362
+ // Shape inference
363
+ if (hasInputShape(ctx, 0)) {
364
+ const TensorShapeProto& input_shape = ctx.getInputType(0)->tensor_type().shape();
365
+ TensorShapeProto* output_shape = ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape();
366
+ const int rank = static_cast<int>(input_shape.dim_size());
367
+
368
+ if (rank < 2) {
369
+ fail_shape_inference("Input rank must be >= 2.");
370
+ }
371
+
372
+ const auto mat_w = input_shape.dim(rank - 1);
373
+ const auto mat_h = input_shape.dim(rank - 2);
374
+ if (mat_w.has_dim_value() && mat_h.has_dim_value() && (mat_w.dim_value() != mat_h.dim_value())) {
375
+ fail_shape_inference(
376
+ "The inner-most 2 dimensions must have the same size (mat_w:",
377
+ mat_w.dim_value(),
378
+ " != mat_h:",
379
+ mat_h.dim_value(),
380
+ ").");
381
+ }
382
+
383
+ for (int i = 0; i < rank - 2; ++i) {
384
+ auto* dim = output_shape->add_dim();
385
+ *dim = input_shape.dim(i);
386
+ }
387
+ }
388
+ }));
389
+
390
+ static const char* Round_ver11_doc = R"DOC(
391
+ Round takes one input Tensor and rounds the values, element-wise, meaning
392
+ it finds the nearest integer for each value.
393
+ In case of halves, the rule is to round them to the nearest even integer.
394
+ If input x is integral, +0, -0, NaN, or infinite, x itself is returned.
395
+ The output tensor has the same shape and type as the input.
396
+
397
+ Examples:
398
+ ```
399
+ round([0.9]) = [1.0]
400
+ round([2.5]) = [2.0]
401
+ round([2.3]) = [2.0]
402
+ round([1.5]) = [2.0]
403
+ round([-4.5]) = [-4.0]
404
+ ```
405
+ )DOC";
406
+
407
+ ONNX_OPERATOR_SET_SCHEMA(
408
+ Round,
409
+ 11,
410
+ OpSchema()
411
+ .SetDoc(Round_ver11_doc)
412
+ .Input(0, "X", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::NonDifferentiable)
413
+ .Output(0, "Y", "Output tensor", "T", OpSchema::Single, true, 1, OpSchema::NonDifferentiable)
414
+ .TypeConstraint(
415
+ "T",
416
+ {"tensor(float16)", "tensor(float)", "tensor(double)"},
417
+ "Constrain input and output types to float tensors.")
418
+ .TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));
419
+
420
+ static const char* Atanh_ver9_doc = R"DOC(
421
+ Calculates the hyperbolic arctangent of the given input tensor element-wise.
422
+ )DOC";
423
+
424
+ ONNX_OPERATOR_SET_SCHEMA(
425
+ Atanh,
426
+ 9,
427
+ OpSchema()
428
+ .SetDoc(Atanh_ver9_doc)
429
+ .Input(0, "input", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
430
+ .Output(
431
+ 0,
432
+ "output",
433
+ "The hyperbolic arctangent values of the input tensor "
434
+ "computed element-wise",
435
+ "T",
436
+ OpSchema::Single,
437
+ true,
438
+ 1,
439
+ OpSchema::Differentiable)
440
+ .TypeConstraint(
441
+ "T",
442
+ {"tensor(float16)", "tensor(float)", "tensor(double)"},
443
+ "Constrain input and output types to float tensors.")
444
+ .TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));
445
+
446
+ static const char* Acosh_ver9_doc = R"DOC(
447
+ Calculates the hyperbolic arccosine of the given input tensor element-wise.
448
+ )DOC";
449
+
450
+ ONNX_OPERATOR_SET_SCHEMA(
451
+ Acosh,
452
+ 9,
453
+ OpSchema()
454
+ .SetDoc(Acosh_ver9_doc)
455
+ .Input(0, "input", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
456
+ .Output(
457
+ 0,
458
+ "output",
459
+ "The hyperbolic arccosine values of the input tensor "
460
+ "computed element-wise",
461
+ "T",
462
+ OpSchema::Single,
463
+ true,
464
+ 1,
465
+ OpSchema::Differentiable)
466
+ .TypeConstraint(
467
+ "T",
468
+ {"tensor(float16)", "tensor(float)", "tensor(double)"},
469
+ "Constrain input and output types to float tensors.")
470
+ .TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));
471
+
472
+ static const char* Asinh_ver9_doc = R"DOC(
473
+ Calculates the hyperbolic arcsine of the given input tensor element-wise.
474
+ )DOC";
475
+
476
+ ONNX_OPERATOR_SET_SCHEMA(
477
+ Asinh,
478
+ 9,
479
+ OpSchema()
480
+ .SetDoc(Asinh_ver9_doc)
481
+ .Input(0, "input", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
482
+ .Output(
483
+ 0,
484
+ "output",
485
+ "The hyperbolic arcsine values of the input tensor "
486
+ "computed element-wise",
487
+ "T",
488
+ OpSchema::Single,
489
+ true,
490
+ 1,
491
+ OpSchema::Differentiable)
492
+ .TypeConstraint(
493
+ "T",
494
+ {"tensor(float16)", "tensor(float)", "tensor(double)"},
495
+ "Constrain input and output types to float tensors.")
496
+ .TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));
497
+
498
+ static const char* Cosh_ver9_doc = R"DOC(
499
+ Calculates the hyperbolic cosine of the given input tensor element-wise.
500
+ )DOC";
501
+
502
+ ONNX_OPERATOR_SET_SCHEMA(
503
+ Cosh,
504
+ 9,
505
+ OpSchema()
506
+ .SetDoc(Cosh_ver9_doc)
507
+ .Input(0, "input", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
508
+ .Output(
509
+ 0,
510
+ "output",
511
+ "The hyperbolic cosine values of the input tensor "
512
+ "computed element-wise",
513
+ "T",
514
+ OpSchema::Single,
515
+ true,
516
+ 1,
517
+ OpSchema::Differentiable)
518
+ .TypeConstraint(
519
+ "T",
520
+ {"tensor(float16)", "tensor(float)", "tensor(double)"},
521
+ "Constrain input and output types to float tensors.")
522
+ .TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));
523
+
524
+ static const char* Sinh_ver9_doc = R"DOC(
525
+ Calculates the hyperbolic sine of the given input tensor element-wise.
526
+ )DOC";
527
+
528
+ ONNX_OPERATOR_SET_SCHEMA(
529
+ Sinh,
530
+ 9,
531
+ OpSchema()
532
+ .SetDoc(Sinh_ver9_doc)
533
+ .Input(0, "input", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
534
+ .Output(
535
+ 0,
536
+ "output",
537
+ "The hyperbolic sine values of the input tensor "
538
+ "computed element-wise",
539
+ "T",
540
+ OpSchema::Single,
541
+ true,
542
+ 1,
543
+ OpSchema::Differentiable)
544
+ .TypeConstraint(
545
+ "T",
546
+ {"tensor(float16)", "tensor(float)", "tensor(double)"},
547
+ "Constrain input and output types to float tensors.")
548
+ .TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));
549
+
550
+ static const char* Atan_ver7_doc = R"DOC(
551
+ Calculates the arctangent (inverse of tangent) of the given input tensor, element-wise.
552
+ )DOC";
553
+
554
+ ONNX_OPERATOR_SET_SCHEMA(
555
+ Atan,
556
+ 7,
557
+ OpSchema()
558
+ .SetDoc(Atan_ver7_doc)
559
+ .Input(0, "input", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
560
+ .Output(
561
+ 0,
562
+ "output",
563
+ "The arctangent of the input tensor computed "
564
+ "element-wise",
565
+ "T",
566
+ OpSchema::Single,
567
+ true,
568
+ 1,
569
+ OpSchema::Differentiable)
570
+ .TypeConstraint(
571
+ "T",
572
+ {"tensor(float16)", "tensor(float)", "tensor(double)"},
573
+ "Constrain input and output types to float tensors.")
574
+ .TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));
575
+
576
+ static const char* Acos_ver7_doc = R"DOC(
577
+ Calculates the arccosine (inverse of cosine) of the given input tensor, element-wise.
578
+ )DOC";
579
+
580
+ ONNX_OPERATOR_SET_SCHEMA(
581
+ Acos,
582
+ 7,
583
+ OpSchema()
584
+ .SetDoc(Acos_ver7_doc)
585
+ .Input(0, "input", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
586
+ .Output(
587
+ 0,
588
+ "output",
589
+ "The arccosine of the input tensor computed "
590
+ "element-wise",
591
+ "T",
592
+ OpSchema::Single,
593
+ true,
594
+ 1,
595
+ OpSchema::Differentiable)
596
+ .TypeConstraint(
597
+ "T",
598
+ {"tensor(float16)", "tensor(float)", "tensor(double)"},
599
+ "Constrain input and output types to float tensors.")
600
+ .TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));
601
+
602
+ static const char* Asin_ver7_doc = R"DOC(
603
+ Calculates the arcsine (inverse of sine) of the given input tensor, element-wise.
604
+ )DOC";
605
+
606
+ ONNX_OPERATOR_SET_SCHEMA(
607
+ Asin,
608
+ 7,
609
+ OpSchema()
610
+ .SetDoc(Asin_ver7_doc)
611
+ .Input(0, "input", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
612
+ .Output(
613
+ 0,
614
+ "output",
615
+ "The arcsine of the input tensor computed "
616
+ "element-wise",
617
+ "T",
618
+ OpSchema::Single,
619
+ true,
620
+ 1,
621
+ OpSchema::Differentiable)
622
+ .TypeConstraint(
623
+ "T",
624
+ {"tensor(float16)", "tensor(float)", "tensor(double)"},
625
+ "Constrain input and output types to float tensors.")
626
+ .TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));
627
+
628
+ static const char* Tan_ver7_doc = R"DOC(
629
+ Calculates the tangent of the given input tensor, element-wise.
630
+ )DOC";
631
+
632
+ ONNX_OPERATOR_SET_SCHEMA(
633
+ Tan,
634
+ 7,
635
+ OpSchema()
636
+ .SetDoc(Tan_ver7_doc)
637
+ .Input(0, "input", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
638
+ .Output(
639
+ 0,
640
+ "output",
641
+ "The tangent of the input tensor computed "
642
+ "element-wise",
643
+ "T",
644
+ OpSchema::Single,
645
+ true,
646
+ 1,
647
+ OpSchema::Differentiable)
648
+ .TypeConstraint(
649
+ "T",
650
+ {"tensor(float16)", "tensor(float)", "tensor(double)"},
651
+ "Constrain input and output types to float tensors.")
652
+ .TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));
653
+
654
+ static const char* Cos_ver7_doc = R"DOC(
655
+ Calculates the cosine of the given input tensor, element-wise.
656
+ )DOC";
657
+
658
+ ONNX_OPERATOR_SET_SCHEMA(
659
+ Cos,
660
+ 7,
661
+ OpSchema()
662
+ .SetDoc(Cos_ver7_doc)
663
+ .Input(0, "input", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
664
+ .Output(
665
+ 0,
666
+ "output",
667
+ "The cosine of the input tensor computed "
668
+ "element-wise",
669
+ "T",
670
+ OpSchema::Single,
671
+ true,
672
+ 1,
673
+ OpSchema::Differentiable)
674
+ .TypeConstraint(
675
+ "T",
676
+ {"tensor(float16)", "tensor(float)", "tensor(double)"},
677
+ "Constrain input and output types to float tensors.")
678
+ .TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));
679
+
680
+ static const char* Sin_ver7_doc = R"DOC(
681
+ Calculates the sine of the given input tensor, element-wise.
682
+ )DOC";
683
+
684
+ ONNX_OPERATOR_SET_SCHEMA(
685
+ Sin,
686
+ 7,
687
+ OpSchema()
688
+ .SetDoc(Sin_ver7_doc)
689
+ .Input(0, "input", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
690
+ .Output(
691
+ 0,
692
+ "output",
693
+ "The sine of the input tensor computed "
694
+ "element-wise",
695
+ "T",
696
+ OpSchema::Single,
697
+ true,
698
+ 1,
699
+ OpSchema::Differentiable)
700
+ .TypeConstraint(
701
+ "T",
702
+ {"tensor(float16)", "tensor(float)", "tensor(double)"},
703
+ "Constrain input and output types to float tensors.")
704
+ .TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));
705
+
706
+ static const char* Softplus_ver1_doc = R"DOC(
707
+ Softplus takes one input data (Tensor<T>) and produces one output data
708
+ (Tensor<T>) where the softplus function, y = ln(exp(x) + 1), is applied to
709
+ the tensor elementwise.
710
+ )DOC";
711
+
712
+ ONNX_OPERATOR_SET_SCHEMA(
713
+ Softplus,
714
+ 1,
715
+ OpSchema()
716
+ .SetDoc(Softplus_ver1_doc)
717
+ .Input(0, "X", "1D input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
718
+ .Output(0, "Y", "1D input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
719
+ .TypeConstraint(
720
+ "T",
721
+ {"tensor(float16)", "tensor(float)", "tensor(double)"},
722
+ "Constrain input and output types to float tensors.")
723
+ .TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput)
724
+ .FunctionBody(
725
+ R"ONNX(
726
+ {
727
+ exp_x = Exp (X)
728
+ one = Constant <value = float {1.0}>()
729
+ one_cast = CastLike (one, X)
730
+ exp_x_add_one = Add (exp_x, one_cast)
731
+ Y = Log (exp_x_add_one)
732
+ }
733
+ )ONNX",
734
+ 18));
735
+
736
+ static const char* Softsign_ver1_doc = R"DOC(
737
+ Calculates the softsign (x/(1+|x|)) of the given input tensor element-wise.
738
+ )DOC";
739
+
740
+ ONNX_OPERATOR_SET_SCHEMA(
741
+ Softsign,
742
+ 1,
743
+ OpSchema()
744
+ .SetDoc(Softsign_ver1_doc)
745
+ .Input(0, "input", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
746
+ .Output(
747
+ 0,
748
+ "output",
749
+ "The softsign (x/(1+|x|)) values of the input tensor computed element-wise",
750
+ "T",
751
+ OpSchema::Single,
752
+ true,
753
+ 1,
754
+ OpSchema::Differentiable)
755
+ .TypeConstraint(
756
+ "T",
757
+ {"tensor(float16)", "tensor(float)", "tensor(double)"},
758
+ "Constrain input and output types to float tensors.")
759
+ .TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput)
760
+ .FunctionBody(
761
+ R"ONNX(
762
+ {
763
+ One = Constant <value = float {1.0}>()
764
+ OneCast = CastLike (One, input)
765
+ AbsInput = Abs(input)
766
+ OneAddAbsInput = Add (OneCast, AbsInput)
767
+ output = Div(input, OneAddAbsInput)
768
+ }
769
+ )ONNX",
770
+ 18));
771
+
772
+ static const char* HardSwish_ver14_doc = R"DOC(
773
+ HardSwish takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where
774
+ the HardSwish function, y = x * max(0, min(1, alpha * x + beta)) = x * HardSigmoid<alpha, beta>(x),
775
+ where alpha = 1/6 and beta = 0.5, is applied to the tensor elementwise.
776
+ )DOC";
777
+
778
+ ONNX_OPERATOR_SET_SCHEMA(
779
+ HardSwish,
780
+ 14,
781
+ OpSchema()
782
+ .SetDoc(HardSwish_ver14_doc)
783
+ .Input(0, "X", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
784
+ .Output(0, "Y", "Output tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
785
+ .TypeConstraint(
786
+ "T",
787
+ {"tensor(float16)", "tensor(float)", "tensor(double)"},
788
+ "Constrain input and output types to float tensors.")
789
+ .TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput)
790
+ .FunctionBody(R"ONNX(
791
+ {
792
+ HS_X = HardSigmoid<alpha = 0.16666667163372, beta = 0.5>(X)
793
+ Y = Mul (X, HS_X)
794
+ }
795
+ )ONNX"));
796
+
797
+ static const char* HardSigmoid_ver6_doc = R"DOC(
798
+ HardSigmoid takes one input data (Tensor<T>) and produces one output data
799
+ (Tensor<T>) where the HardSigmoid function, y = max(0, min(1, alpha * x + beta)),
800
+ is applied to the tensor elementwise.
801
+ )DOC";
802
+
803
+ ONNX_OPERATOR_SET_SCHEMA(
804
+ HardSigmoid,
805
+ 6,
806
+ OpSchema()
807
+ .Attr("alpha", "Value of alpha.", AttributeProto::FLOAT, 0.2f)
808
+ .Attr("beta", "Value of beta.", AttributeProto::FLOAT, 0.5f)
809
+ .SetDoc(HardSigmoid_ver6_doc)
810
+ .Input(0, "X", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
811
+ .Output(0, "Y", "Output tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
812
+ .TypeConstraint(
813
+ "T",
814
+ {"tensor(float16)", "tensor(float)", "tensor(double)"},
815
+ "Constrain input and output types to float tensors.")
816
+ .TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput)
817
+ .FunctionBody(
818
+ R"ONNX(
819
+ {
820
+ Alpha = Constant <value_float: float = @alpha>()
821
+ AlphaCast = CastLike (Alpha, X)
822
+ Beta = Constant <value_float: float = @beta>()
823
+ BetaCast = CastLike (Beta, X)
824
+ Zero = Constant <value = float {0.0}>()
825
+ ZeroCast = CastLike (Zero, X)
826
+ One = Constant <value = float {1.0}>()
827
+ OneCast = CastLike (One, X)
828
+ AlphaMulX = Mul (X, AlphaCast)
829
+ AlphaMulXAddBeta = Add (AlphaMulX, BetaCast)
830
+ MinOneOrAlphaMulXAddBeta = Min (AlphaMulXAddBeta, OneCast)
831
+ Y = Max(MinOneOrAlphaMulXAddBeta, ZeroCast)
832
+ }
833
+ )ONNX",
834
+ 18));
835
+
836
+ static const char* mish_ver18_doc = R"DOC(
837
+ Mish: A Self Regularized Non-Monotonic Neural Activation Function.
838
+
839
+ Perform the linear unit element-wise on the input tensor X using formula:
840
+
841
+ ```
842
+ mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + e^{x}))
843
+ ```
844
+ )DOC";
845
+
846
+ ONNX_OPERATOR_SET_SCHEMA(
847
+ Mish,
848
+ 18,
849
+ OpSchema()
850
+ .SetDoc(mish_ver18_doc)
851
+ .Input(0, "X", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
852
+ .Output(0, "Y", "Output tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
853
+ .TypeConstraint(
854
+ "T",
855
+ {"tensor(float16)", "tensor(float)", "tensor(double)"},
856
+ "Constrain input X and output types to float tensors.")
857
+ .FunctionBody(R"ONNX(
858
+ {
859
+ Softplus_X = Softplus (X)
860
+ TanHSoftplusX = Tanh (Softplus_X)
861
+ Y = Mul (X, TanHSoftplusX)
862
+ }
863
+ )ONNX")
864
+ .TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));
865
+
866
+ static const char* Elu_ver6_doc = R"DOC(
867
+ Elu takes one input data (Tensor<T>) and produces one output data
868
+ (Tensor<T>) where the function `f(x) = alpha * (exp(x) - 1.) for x <
869
+ 0`, `f(x) = x for x >= 0`., is applied to the tensor elementwise.
870
+
871
+ )DOC";
872
+
873
+ ONNX_OPERATOR_SET_SCHEMA(
874
+ Elu,
875
+ 6,
876
+ OpSchema()
877
+ .Attr("alpha", "Coefficient of ELU.", AttributeProto::FLOAT, 1.0f)
878
+ .SetDoc(Elu_ver6_doc)
879
+ .Input(0, "X", "1D input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
880
+ .Output(0, "Y", "1D output tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
881
+ .TypeConstraint(
882
+ "T",
883
+ {"tensor(float16)", "tensor(float)", "tensor(double)"},
884
+ "Constrain input and output types to float tensors.")
885
+ .TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput)
886
+ .FunctionBody(
887
+ R"ONNX(
888
+ {
889
+ Alpha = Constant <value_float: float = @alpha>()
890
+ AlphaCast = CastLike (Alpha, X)
891
+ Zero = Constant <value = float {0.0}>()
892
+ ZeroCast = CastLike (Zero, X)
893
+ One = Constant <value = float {1.0}>()
894
+ OneCast = CastLike (One, X)
895
+ XLessThanZero = Less (X, ZeroCast)
896
+ ExpX = Exp (X)
897
+ ExpXSubOne = Sub (ExpX, OneCast)
898
+ AlphaMulExpXSubOne = Mul (AlphaCast, ExpXSubOne)
899
+ Y = Where(XLessThanZero, AlphaMulExpXSubOne, X)
900
+ }
901
+ )ONNX",
902
+ 18));
903
+
904
+ static const char* Selu_ver6_doc = R"DOC(
905
+ Selu takes one input data (Tensor<T>) and produces one output data
906
+ (Tensor<T>) where the scaled exponential linear unit function,
907
+ `y = gamma * (alpha * e^x - alpha) for x <= 0`, `y = gamma * x for x > 0`,
908
+ is applied to the tensor elementwise.
909
+ )DOC";
910
+
911
+ ONNX_OPERATOR_SET_SCHEMA(
912
+ Selu,
913
+ 6,
914
+ OpSchema()
915
+ .Attr(
916
+ "alpha",
917
+ "Coefficient of SELU default to 1.67326319217681884765625 "
918
+ "(i.e., float32 approximation of 1.6732632423543772848170429916717).",
919
+ AttributeProto::FLOAT,
920
+ 1.67326319217681884765625f)
921
+ .Attr(
922
+ "gamma",
923
+ "Coefficient of SELU default to 1.05070102214813232421875 "
924
+ "(i.e., float32 approximation of 1.0507009873554804934193349852946).",
925
+ AttributeProto::FLOAT,
926
+ 1.05070102214813232421875f)
927
+ .SetDoc(Selu_ver6_doc)
928
+ .Input(0, "X", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
929
+ .Output(0, "Y", "Output tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
930
+ .TypeConstraint(
931
+ "T",
932
+ {"tensor(float16)", "tensor(float)", "tensor(double)"},
933
+ "Constrain input and output types to float tensors.")
934
+ .TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput)
935
+ .FunctionBody(
936
+ R"ONNX(
937
+ {
938
+ Alpha = Constant <value_float: float = @alpha>()
939
+ AlphaCast = CastLike (Alpha, X)
940
+ Gamma = Constant <value_float: float = @gamma>()
941
+ GammaCast = CastLike (Gamma, X)
942
+ Zero = Constant <value = float {0.0}>()
943
+ ZeroCast = CastLike (Zero, X)
944
+ ExpX = Exp (X)
945
+ AlphaMulExpX = Mul(AlphaCast, ExpX)
946
+ AlphaMulExpXSubAlpha = Sub (AlphaMulExpX, AlphaCast)
947
+ Neg = Mul (GammaCast, AlphaMulExpXSubAlpha)
948
+ Pos = Mul (GammaCast, X)
949
+ XLessThanZero = Less (X, ZeroCast)
950
+ Y = Where(XLessThanZero, Neg, Pos)
951
+ }
952
+ )ONNX",
953
+ 18));
954
+
955
+ static const char* ThresholdedRelu_ver10_doc = R"DOC(
956
+ ThresholdedRelu takes one input data (Tensor<T>) and produces one output data
957
+ (Tensor<T>) where the rectified linear function, y = x for x > alpha, y = 0 otherwise,
958
+ is applied to the tensor elementwise.
959
+ )DOC";
960
+
961
+ ONNX_OPERATOR_SET_SCHEMA(
962
+ ThresholdedRelu,
963
+ 10,
964
+ OpSchema()
965
+ .SetDoc(ThresholdedRelu_ver10_doc)
966
+ .Attr("alpha", "Threshold value", AttributeProto::FLOAT, 1.0f)
967
+ .Input(0, "X", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
968
+ .Output(0, "Y", "Output tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
969
+ .TypeConstraint(
970
+ "T",
971
+ {"tensor(float16)", "tensor(float)", "tensor(double)"},
972
+ "Constrain input and output types to float tensors.")
973
+ .TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput)
974
+ .FunctionBody(
975
+ R"ONNX(
976
+ {
977
+ Alpha = Constant <value_float: float = @alpha>()
978
+ AlphaCast = CastLike (Alpha, X)
979
+ Zero = Constant <value = float {0.0}>()
980
+ ZeroCast = CastLike (Zero, X)
981
+ AlphaLessThanX = Less(AlphaCast, X)
982
+ Y = Where(AlphaLessThanX, X, ZeroCast)
983
+ }
984
+ )ONNX",
985
+ 18));
986
+
14
987
  std::function<void(OpSchema&)> MathDocGenerator_opset13(const char* name) {
15
988
  return [=](OpSchema& schema) {
16
989
  std::string doc;
@@ -796,7 +1769,7 @@ void matmulShapeInference_opset_9(ONNX_NAMESPACE::InferenceContext& ctx, int inp
796
1769
  }
797
1770
 
798
1771
  static const char* MatMul_ver9_doc = R"DOC(
799
- Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html
1772
+ Matrix product that behaves like [numpy.matmul](https://numpy.org/doc/stable/reference/generated/numpy.matmul.html).
800
1773
  )DOC";
801
1774
 
802
1775
  ONNX_OPERATOR_SET_SCHEMA(
@@ -1294,10 +2267,14 @@ ONNX_OPERATOR_SET_SCHEMA(
1294
2267
  const int target_rank = static_cast<int>(target_shape.dim_size());
1295
2268
 
1296
2269
  if (input_rank < 2) {
1297
- fail_shape_inference("Input rank must be >= 2.");
2270
+ fail_shape_inference("Input rank must be >= 2. input_rank=", input_rank);
1298
2271
  }
1299
2272
  if (target_rank != input_rank - 1) {
1300
- fail_shape_inference("Target rank must be 1 less than the input rank.");
2273
+ fail_shape_inference(
2274
+ "Target rank must be 1 less than the input rank. input_rank=",
2275
+ input_rank,
2276
+ ", target_rank=",
2277
+ target_rank);
1301
2278
  }
1302
2279
 
1303
2280
  // match input dimensions (N, C, d1, ..., dk) with target
@@ -1307,13 +2284,18 @@ ONNX_OPERATOR_SET_SCHEMA(
1307
2284
  const auto target_dim = target_shape.dim(dim);
1308
2285
  if (input_dim.has_dim_value() && target_dim.has_dim_value() &&
1309
2286
  input_dim.dim_value() != target_dim.dim_value())
1310
- fail_shape_inference("Input and target dimension value mismatch.");
2287
+ fail_shape_inference(
2288
+ "Input and target dimension value mismatch. input_dim_value=",
2289
+ input_dim.dim_value(),
2290
+ " target_dim_value=",
2291
+ target_dim.dim_value());
1311
2292
  }
1312
2293
 
1313
2294
  if (ctx.getNumInputs() == 3 && hasInputShape(ctx, 2)) {
1314
2295
  const TensorShapeProto& weight_shape = ctx.getInputType(2)->tensor_type().shape();
1315
- if (weight_shape.dim_size() != 1) {
1316
- fail_shape_inference("Weight rank must be 1.");
2296
+ const auto weight_rank = weight_shape.dim_size();
2297
+ if (weight_rank != 1) {
2298
+ fail_shape_inference("Weight rank must be 1. weight_rank=", weight_rank);
1317
2299
  }
1318
2300
  }
1319
2301
 
@@ -2322,10 +3304,15 @@ ONNX_OPERATOR_SET_SCHEMA(
2322
3304
  auto transBAttr = ctx.getAttribute("transB");
2323
3305
  bool transB = transBAttr ? static_cast<int>(transBAttr->i()) != 0 : false;
2324
3306
 
3307
+ checkInputRank(ctx, 0, 2);
3308
+ checkInputRank(ctx, 1, 2);
3309
+
3310
+ auto& first_input_shape = getInputShape(ctx, 0);
3311
+ auto& second_input_shape = getInputShape(ctx, 1);
2325
3312
  *ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape()->add_dim() =
2326
- ctx.getInputType(0)->tensor_type().shape().dim(transA ? 1 : 0);
3313
+ first_input_shape.dim(transA ? 1 : 0);
2327
3314
  *ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape()->add_dim() =
2328
- ctx.getInputType(1)->tensor_type().shape().dim(transB ? 0 : 1);
3315
+ second_input_shape.dim(transB ? 0 : 1);
2329
3316
  } else if (
2330
3317
  hasInputShape(ctx, 2) &&
2331
3318
  (!ctx.getAttribute("broadcast") || static_cast<int>(ctx.getAttribute("broadcast")->i()) == 0)) {
@@ -2544,7 +3531,7 @@ ONNX_OPERATOR_SET_SCHEMA(
2544
3531
  .TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));
2545
3532
 
2546
3533
  static const char* MatMul_ver1_doc = R"DOC(
2547
- Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html
3534
+ Matrix product that behaves like [numpy.matmul](https://numpy.org/doc/stable/reference/generated/numpy.matmul.html).
2548
3535
  )DOC";
2549
3536
 
2550
3537
  ONNX_OPERATOR_SET_SCHEMA(