onnx-1.15.0-cp311-cp311-win_amd64.whl → onnx-1.16.1-cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of onnx might be problematic; see the registry's advisory page for more details.

Files changed (584):
  1. onnx/__init__.py +10 -10
  2. onnx/backend/base.py +13 -14
  3. onnx/backend/sample/ops/abs.py +1 -1
  4. onnx/backend/test/case/model/__init__.py +0 -1
  5. onnx/backend/test/case/node/ai_onnx_ml/tree_ensemble.py +122 -0
  6. onnx/backend/test/case/node/averagepool.py +15 -30
  7. onnx/backend/test/case/node/cast.py +88 -11
  8. onnx/backend/test/case/node/dequantizelinear.py +155 -0
  9. onnx/backend/test/case/node/groupnormalization.py +13 -9
  10. onnx/backend/test/case/node/gru.py +2 -2
  11. onnx/backend/test/case/node/isinf.py +4 -4
  12. onnx/backend/test/case/node/isnan.py +2 -2
  13. onnx/backend/test/case/node/lppool.py +8 -16
  14. onnx/backend/test/case/node/lstm.py +1 -1
  15. onnx/backend/test/case/node/maxpool.py +40 -34
  16. onnx/backend/test/case/node/pow.py +1 -1
  17. onnx/backend/test/case/node/qlinearmatmul.py +143 -109
  18. onnx/backend/test/case/node/quantizelinear.py +298 -7
  19. onnx/backend/test/case/node/reducemax.py +26 -0
  20. onnx/backend/test/case/node/rnn.py +1 -1
  21. onnx/backend/test/case/node/scan.py +6 -2
  22. onnx/backend/test/case/node/scatterelements.py +1 -1
  23. onnx/backend/test/case/node/topk.py +1 -1
  24. onnx/backend/test/case/utils.py +1 -3
  25. onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_set_membership/model.onnx +0 -0
  26. onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_set_membership/test_data_set_0/input_0.pb +0 -0
  27. onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_set_membership/test_data_set_0/output_0.pb +0 -0
  28. onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_single_tree/model.onnx +0 -0
  29. onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_single_tree/test_data_set_0/input_0.pb +1 -0
  30. onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_single_tree/test_data_set_0/output_0.pb +0 -0
  31. onnx/backend/test/data/node/test_cast_BFLOAT16_to_FLOAT/model.onnx +0 -0
  32. onnx/backend/test/data/node/test_cast_DOUBLE_to_FLOAT/model.onnx +0 -0
  33. onnx/backend/test/data/node/test_cast_DOUBLE_to_FLOAT16/model.onnx +0 -0
  34. onnx/backend/test/data/node/test_cast_FLOAT16_to_DOUBLE/model.onnx +0 -0
  35. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT/model.onnx +0 -0
  36. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FN/model.onnx +0 -0
  37. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FN/test_data_set_0/input_0.pb +2 -2
  38. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FN/test_data_set_0/output_0.pb +0 -0
  39. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
  40. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FNUZ/test_data_set_0/input_0.pb +2 -2
  41. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FNUZ/test_data_set_0/output_0.pb +0 -0
  42. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2/model.onnx +0 -0
  43. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2/test_data_set_0/input_0.pb +2 -2
  44. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2/test_data_set_0/output_0.pb +0 -0
  45. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
  46. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2FNUZ/test_data_set_0/input_0.pb +2 -2
  47. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2FNUZ/test_data_set_0/output_0.pb +0 -0
  48. onnx/backend/test/data/node/test_cast_FLOAT16_to_INT4/model.onnx +0 -0
  49. onnx/backend/test/data/node/test_cast_FLOAT16_to_INT4/test_data_set_0/input_0.pb +0 -0
  50. onnx/backend/test/data/node/test_cast_FLOAT16_to_INT4/test_data_set_0/output_0.pb +1 -0
  51. onnx/backend/test/data/node/test_cast_FLOAT16_to_UINT4/model.onnx +0 -0
  52. onnx/backend/test/data/node/test_cast_FLOAT16_to_UINT4/test_data_set_0/input_0.pb +0 -0
  53. onnx/backend/test/data/node/test_cast_FLOAT16_to_UINT4/test_data_set_0/output_0.pb +0 -0
  54. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT/model.onnx +0 -0
  55. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT/test_data_set_0/input_0.pb +0 -0
  56. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT/test_data_set_0/output_0.pb +0 -0
  57. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT16/model.onnx +0 -0
  58. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT16/test_data_set_0/input_0.pb +0 -0
  59. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
  60. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT/model.onnx +0 -0
  61. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT/test_data_set_0/input_0.pb +0 -0
  62. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT/test_data_set_0/output_0.pb +0 -0
  63. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT16/model.onnx +0 -0
  64. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT16/test_data_set_0/input_0.pb +0 -0
  65. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
  66. onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT/model.onnx +0 -0
  67. onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT/test_data_set_0/input_0.pb +0 -0
  68. onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT/test_data_set_0/output_0.pb +0 -0
  69. onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT16/model.onnx +0 -0
  70. onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT16/test_data_set_0/input_0.pb +0 -0
  71. onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
  72. onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT/model.onnx +0 -0
  73. onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT/test_data_set_0/input_0.pb +0 -0
  74. onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT/test_data_set_0/output_0.pb +0 -0
  75. onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT16/model.onnx +0 -0
  76. onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT16/test_data_set_0/input_0.pb +0 -0
  77. onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
  78. onnx/backend/test/data/node/test_cast_FLOAT_to_BFLOAT16/model.onnx +0 -0
  79. onnx/backend/test/data/node/test_cast_FLOAT_to_DOUBLE/model.onnx +0 -0
  80. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT16/model.onnx +0 -0
  81. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FN/model.onnx +0 -0
  82. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/input_0.pb +0 -0
  83. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/output_0.pb +0 -0
  84. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
  85. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/input_0.pb +0 -0
  86. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/output_0.pb +0 -0
  87. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2/model.onnx +0 -0
  88. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2/test_data_set_0/input_0.pb +0 -0
  89. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2/test_data_set_0/output_0.pb +0 -0
  90. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
  91. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/input_0.pb +0 -0
  92. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/output_0.pb +0 -0
  93. onnx/backend/test/data/node/test_cast_FLOAT_to_INT4/model.onnx +0 -0
  94. onnx/backend/test/data/node/test_cast_FLOAT_to_INT4/test_data_set_0/input_0.pb +0 -0
  95. onnx/backend/test/data/node/test_cast_FLOAT_to_INT4/test_data_set_0/output_0.pb +1 -0
  96. onnx/backend/test/data/node/test_cast_FLOAT_to_STRING/model.onnx +0 -0
  97. onnx/backend/test/data/node/test_cast_FLOAT_to_UINT4/model.onnx +0 -0
  98. onnx/backend/test/data/node/test_cast_FLOAT_to_UINT4/test_data_set_0/input_0.pb +0 -0
  99. onnx/backend/test/data/node/test_cast_FLOAT_to_UINT4/test_data_set_0/output_0.pb +0 -0
  100. onnx/backend/test/data/node/test_cast_INT4_to_FLOAT/model.onnx +0 -0
  101. onnx/backend/test/data/node/test_cast_INT4_to_FLOAT/test_data_set_0/input_0.pb +1 -0
  102. onnx/backend/test/data/node/test_cast_INT4_to_FLOAT/test_data_set_0/output_0.pb +0 -0
  103. onnx/backend/test/data/node/test_cast_INT4_to_FLOAT16/model.onnx +0 -0
  104. onnx/backend/test/data/node/test_cast_INT4_to_FLOAT16/test_data_set_0/input_0.pb +1 -0
  105. onnx/backend/test/data/node/test_cast_INT4_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
  106. onnx/backend/test/data/node/test_cast_INT4_to_INT8/model.onnx +0 -0
  107. onnx/backend/test/data/node/test_cast_INT4_to_INT8/test_data_set_0/input_0.pb +1 -0
  108. onnx/backend/test/data/node/test_cast_INT4_to_INT8/test_data_set_0/output_0.pb +0 -0
  109. onnx/backend/test/data/node/test_cast_STRING_to_FLOAT/model.onnx +0 -0
  110. onnx/backend/test/data/node/test_cast_UINT4_to_FLOAT/model.onnx +0 -0
  111. onnx/backend/test/data/node/test_cast_UINT4_to_FLOAT/test_data_set_0/input_0.pb +0 -0
  112. onnx/backend/test/data/node/test_cast_UINT4_to_FLOAT/test_data_set_0/output_0.pb +0 -0
  113. onnx/backend/test/data/node/test_cast_UINT4_to_FLOAT16/model.onnx +0 -0
  114. onnx/backend/test/data/node/test_cast_UINT4_to_FLOAT16/test_data_set_0/input_0.pb +0 -0
  115. onnx/backend/test/data/node/test_cast_UINT4_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
  116. onnx/backend/test/data/node/test_cast_UINT4_to_UINT8/model.onnx +0 -0
  117. onnx/backend/test/data/node/test_cast_UINT4_to_UINT8/test_data_set_0/input_0.pb +0 -0
  118. onnx/backend/test/data/node/test_cast_UINT4_to_UINT8/test_data_set_0/output_0.pb +0 -0
  119. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FN/model.onnx +0 -0
  120. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FN/test_data_set_0/input_0.pb +2 -2
  121. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FN/test_data_set_0/output_0.pb +0 -0
  122. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
  123. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FNUZ/test_data_set_0/input_0.pb +2 -2
  124. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FNUZ/test_data_set_0/output_0.pb +0 -0
  125. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2/model.onnx +0 -0
  126. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2/test_data_set_0/input_0.pb +2 -2
  127. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2/test_data_set_0/output_0.pb +0 -0
  128. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
  129. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2FNUZ/test_data_set_0/input_0.pb +2 -2
  130. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2FNUZ/test_data_set_0/output_0.pb +0 -0
  131. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FN/model.onnx +0 -0
  132. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/input_0.pb +0 -0
  133. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/output_0.pb +0 -0
  134. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
  135. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/input_0.pb +0 -0
  136. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/output_0.pb +0 -0
  137. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2/model.onnx +0 -0
  138. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2/test_data_set_0/input_0.pb +0 -0
  139. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2/test_data_set_0/output_0.pb +0 -0
  140. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
  141. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/input_0.pb +0 -0
  142. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/output_0.pb +0 -0
  143. onnx/backend/test/data/node/test_castlike_BFLOAT16_to_FLOAT/model.onnx +0 -0
  144. onnx/backend/test/data/node/test_castlike_BFLOAT16_to_FLOAT_expanded/model.onnx +0 -0
  145. onnx/backend/test/data/node/test_castlike_DOUBLE_to_FLOAT/model.onnx +0 -0
  146. onnx/backend/test/data/node/test_castlike_DOUBLE_to_FLOAT16/model.onnx +0 -0
  147. onnx/backend/test/data/node/test_castlike_DOUBLE_to_FLOAT16_expanded/model.onnx +0 -0
  148. onnx/backend/test/data/node/test_castlike_DOUBLE_to_FLOAT_expanded/model.onnx +0 -0
  149. onnx/backend/test/data/node/test_castlike_FLOAT16_to_DOUBLE/model.onnx +0 -0
  150. onnx/backend/test/data/node/test_castlike_FLOAT16_to_DOUBLE_expanded/model.onnx +0 -0
  151. onnx/backend/test/data/node/test_castlike_FLOAT16_to_FLOAT/model.onnx +0 -0
  152. onnx/backend/test/data/node/test_castlike_FLOAT16_to_FLOAT_expanded/model.onnx +0 -0
  153. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT/model.onnx +0 -0
  154. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT_expanded/model.onnx +0 -0
  155. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT/model.onnx +0 -0
  156. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT_expanded/model.onnx +0 -0
  157. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT/model.onnx +0 -0
  158. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT_expanded/model.onnx +0 -0
  159. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT/model.onnx +0 -0
  160. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT_expanded/model.onnx +0 -0
  161. onnx/backend/test/data/node/test_castlike_FLOAT_to_BFLOAT16/model.onnx +0 -0
  162. onnx/backend/test/data/node/test_castlike_FLOAT_to_BFLOAT16_expanded/model.onnx +0 -0
  163. onnx/backend/test/data/node/test_castlike_FLOAT_to_DOUBLE/model.onnx +0 -0
  164. onnx/backend/test/data/node/test_castlike_FLOAT_to_DOUBLE_expanded/model.onnx +0 -0
  165. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT16/model.onnx +0 -0
  166. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT16_expanded/model.onnx +0 -0
  167. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN/model.onnx +0 -0
  168. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
  169. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ_expanded/model.onnx +0 -0
  170. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN_expanded/model.onnx +0 -0
  171. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2/model.onnx +0 -0
  172. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
  173. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ_expanded/model.onnx +0 -0
  174. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2_expanded/model.onnx +0 -0
  175. onnx/backend/test/data/node/test_castlike_FLOAT_to_STRING/model.onnx +0 -0
  176. onnx/backend/test/data/node/test_castlike_FLOAT_to_STRING_expanded/model.onnx +0 -0
  177. onnx/backend/test/data/node/test_castlike_STRING_to_FLOAT/model.onnx +0 -0
  178. onnx/backend/test/data/node/test_castlike_STRING_to_FLOAT_expanded/model.onnx +0 -0
  179. onnx/backend/test/data/node/test_constant/model.onnx +0 -0
  180. onnx/backend/test/data/node/test_constant_pad/model.onnx +0 -0
  181. onnx/backend/test/data/node/test_constant_pad_axes/model.onnx +0 -0
  182. onnx/backend/test/data/node/test_constant_pad_negative_axes/model.onnx +0 -0
  183. onnx/backend/test/data/node/test_constantofshape_float_ones/model.onnx +0 -0
  184. onnx/backend/test/data/node/test_constantofshape_int_shape_zero/model.onnx +0 -0
  185. onnx/backend/test/data/node/test_constantofshape_int_zeros/model.onnx +0 -0
  186. onnx/backend/test/data/node/test_dequantizelinear/model.onnx +0 -0
  187. onnx/backend/test/data/node/test_dequantizelinear_axis/model.onnx +0 -0
  188. onnx/backend/test/data/node/test_dequantizelinear_blocked/model.onnx +0 -0
  189. onnx/backend/test/data/node/test_dequantizelinear_blocked/test_data_set_0/input_0.pb +1 -0
  190. onnx/backend/test/data/node/test_dequantizelinear_blocked/test_data_set_0/input_1.pb +0 -0
  191. onnx/backend/test/data/node/test_dequantizelinear_blocked/test_data_set_0/input_2.pb +0 -0
  192. onnx/backend/test/data/node/test_dequantizelinear_blocked/test_data_set_0/output_0.pb +0 -0
  193. onnx/backend/test/data/node/test_dequantizelinear_e4m3fn/model.onnx +0 -0
  194. onnx/backend/test/data/node/test_dequantizelinear_e4m3fn_float16/model.onnx +0 -0
  195. onnx/backend/test/data/node/test_dequantizelinear_e4m3fn_zero_point/model.onnx +0 -0
  196. onnx/backend/test/data/node/test_dequantizelinear_e5m2/model.onnx +0 -0
  197. onnx/backend/test/data/node/test_dequantizelinear_int16/model.onnx +0 -0
  198. onnx/backend/test/data/node/test_dequantizelinear_int16/test_data_set_0/input_0.pb +1 -0
  199. onnx/backend/test/data/node/test_dequantizelinear_int16/test_data_set_0/input_1.pb +0 -0
  200. onnx/backend/test/data/node/test_dequantizelinear_int16/test_data_set_0/input_2.pb +0 -0
  201. onnx/backend/test/data/node/test_dequantizelinear_int16/test_data_set_0/output_0.pb +0 -0
  202. onnx/backend/test/data/node/test_dequantizelinear_int4/model.onnx +0 -0
  203. onnx/backend/test/data/node/test_dequantizelinear_int4/test_data_set_0/input_0.pb +1 -0
  204. onnx/backend/test/data/node/test_dequantizelinear_int4/test_data_set_0/input_1.pb +0 -0
  205. onnx/backend/test/data/node/test_dequantizelinear_int4/test_data_set_0/input_2.pb +1 -0
  206. onnx/backend/test/data/node/test_dequantizelinear_int4/test_data_set_0/output_0.pb +0 -0
  207. onnx/backend/test/data/node/test_dequantizelinear_uint16/model.onnx +0 -0
  208. onnx/backend/test/data/node/test_dequantizelinear_uint16/test_data_set_0/input_0.pb +0 -0
  209. onnx/backend/test/data/node/test_dequantizelinear_uint16/test_data_set_0/input_1.pb +0 -0
  210. onnx/backend/test/data/node/test_dequantizelinear_uint16/test_data_set_0/input_2.pb +1 -0
  211. onnx/backend/test/data/node/test_dequantizelinear_uint16/test_data_set_0/output_0.pb +0 -0
  212. onnx/backend/test/data/node/test_dequantizelinear_uint4/model.onnx +0 -0
  213. onnx/backend/test/data/node/test_dequantizelinear_uint4/test_data_set_0/input_0.pb +1 -0
  214. onnx/backend/test/data/node/test_dequantizelinear_uint4/test_data_set_0/input_1.pb +0 -0
  215. onnx/backend/test/data/node/test_dequantizelinear_uint4/test_data_set_0/input_2.pb +1 -0
  216. onnx/backend/test/data/node/test_dequantizelinear_uint4/test_data_set_0/output_0.pb +0 -0
  217. onnx/backend/test/data/node/test_edge_pad/model.onnx +0 -0
  218. onnx/backend/test/data/node/test_flatten_axis0/model.onnx +0 -0
  219. onnx/backend/test/data/node/test_flatten_axis1/model.onnx +0 -0
  220. onnx/backend/test/data/node/test_flatten_axis2/model.onnx +0 -0
  221. onnx/backend/test/data/node/test_flatten_axis3/model.onnx +0 -0
  222. onnx/backend/test/data/node/test_flatten_default_axis/model.onnx +0 -0
  223. onnx/backend/test/data/node/test_flatten_negative_axis1/model.onnx +0 -0
  224. onnx/backend/test/data/node/test_flatten_negative_axis2/model.onnx +0 -0
  225. onnx/backend/test/data/node/test_flatten_negative_axis3/model.onnx +0 -0
  226. onnx/backend/test/data/node/test_flatten_negative_axis4/model.onnx +0 -0
  227. onnx/backend/test/data/node/test_group_normalization_epsilon/model.onnx +0 -0
  228. onnx/backend/test/data/node/test_group_normalization_epsilon/test_data_set_0/input_0.pb +1 -1
  229. onnx/backend/test/data/node/test_group_normalization_epsilon/test_data_set_0/input_1.pb +1 -1
  230. onnx/backend/test/data/node/test_group_normalization_epsilon/test_data_set_0/input_2.pb +1 -1
  231. onnx/backend/test/data/node/test_group_normalization_epsilon/test_data_set_0/output_0.pb +0 -0
  232. onnx/backend/test/data/node/test_group_normalization_epsilon_expanded/model.onnx +0 -0
  233. onnx/backend/test/data/node/test_group_normalization_epsilon_expanded/test_data_set_0/input_0.pb +1 -1
  234. onnx/backend/test/data/node/test_group_normalization_epsilon_expanded/test_data_set_0/input_1.pb +1 -1
  235. onnx/backend/test/data/node/test_group_normalization_epsilon_expanded/test_data_set_0/input_2.pb +1 -1
  236. onnx/backend/test/data/node/test_group_normalization_epsilon_expanded/test_data_set_0/output_0.pb +0 -0
  237. onnx/backend/test/data/node/test_group_normalization_example/model.onnx +0 -0
  238. onnx/backend/test/data/node/test_group_normalization_example/test_data_set_0/input_1.pb +1 -1
  239. onnx/backend/test/data/node/test_group_normalization_example/test_data_set_0/input_2.pb +1 -1
  240. onnx/backend/test/data/node/test_group_normalization_example/test_data_set_0/output_0.pb +0 -0
  241. onnx/backend/test/data/node/test_group_normalization_example_expanded/model.onnx +0 -0
  242. onnx/backend/test/data/node/test_group_normalization_example_expanded/test_data_set_0/input_1.pb +1 -1
  243. onnx/backend/test/data/node/test_group_normalization_example_expanded/test_data_set_0/input_2.pb +1 -1
  244. onnx/backend/test/data/node/test_group_normalization_example_expanded/test_data_set_0/output_0.pb +0 -0
  245. onnx/backend/test/data/node/test_identity/model.onnx +0 -0
  246. onnx/backend/test/data/node/test_identity_sequence/model.onnx +0 -0
  247. onnx/backend/test/data/node/test_lrn_default/test_data_set_0/output_0.pb +0 -0
  248. onnx/backend/test/data/node/test_maxpool_2d_ceil_output_size_reduce_by_one/model.onnx +0 -0
  249. onnx/backend/test/data/node/test_maxpool_2d_ceil_output_size_reduce_by_one/test_data_set_0/input_0.pb +0 -0
  250. onnx/backend/test/data/node/test_maxpool_2d_ceil_output_size_reduce_by_one/test_data_set_0/output_0.pb +0 -0
  251. onnx/backend/test/data/node/test_mvn/test_data_set_0/output_0.pb +1 -1
  252. onnx/backend/test/data/node/test_mvn_expanded/test_data_set_0/output_0.pb +1 -1
  253. onnx/backend/test/data/node/test_mvn_expanded_ver18/test_data_set_0/output_0.pb +1 -1
  254. onnx/backend/test/data/node/test_pow/test_data_set_0/output_0.pb +0 -0
  255. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float16/model.onnx +0 -0
  256. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float16/test_data_set_0/input_0.pb +1 -0
  257. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float16/test_data_set_0/input_1.pb +2 -0
  258. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float16/test_data_set_0/input_2.pb +1 -0
  259. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float16/test_data_set_0/input_3.pb +0 -0
  260. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float16/test_data_set_0/input_4.pb +2 -0
  261. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float16/test_data_set_0/input_5.pb +1 -0
  262. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float16/test_data_set_0/input_6.pb +2 -0
  263. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float16/test_data_set_0/input_7.pb +1 -0
  264. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float16/test_data_set_0/output_0.pb +1 -0
  265. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float32/model.onnx +0 -0
  266. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float32/test_data_set_0/input_0.pb +1 -0
  267. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float32/test_data_set_0/input_2.pb +1 -0
  268. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float32/test_data_set_0/input_3.pb +0 -0
  269. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float32/test_data_set_0/input_5.pb +1 -0
  270. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float32/test_data_set_0/input_7.pb +1 -0
  271. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float32/test_data_set_0/output_0.pb +1 -0
  272. onnx/backend/test/data/node/test_qlinearmatmul_2D_uint8_float16/model.onnx +0 -0
  273. onnx/backend/test/data/node/test_qlinearmatmul_2D_uint8_float16/test_data_set_0/input_1.pb +2 -0
  274. onnx/backend/test/data/node/test_qlinearmatmul_2D_uint8_float16/test_data_set_0/input_4.pb +2 -0
  275. onnx/backend/test/data/node/test_qlinearmatmul_2D_uint8_float16/test_data_set_0/input_6.pb +2 -0
  276. onnx/backend/test/data/node/{test_qlinearmatmul_2D → test_qlinearmatmul_2D_uint8_float32}/model.onnx +0 -0
  277. onnx/backend/test/data/node/test_qlinearmatmul_2D_uint8_float32/test_data_set_0/input_0.pb +0 -0
  278. onnx/backend/test/data/node/test_qlinearmatmul_2D_uint8_float32/test_data_set_0/input_3.pb +0 -0
  279. onnx/backend/test/data/node/test_qlinearmatmul_2D_uint8_float32/test_data_set_0/output_0.pb +1 -0
  280. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float16/model.onnx +0 -0
  281. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float16/test_data_set_0/input_0.pb +1 -0
  282. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float16/test_data_set_0/input_1.pb +2 -0
  283. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float16/test_data_set_0/input_2.pb +1 -0
  284. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float16/test_data_set_0/input_3.pb +0 -0
  285. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float16/test_data_set_0/input_4.pb +2 -0
  286. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float16/test_data_set_0/input_5.pb +1 -0
  287. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float16/test_data_set_0/input_6.pb +2 -0
  288. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float16/test_data_set_0/input_7.pb +1 -0
  289. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float16/test_data_set_0/output_0.pb +1 -0
  290. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float32/model.onnx +0 -0
  291. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float32/test_data_set_0/input_0.pb +1 -0
  292. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float32/test_data_set_0/input_1.pb +1 -0
  293. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float32/test_data_set_0/input_2.pb +1 -0
  294. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float32/test_data_set_0/input_3.pb +0 -0
  295. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float32/test_data_set_0/input_4.pb +1 -0
  296. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float32/test_data_set_0/input_5.pb +1 -0
  297. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float32/test_data_set_0/input_6.pb +1 -0
  298. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float32/test_data_set_0/input_7.pb +1 -0
  299. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float32/test_data_set_0/output_0.pb +1 -0
  300. onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float16/model.onnx +0 -0
  301. onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float16/test_data_set_0/input_1.pb +2 -0
  302. onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float16/test_data_set_0/input_2.pb +1 -0
  303. onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float16/test_data_set_0/input_4.pb +2 -0
  304. onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float16/test_data_set_0/input_5.pb +1 -0
  305. onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float16/test_data_set_0/input_6.pb +2 -0
  306. onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float16/test_data_set_0/input_7.pb +1 -0
  307. onnx/backend/test/data/node/{test_qlinearmatmul_3D → test_qlinearmatmul_3D_uint8_float32}/model.onnx +0 -0
  308. onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float32/test_data_set_0/input_0.pb +0 -0
  309. onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float32/test_data_set_0/input_1.pb +1 -0
  310. onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float32/test_data_set_0/input_2.pb +1 -0
  311. onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float32/test_data_set_0/input_3.pb +0 -0
  312. onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float32/test_data_set_0/input_4.pb +1 -0
  313. onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float32/test_data_set_0/input_5.pb +1 -0
  314. onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float32/test_data_set_0/input_6.pb +1 -0
  315. onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float32/test_data_set_0/input_7.pb +1 -0
  316. onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float32/test_data_set_0/output_0.pb +1 -0
  317. onnx/backend/test/data/node/test_quantizelinear/model.onnx +0 -0
  318. onnx/backend/test/data/node/test_quantizelinear_axis/model.onnx +0 -0
  319. onnx/backend/test/data/node/test_quantizelinear_blocked_asymmetric/model.onnx +0 -0
  320. onnx/backend/test/data/node/test_quantizelinear_blocked_asymmetric/test_data_set_0/input_0.pb +0 -0
  321. onnx/backend/test/data/node/test_quantizelinear_blocked_asymmetric/test_data_set_0/input_1.pb +0 -0
  322. onnx/backend/test/data/node/test_quantizelinear_blocked_asymmetric/test_data_set_0/input_2.pb +0 -0
  323. onnx/backend/test/data/node/test_quantizelinear_blocked_asymmetric/test_data_set_0/output_0.pb +1 -0
  324. onnx/backend/test/data/node/test_quantizelinear_blocked_symmetric/model.onnx +0 -0
  325. onnx/backend/test/data/node/test_quantizelinear_blocked_symmetric/test_data_set_0/input_0.pb +0 -0
  326. onnx/backend/test/data/node/test_quantizelinear_blocked_symmetric/test_data_set_0/input_1.pb +0 -0
  327. onnx/backend/test/data/node/test_quantizelinear_blocked_symmetric/test_data_set_0/output_0.pb +0 -0
  328. onnx/backend/test/data/node/test_quantizelinear_e4m3fn/model.onnx +0 -0
  329. onnx/backend/test/data/node/test_quantizelinear_e4m3fn/test_data_set_0/input_2.pb +0 -0
  330. onnx/backend/test/data/node/test_quantizelinear_e4m3fn/test_data_set_0/output_0.pb +0 -0
  331. onnx/backend/test/data/node/test_quantizelinear_e5m2/model.onnx +0 -0
  332. onnx/backend/test/data/node/test_quantizelinear_e5m2/test_data_set_0/input_2.pb +0 -0
  333. onnx/backend/test/data/node/test_quantizelinear_e5m2/test_data_set_0/output_0.pb +0 -0
  334. onnx/backend/test/data/node/test_quantizelinear_int16/model.onnx +0 -0
  335. onnx/backend/test/data/node/test_quantizelinear_int16/test_data_set_0/input_0.pb +0 -0
  336. onnx/backend/test/data/node/test_quantizelinear_int16/test_data_set_0/input_1.pb +0 -0
  337. onnx/backend/test/data/node/test_quantizelinear_int16/test_data_set_0/input_2.pb +0 -0
  338. onnx/backend/test/data/node/test_quantizelinear_int16/test_data_set_0/output_0.pb +0 -0
  339. onnx/backend/test/data/node/test_quantizelinear_int4/model.onnx +0 -0
  340. onnx/backend/test/data/node/test_quantizelinear_int4/test_data_set_0/input_0.pb +0 -0
  341. onnx/backend/test/data/node/test_quantizelinear_int4/test_data_set_0/input_1.pb +0 -0
  342. onnx/backend/test/data/node/test_quantizelinear_int4/test_data_set_0/input_2.pb +1 -0
  343. onnx/backend/test/data/node/test_quantizelinear_int4/test_data_set_0/output_0.pb +1 -0
  344. onnx/backend/test/data/node/test_quantizelinear_uint16/model.onnx +0 -0
  345. onnx/backend/test/data/node/test_quantizelinear_uint16/test_data_set_0/input_0.pb +0 -0
  346. onnx/backend/test/data/node/test_quantizelinear_uint16/test_data_set_0/input_1.pb +0 -0
  347. onnx/backend/test/data/node/test_quantizelinear_uint16/test_data_set_0/input_2.pb +1 -0
  348. onnx/backend/test/data/node/test_quantizelinear_uint16/test_data_set_0/output_0.pb +0 -0
  349. onnx/backend/test/data/node/test_quantizelinear_uint4/model.onnx +0 -0
  350. onnx/backend/test/data/node/test_quantizelinear_uint4/test_data_set_0/input_0.pb +0 -0
  351. onnx/backend/test/data/node/test_quantizelinear_uint4/test_data_set_0/input_1.pb +0 -0
  352. onnx/backend/test/data/node/test_quantizelinear_uint4/test_data_set_0/input_2.pb +1 -0
  353. onnx/backend/test/data/node/test_quantizelinear_uint4/test_data_set_0/output_0.pb +0 -0
  354. onnx/backend/test/data/node/test_reflect_pad/model.onnx +0 -0
  355. onnx/backend/test/data/node/test_reshape_allowzero_reordered/model.onnx +0 -0
  356. onnx/backend/test/data/node/test_reshape_extended_dims/model.onnx +0 -0
  357. onnx/backend/test/data/node/test_reshape_negative_dim/model.onnx +0 -0
  358. onnx/backend/test/data/node/test_reshape_negative_extended_dims/model.onnx +0 -0
  359. onnx/backend/test/data/node/test_reshape_one_dim/model.onnx +0 -0
  360. onnx/backend/test/data/node/test_reshape_reduced_dims/model.onnx +0 -0
  361. onnx/backend/test/data/node/test_reshape_reordered_all_dims/model.onnx +0 -0
  362. onnx/backend/test/data/node/test_reshape_reordered_last_dims/model.onnx +0 -0
  363. onnx/backend/test/data/node/test_reshape_zero_and_negative_dim/model.onnx +0 -0
  364. onnx/backend/test/data/node/test_reshape_zero_dim/model.onnx +0 -0
  365. onnx/backend/test/data/node/test_shape/model.onnx +0 -0
  366. onnx/backend/test/data/node/test_shape_clip_end/model.onnx +0 -0
  367. onnx/backend/test/data/node/test_shape_clip_start/model.onnx +0 -0
  368. onnx/backend/test/data/node/test_shape_end_1/model.onnx +0 -0
  369. onnx/backend/test/data/node/test_shape_end_negative_1/model.onnx +0 -0
  370. onnx/backend/test/data/node/test_shape_example/model.onnx +0 -0
  371. onnx/backend/test/data/node/test_shape_start_1/model.onnx +0 -0
  372. onnx/backend/test/data/node/test_shape_start_1_end_2/model.onnx +0 -0
  373. onnx/backend/test/data/node/test_shape_start_1_end_negative_1/model.onnx +0 -0
  374. onnx/backend/test/data/node/test_shape_start_negative_1/model.onnx +0 -0
  375. onnx/backend/test/data/node/test_size/model.onnx +0 -0
  376. onnx/backend/test/data/node/test_size_example/model.onnx +0 -0
  377. onnx/backend/test/data/node/test_squeeze/model.onnx +0 -0
  378. onnx/backend/test/data/node/test_squeeze_negative_axes/model.onnx +0 -0
  379. onnx/backend/test/data/node/test_transpose_all_permutations_0/model.onnx +0 -0
  380. onnx/backend/test/data/node/test_transpose_all_permutations_1/model.onnx +0 -0
  381. onnx/backend/test/data/node/test_transpose_all_permutations_2/model.onnx +0 -0
  382. onnx/backend/test/data/node/test_transpose_all_permutations_3/model.onnx +0 -0
  383. onnx/backend/test/data/node/test_transpose_all_permutations_4/model.onnx +0 -0
  384. onnx/backend/test/data/node/test_transpose_all_permutations_5/model.onnx +0 -0
  385. onnx/backend/test/data/node/test_transpose_default/model.onnx +0 -0
  386. onnx/backend/test/data/node/test_unsqueeze_axis_0/model.onnx +0 -0
  387. onnx/backend/test/data/node/test_unsqueeze_axis_1/model.onnx +0 -0
  388. onnx/backend/test/data/node/test_unsqueeze_axis_2/model.onnx +0 -0
  389. onnx/backend/test/data/node/test_unsqueeze_negative_axes/model.onnx +0 -0
  390. onnx/backend/test/data/node/test_unsqueeze_three_axes/model.onnx +0 -0
  391. onnx/backend/test/data/node/test_unsqueeze_two_axes/model.onnx +0 -0
  392. onnx/backend/test/data/node/test_unsqueeze_unsorted_axes/model.onnx +0 -0
  393. onnx/backend/test/data/node/test_wrap_pad/model.onnx +0 -0
  394. onnx/backend/test/loader/__init__.py +0 -1
  395. onnx/backend/test/runner/__init__.py +43 -15
  396. onnx/checker.cc +104 -99
  397. onnx/checker.h +23 -3
  398. onnx/checker.py +56 -20
  399. onnx/common/assertions.cc +10 -5
  400. onnx/common/common.h +19 -0
  401. onnx/common/file_utils.h +3 -1
  402. onnx/common/interned_strings.h +7 -1
  403. onnx/common/ir.h +30 -7
  404. onnx/common/ir_pb_converter.cc +6 -0
  405. onnx/common/path.h +18 -2
  406. onnx/common/proto_util.h +43 -0
  407. onnx/common/version.h +1 -1
  408. onnx/cpp2py_export.cc +88 -56
  409. onnx/defs/__init__.py +29 -8
  410. onnx/defs/controlflow/defs.cc +16 -16
  411. onnx/defs/controlflow/old.cc +177 -0
  412. onnx/defs/data_propagators.h +2 -0
  413. onnx/defs/data_type_utils.cc +2 -0
  414. onnx/defs/generator/defs.cc +6 -4
  415. onnx/defs/generator/old.cc +115 -0
  416. onnx/defs/math/defs.cc +37 -142
  417. onnx/defs/math/old.cc +96 -12
  418. onnx/defs/math/utils.cc +127 -0
  419. onnx/defs/math/utils.h +8 -0
  420. onnx/defs/nn/defs.cc +72 -59
  421. onnx/defs/nn/old.cc +181 -2
  422. onnx/defs/object_detection/defs.cc +2 -2
  423. onnx/defs/object_detection/old.cc +2 -2
  424. onnx/defs/operator_sets.h +51 -0
  425. onnx/defs/operator_sets_ml.h +14 -0
  426. onnx/defs/parser.cc +112 -54
  427. onnx/defs/parser.h +14 -2
  428. onnx/defs/printer.cc +14 -7
  429. onnx/defs/quantization/defs.cc +111 -44
  430. onnx/defs/quantization/old.cc +130 -1
  431. onnx/defs/schema.cc +62 -18
  432. onnx/defs/schema.h +194 -48
  433. onnx/defs/shape_inference.cc +28 -19
  434. onnx/defs/shape_inference.h +2 -0
  435. onnx/defs/tensor/defs.cc +54 -96
  436. onnx/defs/tensor/old.cc +939 -34
  437. onnx/defs/tensor/utils.cc +6 -3
  438. onnx/defs/tensor/utils.h +5 -1
  439. onnx/defs/tensor_proto_util.cc +2 -0
  440. onnx/defs/tensor_util.cc +2 -0
  441. onnx/defs/traditionalml/defs.cc +273 -117
  442. onnx/defs/traditionalml/old.cc +329 -14
  443. onnx/defs/traditionalml/utils.h +27 -0
  444. onnx/external_data_helper.py +12 -26
  445. onnx/helper.py +242 -169
  446. onnx/hub.py +104 -70
  447. onnx/inliner/inliner.cc +89 -31
  448. onnx/inliner/inliner.h +5 -0
  449. onnx/inliner.py +2 -0
  450. onnx/mapping.py +9 -0
  451. onnx/model_container.py +346 -0
  452. onnx/numpy_helper.py +100 -38
  453. onnx/onnx-ml.proto +50 -13
  454. onnx/onnx.in.proto +50 -13
  455. onnx/onnx.proto +50 -13
  456. onnx/onnx_cpp2py_export/__init__.pyi +5 -0
  457. onnx/onnx_cpp2py_export/checker.pyi +21 -0
  458. onnx/onnx_cpp2py_export/defs.pyi +202 -0
  459. onnx/onnx_cpp2py_export/inliner.pyi +19 -0
  460. onnx/onnx_cpp2py_export/parser.pyi +32 -0
  461. onnx/onnx_cpp2py_export/printer.pyi +3 -0
  462. onnx/onnx_cpp2py_export/shape_inference.pyi +16 -0
  463. onnx/onnx_cpp2py_export/version_converter.pyi +4 -0
  464. onnx/onnx_cpp2py_export.cp311-win_amd64.pyd +0 -0
  465. onnx/onnx_data_pb2.pyi +146 -0
  466. onnx/onnx_ml_pb2.py +52 -52
  467. onnx/onnx_ml_pb2.pyi +663 -0
  468. onnx/onnx_operators_ml_pb2.pyi +67 -0
  469. onnx/reference/__init__.py +2 -0
  470. onnx/reference/custom_element_types.py +2 -0
  471. onnx/reference/op_run.py +166 -121
  472. onnx/reference/ops/_op.py +27 -50
  473. onnx/reference/ops/_op_list.py +36 -24
  474. onnx/reference/ops/aionnx_preview_training/_op_list.py +15 -8
  475. onnx/reference/ops/aionnxml/_common_classifier.py +3 -5
  476. onnx/reference/ops/aionnxml/_op_list.py +16 -8
  477. onnx/reference/ops/aionnxml/op_array_feature_extractor.py +4 -6
  478. onnx/reference/ops/aionnxml/op_linear_classifier.py +1 -2
  479. onnx/reference/ops/aionnxml/op_normalizer.py +3 -3
  480. onnx/reference/ops/aionnxml/op_svm_helper.py +1 -3
  481. onnx/reference/ops/aionnxml/op_svm_regressor.py +1 -3
  482. onnx/reference/ops/aionnxml/op_tree_ensemble.py +257 -0
  483. onnx/reference/ops/aionnxml/op_tree_ensemble_helper.py +2 -6
  484. onnx/reference/ops/aionnxml/op_tree_ensemble_regressor.py +4 -4
  485. onnx/reference/ops/experimental/_op_list.py +15 -8
  486. onnx/reference/ops/op_blackman_window.py +5 -6
  487. onnx/reference/ops/op_cast.py +22 -0
  488. onnx/reference/ops/op_cast_like.py +6 -0
  489. onnx/reference/ops/op_clip.py +5 -8
  490. onnx/reference/ops/op_col2im.py +1 -3
  491. onnx/reference/ops/op_constant.py +7 -1
  492. onnx/reference/ops/op_dequantize_linear.py +43 -40
  493. onnx/reference/ops/op_det.py +1 -1
  494. onnx/reference/ops/op_dynamic_quantize_linear.py +2 -2
  495. onnx/reference/ops/op_grid_sample.py +2 -4
  496. onnx/reference/ops/op_hamming_window.py +3 -6
  497. onnx/reference/ops/op_hann_window.py +3 -6
  498. onnx/reference/ops/op_if.py +4 -3
  499. onnx/reference/ops/op_loop.py +7 -9
  500. onnx/reference/ops/op_matmul.py +1 -2
  501. onnx/reference/ops/op_max_pool.py +5 -0
  502. onnx/reference/ops/op_optional.py +1 -1
  503. onnx/reference/ops/op_pool_common.py +3 -6
  504. onnx/reference/ops/op_qlinear_matmul.py +2 -2
  505. onnx/reference/ops/op_quantize_linear.py +166 -71
  506. onnx/reference/ops/op_resize.py +25 -21
  507. onnx/reference/ops/op_rnn.py +20 -12
  508. onnx/reference/ops/op_scan.py +23 -15
  509. onnx/reference/ops/op_scatter_elements.py +7 -6
  510. onnx/reference/ops/op_stft.py +3 -5
  511. onnx/reference/ops/op_string_normalizer.py +7 -7
  512. onnx/reference/ops/op_tfidf_vectorizer.py +7 -8
  513. onnx/reference/ops/op_topk.py +9 -11
  514. onnx/reference/ops/op_unique.py +1 -1
  515. onnx/reference/reference_evaluator.py +119 -63
  516. onnx/shape_inference/implementation.cc +160 -127
  517. onnx/shape_inference.py +11 -10
  518. onnx/subbyte.py +72 -0
  519. onnx/test/__init__.pyi +6 -0
  520. onnx/test/checker_test.py +21 -1
  521. onnx/test/compose_test.py +26 -74
  522. onnx/test/cpp/inliner_test.cc +76 -1
  523. onnx/test/cpp/ir_test.cc +60 -0
  524. onnx/test/cpp/parser_test.cc +106 -0
  525. onnx/test/function_test.py +1 -3
  526. onnx/test/helper_test.py +64 -4
  527. onnx/test/model_container_refeval_test.py +139 -0
  528. onnx/test/model_container_test.py +136 -0
  529. onnx/test/model_inference_test.py +44 -0
  530. onnx/test/reference_evaluator_ml_test.py +448 -47
  531. onnx/test/reference_evaluator_model_test.py +130 -0
  532. onnx/test/reference_evaluator_test.py +901 -14
  533. onnx/test/schema_test.py +166 -1
  534. onnx/test/shape_inference_test.py +285 -6
  535. onnx/test/symbolic_shape_test.py +3 -8
  536. onnx/test/test_backend_onnxruntime.py +238 -224
  537. onnx/test/test_backend_reference.py +11 -0
  538. onnx/test/test_external_data.py +51 -2
  539. onnx/test/version_converter/automatic_conversion_test_base.py +2 -1
  540. onnx/test/version_converter/automatic_upgrade_test.py +12 -10
  541. onnx/test/version_converter_test.py +166 -0
  542. onnx/tools/replace_constants.py +23 -26
  543. onnx/tools/update_model_dims.py +1 -2
  544. onnx/version.py +2 -2
  545. onnx/version_converter/adapters/group_normalization_20_21.h +128 -0
  546. onnx/version_converter/adapters/q_dq_21_20.h +77 -0
  547. onnx/version_converter/convert.h +67 -2
  548. onnx/version_converter.py +6 -142
  549. {onnx-1.15.0.dist-info → onnx-1.16.1.dist-info}/METADATA +18 -15
  550. {onnx-1.15.0.dist-info → onnx-1.16.1.dist-info}/RECORD +572 -406
  551. {onnx-1.15.0.dist-info → onnx-1.16.1.dist-info}/WHEEL +1 -1
  552. onnx/examples/Protobufs.ipynb +0 -639
  553. onnx/examples/check_model.ipynb +0 -128
  554. onnx/examples/load_model.ipynb +0 -116
  555. onnx/examples/make_model.ipynb +0 -176
  556. onnx/examples/np_array_tensorproto.ipynb +0 -136
  557. onnx/examples/resources/single_relu.onnx +0 -12
  558. onnx/examples/resources/single_relu_new.onnx +0 -12
  559. onnx/examples/resources/tensor.pb +0 -0
  560. onnx/examples/resources/two_transposes.onnx +0 -0
  561. onnx/examples/save_model.ipynb +0 -56
  562. onnx/examples/shape_inference.ipynb +0 -111
  563. onnx/test/reference_evaluator_backend_test.py +0 -876
  564. /onnx/backend/test/data/node/{test_qlinearmatmul_2D → test_qlinearmatmul_2D_int8_float32}/test_data_set_0/input_1.pb +0 -0
  565. /onnx/backend/test/data/node/{test_qlinearmatmul_2D → test_qlinearmatmul_2D_int8_float32}/test_data_set_0/input_4.pb +0 -0
  566. /onnx/backend/test/data/node/{test_qlinearmatmul_2D → test_qlinearmatmul_2D_int8_float32}/test_data_set_0/input_6.pb +0 -0
  567. /onnx/backend/test/data/node/{test_qlinearmatmul_2D → test_qlinearmatmul_2D_uint8_float16}/test_data_set_0/input_0.pb +0 -0
  568. /onnx/backend/test/data/node/{test_qlinearmatmul_2D → test_qlinearmatmul_2D_uint8_float16}/test_data_set_0/input_2.pb +0 -0
  569. /onnx/backend/test/data/node/{test_qlinearmatmul_2D → test_qlinearmatmul_2D_uint8_float16}/test_data_set_0/input_3.pb +0 -0
  570. /onnx/backend/test/data/node/{test_qlinearmatmul_2D → test_qlinearmatmul_2D_uint8_float16}/test_data_set_0/input_5.pb +0 -0
  571. /onnx/backend/test/data/node/{test_qlinearmatmul_2D → test_qlinearmatmul_2D_uint8_float16}/test_data_set_0/input_7.pb +0 -0
  572. /onnx/backend/test/data/node/{test_qlinearmatmul_2D → test_qlinearmatmul_2D_uint8_float16}/test_data_set_0/output_0.pb +0 -0
  573. /onnx/backend/test/data/node/{test_qlinearmatmul_3D → test_qlinearmatmul_2D_uint8_float32}/test_data_set_0/input_1.pb +0 -0
  574. /onnx/backend/test/data/node/{test_qlinearmatmul_3D → test_qlinearmatmul_2D_uint8_float32}/test_data_set_0/input_2.pb +0 -0
  575. /onnx/backend/test/data/node/{test_qlinearmatmul_3D → test_qlinearmatmul_2D_uint8_float32}/test_data_set_0/input_4.pb +0 -0
  576. /onnx/backend/test/data/node/{test_qlinearmatmul_3D → test_qlinearmatmul_2D_uint8_float32}/test_data_set_0/input_5.pb +0 -0
  577. /onnx/backend/test/data/node/{test_qlinearmatmul_3D → test_qlinearmatmul_2D_uint8_float32}/test_data_set_0/input_6.pb +0 -0
  578. /onnx/backend/test/data/node/{test_qlinearmatmul_3D → test_qlinearmatmul_2D_uint8_float32}/test_data_set_0/input_7.pb +0 -0
  579. /onnx/backend/test/data/node/{test_qlinearmatmul_3D → test_qlinearmatmul_3D_uint8_float16}/test_data_set_0/input_0.pb +0 -0
  580. /onnx/backend/test/data/node/{test_qlinearmatmul_3D → test_qlinearmatmul_3D_uint8_float16}/test_data_set_0/input_3.pb +0 -0
  581. /onnx/backend/test/data/node/{test_qlinearmatmul_3D → test_qlinearmatmul_3D_uint8_float16}/test_data_set_0/output_0.pb +0 -0
  582. {onnx-1.15.0.dist-info → onnx-1.16.1.dist-info}/LICENSE +0 -0
  583. {onnx-1.15.0.dist-info → onnx-1.16.1.dist-info}/entry_points.txt +0 -0
  584. {onnx-1.15.0.dist-info → onnx-1.16.1.dist-info}/top_level.txt +0 -0
onnx/defs/tensor/old.cc CHANGED
@@ -12,6 +12,154 @@
12
12
 
13
13
  namespace ONNX_NAMESPACE {
14
14
 
15
+ static const char* Cast_ver19_doc = R"DOC(
16
+ The operator casts the elements of a given input tensor to a data type
17
+ specified by the 'to' argument and returns an output tensor of the same size in
18
+ the converted type. The 'to' argument must be one of the data types specified
19
+ in the 'DataType' enum field in the TensorProto message.
20
+
21
+ Casting from string tensor in plain (e.g., "3.14" and "1000") and scientific numeric representations
22
+ (e.g., "1e-5" and "1E8") to float types is supported. For example, converting string "100.5" to an integer may
23
+ yield result 100. There are some string literals reserved for special floating-point values;
24
+ "+INF" (and "INF"), "-INF", and "NaN" are positive infinity, negative infinity, and not-a-number, respectively.
25
+ Any string which can exactly match "+INF" in a case-insensitive way would be mapped to positive infinite. Similarly,
26
+ this case-insensitive rule is applied to "INF" and "NaN". When casting from numeric tensors
27
+ to string tensors, plain floating-point representation (such as "314.15926") would be used.
28
+ Converting non-numerical-literal string such as "Hello World!" is an undefined behavior. Cases
29
+ of converting string representing floating-point arithmetic value, such as "2.718", to INT is an undefined behavior.
30
+
31
+ Conversion from a numerical type to any numerical type is always allowed.
32
+ User must be aware of precision loss and value change caused by range difference between two types.
33
+ For example, a 64-bit float 3.1415926459 may be round to a 32-bit float 3.141592. Similarly, converting
34
+ an integer 36 to Boolean may produce 1 because we truncate bits which can't be stored in the targeted type.
35
+
36
+ In more detail, the conversion among numerical types should follow these rules
37
+ if the destination type is not a float 8 type.
38
+
39
+ * Casting from floating point to:
40
+ * floating point: +/- infinity if OOR (out of range).
41
+ * fixed point: undefined if OOR.
42
+ * bool: +/- 0.0 to False; all else to True.
43
+ * Casting from fixed point to:
44
+ * floating point: +/- infinity if OOR. (+ infinity in the case of uint)
45
+ * fixed point: when OOR, discard higher bits and reinterpret (with respect to two's complement representation for
46
+ signed types). For example, 200 (int16) -> -56 (int8).
47
+ * bool: zero to False; nonzero to True.
48
+ * Casting from bool to:
49
+ * floating point: `{1.0, 0.0}`.
50
+ * fixed point: `{1, 0}`.
51
+ * bool: no change.
52
+
53
+ Float 8 type were introduced to speed up the training of
54
+ deep models. By default the conversion of a float *x* obeys
55
+ to the following rules. `[x]` means the value rounded to
56
+ the target mantissa width.
57
+
58
+ | x | E4M3FN | E4M3FNUZ | E5M2 | E5M2FNUZ |
59
+ |------|----|----|----|----|
60
+ | 0 | 0 | 0 | 0 | 0 |
61
+ |-0 | -0 | 0 | -0 | 0 |
62
+ | NaN | NaN | NaN | NaN | NaN |
63
+ | +/- Inf | +/- FLT_MAX | NaN | FLT_MAX | NaN |
64
+ | [x] > FLT_MAX | FLT_MAX | FLT_MAX | FLT_MAX | FLT_MAX |
65
+ | [x] < -FLT_MAX | -FLT_MAX | -FLT_MAX | -FLT_MAX | -FLT_MAX |
66
+ | else | RNE | RNE | RNE | RNE |
67
+
68
+ The behavior changes if the parameter 'saturate' is set to False.
69
+ The rules then become:
70
+
71
+ | x | E4M3FN | E4M3FNUZ | E5M2 | E5M2FNUZ |
72
+ |------|----|----|----|----|
73
+ | 0 | 0 | 0 | 0 | 0 |
74
+ |-0 | -0 | 0 | -0 | 0 |
75
+ | NaN | NaN | NaN | NaN | NaN |
76
+ | +/- Inf | NaN | NaN | +/- Inf | NaN |
77
+ | [x] > FLT_MAX | NaN | NaN | Inf | NaN |
78
+ | [x] < -FLT_MAX | NaN | NaN | -Inf | NaN |
79
+ | else | RNE | RNE | RNE | RNE |
80
+ )DOC";
81
+
82
+ ONNX_OPERATOR_SET_SCHEMA(
83
+ Cast,
84
+ 19,
85
+ OpSchema()
86
+ .SetDoc(Cast_ver19_doc)
87
+ .Attr(
88
+ "to",
89
+ "The data type to which the elements of the input tensor are cast. "
90
+ "Strictly must be one of the types from DataType enum in TensorProto",
91
+ AttributeProto::INT)
92
+ .Attr(
93
+ "saturate",
94
+ "The parameter defines how the conversion behaves if an input value is out of "
95
+ "range of the destination type. It only applies for float 8 conversion "
96
+ "(float8e4m3fn, float8e4m3fnuz, float8e5m2, float8e5m2fnuz). It is true by default. "
97
+ "All cases are fully described in two tables inserted in the operator description.",
98
+ AttributeProto::INT,
99
+ static_cast<int64_t>(1))
100
+ .Input(0, "input", "Input tensor to be cast.", "T1", OpSchema::Single, true, 1, OpSchema::Differentiable)
101
+ .Output(
102
+ 0,
103
+ "output",
104
+ "Output tensor with the same shape as input with type "
105
+ "specified by the 'to' argument",
106
+ "T2",
107
+ OpSchema::Single,
108
+ true,
109
+ 1,
110
+ OpSchema::Differentiable)
111
+ .TypeConstraint(
112
+ "T1",
113
+ {"tensor(float16)",
114
+ "tensor(float)",
115
+ "tensor(double)",
116
+ "tensor(int8)",
117
+ "tensor(int16)",
118
+ "tensor(int32)",
119
+ "tensor(int64)",
120
+ "tensor(uint8)",
121
+ "tensor(uint16)",
122
+ "tensor(uint32)",
123
+ "tensor(uint64)",
124
+ "tensor(bool)",
125
+ "tensor(string)",
126
+ "tensor(bfloat16)",
127
+ "tensor(float8e4m3fn)",
128
+ "tensor(float8e4m3fnuz)",
129
+ "tensor(float8e5m2)",
130
+ "tensor(float8e5m2fnuz)"},
131
+ "Constrain input types. Casting from complex is not supported.")
132
+ .TypeConstraint(
133
+ "T2",
134
+ {"tensor(float16)",
135
+ "tensor(float)",
136
+ "tensor(double)",
137
+ "tensor(int8)",
138
+ "tensor(int16)",
139
+ "tensor(int32)",
140
+ "tensor(int64)",
141
+ "tensor(uint8)",
142
+ "tensor(uint16)",
143
+ "tensor(uint32)",
144
+ "tensor(uint64)",
145
+ "tensor(bool)",
146
+ "tensor(string)",
147
+ "tensor(bfloat16)",
148
+ "tensor(float8e4m3fn)",
149
+ "tensor(float8e4m3fnuz)",
150
+ "tensor(float8e5m2)",
151
+ "tensor(float8e5m2fnuz)"},
152
+ "Constrain output types. Casting to complex is not supported.")
153
+ .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
154
+ propagateElemTypeFromAttributeToOutput(ctx, "to", 0);
155
+ if (hasNInputShapes(ctx, 1)) {
156
+ propagateShapeFromInputToOutput(ctx, 0, 0);
157
+ }
158
+ })
159
+ .PartialDataPropagationFunction([](DataPropagationContext& ctx) {
160
+ PropagateShapeDataFromInputToOutput(ctx, 0);
161
+ }));
162
+
15
163
  static const char* Cast_ver13_doc = R"DOC(
16
164
  The operator casts the elements of a given input tensor to a data type
17
165
  specified by the 'to' argument and returns an output tensor of the same size in
@@ -115,6 +263,108 @@ ONNX_OPERATOR_SET_SCHEMA(
115
263
  PropagateShapeDataFromInputToOutput(ctx, 0);
116
264
  }));
117
265
 
266
+ static const char* CastLike_ver19_doc = R"DOC(
267
+ The operator casts the elements of a given input tensor (the first input) to
268
+ the same data type as the elements of the second input tensor.
269
+ See documentation of the Cast operator for further details.
270
+ )DOC";
271
+
272
+ ONNX_OPERATOR_SET_SCHEMA(
273
+ CastLike,
274
+ 19,
275
+ OpSchema()
276
+ .SetDoc(CastLike_ver19_doc)
277
+ .Attr(
278
+ "saturate",
279
+ "The parameter defines how the conversion behaves if an input value is out of "
280
+ "range of the destination type. It only applies for float 8 conversion "
281
+ "(float8e4m3fn, float8e4m3fnuz, float8e5m2, float8e5m2fnuz). It is true by default. "
282
+ "Please refer to operator Cast description for further details.",
283
+ AttributeProto::INT,
284
+ static_cast<int64_t>(1))
285
+ .Input(0, "input", "Input tensor to be cast.", "T1", OpSchema::Single, true, 1, OpSchema::Differentiable)
286
+ .Input(
287
+ 1,
288
+ "target_type",
289
+ "The (first) input tensor will be cast to produce a tensor of the same type as this (second input) tensor.",
290
+ "T2",
291
+ OpSchema::Single,
292
+ true,
293
+ 1,
294
+ OpSchema::NonDifferentiable)
295
+ .Output(
296
+ 0,
297
+ "output",
298
+ "Output tensor produced by casting the first input tensor to have the same type as the second input tensor.",
299
+ "T2",
300
+ OpSchema::Single,
301
+ true,
302
+ 1,
303
+ OpSchema::Differentiable)
304
+ .TypeConstraint(
305
+ "T1",
306
+ {"tensor(float16)",
307
+ "tensor(float)",
308
+ "tensor(double)",
309
+ "tensor(int8)",
310
+ "tensor(int16)",
311
+ "tensor(int32)",
312
+ "tensor(int64)",
313
+ "tensor(uint8)",
314
+ "tensor(uint16)",
315
+ "tensor(uint32)",
316
+ "tensor(uint64)",
317
+ "tensor(bool)",
318
+ "tensor(string)",
319
+ "tensor(bfloat16)",
320
+ "tensor(float8e4m3fn)",
321
+ "tensor(float8e4m3fnuz)",
322
+ "tensor(float8e5m2)",
323
+ "tensor(float8e5m2fnuz)"},
324
+ "Constrain input types. Casting from complex is not supported.")
325
+ .TypeConstraint(
326
+ "T2",
327
+ {"tensor(float16)",
328
+ "tensor(float)",
329
+ "tensor(double)",
330
+ "tensor(int8)",
331
+ "tensor(int16)",
332
+ "tensor(int32)",
333
+ "tensor(int64)",
334
+ "tensor(uint8)",
335
+ "tensor(uint16)",
336
+ "tensor(uint32)",
337
+ "tensor(uint64)",
338
+ "tensor(bool)",
339
+ "tensor(string)",
340
+ "tensor(bfloat16)",
341
+ "tensor(float8e4m3fn)",
342
+ "tensor(float8e4m3fnuz)",
343
+ "tensor(float8e5m2)",
344
+ "tensor(float8e5m2fnuz)"},
345
+ "Constrain output types. Casting to complex is not supported.")
346
+ .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
347
+ propagateElemTypeFromInputToOutput(ctx, 1, 0);
348
+ if (hasNInputShapes(ctx, 1)) {
349
+ propagateShapeFromInputToOutput(ctx, 0, 0);
350
+ }
351
+ })
352
+ .SetContextDependentFunctionBodyBuilder(
353
+ [](const FunctionBodyBuildContext& ctx, const OpSchema& schema, FunctionProto& functionProto) -> bool {
354
+ auto target_type = ctx.getInputType(1);
355
+ if ((target_type == nullptr) || (!target_type->has_tensor_type())) {
356
+ // we cannot create a correct function body without knowing the target element type
357
+ return false;
358
+ }
359
+ auto target_elt_type = target_type->tensor_type().elem_type();
360
+ FunctionBuilder builder(functionProto);
361
+ builder.Add(
362
+ MakeString("output = Cast <to= ", (int64_t)(target_elt_type), ", saturate: int = @saturate> (input)")
363
+ .c_str());
364
+ schema.BuildFunction(functionProto);
365
+ return true;
366
+ }));
367
+
118
368
  static const char* CastLike_ver15_doc = R"DOC(
119
369
  The operator casts the elements of a given input tensor (the first input) to
120
370
  the same data type as the elements of the second input tensor.
@@ -364,6 +614,170 @@ ONNX_OPERATOR_SET_SCHEMA(
364
614
  .SetDoc(GridSample_ver16_doc)
365
615
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) { gridSampleShapeInference(ctx); }));
366
616
 
617
+ static const char* Reshape_ver19_doc = R"DOC(
618
+ Reshape the input tensor similar to numpy.reshape.
619
+ First input is the data tensor, second input is a shape tensor which specifies the output shape. It outputs the reshaped tensor.
620
+ At most one dimension of the new shape can be -1. In this case, the value is
621
+ inferred from the size of the tensor and the remaining dimensions. A dimension
622
+ could also be 0, in which case the actual dimension value is unchanged (i.e. taken
623
+ from the input tensor). If 'allowzero' is set, and the new shape includes 0, the
624
+ dimension will be set explicitly to zero (i.e. not taken from input tensor).
625
+ Shape (second input) could be an empty shape, which means converting to a scalar.
626
+ The input tensor's shape and the output tensor's shape are required to have the same number of elements.
627
+
628
+ If the attribute 'allowzero' is set, it is invalid for the specified shape to
629
+ contain both a zero value and -1, as the value of the dimension corresponding
630
+ to -1 cannot be determined uniquely.
631
+ )DOC";
632
+
633
+ ONNX_OPERATOR_SET_SCHEMA(
634
+ Reshape,
635
+ 19,
636
+ OpSchema()
637
+ .SetDoc(Reshape_ver19_doc)
638
+ .Attr(
639
+ "allowzero",
640
+ "(Optional) By default, when any value in the 'shape' input is equal to zero "
641
+ "the corresponding dimension value is copied from the input tensor dynamically. "
642
+ "allowzero=1 indicates that if any value in the 'shape' input is set to zero, "
643
+ "the zero value is honored, similar to NumPy.",
644
+ AttributeProto::INT,
645
+ static_cast<int64_t>(0))
646
+ .Input(0, "data", "An input tensor.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
647
+ .Input(
648
+ 1,
649
+ "shape",
650
+ "Specified shape for output.",
651
+ "tensor(int64)",
652
+ OpSchema::Single,
653
+ true,
654
+ 1,
655
+ OpSchema::NonDifferentiable)
656
+ .Output(0, "reshaped", "Reshaped data.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
657
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir9(), "Constrain input and output types to all tensor types.")
658
+ .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
659
+ // Type inference
660
+ propagateElemTypeFromInputToOutput(ctx, 0, 0);
661
+ bool found;
662
+ TensorShapeProto targetShapeProto = getShapeInput(ctx, 1, found);
663
+ if (!found) {
664
+ return;
665
+ }
666
+
667
+ int allowzero = static_cast<int>(getAttribute(ctx, "allowzero", 0));
668
+
669
+ // Iterate through targetShape, adding dimensions in the outputShape
670
+ // TensorProto. If the targetShape dimension is -1, we do not set the
671
+ // dimension value in this iteration, but we record the Dimension. If
672
+ // targetShape dimension is 0, we attempt to propagate the dimension
673
+ // value/param. If the value cannot be inferred, we set the flag in
674
+ // the unresolveZeros vector. If targetShape dimension is positive, we
675
+ // set the dimension value in the outputShape. We track the product of
676
+ // the dimensions we are setting outputShape in the outputProduct
677
+ // variable. The outputProduct will potentially be used for inferring
678
+ // a dimension marked -1.
679
+ auto* outputShape = ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape();
680
+ TensorShapeProto::Dimension* negativeOneDim = nullptr;
681
+ const auto& dataInputTensorType = ctx.getInputType(0)->tensor_type();
682
+ std::vector<bool> unresolvedZeros(targetShapeProto.dim_size(), false);
683
+ int64_t outputProduct = 1;
684
+ bool outputProductValid = true;
685
+ for (int i = 0; i < static_cast<int>(targetShapeProto.dim_size()); ++i) {
686
+ // Add a new dimension to outputShape
687
+ auto* new_dim = outputShape->add_dim();
688
+ if (targetShapeProto.dim(i).has_dim_param()) {
689
+ // There is a tricky edge case here. It is possible that the value of
690
+ // symbolic dim can be -1 or 0 at runtime. In that case simply propgating this
691
+ // symbol can be erroneous. This should be a very rare scenario and in such a
692
+ // case an option is to turn off data propagation during shape inference.
693
+ new_dim->set_dim_param(targetShapeProto.dim(i).dim_param());
694
+ outputProductValid = false;
695
+ } else {
696
+ if (!targetShapeProto.dim(i).has_dim_value()) {
697
+ outputProductValid = false;
698
+ // treat this dim as unknown dim
699
+ continue;
700
+ }
701
+
702
+ const auto dim_value = targetShapeProto.dim(i).dim_value();
703
+
704
+ if (dim_value == -1) {
705
+ // Check if multiple -1's. If not, set negativeOneDim, marking
706
+ // this dimension to potentially be filled in later.
707
+ if (negativeOneDim) {
708
+ fail_shape_inference("Target shape may not have multiple -1 dimensions.");
709
+ }
710
+ negativeOneDim = new_dim;
711
+ } else if (dim_value == 0) {
712
+ // Check if data input has a shape and if the index i is within
713
+ // its bounds. If these conditions are satisfied, any dimension
714
+ // value/param should be propagated. If dimension value cannot be
715
+ // inferred, set the corresponding unresolvedZeros flag to true.
716
+ // If allowzero is set however, do not propagate values, since output
717
+ // dimension is explicitly zero.
718
+ if (allowzero == 0) {
719
+ unresolvedZeros[i] = true;
720
+ if (dataInputTensorType.has_shape()) {
721
+ if (i >= dataInputTensorType.shape().dim_size()) {
722
+ fail_shape_inference("Invalid position of 0.");
723
+ }
724
+ if (dataInputTensorType.shape().dim(i).has_dim_value()) {
725
+ const auto& input_dim_value = dataInputTensorType.shape().dim(i).dim_value();
726
+ new_dim->set_dim_value(input_dim_value);
727
+ outputProduct *= input_dim_value;
728
+ unresolvedZeros[i] = false;
729
+ } else if (dataInputTensorType.shape().dim(i).has_dim_param()) {
730
+ new_dim->set_dim_param(dataInputTensorType.shape().dim(i).dim_param());
731
+ }
732
+ }
733
+ } else {
734
+ new_dim->set_dim_value(dim_value);
735
+ outputProduct *= dim_value;
736
+ }
737
+ } else if (dim_value > 0) {
738
+ // Set the dimension value to dim_value
739
+ new_dim->set_dim_value(dim_value);
740
+ outputProduct *= dim_value;
741
+ } else {
742
+ // Check if value is less than -1; fail if so
743
+ fail_shape_inference("Invalid dimension value: ", dim_value);
744
+ }
745
+ }
746
+ }
747
+ // If negativeOneDim has been set, we attempt to infer its value. This
748
+ // can be done if all dimension values for the data input tensor shape
749
+ // are known other than the ones corresponding to unresolvedZeros
750
+ // flags.
751
+ if (negativeOneDim && outputProductValid) {
752
+ // First, attempt to compute product of data input shape dimensions
753
+ // that are not marked by unresolvedZeros. If not possible, set the
754
+ // inputProductValid flag to false.
755
+ if (!outputProduct) {
756
+ fail_shape_inference("Invalid Target shape product of 0. Product cannot be 0 in combination with -1");
757
+ }
758
+ int64_t inputProduct = 1;
759
+ bool inputProductValid = true;
760
+ if (!dataInputTensorType.has_shape()) {
761
+ inputProductValid = false;
762
+ } else {
763
+ for (int i = 0; i < dataInputTensorType.shape().dim_size(); ++i) {
764
+ if (dataInputTensorType.shape().dim(i).has_dim_value()) {
765
+ inputProduct *= dataInputTensorType.shape().dim(i).dim_value();
766
+ } else if (i >= static_cast<int>(unresolvedZeros.size()) || !unresolvedZeros[i]) {
767
+ inputProductValid = false;
768
+ break;
769
+ }
770
+ }
771
+ }
772
+ if (inputProductValid) {
773
+ if (inputProduct % outputProduct != 0) {
774
+ fail_shape_inference("Dimension could not be inferred: incompatible shapes");
775
+ }
776
+ negativeOneDim->set_dim_value(inputProduct / outputProduct);
777
+ }
778
+ }
779
+ }));
780
+
367
781
  static const char* Reshape_ver13_doc = R"DOC(
368
782
  Reshape the input tensor similar to numpy.reshape.
369
783
  First input is the data tensor, second input is a shape tensor which specifies the output shape. It outputs the reshaped tensor.
@@ -1190,6 +1604,76 @@ ONNX_OPERATOR_SET_SCHEMA(
1190
1604
  }
1191
1605
  }));
1192
1606
 
1607
+ static const char* Transpose_ver13_doc = R"DOC(
1608
+ Transpose the input tensor similar to numpy.transpose. For example, when
1609
+ perm=(1, 0, 2), given an input tensor of shape (1, 2, 3), the output shape
1610
+ will be (2, 1, 3).
1611
+ )DOC";
1612
+
1613
+ ONNX_OPERATOR_SET_SCHEMA(
1614
+ Transpose,
1615
+ 13,
1616
+ OpSchema()
1617
+ .SetDoc(Transpose_ver13_doc)
1618
+ .Attr(
1619
+ "perm",
1620
+ "A list of integers. By default, reverse the dimensions, "
1621
+ "otherwise permute the axes according to the values given.",
1622
+ AttributeProto::INTS,
1623
+ OPTIONAL_VALUE)
1624
+ .Input(0, "data", "An input tensor.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
1625
+ .Output(0, "transposed", "Transposed output.", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
1626
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
1627
+ .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
1628
+ propagateElemTypeFromInputToOutput(ctx, 0, 0);
1629
+ if (!hasNInputShapes(ctx, 1)) {
1630
+ return;
1631
+ }
1632
+ auto input_type = ctx.getInputType(0);
1633
+ const TensorShapeProto& shape = input_type->tensor_type().shape();
1634
+ std::vector<int64_t> perm;
1635
+ bool has_perm_attr = getRepeatedAttribute(ctx, "perm", perm);
1636
+ if (!has_perm_attr) {
1637
+ perm.reserve(shape.dim_size());
1638
+ for (int i = shape.dim_size() - 1; i >= 0; --i)
1639
+ perm.push_back(i);
1640
+ } else if (!perm.empty()) {
1641
+ // check if every index is valid
1642
+ std::vector<bool> seen(shape.dim_size(), false);
1643
+ for (int64_t fromDimIndex : perm) {
1644
+ if (!(0 <= fromDimIndex && fromDimIndex < shape.dim_size())) {
1645
+ std::ostringstream oss;
1646
+ oss << "Invalid attribute perm {" << perm[0];
1647
+ for (size_t i = 1; i != perm.size(); ++i) {
1648
+ oss << ", " << perm[i];
1649
+ }
1650
+ oss << "}, input shape = {";
1651
+ if (shape.dim_size() > 0) {
1652
+ oss << shape.dim(0).dim_value();
1653
+ for (int i = 1; i != shape.dim_size(); ++i) {
1654
+ oss << ", " << shape.dim(i).dim_value();
1655
+ }
1656
+ oss << "}";
1657
+ }
1658
+ fail_type_inference(oss.str());
1659
+ } else {
1660
+ // check if any perm is repeated
1661
+ if (seen[fromDimIndex]) {
1662
+ fail_type_inference("Attribute perm for Transpose has repeated value: ", fromDimIndex);
1663
+ }
1664
+ seen[fromDimIndex] = true;
1665
+ }
1666
+ }
1667
+ }
1668
+
1669
+ getOutputShape(ctx, 0);
1670
+
1671
+ propagateElemTypeFromInputToOutput(ctx, 0, 0);
1672
+ for (size_t i = 0; i < perm.size(); ++i) {
1673
+ appendSingleDimCopiedFromInputTypeToOutputType(ctx, 0, 0, static_cast<size_t>(perm[i]));
1674
+ }
1675
+ }));
1676
+
1193
1677
  static const char* Transpose_ver1_doc = R"DOC(
1194
1678
  Transpose the input tensor similar to numpy.transpose. For example, when
1195
1679
  perm=(1, 0, 2), given an input tensor of shape (1, 2, 3), the output shape
@@ -2043,6 +2527,106 @@ ONNX_OPERATOR_SET_SCHEMA(
2043
2527
  }
2044
2528
  }));
2045
2529
 
2530
+ static const char* Squeeze_ver13_doc = R"DOC(
2531
+ Remove single-dimensional entries from the shape of a tensor.
2532
+ Takes an input `axes` with a list of axes to squeeze.
2533
+ If `axes` is not provided, all the single dimensions will be removed from
2534
+ the shape. If an axis is selected with shape entry not equal to one, an error is raised.
2535
+ )DOC";
2536
+
2537
+ ONNX_OPERATOR_SET_SCHEMA(
2538
+ Squeeze,
2539
+ 13,
2540
+ OpSchema()
2541
+ .SetDoc(Squeeze_ver13_doc)
2542
+ .Input(
2543
+ 0,
2544
+ "data",
2545
+ "Tensors with at least max(dims) dimensions.",
2546
+ "T",
2547
+ OpSchema::Single,
2548
+ true,
2549
+ 1,
2550
+ OpSchema::Differentiable)
2551
+ .Input(
2552
+ 1,
2553
+ "axes",
2554
+ "List of integers indicating the dimensions to squeeze. Negative value means counting dimensions "
2555
+ "from the back. Accepted range is [-r, r-1] where r = rank(data).",
2556
+ "tensor(int64)",
2557
+ OpSchema::Optional,
2558
+ true,
2559
+ 1,
2560
+ OpSchema::NonDifferentiable)
2561
+ .Output(
2562
+ 0,
2563
+ "squeezed",
2564
+ "Reshaped tensor with same data as input.",
2565
+ "T",
2566
+ OpSchema::Single,
2567
+ true,
2568
+ 1,
2569
+ OpSchema::Differentiable)
2570
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
2571
+ .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
2572
+ propagateElemTypeFromInputToOutput(ctx, 0, 0);
2573
+ if (!hasNInputShapes(ctx, 1)) {
2574
+ return;
2575
+ }
2576
+
2577
+ std::vector<int64_t> axes;
2578
+ size_t num_inputs = ctx.getNumInputs();
2579
+ bool axes_not_specified = false;
2580
+
2581
+ if ((num_inputs == 2) && ctx.getInputType(1)) { //'axes' is input
2582
+ auto axes_proto = ctx.getInputData(1);
2583
+ if (axes_proto == nullptr) {
2584
+ // skip if axes is not an initializer
2585
+ return;
2586
+ }
2587
+ axes = ParseData<int64_t>(axes_proto);
2588
+ } else {
2589
+ // axes not specified
2590
+ axes_not_specified = true;
2591
+ }
2592
+
2593
+ const auto& input_shape = ctx.getInputType(0)->tensor_type().shape();
2594
+ const auto input_ndim = input_shape.dim_size();
2595
+ checkAxesRange(axes, input_ndim);
2596
+ adjustNegativeAxes(axes, input_ndim);
2597
+
2598
+ for (int i = 0; i < input_ndim; ++i) {
2599
+ if (!input_shape.dim(i).has_dim_value() && axes_not_specified) {
2600
+ // if dim has a symbolic value and the axes spec want to act on all dims,
2601
+ // return early because we can't infer the shape
2602
+ return;
2603
+ }
2604
+ }
2605
+
2606
+ ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape();
2607
+
2608
+ for (int i = 0; i < input_ndim; ++i) {
2609
+ if (axes_not_specified && input_shape.dim(i).dim_value() == 1) {
2610
+ // if axes not specified, do not keep shape if the dimension is equal to one
2611
+ continue;
2612
+ } else if (!axes_not_specified && std::find(axes.begin(), axes.end(), i) != axes.end()) {
2613
+ // if axes wants to explicitly act on this dim, fail explicitly only if the
2614
+ // dim is numerical and != 1. If the dim is 1 or symbolic, remove it. If
2615
+ // the dim is symbolic, runtime engines should check that the dimension is
2616
+ // actually 1 when the op is evaluated
2617
+ if (input_shape.dim(i).has_dim_value() && input_shape.dim(i).dim_value() != 1) {
2618
+ fail_shape_inference(
2619
+ "Dimension of input ", i, " must be 1 instead of ", input_shape.dim(i).dim_value());
2620
+ }
2621
+ } else {
2622
+ *ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape()->add_dim() = input_shape.dim(i);
2623
+ }
2624
+ }
2625
+ })
2626
+ .PartialDataPropagationFunction([](DataPropagationContext& ctx) {
2627
+ PropagateShapeDataFromInputToOutput(ctx, 0);
2628
+ }));
2629
+
2046
2630
  static const char* Squeeze_ver11_doc = R"DOC(
2047
2631
  Remove single-dimensional entries from the shape of a tensor.
2048
2632
  Takes a parameter `axes` with a list of axes to squeeze.
@@ -2069,33 +2653,121 @@ ONNX_OPERATOR_SET_SCHEMA(
2069
2653
  if (!hasNInputShapes(ctx, 1)) {
2070
2654
  return;
2071
2655
  }
2072
-
2656
+
2657
+ if (!ctx.getInputType(0)->tensor_type().has_shape()) {
2658
+ return;
2659
+ }
2660
+
2661
+ ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape();
2662
+ const auto& input_shape = ctx.getInputType(0)->tensor_type().shape();
2663
+ const auto input_ndim = input_shape.dim_size();
2664
+ std::vector<int64_t> axes;
2665
+ if (!getRepeatedAttribute(ctx, "axes", axes)) {
2666
+ for (int i = 0; i < input_ndim; ++i) {
2667
+ if (!input_shape.dim(i).has_dim_value()) {
2668
+ return;
2669
+ }
2670
+ if (input_shape.dim(i).dim_value() == 1) {
2671
+ axes.push_back(i);
2672
+ }
2673
+ }
2674
+ }
2675
+
2676
+ std::transform(axes.begin(), axes.end(), axes.begin(), [&](int64_t axis) -> int64_t {
2677
+ return axis < 0 ? axis + input_ndim : axis;
2678
+ });
2679
+
2680
+ for (int i = 0; i < input_ndim; ++i) {
2681
+ if (std::find(axes.begin(), axes.end(), i) != axes.end()) {
2682
+ if (input_shape.dim(i).has_dim_value() && input_shape.dim(i).dim_value() != 1) {
2683
+ fail_shape_inference(
2684
+ "Dimension of input ", i, " must be 1 instead of ", input_shape.dim(i).dim_value());
2685
+ }
2686
+ } else {
2687
+ *ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape()->add_dim() = input_shape.dim(i);
2688
+ }
2689
+ }
2690
+ }));
2691
+
2692
+ static const char* Unsqueeze_ver13_doc = R"DOC(
2693
+ Insert single-dimensional entries to the shape of an input tensor (`data`).
2694
+ Takes one required input `axes` - which contains a list of dimension indices and this operator will insert a dimension of value `1` into the corresponding index of the output tensor (`expanded`).
2695
+
2696
+ For example, given an input tensor (`data`) of shape [3, 4, 5], then
2697
+ Unsqueeze(data, axes=[0, 4]) outputs a tensor (`expanded`) containing same data as `data` but with shape [1, 3, 4, 5, 1].
2698
+
2699
+ The input `axes` should not contain any duplicate entries. It is an error if it contains duplicates.
2700
+ The rank of the output tensor (`output_rank`) is the rank of the input tensor (`data`) plus the number of values in `axes`.
2701
+ Each value in `axes` should be within the (inclusive) range [-output_rank , output_rank - 1].
2702
+ The order of values in `axes` does not matter and can come in any order.
2703
+ )DOC";
2704
+
2705
+ ONNX_OPERATOR_SET_SCHEMA(
2706
+ Unsqueeze,
2707
+ 13,
2708
+ OpSchema()
2709
+ .SetDoc(Unsqueeze_ver13_doc)
2710
+ .Input(0, "data", "Original tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable)
2711
+ .Input(
2712
+ 1,
2713
+ "axes",
2714
+ "List of integers indicating the dimensions to be inserted. Negative value means counting dimensions "
2715
+ "from the back. Accepted range is [-r, r-1] where r = rank(expanded).",
2716
+ "tensor(int64)",
2717
+ OpSchema::Single,
2718
+ true,
2719
+ 1,
2720
+ OpSchema::NonDifferentiable)
2721
+ .Output(
2722
+ 0,
2723
+ "expanded",
2724
+ "Reshaped tensor with same data as input.",
2725
+ "T",
2726
+ OpSchema::Single,
2727
+ true,
2728
+ 1,
2729
+ OpSchema::Differentiable)
2730
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
2731
+ .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
2732
+ propagateElemTypeFromInputToOutput(ctx, 0, 0);
2733
+ if (!hasNInputShapes(ctx, 1)) {
2734
+ return;
2735
+ }
2073
2736
  std::vector<int64_t> axes;
2074
- if (!getRepeatedAttribute(ctx, "axes", axes)) {
2737
+ auto axes_proto = ctx.getInputData(1);
2738
+ if (axes_proto == nullptr) {
2739
+ // skip if axes is not an initializer
2075
2740
  return;
2076
2741
  }
2077
-
2078
- if (!ctx.getInputType(0)->tensor_type().has_shape()) {
2079
- return;
2080
- }
2081
-
2742
+ axes = ParseData<int64_t>(axes_proto);
2082
2743
  ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape();
2083
2744
  const auto& input_shape = ctx.getInputType(0)->tensor_type().shape();
2084
2745
  const auto input_ndim = input_shape.dim_size();
2085
- std::transform(axes.begin(), axes.end(), axes.begin(), [&](int64_t axis) -> int64_t {
2086
- return axis < 0 ? axis + input_ndim : axis;
2087
- });
2746
+ const auto output_ndim = input_ndim + static_cast<int>(axes.size());
2747
+ checkAxesRange(axes, output_ndim);
2748
+ adjustNegativeAxes(axes, output_ndim);
2749
+ checkDuplicateAxes(axes, output_ndim);
2750
+ // sort after correcting negative axes values (if any)
2751
+ std::sort(axes.begin(), axes.end());
2088
2752
 
2753
+ int j = 0;
2089
2754
  for (int i = 0; i < input_ndim; ++i) {
2090
- if (std::find(axes.begin(), axes.end(), i) != axes.end()) {
2091
- if (input_shape.dim(i).has_dim_value() && input_shape.dim(i).dim_value() != 1) {
2092
- fail_shape_inference(
2093
- "Dimension of input ", i, " must be 1 instead of ", input_shape.dim(i).dim_value());
2094
- }
2095
- } else {
2096
- *ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape()->add_dim() = input_shape.dim(i);
2755
+ while (static_cast<size_t>(j) < axes.size() &&
2756
+ axes[j] == ctx.getOutputType(0)->tensor_type().shape().dim_size()) {
2757
+ ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape()->add_dim()->set_dim_value(1);
2758
+ ++j;
2097
2759
  }
2760
+ *ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape()->add_dim() =
2761
+ ctx.getInputType(0)->tensor_type().shape().dim(i);
2098
2762
  }
2763
+ while (static_cast<size_t>(j) < axes.size() &&
2764
+ axes[j] == ctx.getOutputType(0)->tensor_type().shape().dim_size()) {
2765
+ ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape()->add_dim()->set_dim_value(1);
2766
+ ++j;
2767
+ }
2768
+ })
2769
+ .PartialDataPropagationFunction([](DataPropagationContext& ctx) {
2770
+ PropagateShapeDataFromInputToOutput(ctx, 0);
2099
2771
  }));
2100
2772
 
2101
2773
  static const char* Unsqueeze_ver11_doc = R"DOC(
@@ -2648,7 +3320,7 @@ Each dimension value of the output tensor is:
2648
3320
  output_dimension = floor(input_dimension * (roi_end - roi_start) * scale) if input \"sizes\" is not specified.
2649
3321
  )DOC";
2650
3322
 
2651
- static const char* Resize_attr_coordinate_transformation_mode_doc = R"DOC(
3323
+ static const char* Resize_attr_coordinate_transformation_mode_ver11_doc = R"DOC(
2652
3324
  This attribute describes how to transform the coordinate in the resized tensor to the coordinate in the original tensor. <br/>
2653
3325
 
2654
3326
  The coordinate of each dimension is transformed individually. Let's describe a case using axis x as an example.
@@ -2698,7 +3370,7 @@ ONNX_OPERATOR_SET_SCHEMA(
2698
3370
  static_cast<int64_t>(0))
2699
3371
  .Attr(
2700
3372
  "coordinate_transformation_mode",
2701
- Resize_attr_coordinate_transformation_mode_doc,
3373
+ Resize_attr_coordinate_transformation_mode_ver11_doc,
2702
3374
  AttributeProto::STRING,
2703
3375
  std::string("half_pixel"))
2704
3376
  .Attr(
@@ -2740,6 +3412,26 @@ ONNX_OPERATOR_SET_SCHEMA(
2740
3412
  .SetDoc(Resize_ver11_doc)
2741
3413
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) { resizeShapeInference_opset11_to_12(ctx); }));
2742
3414
 
3415
+ ONNX_OPERATOR_SET_SCHEMA(
3416
+ Identity,
3417
+ 19,
3418
+ OpSchema()
3419
+ .SetDoc("Identity operator")
3420
+ .Input(0, "input", "Input tensor", "V", OpSchema::Single, true, 1, OpSchema::Differentiable)
3421
+ .Output(0, "output", "Tensor to copy input into.", "V", OpSchema::Single, true, 1, OpSchema::Differentiable)
3422
+ .TypeConstraint(
3423
+ "V",
3424
+ []() {
3425
+ auto t = OpSchema::all_tensor_types_ir9();
3426
+ auto s = OpSchema::all_tensor_sequence_types();
3427
+ auto o = OpSchema::all_optional_types();
3428
+ t.insert(t.end(), s.begin(), s.end());
3429
+ t.insert(t.end(), o.begin(), o.end());
3430
+ return t;
3431
+ }(),
3432
+ "Constrain input and output types to all tensor, sequence, and optional types.")
3433
+ .TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));
3434
+
2743
3435
  ONNX_OPERATOR_SET_SCHEMA(
2744
3436
  Identity,
2745
3437
  13,
@@ -3015,6 +3707,115 @@ ONNX_OPERATOR_SET_SCHEMA(
3015
3707
  }
3016
3708
  }));
3017
3709
 
3710
+ static const char* Pad_ver19_doc = R"DOC(
3711
+ Given a tensor containing the data to be padded (`data`), a tensor containing the number of start and end pad values for axis (`pads`), (optionally) a `mode`, and (optionally) `constant_value`,
3712
+ a padded tensor (`output`) is generated.
3713
+
3714
+ The three supported `modes` are (similar to corresponding modes supported by `numpy.pad`):
3715
+
3716
+ 1) `constant`(default) - pads with a given constant value as specified by `constant_value` (which defaults to 0, empty string, or False)
3717
+
3718
+ 2) `reflect` - pads with the reflection of the vector mirrored on the first and last values of the vector along each axis
3719
+
3720
+ 3) `edge` - pads with the edge values of array
3721
+
3722
+ 4) `wrap` - wrap-around padding as if the data tensor forms a torus
3723
+
3724
+
3725
+ Example 1 (`constant` mode):
3726
+
3727
+ Insert 0 pads to the beginning of the second dimension.
3728
+
3729
+ ```
3730
+ data = [
3731
+ [1.0, 1.2],
3732
+ [2.3, 3.4],
3733
+ [4.5, 5.7],
3734
+ ]
3735
+
3736
+ pads = [0, 2, 0, 0]
3737
+
3738
+ mode = 'constant'
3739
+
3740
+ constant_value = 0.0
3741
+
3742
+ output = [
3743
+ [0.0, 0.0, 1.0, 1.2],
3744
+ [0.0, 0.0, 2.3, 3.4],
3745
+ [0.0, 0.0, 4.5, 5.7],
3746
+ ]
3747
+ ```
3748
+
3749
+ Example 2 (`reflect` mode):
3750
+
3751
+ ```
3752
+ data = [
3753
+ [1.0, 1.2],
3754
+ [2.3, 3.4],
3755
+ [4.5, 5.7],
3756
+ ]
3757
+
3758
+ pads = [0, 2, 0, 0]
3759
+
3760
+ mode = 'reflect'
3761
+
3762
+ output = [
3763
+ [1.0, 1.2, 1.0, 1.2],
3764
+ [2.3, 3.4, 2.3, 3.4],
3765
+ [4.5, 5.7, 4.5, 5.7],
3766
+ ]
3767
+ ```
3768
+
3769
+ Example 3 (`edge` mode):
3770
+
3771
+ ```
3772
+ data = [
3773
+ [1.0, 1.2],
3774
+ [2.3, 3.4],
3775
+ [4.5, 5.7],
3776
+ ]
3777
+
3778
+ pads = [0, 2, 0, 0]
3779
+
3780
+ mode = 'edge'
3781
+
3782
+ output = [
3783
+ [1.0, 1.0, 1.0, 1.2],
3784
+ [2.3, 2.3, 2.3, 3.4],
3785
+ [4.5, 4.5, 4.5, 5.7],
3786
+ ]
3787
+ ```
3788
+
3789
+ Example 4 (`wrap` mode):
3790
+
3791
+ ```
3792
+ data = [
3793
+ [1.0, 1.2],
3794
+ [2.3, 3.4],
3795
+ [4.5, 5.7],
3796
+ ]
3797
+
3798
+ pads = [2, 1, 1, 1]
3799
+
3800
+ mode = 'wrap'
3801
+
3802
+ output = [
3803
+ [3.4, 2.3, 3.4, 2.3],
3804
+ [5.7, 4.5, 5.7, 4.5],
3805
+ [1.2, 1.0, 1.2, 1.0],
3806
+ [3.4, 2.3, 3.4, 2.3],
3807
+ [5.7, 4.5, 5.7, 4.5],
3808
+ [1.2, 1.0, 1.2, 1.0],
3809
+ ]
3810
+ ```
3811
+ )DOC";
3812
+
3813
+ ONNX_OPERATOR_SET_SCHEMA(
3814
+ Pad,
3815
+ 19,
3816
+ OpSchema().FillUsing(
3817
+ PadDocGenerator(Pad_ver19_doc, "Supported modes: `constant`(default), `reflect`, `edge`, `wrap`")));
3818
+
3018
3819
  static const char* Pad_ver11_doc = R"DOC(
3019
3820
  Given a tensor containing the data to be padded (`data`), a tensor containing the number of start and end pad values for axis (`pads`), (optionally) a `mode`, and (optionally) `constant_value`,
3020
3821
  a padded tensor (`output`) is generated.
@@ -3716,9 +4517,7 @@ ONNX_OPERATOR_SET_SCHEMA(
3716
4517
  }
3717
4518
 
3718
4519
  auto is_negative = [](int64_t index) { return index < 0; };
3719
- if (std::any_of(starts.begin(), starts.end(), is_negative) ||
3720
- std::any_of(ends.begin(), ends.end(), is_negative) ||
3721
- std::any_of(axes.begin(), axes.end(), is_negative)) {
4520
+ if (std::any_of(axes.begin(), axes.end(), is_negative)) {
3722
4521
  // Negative axes were not explicitly discussed in the spec before opset-10.
3723
4522
  // Hence, they are officially not part of the spec, but some models/runtimes may use them.
3724
4523
  // So we perform simple rank inference in this case.
@@ -3735,13 +4534,20 @@ ONNX_OPERATOR_SET_SCHEMA(
3735
4534
  if (j < axes.size() && static_cast<size_t>(axes[j]) == i) {
3736
4535
  // There's a lot of potential behaviors. For now just
3737
4536
  // handle some simple cases.
3738
- if (ctx.getInputType(0)->tensor_type().shape().dim((int)i).has_dim_value() && starts[j] >= 0 &&
3739
- ends[j] >= 0) {
3740
- auto newval =
3741
- std::min((int64_t)ctx.getInputType(0)->tensor_type().shape().dim((int)i).dim_value(), ends[j]) -
3742
- starts[j];
3743
- if (newval >= 0) {
3744
- newdim->set_dim_value(newval);
4537
+ const auto& dim = ctx.getInputType(0)->tensor_type().shape().dim((int)i);
4538
+ if (dim.has_dim_value()) {
4539
+ auto dim_value = dim.dim_value();
4540
+ if (starts[j] < 0) {
4541
+ starts[j] += dim_value;
4542
+ }
4543
+ if (ends[j] < 0) {
4544
+ ends[j] += dim_value;
4545
+ }
4546
+ if (starts[j] >= 0 && ends[j] >= 0) {
4547
+ auto newval = std::min(dim_value, ends[j]) - starts[j];
4548
+ if (newval >= 0) {
4549
+ newdim->set_dim_value(newval);
4550
+ }
3745
4551
  }
3746
4552
  }
3747
4553
  ++j;
@@ -4187,17 +4993,24 @@ ONNX_OPERATOR_SET_SCHEMA(
4187
4993
  return;
4188
4994
  }
4189
4995
 
4190
- std::vector<int64_t> axes;
4191
- if (!getRepeatedAttribute(ctx, "axes", axes)) {
4192
- return;
4193
- }
4194
-
4195
4996
  if (!ctx.getInputType(0)->tensor_type().has_shape()) {
4196
4997
  return;
4197
4998
  }
4198
4999
 
4199
5000
  ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape();
4200
5001
  const auto& input_shape = ctx.getInputType(0)->tensor_type().shape();
5002
+ const auto input_ndim = input_shape.dim_size();
5003
+ std::vector<int64_t> axes;
5004
+ if (!getRepeatedAttribute(ctx, "axes", axes)) {
5005
+ for (int i = 0; i < input_ndim; ++i) {
5006
+ if (!input_shape.dim(i).has_dim_value()) {
5007
+ return;
5008
+ }
5009
+ if (input_shape.dim(i).dim_value() == 1) {
5010
+ axes.push_back(i);
5011
+ }
5012
+ }
5013
+ }
4201
5014
 
4202
5015
  for (int i = 0, j = 0; i < input_shape.dim_size(); ++i) {
4203
5016
  if (static_cast<size_t>(j) < axes.size() && axes[j] == i) {
@@ -5252,6 +6065,68 @@ Output: [3]
5252
6065
  ```
5253
6066
  )DOC";
5254
6067
 
6068
+ ONNX_OPERATOR_SET_SCHEMA(
6069
+ Shape,
6070
+ 19,
6071
+ OpSchema()
6072
+ .SetDoc(Shape_ver15_doc)
6073
+ .Input(0, "data", "An input tensor.", "T", OpSchema::Single, true, 1, OpSchema::NonDifferentiable)
6074
+ .Output(0, "shape", "Shape of the input tensor", "T1", OpSchema::Single, true, 1, OpSchema::NonDifferentiable)
6075
+ .Attr(
6076
+ "start",
6077
+ "(Optional) Starting axis for slicing the shape. Default value is 0."
6078
+ "Negative value means counting dimensions from the back.",
6079
+ AttributeProto::INT,
6080
+ static_cast<int64_t>(0))
6081
+ .Attr(
6082
+ "end",
6083
+ "(Optional) Ending axis for slicing the shape. "
6084
+ "Negative value means counting dimensions from the back. "
6085
+ "If omitted, sizes of all axes upto (including) the last one will be included.",
6086
+ AttributeProto::INT,
6087
+ OPTIONAL_VALUE)
6088
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir9(), "Input tensor can be of arbitrary type.")
6089
+ .TypeConstraint("T1", {"tensor(int64)"}, "Constrain output to int64 tensor.")
6090
+ .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
6091
+ ctx.getOutputType(0)->mutable_tensor_type()->set_elem_type(TensorProto::INT64);
6092
+ auto* output_shape = ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape();
6093
+ auto* output_length = output_shape->add_dim();
6094
+
6095
+ if (!hasNInputShapes(ctx, 1)) {
6096
+ return;
6097
+ }
6098
+
6099
+ int64_t rank = static_cast<int64_t>(ctx.getInputType(0)->tensor_type().shape().dim_size());
6100
+ int64_t start = getAttribute(ctx, "start", 0);
6101
+ if (start < 0)
6102
+ start += rank;
6103
+ start = (start < 0) ? 0 : (start > rank) ? rank : start;
6104
+ int64_t end = getAttribute(ctx, "end", rank);
6105
+ if (end < 0)
6106
+ end += rank;
6107
+ end = (end < 0) ? 0 : (end > rank) ? rank : end;
6108
+ output_length->set_dim_value((end - start) < 0 ? 0 : (end - start));
6109
+ })
6110
+ .PartialDataPropagationFunction([](DataPropagationContext& ctx) {
6111
+ if (hasInputShape(ctx, 0)) {
6112
+ auto& input_shape = ctx.getInputType(0)->tensor_type().shape();
6113
+ int64_t rank = static_cast<int64_t>(input_shape.dim_size());
6114
+ int64_t start = getAttribute(ctx, "start", 0);
6115
+ if (start < 0)
6116
+ start += rank;
6117
+ start = (start < 0) ? 0 : (start > rank) ? rank : start;
6118
+ int64_t end = getAttribute(ctx, "end", rank);
6119
+ if (end < 0)
6120
+ end += rank;
6121
+ end = (end < 0) ? 0 : (end > rank) ? rank : end;
6122
+ TensorShapeProto output_shape;
6123
+ for (int64_t d = start; d < end; ++d) {
6124
+ *output_shape.add_dim() = input_shape.dim(static_cast<int>(d));
6125
+ }
6126
+ ctx.addOutputData(0, std::move(output_shape));
6127
+ }
6128
+ }));
6129
+
5255
6130
  ONNX_OPERATOR_SET_SCHEMA(
5256
6131
  Shape,
5257
6132
  15,
@@ -5318,6 +6193,36 @@ static const char* Size_ver13_doc = R"DOC(
5318
6193
  Takes a tensor as input and outputs a int64 scalar that equals to the total number of elements of the input tensor.
5319
6194
  )DOC";
5320
6195
 
6196
+ ONNX_OPERATOR_SET_SCHEMA(
6197
+ Size,
6198
+ 19,
6199
+ OpSchema()
6200
+ .SetDoc(Size_ver13_doc)
6201
+ .Input(0, "data", "An input tensor.", "T", OpSchema::Single, true, 1, OpSchema::NonDifferentiable)
6202
+ .Output(
6203
+ 0,
6204
+ "size",
6205
+ "Total number of elements of the input tensor",
6206
+ "T1",
6207
+ OpSchema::Single,
6208
+ true,
6209
+ 1,
6210
+ OpSchema::NonDifferentiable)
6211
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir9(), "Input tensor can be of arbitrary type.")
6212
+ .TypeConstraint("T1", {"tensor(int64)"}, "Constrain output to int64 tensor, which should be a scalar though.")
6213
+ .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
6214
+ ctx.getOutputType(0)->mutable_tensor_type()->set_elem_type(TensorProto::INT64);
6215
+ ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape();
6216
+ })
6217
+ .PartialDataPropagationFunction([](DataPropagationContext& ctx) {
6218
+ const auto input_data = ctx.getInputData(0);
6219
+ if (input_data != nullptr) {
6220
+ TensorShapeProto tsp;
6221
+ tsp.mutable_dim()->Add()->set_dim_value(input_data->dim_size());
6222
+ ctx.addOutputData(0, std::move(tsp));
6223
+ }
6224
+ }));
6225
+
5321
6226
  ONNX_OPERATOR_SET_SCHEMA(
5322
6227
  Size,
5323
6228
  13,