onnx 1.15.0__cp311-cp311-win_amd64.whl → 1.16.1__cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of onnx might be problematic.

Files changed (584)
  1. onnx/__init__.py +10 -10
  2. onnx/backend/base.py +13 -14
  3. onnx/backend/sample/ops/abs.py +1 -1
  4. onnx/backend/test/case/model/__init__.py +0 -1
  5. onnx/backend/test/case/node/ai_onnx_ml/tree_ensemble.py +122 -0
  6. onnx/backend/test/case/node/averagepool.py +15 -30
  7. onnx/backend/test/case/node/cast.py +88 -11
  8. onnx/backend/test/case/node/dequantizelinear.py +155 -0
  9. onnx/backend/test/case/node/groupnormalization.py +13 -9
  10. onnx/backend/test/case/node/gru.py +2 -2
  11. onnx/backend/test/case/node/isinf.py +4 -4
  12. onnx/backend/test/case/node/isnan.py +2 -2
  13. onnx/backend/test/case/node/lppool.py +8 -16
  14. onnx/backend/test/case/node/lstm.py +1 -1
  15. onnx/backend/test/case/node/maxpool.py +40 -34
  16. onnx/backend/test/case/node/pow.py +1 -1
  17. onnx/backend/test/case/node/qlinearmatmul.py +143 -109
  18. onnx/backend/test/case/node/quantizelinear.py +298 -7
  19. onnx/backend/test/case/node/reducemax.py +26 -0
  20. onnx/backend/test/case/node/rnn.py +1 -1
  21. onnx/backend/test/case/node/scan.py +6 -2
  22. onnx/backend/test/case/node/scatterelements.py +1 -1
  23. onnx/backend/test/case/node/topk.py +1 -1
  24. onnx/backend/test/case/utils.py +1 -3
  25. onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_set_membership/model.onnx +0 -0
  26. onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_set_membership/test_data_set_0/input_0.pb +0 -0
  27. onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_set_membership/test_data_set_0/output_0.pb +0 -0
  28. onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_single_tree/model.onnx +0 -0
  29. onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_single_tree/test_data_set_0/input_0.pb +1 -0
  30. onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_single_tree/test_data_set_0/output_0.pb +0 -0
  31. onnx/backend/test/data/node/test_cast_BFLOAT16_to_FLOAT/model.onnx +0 -0
  32. onnx/backend/test/data/node/test_cast_DOUBLE_to_FLOAT/model.onnx +0 -0
  33. onnx/backend/test/data/node/test_cast_DOUBLE_to_FLOAT16/model.onnx +0 -0
  34. onnx/backend/test/data/node/test_cast_FLOAT16_to_DOUBLE/model.onnx +0 -0
  35. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT/model.onnx +0 -0
  36. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FN/model.onnx +0 -0
  37. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FN/test_data_set_0/input_0.pb +2 -2
  38. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FN/test_data_set_0/output_0.pb +0 -0
  39. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
  40. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FNUZ/test_data_set_0/input_0.pb +2 -2
  41. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E4M3FNUZ/test_data_set_0/output_0.pb +0 -0
  42. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2/model.onnx +0 -0
  43. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2/test_data_set_0/input_0.pb +2 -2
  44. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2/test_data_set_0/output_0.pb +0 -0
  45. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
  46. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2FNUZ/test_data_set_0/input_0.pb +2 -2
  47. onnx/backend/test/data/node/test_cast_FLOAT16_to_FLOAT8E5M2FNUZ/test_data_set_0/output_0.pb +0 -0
  48. onnx/backend/test/data/node/test_cast_FLOAT16_to_INT4/model.onnx +0 -0
  49. onnx/backend/test/data/node/test_cast_FLOAT16_to_INT4/test_data_set_0/input_0.pb +0 -0
  50. onnx/backend/test/data/node/test_cast_FLOAT16_to_INT4/test_data_set_0/output_0.pb +1 -0
  51. onnx/backend/test/data/node/test_cast_FLOAT16_to_UINT4/model.onnx +0 -0
  52. onnx/backend/test/data/node/test_cast_FLOAT16_to_UINT4/test_data_set_0/input_0.pb +0 -0
  53. onnx/backend/test/data/node/test_cast_FLOAT16_to_UINT4/test_data_set_0/output_0.pb +0 -0
  54. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT/model.onnx +0 -0
  55. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT/test_data_set_0/input_0.pb +0 -0
  56. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT/test_data_set_0/output_0.pb +0 -0
  57. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT16/model.onnx +0 -0
  58. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT16/test_data_set_0/input_0.pb +0 -0
  59. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FNUZ_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
  60. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT/model.onnx +0 -0
  61. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT/test_data_set_0/input_0.pb +0 -0
  62. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT/test_data_set_0/output_0.pb +0 -0
  63. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT16/model.onnx +0 -0
  64. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT16/test_data_set_0/input_0.pb +0 -0
  65. onnx/backend/test/data/node/test_cast_FLOAT8E4M3FN_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
  66. onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT/model.onnx +0 -0
  67. onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT/test_data_set_0/input_0.pb +0 -0
  68. onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT/test_data_set_0/output_0.pb +0 -0
  69. onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT16/model.onnx +0 -0
  70. onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT16/test_data_set_0/input_0.pb +0 -0
  71. onnx/backend/test/data/node/test_cast_FLOAT8E5M2FNUZ_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
  72. onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT/model.onnx +0 -0
  73. onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT/test_data_set_0/input_0.pb +0 -0
  74. onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT/test_data_set_0/output_0.pb +0 -0
  75. onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT16/model.onnx +0 -0
  76. onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT16/test_data_set_0/input_0.pb +0 -0
  77. onnx/backend/test/data/node/test_cast_FLOAT8E5M2_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
  78. onnx/backend/test/data/node/test_cast_FLOAT_to_BFLOAT16/model.onnx +0 -0
  79. onnx/backend/test/data/node/test_cast_FLOAT_to_DOUBLE/model.onnx +0 -0
  80. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT16/model.onnx +0 -0
  81. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FN/model.onnx +0 -0
  82. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/input_0.pb +0 -0
  83. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/output_0.pb +0 -0
  84. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
  85. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/input_0.pb +0 -0
  86. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/output_0.pb +0 -0
  87. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2/model.onnx +0 -0
  88. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2/test_data_set_0/input_0.pb +0 -0
  89. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2/test_data_set_0/output_0.pb +0 -0
  90. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
  91. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/input_0.pb +0 -0
  92. onnx/backend/test/data/node/test_cast_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/output_0.pb +0 -0
  93. onnx/backend/test/data/node/test_cast_FLOAT_to_INT4/model.onnx +0 -0
  94. onnx/backend/test/data/node/test_cast_FLOAT_to_INT4/test_data_set_0/input_0.pb +0 -0
  95. onnx/backend/test/data/node/test_cast_FLOAT_to_INT4/test_data_set_0/output_0.pb +1 -0
  96. onnx/backend/test/data/node/test_cast_FLOAT_to_STRING/model.onnx +0 -0
  97. onnx/backend/test/data/node/test_cast_FLOAT_to_UINT4/model.onnx +0 -0
  98. onnx/backend/test/data/node/test_cast_FLOAT_to_UINT4/test_data_set_0/input_0.pb +0 -0
  99. onnx/backend/test/data/node/test_cast_FLOAT_to_UINT4/test_data_set_0/output_0.pb +0 -0
  100. onnx/backend/test/data/node/test_cast_INT4_to_FLOAT/model.onnx +0 -0
  101. onnx/backend/test/data/node/test_cast_INT4_to_FLOAT/test_data_set_0/input_0.pb +1 -0
  102. onnx/backend/test/data/node/test_cast_INT4_to_FLOAT/test_data_set_0/output_0.pb +0 -0
  103. onnx/backend/test/data/node/test_cast_INT4_to_FLOAT16/model.onnx +0 -0
  104. onnx/backend/test/data/node/test_cast_INT4_to_FLOAT16/test_data_set_0/input_0.pb +1 -0
  105. onnx/backend/test/data/node/test_cast_INT4_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
  106. onnx/backend/test/data/node/test_cast_INT4_to_INT8/model.onnx +0 -0
  107. onnx/backend/test/data/node/test_cast_INT4_to_INT8/test_data_set_0/input_0.pb +1 -0
  108. onnx/backend/test/data/node/test_cast_INT4_to_INT8/test_data_set_0/output_0.pb +0 -0
  109. onnx/backend/test/data/node/test_cast_STRING_to_FLOAT/model.onnx +0 -0
  110. onnx/backend/test/data/node/test_cast_UINT4_to_FLOAT/model.onnx +0 -0
  111. onnx/backend/test/data/node/test_cast_UINT4_to_FLOAT/test_data_set_0/input_0.pb +0 -0
  112. onnx/backend/test/data/node/test_cast_UINT4_to_FLOAT/test_data_set_0/output_0.pb +0 -0
  113. onnx/backend/test/data/node/test_cast_UINT4_to_FLOAT16/model.onnx +0 -0
  114. onnx/backend/test/data/node/test_cast_UINT4_to_FLOAT16/test_data_set_0/input_0.pb +0 -0
  115. onnx/backend/test/data/node/test_cast_UINT4_to_FLOAT16/test_data_set_0/output_0.pb +0 -0
  116. onnx/backend/test/data/node/test_cast_UINT4_to_UINT8/model.onnx +0 -0
  117. onnx/backend/test/data/node/test_cast_UINT4_to_UINT8/test_data_set_0/input_0.pb +0 -0
  118. onnx/backend/test/data/node/test_cast_UINT4_to_UINT8/test_data_set_0/output_0.pb +0 -0
  119. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FN/model.onnx +0 -0
  120. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FN/test_data_set_0/input_0.pb +2 -2
  121. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FN/test_data_set_0/output_0.pb +0 -0
  122. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
  123. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FNUZ/test_data_set_0/input_0.pb +2 -2
  124. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FNUZ/test_data_set_0/output_0.pb +0 -0
  125. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2/model.onnx +0 -0
  126. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2/test_data_set_0/input_0.pb +2 -2
  127. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2/test_data_set_0/output_0.pb +0 -0
  128. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
  129. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2FNUZ/test_data_set_0/input_0.pb +2 -2
  130. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2FNUZ/test_data_set_0/output_0.pb +0 -0
  131. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FN/model.onnx +0 -0
  132. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/input_0.pb +0 -0
  133. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FN/test_data_set_0/output_0.pb +0 -0
  134. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
  135. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/input_0.pb +0 -0
  136. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FNUZ/test_data_set_0/output_0.pb +0 -0
  137. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2/model.onnx +0 -0
  138. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2/test_data_set_0/input_0.pb +0 -0
  139. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2/test_data_set_0/output_0.pb +0 -0
  140. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
  141. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/input_0.pb +0 -0
  142. onnx/backend/test/data/node/test_cast_no_saturate_FLOAT_to_FLOAT8E5M2FNUZ/test_data_set_0/output_0.pb +0 -0
  143. onnx/backend/test/data/node/test_castlike_BFLOAT16_to_FLOAT/model.onnx +0 -0
  144. onnx/backend/test/data/node/test_castlike_BFLOAT16_to_FLOAT_expanded/model.onnx +0 -0
  145. onnx/backend/test/data/node/test_castlike_DOUBLE_to_FLOAT/model.onnx +0 -0
  146. onnx/backend/test/data/node/test_castlike_DOUBLE_to_FLOAT16/model.onnx +0 -0
  147. onnx/backend/test/data/node/test_castlike_DOUBLE_to_FLOAT16_expanded/model.onnx +0 -0
  148. onnx/backend/test/data/node/test_castlike_DOUBLE_to_FLOAT_expanded/model.onnx +0 -0
  149. onnx/backend/test/data/node/test_castlike_FLOAT16_to_DOUBLE/model.onnx +0 -0
  150. onnx/backend/test/data/node/test_castlike_FLOAT16_to_DOUBLE_expanded/model.onnx +0 -0
  151. onnx/backend/test/data/node/test_castlike_FLOAT16_to_FLOAT/model.onnx +0 -0
  152. onnx/backend/test/data/node/test_castlike_FLOAT16_to_FLOAT_expanded/model.onnx +0 -0
  153. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT/model.onnx +0 -0
  154. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FNUZ_to_FLOAT_expanded/model.onnx +0 -0
  155. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT/model.onnx +0 -0
  156. onnx/backend/test/data/node/test_castlike_FLOAT8E4M3FN_to_FLOAT_expanded/model.onnx +0 -0
  157. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT/model.onnx +0 -0
  158. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2FNUZ_to_FLOAT_expanded/model.onnx +0 -0
  159. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT/model.onnx +0 -0
  160. onnx/backend/test/data/node/test_castlike_FLOAT8E5M2_to_FLOAT_expanded/model.onnx +0 -0
  161. onnx/backend/test/data/node/test_castlike_FLOAT_to_BFLOAT16/model.onnx +0 -0
  162. onnx/backend/test/data/node/test_castlike_FLOAT_to_BFLOAT16_expanded/model.onnx +0 -0
  163. onnx/backend/test/data/node/test_castlike_FLOAT_to_DOUBLE/model.onnx +0 -0
  164. onnx/backend/test/data/node/test_castlike_FLOAT_to_DOUBLE_expanded/model.onnx +0 -0
  165. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT16/model.onnx +0 -0
  166. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT16_expanded/model.onnx +0 -0
  167. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN/model.onnx +0 -0
  168. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ/model.onnx +0 -0
  169. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FNUZ_expanded/model.onnx +0 -0
  170. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E4M3FN_expanded/model.onnx +0 -0
  171. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2/model.onnx +0 -0
  172. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ/model.onnx +0 -0
  173. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2FNUZ_expanded/model.onnx +0 -0
  174. onnx/backend/test/data/node/test_castlike_FLOAT_to_FLOAT8E5M2_expanded/model.onnx +0 -0
  175. onnx/backend/test/data/node/test_castlike_FLOAT_to_STRING/model.onnx +0 -0
  176. onnx/backend/test/data/node/test_castlike_FLOAT_to_STRING_expanded/model.onnx +0 -0
  177. onnx/backend/test/data/node/test_castlike_STRING_to_FLOAT/model.onnx +0 -0
  178. onnx/backend/test/data/node/test_castlike_STRING_to_FLOAT_expanded/model.onnx +0 -0
  179. onnx/backend/test/data/node/test_constant/model.onnx +0 -0
  180. onnx/backend/test/data/node/test_constant_pad/model.onnx +0 -0
  181. onnx/backend/test/data/node/test_constant_pad_axes/model.onnx +0 -0
  182. onnx/backend/test/data/node/test_constant_pad_negative_axes/model.onnx +0 -0
  183. onnx/backend/test/data/node/test_constantofshape_float_ones/model.onnx +0 -0
  184. onnx/backend/test/data/node/test_constantofshape_int_shape_zero/model.onnx +0 -0
  185. onnx/backend/test/data/node/test_constantofshape_int_zeros/model.onnx +0 -0
  186. onnx/backend/test/data/node/test_dequantizelinear/model.onnx +0 -0
  187. onnx/backend/test/data/node/test_dequantizelinear_axis/model.onnx +0 -0
  188. onnx/backend/test/data/node/test_dequantizelinear_blocked/model.onnx +0 -0
  189. onnx/backend/test/data/node/test_dequantizelinear_blocked/test_data_set_0/input_0.pb +1 -0
  190. onnx/backend/test/data/node/test_dequantizelinear_blocked/test_data_set_0/input_1.pb +0 -0
  191. onnx/backend/test/data/node/test_dequantizelinear_blocked/test_data_set_0/input_2.pb +0 -0
  192. onnx/backend/test/data/node/test_dequantizelinear_blocked/test_data_set_0/output_0.pb +0 -0
  193. onnx/backend/test/data/node/test_dequantizelinear_e4m3fn/model.onnx +0 -0
  194. onnx/backend/test/data/node/test_dequantizelinear_e4m3fn_float16/model.onnx +0 -0
  195. onnx/backend/test/data/node/test_dequantizelinear_e4m3fn_zero_point/model.onnx +0 -0
  196. onnx/backend/test/data/node/test_dequantizelinear_e5m2/model.onnx +0 -0
  197. onnx/backend/test/data/node/test_dequantizelinear_int16/model.onnx +0 -0
  198. onnx/backend/test/data/node/test_dequantizelinear_int16/test_data_set_0/input_0.pb +1 -0
  199. onnx/backend/test/data/node/test_dequantizelinear_int16/test_data_set_0/input_1.pb +0 -0
  200. onnx/backend/test/data/node/test_dequantizelinear_int16/test_data_set_0/input_2.pb +0 -0
  201. onnx/backend/test/data/node/test_dequantizelinear_int16/test_data_set_0/output_0.pb +0 -0
  202. onnx/backend/test/data/node/test_dequantizelinear_int4/model.onnx +0 -0
  203. onnx/backend/test/data/node/test_dequantizelinear_int4/test_data_set_0/input_0.pb +1 -0
  204. onnx/backend/test/data/node/test_dequantizelinear_int4/test_data_set_0/input_1.pb +0 -0
  205. onnx/backend/test/data/node/test_dequantizelinear_int4/test_data_set_0/input_2.pb +1 -0
  206. onnx/backend/test/data/node/test_dequantizelinear_int4/test_data_set_0/output_0.pb +0 -0
  207. onnx/backend/test/data/node/test_dequantizelinear_uint16/model.onnx +0 -0
  208. onnx/backend/test/data/node/test_dequantizelinear_uint16/test_data_set_0/input_0.pb +0 -0
  209. onnx/backend/test/data/node/test_dequantizelinear_uint16/test_data_set_0/input_1.pb +0 -0
  210. onnx/backend/test/data/node/test_dequantizelinear_uint16/test_data_set_0/input_2.pb +1 -0
  211. onnx/backend/test/data/node/test_dequantizelinear_uint16/test_data_set_0/output_0.pb +0 -0
  212. onnx/backend/test/data/node/test_dequantizelinear_uint4/model.onnx +0 -0
  213. onnx/backend/test/data/node/test_dequantizelinear_uint4/test_data_set_0/input_0.pb +1 -0
  214. onnx/backend/test/data/node/test_dequantizelinear_uint4/test_data_set_0/input_1.pb +0 -0
  215. onnx/backend/test/data/node/test_dequantizelinear_uint4/test_data_set_0/input_2.pb +1 -0
  216. onnx/backend/test/data/node/test_dequantizelinear_uint4/test_data_set_0/output_0.pb +0 -0
  217. onnx/backend/test/data/node/test_edge_pad/model.onnx +0 -0
  218. onnx/backend/test/data/node/test_flatten_axis0/model.onnx +0 -0
  219. onnx/backend/test/data/node/test_flatten_axis1/model.onnx +0 -0
  220. onnx/backend/test/data/node/test_flatten_axis2/model.onnx +0 -0
  221. onnx/backend/test/data/node/test_flatten_axis3/model.onnx +0 -0
  222. onnx/backend/test/data/node/test_flatten_default_axis/model.onnx +0 -0
  223. onnx/backend/test/data/node/test_flatten_negative_axis1/model.onnx +0 -0
  224. onnx/backend/test/data/node/test_flatten_negative_axis2/model.onnx +0 -0
  225. onnx/backend/test/data/node/test_flatten_negative_axis3/model.onnx +0 -0
  226. onnx/backend/test/data/node/test_flatten_negative_axis4/model.onnx +0 -0
  227. onnx/backend/test/data/node/test_group_normalization_epsilon/model.onnx +0 -0
  228. onnx/backend/test/data/node/test_group_normalization_epsilon/test_data_set_0/input_0.pb +1 -1
  229. onnx/backend/test/data/node/test_group_normalization_epsilon/test_data_set_0/input_1.pb +1 -1
  230. onnx/backend/test/data/node/test_group_normalization_epsilon/test_data_set_0/input_2.pb +1 -1
  231. onnx/backend/test/data/node/test_group_normalization_epsilon/test_data_set_0/output_0.pb +0 -0
  232. onnx/backend/test/data/node/test_group_normalization_epsilon_expanded/model.onnx +0 -0
  233. onnx/backend/test/data/node/test_group_normalization_epsilon_expanded/test_data_set_0/input_0.pb +1 -1
  234. onnx/backend/test/data/node/test_group_normalization_epsilon_expanded/test_data_set_0/input_1.pb +1 -1
  235. onnx/backend/test/data/node/test_group_normalization_epsilon_expanded/test_data_set_0/input_2.pb +1 -1
  236. onnx/backend/test/data/node/test_group_normalization_epsilon_expanded/test_data_set_0/output_0.pb +0 -0
  237. onnx/backend/test/data/node/test_group_normalization_example/model.onnx +0 -0
  238. onnx/backend/test/data/node/test_group_normalization_example/test_data_set_0/input_1.pb +1 -1
  239. onnx/backend/test/data/node/test_group_normalization_example/test_data_set_0/input_2.pb +1 -1
  240. onnx/backend/test/data/node/test_group_normalization_example/test_data_set_0/output_0.pb +0 -0
  241. onnx/backend/test/data/node/test_group_normalization_example_expanded/model.onnx +0 -0
  242. onnx/backend/test/data/node/test_group_normalization_example_expanded/test_data_set_0/input_1.pb +1 -1
  243. onnx/backend/test/data/node/test_group_normalization_example_expanded/test_data_set_0/input_2.pb +1 -1
  244. onnx/backend/test/data/node/test_group_normalization_example_expanded/test_data_set_0/output_0.pb +0 -0
  245. onnx/backend/test/data/node/test_identity/model.onnx +0 -0
  246. onnx/backend/test/data/node/test_identity_sequence/model.onnx +0 -0
  247. onnx/backend/test/data/node/test_lrn_default/test_data_set_0/output_0.pb +0 -0
  248. onnx/backend/test/data/node/test_maxpool_2d_ceil_output_size_reduce_by_one/model.onnx +0 -0
  249. onnx/backend/test/data/node/test_maxpool_2d_ceil_output_size_reduce_by_one/test_data_set_0/input_0.pb +0 -0
  250. onnx/backend/test/data/node/test_maxpool_2d_ceil_output_size_reduce_by_one/test_data_set_0/output_0.pb +0 -0
  251. onnx/backend/test/data/node/test_mvn/test_data_set_0/output_0.pb +1 -1
  252. onnx/backend/test/data/node/test_mvn_expanded/test_data_set_0/output_0.pb +1 -1
  253. onnx/backend/test/data/node/test_mvn_expanded_ver18/test_data_set_0/output_0.pb +1 -1
  254. onnx/backend/test/data/node/test_pow/test_data_set_0/output_0.pb +0 -0
  255. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float16/model.onnx +0 -0
  256. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float16/test_data_set_0/input_0.pb +1 -0
  257. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float16/test_data_set_0/input_1.pb +2 -0
  258. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float16/test_data_set_0/input_2.pb +1 -0
  259. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float16/test_data_set_0/input_3.pb +0 -0
  260. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float16/test_data_set_0/input_4.pb +2 -0
  261. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float16/test_data_set_0/input_5.pb +1 -0
  262. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float16/test_data_set_0/input_6.pb +2 -0
  263. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float16/test_data_set_0/input_7.pb +1 -0
  264. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float16/test_data_set_0/output_0.pb +1 -0
  265. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float32/model.onnx +0 -0
  266. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float32/test_data_set_0/input_0.pb +1 -0
  267. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float32/test_data_set_0/input_2.pb +1 -0
  268. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float32/test_data_set_0/input_3.pb +0 -0
  269. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float32/test_data_set_0/input_5.pb +1 -0
  270. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float32/test_data_set_0/input_7.pb +1 -0
  271. onnx/backend/test/data/node/test_qlinearmatmul_2D_int8_float32/test_data_set_0/output_0.pb +1 -0
  272. onnx/backend/test/data/node/test_qlinearmatmul_2D_uint8_float16/model.onnx +0 -0
  273. onnx/backend/test/data/node/test_qlinearmatmul_2D_uint8_float16/test_data_set_0/input_1.pb +2 -0
  274. onnx/backend/test/data/node/test_qlinearmatmul_2D_uint8_float16/test_data_set_0/input_4.pb +2 -0
  275. onnx/backend/test/data/node/test_qlinearmatmul_2D_uint8_float16/test_data_set_0/input_6.pb +2 -0
  276. onnx/backend/test/data/node/{test_qlinearmatmul_2D → test_qlinearmatmul_2D_uint8_float32}/model.onnx +0 -0
  277. onnx/backend/test/data/node/test_qlinearmatmul_2D_uint8_float32/test_data_set_0/input_0.pb +0 -0
  278. onnx/backend/test/data/node/test_qlinearmatmul_2D_uint8_float32/test_data_set_0/input_3.pb +0 -0
  279. onnx/backend/test/data/node/test_qlinearmatmul_2D_uint8_float32/test_data_set_0/output_0.pb +1 -0
  280. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float16/model.onnx +0 -0
  281. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float16/test_data_set_0/input_0.pb +1 -0
  282. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float16/test_data_set_0/input_1.pb +2 -0
  283. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float16/test_data_set_0/input_2.pb +1 -0
  284. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float16/test_data_set_0/input_3.pb +0 -0
  285. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float16/test_data_set_0/input_4.pb +2 -0
  286. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float16/test_data_set_0/input_5.pb +1 -0
  287. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float16/test_data_set_0/input_6.pb +2 -0
  288. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float16/test_data_set_0/input_7.pb +1 -0
  289. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float16/test_data_set_0/output_0.pb +1 -0
  290. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float32/model.onnx +0 -0
  291. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float32/test_data_set_0/input_0.pb +1 -0
  292. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float32/test_data_set_0/input_1.pb +1 -0
  293. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float32/test_data_set_0/input_2.pb +1 -0
  294. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float32/test_data_set_0/input_3.pb +0 -0
  295. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float32/test_data_set_0/input_4.pb +1 -0
  296. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float32/test_data_set_0/input_5.pb +1 -0
  297. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float32/test_data_set_0/input_6.pb +1 -0
  298. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float32/test_data_set_0/input_7.pb +1 -0
  299. onnx/backend/test/data/node/test_qlinearmatmul_3D_int8_float32/test_data_set_0/output_0.pb +1 -0
  300. onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float16/model.onnx +0 -0
  301. onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float16/test_data_set_0/input_1.pb +2 -0
  302. onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float16/test_data_set_0/input_2.pb +1 -0
  303. onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float16/test_data_set_0/input_4.pb +2 -0
  304. onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float16/test_data_set_0/input_5.pb +1 -0
  305. onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float16/test_data_set_0/input_6.pb +2 -0
  306. onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float16/test_data_set_0/input_7.pb +1 -0
  307. onnx/backend/test/data/node/{test_qlinearmatmul_3D → test_qlinearmatmul_3D_uint8_float32}/model.onnx +0 -0
  308. onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float32/test_data_set_0/input_0.pb +0 -0
  309. onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float32/test_data_set_0/input_1.pb +1 -0
  310. onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float32/test_data_set_0/input_2.pb +1 -0
  311. onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float32/test_data_set_0/input_3.pb +0 -0
  312. onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float32/test_data_set_0/input_4.pb +1 -0
  313. onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float32/test_data_set_0/input_5.pb +1 -0
  314. onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float32/test_data_set_0/input_6.pb +1 -0
  315. onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float32/test_data_set_0/input_7.pb +1 -0
  316. onnx/backend/test/data/node/test_qlinearmatmul_3D_uint8_float32/test_data_set_0/output_0.pb +1 -0
  317. onnx/backend/test/data/node/test_quantizelinear/model.onnx +0 -0
  318. onnx/backend/test/data/node/test_quantizelinear_axis/model.onnx +0 -0
  319. onnx/backend/test/data/node/test_quantizelinear_blocked_asymmetric/model.onnx +0 -0
  320. onnx/backend/test/data/node/test_quantizelinear_blocked_asymmetric/test_data_set_0/input_0.pb +0 -0
  321. onnx/backend/test/data/node/test_quantizelinear_blocked_asymmetric/test_data_set_0/input_1.pb +0 -0
  322. onnx/backend/test/data/node/test_quantizelinear_blocked_asymmetric/test_data_set_0/input_2.pb +0 -0
  323. onnx/backend/test/data/node/test_quantizelinear_blocked_asymmetric/test_data_set_0/output_0.pb +1 -0
  324. onnx/backend/test/data/node/test_quantizelinear_blocked_symmetric/model.onnx +0 -0
  325. onnx/backend/test/data/node/test_quantizelinear_blocked_symmetric/test_data_set_0/input_0.pb +0 -0
  326. onnx/backend/test/data/node/test_quantizelinear_blocked_symmetric/test_data_set_0/input_1.pb +0 -0
  327. onnx/backend/test/data/node/test_quantizelinear_blocked_symmetric/test_data_set_0/output_0.pb +0 -0
  328. onnx/backend/test/data/node/test_quantizelinear_e4m3fn/model.onnx +0 -0
  329. onnx/backend/test/data/node/test_quantizelinear_e4m3fn/test_data_set_0/input_2.pb +0 -0
  330. onnx/backend/test/data/node/test_quantizelinear_e4m3fn/test_data_set_0/output_0.pb +0 -0
  331. onnx/backend/test/data/node/test_quantizelinear_e5m2/model.onnx +0 -0
  332. onnx/backend/test/data/node/test_quantizelinear_e5m2/test_data_set_0/input_2.pb +0 -0
  333. onnx/backend/test/data/node/test_quantizelinear_e5m2/test_data_set_0/output_0.pb +0 -0
  334. onnx/backend/test/data/node/test_quantizelinear_int16/model.onnx +0 -0
  335. onnx/backend/test/data/node/test_quantizelinear_int16/test_data_set_0/input_0.pb +0 -0
  336. onnx/backend/test/data/node/test_quantizelinear_int16/test_data_set_0/input_1.pb +0 -0
  337. onnx/backend/test/data/node/test_quantizelinear_int16/test_data_set_0/input_2.pb +0 -0
  338. onnx/backend/test/data/node/test_quantizelinear_int16/test_data_set_0/output_0.pb +0 -0
  339. onnx/backend/test/data/node/test_quantizelinear_int4/model.onnx +0 -0
  340. onnx/backend/test/data/node/test_quantizelinear_int4/test_data_set_0/input_0.pb +0 -0
  341. onnx/backend/test/data/node/test_quantizelinear_int4/test_data_set_0/input_1.pb +0 -0
  342. onnx/backend/test/data/node/test_quantizelinear_int4/test_data_set_0/input_2.pb +1 -0
  343. onnx/backend/test/data/node/test_quantizelinear_int4/test_data_set_0/output_0.pb +1 -0
  344. onnx/backend/test/data/node/test_quantizelinear_uint16/model.onnx +0 -0
  345. onnx/backend/test/data/node/test_quantizelinear_uint16/test_data_set_0/input_0.pb +0 -0
  346. onnx/backend/test/data/node/test_quantizelinear_uint16/test_data_set_0/input_1.pb +0 -0
  347. onnx/backend/test/data/node/test_quantizelinear_uint16/test_data_set_0/input_2.pb +1 -0
  348. onnx/backend/test/data/node/test_quantizelinear_uint16/test_data_set_0/output_0.pb +0 -0
  349. onnx/backend/test/data/node/test_quantizelinear_uint4/model.onnx +0 -0
  350. onnx/backend/test/data/node/test_quantizelinear_uint4/test_data_set_0/input_0.pb +0 -0
  351. onnx/backend/test/data/node/test_quantizelinear_uint4/test_data_set_0/input_1.pb +0 -0
  352. onnx/backend/test/data/node/test_quantizelinear_uint4/test_data_set_0/input_2.pb +1 -0
  353. onnx/backend/test/data/node/test_quantizelinear_uint4/test_data_set_0/output_0.pb +0 -0
  354. onnx/backend/test/data/node/test_reflect_pad/model.onnx +0 -0
  355. onnx/backend/test/data/node/test_reshape_allowzero_reordered/model.onnx +0 -0
  356. onnx/backend/test/data/node/test_reshape_extended_dims/model.onnx +0 -0
  357. onnx/backend/test/data/node/test_reshape_negative_dim/model.onnx +0 -0
  358. onnx/backend/test/data/node/test_reshape_negative_extended_dims/model.onnx +0 -0
  359. onnx/backend/test/data/node/test_reshape_one_dim/model.onnx +0 -0
  360. onnx/backend/test/data/node/test_reshape_reduced_dims/model.onnx +0 -0
  361. onnx/backend/test/data/node/test_reshape_reordered_all_dims/model.onnx +0 -0
  362. onnx/backend/test/data/node/test_reshape_reordered_last_dims/model.onnx +0 -0
  363. onnx/backend/test/data/node/test_reshape_zero_and_negative_dim/model.onnx +0 -0
  364. onnx/backend/test/data/node/test_reshape_zero_dim/model.onnx +0 -0
  365. onnx/backend/test/data/node/test_shape/model.onnx +0 -0
  366. onnx/backend/test/data/node/test_shape_clip_end/model.onnx +0 -0
  367. onnx/backend/test/data/node/test_shape_clip_start/model.onnx +0 -0
  368. onnx/backend/test/data/node/test_shape_end_1/model.onnx +0 -0
  369. onnx/backend/test/data/node/test_shape_end_negative_1/model.onnx +0 -0
  370. onnx/backend/test/data/node/test_shape_example/model.onnx +0 -0
  371. onnx/backend/test/data/node/test_shape_start_1/model.onnx +0 -0
  372. onnx/backend/test/data/node/test_shape_start_1_end_2/model.onnx +0 -0
  373. onnx/backend/test/data/node/test_shape_start_1_end_negative_1/model.onnx +0 -0
  374. onnx/backend/test/data/node/test_shape_start_negative_1/model.onnx +0 -0
  375. onnx/backend/test/data/node/test_size/model.onnx +0 -0
  376. onnx/backend/test/data/node/test_size_example/model.onnx +0 -0
  377. onnx/backend/test/data/node/test_squeeze/model.onnx +0 -0
  378. onnx/backend/test/data/node/test_squeeze_negative_axes/model.onnx +0 -0
  379. onnx/backend/test/data/node/test_transpose_all_permutations_0/model.onnx +0 -0
  380. onnx/backend/test/data/node/test_transpose_all_permutations_1/model.onnx +0 -0
  381. onnx/backend/test/data/node/test_transpose_all_permutations_2/model.onnx +0 -0
  382. onnx/backend/test/data/node/test_transpose_all_permutations_3/model.onnx +0 -0
  383. onnx/backend/test/data/node/test_transpose_all_permutations_4/model.onnx +0 -0
  384. onnx/backend/test/data/node/test_transpose_all_permutations_5/model.onnx +0 -0
  385. onnx/backend/test/data/node/test_transpose_default/model.onnx +0 -0
  386. onnx/backend/test/data/node/test_unsqueeze_axis_0/model.onnx +0 -0
  387. onnx/backend/test/data/node/test_unsqueeze_axis_1/model.onnx +0 -0
  388. onnx/backend/test/data/node/test_unsqueeze_axis_2/model.onnx +0 -0
  389. onnx/backend/test/data/node/test_unsqueeze_negative_axes/model.onnx +0 -0
  390. onnx/backend/test/data/node/test_unsqueeze_three_axes/model.onnx +0 -0
  391. onnx/backend/test/data/node/test_unsqueeze_two_axes/model.onnx +0 -0
  392. onnx/backend/test/data/node/test_unsqueeze_unsorted_axes/model.onnx +0 -0
  393. onnx/backend/test/data/node/test_wrap_pad/model.onnx +0 -0
  394. onnx/backend/test/loader/__init__.py +0 -1
  395. onnx/backend/test/runner/__init__.py +43 -15
  396. onnx/checker.cc +104 -99
  397. onnx/checker.h +23 -3
  398. onnx/checker.py +56 -20
  399. onnx/common/assertions.cc +10 -5
  400. onnx/common/common.h +19 -0
  401. onnx/common/file_utils.h +3 -1
  402. onnx/common/interned_strings.h +7 -1
  403. onnx/common/ir.h +30 -7
  404. onnx/common/ir_pb_converter.cc +6 -0
  405. onnx/common/path.h +18 -2
  406. onnx/common/proto_util.h +43 -0
  407. onnx/common/version.h +1 -1
  408. onnx/cpp2py_export.cc +88 -56
  409. onnx/defs/__init__.py +29 -8
  410. onnx/defs/controlflow/defs.cc +16 -16
  411. onnx/defs/controlflow/old.cc +177 -0
  412. onnx/defs/data_propagators.h +2 -0
  413. onnx/defs/data_type_utils.cc +2 -0
  414. onnx/defs/generator/defs.cc +6 -4
  415. onnx/defs/generator/old.cc +115 -0
  416. onnx/defs/math/defs.cc +37 -142
  417. onnx/defs/math/old.cc +96 -12
  418. onnx/defs/math/utils.cc +127 -0
  419. onnx/defs/math/utils.h +8 -0
  420. onnx/defs/nn/defs.cc +72 -59
  421. onnx/defs/nn/old.cc +181 -2
  422. onnx/defs/object_detection/defs.cc +2 -2
  423. onnx/defs/object_detection/old.cc +2 -2
  424. onnx/defs/operator_sets.h +51 -0
  425. onnx/defs/operator_sets_ml.h +14 -0
  426. onnx/defs/parser.cc +112 -54
  427. onnx/defs/parser.h +14 -2
  428. onnx/defs/printer.cc +14 -7
  429. onnx/defs/quantization/defs.cc +111 -44
  430. onnx/defs/quantization/old.cc +130 -1
  431. onnx/defs/schema.cc +62 -18
  432. onnx/defs/schema.h +194 -48
  433. onnx/defs/shape_inference.cc +28 -19
  434. onnx/defs/shape_inference.h +2 -0
  435. onnx/defs/tensor/defs.cc +54 -96
  436. onnx/defs/tensor/old.cc +939 -34
  437. onnx/defs/tensor/utils.cc +6 -3
  438. onnx/defs/tensor/utils.h +5 -1
  439. onnx/defs/tensor_proto_util.cc +2 -0
  440. onnx/defs/tensor_util.cc +2 -0
  441. onnx/defs/traditionalml/defs.cc +273 -117
  442. onnx/defs/traditionalml/old.cc +329 -14
  443. onnx/defs/traditionalml/utils.h +27 -0
  444. onnx/external_data_helper.py +12 -26
  445. onnx/helper.py +242 -169
  446. onnx/hub.py +104 -70
  447. onnx/inliner/inliner.cc +89 -31
  448. onnx/inliner/inliner.h +5 -0
  449. onnx/inliner.py +2 -0
  450. onnx/mapping.py +9 -0
  451. onnx/model_container.py +346 -0
  452. onnx/numpy_helper.py +100 -38
  453. onnx/onnx-ml.proto +50 -13
  454. onnx/onnx.in.proto +50 -13
  455. onnx/onnx.proto +50 -13
  456. onnx/onnx_cpp2py_export/__init__.pyi +5 -0
  457. onnx/onnx_cpp2py_export/checker.pyi +21 -0
  458. onnx/onnx_cpp2py_export/defs.pyi +202 -0
  459. onnx/onnx_cpp2py_export/inliner.pyi +19 -0
  460. onnx/onnx_cpp2py_export/parser.pyi +32 -0
  461. onnx/onnx_cpp2py_export/printer.pyi +3 -0
  462. onnx/onnx_cpp2py_export/shape_inference.pyi +16 -0
  463. onnx/onnx_cpp2py_export/version_converter.pyi +4 -0
  464. onnx/onnx_cpp2py_export.cp311-win_amd64.pyd +0 -0
  465. onnx/onnx_data_pb2.pyi +146 -0
  466. onnx/onnx_ml_pb2.py +52 -52
  467. onnx/onnx_ml_pb2.pyi +663 -0
  468. onnx/onnx_operators_ml_pb2.pyi +67 -0
  469. onnx/reference/__init__.py +2 -0
  470. onnx/reference/custom_element_types.py +2 -0
  471. onnx/reference/op_run.py +166 -121
  472. onnx/reference/ops/_op.py +27 -50
  473. onnx/reference/ops/_op_list.py +36 -24
  474. onnx/reference/ops/aionnx_preview_training/_op_list.py +15 -8
  475. onnx/reference/ops/aionnxml/_common_classifier.py +3 -5
  476. onnx/reference/ops/aionnxml/_op_list.py +16 -8
  477. onnx/reference/ops/aionnxml/op_array_feature_extractor.py +4 -6
  478. onnx/reference/ops/aionnxml/op_linear_classifier.py +1 -2
  479. onnx/reference/ops/aionnxml/op_normalizer.py +3 -3
  480. onnx/reference/ops/aionnxml/op_svm_helper.py +1 -3
  481. onnx/reference/ops/aionnxml/op_svm_regressor.py +1 -3
  482. onnx/reference/ops/aionnxml/op_tree_ensemble.py +257 -0
  483. onnx/reference/ops/aionnxml/op_tree_ensemble_helper.py +2 -6
  484. onnx/reference/ops/aionnxml/op_tree_ensemble_regressor.py +4 -4
  485. onnx/reference/ops/experimental/_op_list.py +15 -8
  486. onnx/reference/ops/op_blackman_window.py +5 -6
  487. onnx/reference/ops/op_cast.py +22 -0
  488. onnx/reference/ops/op_cast_like.py +6 -0
  489. onnx/reference/ops/op_clip.py +5 -8
  490. onnx/reference/ops/op_col2im.py +1 -3
  491. onnx/reference/ops/op_constant.py +7 -1
  492. onnx/reference/ops/op_dequantize_linear.py +43 -40
  493. onnx/reference/ops/op_det.py +1 -1
  494. onnx/reference/ops/op_dynamic_quantize_linear.py +2 -2
  495. onnx/reference/ops/op_grid_sample.py +2 -4
  496. onnx/reference/ops/op_hamming_window.py +3 -6
  497. onnx/reference/ops/op_hann_window.py +3 -6
  498. onnx/reference/ops/op_if.py +4 -3
  499. onnx/reference/ops/op_loop.py +7 -9
  500. onnx/reference/ops/op_matmul.py +1 -2
  501. onnx/reference/ops/op_max_pool.py +5 -0
  502. onnx/reference/ops/op_optional.py +1 -1
  503. onnx/reference/ops/op_pool_common.py +3 -6
  504. onnx/reference/ops/op_qlinear_matmul.py +2 -2
  505. onnx/reference/ops/op_quantize_linear.py +166 -71
  506. onnx/reference/ops/op_resize.py +25 -21
  507. onnx/reference/ops/op_rnn.py +20 -12
  508. onnx/reference/ops/op_scan.py +23 -15
  509. onnx/reference/ops/op_scatter_elements.py +7 -6
  510. onnx/reference/ops/op_stft.py +3 -5
  511. onnx/reference/ops/op_string_normalizer.py +7 -7
  512. onnx/reference/ops/op_tfidf_vectorizer.py +7 -8
  513. onnx/reference/ops/op_topk.py +9 -11
  514. onnx/reference/ops/op_unique.py +1 -1
  515. onnx/reference/reference_evaluator.py +119 -63
  516. onnx/shape_inference/implementation.cc +160 -127
  517. onnx/shape_inference.py +11 -10
  518. onnx/subbyte.py +72 -0
  519. onnx/test/__init__.pyi +6 -0
  520. onnx/test/checker_test.py +21 -1
  521. onnx/test/compose_test.py +26 -74
  522. onnx/test/cpp/inliner_test.cc +76 -1
  523. onnx/test/cpp/ir_test.cc +60 -0
  524. onnx/test/cpp/parser_test.cc +106 -0
  525. onnx/test/function_test.py +1 -3
  526. onnx/test/helper_test.py +64 -4
  527. onnx/test/model_container_refeval_test.py +139 -0
  528. onnx/test/model_container_test.py +136 -0
  529. onnx/test/model_inference_test.py +44 -0
  530. onnx/test/reference_evaluator_ml_test.py +448 -47
  531. onnx/test/reference_evaluator_model_test.py +130 -0
  532. onnx/test/reference_evaluator_test.py +901 -14
  533. onnx/test/schema_test.py +166 -1
  534. onnx/test/shape_inference_test.py +285 -6
  535. onnx/test/symbolic_shape_test.py +3 -8
  536. onnx/test/test_backend_onnxruntime.py +238 -224
  537. onnx/test/test_backend_reference.py +11 -0
  538. onnx/test/test_external_data.py +51 -2
  539. onnx/test/version_converter/automatic_conversion_test_base.py +2 -1
  540. onnx/test/version_converter/automatic_upgrade_test.py +12 -10
  541. onnx/test/version_converter_test.py +166 -0
  542. onnx/tools/replace_constants.py +23 -26
  543. onnx/tools/update_model_dims.py +1 -2
  544. onnx/version.py +2 -2
  545. onnx/version_converter/adapters/group_normalization_20_21.h +128 -0
  546. onnx/version_converter/adapters/q_dq_21_20.h +77 -0
  547. onnx/version_converter/convert.h +67 -2
  548. onnx/version_converter.py +6 -142
  549. {onnx-1.15.0.dist-info → onnx-1.16.1.dist-info}/METADATA +18 -15
  550. {onnx-1.15.0.dist-info → onnx-1.16.1.dist-info}/RECORD +572 -406
  551. {onnx-1.15.0.dist-info → onnx-1.16.1.dist-info}/WHEEL +1 -1
  552. onnx/examples/Protobufs.ipynb +0 -639
  553. onnx/examples/check_model.ipynb +0 -128
  554. onnx/examples/load_model.ipynb +0 -116
  555. onnx/examples/make_model.ipynb +0 -176
  556. onnx/examples/np_array_tensorproto.ipynb +0 -136
  557. onnx/examples/resources/single_relu.onnx +0 -12
  558. onnx/examples/resources/single_relu_new.onnx +0 -12
  559. onnx/examples/resources/tensor.pb +0 -0
  560. onnx/examples/resources/two_transposes.onnx +0 -0
  561. onnx/examples/save_model.ipynb +0 -56
  562. onnx/examples/shape_inference.ipynb +0 -111
  563. onnx/test/reference_evaluator_backend_test.py +0 -876
  564. /onnx/backend/test/data/node/{test_qlinearmatmul_2D → test_qlinearmatmul_2D_int8_float32}/test_data_set_0/input_1.pb +0 -0
  565. /onnx/backend/test/data/node/{test_qlinearmatmul_2D → test_qlinearmatmul_2D_int8_float32}/test_data_set_0/input_4.pb +0 -0
  566. /onnx/backend/test/data/node/{test_qlinearmatmul_2D → test_qlinearmatmul_2D_int8_float32}/test_data_set_0/input_6.pb +0 -0
  567. /onnx/backend/test/data/node/{test_qlinearmatmul_2D → test_qlinearmatmul_2D_uint8_float16}/test_data_set_0/input_0.pb +0 -0
  568. /onnx/backend/test/data/node/{test_qlinearmatmul_2D → test_qlinearmatmul_2D_uint8_float16}/test_data_set_0/input_2.pb +0 -0
  569. /onnx/backend/test/data/node/{test_qlinearmatmul_2D → test_qlinearmatmul_2D_uint8_float16}/test_data_set_0/input_3.pb +0 -0
  570. /onnx/backend/test/data/node/{test_qlinearmatmul_2D → test_qlinearmatmul_2D_uint8_float16}/test_data_set_0/input_5.pb +0 -0
  571. /onnx/backend/test/data/node/{test_qlinearmatmul_2D → test_qlinearmatmul_2D_uint8_float16}/test_data_set_0/input_7.pb +0 -0
  572. /onnx/backend/test/data/node/{test_qlinearmatmul_2D → test_qlinearmatmul_2D_uint8_float16}/test_data_set_0/output_0.pb +0 -0
  573. /onnx/backend/test/data/node/{test_qlinearmatmul_3D → test_qlinearmatmul_2D_uint8_float32}/test_data_set_0/input_1.pb +0 -0
  574. /onnx/backend/test/data/node/{test_qlinearmatmul_3D → test_qlinearmatmul_2D_uint8_float32}/test_data_set_0/input_2.pb +0 -0
  575. /onnx/backend/test/data/node/{test_qlinearmatmul_3D → test_qlinearmatmul_2D_uint8_float32}/test_data_set_0/input_4.pb +0 -0
  576. /onnx/backend/test/data/node/{test_qlinearmatmul_3D → test_qlinearmatmul_2D_uint8_float32}/test_data_set_0/input_5.pb +0 -0
  577. /onnx/backend/test/data/node/{test_qlinearmatmul_3D → test_qlinearmatmul_2D_uint8_float32}/test_data_set_0/input_6.pb +0 -0
  578. /onnx/backend/test/data/node/{test_qlinearmatmul_3D → test_qlinearmatmul_2D_uint8_float32}/test_data_set_0/input_7.pb +0 -0
  579. /onnx/backend/test/data/node/{test_qlinearmatmul_3D → test_qlinearmatmul_3D_uint8_float16}/test_data_set_0/input_0.pb +0 -0
  580. /onnx/backend/test/data/node/{test_qlinearmatmul_3D → test_qlinearmatmul_3D_uint8_float16}/test_data_set_0/input_3.pb +0 -0
  581. /onnx/backend/test/data/node/{test_qlinearmatmul_3D → test_qlinearmatmul_3D_uint8_float16}/test_data_set_0/output_0.pb +0 -0
  582. {onnx-1.15.0.dist-info → onnx-1.16.1.dist-info}/LICENSE +0 -0
  583. {onnx-1.15.0.dist-info → onnx-1.16.1.dist-info}/entry_points.txt +0 -0
  584. {onnx-1.15.0.dist-info → onnx-1.16.1.dist-info}/top_level.txt +0 -0
@@ -11,6 +11,15 @@ namespace ONNX_NAMESPACE {
 
 using SupportType = OpSchema::SupportType;
 
+static std::vector<std::string> control_flow_types_ir9() {
+  auto t = OpSchema::all_tensor_types_ir9();
+  auto s = OpSchema::all_tensor_sequence_types_ir9();
+  auto o = OpSchema::all_optional_types_ir9();
+  t.insert(t.end(), s.begin(), s.end());
+  t.insert(t.end(), o.begin(), o.end());
+  return t;
+}
+
 static std::vector<std::string> control_flow_types_ir4() {
   auto t = OpSchema::all_tensor_types_ir4();
   auto s = OpSchema::all_tensor_sequence_types_ir4();
@@ -20,6 +29,53 @@ static std::vector<std::string> control_flow_types_ir4() {
   return t;
 }
 
+ONNX_OPERATOR_SET_SCHEMA(
+    If,
+    19,
+    OpSchema()
+        .SetDoc("If conditional")
+        .Input(0, "cond", "Condition for the if. The tensor must contain a single element.", "B")
+        .Output(
+            0,
+            "outputs",
+            "Values that are live-out to the enclosing scope. The return values in "
+            "the `then_branch` and `else_branch` must be of the same data type. "
+            "The `then_branch` and `else_branch` may produce tensors with the same "
+            "element type and different shapes. "
+            "If corresponding outputs from the then-branch and the else-branch have "
+            "static shapes S1 and S2, then the shape of the corresponding output "
+            "variable of the if-node (if present) must be compatible with both S1 "
+            "and S2 as it represents the union of both possible shapes."
+            "For example, if in a model file, the first "
+            "output of `then_branch` is typed float tensor with shape [2] and the "
+            "first output of `else_branch` is another float tensor with shape [3], "
+            "If's first output should have (a) no shape set, or (b) "
+            "a shape of rank 1 with neither `dim_value` nor `dim_param` set, or (c) "
+            "a shape of rank 1 with a unique `dim_param`. "
+            "In contrast, the first output cannot have the shape [2] since [2] and "
+            "[3] are not compatible.",
+            "V",
+            OpSchema::Variadic,
+            false)
+        .Attr(
+            "then_branch",
+            "Graph to run if condition is true. Has N outputs: values you wish to "
+            "be live-out to the enclosing scope. The number of outputs must match"
+            " the number of outputs in the else_branch.",
+            AttributeProto::GRAPH)
+        .Attr(
+            "else_branch",
+            "Graph to run if condition is false. Has N outputs: values you wish to"
+            " be live-out to the enclosing scope. The number of outputs must match"
+            " the number of outputs in the then_branch.",
+            AttributeProto::GRAPH)
+        .TypeConstraint(
+            "V",
+            control_flow_types_ir9(),
+            "All Tensor, Sequence(Tensor), Optional(Tensor), and Optional(Sequence(Tensor)) types up to IRv9.")
+        .TypeConstraint("B", {"tensor(bool)"}, "Only bool")
+        .TypeAndShapeInferenceFunction(IfInferenceFunction));
+
 ONNX_OPERATOR_SET_SCHEMA(
     If,
     16,
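
For orientation: the hunk above registers an opset-19 If schema that reuses control_flow_types_ir9(). Below is a minimal Python sketch of how such an If node is typically assembled with the standard onnx.helper API (not code from this diff); the graph and value names are illustrative, and the two branches deliberately return different static shapes, [2] and [3], which the schema text above explicitly allows.

    from onnx import TensorProto, helper

    # Each branch yields one float output; the shapes ([2] vs. [3]) differ,
    # which the If schema permits as long as the element types match.
    then_out = helper.make_tensor_value_info("branch_out", TensorProto.FLOAT, [2])
    else_out = helper.make_tensor_value_info("branch_out", TensorProto.FLOAT, [3])

    then_graph = helper.make_graph(
        [helper.make_node("Constant", [], ["branch_out"],
                          value=helper.make_tensor("tv", TensorProto.FLOAT, [2], [1.0, 2.0]))],
        "then_branch", [], [then_out])
    else_graph = helper.make_graph(
        [helper.make_node("Constant", [], ["branch_out"],
                          value=helper.make_tensor("ev", TensorProto.FLOAT, [3], [3.0, 4.0, 5.0]))],
        "else_branch", [], [else_out])

    # cond is a single-element bool tensor, matching type constraint "B".
    if_node = helper.make_node("If", ["cond"], ["y"],
                               then_branch=then_graph, else_branch=else_graph)
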
@@ -205,6 +261,59 @@ point-wise operators (e.g. dropout, residual connections, linear layer).
 The input/output of subgraph (produced by loop node) matching is based on order instead of name. The implementation will figure out the names based on this order.
 )DOC";
 
+ONNX_OPERATOR_SET_SCHEMA(
+    Loop,
+    19,
+    OpSchema()
+        .SetDoc(Loop_ver16_doc)
+        .Input(
+            0,
+            "M",
+            "A maximum trip-count for the loop specified at runtime. Optional."
+            " Pass empty string to skip.",
+            "I",
+            OpSchema::Optional)
+        .Input(
+            1,
+            "cond",
+            "A boolean termination condition. Optional. Pass empty string to skip.",
+            "B",
+            OpSchema::Optional)
+        .Input(
+            2,
+            "v_initial",
+            "The initial values of any loop-carried dependencies (values that "
+            "change across loop iterations)",
+            "V",
+            OpSchema::Variadic,
+            false,
+            0)
+        .Output(
+            0,
+            "v_final_and_scan_outputs",
+            "Final N loop carried dependency values then K scan_outputs. "
+            "Scan outputs must be Tensors.",
+            "V",
+            OpSchema::Variadic,
+            false)
+        .Attr(
+            "body",
+            "The graph run each iteration. It has 2+N inputs: (iteration_num, "
+            "condition, loop carried dependencies...). It has 1+N+K outputs: "
+            "(condition, loop carried dependencies..., scan_outputs...). Each "
+            "scan_output is created by concatenating the value of the specified "
+            "output value at the end of each iteration of the loop. It is an error"
+            " if the dimensions or data type of these scan_outputs change across loop"
+            " iterations.",
+            AttributeProto::GRAPH)
+        .TypeConstraint(
+            "V",
+            control_flow_types_ir9(),
+            "All Tensor, Sequence(Tensor), Optional(Tensor), and Optional(Sequence(Tensor)) types up to IRv9.")
+        .TypeConstraint("I", {"tensor(int64)"}, "tensor of int64, which should be a scalar.")
+        .TypeConstraint("B", {"tensor(bool)"}, "tensor of bool, which should be a scalar.")
+        .TypeAndShapeInferenceFunction(LoopInferenceFunction));
+
 ONNX_OPERATOR_SET_SCHEMA(
     Loop,
     16,
@@ -382,6 +491,74 @@ values are computed in the outer graph, they need to be passed in as extra state
 
 )DOC";
 
+ONNX_OPERATOR_SET_SCHEMA(
+    Scan,
+    19,
+    OpSchema()
+        .SetDoc(scan_16_doc)
+        .Input(
+            0,
+            "initial_state_and_scan_inputs",
+            "Initial values of the loop's N state variables followed by M scan_inputs",
+            "V",
+            OpSchema::Variadic,
+            false)
+        .Output(
+            0,
+            "final_state_and_scan_outputs",
+            "Final values of the loop's N state variables followed by K scan_outputs",
+            "V",
+            OpSchema::Variadic,
+            false)
+        .Attr(
+            "body",
+            "The graph run each iteration. It has N+M inputs: "
+            "(loop state variables..., scan_input_elts...). It has N+K outputs: "
+            "(loop state variables..., scan_output_elts...). Each "
+            "scan_output is created by concatenating the value of the specified "
+            "scan_output_elt value at the end of each iteration of the loop. It is an error"
+            " if the dimensions of these values change across loop iterations.",
+            AttributeProto::GRAPH,
+            true)
+        .Attr("num_scan_inputs", "An attribute specifying the number of scan_inputs M. ", AttributeProto::INT, true)
+        .Attr(
+            "scan_input_directions",
+            "An optional list of M flags. The i-th element of the list specifies the direction "
+            "to be scanned for the i-th scan_input tensor: 0 indicates forward direction and 1 "
+            "indicates reverse direction. "
+            "If omitted, all scan_input tensors will be scanned in the forward direction.",
+            AttributeProto::INTS,
+            false)
+        .Attr(
+            "scan_output_directions",
+            "An optional list of K flags, one for each scan_output. The i-th element of the list "
+            "specifies whether the i-th scan_output should be constructed by appending or "
+            "prepending a new value in each iteration: 0 indicates appending and 1 "
+            "indicates prepending. "
+            "If omitted, all scan_output tensors will be produced by appending a value "
+            "in each iteration.",
+            AttributeProto::INTS,
+            false)
+        .Attr(
+            "scan_input_axes",
+            "An optional list of M flags. The i-th element of the list specifies the axis "
+            "to be scanned (the sequence axis) for the i-th scan_input. If omitted, 0 will "
+            "be used as the scan axis for every scan_input. Negative value for an axis means "
+            "counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input).",
+            AttributeProto::INTS,
+            false)
+        .Attr(
+            "scan_output_axes",
+            "An optional list of K flags. The i-th element of the list specifies the axis "
+            "for the i-th scan_output. The scan outputs are accumulated along the specified "
+            "axis. If omitted, 0 will be used as the scan axis for every scan_output. "
+            "Negative value for an axis means counting dimensions from the back. Accepted "
+            "range is [-r, r-1].",
+            AttributeProto::INTS,
+            false)
+        .TypeConstraint("V", OpSchema::all_tensor_types_ir9(), "All Tensor types up to IRv9.")
+        .TypeAndShapeInferenceFunction(ScanInferenceFunction)); // Shares same shape inference as opset 11
+
 ONNX_OPERATOR_SET_SCHEMA(
     Scan,
     16,
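
A hedged sketch of the N+M input / N+K output body contract the Scan schema above describes, again with the standard onnx.helper API (not code from this diff): one state variable (a running sum), one scan input, and one scan output; all names are illustrative.

    from onnx import TensorProto, helper

    # Body contract: N=1 state variable, M=1 scan input element, K=1 scan output element.
    sum_in = helper.make_tensor_value_info("sum_in", TensorProto.FLOAT, [2])
    elt = helper.make_tensor_value_info("elt", TensorProto.FLOAT, [2])
    sum_out = helper.make_tensor_value_info("sum_out", TensorProto.FLOAT, [2])
    scan_out = helper.make_tensor_value_info("scan_out", TensorProto.FLOAT, [2])

    body = helper.make_graph(
        [
            helper.make_node("Add", ["sum_in", "elt"], ["sum_out"]),
            helper.make_node("Identity", ["sum_out"], ["scan_out"]),
        ],
        "scan_body",
        [sum_in, elt],
        [sum_out, scan_out],
    )

    # The Scan node carries the final state plus the concatenated scan outputs.
    scan_node = helper.make_node(
        "Scan", ["initial_sum", "x"], ["final_sum", "cumulative_sums"],
        body=body, num_scan_inputs=1)
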
@@ -4,6 +4,8 @@
  * SPDX-License-Identifier: Apache-2.0
  */
 
+#pragma once
+
 #include <utility>
 
 #include "onnx/defs/shape_inference.h"
@@ -440,6 +440,8 @@ TypesWrapper::TypesWrapper() {
   type_str_to_tensor_data_type_["float8e4m3fnuz"] = TensorProto_DataType_FLOAT8E4M3FNUZ;
   type_str_to_tensor_data_type_["float8e5m2"] = TensorProto_DataType_FLOAT8E5M2;
   type_str_to_tensor_data_type_["float8e5m2fnuz"] = TensorProto_DataType_FLOAT8E5M2FNUZ;
+  type_str_to_tensor_data_type_["uint4"] = TensorProto_DataType_UINT4;
+  type_str_to_tensor_data_type_["int4"] = TensorProto_DataType_INT4;
 
   for (auto& str_type_pair : type_str_to_tensor_data_type_) {
     tensor_data_type_to_type_str_[str_type_pair.second] = str_type_pair.first;
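
The two added entries register the textual names of the 4-bit integer types introduced in this release. As a rough illustration of what this enables from the Python side, assuming the TensorProto.INT4 enum shipped with onnx 1.16 and the opset-21 Cast schema (neither shown in this hunk), a float tensor can be cast to int4 roughly like so:

    import onnx
    from onnx import TensorProto, helper

    # Cast a float input to the new 4-bit signed integer type (opset 21).
    graph = helper.make_graph(
        [helper.make_node("Cast", ["x"], ["y"], to=TensorProto.INT4)],
        "cast_to_int4",
        [helper.make_tensor_value_info("x", TensorProto.FLOAT, [4])],
        [helper.make_tensor_value_info("y", TensorProto.INT4, [4])],
    )
    model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 21)])
    onnx.checker.check_model(model)  # expected to validate under IR version 10
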
@@ -17,7 +17,7 @@ or value_* must be specified.
 
 ONNX_OPERATOR_SET_SCHEMA(
     Constant,
-    19,
+    21,
     OpSchema()
         .SetDoc(Constant_ver19_doc)
         .Attr("value", "The value for the elements of the output tensor.", AttributeProto::TENSOR, false)
@@ -57,7 +57,7 @@ ONNX_OPERATOR_SET_SCHEMA(
             AttributeProto::STRINGS,
             false)
         .Output(0, "output", "Output tensor containing the same value of the provided tensor.", "T")
-        .TypeConstraint("T", OpSchema::all_tensor_types_ir9(), "Constrain input and output types to all tensor types.")
+        .TypeConstraint("T", OpSchema::all_tensor_types_ir10(), "Constrain input and output types to all tensor types.")
         .TypeAndShapeInferenceFunction(ConstantOpInference));
 
 static const char* ConstantOfShape_ver20_doc = R"DOC(
@@ -66,7 +66,7 @@ Generate a tensor with given value and shape.
 
 ONNX_OPERATOR_SET_SCHEMA(
     ConstantOfShape,
-    20,
+    21,
     OpSchema()
         .SetDoc(ConstantOfShape_ver20_doc)
         .Attr(
@@ -103,13 +103,15 @@ ONNX_OPERATOR_SET_SCHEMA(
103
103
  "tensor(uint16)",
104
104
  "tensor(uint32)",
105
105
  "tensor(uint64)",
106
+ "tensor(uint4)",
107
+ "tensor(int4)",
106
108
  "tensor(bool)",
107
109
  "tensor(bfloat16)",
108
110
  "tensor(float8e4m3fn)",
109
111
  "tensor(float8e4m3fnuz)",
110
112
  "tensor(float8e5m2)",
111
113
  "tensor(float8e5m2fnuz)"},
112
- "Constrain output types to be numerics.")
114
+ "Constrain output types to be numerics or boolean.")
113
115
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
114
116
  if (ctx.getAttribute("value") != nullptr) {
115
117
  propagateElemTypeFromDtypeToOutput(ctx, ctx.getAttribute("value"), 0);
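
The opset-21 ConstantOfShape above adds tensor(uint4)/tensor(int4) and now documents boolean in its output constraint. A minimal sketch, assuming onnx 1.16+, that fills a 2x3 shape with a boolean value:

import onnx
from onnx import TensorProto, helper

fill = helper.make_tensor("fill", TensorProto.BOOL, dims=[1], vals=[True])
shape = helper.make_tensor("shape", TensorProto.INT64, dims=[2], vals=[2, 3])

node = helper.make_node("ConstantOfShape", ["shape"], ["y"], value=fill)
graph = helper.make_graph(
    [node], "constant_of_shape_bool", inputs=[],
    outputs=[helper.make_tensor_value_info("y", TensorProto.BOOL, [2, 3])],
    initializer=[shape],
)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 21)])
onnx.checker.check_model(model)  # bool output is covered by the widened T2 constraint
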
@@ -9,6 +9,57 @@
  #include "onnx/defs/schema.h"

  namespace ONNX_NAMESPACE {
+
+ static const char* Constant_ver19_doc = R"DOC(
+ This operator produces a constant tensor. Exactly one of the provided attributes, either value, sparse_value,
+ or value_* must be specified.
+ )DOC";
+
+ ONNX_OPERATOR_SET_SCHEMA(
+ Constant,
+ 19,
+ OpSchema()
+ .SetDoc(Constant_ver19_doc)
+ .Attr("value", "The value for the elements of the output tensor.", AttributeProto::TENSOR, false)
+ .Attr(
+ "sparse_value",
+ "The value for the elements of the output tensor in sparse format.",
+ AttributeProto::SPARSE_TENSOR,
+ false)
+ .Attr(
+ "value_int",
+ "The value for the sole element for the scalar, int64, output tensor.",
+ AttributeProto::INT,
+ false)
+ .Attr(
+ "value_ints",
+ "The values for the elements for the 1D, int64, output tensor.",
+ AttributeProto::INTS,
+ false)
+ .Attr(
+ "value_float",
+ "The value for the sole element for the scalar, float32, output tensor.",
+ AttributeProto::FLOAT,
+ false)
+ .Attr(
+ "value_floats",
+ "The values for the elements for the 1D, float32, output tensor.",
+ AttributeProto::FLOATS,
+ false)
+ .Attr(
+ "value_string",
+ "The value for the sole element for the scalar, UTF-8 string, output tensor.",
+ AttributeProto::STRING,
+ false)
+ .Attr(
+ "value_strings",
+ "The values for the elements for the 1D, UTF-8 string, output tensor.",
+ AttributeProto::STRINGS,
+ false)
+ .Output(0, "output", "Output tensor containing the same value of the provided tensor.", "T")
+ .TypeConstraint("T", OpSchema::all_tensor_types_ir9(), "Constrain input and output types to all tensor types.")
+ .TypeAndShapeInferenceFunction(ConstantOpInference));
+
  static const char* Constant_ver13_doc = R"DOC(
  This operator produces a constant tensor. Exactly one of the provided attributes, either value, sparse_value,
  or value_* must be specified.
@@ -202,6 +253,70 @@ ONNX_OPERATOR_SET_SCHEMA(
  "One of the attributes 'value' or 'sparse_value' must be specified for a Constant node.");
  }));

+ static const char* ConstantOfShape_ver20_doc = R"DOC(
+ Generate a tensor with given value and shape.
+ )DOC";
+
+ ONNX_OPERATOR_SET_SCHEMA(
+ ConstantOfShape,
+ 20,
+ OpSchema()
+ .SetDoc(ConstantOfShape_ver20_doc)
+ .Attr(
+ "value",
+ "(Optional) The value of the output elements."
+ "Should be a one-element tensor. If not specified, it defaults to a tensor of value 0 and datatype float32",
+ AttributeProto::TENSOR,
+ OPTIONAL_VALUE)
+ .Input(
+ 0,
+ "input",
+ "1D tensor. The shape of the expected output tensor. If empty tensor is given, the output would be a scalar."
+ " All values must be >= 0.",
+ "T1")
+ .Output(
+ 0,
+ "output",
+ "Output tensor of shape specified by 'input'."
+ "If attribute 'value' is specified, the value and datatype of the output tensor is taken from 'value'."
+ "If attribute 'value' is not specified, the value in the output defaults to 0, and the datatype "
+ "defaults to float32.",
+ "T2")
+ .TypeConstraint("T1", {"tensor(int64)"}, "Constrain input types.")
+ .TypeConstraint(
+ "T2",
+ {"tensor(float16)",
+ "tensor(float)",
+ "tensor(double)",
+ "tensor(int8)",
+ "tensor(int16)",
+ "tensor(int32)",
+ "tensor(int64)",
+ "tensor(uint8)",
+ "tensor(uint16)",
+ "tensor(uint32)",
+ "tensor(uint64)",
+ "tensor(bool)",
+ "tensor(bfloat16)",
+ "tensor(float8e4m3fn)",
+ "tensor(float8e4m3fnuz)",
+ "tensor(float8e5m2)",
+ "tensor(float8e5m2fnuz)"},
+ "Constrain output types to be numerics.")
+ .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
+ if (ctx.getAttribute("value") != nullptr) {
+ propagateElemTypeFromDtypeToOutput(ctx, ctx.getAttribute("value"), 0);
+ } else {
+ propagateElemTypeFromDtypeToOutput(ctx, TensorProto::FLOAT, 0);
+ }
+
+ bool found = false;
+ TensorShapeProto output_shape = getShapeInput(ctx, 0, found);
+ if (found) {
+ *ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape() = output_shape;
+ }
+ }));
+
  static const char* ConstantOfShape_ver9_doc = R"DOC(
  Generate a tensor with given value and shape.
  )DOC";
onnx/defs/math/defs.cc CHANGED
@@ -1385,74 +1385,6 @@ ONNX_OPERATOR_SET_SCHEMA(
  }
  }));

- void matmulShapeInference(ONNX_NAMESPACE::InferenceContext& ctx, int input1Idx, int input2Idx) {
- if (!hasInputShape(ctx, input1Idx) || !hasInputShape(ctx, input2Idx)) {
- return;
- }
-
- const auto shape0 = ctx.getInputType(input1Idx)->tensor_type().shape();
- const auto shape1 = ctx.getInputType(input2Idx)->tensor_type().shape();
-
- if (shape0.dim_size() == 0 || shape1.dim_size() == 0) {
- fail_shape_inference("Input tensors of wrong rank (0).");
- }
-
- ONNX_NAMESPACE::TensorShapeProto shapeL, shapeR;
-
- // First promote each shape to at least rank-2. This logic is
- // specific to matmul, not generic broadcasting.
- {
- if (shape0.dim_size() == 1) {
- shapeL.add_dim()->set_dim_value(1);
- *shapeL.add_dim() = shape0.dim(0);
- } else {
- *shapeL.mutable_dim() = shape0.dim();
- }
- if (shape1.dim_size() == 1) {
- *shapeR.add_dim() = shape1.dim(0);
- shapeR.add_dim()->set_dim_value(1);
- } else {
- *shapeR.mutable_dim() = shape1.dim();
- }
- }
-
- // Check for compatible matrix multiply dimensions
- {
- auto dimL = shapeL.dim(shapeL.dim_size() - 1);
- auto dimR = shapeR.dim(shapeR.dim_size() - 2);
- if (dimL.has_dim_value() && dimR.has_dim_value() && dimL.dim_value() != dimR.dim_value()) {
- fail_shape_inference("Incompatible dimensions for matrix multiplication");
- }
- }
-
- ONNX_NAMESPACE::TensorShapeProto resultShape;
-
- // Now call out to generic multidimensional broadcasting for
- // the broadcastable prefixes.
- {
- ONNX_NAMESPACE::TensorShapeProto prefixShapeL, prefixShapeR;
- for (int i = 0; i < shapeL.dim_size() - 2; ++i) {
- *prefixShapeL.add_dim() = shapeL.dim(i);
- }
- for (int i = 0; i < shapeR.dim_size() - 2; ++i) {
- *prefixShapeR.add_dim() = shapeR.dim(i);
- }
- bidirectionalBroadcastShapeInference(prefixShapeL, prefixShapeR, resultShape);
- }
-
- // Back to matmul-specific. Add the trailing dimensions back in.
- {
- if (shape0.dim_size() != 1) {
- *resultShape.add_dim() = shapeL.dim(shapeL.dim_size() - 2);
- }
- if (shape1.dim_size() != 1) {
- *resultShape.add_dim() = shapeR.dim(shapeR.dim_size() - 1);
- }
- }
-
- *ctx.getOutputType(0)->mutable_tensor_type()->mutable_shape() = resultShape;
- }
-
  static const char* MatMul_ver13_doc = R"DOC(
  Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html
  )DOC";
@@ -1478,16 +1410,16 @@ ONNX_OPERATOR_SET_SCHEMA(
  .SetDoc(MatMul_ver13_doc)
  .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
  propagateElemTypeFromInputToOutput(ctx, 0, 0);
- matmulShapeInference(ctx, 0, 1);
+ defs::math::utils::MatMulShapeInference(ctx, 0, 1);
  }));

  static const char* TopK_ver11_doc = R"DOC(
  Retrieve the top-K largest or smallest elements along a specified axis. Given an input tensor of
- shape [a_1, a_2, ..., a_n, r] and integer argument k, return two outputs:
+ shape [a_0, a_1, ..., a_{n-1}] and integer argument k, return two outputs:

- * Value tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n]
+ * Value tensor of shape [a_0, a_1, ..., a_{axis-1}, k, a_{axis+1}, ... a_{n-1}]
  which contains the values of the top k elements along the specified axis
- * Index tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] which
+ * Index tensor of shape [a_0, a_1, ..., a_{axis-1}, k, a_{axis+1}, ... a_{n-1}] which
  contains the indices of the top k elements (original indices from the input
  tensor).

@@ -1507,7 +1439,7 @@ ONNX_OPERATOR_SET_SCHEMA(
  .Input(
  0,
  "X",
- "Tensor of shape [a_1, a_2, ..., a_n, r]",
+ "Tensor of shape [a_0, a_1, ..., a_{n-1}]",
  "T",
  OpSchema::Single,
  true,
@@ -1525,7 +1457,7 @@ ONNX_OPERATOR_SET_SCHEMA(
  .Output(
  0,
  "Values",
- "Tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] "
+ "Tensor of shape [a_0, a_1, ..., a_{axis-1}, k, a_{axis+1}, ... a_{n-1}] "
  "containing top K values from the input tensor",
  "T",
  OpSchema::Single,
@@ -1535,7 +1467,7 @@ ONNX_OPERATOR_SET_SCHEMA(
  .Output(
  1,
  "Indices",
- "Tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] "
+ "Tensor of shape [a_0, a_1, ..., a_{axis-1}, k, a_{axis+1}, ... a_{n-1}] "
  "containing the corresponding input tensor indices for the top K "
  "values.",
  "I",
@@ -2000,35 +1932,13 @@ ONNX_OPERATOR_SET_SCHEMA(
  "Constrain input and output types to all numeric tensors.")
  .TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));

- static const char* QLinearMatMul_ver10_doc = R"DOC(
- Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html.
- It consumes two quantized input tensors, their scales and zero points, scale and zero point of output,
- and computes the quantized output. The quantization formula is y = saturate((x / y_scale) + y_zero_point).
- For (x / y_scale), it is rounding to nearest ties to even. Refer to https://en.wikipedia.org/wiki/Rounding for details.
- Scale and zero point must have same shape. They must be either scalar (per tensor) or N-D tensor
- (per row for 'a' and per column for 'b'). Scalar refers to per tensor quantization whereas N-D refers to per row
- or per column quantization. If the input is 2D of shape [M, K] then zero point and scale tensor may be
- an M element vector [v_1, v_2, ..., v_M] for per row quantization and K element vector of shape [v_1, v_2, ..., v_K]
- for per column quantization. If the input is N-D tensor with shape [D1, D2, M, K] then zero point and scale tensor may
- have shape [D1, D2, M, 1] for per row quantization and shape [D1, D2, 1, K] for per column quantization.
- Production must never overflow, and accumulation may overflow if and only if in 32 bits.
- )DOC";
-
  ONNX_OPERATOR_SET_SCHEMA(
  QLinearMatMul,
- 10,
+ 21,
  OpSchema()
- .SetDoc(QLinearMatMul_ver10_doc)
+ .SetDoc(defs::math::utils::QLinearMatMulDoc())
  .Input(0, "a", "N-dimensional quantized matrix a", "T1", OpSchema::Single, true, 1, OpSchema::NonDifferentiable)
- .Input(
- 1,
- "a_scale",
- "scale of quantized input a",
- "tensor(float)",
- OpSchema::Single,
- true,
- 1,
- OpSchema::NonDifferentiable)
+ .Input(1, "a_scale", "scale of quantized input a", "TS", OpSchema::Single, true, 1, OpSchema::NonDifferentiable)
  .Input(
  2,
  "a_zero_point",
@@ -2039,15 +1949,7 @@ ONNX_OPERATOR_SET_SCHEMA(
  1,
  OpSchema::NonDifferentiable)
  .Input(3, "b", "N-dimensional quantized matrix b", "T2", OpSchema::Single, true, 1, OpSchema::NonDifferentiable)
- .Input(
- 4,
- "b_scale",
- "scale of quantized input b",
- "tensor(float)",
- OpSchema::Single,
- true,
- 1,
- OpSchema::NonDifferentiable)
+ .Input(4, "b_scale", "scale of quantized input b", "TS", OpSchema::Single, true, 1, OpSchema::NonDifferentiable)
  .Input(
  5,
  "b_zero_point",
@@ -2061,7 +1963,7 @@ ONNX_OPERATOR_SET_SCHEMA(
  6,
  "y_scale",
  "scale of quantized output y",
- "tensor(float)",
+ "TS",
  OpSchema::Single,
  true,
  1,
@@ -2084,43 +1986,35 @@ ONNX_OPERATOR_SET_SCHEMA(
  true,
  1,
  OpSchema::NonDifferentiable)
+ .TypeConstraint("TS", {"tensor(float)", "tensor(float16)", "tensor(bfloat16)"}, "Constrain scales.")
  .TypeConstraint(
  "T1",
- {"tensor(int8)", "tensor(uint8)"},
- "Constrain input a and its zero point data type to 8-bit integer tensor.")
+ {"tensor(int8)",
+ "tensor(uint8)",
+ "tensor(float8e4m3fn)",
+ "tensor(float8e4m3fnuz)",
+ "tensor(float8e5m2)",
+ "tensor(float8e5m2fnuz)"},
+ "The type of input a and its zeropoint.")
  .TypeConstraint(
  "T2",
- {"tensor(int8)", "tensor(uint8)"},
- "Constrain input b and its zero point data type to 8-bit integer tensor.")
+ {"tensor(int8)",
+ "tensor(uint8)",
+ "tensor(float8e4m3fn)",
+ "tensor(float8e4m3fnuz)",
+ "tensor(float8e5m2)",
+ "tensor(float8e5m2fnuz)"},
+ "The type of input b and its zeropoint.")
  .TypeConstraint(
  "T3",
- {"tensor(int8)", "tensor(uint8)"},
- "Constrain output y and its zero point data type to 8-bit integer tensor.")
- .TypeAndShapeInferenceFunction([](ONNX_NAMESPACE::InferenceContext& ctx) {
- auto a_type = ctx.getInputType(0);
- auto b_type = ctx.getInputType(3);
- if (nullptr == a_type || nullptr == b_type ||
- a_type->value_case() != ONNX_NAMESPACE::TypeProto::kTensorType ||
- b_type->value_case() != ONNX_NAMESPACE::TypeProto::kTensorType) {
- fail_type_inference("inputs are expected to have tensor type.");
- }
-
- auto a_zero_point_type = ctx.getInputType(2);
- if (nullptr == a_zero_point_type ||
- a_zero_point_type->tensor_type().elem_type() != a_type->tensor_type().elem_type()) {
- fail_type_inference("input and zero_point pair is expected to have be same type.");
- }
-
- auto b_zero_point_type = ctx.getInputType(5);
- if (nullptr == b_zero_point_type ||
- b_zero_point_type->tensor_type().elem_type() != b_type->tensor_type().elem_type()) {
- fail_type_inference("input and zero_point pair is expected to have same type.");
- }
-
- propagateElemTypeFromInputToOutput(ctx, 7, 0);
-
- matmulShapeInference(ctx, 0, 3);
- }));
+ {"tensor(int8)",
+ "tensor(uint8)",
+ "tensor(float8e4m3fn)",
+ "tensor(float8e4m3fnuz)",
+ "tensor(float8e5m2)",
+ "tensor(float8e5m2fnuz)"},
+ "The type of the output and its zeropoint.")
+ .TypeAndShapeInferenceFunction(defs::math::utils::QLinearMatMulShapeInference));

  static const char* MatMulInteger_ver10_doc = R"DOC(
  Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html.
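
The removed QLinearMatMul doc above spells out the requantization rule, y = saturate((x / y_scale) + y_zero_point) with round-half-to-even; the opset-21 schema keeps that behaviour while allowing float16/bfloat16 scales and float8 inputs. A minimal NumPy sketch for the uint8, per-tensor case (hypothetical helper, not the onnx reference implementation):

import numpy as np

def qlinear_matmul_u8(a, a_scale, a_zp, b, b_scale, b_zp, y_scale, y_zp):
    # Dequantize, multiply in float32, then requantize with rounding and saturation.
    a_real = (a.astype(np.float32) - a_zp) * a_scale
    b_real = (b.astype(np.float32) - b_zp) * b_scale
    y_real = a_real @ b_real
    y = np.rint(y_real / y_scale) + y_zp        # np.rint rounds half to even
    return np.clip(y, 0, 255).astype(np.uint8)  # saturate to the uint8 range

a = np.array([[2, 4], [6, 8]], dtype=np.uint8)
b = np.array([[1, 3], [5, 7]], dtype=np.uint8)
print(qlinear_matmul_u8(a, 0.5, 0, b, 0.25, 0, y_scale=1.0, y_zp=0))  # [[3 4] [6 9]]
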
@@ -2183,8 +2077,9 @@ ONNX_OPERATOR_SET_SCHEMA(
  // Right now we only support int32
  y_type->mutable_tensor_type()->set_elem_type(ONNX_NAMESPACE::TensorProto::INT32);

- matmulShapeInference(ctx, 0, 1);
+ defs::math::utils::MatMulShapeInference(ctx, 0, 1);
  }));
+
  static const char* CumSum_ver14_doc = R"DOC(
  Performs cumulative sum of the input elements along the given axis.
  By default, it will do the sum inclusively meaning the first element is copied as is.