onnx2tf 1.29.12.tar.gz → 1.29.14.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (204)
  1. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/PKG-INFO +4 -3
  2. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/README.md +2 -2
  3. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/__init__.py +1 -1
  4. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/onnx2tf.py +107 -0
  5. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/AveragePool.py +49 -0
  6. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Expand.py +12 -1
  7. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Flatten.py +106 -24
  8. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Slice.py +34 -2
  9. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/utils/common_functions.py +223 -0
  10. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/pyproject.toml +4 -1
  11. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/__main__.py +0 -0
  12. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Abs.py +0 -0
  13. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Acos.py +0 -0
  14. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Acosh.py +0 -0
  15. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Add.py +0 -0
  16. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/AffineGrid.py +0 -0
  17. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/And.py +0 -0
  18. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/ArgMax.py +0 -0
  19. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/ArgMin.py +0 -0
  20. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Asin.py +0 -0
  21. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Asinh.py +0 -0
  22. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Atan.py +0 -0
  23. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Atanh.py +0 -0
  24. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Attention.py +0 -0
  25. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/BatchNormalization.py +0 -0
  26. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Bernoulli.py +0 -0
  27. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/BitShift.py +0 -0
  28. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/BitwiseAnd.py +0 -0
  29. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/BitwiseNot.py +0 -0
  30. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/BitwiseOr.py +0 -0
  31. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/BitwiseXor.py +0 -0
  32. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/BlackmanWindow.py +0 -0
  33. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Cast.py +0 -0
  34. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Ceil.py +0 -0
  35. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Celu.py +0 -0
  36. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Clip.py +0 -0
  37. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Col2Im.py +0 -0
  38. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Compress.py +0 -0
  39. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Concat.py +0 -0
  40. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/ConcatFromSequence.py +0 -0
  41. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Constant.py +0 -0
  42. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/ConstantOfShape.py +0 -0
  43. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Conv.py +0 -0
  44. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/ConvInteger.py +0 -0
  45. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/ConvTranspose.py +0 -0
  46. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Cos.py +0 -0
  47. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Cosh.py +0 -0
  48. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/CumProd.py +0 -0
  49. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/CumSum.py +0 -0
  50. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/DepthToSpace.py +0 -0
  51. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/DequantizeLinear.py +0 -0
  52. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Det.py +0 -0
  53. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Div.py +0 -0
  54. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Dropout.py +0 -0
  55. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/DynamicQuantizeLinear.py +0 -0
  56. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Einsum.py +0 -0
  57. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Elu.py +0 -0
  58. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Equal.py +0 -0
  59. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Erf.py +0 -0
  60. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Exp.py +0 -0
  61. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/EyeLike.py +0 -0
  62. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Floor.py +0 -0
  63. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/FusedConv.py +0 -0
  64. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/GRU.py +0 -0
  65. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Gather.py +0 -0
  66. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/GatherElements.py +0 -0
  67. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/GatherND.py +0 -0
  68. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Gelu.py +0 -0
  69. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Gemm.py +0 -0
  70. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/GlobalAveragePool.py +0 -0
  71. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/GlobalLpPool.py +0 -0
  72. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/GlobalMaxPool.py +0 -0
  73. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Greater.py +0 -0
  74. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/GreaterOrEqual.py +0 -0
  75. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/GridSample.py +0 -0
  76. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/GroupNorm.py +0 -0
  77. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/HammingWindow.py +0 -0
  78. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/HannWindow.py +0 -0
  79. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/HardSigmoid.py +0 -0
  80. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/HardSwish.py +0 -0
  81. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Hardmax.py +0 -0
  82. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Identity.py +0 -0
  83. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/If.py +0 -0
  84. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Input.py +0 -0
  85. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/InstanceNormalization.py +0 -0
  86. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Inverse.py +0 -0
  87. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/IsInf.py +0 -0
  88. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/IsNaN.py +0 -0
  89. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/LRN.py +0 -0
  90. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/LSTM.py +0 -0
  91. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/LayerNormalization.py +0 -0
  92. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/LeakyRelu.py +0 -0
  93. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Less.py +0 -0
  94. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/LessOrEqual.py +0 -0
  95. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Log.py +0 -0
  96. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/LogSoftmax.py +0 -0
  97. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Loop.py +0 -0
  98. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/LpNormalization.py +0 -0
  99. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/LpPool.py +0 -0
  100. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/MatMul.py +0 -0
  101. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/MatMulInteger.py +0 -0
  102. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Max.py +0 -0
  103. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/MaxPool.py +0 -0
  104. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/MaxRoiPool.py +0 -0
  105. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/MaxUnpool.py +0 -0
  106. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Mean.py +0 -0
  107. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/MeanVarianceNormalization.py +0 -0
  108. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/MelWeightMatrix.py +0 -0
  109. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Min.py +0 -0
  110. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Mish.py +0 -0
  111. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Mod.py +0 -0
  112. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Mul.py +0 -0
  113. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Multinomial.py +0 -0
  114. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Neg.py +0 -0
  115. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/NonMaxSuppression.py +0 -0
  116. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/NonZero.py +0 -0
  117. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Not.py +0 -0
  118. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/OneHot.py +0 -0
  119. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/OptionalGetElement.py +0 -0
  120. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/OptionalHasElement.py +0 -0
  121. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Or.py +0 -0
  122. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/PRelu.py +0 -0
  123. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Pad.py +0 -0
  124. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Pow.py +0 -0
  125. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/QLinearAdd.py +0 -0
  126. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/QLinearConcat.py +0 -0
  127. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/QLinearConv.py +0 -0
  128. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/QLinearLeakyRelu.py +0 -0
  129. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/QLinearMatMul.py +0 -0
  130. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/QLinearMul.py +0 -0
  131. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/QLinearSigmoid.py +0 -0
  132. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/QLinearSoftmax.py +0 -0
  133. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/QuantizeLinear.py +0 -0
  134. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/RNN.py +0 -0
  135. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/RandomNormal.py +0 -0
  136. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/RandomNormalLike.py +0 -0
  137. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/RandomUniform.py +0 -0
  138. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/RandomUniformLike.py +0 -0
  139. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Range.py +0 -0
  140. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Reciprocal.py +0 -0
  141. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/ReduceL1.py +0 -0
  142. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/ReduceL2.py +0 -0
  143. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/ReduceLogSum.py +0 -0
  144. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/ReduceLogSumExp.py +0 -0
  145. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/ReduceMax.py +0 -0
  146. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/ReduceMean.py +0 -0
  147. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/ReduceMin.py +0 -0
  148. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/ReduceProd.py +0 -0
  149. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/ReduceSum.py +0 -0
  150. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/ReduceSumSquare.py +0 -0
  151. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Relu.py +0 -0
  152. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Reshape.py +0 -0
  153. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Resize.py +0 -0
  154. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/ReverseSequence.py +0 -0
  155. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/RoiAlign.py +0 -0
  156. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Round.py +0 -0
  157. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/STFT.py +0 -0
  158. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/ScaleAndTranslate.py +0 -0
  159. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Scatter.py +0 -0
  160. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/ScatterElements.py +0 -0
  161. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/ScatterND.py +0 -0
  162. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Selu.py +0 -0
  163. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/SequenceAt.py +0 -0
  164. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/SequenceConstruct.py +0 -0
  165. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/SequenceEmpty.py +0 -0
  166. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/SequenceErase.py +0 -0
  167. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/SequenceInsert.py +0 -0
  168. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/SequenceLength.py +0 -0
  169. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Shape.py +0 -0
  170. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Shrink.py +0 -0
  171. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Sigmoid.py +0 -0
  172. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Sign.py +0 -0
  173. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Sin.py +0 -0
  174. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Sinh.py +0 -0
  175. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Size.py +0 -0
  176. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Softmax.py +0 -0
  177. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Softplus.py +0 -0
  178. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Softsign.py +0 -0
  179. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/SpaceToDepth.py +0 -0
  180. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Split.py +0 -0
  181. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/SplitToSequence.py +0 -0
  182. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Sqrt.py +0 -0
  183. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Squeeze.py +0 -0
  184. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/StringNormalizer.py +0 -0
  185. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Sub.py +0 -0
  186. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Sum.py +0 -0
  187. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Tan.py +0 -0
  188. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Tanh.py +0 -0
  189. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/ThresholdedRelu.py +0 -0
  190. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Tile.py +0 -0
  191. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/TopK.py +0 -0
  192. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Transpose.py +0 -0
  193. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Trilu.py +0 -0
  194. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Unique.py +0 -0
  195. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Unsqueeze.py +0 -0
  196. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Upsample.py +0 -0
  197. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Where.py +0 -0
  198. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Xor.py +0 -0
  199. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/__init__.py +0 -0
  200. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/utils/__init__.py +0 -0
  201. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/utils/enums.py +0 -0
  202. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/utils/iterative_json_optimizer.py +0 -0
  203. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/utils/json_auto_generator.py +0 -0
  204. {onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/utils/logging.py +0 -0

{onnx2tf-1.29.12 → onnx2tf-1.29.14}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: onnx2tf
- Version: 1.29.12
+ Version: 1.29.14
  Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
  Keywords: onnx,tensorflow,tflite,keras,deep-learning,machine-learning
  Author: Katsuya Hyodo
@@ -13,6 +13,7 @@ Classifier: License :: OSI Approved :: MIT License
  Classifier: Operating System :: POSIX :: Linux
  Classifier: Operating System :: Unix
  Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
  Requires-Dist: requests==2.32.5
@@ -363,7 +364,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- ghcr.io/pinto0309/onnx2tf:1.29.12
+ ghcr.io/pinto0309/onnx2tf:1.29.14

  or

@@ -371,7 +372,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- docker.io/pinto0309/onnx2tf:1.29.12
+ docker.io/pinto0309/onnx2tf:1.29.14

  or

{onnx2tf-1.29.12 → onnx2tf-1.29.14}/README.md

@@ -322,7 +322,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- ghcr.io/pinto0309/onnx2tf:1.29.12
+ ghcr.io/pinto0309/onnx2tf:1.29.14

  or

@@ -330,7 +330,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- docker.io/pinto0309/onnx2tf:1.29.12
+ docker.io/pinto0309/onnx2tf:1.29.14

  or

{onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/__init__.py

@@ -1,3 +1,3 @@
  from onnx2tf.onnx2tf import convert, main

- __version__ = '1.29.12'
+ __version__ = '1.29.14'

{onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/onnx2tf.py

@@ -62,6 +62,73 @@ from onnx2tf.utils.enums import (
  from onnx2tf.utils.logging import *
  from sng4onnx import generate as op_name_auto_generate

+ def apply_nonzero_passthrough(
+ *,
+ graph: gs.Graph,
+ onnx_tensor_infos: Optional[Dict[str, np.ndarray]],
+ onnx_input_datas_for_validation: Optional[Dict[str, np.ndarray]] = None,
+ update_graph_shape: bool = False,
+ ) -> None:
+ if onnx_tensor_infos is None:
+ return
+ for graph_node in graph.nodes:
+ if graph_node.op != 'NonZero':
+ continue
+ if len(graph_node.inputs) == 0 or len(graph_node.outputs) == 0:
+ continue
+ nonzero_input = graph_node.inputs[0]
+ nonzero_output = graph_node.outputs[0]
+ passthrough_tensor = None
+ input_name = nonzero_input.name
+
+ if input_name in onnx_tensor_infos:
+ passthrough_tensor = onnx_tensor_infos[input_name]
+ elif onnx_input_datas_for_validation and input_name in onnx_input_datas_for_validation:
+ passthrough_tensor = onnx_input_datas_for_validation[input_name]
+ elif hasattr(nonzero_input, 'values'):
+ passthrough_tensor = nonzero_input.values
+
+ if passthrough_tensor is not None:
+ onnx_tensor_infos[nonzero_output.name] = passthrough_tensor
+ if update_graph_shape and hasattr(passthrough_tensor, 'shape'):
+ nonzero_output.shape = list(passthrough_tensor.shape)
+
+ def apply_nonzero_passthrough_tf(
+ *,
+ graph: gs.Graph,
+ tf_layers_dict: Dict[str, Any],
+ tf_tensor_infos: Optional[Dict[str, np.ndarray]],
+ tf_input_datas_for_validation: Optional[Dict[str, np.ndarray]] = None,
+ ) -> None:
+ if tf_tensor_infos is None:
+ return
+ for graph_node in graph.nodes:
+ if graph_node.op != 'NonZero':
+ continue
+ if len(graph_node.inputs) == 0 or len(graph_node.outputs) == 0:
+ continue
+ input_name = graph_node.inputs[0].name
+ output_name = graph_node.outputs[0].name
+ input_info = tf_layers_dict.get(input_name)
+ output_info = tf_layers_dict.get(output_name)
+ if input_info is None or output_info is None:
+ continue
+ input_tf_node = input_info.get('tf_node')
+ output_tf_node = output_info.get('tf_node')
+ if input_tf_node is None or output_tf_node is None:
+ continue
+ input_tf_name = input_tf_node.name
+ output_tf_name = output_tf_node.name
+ passthrough_tensor = None
+
+ if input_tf_name in tf_tensor_infos:
+ passthrough_tensor = tf_tensor_infos[input_tf_name]
+ elif tf_input_datas_for_validation and input_tf_name in tf_input_datas_for_validation:
+ passthrough_tensor = tf_input_datas_for_validation[input_tf_name]
+
+ if passthrough_tensor is not None:
+ tf_tensor_infos[output_tf_name] = passthrough_tensor
+
  def convert(
  input_onnx_file_path: Optional[str] = '',
  onnx_graph: Optional[onnx.ModelProto] = None,
@@ -1113,6 +1180,7 @@ def convert(
  # Used to verify the output error of each OP in the TensorFlow model.
  full_ops_output_names = []
  onnx_tensor_infos_for_validation = None
+ onnx_input_datas_for_validation = {}
  for graph_node in graph.nodes:
  full_ops_output_names_sub = []
  for graph_node_output in graph_node.outputs:
@@ -1132,6 +1200,7 @@ def convert(
  enable_ort_output_memmap=onnxruntime_output_memmap,
  ort_output_memmap_dir=onnxruntime_output_memmap_dir,
  shape_hints=shape_hints if (check_onnx_tf_outputs_elementwise_close or check_onnx_tf_outputs_elementwise_close_full) else None,
+ input_datas_for_validation=onnx_input_datas_for_validation,
  )
  """
  onnx_tensor_infos_for_validation:
@@ -1148,12 +1217,20 @@ def convert(
  in zip(full_ops_output_names, onnx_outputs_for_validation)
  }
  del onnx_outputs_for_validation
+
+ apply_nonzero_passthrough(
+ graph=graph,
+ onnx_tensor_infos=onnx_tensor_infos_for_validation,
+ onnx_input_datas_for_validation=onnx_input_datas_for_validation,
+ update_graph_shape=True,
+ )
  except Exception as ex:
  warn(
  f'The optimization process for shape estimation is skipped ' +
  f'because it contains OPs that cannot be inferred by the standard onnxruntime.'
  )
  warn(f'{ex}')
+ onnx_input_datas_for_validation = None
  additional_parameters['onnx_tensor_infos_for_validation'] = onnx_tensor_infos_for_validation
  additional_parameters['test_data_nhwc'] = test_data_nhwc
  additional_parameters['custom_input_op_name_np_data_path'] = custom_input_op_name_np_data_path
@@ -2061,6 +2138,7 @@ def convert(
  dummy_onnx_outputs = None
  try:
  # ONNX dummy inference
+ onnx_input_datas_for_validation = {}
  dummy_onnx_outputs: List[np.ndarray] = \
  dummy_onnx_inference(
  onnx_graph=onnx_graph,
@@ -2072,6 +2150,7 @@ def convert(
  enable_ort_output_memmap=onnxruntime_output_memmap,
  ort_output_memmap_dir=onnxruntime_output_memmap_dir,
  shape_hints=shape_hints,
+ input_datas_for_validation=onnx_input_datas_for_validation,
  )
  except Exception as ex:
  warn(
@@ -2081,6 +2160,7 @@ def convert(
  warn(f'{ex}')
  else:
  # TF dummy inference
+ tf_input_datas_for_validation = {}
  tf_tensor_infos: Dict[Any] = \
  dummy_tf_inference(
  model=model,
@@ -2088,6 +2168,7 @@ def convert(
  test_data_nhwc=test_data_nhwc,
  custom_input_op_name_np_data_path=custom_input_op_name_np_data_path,
  shape_hints=shape_hints,
+ input_datas_for_validation=tf_input_datas_for_validation,
  keep_shape_absolutely_input_names=keep_shape_absolutely_input_names,
  keep_ncw_or_nchw_or_ncdhw_input_names=keep_ncw_or_nchw_or_ncdhw_input_names,
  keep_nwc_or_nhwc_or_ndhwc_input_names=keep_nwc_or_nhwc_or_ndhwc_input_names,
@@ -2097,6 +2178,17 @@ def convert(
  output_name: dummy_onnx_output \
  for output_name, dummy_onnx_output in zip(ops_output_names, dummy_onnx_outputs)
  }
+ apply_nonzero_passthrough(
+ graph=graph,
+ onnx_tensor_infos=onnx_tensor_infos,
+ onnx_input_datas_for_validation=onnx_input_datas_for_validation,
+ )
+ apply_nonzero_passthrough_tf(
+ graph=graph,
+ tf_layers_dict=tf_layers_dict,
+ tf_tensor_infos=tf_tensor_infos,
+ tf_input_datas_for_validation=tf_input_datas_for_validation,
+ )
  """
  np.allclose(
  dummy_onnx_outputs,
@@ -2326,6 +2418,7 @@ def convert(
  # Initial accuracy check
  try:
  # ONNX dummy inference
+ onnx_input_datas_for_validation = {}
  dummy_onnx_outputs: List[np.ndarray] = \
  dummy_onnx_inference(
  onnx_graph=onnx_graph,
@@ -2337,9 +2430,11 @@ def convert(
  enable_ort_output_memmap=onnxruntime_output_memmap,
  ort_output_memmap_dir=onnxruntime_output_memmap_dir,
  shape_hints=shape_hints,
+ input_datas_for_validation=onnx_input_datas_for_validation,
  )

  # TF dummy inference
+ tf_input_datas_for_validation = {}
  tf_tensor_infos: Dict[Any] = \
  dummy_tf_inference(
  model=validation_model,
@@ -2347,6 +2442,7 @@ def convert(
  test_data_nhwc=test_data_nhwc,
  custom_input_op_name_np_data_path=custom_input_op_name_np_data_path,
  shape_hints=shape_hints,
+ input_datas_for_validation=tf_input_datas_for_validation,
  keep_shape_absolutely_input_names=keep_shape_absolutely_input_names,
  keep_ncw_or_nchw_or_ncdhw_input_names=keep_ncw_or_nchw_or_ncdhw_input_names,
  keep_nwc_or_nhwc_or_ndhwc_input_names=keep_nwc_or_nhwc_or_ndhwc_input_names,
@@ -2357,6 +2453,17 @@ def convert(
  output_name: dummy_onnx_output \
  for output_name, dummy_onnx_output in zip(ops_output_names, dummy_onnx_outputs)
  }
+ apply_nonzero_passthrough(
+ graph=graph,
+ onnx_tensor_infos=onnx_tensor_infos,
+ onnx_input_datas_for_validation=onnx_input_datas_for_validation,
+ )
+ apply_nonzero_passthrough_tf(
+ graph=graph,
+ tf_layers_dict=tf_layers_dict,
+ tf_tensor_infos=tf_tensor_infos,
+ tf_input_datas_for_validation=tf_input_datas_for_validation,
+ )

  input_names = [k.name for k in inputs]
  for k, v in tf_layers_dict.items():
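
Note on the onnx2tf.py change: NonZero has a data-dependent output shape, so the ONNX and TF dummy inferences used for accuracy validation can produce outputs that are not comparable element-wise; the two helpers above appear to work around this by storing the NonZero input tensor, whose shape is fixed, under the output's key in the validation dictionaries. A toy illustration of that substitution, with invented arrays and key names rather than anything from the package:

    # NonZero's output length depends on the data, so two runs fed different
    # random inputs cannot be compared; the pass-through stores the fixed-shape
    # input under the output's key instead.
    import numpy as np

    onnx_run = (np.random.RandomState(0).rand(1, 8) > 0.5).astype(np.float32)
    tf_run = (np.random.RandomState(1).rand(1, 8) > 0.5).astype(np.float32)
    print(np.nonzero(onnx_run)[1].shape, np.nonzero(tf_run)[1].shape)  # (6,) vs (1,)

    onnx_tensor_infos = {'nonzero_output': np.stack(np.nonzero(onnx_run))}  # shape (2, 6)
    onnx_tensor_infos['nonzero_output'] = onnx_run  # pass-through: fixed shape (1, 8)
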

{onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/AveragePool.py

@@ -370,6 +370,12 @@ def make_node(
  paddings=tf_pads,
  mode='CONSTANT',
  )
+ if input_tensor_shape is not None and len(input_tensor_shape) == spatial_size + 2:
+ # Preserve known batch/channel dims since dynamic paddings erase shape info.
+ padded_tensor = tf.ensure_shape(
+ padded_tensor,
+ [input_tensor_shape[0]] + [None] * spatial_size + [input_tensor_shape[-1]],
+ )
  else:
  if auto_pad == 'SAME_LOWER':
  # switch the order of pads
@@ -468,6 +474,49 @@ def make_node(
  print(error_msg)
  raise AssertionError(error_msg)

+ # Dynamic shape compensation for count_include_pad=False with explicit padding.
+ # Use pooled mask to compute valid element counts per window.
+ if not is_known_shape and is_explicit_padding and not count_include_pad:
+ mask = tf.ones_like(input_tensor, dtype=pooled_tensor.dtype)
+ if tf_pads is not None:
+ if tf.is_tensor(tf_pads):
+ mask = tf.pad(
+ tensor=mask,
+ paddings=tf_pads,
+ mode='CONSTANT',
+ )
+ elif tf_pads != [0] * spatial_size * 2:
+ mask = tf.pad(
+ tensor=mask,
+ paddings=tf_pads,
+ mode='CONSTANT',
+ )
+ if len(kernel_shape) == 1:
+ mask_pooled = AveragePooling1D(
+ pool_size=kernel_shape,
+ strides=strides,
+ padding=tf_pad_mode.upper(),
+ )(mask)
+ elif len(kernel_shape) == 2:
+ mask_pooled = AveragePooling2D(
+ pool_size=kernel_shape,
+ strides=strides,
+ padding=tf_pad_mode.upper(),
+ )(mask)
+ else:
+ mask_pooled = AveragePooling3D(
+ pool_size=kernel_shape,
+ strides=strides,
+ padding=tf_pad_mode.upper(),
+ )(mask)
+ kernel_volume = float(np.prod(kernel_shape))
+ count_valid = mask_pooled * tf.cast(kernel_volume, dtype=mask_pooled.dtype)
+ multiplier = tf.math.divide_no_nan(
+ tf.cast(kernel_volume, dtype=mask_pooled.dtype),
+ count_valid,
+ )
+ pooled_tensor = pooled_tensor * multiplier
+
  # tensorflow average pooling needs extra process to get same output with onnx
  # https://github.com/PINTO0309/onnx2tf/issues/124
  if average_multiplier is not None:
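
Note on the AveragePool.py change: the second hunk emulates ONNX's count_include_pad=0 behaviour when the input shape is dynamic. TensorFlow's average pooling divides every window by the full kernel size even where the window overlaps the explicit zero padding, so a pooled mask of ones is used to rescale each output by kernel_volume / valid_count. A self-contained sketch of that correction, with made-up shapes, padding, and kernel size:

    import tensorflow as tf

    x = tf.ones([1, 4, 4, 1], dtype=tf.float32)            # NHWC input of ones
    pads = [[0, 0], [1, 1], [1, 1], [0, 0]]                # explicit spatial zero padding

    padded = tf.pad(x, pads)                               # padded zeros dilute TF's mean
    pooled = tf.nn.avg_pool2d(padded, ksize=[3, 3], strides=[1, 1], padding='VALID')

    mask = tf.pad(tf.ones_like(x), pads)                   # 1 = real data, 0 = padding
    valid = tf.nn.avg_pool2d(mask, ksize=[3, 3], strides=[1, 1], padding='VALID') * 9.0
    corrected = pooled * tf.math.divide_no_nan(9.0, valid) # divide by valid count only

    print(pooled[0, 0, 0, 0].numpy(), corrected[0, 0, 0, 0].numpy())  # ~0.444 vs 1.0
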

{onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Expand.py

@@ -48,6 +48,7 @@ def make_node(
  tf_layers_dict.get(graph_node.inputs[0].name, {}).get('before_op_output_shape_trans', True)
  before_op_output_shape_trans_2 = \
  tf_layers_dict.get(graph_node.inputs[1].name, {}).get('before_op_output_shape_trans', True)
+ # Data layout follows input[0]; shape vector (input[1]) should align to it.
  before_op_output_shape_trans = \
  before_op_output_shape_trans_1 \
  and before_op_output_shape_trans_2
@@ -58,7 +59,7 @@ def make_node(
  )
  graph_node_input_2 = get_constant_or_variable(
  graph_node.inputs[1],
- before_op_output_shape_trans,
+ before_op_output_shape_trans_1,
  )
  graph_node_output: gs.Variable = graph_node.outputs[0]
  shape = graph_node_output.shape
@@ -106,6 +107,16 @@ def make_node(
  **kwargs,
  )

+ # If shape is dynamic (Tensor) and input was transposed to NHWC/NWC/NDHWC,
+ # align the shape vector order to TensorFlow's layout.
+ if before_op_output_shape_trans_1 \
+ and tf.is_tensor(input_tensor_shape) \
+ and input_tensor_rank > 2:
+ shape_rank = input_tensor_shape.shape.rank
+ if shape_rank == 1 or shape_rank is None:
+ perm = [0] + list(range(2, input_tensor_rank)) + [1]
+ input_tensor_shape = tf.gather(input_tensor_shape, perm)
+
  tf_type = None
  if \
  (
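
Note on the Expand.py change: when the Expand target shape is only known at runtime and the data input has already been transposed from NCHW to NHWC (or the corresponding lower/higher-rank layout), the dynamic shape vector has to be permuted the same way before broadcasting, which is what the gather in the last hunk does. A small sketch of the reorder, using an invented concrete shape:

    import tensorflow as tf

    input_tensor_rank = 4
    nchw_shape = tf.constant([1, 3, 16, 16], dtype=tf.int64)  # dynamic ONNX target shape
    perm = [0] + list(range(2, input_tensor_rank)) + [1]      # NCHW -> NHWC axis order
    nhwc_shape = tf.gather(nchw_shape, perm)
    print(nhwc_shape.numpy())                                 # [ 1 16 16  3]
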

{onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Flatten.py

@@ -1,6 +1,7 @@
  import random
  random.seed(0)
  import numpy as np
+ import itertools
  np.random.seed(0)
  import tensorflow as tf
  import tf_keras
@@ -13,6 +14,8 @@ from onnx2tf.utils.common_functions import (
  print_node_info,
  inverted_operation_enable_disable,
  make_tf_node_info,
+ dummy_tf_inference,
+ get_tf_model_inputs,
  pre_process_transpose,
  post_process_transpose,
  transpose_with_flexing_deterrence,
@@ -84,6 +87,109 @@ def make_node(
  **kwargs,
  )

+ # Param replacement
+ input_tensor = replace_parameter(
+ value_before_replacement=input_tensor,
+ param_target='inputs',
+ param_name=graph_node.inputs[0].name,
+ **kwargs,
+ )
+
+ # Pre-process transpose
+ input_tensor = pre_process_transpose(
+ value_before_transpose=input_tensor,
+ param_target='inputs',
+ param_name=graph_node.inputs[0].name,
+ **kwargs,
+ )
+
+ perm = [
+ convert_axis(
+ axis=idx,
+ tensor_rank=input_tensor_rank,
+ before_op_output_shape_trans=before_op_output_shape_trans,
+ ) for idx in range(input_tensor_rank)
+ ]
+
+ # Brute-force transpose to match ONNX dummy inference outputs when available.
+ onnx_tensor_infos_for_validation = kwargs.get('onnx_tensor_infos_for_validation', None)
+ test_data_nhwc: np.ndarray = kwargs.get('test_data_nhwc', None)
+ custom_input_op_name_np_data_path: str = kwargs.get('custom_input_op_name_np_data_path', None)
+ disable_strict_mode: bool = kwargs.get('disable_strict_mode', False)
+ if not disable_strict_mode \
+ and onnx_tensor_infos_for_validation is not None \
+ and onnx_tensor_infos_for_validation.get(graph_node_output.name, None) is not None:
+ validation_input = None
+ if isinstance(input_tensor, np.ndarray):
+ validation_input = input_tensor
+ elif hasattr(input_tensor, 'numpy'):
+ try:
+ validation_input = input_tensor.numpy()
+ except Exception:
+ validation_input = None
+ else:
+ try:
+ tf_model_inputs = get_tf_model_inputs(tf_layers_dict=tf_layers_dict)
+ val_model = tf_keras.Model(
+ inputs=tf_model_inputs,
+ outputs=[input_tensor],
+ )
+ tf_pre_tensor_infos = dummy_tf_inference(
+ model=val_model,
+ inputs=tf_model_inputs,
+ test_data_nhwc=test_data_nhwc,
+ custom_input_op_name_np_data_path=custom_input_op_name_np_data_path,
+ )
+ if len(tf_pre_tensor_infos) >= 1:
+ validation_input = list(tf_pre_tensor_infos.values())[0]
+ del val_model
+ except Exception:
+ validation_input = None
+ if validation_input is None:
+ onnx_input_name = graph_node.inputs[0].name
+ if onnx_tensor_infos_for_validation.get(onnx_input_name, None) is not None:
+ validation_input = onnx_tensor_infos_for_validation[onnx_input_name]
+
+ onnx_output = onnx_tensor_infos_for_validation.get(graph_node_output.name, None)
+ if validation_input is not None and onnx_output is not None:
+ rank = len(validation_input.shape)
+ if rank <= 6:
+ perm_candidates = itertools.permutations(range(rank))
+ else:
+ perm_candidates = [perm]
+
+ def _flatten_np(arr, axis):
+ if axis == 0:
+ return arr.reshape(1, -1)
+ if axis >= arr.ndim:
+ return arr.reshape(-1, 1)
+ return arr.reshape(
+ int(np.prod(arr.shape[:axis])),
+ int(np.prod(arr.shape[axis:])),
+ )
+
+ matched_perm = None
+ matched_axis = None
+ for cand in perm_candidates:
+ try:
+ cand_arr = np.transpose(validation_input, cand)
+ for axis_candidate in range(0, rank + 1):
+ cand_flat = _flatten_np(cand_arr, axis_candidate)
+ if cand_flat.shape != onnx_output.shape:
+ continue
+ if np.allclose(cand_flat, onnx_output, rtol=0.0, atol=0.0, equal_nan=True):
+ matched_perm = list(cand)
+ matched_axis = axis_candidate
+ break
+ if matched_perm is not None:
+ break
+ except Exception:
+ continue
+ if matched_perm is not None:
+ perm = matched_perm
+ if matched_axis is not None:
+ axis = matched_axis
+
  # Generation of TF OP
  cal_shape = None
  if axis == 0:
@@ -134,30 +240,6 @@ def make_node(
  has_str_outputshape = True in [True for dim in output_shape if isinstance(dim, str)]
  has_undefined_outputshape = has_none_outputshape or has_str_outputshape
  cal_shape = cal_shape if has_undefined_outputshape else output_shape
-
- # Param replacement
- input_tensor = replace_parameter(
- value_before_replacement=input_tensor,
- param_target='inputs',
- param_name=graph_node.inputs[0].name,
- **kwargs,
- )
-
- # Pre-process transpose
- input_tensor = pre_process_transpose(
- value_before_transpose=input_tensor,
- param_target='inputs',
- param_name=graph_node.inputs[0].name,
- **kwargs,
- )
-
- perm = [
- convert_axis(
- axis=idx,
- tensor_rank=input_tensor_rank,
- before_op_output_shape_trans=before_op_output_shape_trans,
- ) for idx in range(input_tensor_rank)
- ]
  input_tensor = transpose_with_flexing_deterrence(
  input_tensor=input_tensor,
  perm=list(perm) if perm is not None else None,
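
Note on the Flatten.py change: the large hunk moves the parameter replacement, pre-transpose, and perm computation ahead of a new strict-mode search that brute-forces a transpose permutation and flatten axis whose result matches the ONNX dummy-inference output exactly (for ranks up to 6). A toy NumPy sketch of that search; the arrays and the flatten_at/find_match helpers are illustrative, not part of the package:

    import itertools
    import numpy as np

    def flatten_at(arr, axis):
        # ONNX Flatten semantics: collapse dims before/after `axis` into 2-D.
        if axis == 0:
            return arr.reshape(1, -1)
        return arr.reshape(int(np.prod(arr.shape[:axis])), int(np.prod(arr.shape[axis:])))

    def find_match(tf_tensor, onnx_output):
        # Try every permutation and flatten axis until the result is bit-exact.
        for cand in itertools.permutations(range(tf_tensor.ndim)):
            for axis in range(tf_tensor.ndim + 1):
                flat = flatten_at(np.transpose(tf_tensor, cand), axis)
                if flat.shape == onnx_output.shape and np.array_equal(flat, onnx_output):
                    return list(cand), axis
        return None, None

    onnx_input = np.arange(24).reshape(2, 3, 4)      # ONNX-side NCW tensor
    onnx_output = flatten_at(onnx_input, 1)          # ONNX Flatten(axis=1) -> (2, 12)
    tf_input = np.transpose(onnx_input, (0, 2, 1))   # same data after NCW -> NWC transpose
    print(find_match(tf_input, onnx_output))         # ([0, 2, 1], 1)
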

{onnx2tf-1.29.12 → onnx2tf-1.29.14}/onnx2tf/ops/Slice.py

@@ -434,7 +434,23 @@ def make_node(
  dtype=tf.int32,
  )
  if hasattr(begin_mask_, '_inferred_value') and begin_mask_._inferred_value == [None]:
- begin_mask_ = 0
+ axes_list = None
+ if axes is not None:
+ if isinstance(axes, (list, tuple)):
+ axes_list = list(axes)
+ elif isinstance(axes, np.ndarray):
+ axes_list = axes.tolist() if axes.ndim > 0 else [int(axes)]
+ elif tf.is_tensor(axes):
+ if hasattr(axes, 'numpy'):
+ axes_list = axes.numpy().tolist()
+ elif hasattr(axes, '_inferred_value') and axes._inferred_value not in (None, [None]):
+ axes_list = list(axes._inferred_value)
+ if axes_list is not None:
+ begin_mask_ = sum(
+ 1 << axis for axis in range(input_tensor_rank) if axis not in axes_list
+ )
+ else:
+ begin_mask_ = 0

  ##### end_mask
  end_bit_mask = tf.constant([2**idx for idx in range(input_tensor_rank)], dtype=tf.int32)
@@ -446,7 +462,23 @@ def make_node(
  dtype=tf.int32,
  )
  if hasattr(end_mask_, '_inferred_value') and end_mask_._inferred_value == [None]:
- end_mask_ = 0
+ axes_list = None
+ if axes is not None:
+ if isinstance(axes, (list, tuple)):
+ axes_list = list(axes)
+ elif isinstance(axes, np.ndarray):
+ axes_list = axes.tolist() if axes.ndim > 0 else [int(axes)]
+ elif tf.is_tensor(axes):
+ if hasattr(axes, 'numpy'):
+ axes_list = axes.numpy().tolist()
+ elif hasattr(axes, '_inferred_value') and axes._inferred_value not in (None, [None]):
+ axes_list = list(axes._inferred_value)
+ if axes_list is not None:
+ end_mask_ = sum(
+ 1 << axis for axis in range(input_tensor_rank) if axis not in axes_list
+ )
+ else:
+ end_mask_ = 0

  # strided_slice
  tf_layers_dict[graph_node_output.name]['tf_node'] = \
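
Note on the Slice.py change: both hunks replace the old fallback of begin_mask_/end_mask_ = 0. When the gathered mask value cannot be inferred but the Slice axes are known, the strided_slice mask bit is now set for every axis the slice does not touch, so those axes keep their full extent. A tiny sketch of the bit arithmetic with a hypothetical rank and axes list:

    input_tensor_rank = 4        # hypothetical rank-4 input
    axes_list = [2]              # ONNX Slice only slices along axis 2
    begin_mask_ = sum(
        1 << axis for axis in range(input_tensor_rank) if axis not in axes_list
    )
    print(begin_mask_, bin(begin_mask_))  # 11 0b1011: axes 0, 1 and 3 stay untouched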