onnx2tf 1.27.6__tar.gz → 1.27.8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (202)
  1. {onnx2tf-1.27.6/onnx2tf.egg-info → onnx2tf-1.27.8}/PKG-INFO +3 -3
  2. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/README.md +2 -2
  3. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/__init__.py +1 -1
  4. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/onnx2tf.py +5 -3
  5. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/AveragePool.py +85 -24
  6. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/utils/common_functions.py +11 -6
  7. {onnx2tf-1.27.6 → onnx2tf-1.27.8/onnx2tf.egg-info}/PKG-INFO +3 -3
  8. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/LICENSE +0 -0
  9. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/LICENSE_onnx-tensorflow +0 -0
  10. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/__main__.py +0 -0
  11. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Abs.py +0 -0
  12. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Acos.py +0 -0
  13. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Acosh.py +0 -0
  14. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Add.py +0 -0
  15. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/And.py +0 -0
  16. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/ArgMax.py +0 -0
  17. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/ArgMin.py +0 -0
  18. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Asin.py +0 -0
  19. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Asinh.py +0 -0
  20. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Atan.py +0 -0
  21. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Atanh.py +0 -0
  22. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/BatchNormalization.py +0 -0
  23. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Bernoulli.py +0 -0
  24. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/BitShift.py +0 -0
  25. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Cast.py +0 -0
  26. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Ceil.py +0 -0
  27. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Celu.py +0 -0
  28. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Clip.py +0 -0
  29. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Col2Im.py +0 -0
  30. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Compress.py +0 -0
  31. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Concat.py +0 -0
  32. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/ConcatFromSequence.py +0 -0
  33. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Constant.py +0 -0
  34. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/ConstantOfShape.py +0 -0
  35. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Conv.py +0 -0
  36. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/ConvInteger.py +0 -0
  37. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/ConvTranspose.py +0 -0
  38. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Cos.py +0 -0
  39. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Cosh.py +0 -0
  40. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/CumSum.py +0 -0
  41. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/DepthToSpace.py +0 -0
  42. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/DequantizeLinear.py +0 -0
  43. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Det.py +0 -0
  44. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Div.py +0 -0
  45. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Dropout.py +0 -0
  46. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/DynamicQuantizeLinear.py +0 -0
  47. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Einsum.py +0 -0
  48. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Elu.py +0 -0
  49. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Equal.py +0 -0
  50. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Erf.py +0 -0
  51. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Exp.py +0 -0
  52. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Expand.py +0 -0
  53. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/EyeLike.py +0 -0
  54. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Flatten.py +0 -0
  55. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Floor.py +0 -0
  56. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/FusedConv.py +0 -0
  57. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/GRU.py +0 -0
  58. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Gather.py +0 -0
  59. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/GatherElements.py +0 -0
  60. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/GatherND.py +0 -0
  61. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Gelu.py +0 -0
  62. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Gemm.py +0 -0
  63. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/GlobalAveragePool.py +0 -0
  64. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/GlobalLpPool.py +0 -0
  65. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/GlobalMaxPool.py +0 -0
  66. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Greater.py +0 -0
  67. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/GreaterOrEqual.py +0 -0
  68. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/GridSample.py +0 -0
  69. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/GroupNorm.py +0 -0
  70. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/HammingWindow.py +0 -0
  71. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/HannWindow.py +0 -0
  72. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/HardSigmoid.py +0 -0
  73. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/HardSwish.py +0 -0
  74. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Hardmax.py +0 -0
  75. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Identity.py +0 -0
  76. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/If.py +0 -0
  77. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Input.py +0 -0
  78. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/InstanceNormalization.py +0 -0
  79. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Inverse.py +0 -0
  80. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/IsInf.py +0 -0
  81. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/IsNaN.py +0 -0
  82. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/LRN.py +0 -0
  83. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/LSTM.py +0 -0
  84. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/LayerNormalization.py +0 -0
  85. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/LeakyRelu.py +0 -0
  86. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Less.py +0 -0
  87. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/LessOrEqual.py +0 -0
  88. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Log.py +0 -0
  89. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/LogSoftmax.py +0 -0
  90. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/LpNormalization.py +0 -0
  91. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/MatMul.py +0 -0
  92. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/MatMulInteger.py +0 -0
  93. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Max.py +0 -0
  94. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/MaxPool.py +0 -0
  95. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/MaxUnpool.py +0 -0
  96. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Mean.py +0 -0
  97. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/MeanVarianceNormalization.py +0 -0
  98. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/MelWeightMatrix.py +0 -0
  99. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Min.py +0 -0
  100. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Mish.py +0 -0
  101. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Mod.py +0 -0
  102. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Mul.py +0 -0
  103. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Multinomial.py +0 -0
  104. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Neg.py +0 -0
  105. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/NonMaxSuppression.py +0 -0
  106. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/NonZero.py +0 -0
  107. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Not.py +0 -0
  108. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/OneHot.py +0 -0
  109. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/OptionalGetElement.py +0 -0
  110. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/OptionalHasElement.py +0 -0
  111. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Or.py +0 -0
  112. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/PRelu.py +0 -0
  113. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Pad.py +0 -0
  114. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Pow.py +0 -0
  115. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/QLinearAdd.py +0 -0
  116. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/QLinearConcat.py +0 -0
  117. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/QLinearConv.py +0 -0
  118. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/QLinearLeakyRelu.py +0 -0
  119. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/QLinearMatMul.py +0 -0
  120. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/QLinearMul.py +0 -0
  121. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/QLinearSigmoid.py +0 -0
  122. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/QLinearSoftmax.py +0 -0
  123. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/QuantizeLinear.py +0 -0
  124. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/RNN.py +0 -0
  125. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/RandomNormal.py +0 -0
  126. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/RandomNormalLike.py +0 -0
  127. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/RandomUniform.py +0 -0
  128. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/RandomUniformLike.py +0 -0
  129. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Range.py +0 -0
  130. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Reciprocal.py +0 -0
  131. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/ReduceL1.py +0 -0
  132. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/ReduceL2.py +0 -0
  133. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/ReduceLogSum.py +0 -0
  134. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/ReduceLogSumExp.py +0 -0
  135. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/ReduceMax.py +0 -0
  136. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/ReduceMean.py +0 -0
  137. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/ReduceMin.py +0 -0
  138. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/ReduceProd.py +0 -0
  139. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/ReduceSum.py +0 -0
  140. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/ReduceSumSquare.py +0 -0
  141. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Relu.py +0 -0
  142. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Reshape.py +0 -0
  143. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Resize.py +0 -0
  144. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/ReverseSequence.py +0 -0
  145. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/RoiAlign.py +0 -0
  146. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Round.py +0 -0
  147. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/STFT.py +0 -0
  148. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/ScaleAndTranslate.py +0 -0
  149. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Scatter.py +0 -0
  150. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/ScatterElements.py +0 -0
  151. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/ScatterND.py +0 -0
  152. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Selu.py +0 -0
  153. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/SequenceAt.py +0 -0
  154. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/SequenceConstruct.py +0 -0
  155. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/SequenceEmpty.py +0 -0
  156. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/SequenceErase.py +0 -0
  157. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/SequenceInsert.py +0 -0
  158. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/SequenceLength.py +0 -0
  159. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Shape.py +0 -0
  160. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Shrink.py +0 -0
  161. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Sigmoid.py +0 -0
  162. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Sign.py +0 -0
  163. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Sin.py +0 -0
  164. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Sinh.py +0 -0
  165. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Size.py +0 -0
  166. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Slice.py +0 -0
  167. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Softmax.py +0 -0
  168. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Softplus.py +0 -0
  169. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Softsign.py +0 -0
  170. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/SpaceToDepth.py +0 -0
  171. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Split.py +0 -0
  172. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/SplitToSequence.py +0 -0
  173. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Sqrt.py +0 -0
  174. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Squeeze.py +0 -0
  175. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/StringNormalizer.py +0 -0
  176. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Sub.py +0 -0
  177. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Sum.py +0 -0
  178. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Tan.py +0 -0
  179. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Tanh.py +0 -0
  180. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/ThresholdedRelu.py +0 -0
  181. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Tile.py +0 -0
  182. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/TopK.py +0 -0
  183. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Transpose.py +0 -0
  184. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Trilu.py +0 -0
  185. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Unique.py +0 -0
  186. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Unsqueeze.py +0 -0
  187. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Upsample.py +0 -0
  188. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Where.py +0 -0
  189. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/Xor.py +0 -0
  190. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/_Loop.py +0 -0
  191. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/__Loop.py +0 -0
  192. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/__init__.py +0 -0
  193. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/utils/__init__.py +0 -0
  194. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/utils/enums.py +0 -0
  195. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/utils/logging.py +0 -0
  196. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf.egg-info/SOURCES.txt +0 -0
  197. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf.egg-info/dependency_links.txt +0 -0
  198. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf.egg-info/entry_points.txt +0 -0
  199. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf.egg-info/top_level.txt +0 -0
  200. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/setup.cfg +0 -0
  201. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/setup.py +0 -0
  202. {onnx2tf-1.27.6 → onnx2tf-1.27.8}/tests/test_model_convert.py +0 -0
{onnx2tf-1.27.6/onnx2tf.egg-info → onnx2tf-1.27.8}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: onnx2tf
- Version: 1.27.6
+ Version: 1.27.8
  Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
  Home-page: https://github.com/PINTO0309/onnx2tf
  Author: Katsuya Hyodo
@@ -334,7 +334,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- ghcr.io/pinto0309/onnx2tf:1.27.6
+ ghcr.io/pinto0309/onnx2tf:1.27.8

  or

@@ -342,7 +342,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- docker.io/pinto0309/onnx2tf:1.27.6
+ docker.io/pinto0309/onnx2tf:1.27.8

  or

{onnx2tf-1.27.6 → onnx2tf-1.27.8}/README.md
@@ -309,7 +309,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- ghcr.io/pinto0309/onnx2tf:1.27.6
+ ghcr.io/pinto0309/onnx2tf:1.27.8

  or

@@ -317,7 +317,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- docker.io/pinto0309/onnx2tf:1.27.6
+ docker.io/pinto0309/onnx2tf:1.27.8

  or

{onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/__init__.py
@@ -1,3 +1,3 @@
  from onnx2tf.onnx2tf import convert, main

- __version__ = '1.27.6'
+ __version__ = '1.27.8'
{onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/onnx2tf.py
@@ -1947,6 +1947,8 @@ def convert(
  validated_onnx_tensor: np.ndarray = checked_value[0]
  matched_flg: int = checked_value[1]
  max_abs_err: Any = checked_value[2]
+ onnx_shape_tf_shape: str = checked_value[3]
+
  message = ''
  if matched_flg == 0:
  message = \
@@ -1960,11 +1962,11 @@ def convert(
  elif matched_flg == 2:
  message = \
  Color.GREEN(f'validate_result') + ': ' +\
- Color.REVERSE(f'{Color.BLUE} Skipped (Deleted or Shape Unmatched) ')
+ Color.REVERSE(f'{Color.BLUE} Skipped (Deleted or Shape Unmatched) {onnx_shape_tf_shape}')
  print(
  Color.GREEN(f'INFO:') + ' '+
- Color.GREEN(f'onnx_output_name') + f': {onnx_output_name} '+
- Color.GREEN(f'tf_output_name') + f': {tf_output_name} '+
+ Color.GREEN(f'onnx_output_name') + f': {re.sub("^wa/", "/", onnx_output_name)} '+
+ # Color.GREEN(f'tf_output_name') + f': {tf_output_name} '+
  Color.GREEN(f'shape') + f': {validated_onnx_tensor.shape} '+
  Color.GREEN(f'dtype') + f': {validated_onnx_tensor.dtype} '+
  f'{message}'
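
Note on the two hunks above: the extra element unpacked into onnx_shape_tf_shape matches the check_results entries extended in onnx_tf_tensor_validation (common_functions.py, patched further down), which now carry a fourth, human-readable shape string populated only for the Skipped (Deleted or Shape Unmatched) case. A minimal sketch of the extended structure; the output names and tensor values here are invented for illustration.

    import numpy as np

    # Each entry: [onnx_tensor, matched_flg, max_abs_err, onnx_shape_tf_shape]
    check_results = {
        "/model/conv1/output_0": [np.zeros((1, 3, 8, 8), dtype=np.float32), 1, 1.2e-06, ""],
        "/model/pool1/output_0": [np.zeros((1, 3, 4, 4), dtype=np.float32), 2, 0.0,
                                  "onnx.shape:(1, 3, 4, 4)/tf.shape:(1, 4, 4, 3)"],
    }

    for onnx_output_name, checked_value in check_results.items():
        validated_onnx_tensor = checked_value[0]
        matched_flg = checked_value[1]          # 0: Unmatched, 1: Matched, 2: Skipped
        max_abs_err = checked_value[2]
        onnx_shape_tf_shape = checked_value[3]  # non-empty only when matched_flg == 2
        print(onnx_output_name, matched_flg, max_abs_err, onnx_shape_tf_shape)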
{onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/ops/AveragePool.py
@@ -167,11 +167,46 @@ def make_node(
  input_tensor=input_tensor
  )

- func = math.ceil if ceil_mode else math.floor
- output_spatial_shape = [
- func((i + pb + pe - d * (k - 1) - 1) / s + 1)
- for i, pb, pe, k, d, s in zip(input_tensor_shape[1:-1], pads[:len(pads) // 2], pads[len(pads) // 2:], kernel_shape, dilations, strides)
- ]
+ if not is_known_shape:
+ def compute_output_spatial_shape_from_tensor(input_tensor, pads, kernel_shape, dilations, strides, ceil_mode=False):
+ input_shape = tf.shape(input_tensor) # Get dynamic shape
+ input_spatial = input_shape[1:-1] # Extract spatial dimensions only (NHWC format)
+
+ pad_begin = pads[:len(pads) // 2]
+ pad_end = pads[len(pads) // 2:]
+
+ round_func = tf.math.ceil if ceil_mode else tf.math.floor
+
+ output_spatial = []
+ for i, pb, pe, k, d, s in zip(tf.unstack(input_spatial), pad_begin, pad_end, kernel_shape, dilations, strides):
+ i = tf.cast(i, tf.float32)
+ pb = tf.constant(pb, dtype=tf.float32)
+ pe = tf.constant(pe, dtype=tf.float32)
+ k = tf.constant(k, dtype=tf.float32)
+ d = tf.constant(d, dtype=tf.float32)
+ s = tf.constant(s, dtype=tf.float32)
+
+ numerator = i + pb + pe - d * (k - 1) - 1
+ raw_output = numerator / s + 1
+ output_dim = tf.cast(round_func(raw_output), tf.int32)
+ output_spatial.append(output_dim)
+
+ return output_spatial
+
+ output_spatial_shape = compute_output_spatial_shape_from_tensor(
+ input_tensor=input_tensor,
+ pads=pads,
+ kernel_shape=kernel_shape,
+ dilations=dilations,
+ strides=strides,
+ ceil_mode=ceil_mode
+ )
+ else:
+ func = math.ceil if ceil_mode else math.floor
+ output_spatial_shape = [
+ func((i + pb + pe - d * (k - 1) - 1) / s + 1)
+ for i, pb, pe, k, d, s in zip(input_tensor_shape[1:-1], pads[:len(pads) // 2], pads[len(pads) // 2:], kernel_shape, dilations, strides)
+ ]

  # onnx padding value is ignored if auto_pad is not 'NOTSET'
  if auto_pad == 'NOTSET':
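
Both branches of the hunk above evaluate the standard ONNX pooling output-size formula, output = round((i + pad_begin + pad_end - dilation * (kernel - 1) - 1) / stride + 1), rounded up when ceil_mode is set and down otherwise. A self-contained sketch with made-up dimensions (not code taken from AveragePool.py) to make the arithmetic concrete:

    import math

    def pool_output_size(i, pad_begin, pad_end, kernel, dilation, stride, ceil_mode=False):
        # Same formula as the static (is_known_shape) branch above.
        round_func = math.ceil if ceil_mode else math.floor
        return round_func((i + pad_begin + pad_end - dilation * (kernel - 1) - 1) / stride + 1)

    # Length-7 axis, kernel 3, stride 2, symmetric padding 1: (7 + 1 + 1 - 2 - 1) / 2 + 1 = 4.0
    print(pool_output_size(7, 1, 1, 3, 1, 2))                  # 4
    # Length-6 axis, no padding: (6 - 2 - 1) / 2 + 1 = 2.5, so floor and ceil disagree.
    print(pool_output_size(6, 0, 0, 3, 1, 2))                  # 2
    print(pool_output_size(6, 0, 0, 3, 1, 2, ceil_mode=True))  # 3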
@@ -218,28 +253,52 @@ def make_node(
  # count nonzero elements in kernel each strides for the case count_include_pad is False
  non_zero_counts = []

- for input_spatial_shape, output_size, kernel, dilation, stride, pads_begin, pads_end \
- in zip(input_tensor_shape[1:-1], output_spatial_shape, kernel_shape,
- dilations, strides, pads[:len(pads) // 2], pads[len(pads) // 2:]):
- sample_target = np.concatenate([
- np.zeros(pads_begin),
- np.ones(input_spatial_shape),
- np.zeros(pads_end)]
+ if not is_known_shape:
+ def compute_non_zero_counts_loop(input_tensor, output_spatial_shape, kernel_shape, dilations, strides, pads):
+
+ counts_list = []
+
+ for dim in range(len(kernel_shape)):
+ k = kernel_shape[dim]
+ counts = [k] * (output_spatial_shape[dim].numpy() if tf.is_tensor(output_spatial_shape[dim]) and hasattr(output_spatial_shape[dim], 'numpy') else output_spatial_shape[dim])
+ counts_list.append(counts)
+
+ return counts_list
+
+ non_zero_counts = compute_non_zero_counts_loop(
+ input_tensor=input_tensor,
+ output_spatial_shape=output_spatial_shape,
+ kernel_shape=kernel_shape,
+ dilations=dilations,
+ strides=strides,
+ pads=pads
  )
- sample_kernel = np.zeros((kernel - 1) * dilation + 1)
- sample_kernel[::dilation] = 1
+ else:
+ for input_spatial_shape, output_size, kernel, dilation, stride, pads_begin, pads_end \
+ in zip(input_tensor_shape[1:-1], output_spatial_shape, kernel_shape,
+ dilations, strides, pads[:len(pads) // 2], pads[len(pads) // 2:]):
+ sample_target = np.concatenate([
+ np.zeros(pads_begin),
+ np.ones(input_spatial_shape),
+ np.zeros(pads_end)]
+ )
+ sample_kernel = np.zeros((kernel - 1) * dilation + 1)
+ sample_kernel[::dilation] = 1

- counts = []
- for i in range(output_size):
- start = i * stride
- stride_target = sample_target[start:start+len(sample_kernel)]
- # pad target to match size
- stride_target = np.concatenate([stride_target, np.zeros(len(sample_kernel) - len(stride_target))])
- counts.extend(np.convolve(stride_target, sample_kernel, mode='valid'))
+ counts = []
+ for i in range(output_size):
+ start = i * stride
+ stride_target = sample_target[start:start+len(sample_kernel)]
+ # pad target to match size
+ stride_target = np.concatenate([stride_target, np.zeros(len(sample_kernel) - len(stride_target))])
+ counts.extend(np.convolve(stride_target, sample_kernel, mode='valid'))

- non_zero_counts.append(counts)
+ non_zero_counts.append(counts)

- need_multiplier = len(set([i for sublist in non_zero_counts for i in sublist])) != 1
+ if not is_known_shape:
+ need_multiplier = False # Default to False for dynamic tensors to avoid errors
+ else:
+ need_multiplier = len(set([i for sublist in non_zero_counts for i in sublist])) != 1

  # default tensorflow option for count_include_pad is True and cannot control
  # average value should be compensated in cases below
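
For context on the static branch kept in the else above: for each spatial axis it marks real input positions with 1 and padded positions with 0, then slides the dilated kernel one stride at a time and counts how many real positions each output window covers; those counts feed the count_include_pad=False compensation. A standalone NumPy sketch with invented sizes, not code lifted from AveragePool.py:

    import numpy as np

    # One spatial axis: length 5, kernel 3, dilation 1, stride 1, padding 1 on each side.
    input_len, kernel, dilation, stride = 5, 3, 1, 1
    pads_begin, pads_end = 1, 1
    output_size = (input_len + pads_begin + pads_end - dilation * (kernel - 1) - 1) // stride + 1

    # 1 marks real input positions, 0 marks padding.
    sample_target = np.concatenate([np.zeros(pads_begin), np.ones(input_len), np.zeros(pads_end)])
    sample_kernel = np.zeros((kernel - 1) * dilation + 1)
    sample_kernel[::dilation] = 1

    counts = []
    for i in range(output_size):
        window = sample_target[i * stride:i * stride + len(sample_kernel)]
        window = np.concatenate([window, np.zeros(len(sample_kernel) - len(window))])
        counts.extend(np.convolve(window, sample_kernel, mode='valid'))

    print([int(c) for c in counts])  # [2, 3, 3, 3, 2] -- the edge windows overlap the padding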
@@ -295,7 +354,9 @@ def make_node(
  multiplier[-1] = k / (k - extra_pad)
  average_multiplier.append(multiplier)
  else:
- for i, k, non_zero_count, extra_pad in enumerate(zip(kernel_shape, non_zero_counts, extra_pads)):
+ for i in range(len(kernel_shape)):
+ k = kernel_shape[i]
+ extra_pad = extra_pads[i]
  average_multiplier[i][-1] = k / (k - extra_pad)

  # Preserving Graph Structure (Dict)
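
The removed line in the hunk above was a latent bug: enumerate(zip(...)) yields (index, tuple) pairs, so unpacking each item into four names raises a ValueError as soon as that branch is taken; the replacement indexes the lists directly. A minimal reproduction with throwaway values:

    kernel_shape = [3, 3]
    non_zero_counts = [[3.0, 3.0], [3.0, 3.0]]
    extra_pads = [1, 0]

    # Old form: each item is (index, (kernel, counts, pad)), i.e. a 2-tuple.
    try:
        for i, k, non_zero_count, extra_pad in enumerate(zip(kernel_shape, non_zero_counts, extra_pads)):
            pass
    except ValueError as err:
        print(err)  # not enough values to unpack (expected 4, got 2)

    # New form, as in the patched code: a plain index loop.
    for i in range(len(kernel_shape)):
        k = kernel_shape[i]
        extra_pad = extra_pads[i]
        print(i, k / (k - extra_pad))  # 0 1.5, then 1 1.0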
{onnx2tf-1.27.6 → onnx2tf-1.27.8}/onnx2tf/utils/common_functions.py
@@ -1,6 +1,7 @@
  import math
  import os
  import io
+ import re
  import sys
  import copy
  import json
@@ -280,7 +281,7 @@ def print_node_info(func):
  if graph_input is not None:
  debug(
  Color.GREEN(f'INFO:') + ' '+
- Color.GREEN(f'input_op_name') + f': {graph_input.name} '+
+ Color.GREEN(f'input_op_name') + f': {re.sub("^wa/", "/", graph_input.name)} '+
  Color.GREEN(f'shape') + f': {graph_input.shape} '+
  Color.GREEN(f'dtype') + f': {graph_input.dtype}'
  )
@@ -294,18 +295,18 @@ def print_node_info(func):
  )
  debug(
  Color.GREEN(f'INFO:') + ' ' + Color.MAGENTA(f'onnx_op_type') + ': '+
- f'{graph_node.op}' + Color.MAGENTA(' onnx_op_name') + f': {graph_node.name}')
+ f'{graph_node.op}' + Color.MAGENTA(' onnx_op_name') + f': {re.sub("^wa/", "/", graph_node.name)}')
  for idx, graph_node_input in enumerate(graph_node.inputs):
  debug(
  Color.GREEN(f'INFO:') + ' '+
- Color.CYAN(f' input_name.{idx+1}') + f': {graph_node_input.name} '+
+ Color.CYAN(f' input_name.{idx+1}') + f': {re.sub("^wa/", "/", graph_node_input.name)} '+
  Color.CYAN(f'shape') + f': {graph_node_input.shape} '+
  Color.CYAN(f'dtype') + f': {graph_node_input.dtype}'
  )
  for idx, graph_node_output in enumerate(graph_node.outputs):
  debug(
  Color.GREEN(f'INFO:') + ' '+
- Color.CYAN(f' output_name.{idx+1}') + f': {graph_node_output.name} '+
+ Color.CYAN(f' output_name.{idx+1}') + f': {re.sub("^wa/", "/", graph_node_output.name)} '+
  Color.CYAN(f'shape') + f': {graph_node_output.shape} '+
  Color.CYAN(f'dtype') + f': {graph_node_output.dtype}'
  )
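
The re.sub calls repeated through this hunk (and in onnx2tf.py above) only strip a leading internal "wa/" prefix from node names before they are printed. A quick illustration with invented names:

    import re

    for name in ["wa/model/conv1/output_0", "/model/conv1/output_0", "watermark/x"]:
        # Only a leading "wa/" is rewritten to "/"; other names pass through unchanged.
        print(re.sub("^wa/", "/", name))
    # /model/conv1/output_0
    # /model/conv1/output_0
    # watermark/x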
@@ -4033,18 +4034,19 @@ def onnx_tf_tensor_validation(

  Returns
  ----------
- check_results: Dict[str, List[np.ndarray, int, float|int]]
+ check_results: Dict[str, List[np.ndarray, int, float|int], str]
  Tensor Comparison Results
  {
  onnx_output_name: [
  onnx_tensor,
  matched_flg, <--- 0: Unmatched, 1: Matched, 2: Skipped (Deleted or Shape Unmatched),
  max_abs_err,
+ onnx_shape_tf_shape,
  ]
  }
  """
  check_results = {
- k: [v[0], False, 0.0] \
+ k: [v[0], False, 0.0, ""] \
  for k, v in output_pairs.items()
  }

@@ -4120,9 +4122,12 @@ def onnx_tf_tensor_validation(
  # If there was no match between ONNX and TensorFlow output shapes.
  check_results[names_pair][1] = 2
  check_results[names_pair][2] = max_abs_err
+ check_results[names_pair][3] = \
+ f"onnx.shape:{onnx_tensor.shape if hasattr(onnx_tensor, 'shape') else 'None'}/tf.shape:{tf_tensor.shape if hasattr(tf_tensor, 'shape') else 'None'}"
  else:
  check_results[names_pair][1] = validate_result
  check_results[names_pair][2] = max_abs_err
+ check_results[names_pair][3] = ""

  return check_results

{onnx2tf-1.27.6 → onnx2tf-1.27.8/onnx2tf.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: onnx2tf
- Version: 1.27.6
+ Version: 1.27.8
  Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
  Home-page: https://github.com/PINTO0309/onnx2tf
  Author: Katsuya Hyodo
@@ -334,7 +334,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- ghcr.io/pinto0309/onnx2tf:1.27.6
+ ghcr.io/pinto0309/onnx2tf:1.27.8

  or

@@ -342,7 +342,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- docker.io/pinto0309/onnx2tf:1.27.6
+ docker.io/pinto0309/onnx2tf:1.27.8

  or
