onnx2tf 1.25.15__tar.gz → 1.26.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (202)
  1. {onnx2tf-1.25.15/onnx2tf.egg-info → onnx2tf-1.26.0}/PKG-INFO +19 -9
  2. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/README.md +18 -8
  3. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/__init__.py +1 -1
  4. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/onnx2tf.py +33 -13
  5. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Conv.py +12 -0
  6. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/utils/common_functions.py +13 -6
  7. {onnx2tf-1.25.15 → onnx2tf-1.26.0/onnx2tf.egg-info}/PKG-INFO +19 -9
  8. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/LICENSE +0 -0
  9. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/LICENSE_onnx-tensorflow +0 -0
  10. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/__main__.py +0 -0
  11. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Abs.py +0 -0
  12. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Acos.py +0 -0
  13. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Acosh.py +0 -0
  14. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Add.py +0 -0
  15. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/And.py +0 -0
  16. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/ArgMax.py +0 -0
  17. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/ArgMin.py +0 -0
  18. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Asin.py +0 -0
  19. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Asinh.py +0 -0
  20. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Atan.py +0 -0
  21. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Atanh.py +0 -0
  22. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/AveragePool.py +0 -0
  23. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/BatchNormalization.py +0 -0
  24. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Bernoulli.py +0 -0
  25. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/BitShift.py +0 -0
  26. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Cast.py +0 -0
  27. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Ceil.py +0 -0
  28. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Celu.py +0 -0
  29. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Clip.py +0 -0
  30. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Col2Im.py +0 -0
  31. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Compress.py +0 -0
  32. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Concat.py +0 -0
  33. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/ConcatFromSequence.py +0 -0
  34. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Constant.py +0 -0
  35. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/ConstantOfShape.py +0 -0
  36. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/ConvInteger.py +0 -0
  37. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/ConvTranspose.py +0 -0
  38. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Cos.py +0 -0
  39. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Cosh.py +0 -0
  40. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/CumSum.py +0 -0
  41. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/DepthToSpace.py +0 -0
  42. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/DequantizeLinear.py +0 -0
  43. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Det.py +0 -0
  44. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Div.py +0 -0
  45. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Dropout.py +0 -0
  46. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/DynamicQuantizeLinear.py +0 -0
  47. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Einsum.py +0 -0
  48. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Elu.py +0 -0
  49. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Equal.py +0 -0
  50. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Erf.py +0 -0
  51. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Exp.py +0 -0
  52. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Expand.py +0 -0
  53. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/EyeLike.py +0 -0
  54. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Flatten.py +0 -0
  55. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Floor.py +0 -0
  56. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/FusedConv.py +0 -0
  57. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/GRU.py +0 -0
  58. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Gather.py +0 -0
  59. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/GatherElements.py +0 -0
  60. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/GatherND.py +0 -0
  61. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Gelu.py +0 -0
  62. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Gemm.py +0 -0
  63. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/GlobalAveragePool.py +0 -0
  64. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/GlobalLpPool.py +0 -0
  65. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/GlobalMaxPool.py +0 -0
  66. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Greater.py +0 -0
  67. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/GreaterOrEqual.py +0 -0
  68. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/GridSample.py +0 -0
  69. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/GroupNorm.py +0 -0
  70. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/HammingWindow.py +0 -0
  71. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/HannWindow.py +0 -0
  72. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/HardSigmoid.py +0 -0
  73. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/HardSwish.py +0 -0
  74. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Hardmax.py +0 -0
  75. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Identity.py +0 -0
  76. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/If.py +0 -0
  77. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Input.py +0 -0
  78. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/InstanceNormalization.py +0 -0
  79. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Inverse.py +0 -0
  80. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/IsInf.py +0 -0
  81. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/IsNaN.py +0 -0
  82. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/LRN.py +0 -0
  83. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/LSTM.py +0 -0
  84. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/LayerNormalization.py +0 -0
  85. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/LeakyRelu.py +0 -0
  86. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Less.py +0 -0
  87. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/LessOrEqual.py +0 -0
  88. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Log.py +0 -0
  89. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/LogSoftmax.py +0 -0
  90. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/LpNormalization.py +0 -0
  91. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/MatMul.py +0 -0
  92. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/MatMulInteger.py +0 -0
  93. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Max.py +0 -0
  94. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/MaxPool.py +0 -0
  95. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/MaxUnpool.py +0 -0
  96. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Mean.py +0 -0
  97. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/MeanVarianceNormalization.py +0 -0
  98. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/MelWeightMatrix.py +0 -0
  99. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Min.py +0 -0
  100. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Mish.py +0 -0
  101. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Mod.py +0 -0
  102. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Mul.py +0 -0
  103. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Multinomial.py +0 -0
  104. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Neg.py +0 -0
  105. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/NonMaxSuppression.py +0 -0
  106. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/NonZero.py +0 -0
  107. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Not.py +0 -0
  108. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/OneHot.py +0 -0
  109. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/OptionalGetElement.py +0 -0
  110. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/OptionalHasElement.py +0 -0
  111. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Or.py +0 -0
  112. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/PRelu.py +0 -0
  113. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Pad.py +0 -0
  114. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Pow.py +0 -0
  115. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/QLinearAdd.py +0 -0
  116. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/QLinearConcat.py +0 -0
  117. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/QLinearConv.py +0 -0
  118. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/QLinearLeakyRelu.py +0 -0
  119. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/QLinearMatMul.py +0 -0
  120. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/QLinearMul.py +0 -0
  121. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/QLinearSigmoid.py +0 -0
  122. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/QLinearSoftmax.py +0 -0
  123. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/QuantizeLinear.py +0 -0
  124. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/RNN.py +0 -0
  125. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/RandomNormal.py +0 -0
  126. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/RandomNormalLike.py +0 -0
  127. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/RandomUniform.py +0 -0
  128. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/RandomUniformLike.py +0 -0
  129. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Range.py +0 -0
  130. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Reciprocal.py +0 -0
  131. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/ReduceL1.py +0 -0
  132. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/ReduceL2.py +0 -0
  133. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/ReduceLogSum.py +0 -0
  134. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/ReduceLogSumExp.py +0 -0
  135. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/ReduceMax.py +0 -0
  136. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/ReduceMean.py +0 -0
  137. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/ReduceMin.py +0 -0
  138. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/ReduceProd.py +0 -0
  139. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/ReduceSum.py +0 -0
  140. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/ReduceSumSquare.py +0 -0
  141. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Relu.py +0 -0
  142. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Reshape.py +0 -0
  143. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Resize.py +0 -0
  144. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/ReverseSequence.py +0 -0
  145. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/RoiAlign.py +0 -0
  146. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Round.py +0 -0
  147. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/STFT.py +0 -0
  148. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/ScaleAndTranslate.py +0 -0
  149. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Scatter.py +0 -0
  150. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/ScatterElements.py +0 -0
  151. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/ScatterND.py +0 -0
  152. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Selu.py +0 -0
  153. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/SequenceAt.py +0 -0
  154. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/SequenceConstruct.py +0 -0
  155. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/SequenceEmpty.py +0 -0
  156. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/SequenceErase.py +0 -0
  157. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/SequenceInsert.py +0 -0
  158. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/SequenceLength.py +0 -0
  159. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Shape.py +0 -0
  160. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Shrink.py +0 -0
  161. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Sigmoid.py +0 -0
  162. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Sign.py +0 -0
  163. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Sin.py +0 -0
  164. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Sinh.py +0 -0
  165. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Size.py +0 -0
  166. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Slice.py +0 -0
  167. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Softmax.py +0 -0
  168. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Softplus.py +0 -0
  169. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Softsign.py +0 -0
  170. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/SpaceToDepth.py +0 -0
  171. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Split.py +0 -0
  172. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/SplitToSequence.py +0 -0
  173. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Sqrt.py +0 -0
  174. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Squeeze.py +0 -0
  175. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/StringNormalizer.py +0 -0
  176. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Sub.py +0 -0
  177. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Sum.py +0 -0
  178. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Tan.py +0 -0
  179. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Tanh.py +0 -0
  180. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/ThresholdedRelu.py +0 -0
  181. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Tile.py +0 -0
  182. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/TopK.py +0 -0
  183. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Transpose.py +0 -0
  184. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Trilu.py +0 -0
  185. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Unique.py +0 -0
  186. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Unsqueeze.py +0 -0
  187. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Upsample.py +0 -0
  188. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Where.py +0 -0
  189. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/Xor.py +0 -0
  190. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/_Loop.py +0 -0
  191. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/__Loop.py +0 -0
  192. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/ops/__init__.py +0 -0
  193. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/utils/__init__.py +0 -0
  194. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/utils/enums.py +0 -0
  195. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf/utils/logging.py +0 -0
  196. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf.egg-info/SOURCES.txt +0 -0
  197. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf.egg-info/dependency_links.txt +0 -0
  198. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf.egg-info/entry_points.txt +0 -0
  199. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/onnx2tf.egg-info/top_level.txt +0 -0
  200. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/setup.cfg +0 -0
  201. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/setup.py +0 -0
  202. {onnx2tf-1.25.15 → onnx2tf-1.26.0}/tests/test_model_convert.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: onnx2tf
3
- Version: 1.25.15
3
+ Version: 1.26.0
4
4
  Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
5
5
  Home-page: https://github.com/PINTO0309/onnx2tf
6
6
  Author: Katsuya Hyodo
@@ -314,7 +314,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
314
314
  docker run --rm -it \
315
315
  -v `pwd`:/workdir \
316
316
  -w /workdir \
317
- ghcr.io/pinto0309/onnx2tf:1.25.15
317
+ ghcr.io/pinto0309/onnx2tf:1.26.0
318
318
 
319
319
  or
320
320
 
@@ -322,7 +322,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
322
322
  docker run --rm -it \
323
323
  -v `pwd`:/workdir \
324
324
  -w /workdir \
325
- docker.io/pinto0309/onnx2tf:1.25.15
325
+ docker.io/pinto0309/onnx2tf:1.26.0
326
326
 
327
327
  or
328
328
 
@@ -1529,7 +1529,8 @@ usage: onnx2tf
1529
1529
  [-oiqt]
1530
1530
  [-qt {per-channel,per-tensor}]
1531
1531
  [-cind INPUT_NAME NUMPY_FILE_PATH MEAN STD]
1532
- [-ioqd {int8,uint8}]
1532
+ [-iqd {int8,uint8}]
1533
+ [-oqd {int8,uint8}]
1533
1534
  [-nuo]
1534
1535
  [-nuonag]
1535
1536
  [-b BATCH_SIZE]
@@ -1686,8 +1687,12 @@ optional arguments:
1686
1687
  and {input_op_name}, {numpy_file_path}, {mean}, and {std} must all be entered.
1687
1688
  Otherwise, an error will occur during the -oiqt stage.
1688
1689
 
1689
- -ioqd {int8,uint8}, --input_output_quant_dtype {int8,uint8}
1690
- Input and Output dtypes when doing Full INT8 Quantization.
1690
+ -iqd {int8,uint8}, --input_quant_dtype {int8,uint8}
1691
+ Input dtypes when doing Full INT8 Quantization.
1692
+ "int8"(default) or "uint8"
1693
+
1694
+ -oqd {int8,uint8}, --output_quant_dtype {int8,uint8}
1695
+ Output dtypes when doing Full INT8 Quantization.
1691
1696
  "int8"(default) or "uint8"
1692
1697
 
1693
1698
  -nuo, --not_use_onnxsim
@@ -2008,7 +2013,8 @@ convert(
2008
2013
  output_integer_quantized_tflite: Optional[bool] = False,
2009
2014
  quant_type: Optional[str] = 'per-channel',
2010
2015
  custom_input_op_name_np_data_path: Optional[List] = None,
2011
- input_output_quant_dtype: Optional[str] = 'int8',
2016
+ input_quant_dtype: Optional[str] = 'int8',
2017
+ output_quant_dtype: Optional[str] = 'int8',
2012
2018
  not_use_onnxsim: Optional[bool] = False,
2013
2019
  not_use_opname_auto_generate: Optional[bool] = False,
2014
2020
  batch_size: Union[int, NoneType] = None,
@@ -2172,8 +2178,12 @@ convert(
2172
2178
  and {input_op_name}, {numpy_file_path}, {mean}, and {std} must all be entered.
2173
2179
  Otherwise, an error will occur during the -oiqt stage.
2174
2180
 
2175
- input_output_quant_dtype: Optional[str]
2176
- Input and Output dtypes when doing Full INT8 Quantization.
2181
+ input_quant_dtype: Optional[str]
2182
+ Input dtypes when doing Full INT8 Quantization.
2183
+ "int8"(default) or "uint8"
2184
+
2185
+ output_quant_dtype: Optional[str]
2186
+ Output dtypes when doing Full INT8 Quantization.
2177
2187
  "int8"(default) or "uint8"
2178
2188
 
2179
2189
  not_use_onnxsim: Optional[bool]
@@ -299,7 +299,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
299
299
  docker run --rm -it \
300
300
  -v `pwd`:/workdir \
301
301
  -w /workdir \
302
- ghcr.io/pinto0309/onnx2tf:1.25.15
302
+ ghcr.io/pinto0309/onnx2tf:1.26.0
303
303
 
304
304
  or
305
305
 
@@ -307,7 +307,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
307
307
  docker run --rm -it \
308
308
  -v `pwd`:/workdir \
309
309
  -w /workdir \
310
- docker.io/pinto0309/onnx2tf:1.25.15
310
+ docker.io/pinto0309/onnx2tf:1.26.0
311
311
 
312
312
  or
313
313
 
@@ -1514,7 +1514,8 @@ usage: onnx2tf
1514
1514
  [-oiqt]
1515
1515
  [-qt {per-channel,per-tensor}]
1516
1516
  [-cind INPUT_NAME NUMPY_FILE_PATH MEAN STD]
1517
- [-ioqd {int8,uint8}]
1517
+ [-iqd {int8,uint8}]
1518
+ [-oqd {int8,uint8}]
1518
1519
  [-nuo]
1519
1520
  [-nuonag]
1520
1521
  [-b BATCH_SIZE]
@@ -1671,8 +1672,12 @@ optional arguments:
1671
1672
  and {input_op_name}, {numpy_file_path}, {mean}, and {std} must all be entered.
1672
1673
  Otherwise, an error will occur during the -oiqt stage.
1673
1674
 
1674
- -ioqd {int8,uint8}, --input_output_quant_dtype {int8,uint8}
1675
- Input and Output dtypes when doing Full INT8 Quantization.
1675
+ -iqd {int8,uint8}, --input_quant_dtype {int8,uint8}
1676
+ Input dtypes when doing Full INT8 Quantization.
1677
+ "int8"(default) or "uint8"
1678
+
1679
+ -oqd {int8,uint8}, --output_quant_dtype {int8,uint8}
1680
+ Output dtypes when doing Full INT8 Quantization.
1676
1681
  "int8"(default) or "uint8"
1677
1682
 
1678
1683
  -nuo, --not_use_onnxsim
@@ -1993,7 +1998,8 @@ convert(
1993
1998
  output_integer_quantized_tflite: Optional[bool] = False,
1994
1999
  quant_type: Optional[str] = 'per-channel',
1995
2000
  custom_input_op_name_np_data_path: Optional[List] = None,
1996
- input_output_quant_dtype: Optional[str] = 'int8',
2001
+ input_quant_dtype: Optional[str] = 'int8',
2002
+ output_quant_dtype: Optional[str] = 'int8',
1997
2003
  not_use_onnxsim: Optional[bool] = False,
1998
2004
  not_use_opname_auto_generate: Optional[bool] = False,
1999
2005
  batch_size: Union[int, NoneType] = None,
@@ -2157,8 +2163,12 @@ convert(
2157
2163
  and {input_op_name}, {numpy_file_path}, {mean}, and {std} must all be entered.
2158
2164
  Otherwise, an error will occur during the -oiqt stage.
2159
2165
 
2160
- input_output_quant_dtype: Optional[str]
2161
- Input and Output dtypes when doing Full INT8 Quantization.
2166
+ input_quant_dtype: Optional[str]
2167
+ Input dtypes when doing Full INT8 Quantization.
2168
+ "int8"(default) or "uint8"
2169
+
2170
+ output_quant_dtype: Optional[str]
2171
+ Output dtypes when doing Full INT8 Quantization.
2162
2172
  "int8"(default) or "uint8"
2163
2173
 
2164
2174
  not_use_onnxsim: Optional[bool]
@@ -1,3 +1,3 @@
1
1
  from onnx2tf.onnx2tf import convert, main
2
2
 
3
- __version__ = '1.25.15'
3
+ __version__ = '1.26.0'
@@ -72,7 +72,8 @@ def convert(
72
72
  output_integer_quantized_tflite: Optional[bool] = False,
73
73
  quant_type: Optional[str] = 'per-channel',
74
74
  custom_input_op_name_np_data_path: Optional[List] = None,
75
- input_output_quant_dtype: Optional[str] = 'int8',
75
+ input_quant_dtype: Optional[str] = 'int8',
76
+ output_quant_dtype: Optional[str] = 'int8',
76
77
  not_use_onnxsim: Optional[bool] = False,
77
78
  not_use_opname_auto_generate: Optional[bool] = False,
78
79
  batch_size: Optional[int] = None,
@@ -1693,15 +1694,23 @@ def convert(
1693
1694
  converter._experimental_disable_per_channel = disable_per_channel
1694
1695
  converter.unfold_batchmatmul = enable_batchmatmul_unfold
1695
1696
  converter.representative_dataset = representative_dataset_gen
1696
- inf_type = None
1697
- if input_output_quant_dtype == 'int8':
1698
- inf_type = tf.int8
1699
- elif input_output_quant_dtype == 'uint8':
1700
- inf_type = tf.uint8
1697
+ inf_type_input = None
1698
+ inf_type_output = None
1699
+ if input_quant_dtype == 'int8':
1700
+ inf_type_input = tf.int8
1701
+ elif input_quant_dtype == 'uint8':
1702
+ inf_type_input = tf.uint8
1701
1703
  else:
1702
- inf_type = tf.int8
1703
- converter.inference_input_type = inf_type
1704
- converter.inference_output_type = inf_type
1704
+ inf_type_input = tf.int8
1705
+
1706
+ if output_quant_dtype == 'int8':
1707
+ inf_type_output = tf.int8
1708
+ elif output_quant_dtype == 'uint8':
1709
+ inf_type_output = tf.uint8
1710
+ else:
1711
+ inf_type_output = tf.int8
1712
+ converter.inference_input_type = inf_type_input
1713
+ converter.inference_output_type = inf_type_output
1705
1714
  tflite_model = converter.convert()
1706
1715
  with open(f'{output_folder_path}/{output_file_name}_full_integer_quant.tflite', 'wb') as w:
1707
1716
  w.write(tflite_model)
@@ -2128,13 +2137,23 @@ def main():
2128
2137
  'Otherwise, an error will occur during the -oiqt stage.'
2129
2138
  )
2130
2139
  parser.add_argument(
2131
- '-ioqd',
2132
- '--input_output_quant_dtype',
2140
+ '-iqd',
2141
+ '--input_quant_dtype',
2142
+ type=str,
2143
+ choices=['int8', 'uint8'],
2144
+ default='int8',
2145
+ help=\
2146
+ 'Input dtypes when doing Full INT8 Quantization. \n' +
2147
+ '"int8"(default) or "uint8"'
2148
+ )
2149
+ parser.add_argument(
2150
+ '-oqd',
2151
+ '--output_quant_dtype',
2133
2152
  type=str,
2134
2153
  choices=['int8', 'uint8'],
2135
2154
  default='int8',
2136
2155
  help=\
2137
- 'Input and Output dtypes when doing Full INT8 Quantization. \n' +
2156
+ 'Output dtypes when doing Full INT8 Quantization. \n' +
2138
2157
  '"int8"(default) or "uint8"'
2139
2158
  )
2140
2159
  parser.add_argument(
@@ -2584,7 +2603,8 @@ def main():
2584
2603
  output_integer_quantized_tflite=args.output_integer_quantized_tflite,
2585
2604
  quant_type=args.quant_type,
2586
2605
  custom_input_op_name_np_data_path=custom_params,
2587
- input_output_quant_dtype=args.input_output_quant_dtype,
2606
+ input_quant_dtype=args.input_quant_dtype,
2607
+ output_quant_dtype=args.output_quant_dtype,
2588
2608
  not_use_onnxsim=args.not_use_onnxsim,
2589
2609
  not_use_opname_auto_generate=args.not_use_opname_auto_generate,
2590
2610
  batch_size=args.batch_size,
@@ -14,6 +14,7 @@ from tensorflow.python.keras.layers import (
14
14
  )
15
15
  import onnx_graphsurgeon as gs
16
16
  from onnx2tf.utils.common_functions import (
17
+ get_replacement_parameter,
17
18
  get_constant_or_variable,
18
19
  get_weights_constant_or_variable,
19
20
  get_padding_as_op,
@@ -24,6 +25,7 @@ from onnx2tf.utils.common_functions import (
24
25
  transpose_with_flexing_deterrence,
25
26
  get_tf_model_inputs,
26
27
  onnx_tf_tensor_validation,
28
+ post_process_transpose,
27
29
  )
28
30
  from typing import Any, Dict
29
31
  from onnx2tf.utils.logging import *
@@ -33,6 +35,7 @@ INF_INDEX_VALUE: int = 4294967296
33
35
 
34
36
  @print_node_info
35
37
  @inverted_operation_enable_disable
38
+ @get_replacement_parameter
36
39
  def make_node(
37
40
  *,
38
41
  graph_node: gs.Node,
@@ -932,6 +935,15 @@ def make_node(
932
935
  dilations,
933
936
  )
934
937
 
938
+ # Post-process transpose
939
+ tf_layers_dict[graph_node_output.name]['tf_node'] = \
940
+ post_process_transpose(
941
+ value_before_transpose=tf_layers_dict[graph_node_output.name]['tf_node'],
942
+ param_target='outputs',
943
+ param_name=graph_node.outputs[0].name,
944
+ **kwargs,
945
+ )
946
+
935
947
  # Generation of Debug Info
936
948
  tf_layers_dict[graph_node_output.name]['tf_node_info'] = \
937
949
  make_tf_node_info(
@@ -5857,11 +5857,18 @@ def correction_process_for_accuracy_errors(
5857
5857
  onnx_output_same_shape_counts = collections.Counter(onnx_output_shape)
5858
5858
  if sum([1 if dim > 1 and cnt > 1 else 0 for dim, cnt in onnx_output_same_shape_counts.items()]) >= 1:
5859
5859
  # Generate dummy op
5860
- dummy_op = tf_func(
5861
- input_tensor_1,
5862
- input_tensor_2,
5863
- )
5864
- if dummy_op.shape != tf.TensorShape(None):
5860
+ dummy_op = None
5861
+ tensor_2_candidate_for_transpositions = list(itertools.permutations(range(len(input_tensor_2.shape))))
5862
+ for tensor_2_candidate_for_transposition in tensor_2_candidate_for_transpositions:
5863
+ try:
5864
+ dummy_op = tf_func(
5865
+ input_tensor_1,
5866
+ tf.transpose(a=input_tensor_2, perm=tensor_2_candidate_for_transposition),
5867
+ )
5868
+ break
5869
+ except Exception as ex:
5870
+ pass
5871
+ if dummy_op is not None and dummy_op.shape != tf.TensorShape(None):
5865
5872
  tf_output_shape = [dim if dim is not None else -1 for dim in dummy_op.shape]
5866
5873
  number_of_dim_other_than_1 = sum([1 if i != 1 else 0 for i in onnx_output_shape])
5867
5874
  # Processing continues only if there are two or more dimensions other than 1
@@ -5889,7 +5896,7 @@ def correction_process_for_accuracy_errors(
5889
5896
  tensor_1_candidate_for_transpositions = \
5890
5897
  obtaining_an_inverted_pattern_for_brute_force_validation(tensor_shape=validation_data_1.shape)
5891
5898
  tensor_2_candidate_for_transpositions = \
5892
- obtaining_an_inverted_pattern_for_brute_force_validation(tensor_shape=validation_data_2.shape)
5899
+ list(itertools.permutations(range(len(validation_data_2.shape))))
5893
5900
  for tensor_1_candidate_for_transposition in tensor_1_candidate_for_transpositions:
5894
5901
  for tensor_2_candidate_for_transposition in tensor_2_candidate_for_transpositions:
5895
5902
  try:
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: onnx2tf
3
- Version: 1.25.15
3
+ Version: 1.26.0
4
4
  Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
5
5
  Home-page: https://github.com/PINTO0309/onnx2tf
6
6
  Author: Katsuya Hyodo
@@ -314,7 +314,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
314
314
  docker run --rm -it \
315
315
  -v `pwd`:/workdir \
316
316
  -w /workdir \
317
- ghcr.io/pinto0309/onnx2tf:1.25.15
317
+ ghcr.io/pinto0309/onnx2tf:1.26.0
318
318
 
319
319
  or
320
320
 
@@ -322,7 +322,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
322
322
  docker run --rm -it \
323
323
  -v `pwd`:/workdir \
324
324
  -w /workdir \
325
- docker.io/pinto0309/onnx2tf:1.25.15
325
+ docker.io/pinto0309/onnx2tf:1.26.0
326
326
 
327
327
  or
328
328
 
@@ -1529,7 +1529,8 @@ usage: onnx2tf
1529
1529
  [-oiqt]
1530
1530
  [-qt {per-channel,per-tensor}]
1531
1531
  [-cind INPUT_NAME NUMPY_FILE_PATH MEAN STD]
1532
- [-ioqd {int8,uint8}]
1532
+ [-iqd {int8,uint8}]
1533
+ [-oqd {int8,uint8}]
1533
1534
  [-nuo]
1534
1535
  [-nuonag]
1535
1536
  [-b BATCH_SIZE]
@@ -1686,8 +1687,12 @@ optional arguments:
1686
1687
  and {input_op_name}, {numpy_file_path}, {mean}, and {std} must all be entered.
1687
1688
  Otherwise, an error will occur during the -oiqt stage.
1688
1689
 
1689
- -ioqd {int8,uint8}, --input_output_quant_dtype {int8,uint8}
1690
- Input and Output dtypes when doing Full INT8 Quantization.
1690
+ -iqd {int8,uint8}, --input_quant_dtype {int8,uint8}
1691
+ Input dtypes when doing Full INT8 Quantization.
1692
+ "int8"(default) or "uint8"
1693
+
1694
+ -oqd {int8,uint8}, --output_quant_dtype {int8,uint8}
1695
+ Output dtypes when doing Full INT8 Quantization.
1691
1696
  "int8"(default) or "uint8"
1692
1697
 
1693
1698
  -nuo, --not_use_onnxsim
@@ -2008,7 +2013,8 @@ convert(
2008
2013
  output_integer_quantized_tflite: Optional[bool] = False,
2009
2014
  quant_type: Optional[str] = 'per-channel',
2010
2015
  custom_input_op_name_np_data_path: Optional[List] = None,
2011
- input_output_quant_dtype: Optional[str] = 'int8',
2016
+ input_quant_dtype: Optional[str] = 'int8',
2017
+ output_quant_dtype: Optional[str] = 'int8',
2012
2018
  not_use_onnxsim: Optional[bool] = False,
2013
2019
  not_use_opname_auto_generate: Optional[bool] = False,
2014
2020
  batch_size: Union[int, NoneType] = None,
@@ -2172,8 +2178,12 @@ convert(
2172
2178
  and {input_op_name}, {numpy_file_path}, {mean}, and {std} must all be entered.
2173
2179
  Otherwise, an error will occur during the -oiqt stage.
2174
2180
 
2175
- input_output_quant_dtype: Optional[str]
2176
- Input and Output dtypes when doing Full INT8 Quantization.
2181
+ input_quant_dtype: Optional[str]
2182
+ Input dtypes when doing Full INT8 Quantization.
2183
+ "int8"(default) or "uint8"
2184
+
2185
+ output_quant_dtype: Optional[str]
2186
+ Output dtypes when doing Full INT8 Quantization.
2177
2187
  "int8"(default) or "uint8"
2178
2188
 
2179
2189
  not_use_onnxsim: Optional[bool]
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes