onnx2tf 1.27.9__tar.gz → 1.28.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (204)
  1. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/PKG-INFO +41 -4
  2. onnx2tf-1.27.9/onnx2tf.egg-info/PKG-INFO → onnx2tf-1.28.0/README.md +40 -28
  3. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/__init__.py +1 -1
  4. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/onnx2tf.py +398 -19
  5. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/utils/common_functions.py +4 -3
  6. onnx2tf-1.28.0/onnx2tf/utils/iterative_json_optimizer.py +258 -0
  7. onnx2tf-1.28.0/onnx2tf/utils/json_auto_generator.py +1505 -0
  8. onnx2tf-1.27.9/README.md → onnx2tf-1.28.0/onnx2tf.egg-info/PKG-INFO +65 -3
  9. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf.egg-info/SOURCES.txt +2 -0
  10. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/LICENSE +0 -0
  11. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/LICENSE_onnx-tensorflow +0 -0
  12. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/__main__.py +0 -0
  13. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Abs.py +0 -0
  14. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Acos.py +0 -0
  15. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Acosh.py +0 -0
  16. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Add.py +0 -0
  17. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/And.py +0 -0
  18. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/ArgMax.py +0 -0
  19. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/ArgMin.py +0 -0
  20. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Asin.py +0 -0
  21. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Asinh.py +0 -0
  22. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Atan.py +0 -0
  23. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Atanh.py +0 -0
  24. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/AveragePool.py +0 -0
  25. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/BatchNormalization.py +0 -0
  26. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Bernoulli.py +0 -0
  27. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/BitShift.py +0 -0
  28. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Cast.py +0 -0
  29. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Ceil.py +0 -0
  30. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Celu.py +0 -0
  31. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Clip.py +0 -0
  32. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Col2Im.py +0 -0
  33. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Compress.py +0 -0
  34. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Concat.py +0 -0
  35. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/ConcatFromSequence.py +0 -0
  36. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Constant.py +0 -0
  37. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/ConstantOfShape.py +0 -0
  38. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Conv.py +0 -0
  39. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/ConvInteger.py +0 -0
  40. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/ConvTranspose.py +0 -0
  41. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Cos.py +0 -0
  42. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Cosh.py +0 -0
  43. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/CumSum.py +0 -0
  44. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/DepthToSpace.py +0 -0
  45. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/DequantizeLinear.py +0 -0
  46. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Det.py +0 -0
  47. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Div.py +0 -0
  48. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Dropout.py +0 -0
  49. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/DynamicQuantizeLinear.py +0 -0
  50. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Einsum.py +0 -0
  51. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Elu.py +0 -0
  52. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Equal.py +0 -0
  53. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Erf.py +0 -0
  54. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Exp.py +0 -0
  55. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Expand.py +0 -0
  56. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/EyeLike.py +0 -0
  57. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Flatten.py +0 -0
  58. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Floor.py +0 -0
  59. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/FusedConv.py +0 -0
  60. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/GRU.py +0 -0
  61. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Gather.py +0 -0
  62. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/GatherElements.py +0 -0
  63. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/GatherND.py +0 -0
  64. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Gelu.py +0 -0
  65. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Gemm.py +0 -0
  66. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/GlobalAveragePool.py +0 -0
  67. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/GlobalLpPool.py +0 -0
  68. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/GlobalMaxPool.py +0 -0
  69. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Greater.py +0 -0
  70. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/GreaterOrEqual.py +0 -0
  71. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/GridSample.py +0 -0
  72. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/GroupNorm.py +0 -0
  73. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/HammingWindow.py +0 -0
  74. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/HannWindow.py +0 -0
  75. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/HardSigmoid.py +0 -0
  76. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/HardSwish.py +0 -0
  77. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Hardmax.py +0 -0
  78. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Identity.py +0 -0
  79. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/If.py +0 -0
  80. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Input.py +0 -0
  81. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/InstanceNormalization.py +0 -0
  82. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Inverse.py +0 -0
  83. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/IsInf.py +0 -0
  84. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/IsNaN.py +0 -0
  85. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/LRN.py +0 -0
  86. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/LSTM.py +0 -0
  87. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/LayerNormalization.py +0 -0
  88. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/LeakyRelu.py +0 -0
  89. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Less.py +0 -0
  90. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/LessOrEqual.py +0 -0
  91. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Log.py +0 -0
  92. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/LogSoftmax.py +0 -0
  93. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/LpNormalization.py +0 -0
  94. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/MatMul.py +0 -0
  95. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/MatMulInteger.py +0 -0
  96. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Max.py +0 -0
  97. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/MaxPool.py +0 -0
  98. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/MaxUnpool.py +0 -0
  99. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Mean.py +0 -0
  100. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/MeanVarianceNormalization.py +0 -0
  101. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/MelWeightMatrix.py +0 -0
  102. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Min.py +0 -0
  103. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Mish.py +0 -0
  104. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Mod.py +0 -0
  105. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Mul.py +0 -0
  106. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Multinomial.py +0 -0
  107. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Neg.py +0 -0
  108. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/NonMaxSuppression.py +0 -0
  109. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/NonZero.py +0 -0
  110. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Not.py +0 -0
  111. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/OneHot.py +0 -0
  112. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/OptionalGetElement.py +0 -0
  113. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/OptionalHasElement.py +0 -0
  114. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Or.py +0 -0
  115. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/PRelu.py +0 -0
  116. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Pad.py +0 -0
  117. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Pow.py +0 -0
  118. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/QLinearAdd.py +0 -0
  119. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/QLinearConcat.py +0 -0
  120. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/QLinearConv.py +0 -0
  121. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/QLinearLeakyRelu.py +0 -0
  122. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/QLinearMatMul.py +0 -0
  123. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/QLinearMul.py +0 -0
  124. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/QLinearSigmoid.py +0 -0
  125. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/QLinearSoftmax.py +0 -0
  126. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/QuantizeLinear.py +0 -0
  127. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/RNN.py +0 -0
  128. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/RandomNormal.py +0 -0
  129. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/RandomNormalLike.py +0 -0
  130. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/RandomUniform.py +0 -0
  131. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/RandomUniformLike.py +0 -0
  132. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Range.py +0 -0
  133. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Reciprocal.py +0 -0
  134. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/ReduceL1.py +0 -0
  135. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/ReduceL2.py +0 -0
  136. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/ReduceLogSum.py +0 -0
  137. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/ReduceLogSumExp.py +0 -0
  138. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/ReduceMax.py +0 -0
  139. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/ReduceMean.py +0 -0
  140. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/ReduceMin.py +0 -0
  141. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/ReduceProd.py +0 -0
  142. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/ReduceSum.py +0 -0
  143. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/ReduceSumSquare.py +0 -0
  144. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Relu.py +0 -0
  145. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Reshape.py +0 -0
  146. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Resize.py +0 -0
  147. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/ReverseSequence.py +0 -0
  148. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/RoiAlign.py +0 -0
  149. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Round.py +0 -0
  150. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/STFT.py +0 -0
  151. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/ScaleAndTranslate.py +0 -0
  152. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Scatter.py +0 -0
  153. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/ScatterElements.py +0 -0
  154. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/ScatterND.py +0 -0
  155. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Selu.py +0 -0
  156. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/SequenceAt.py +0 -0
  157. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/SequenceConstruct.py +0 -0
  158. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/SequenceEmpty.py +0 -0
  159. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/SequenceErase.py +0 -0
  160. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/SequenceInsert.py +0 -0
  161. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/SequenceLength.py +0 -0
  162. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Shape.py +0 -0
  163. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Shrink.py +0 -0
  164. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Sigmoid.py +0 -0
  165. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Sign.py +0 -0
  166. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Sin.py +0 -0
  167. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Sinh.py +0 -0
  168. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Size.py +0 -0
  169. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Slice.py +0 -0
  170. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Softmax.py +0 -0
  171. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Softplus.py +0 -0
  172. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Softsign.py +0 -0
  173. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/SpaceToDepth.py +0 -0
  174. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Split.py +0 -0
  175. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/SplitToSequence.py +0 -0
  176. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Sqrt.py +0 -0
  177. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Squeeze.py +0 -0
  178. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/StringNormalizer.py +0 -0
  179. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Sub.py +0 -0
  180. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Sum.py +0 -0
  181. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Tan.py +0 -0
  182. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Tanh.py +0 -0
  183. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/ThresholdedRelu.py +0 -0
  184. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Tile.py +0 -0
  185. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/TopK.py +0 -0
  186. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Transpose.py +0 -0
  187. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Trilu.py +0 -0
  188. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Unique.py +0 -0
  189. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Unsqueeze.py +0 -0
  190. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Upsample.py +0 -0
  191. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Where.py +0 -0
  192. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/Xor.py +0 -0
  193. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/_Loop.py +0 -0
  194. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/__Loop.py +0 -0
  195. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/ops/__init__.py +0 -0
  196. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/utils/__init__.py +0 -0
  197. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/utils/enums.py +0 -0
  198. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf/utils/logging.py +0 -0
  199. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf.egg-info/dependency_links.txt +0 -0
  200. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf.egg-info/entry_points.txt +0 -0
  201. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/onnx2tf.egg-info/top_level.txt +0 -0
  202. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/setup.cfg +0 -0
  203. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/setup.py +0 -0
  204. {onnx2tf-1.27.9 → onnx2tf-1.28.0}/tests/test_model_convert.py +0 -0
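
Files 6 and 7 in the list above (iterative_json_optimizer.py and json_auto_generator.py) are the new utilities behind the -agj / auto_generate_json feature documented in the diffs below. As a rough conceptual sketch only (the function name and signature below are hypothetical and do not reflect the actual internals of either module), the iterative search they implement can be pictured as:

# Hypothetical sketch of the iterative parameter-replacement search; names and
# structure are illustrative only and are not the real onnx2tf internals.
from typing import Any, Callable, Dict, List, Optional

def iterative_json_search(
    candidates: List[Dict[str, Any]],
    convert_and_score: Callable[[Dict[str, Any]], float],
    match_atol: float = 1e-4,
) -> Optional[Dict[str, Any]]:
    """Try candidate replacement JSONs, keep the one with minimal output error,
    and stop early once the final output check would report "Matches"."""
    best_json: Optional[Dict[str, Any]] = None
    best_error = float("inf")
    for candidate in candidates:
        error = convert_and_score(candidate)  # run conversion + ONNX/TF accuracy check
        if error < best_error:
            best_json, best_error = candidate, error
        if error <= match_atol:  # analogous to the -cotoa absolute tolerance
            break
    return best_json
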
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: onnx2tf
- Version: 1.27.9
+ Version: 1.28.0
  Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
  Home-page: https://github.com/PINTO0309/onnx2tf
  Author: Katsuya Hyodo
@@ -32,7 +32,7 @@ Incidentally, I have never used this tool in practice myself since I started wor
  <img src="https://user-images.githubusercontent.com/33194443/193840307-fa69eace-05a9-4d93-9c5d-999cf88af28e.png" />
  </p>

- [![Downloads](https://static.pepy.tech/personalized-badge/onnx2tf?period=total&units=none&left_color=grey&right_color=brightgreen&left_text=Downloads)](https://pepy.tech/project/onnx2tf) ![GitHub](https://img.shields.io/github/license/PINTO0309/onnx2tf?color=2BAF2B) [![Python](https://img.shields.io/badge/Python-3.10-2BAF2B)](https://img.shields.io/badge/Python-3.8-2BAF2B) [![PyPI](https://img.shields.io/pypi/v/onnx2tf?color=2BAF2B)](https://pypi.org/project/onnx2tf/) [![CodeQL](https://github.com/PINTO0309/onnx2tf/workflows/CodeQL/badge.svg)](https://github.com/PINTO0309/onnx2tf/actions?query=workflow%3ACodeQL) ![Model Convert Test Status](https://github.com/PINTO0309/onnx2tf/workflows/Model%20Convert%20Test/badge.svg) [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.7230085.svg)](https://doi.org/10.5281/zenodo.7230085)
+ [![Downloads](https://static.pepy.tech/personalized-badge/onnx2tf?period=total&units=none&left_color=grey&right_color=brightgreen&left_text=Downloads)](https://pepy.tech/project/onnx2tf) ![GitHub](https://img.shields.io/github/license/PINTO0309/onnx2tf?color=2BAF2B) [![Python](https://img.shields.io/badge/Python-3.10-2BAF2B)](https://img.shields.io/badge/Python-3.8-2BAF2B) [![PyPI](https://img.shields.io/pypi/v/onnx2tf?color=2BAF2B)](https://pypi.org/project/onnx2tf/) [![CodeQL](https://github.com/PINTO0309/onnx2tf/workflows/CodeQL/badge.svg)](https://github.com/PINTO0309/onnx2tf/actions?query=workflow%3ACodeQL) ![Model Convert Test Status](https://github.com/PINTO0309/onnx2tf/workflows/Model%20Convert%20Test/badge.svg) [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.7230085.svg)](https://doi.org/10.5281/zenodo.7230085) [![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/PINTO0309/onnx2tf)

  ## Note
  - The torch.script-based `torch.onnx.export` has already been moved to maintenance mode, and we recommend moving to the FX graph-based `torch.onnx.dynamo_export` starting with PyTorch v2.2.0.
@@ -334,7 +334,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- ghcr.io/pinto0309/onnx2tf:1.27.9
+ ghcr.io/pinto0309/onnx2tf:1.28.0

  or

@@ -342,7 +342,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- docker.io/pinto0309/onnx2tf:1.27.9
+ docker.io/pinto0309/onnx2tf:1.28.0

  or

@@ -498,6 +498,22 @@ onnx2tf -i resnet18-v1-7.onnx -okv3
  wget https://github.com/PINTO0309/onnx2tf/releases/download/0.0.2/resnet18-v1-7.onnx
  onnx2tf -i resnet18-v1-7.onnx -otfv1pb

+ # Automatic JSON generation only
+ # Generates an optimal parameter replacement JSON file for model conversion.
+ # The JSON file is saved to {model_name}_auto.json when conversion errors occur
+ # or accuracy issues are detected.
+ onnx2tf -i model.onnx -agj
+
+ # Accuracy validation only (no JSON generation)
+ # Validates the accuracy between ONNX and TensorFlow outputs without generating
+ # any parameter replacement JSON file.
+ onnx2tf -i model.onnx -cotof
+
+ # Accuracy validation + automatic JSON generation
+ # First generates an optimal parameter replacement JSON file, then uses it
+ # to validate the model accuracy. This ensures the best possible conversion accuracy.
+ onnx2tf -i model.onnx -agj -cotof
+
  # INT8 Quantization, Full INT8 Quantization
  # INT8 Quantization with INT16 activation, Full INT8 Quantization with INT16 activation
  # Dynamic Range Quantization
@@ -1561,6 +1577,7 @@ usage: onnx2tf
  [-nuonag]
  [-b BATCH_SIZE]
  [-ois OVERWRITE_INPUT_SHAPE [OVERWRITE_INPUT_SHAPE ...]]
+ [-sh SHAPE_HINTS [SHAPE_HINTS ...]]
  [-nlt]
  [-onwdt]
  [-snms {v4,v5}]
@@ -1589,6 +1606,7 @@ usage: onnx2tf
  [-coton]
  [-cotor CHECK_ONNX_TF_OUTPUTS_ELEMENTWISE_CLOSE_RTOL]
  [-cotoa CHECK_ONNX_TF_OUTPUTS_ELEMENTWISE_CLOSE_ATOL]
+ [-agj]
  [-dms]
  [-uc]
  [-n]
@@ -2024,6 +2042,15 @@ optional arguments:
  The absolute tolerance parameter.
  Default: 1e-4

+ -agj, --auto_generate_json
+ Automatically generates a parameter replacement JSON file that achieves minimal error
+ when converting the model. This option explores various parameter combinations to find
+ the best settings that result in successful conversion and highest accuracy.
+ The search stops when the final output OP accuracy check shows "Matches".
+ When used together with -cotof, the generated JSON is used to re-evaluate accuracy.
+ WARNING: This option performs an exhaustive search to find the optimal conversion patterns,
+ which can take a very long time depending on the model complexity.
+
  -dms, --disable_model_save
  Does not save the converted model. For CIs RAM savings.

@@ -2093,6 +2120,7 @@ convert(
  replace_to_pseudo_operators: List[str] = None,
  mvn_epsilon: Union[float, NoneType] = 0.0000000001,
  param_replacement_file: Optional[str] = '',
+ auto_generate_json: Optional[bool] = False,
  check_gpu_delegate_compatibility: Optional[bool] = False,
  check_onnx_tf_outputs_elementwise_close: Optional[bool] = False,
  check_onnx_tf_outputs_elementwise_close_full: Optional[bool] = False,
@@ -2437,6 +2465,15 @@ convert(
  param_replacement_file: Optional[str]
  Parameter replacement file path. (.json)

+ auto_generate_json: Optional[bool]
+ Automatically generates a parameter replacement JSON file that achieves minimal error
+ when converting the model. This option explores various parameter combinations to find
+ the best settings that result in successful conversion and highest accuracy.
+ The search stops when the final output OP accuracy check shows "Matches".
+ When used together with check_onnx_tf_outputs_elementwise_close_full,
+ the generated JSON is used to re-evaluate accuracy.
+ Default: False
+
  check_gpu_delegate_compatibility: Optional[bool]
  Run TFLite ModelAnalyzer on the generated Float16 tflite model
  to check if the model can be supported by GPU Delegate.
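
The same auto_generate_json behavior is exposed through the convert() Python API whose signature changes appear in the hunks above. Below is a minimal sketch of how it might be driven from Python; the input_onnx_file_path argument and the inspection of the generated file are assumptions not shown in this diff, so verify them against the installed 1.28.0 package.

# Minimal sketch (see the assumptions noted above): drive the new
# auto_generate_json search from Python instead of the -agj CLI flag.
import json
import os

from onnx2tf import convert

MODEL = "model.onnx"

# Roughly equivalent to `onnx2tf -i model.onnx -agj -cotof`: search for a
# parameter replacement JSON that minimizes the ONNX/TF output error, then
# re-evaluate accuracy with the generated JSON.
convert(
    input_onnx_file_path=MODEL,  # assumed parameter name, not shown in this diff
    auto_generate_json=True,     # new in 1.28.0
    check_onnx_tf_outputs_elementwise_close_full=True,
)

# Per the CLI help above, the result is written to {model_name}_auto.json when
# conversion errors or accuracy issues are detected.
auto_json = f"{os.path.splitext(MODEL)[0]}_auto.json"
if os.path.exists(auto_json):
    with open(auto_json) as f:
        replacement = json.load(f)
    print(f"auto-generated replacement keys: {list(replacement)}")
    # The generated file can be reused explicitly on later runs.
    convert(
        input_onnx_file_path=MODEL,
        param_replacement_file=auto_json,
    )
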
@@ -1,28 +1,3 @@
- Metadata-Version: 2.4
- Name: onnx2tf
- Version: 1.27.9
- Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
- Home-page: https://github.com/PINTO0309/onnx2tf
- Author: Katsuya Hyodo
- Author-email: rmsdh122@yahoo.co.jp
- License: MIT License
- Platform: linux
- Platform: unix
- Requires-Python: >=3.10
- Description-Content-Type: text/markdown
- License-File: LICENSE
- License-File: LICENSE_onnx-tensorflow
- Dynamic: author
- Dynamic: author-email
- Dynamic: description
- Dynamic: description-content-type
- Dynamic: home-page
- Dynamic: license
- Dynamic: license-file
- Dynamic: platform
- Dynamic: requires-python
- Dynamic: summary
-
  # onnx2tf
  Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in [onnx-tensorflow](https://github.com/onnx/onnx-tensorflow) ([onnx-tf](https://pypi.org/project/onnx-tf/)). I don't need a Star, but give me a pull request. Since I am adding challenging model optimizations and fixing bugs almost daily, I frequently embed potential bugs that would otherwise break through CI's regression testing. Therefore, if you encounter new problems, I recommend that you try a package that is a few versions older, or try the latest package that will be released in a few days.

@@ -32,7 +7,7 @@ Incidentally, I have never used this tool in practice myself since I started wor
  <img src="https://user-images.githubusercontent.com/33194443/193840307-fa69eace-05a9-4d93-9c5d-999cf88af28e.png" />
  </p>

- [![Downloads](https://static.pepy.tech/personalized-badge/onnx2tf?period=total&units=none&left_color=grey&right_color=brightgreen&left_text=Downloads)](https://pepy.tech/project/onnx2tf) ![GitHub](https://img.shields.io/github/license/PINTO0309/onnx2tf?color=2BAF2B) [![Python](https://img.shields.io/badge/Python-3.10-2BAF2B)](https://img.shields.io/badge/Python-3.8-2BAF2B) [![PyPI](https://img.shields.io/pypi/v/onnx2tf?color=2BAF2B)](https://pypi.org/project/onnx2tf/) [![CodeQL](https://github.com/PINTO0309/onnx2tf/workflows/CodeQL/badge.svg)](https://github.com/PINTO0309/onnx2tf/actions?query=workflow%3ACodeQL) ![Model Convert Test Status](https://github.com/PINTO0309/onnx2tf/workflows/Model%20Convert%20Test/badge.svg) [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.7230085.svg)](https://doi.org/10.5281/zenodo.7230085)
+ [![Downloads](https://static.pepy.tech/personalized-badge/onnx2tf?period=total&units=none&left_color=grey&right_color=brightgreen&left_text=Downloads)](https://pepy.tech/project/onnx2tf) ![GitHub](https://img.shields.io/github/license/PINTO0309/onnx2tf?color=2BAF2B) [![Python](https://img.shields.io/badge/Python-3.10-2BAF2B)](https://img.shields.io/badge/Python-3.8-2BAF2B) [![PyPI](https://img.shields.io/pypi/v/onnx2tf?color=2BAF2B)](https://pypi.org/project/onnx2tf/) [![CodeQL](https://github.com/PINTO0309/onnx2tf/workflows/CodeQL/badge.svg)](https://github.com/PINTO0309/onnx2tf/actions?query=workflow%3ACodeQL) ![Model Convert Test Status](https://github.com/PINTO0309/onnx2tf/workflows/Model%20Convert%20Test/badge.svg) [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.7230085.svg)](https://doi.org/10.5281/zenodo.7230085) [![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/PINTO0309/onnx2tf)

  ## Note
  - The torch.script-based `torch.onnx.export` has already been moved to maintenance mode, and we recommend moving to the FX graph-based `torch.onnx.dynamo_export` starting with PyTorch v2.2.0.
@@ -334,7 +309,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- ghcr.io/pinto0309/onnx2tf:1.27.9
+ ghcr.io/pinto0309/onnx2tf:1.28.0

  or

@@ -342,7 +317,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- docker.io/pinto0309/onnx2tf:1.27.9
+ docker.io/pinto0309/onnx2tf:1.28.0

  or

@@ -498,6 +473,22 @@ onnx2tf -i resnet18-v1-7.onnx -okv3
  wget https://github.com/PINTO0309/onnx2tf/releases/download/0.0.2/resnet18-v1-7.onnx
  onnx2tf -i resnet18-v1-7.onnx -otfv1pb

+ # Automatic JSON generation only
+ # Generates an optimal parameter replacement JSON file for model conversion.
+ # The JSON file is saved to {model_name}_auto.json when conversion errors occur
+ # or accuracy issues are detected.
+ onnx2tf -i model.onnx -agj
+
+ # Accuracy validation only (no JSON generation)
+ # Validates the accuracy between ONNX and TensorFlow outputs without generating
+ # any parameter replacement JSON file.
+ onnx2tf -i model.onnx -cotof
+
+ # Accuracy validation + automatic JSON generation
+ # First generates an optimal parameter replacement JSON file, then uses it
+ # to validate the model accuracy. This ensures the best possible conversion accuracy.
+ onnx2tf -i model.onnx -agj -cotof
+
  # INT8 Quantization, Full INT8 Quantization
  # INT8 Quantization with INT16 activation, Full INT8 Quantization with INT16 activation
  # Dynamic Range Quantization
@@ -1561,6 +1552,7 @@ usage: onnx2tf
  [-nuonag]
  [-b BATCH_SIZE]
  [-ois OVERWRITE_INPUT_SHAPE [OVERWRITE_INPUT_SHAPE ...]]
+ [-sh SHAPE_HINTS [SHAPE_HINTS ...]]
  [-nlt]
  [-onwdt]
  [-snms {v4,v5}]
@@ -1589,6 +1581,7 @@ usage: onnx2tf
  [-coton]
  [-cotor CHECK_ONNX_TF_OUTPUTS_ELEMENTWISE_CLOSE_RTOL]
  [-cotoa CHECK_ONNX_TF_OUTPUTS_ELEMENTWISE_CLOSE_ATOL]
+ [-agj]
  [-dms]
  [-uc]
  [-n]
@@ -2024,6 +2017,15 @@ optional arguments:
  The absolute tolerance parameter.
  Default: 1e-4

+ -agj, --auto_generate_json
+ Automatically generates a parameter replacement JSON file that achieves minimal error
+ when converting the model. This option explores various parameter combinations to find
+ the best settings that result in successful conversion and highest accuracy.
+ The search stops when the final output OP accuracy check shows "Matches".
+ When used together with -cotof, the generated JSON is used to re-evaluate accuracy.
+ WARNING: This option performs an exhaustive search to find the optimal conversion patterns,
+ which can take a very long time depending on the model complexity.
+
  -dms, --disable_model_save
  Does not save the converted model. For CIs RAM savings.

@@ -2093,6 +2095,7 @@ convert(
  replace_to_pseudo_operators: List[str] = None,
  mvn_epsilon: Union[float, NoneType] = 0.0000000001,
  param_replacement_file: Optional[str] = '',
+ auto_generate_json: Optional[bool] = False,
  check_gpu_delegate_compatibility: Optional[bool] = False,
  check_onnx_tf_outputs_elementwise_close: Optional[bool] = False,
  check_onnx_tf_outputs_elementwise_close_full: Optional[bool] = False,
@@ -2437,6 +2440,15 @@ convert(
  param_replacement_file: Optional[str]
  Parameter replacement file path. (.json)

+ auto_generate_json: Optional[bool]
+ Automatically generates a parameter replacement JSON file that achieves minimal error
+ when converting the model. This option explores various parameter combinations to find
+ the best settings that result in successful conversion and highest accuracy.
+ The search stops when the final output OP accuracy check shows "Matches".
+ When used together with check_onnx_tf_outputs_elementwise_close_full,
+ the generated JSON is used to re-evaluate accuracy.
+ Default: False
+
  check_gpu_delegate_compatibility: Optional[bool]
  Run TFLite ModelAnalyzer on the generated Float16 tflite model
  to check if the model can be supported by GPU Delegate.
@@ -1,3 +1,3 @@
  from onnx2tf.onnx2tf import convert, main

- __version__ = '1.27.9'
+ __version__ = '1.28.0'
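
As a quick post-upgrade sanity check, the package-level names shown in this last hunk (convert, main, and __version__) can be used to confirm that 1.28.0 is the version actually installed; a small sketch:

# Post-upgrade check using only the names exported by onnx2tf/__init__.py above.
import onnx2tf

print(onnx2tf.__version__)        # expected: '1.28.0' after this release
print(callable(onnx2tf.convert))  # True: the Python conversion entry point
print(callable(onnx2tf.main))     # True: the CLI entry point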