onnx2tf 1.28.8__tar.gz → 1.29.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (206)
  1. {onnx2tf-1.28.8/onnx2tf.egg-info → onnx2tf-1.29.0}/PKG-INFO +20 -9
  2. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/README.md +2 -2
  3. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/__init__.py +1 -1
  4. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/onnx2tf.py +41 -0
  5. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/utils/common_functions.py +104 -3
  6. {onnx2tf-1.28.8 → onnx2tf-1.29.0/onnx2tf.egg-info}/PKG-INFO +20 -9
  7. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf.egg-info/SOURCES.txt +2 -1
  8. onnx2tf-1.29.0/onnx2tf.egg-info/requires.txt +16 -0
  9. onnx2tf-1.29.0/pyproject.toml +29 -0
  10. onnx2tf-1.28.8/onnx2tf.egg-info/entry_points.txt +0 -2
  11. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/LICENSE +0 -0
  12. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/LICENSE_onnx-tensorflow +0 -0
  13. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/__main__.py +0 -0
  14. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Abs.py +0 -0
  15. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Acos.py +0 -0
  16. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Acosh.py +0 -0
  17. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Add.py +0 -0
  18. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/And.py +0 -0
  19. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/ArgMax.py +0 -0
  20. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/ArgMin.py +0 -0
  21. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Asin.py +0 -0
  22. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Asinh.py +0 -0
  23. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Atan.py +0 -0
  24. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Atanh.py +0 -0
  25. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/AveragePool.py +0 -0
  26. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/BatchNormalization.py +0 -0
  27. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Bernoulli.py +0 -0
  28. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/BitShift.py +0 -0
  29. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Cast.py +0 -0
  30. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Ceil.py +0 -0
  31. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Celu.py +0 -0
  32. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Clip.py +0 -0
  33. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Col2Im.py +0 -0
  34. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Compress.py +0 -0
  35. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Concat.py +0 -0
  36. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/ConcatFromSequence.py +0 -0
  37. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Constant.py +0 -0
  38. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/ConstantOfShape.py +0 -0
  39. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Conv.py +0 -0
  40. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/ConvInteger.py +0 -0
  41. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/ConvTranspose.py +0 -0
  42. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Cos.py +0 -0
  43. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Cosh.py +0 -0
  44. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/CumSum.py +0 -0
  45. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/DepthToSpace.py +0 -0
  46. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/DequantizeLinear.py +0 -0
  47. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Det.py +0 -0
  48. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Div.py +0 -0
  49. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Dropout.py +0 -0
  50. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/DynamicQuantizeLinear.py +0 -0
  51. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Einsum.py +0 -0
  52. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Elu.py +0 -0
  53. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Equal.py +0 -0
  54. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Erf.py +0 -0
  55. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Exp.py +0 -0
  56. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Expand.py +0 -0
  57. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/EyeLike.py +0 -0
  58. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Flatten.py +0 -0
  59. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Floor.py +0 -0
  60. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/FusedConv.py +0 -0
  61. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/GRU.py +0 -0
  62. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Gather.py +0 -0
  63. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/GatherElements.py +0 -0
  64. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/GatherND.py +0 -0
  65. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Gelu.py +0 -0
  66. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Gemm.py +0 -0
  67. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/GlobalAveragePool.py +0 -0
  68. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/GlobalLpPool.py +0 -0
  69. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/GlobalMaxPool.py +0 -0
  70. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Greater.py +0 -0
  71. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/GreaterOrEqual.py +0 -0
  72. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/GridSample.py +0 -0
  73. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/GroupNorm.py +0 -0
  74. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/HammingWindow.py +0 -0
  75. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/HannWindow.py +0 -0
  76. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/HardSigmoid.py +0 -0
  77. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/HardSwish.py +0 -0
  78. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Hardmax.py +0 -0
  79. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Identity.py +0 -0
  80. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/If.py +0 -0
  81. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Input.py +0 -0
  82. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/InstanceNormalization.py +0 -0
  83. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Inverse.py +0 -0
  84. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/IsInf.py +0 -0
  85. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/IsNaN.py +0 -0
  86. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/LRN.py +0 -0
  87. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/LSTM.py +0 -0
  88. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/LayerNormalization.py +0 -0
  89. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/LeakyRelu.py +0 -0
  90. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Less.py +0 -0
  91. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/LessOrEqual.py +0 -0
  92. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Log.py +0 -0
  93. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/LogSoftmax.py +0 -0
  94. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/LpNormalization.py +0 -0
  95. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/MatMul.py +0 -0
  96. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/MatMulInteger.py +0 -0
  97. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Max.py +0 -0
  98. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/MaxPool.py +0 -0
  99. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/MaxUnpool.py +0 -0
  100. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Mean.py +0 -0
  101. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/MeanVarianceNormalization.py +0 -0
  102. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/MelWeightMatrix.py +0 -0
  103. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Min.py +0 -0
  104. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Mish.py +0 -0
  105. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Mod.py +0 -0
  106. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Mul.py +0 -0
  107. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Multinomial.py +0 -0
  108. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Neg.py +0 -0
  109. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/NonMaxSuppression.py +0 -0
  110. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/NonZero.py +0 -0
  111. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Not.py +0 -0
  112. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/OneHot.py +0 -0
  113. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/OptionalGetElement.py +0 -0
  114. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/OptionalHasElement.py +0 -0
  115. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Or.py +0 -0
  116. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/PRelu.py +0 -0
  117. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Pad.py +0 -0
  118. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Pow.py +0 -0
  119. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/QLinearAdd.py +0 -0
  120. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/QLinearConcat.py +0 -0
  121. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/QLinearConv.py +0 -0
  122. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/QLinearLeakyRelu.py +0 -0
  123. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/QLinearMatMul.py +0 -0
  124. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/QLinearMul.py +0 -0
  125. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/QLinearSigmoid.py +0 -0
  126. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/QLinearSoftmax.py +0 -0
  127. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/QuantizeLinear.py +0 -0
  128. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/RNN.py +0 -0
  129. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/RandomNormal.py +0 -0
  130. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/RandomNormalLike.py +0 -0
  131. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/RandomUniform.py +0 -0
  132. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/RandomUniformLike.py +0 -0
  133. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Range.py +0 -0
  134. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Reciprocal.py +0 -0
  135. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/ReduceL1.py +0 -0
  136. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/ReduceL2.py +0 -0
  137. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/ReduceLogSum.py +0 -0
  138. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/ReduceLogSumExp.py +0 -0
  139. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/ReduceMax.py +0 -0
  140. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/ReduceMean.py +0 -0
  141. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/ReduceMin.py +0 -0
  142. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/ReduceProd.py +0 -0
  143. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/ReduceSum.py +0 -0
  144. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/ReduceSumSquare.py +0 -0
  145. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Relu.py +0 -0
  146. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Reshape.py +0 -0
  147. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Resize.py +0 -0
  148. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/ReverseSequence.py +0 -0
  149. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/RoiAlign.py +0 -0
  150. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Round.py +0 -0
  151. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/STFT.py +0 -0
  152. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/ScaleAndTranslate.py +0 -0
  153. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Scatter.py +0 -0
  154. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/ScatterElements.py +0 -0
  155. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/ScatterND.py +0 -0
  156. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Selu.py +0 -0
  157. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/SequenceAt.py +0 -0
  158. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/SequenceConstruct.py +0 -0
  159. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/SequenceEmpty.py +0 -0
  160. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/SequenceErase.py +0 -0
  161. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/SequenceInsert.py +0 -0
  162. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/SequenceLength.py +0 -0
  163. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Shape.py +0 -0
  164. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Shrink.py +0 -0
  165. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Sigmoid.py +0 -0
  166. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Sign.py +0 -0
  167. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Sin.py +0 -0
  168. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Sinh.py +0 -0
  169. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Size.py +0 -0
  170. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Slice.py +0 -0
  171. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Softmax.py +0 -0
  172. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Softplus.py +0 -0
  173. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Softsign.py +0 -0
  174. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/SpaceToDepth.py +0 -0
  175. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Split.py +0 -0
  176. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/SplitToSequence.py +0 -0
  177. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Sqrt.py +0 -0
  178. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Squeeze.py +0 -0
  179. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/StringNormalizer.py +0 -0
  180. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Sub.py +0 -0
  181. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Sum.py +0 -0
  182. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Tan.py +0 -0
  183. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Tanh.py +0 -0
  184. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/ThresholdedRelu.py +0 -0
  185. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Tile.py +0 -0
  186. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/TopK.py +0 -0
  187. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Transpose.py +0 -0
  188. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Trilu.py +0 -0
  189. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Unique.py +0 -0
  190. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Unsqueeze.py +0 -0
  191. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Upsample.py +0 -0
  192. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Where.py +0 -0
  193. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/Xor.py +0 -0
  194. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/_Loop.py +0 -0
  195. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/__Loop.py +0 -0
  196. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/ops/__init__.py +0 -0
  197. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/utils/__init__.py +0 -0
  198. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/utils/enums.py +0 -0
  199. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/utils/iterative_json_optimizer.py +0 -0
  200. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/utils/json_auto_generator.py +0 -0
  201. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf/utils/logging.py +0 -0
  202. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf.egg-info/dependency_links.txt +0 -0
  203. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/onnx2tf.egg-info/top_level.txt +0 -0
  204. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/setup.cfg +0 -0
  205. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/setup.py +0 -0
  206. {onnx2tf-1.28.8 → onnx2tf-1.29.0}/tests/test_model_convert.py +0 -0
@@ -1,27 +1,38 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: onnx2tf
3
- Version: 1.28.8
4
- Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
3
+ Version: 1.29.0
4
+ Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC).
5
5
  Home-page: https://github.com/PINTO0309/onnx2tf
6
6
  Author: Katsuya Hyodo
7
7
  Author-email: rmsdh122@yahoo.co.jp
8
- License: MIT License
9
8
  Platform: linux
10
9
  Platform: unix
11
10
  Requires-Python: >=3.10
12
11
  Description-Content-Type: text/markdown
13
12
  License-File: LICENSE
14
13
  License-File: LICENSE_onnx-tensorflow
14
+ Requires-Dist: requests==2.32.5
15
+ Requires-Dist: numpy==1.26.4
16
+ Requires-Dist: onnx==1.19.0
17
+ Requires-Dist: onnxruntime==1.23.0
18
+ Requires-Dist: opencv-python==4.11.0.86
19
+ Requires-Dist: onnxsim==0.4.30
20
+ Requires-Dist: ai-edge-litert==2.1.0
21
+ Requires-Dist: tensorflow==2.19.0
22
+ Requires-Dist: tf-keras==2.19.0
23
+ Requires-Dist: onnx-graphsurgeon==0.5.8
24
+ Requires-Dist: simple-onnx-processing-tools==1.1.32
25
+ Requires-Dist: psutil==5.9.5
26
+ Requires-Dist: protobuf==4.25.5
27
+ Requires-Dist: h5py==3.11.0
28
+ Requires-Dist: ml_dtypes==0.5.1
29
+ Requires-Dist: flatbuffers==25.12.19
15
30
  Dynamic: author
16
31
  Dynamic: author-email
17
- Dynamic: description
18
- Dynamic: description-content-type
19
32
  Dynamic: home-page
20
- Dynamic: license
21
33
  Dynamic: license-file
22
34
  Dynamic: platform
23
35
  Dynamic: requires-python
24
- Dynamic: summary
25
36
 
26
37
  # onnx2tf
27
38
  Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in [onnx-tensorflow](https://github.com/onnx/onnx-tensorflow) ([onnx-tf](https://pypi.org/project/onnx-tf/)). I don't need a Star, but give me a pull request. Since I am adding challenging model optimizations and fixing bugs almost daily, I frequently embed potential bugs that would otherwise break through CI's regression testing. Therefore, if you encounter new problems, I recommend that you try a package that is a few versions older, or try the latest package that will be released in a few days.
@@ -334,7 +345,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
334
345
  docker run --rm -it \
335
346
  -v `pwd`:/workdir \
336
347
  -w /workdir \
337
- ghcr.io/pinto0309/onnx2tf:1.28.8
348
+ ghcr.io/pinto0309/onnx2tf:1.29.0
338
349
 
339
350
  or
340
351
 
@@ -342,7 +353,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
342
353
  docker run --rm -it \
343
354
  -v `pwd`:/workdir \
344
355
  -w /workdir \
345
- docker.io/pinto0309/onnx2tf:1.28.8
356
+ docker.io/pinto0309/onnx2tf:1.29.0
346
357
 
347
358
  or
348
359
 
@@ -309,7 +309,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
309
309
  docker run --rm -it \
310
310
  -v `pwd`:/workdir \
311
311
  -w /workdir \
312
- ghcr.io/pinto0309/onnx2tf:1.28.8
312
+ ghcr.io/pinto0309/onnx2tf:1.29.0
313
313
 
314
314
  or
315
315
 
@@ -317,7 +317,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
317
317
  docker run --rm -it \
318
318
  -v `pwd`:/workdir \
319
319
  -w /workdir \
320
- docker.io/pinto0309/onnx2tf:1.28.8
320
+ docker.io/pinto0309/onnx2tf:1.29.0
321
321
 
322
322
  or
323
323
 
@@ -1,3 +1,3 @@
1
1
  from onnx2tf.onnx2tf import convert, main
2
2
 
3
- __version__ = '1.28.8'
3
+ __version__ = '1.29.0'
@@ -99,6 +99,8 @@ def convert(
99
99
  enable_rnn_unroll: Optional[bool] = False,
100
100
  disable_suppression_flextranspose: Optional[bool] = False,
101
101
  disable_strict_mode: Optional[bool] = False,
102
+ onnxruntime_output_memmap: Optional[bool] = True,
103
+ onnxruntime_output_memmap_dir: Optional[str] = None,
102
104
  number_of_dimensions_after_flextranspose_compression: Optional[int] = 6,
103
105
  disable_suppression_flexstridedslice: Optional[bool] = False,
104
106
  number_of_dimensions_after_flexstridedslice_compression: Optional[int] = 5,
@@ -372,6 +374,15 @@ def convert(
372
374
  correction process is skipped, but the frequency of transposition errors increases\n
373
375
  and accuracy errors are more likely to occur. Strict mode is enabled by default.
374
376
 
377
+ onnxruntime_output_memmap: Optional[bool]
378
+ Use onnxruntime IOBinding with np.memmap for dummy inference outputs when\n
379
+ the estimated output tensor size exceeds available RAM. This avoids OOM\n
380
+ but increases disk I/O and may slow down validation.
381
+
382
+ onnxruntime_output_memmap_dir: Optional[str]
383
+ Directory for memmap files used by onnxruntime_output_memmap.\n
384
+ If omitted, a temporary directory is created and removed on exit.
385
+
375
386
  number_of_dimensions_after_flextranspose_compression: Optional[int]
376
387
  Number of Transpose OP dimensions generated after avoiding FlexTranspose generation.\n
377
388
  Also suppress the creation of the Transpose itself by specifying 2.\n
@@ -1118,6 +1129,8 @@ def convert(
1118
1129
  tf_layers_dict=tf_layers_dict,
1119
1130
  use_cuda=use_cuda,
1120
1131
  disable_strict_mode=disable_strict_mode,
1132
+ enable_ort_output_memmap=onnxruntime_output_memmap,
1133
+ ort_output_memmap_dir=onnxruntime_output_memmap_dir,
1121
1134
  shape_hints=shape_hints if (check_onnx_tf_outputs_elementwise_close or check_onnx_tf_outputs_elementwise_close_full) else None,
1122
1135
  )
1123
1136
  """
@@ -2041,6 +2054,8 @@ def convert(
2041
2054
  custom_input_op_name_np_data_path=custom_input_op_name_np_data_path,
2042
2055
  tf_layers_dict=tf_layers_dict,
2043
2056
  use_cuda=use_cuda,
2057
+ enable_ort_output_memmap=onnxruntime_output_memmap,
2058
+ ort_output_memmap_dir=onnxruntime_output_memmap_dir,
2044
2059
  shape_hints=shape_hints,
2045
2060
  )
2046
2061
  except Exception as ex:
@@ -2304,6 +2319,8 @@ def convert(
2304
2319
  custom_input_op_name_np_data_path=custom_input_op_name_np_data_path,
2305
2320
  tf_layers_dict=tf_layers_dict,
2306
2321
  use_cuda=use_cuda,
2322
+ enable_ort_output_memmap=onnxruntime_output_memmap,
2323
+ ort_output_memmap_dir=onnxruntime_output_memmap_dir,
2307
2324
  shape_hints=shape_hints,
2308
2325
  )
2309
2326
 
@@ -2836,6 +2853,28 @@ def main():
2836
2853
  'correction process is skipped, but the frequency of transposition errors increases \n' +
2837
2854
  'and accuracy errors are more likely to occur. Strict mode is enabled by default.'
2838
2855
  )
2856
+ parser.add_argument(
2857
+ '-doem',
2858
+ '--disable_onnxruntime_output_memmap',
2859
+ dest='disable_onnxruntime_output_memmap',
2860
+ action='store_true',
2861
+ help=\
2862
+ 'Disable onnxruntime output memmap. \n' +
2863
+ 'By default, onnx2tf uses onnxruntime IOBinding with np.memmap for dummy inference \n' +
2864
+ 'outputs only when the estimated output tensor size exceeds available RAM. \n' +
2865
+ 'Use this flag to force the standard in-memory output path instead. \n' +
2866
+ 'Default: disabled (memmap enabled when needed).'
2867
+ )
2868
+ parser.set_defaults(disable_onnxruntime_output_memmap=False)
2869
+ parser.add_argument(
2870
+ '-oemd',
2871
+ '--onnxruntime_output_memmap_dir',
2872
+ type=str,
2873
+ help=\
2874
+ 'Directory for memmap files used by onnxruntime output memmap. \n' +
2875
+ 'If omitted, a temporary directory is created and removed on exit. \n' +
2876
+ 'This setting is used only when memmap is actually enabled.'
2877
+ )
2839
2878
  parser.add_argument(
2840
2879
  '-nodafsc',
2841
2880
  '--number_of_dimensions_after_flexstridedslice_compression',
@@ -3133,6 +3172,8 @@ def main():
3133
3172
  enable_rnn_unroll=args.enable_rnn_unroll,
3134
3173
  disable_suppression_flextranspose=args.disable_suppression_flextranspose,
3135
3174
  disable_strict_mode=args.disable_strict_mode,
3175
+ onnxruntime_output_memmap=not args.disable_onnxruntime_output_memmap,
3176
+ onnxruntime_output_memmap_dir=args.onnxruntime_output_memmap_dir,
3136
3177
  number_of_dimensions_after_flextranspose_compression=args.number_of_dimensions_after_flextranspose_compression,
3137
3178
  disable_suppression_flexstridedslice=args.disable_suppression_flexstridedslice,
3138
3179
  number_of_dimensions_after_flexstridedslice_compression=args.number_of_dimensions_after_flexstridedslice_compression,
@@ -7,6 +7,9 @@ import copy
7
7
  import json
8
8
  import psutil
9
9
  import random
10
+ import atexit
11
+ import tempfile
12
+ import shutil
10
13
  random.seed(0)
11
14
  import requests
12
15
  import flatbuffers
@@ -3634,6 +3637,8 @@ def dummy_onnx_inference(
3634
3637
  tf_layers_dict: Optional[Dict] = None,
3635
3638
  use_cuda: bool = False,
3636
3639
  disable_strict_mode: bool = False,
3640
+ enable_ort_output_memmap: bool = False,
3641
+ ort_output_memmap_dir: Optional[str] = None,
3637
3642
  shape_hints: Optional[List[str]] = None,
3638
3643
  ) -> List[np.ndarray]:
3639
3644
  """Perform inference on ONNX subgraphs with an all-1 dummy tensor.
@@ -3663,6 +3668,14 @@ def dummy_onnx_inference(
3663
3668
  disable_strict_mode: Optional[bool]
3664
3669
  True to disable strict inference mode, False to enable it.
3665
3670
 
3671
+ enable_ort_output_memmap: bool
3672
+ True to use onnxruntime IOBinding with np.memmap for outputs when
3673
+ output tensors are too large for available RAM.
3674
+
3675
+ ort_output_memmap_dir: Optional[str]
3676
+ Directory to store memmap files. If not specified, a temporary
3677
+ directory is created and removed on exit.
3678
+
3666
3679
  Returns
3667
3680
  ----------
3668
3681
  outputs: List[np.ndarray]
@@ -3880,7 +3893,7 @@ def dummy_onnx_inference(
3880
3893
  op_output_size: int = 1
3881
3894
  if gs_graph_output.shape is not None:
3882
3895
  for s in gs_graph_output.shape:
3883
- if isinstance(s, int):
3896
+ if isinstance(s, (int, np.integer)):
3884
3897
  op_output_size *= s
3885
3898
  # Total bytes
3886
3899
  total_output_size += op_output_size * dtype_sizes.get(gs_graph_output.dtype, 4)
@@ -3888,7 +3901,8 @@ def dummy_onnx_inference(
3888
3901
  # When exact inference mode is enabled and the total size of the tensor of inference results exceeds approximately 80% of available RAM
3889
3902
  mem_available = psutil.virtual_memory().available * 0.80 // 1024 // 1024 //1024
3890
3903
  total_output_size_gb = (total_output_size // 1024 // 1024 //1024)
3891
- if (not disable_strict_mode and total_output_size_gb > mem_available):
3904
+ use_memmap_outputs = enable_ort_output_memmap and total_output_size_gb > mem_available
3905
+ if (not disable_strict_mode and total_output_size_gb > mem_available and not use_memmap_outputs):
3892
3906
  if tmp_onnx_path:
3893
3907
  os.remove(tmp_onnx_path)
3894
3908
  os.remove(tmp_onnx_external_weights_path)
@@ -3896,7 +3910,94 @@ def dummy_onnx_inference(
3896
3910
  f'The tool skipped dummy inference to avoid SWAP processing because the total size of the tensor of inference results exceeded about {mem_available} GB. (results: {total_output_size_gb} GB)'
3897
3911
  )
3898
3912
 
3899
- outputs = onnx_session.run(None, input_datas)
3913
+ if use_memmap_outputs:
3914
+ output_shapes = []
3915
+ output_names_order = [out.name for out in gs_graph.outputs]
3916
+ for out in gs_graph.outputs:
3917
+ shape = out.shape
3918
+ if shape is None or any(not isinstance(s, (int, np.integer)) for s in shape):
3919
+ if tmp_onnx_path:
3920
+ os.remove(tmp_onnx_path)
3921
+ os.remove(tmp_onnx_external_weights_path)
3922
+ raise Exception(
3923
+ 'onnxruntime output memmap requires static output shapes. ' +
3924
+ 'Provide --shape_hints or reduce validation outputs.'
3925
+ )
3926
+ output_shapes.append([int(s) for s in shape])
3927
+
3928
+ memmap_dir = ort_output_memmap_dir
3929
+ cleanup_memmap_dir = False
3930
+ if memmap_dir is None:
3931
+ memmap_dir = tempfile.mkdtemp(prefix='onnx2tf_ort_mm_')
3932
+ cleanup_memmap_dir = True
3933
+ os.makedirs(memmap_dir, exist_ok=True)
3934
+
3935
+ try:
3936
+ disk_free = psutil.disk_usage(memmap_dir).free
3937
+ if total_output_size > disk_free:
3938
+ raise Exception(
3939
+ f'Not enough disk space for memmap outputs. ' +
3940
+ f'Required: {total_output_size} bytes, Free: {disk_free} bytes.'
3941
+ )
3942
+ except Exception as ex:
3943
+ if 'Not enough disk space' in str(ex):
3944
+ if tmp_onnx_path:
3945
+ os.remove(tmp_onnx_path)
3946
+ os.remove(tmp_onnx_external_weights_path)
3947
+ raise
3948
+
3949
+ if cleanup_memmap_dir:
3950
+ atexit.register(shutil.rmtree, memmap_dir, ignore_errors=True)
3951
+
3952
+ info(
3953
+ f'onnxruntime output memmap enabled. ' +
3954
+ f'Outputs: {len(output_names_order)}, Path: {memmap_dir}'
3955
+ )
3956
+
3957
+ io_binding = onnx_session.io_binding()
3958
+
3959
+ for input_name, input_data in input_datas.items():
3960
+ if not input_data.flags['C_CONTIGUOUS']:
3961
+ input_data = np.ascontiguousarray(input_data)
3962
+ input_datas[input_name] = input_data
3963
+ io_binding.bind_input(
3964
+ input_name,
3965
+ 'cpu',
3966
+ 0,
3967
+ input_data.dtype,
3968
+ input_data.shape,
3969
+ input_data.__array_interface__['data'][0],
3970
+ )
3971
+
3972
+ memmap_outputs = {}
3973
+ for idx, (output_name, output_shape) in enumerate(zip(output_names_order, output_shapes)):
3974
+ safe_output_name = re.sub(r'[^0-9A-Za-z._-]+', '_', output_name)
3975
+ memmap_path = os.path.join(memmap_dir, f'ort_output_{idx}_{safe_output_name}.mmap')
3976
+ output_dtype = np.dtype(gs_graph.outputs[idx].dtype)
3977
+ memmap_array = np.memmap(
3978
+ memmap_path,
3979
+ dtype=output_dtype,
3980
+ mode='w+',
3981
+ shape=tuple(output_shape),
3982
+ )
3983
+ memmap_outputs[output_name] = memmap_array
3984
+ io_binding.bind_output(
3985
+ output_name,
3986
+ 'cpu',
3987
+ 0,
3988
+ output_dtype,
3989
+ output_shape,
3990
+ memmap_array.__array_interface__['data'][0],
3991
+ )
3992
+
3993
+ onnx_session.run_with_iobinding(io_binding)
3994
+ io_binding.synchronize_outputs()
3995
+ for memmap_array in memmap_outputs.values():
3996
+ memmap_array.flush()
3997
+
3998
+ outputs = [memmap_outputs[name] for name in output_names_order]
3999
+ else:
4000
+ outputs = onnx_session.run(None, input_datas)
3900
4001
  if tmp_onnx_path:
3901
4002
  os.remove(tmp_onnx_path)
3902
4003
  os.remove(tmp_onnx_external_weights_path)
@@ -1,27 +1,38 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: onnx2tf
3
- Version: 1.28.8
4
- Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
3
+ Version: 1.29.0
4
+ Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC).
5
5
  Home-page: https://github.com/PINTO0309/onnx2tf
6
6
  Author: Katsuya Hyodo
7
7
  Author-email: rmsdh122@yahoo.co.jp
8
- License: MIT License
9
8
  Platform: linux
10
9
  Platform: unix
11
10
  Requires-Python: >=3.10
12
11
  Description-Content-Type: text/markdown
13
12
  License-File: LICENSE
14
13
  License-File: LICENSE_onnx-tensorflow
14
+ Requires-Dist: requests==2.32.5
15
+ Requires-Dist: numpy==1.26.4
16
+ Requires-Dist: onnx==1.19.0
17
+ Requires-Dist: onnxruntime==1.23.0
18
+ Requires-Dist: opencv-python==4.11.0.86
19
+ Requires-Dist: onnxsim==0.4.30
20
+ Requires-Dist: ai-edge-litert==2.1.0
21
+ Requires-Dist: tensorflow==2.19.0
22
+ Requires-Dist: tf-keras==2.19.0
23
+ Requires-Dist: onnx-graphsurgeon==0.5.8
24
+ Requires-Dist: simple-onnx-processing-tools==1.1.32
25
+ Requires-Dist: psutil==5.9.5
26
+ Requires-Dist: protobuf==4.25.5
27
+ Requires-Dist: h5py==3.11.0
28
+ Requires-Dist: ml_dtypes==0.5.1
29
+ Requires-Dist: flatbuffers==25.12.19
15
30
  Dynamic: author
16
31
  Dynamic: author-email
17
- Dynamic: description
18
- Dynamic: description-content-type
19
32
  Dynamic: home-page
20
- Dynamic: license
21
33
  Dynamic: license-file
22
34
  Dynamic: platform
23
35
  Dynamic: requires-python
24
- Dynamic: summary
25
36
 
26
37
  # onnx2tf
27
38
  Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in [onnx-tensorflow](https://github.com/onnx/onnx-tensorflow) ([onnx-tf](https://pypi.org/project/onnx-tf/)). I don't need a Star, but give me a pull request. Since I am adding challenging model optimizations and fixing bugs almost daily, I frequently embed potential bugs that would otherwise break through CI's regression testing. Therefore, if you encounter new problems, I recommend that you try a package that is a few versions older, or try the latest package that will be released in a few days.
@@ -334,7 +345,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
334
345
  docker run --rm -it \
335
346
  -v `pwd`:/workdir \
336
347
  -w /workdir \
337
- ghcr.io/pinto0309/onnx2tf:1.28.8
348
+ ghcr.io/pinto0309/onnx2tf:1.29.0
338
349
 
339
350
  or
340
351
 
@@ -342,7 +353,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
342
353
  docker run --rm -it \
343
354
  -v `pwd`:/workdir \
344
355
  -w /workdir \
345
- docker.io/pinto0309/onnx2tf:1.28.8
356
+ docker.io/pinto0309/onnx2tf:1.29.0
346
357
 
347
358
  or
348
359
 
@@ -1,6 +1,7 @@
1
1
  LICENSE
2
2
  LICENSE_onnx-tensorflow
3
3
  README.md
4
+ pyproject.toml
4
5
  setup.py
5
6
  onnx2tf/__init__.py
6
7
  onnx2tf/__main__.py
@@ -8,7 +9,7 @@ onnx2tf/onnx2tf.py
8
9
  onnx2tf.egg-info/PKG-INFO
9
10
  onnx2tf.egg-info/SOURCES.txt
10
11
  onnx2tf.egg-info/dependency_links.txt
11
- onnx2tf.egg-info/entry_points.txt
12
+ onnx2tf.egg-info/requires.txt
12
13
  onnx2tf.egg-info/top_level.txt
13
14
  onnx2tf/ops/Abs.py
14
15
  onnx2tf/ops/Acos.py
@@ -0,0 +1,16 @@
1
+ requests==2.32.5
2
+ numpy==1.26.4
3
+ onnx==1.19.0
4
+ onnxruntime==1.23.0
5
+ opencv-python==4.11.0.86
6
+ onnxsim==0.4.30
7
+ ai-edge-litert==2.1.0
8
+ tensorflow==2.19.0
9
+ tf-keras==2.19.0
10
+ onnx-graphsurgeon==0.5.8
11
+ simple-onnx-processing-tools==1.1.32
12
+ psutil==5.9.5
13
+ protobuf==4.25.5
14
+ h5py==3.11.0
15
+ ml_dtypes==0.5.1
16
+ flatbuffers==25.12.19
@@ -0,0 +1,29 @@
1
+ [project]
2
+ name = "onnx2tf"
3
+ version = "1.29.0"
4
+ description = "Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC)."
5
+ readme = "README.md"
6
+ requires-python = "==3.11.12"
7
+ dependencies = [
8
+ "requests==2.32.5",
9
+ "numpy==1.26.4",
10
+ "onnx==1.19.0",
11
+ "onnxruntime==1.23.0",
12
+ "opencv-python==4.11.0.86",
13
+ "onnxsim==0.4.30",
14
+ "ai-edge-litert==2.1.0",
15
+ "tensorflow==2.19.0",
16
+ "tf-keras==2.19.0",
17
+ "onnx-graphsurgeon==0.5.8",
18
+ "simple-onnx-processing-tools==1.1.32",
19
+ "psutil==5.9.5",
20
+ "protobuf==4.25.5",
21
+ "h5py==3.11.0",
22
+ "ml_dtypes==0.5.1",
23
+ "flatbuffers==25.12.19",
24
+ ]
25
+ [tool.uv]
26
+ override-dependencies = [
27
+ "onnx==1.19.0",
28
+ "onnxsim==0.4.30",
29
+ ]
@@ -1,2 +0,0 @@
1
- [console_scripts]
2
- onnx2tf = onnx2tf:main
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes