onnx2tf 1.29.19.tar.gz → 1.29.21.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (216)
  1. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/PKG-INFO +51 -8
  2. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/README.md +49 -6
  3. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/__init__.py +1 -1
  4. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/onnx2tf.py +996 -27
  5. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/GatherElements.py +25 -7
  6. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/GatherND.py +28 -1
  7. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/ScatterElements.py +25 -7
  8. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/ScatterND.py +45 -6
  9. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/TensorScatter.py +20 -6
  10. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/utils/common_functions.py +198 -4
  11. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/pyproject.toml +2 -2
  12. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/__main__.py +0 -0
  13. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Abs.py +0 -0
  14. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Acos.py +0 -0
  15. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Acosh.py +0 -0
  16. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Add.py +0 -0
  17. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/AffineGrid.py +0 -0
  18. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/And.py +0 -0
  19. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/ArgMax.py +0 -0
  20. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/ArgMin.py +0 -0
  21. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Asin.py +0 -0
  22. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Asinh.py +0 -0
  23. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Atan.py +0 -0
  24. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Atanh.py +0 -0
  25. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Attention.py +0 -0
  26. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/AveragePool.py +0 -0
  27. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/BatchNormalization.py +0 -0
  28. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Bernoulli.py +0 -0
  29. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/BitShift.py +0 -0
  30. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/BitwiseAnd.py +0 -0
  31. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/BitwiseNot.py +0 -0
  32. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/BitwiseOr.py +0 -0
  33. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/BitwiseXor.py +0 -0
  34. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/BlackmanWindow.py +0 -0
  35. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Cast.py +0 -0
  36. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Ceil.py +0 -0
  37. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Celu.py +0 -0
  38. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Clip.py +0 -0
  39. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Col2Im.py +0 -0
  40. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Compress.py +0 -0
  41. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Concat.py +0 -0
  42. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/ConcatFromSequence.py +0 -0
  43. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Constant.py +0 -0
  44. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/ConstantOfShape.py +0 -0
  45. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Conv.py +0 -0
  46. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/ConvInteger.py +0 -0
  47. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/ConvTranspose.py +0 -0
  48. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Cos.py +0 -0
  49. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Cosh.py +0 -0
  50. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/CumProd.py +0 -0
  51. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/CumSum.py +0 -0
  52. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/DFT.py +0 -0
  53. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/DeformConv.py +0 -0
  54. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/DepthToSpace.py +0 -0
  55. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/DequantizeLinear.py +0 -0
  56. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Det.py +0 -0
  57. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Div.py +0 -0
  58. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Dropout.py +0 -0
  59. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/DynamicQuantizeLinear.py +0 -0
  60. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Einsum.py +0 -0
  61. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Elu.py +0 -0
  62. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Equal.py +0 -0
  63. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Erf.py +0 -0
  64. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Exp.py +0 -0
  65. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Expand.py +0 -0
  66. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/EyeLike.py +0 -0
  67. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Flatten.py +0 -0
  68. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Floor.py +0 -0
  69. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/FusedConv.py +0 -0
  70. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/GRU.py +0 -0
  71. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Gather.py +0 -0
  72. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Gelu.py +0 -0
  73. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Gemm.py +0 -0
  74. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/GlobalAveragePool.py +0 -0
  75. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/GlobalLpPool.py +0 -0
  76. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/GlobalMaxPool.py +0 -0
  77. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Greater.py +0 -0
  78. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/GreaterOrEqual.py +0 -0
  79. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/GridSample.py +0 -0
  80. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/GroupNorm.py +0 -0
  81. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/HammingWindow.py +0 -0
  82. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/HannWindow.py +0 -0
  83. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/HardSigmoid.py +0 -0
  84. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/HardSwish.py +0 -0
  85. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Hardmax.py +0 -0
  86. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Identity.py +0 -0
  87. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/If.py +0 -0
  88. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/ImageDecoder.py +0 -0
  89. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Input.py +0 -0
  90. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/InstanceNormalization.py +0 -0
  91. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Inverse.py +0 -0
  92. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/IsInf.py +0 -0
  93. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/IsNaN.py +0 -0
  94. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/LRN.py +0 -0
  95. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/LSTM.py +0 -0
  96. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/LayerNormalization.py +0 -0
  97. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/LeakyRelu.py +0 -0
  98. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Less.py +0 -0
  99. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/LessOrEqual.py +0 -0
  100. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Log.py +0 -0
  101. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/LogSoftmax.py +0 -0
  102. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Loop.py +0 -0
  103. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/LpNormalization.py +0 -0
  104. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/LpPool.py +0 -0
  105. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/MatMul.py +0 -0
  106. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/MatMulInteger.py +0 -0
  107. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Max.py +0 -0
  108. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/MaxPool.py +0 -0
  109. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/MaxRoiPool.py +0 -0
  110. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/MaxUnpool.py +0 -0
  111. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Mean.py +0 -0
  112. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/MeanVarianceNormalization.py +0 -0
  113. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/MelWeightMatrix.py +0 -0
  114. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Min.py +0 -0
  115. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Mish.py +0 -0
  116. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Mod.py +0 -0
  117. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Mul.py +0 -0
  118. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Multinomial.py +0 -0
  119. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Neg.py +0 -0
  120. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/NegativeLogLikelihoodLoss.py +0 -0
  121. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/NonMaxSuppression.py +0 -0
  122. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/NonZero.py +0 -0
  123. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Not.py +0 -0
  124. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/OneHot.py +0 -0
  125. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/OptionalGetElement.py +0 -0
  126. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/OptionalHasElement.py +0 -0
  127. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Or.py +0 -0
  128. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/PRelu.py +0 -0
  129. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Pad.py +0 -0
  130. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Pow.py +0 -0
  131. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/QLinearAdd.py +0 -0
  132. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/QLinearConcat.py +0 -0
  133. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/QLinearConv.py +0 -0
  134. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/QLinearLeakyRelu.py +0 -0
  135. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/QLinearMatMul.py +0 -0
  136. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/QLinearMul.py +0 -0
  137. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/QLinearSigmoid.py +0 -0
  138. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/QLinearSoftmax.py +0 -0
  139. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/QuantizeLinear.py +0 -0
  140. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/RMSNormalization.py +0 -0
  141. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/RNN.py +0 -0
  142. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/RandomNormal.py +0 -0
  143. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/RandomNormalLike.py +0 -0
  144. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/RandomUniform.py +0 -0
  145. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/RandomUniformLike.py +0 -0
  146. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Range.py +0 -0
  147. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Reciprocal.py +0 -0
  148. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/ReduceL1.py +0 -0
  149. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/ReduceL2.py +0 -0
  150. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/ReduceLogSum.py +0 -0
  151. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/ReduceLogSumExp.py +0 -0
  152. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/ReduceMax.py +0 -0
  153. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/ReduceMean.py +0 -0
  154. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/ReduceMin.py +0 -0
  155. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/ReduceProd.py +0 -0
  156. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/ReduceSum.py +0 -0
  157. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/ReduceSumSquare.py +0 -0
  158. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/RegexFullMatch.py +0 -0
  159. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Relu.py +0 -0
  160. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Reshape.py +0 -0
  161. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Resize.py +0 -0
  162. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/ReverseSequence.py +0 -0
  163. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/RoiAlign.py +0 -0
  164. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/RotaryEmbedding.py +0 -0
  165. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Round.py +0 -0
  166. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/STFT.py +0 -0
  167. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/ScaleAndTranslate.py +0 -0
  168. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Scan.py +0 -0
  169. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Scatter.py +0 -0
  170. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Selu.py +0 -0
  171. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/SequenceAt.py +0 -0
  172. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/SequenceConstruct.py +0 -0
  173. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/SequenceEmpty.py +0 -0
  174. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/SequenceErase.py +0 -0
  175. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/SequenceInsert.py +0 -0
  176. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/SequenceLength.py +0 -0
  177. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Shape.py +0 -0
  178. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Shrink.py +0 -0
  179. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Sigmoid.py +0 -0
  180. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Sign.py +0 -0
  181. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Sin.py +0 -0
  182. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Sinh.py +0 -0
  183. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Size.py +0 -0
  184. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Slice.py +0 -0
  185. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Softmax.py +0 -0
  186. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/SoftmaxCrossEntropyLoss.py +0 -0
  187. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Softplus.py +0 -0
  188. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Softsign.py +0 -0
  189. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/SpaceToDepth.py +0 -0
  190. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Split.py +0 -0
  191. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/SplitToSequence.py +0 -0
  192. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Sqrt.py +0 -0
  193. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Squeeze.py +0 -0
  194. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/StringConcat.py +0 -0
  195. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/StringNormalizer.py +0 -0
  196. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/StringSplit.py +0 -0
  197. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Sub.py +0 -0
  198. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Sum.py +0 -0
  199. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Tan.py +0 -0
  200. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Tanh.py +0 -0
  201. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/ThresholdedRelu.py +0 -0
  202. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Tile.py +0 -0
  203. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/TopK.py +0 -0
  204. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Transpose.py +0 -0
  205. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Trilu.py +0 -0
  206. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Unique.py +0 -0
  207. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Unsqueeze.py +0 -0
  208. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Upsample.py +0 -0
  209. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Where.py +0 -0
  210. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/Xor.py +0 -0
  211. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/ops/__init__.py +0 -0
  212. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/utils/__init__.py +0 -0
  213. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/utils/enums.py +0 -0
  214. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/utils/iterative_json_optimizer.py +0 -0
  215. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/utils/json_auto_generator.py +0 -0
  216. {onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/utils/logging.py +0 -0
{onnx2tf-1.29.19 → onnx2tf-1.29.21}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: onnx2tf
- Version: 1.29.19
+ Version: 1.29.21
  Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
  Keywords: onnx,tensorflow,tflite,keras,deep-learning,machine-learning
  Author: Katsuya Hyodo
@@ -18,7 +18,7 @@ Classifier: Programming Language :: Python :: 3.11
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
  Requires-Dist: requests==2.32.5
  Requires-Dist: numpy==1.26.4
- Requires-Dist: onnx==1.19.0
+ Requires-Dist: onnx==1.19.1
  Requires-Dist: onnxruntime==1.23.0
  Requires-Dist: opencv-python==4.11.0.86
  Requires-Dist: onnxsim==0.4.36
@@ -365,7 +365,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- ghcr.io/pinto0309/onnx2tf:1.29.19
+ ghcr.io/pinto0309/onnx2tf:1.29.21

  or

@@ -373,18 +373,18 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- docker.io/pinto0309/onnx2tf:1.29.19
+ docker.io/pinto0309/onnx2tf:1.29.21

  or

- pip install -U onnx==1.19.0 \
+ pip install -U onnx==1.19.1 \
  && pip install -U onnx-graphsurgeon==0.5.8 \
  && pip install -U onnxruntime==1.23.0 \
  && pip install -U onnxsim==0.4.36 \
  && pip install -U onnxoptimizer==0.4.2 \
  && pip install -U simple_onnx_processing_tools==1.1.32 \
- && pip install -U sne4onnx>=1.0.13 \
- && pip install -U sng4onnx>=1.0.4 \
+ && pip install -U sne4onnx==1.0.15 \
+ && pip install -U sng4onnx==1.0.5 \
  && pip install -U ai_edge_litert==1.2.0 \
  && pip install -U tensorflow==2.19.0 \
  && pip install -U protobuf==3.20.3 \
@@ -630,7 +630,7 @@ After many upgrades, the need for JSON parameter correction has become much less

  `-ois` an option to overwrite the input OP to a static size if it has undefined dimensions. `-cotof` option checks the accuracy of all OPs one by one. `-cotoa` is the error value of the threshold for determining an accuracy error. If there are undefined dimensions in the input OP, it is better to fix them to the static geometry to improve the accuracy of the accuracy measurement.

- Also, you can use the `-cind` option to specify custom input for `-cotof`, instead of using the default dummy input. Otherwise, all input values will be set to 1. For more information about the `-cind` option, please refer to [here](#cli-parameter).
+ Also, you can use the `-cind` option to specify custom input for `-cotof`, instead of using the default dummy input. Otherwise, all input values will be set to 1. You can override the dummy input values with `--value_hints` (scalar only, `*:default` supported). For more information about the `-cind` option, please refer to [here](#cli-parameter).

  The `-cotof` option only compares the original ONNX and converted TensorFlow (Keras) models at Float32 precision, not at Float16 or INT8 precision.

@@ -644,6 +644,10 @@ onnx2tf -i mobilenetv2-12.onnx -b 1 -cotof -cotoa 1e-1
  or

  onnx2tf -i mobilenetv2-12.onnx -cotof -cotoa 1e-1 -cind "input" "/your/path/x.npy"
+
+ or
+
+ onnx2tf -i mobilenetv2-12.onnx -cotof -cotoa 1e-1 --value_hints "input:0.5" "*:1.0"
  ```
  ![image](https://user-images.githubusercontent.com/33194443/216901668-5fdb1e38-8670-46a4-b4b9-8a774fa7545e.png)
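The CLI example above pairs with the `value_hints` argument added to the Python `convert()` signature later in this diff. A minimal sketch of an equivalent Python call, assuming the long-form keyword names for `-cotof`/`-cotoa` carried over unchanged from earlier onnx2tf releases:

```python
# Sketch only: value_hints is taken from the convert() signature shown further down
# in this diff; the -cotof/-cotoa keyword names are assumed from the pre-existing API.
import onnx2tf

onnx2tf.convert(
    input_onnx_file_path="mobilenetv2-12.onnx",
    output_folder_path="saved_model",
    check_onnx_tf_outputs_elementwise_close_full=True,  # -cotof (assumed keyword name)
    check_onnx_tf_outputs_elementwise_close_atol=1e-1,  # -cotoa (assumed keyword name)
    value_hints=["input:0.5", "*:1.0"],  # scalar hint per input, "*" as the fallback
)
```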
@@ -1826,6 +1830,14 @@ optional arguments:
  A value of 1 or more must be specified.
  Numerical values other than dynamic dimensions are ignored.

+ -vh VALUE_HINTS [VALUE_HINTS ...], \
+ --value_hints VALUE_HINTS [VALUE_HINTS ...]
+ Value hints for dummy inference input tensors.
+ The format is
+ "input_name_1:value" "input_name_2:value" "*:default_value"
+ "*" applies to all inputs not explicitly specified.
+ Values are scalar only.
+
  -nlt, --no_large_tensor
  Suppresses constant bloat caused by Tile OP when optimizing models in onnxsim.
  See: https://github.com/daquexian/onnx-simplifier/issues/178
@@ -1887,6 +1899,15 @@ optional arguments:
  model partitioned into subgraphs.
  e.g. --output_names_to_interrupt_model_conversion "output0" "output1" "output2"

+ -easm, --enable_auto_split_model
+ Force auto split regardless of the ONNX file size.
+ Uses --auto_split_max_size_mb as the target partition size.
+
+ -asmsm AUTO_SPLIT_MAX_SIZE_MB, --auto_split_max_size_mb AUTO_SPLIT_MAX_SIZE_MB
+ Target maximum size per partition in MB based on ONNX initializer sizes.
+ Used when auto-split is triggered or forced.
+ Default: 1024
+
  -dgc, --disable_group_convolution
  Disable GroupConvolution and replace it with SeparableConvolution for
  output to saved_model format.
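The two new options above work together: `-easm` forces partitioning even for models below the usual size threshold, and `-asmsm` caps each partition by the size of its initializers. A hypothetical invocation (the model file name is a placeholder) would look like:

onnx2tf -i large_model.onnx -easm -asmsm 512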
@@ -2148,6 +2169,7 @@ convert(
  batch_size: Union[int, NoneType] = None,
  overwrite_input_shape: Union[List[str], NoneType] = None,
  shape_hints: Union[List[str], NoneType] = None,
+ value_hints: Union[List[str], NoneType] = None,
  no_large_tensor: Optional[bool] = False,
  output_nms_with_dynamic_tensor: Optional[bool] = False,
  switch_nms_version: Optional[str] = 'v4',
@@ -2156,6 +2178,8 @@ convert(
  keep_shape_absolutely_input_names: Optional[List[str]] = None,
  input_names_to_interrupt_model_conversion: Union[List[str], NoneType] = None,
  output_names_to_interrupt_model_conversion: Union[List[str], NoneType] = None,
+ enable_auto_split_model: Optional[bool] = False,
+ auto_split_max_size_mb: Optional[int] = 1024,
  disable_group_convolution: Union[bool, NoneType] = False,
  enable_batchmatmul_unfold: Optional[bool] = False,
  enable_rnn_unroll: Optional[bool] = False,
@@ -2366,6 +2390,13 @@ convert(
  A value of 1 or more must be specified.
  Numerical values other than dynamic dimensions are ignored.

+ value_hints: Optional[List[str]]
+ Value hints for dummy inference input tensors.
+ The format is
+ ['input_name_1:value', 'input_name_2:value', '*:default_value']
+ "*" applies to all inputs not explicitly specified.
+ Values are scalar only.
+
  no_large_tensor: Optional[bool]
  Suppresses constant bloat caused by Tile OP when optimizing models in onnxsim.
  See: https://github.com/daquexian/onnx-simplifier/issues/178
@@ -2424,6 +2455,17 @@ convert(
  e.g.
  output_names_to_interrupt_model_conversion=['output0','output1','output2']

+ enable_auto_split_model: Optional[bool]
+ Force auto split regardless of the ONNX file size.
+ Uses auto_split_max_size_mb as the target partition size.
+ Short option: -easm
+ Default: False
+
+ auto_split_max_size_mb: Optional[int]
+ Target maximum size per partition in MB based on ONNX initializer sizes.
+ Used when auto-split is triggered or forced.
+ Default: 1024
+
  disable_group_convolution: Optional[bool]
  Disable GroupConvolution and replace it with SeparableConvolution for
  output to saved_model format.
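The same auto-split behaviour is reachable from Python through the keywords documented above. A hedged sketch, using only names confirmed in this diff plus the pre-existing `input_onnx_file_path`/`output_folder_path` arguments (file names are placeholders):

```python
# Sketch only: enable_auto_split_model, auto_split_max_size_mb and value_hints are the
# keywords added in this release; the model path below is a placeholder.
import onnx2tf

onnx2tf.convert(
    input_onnx_file_path="large_model.onnx",
    output_folder_path="saved_model",
    enable_auto_split_model=True,   # force partitioning regardless of ONNX file size
    auto_split_max_size_mb=512,     # target max initializer size per partition, in MB
    value_hints=["*:1.0"],          # one scalar dummy value applied to every input
)
```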
@@ -3010,6 +3052,7 @@ The above differences often cannot be dealt with by simply converting the model
  14. [nobuco](https://github.com/AlexanderLutsenko/nobuco)
  15. [onnx2torch](https://github.com/ENOT-AutoDL/onnx2torch)
  16. [ai-edge-torch](https://github.com/google-ai-edge/ai-edge-torch)
+ 17. [LiteRT.js](https://ai.google.dev/edge/litert/web)

  ## Acknowledgement
  1. https://github.com/onnx/models
{onnx2tf-1.29.19 → onnx2tf-1.29.21}/README.md

@@ -323,7 +323,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- ghcr.io/pinto0309/onnx2tf:1.29.19
+ ghcr.io/pinto0309/onnx2tf:1.29.21

  or

@@ -331,18 +331,18 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- docker.io/pinto0309/onnx2tf:1.29.19
+ docker.io/pinto0309/onnx2tf:1.29.21

  or

- pip install -U onnx==1.19.0 \
+ pip install -U onnx==1.19.1 \
  && pip install -U onnx-graphsurgeon==0.5.8 \
  && pip install -U onnxruntime==1.23.0 \
  && pip install -U onnxsim==0.4.36 \
  && pip install -U onnxoptimizer==0.4.2 \
  && pip install -U simple_onnx_processing_tools==1.1.32 \
- && pip install -U sne4onnx>=1.0.13 \
- && pip install -U sng4onnx>=1.0.4 \
+ && pip install -U sne4onnx==1.0.15 \
+ && pip install -U sng4onnx==1.0.5 \
  && pip install -U ai_edge_litert==1.2.0 \
  && pip install -U tensorflow==2.19.0 \
  && pip install -U protobuf==3.20.3 \
@@ -588,7 +588,7 @@ After many upgrades, the need for JSON parameter correction has become much less

  `-ois` an option to overwrite the input OP to a static size if it has undefined dimensions. `-cotof` option checks the accuracy of all OPs one by one. `-cotoa` is the error value of the threshold for determining an accuracy error. If there are undefined dimensions in the input OP, it is better to fix them to the static geometry to improve the accuracy of the accuracy measurement.

- Also, you can use the `-cind` option to specify custom input for `-cotof`, instead of using the default dummy input. Otherwise, all input values will be set to 1. For more information about the `-cind` option, please refer to [here](#cli-parameter).
+ Also, you can use the `-cind` option to specify custom input for `-cotof`, instead of using the default dummy input. Otherwise, all input values will be set to 1. You can override the dummy input values with `--value_hints` (scalar only, `*:default` supported). For more information about the `-cind` option, please refer to [here](#cli-parameter).

  The `-cotof` option only compares the original ONNX and converted TensorFlow (Keras) models at Float32 precision, not at Float16 or INT8 precision.

@@ -602,6 +602,10 @@ onnx2tf -i mobilenetv2-12.onnx -b 1 -cotof -cotoa 1e-1
  or

  onnx2tf -i mobilenetv2-12.onnx -cotof -cotoa 1e-1 -cind "input" "/your/path/x.npy"
+
+ or
+
+ onnx2tf -i mobilenetv2-12.onnx -cotof -cotoa 1e-1 --value_hints "input:0.5" "*:1.0"
  ```
  ![image](https://user-images.githubusercontent.com/33194443/216901668-5fdb1e38-8670-46a4-b4b9-8a774fa7545e.png)

@@ -1784,6 +1788,14 @@ optional arguments:
  A value of 1 or more must be specified.
  Numerical values other than dynamic dimensions are ignored.

+ -vh VALUE_HINTS [VALUE_HINTS ...], \
+ --value_hints VALUE_HINTS [VALUE_HINTS ...]
+ Value hints for dummy inference input tensors.
+ The format is
+ "input_name_1:value" "input_name_2:value" "*:default_value"
+ "*" applies to all inputs not explicitly specified.
+ Values are scalar only.
+
  -nlt, --no_large_tensor
  Suppresses constant bloat caused by Tile OP when optimizing models in onnxsim.
  See: https://github.com/daquexian/onnx-simplifier/issues/178
@@ -1845,6 +1857,15 @@ optional arguments:
  model partitioned into subgraphs.
  e.g. --output_names_to_interrupt_model_conversion "output0" "output1" "output2"

+ -easm, --enable_auto_split_model
+ Force auto split regardless of the ONNX file size.
+ Uses --auto_split_max_size_mb as the target partition size.
+
+ -asmsm AUTO_SPLIT_MAX_SIZE_MB, --auto_split_max_size_mb AUTO_SPLIT_MAX_SIZE_MB
+ Target maximum size per partition in MB based on ONNX initializer sizes.
+ Used when auto-split is triggered or forced.
+ Default: 1024
+
  -dgc, --disable_group_convolution
  Disable GroupConvolution and replace it with SeparableConvolution for
  output to saved_model format.
@@ -2106,6 +2127,7 @@ convert(
  batch_size: Union[int, NoneType] = None,
  overwrite_input_shape: Union[List[str], NoneType] = None,
  shape_hints: Union[List[str], NoneType] = None,
+ value_hints: Union[List[str], NoneType] = None,
  no_large_tensor: Optional[bool] = False,
  output_nms_with_dynamic_tensor: Optional[bool] = False,
  switch_nms_version: Optional[str] = 'v4',
@@ -2114,6 +2136,8 @@ convert(
  keep_shape_absolutely_input_names: Optional[List[str]] = None,
  input_names_to_interrupt_model_conversion: Union[List[str], NoneType] = None,
  output_names_to_interrupt_model_conversion: Union[List[str], NoneType] = None,
+ enable_auto_split_model: Optional[bool] = False,
+ auto_split_max_size_mb: Optional[int] = 1024,
  disable_group_convolution: Union[bool, NoneType] = False,
  enable_batchmatmul_unfold: Optional[bool] = False,
  enable_rnn_unroll: Optional[bool] = False,
@@ -2324,6 +2348,13 @@ convert(
  A value of 1 or more must be specified.
  Numerical values other than dynamic dimensions are ignored.

+ value_hints: Optional[List[str]]
+ Value hints for dummy inference input tensors.
+ The format is
+ ['input_name_1:value', 'input_name_2:value', '*:default_value']
+ "*" applies to all inputs not explicitly specified.
+ Values are scalar only.
+
  no_large_tensor: Optional[bool]
  Suppresses constant bloat caused by Tile OP when optimizing models in onnxsim.
  See: https://github.com/daquexian/onnx-simplifier/issues/178
@@ -2382,6 +2413,17 @@ convert(
  e.g.
  output_names_to_interrupt_model_conversion=['output0','output1','output2']

+ enable_auto_split_model: Optional[bool]
+ Force auto split regardless of the ONNX file size.
+ Uses auto_split_max_size_mb as the target partition size.
+ Short option: -easm
+ Default: False
+
+ auto_split_max_size_mb: Optional[int]
+ Target maximum size per partition in MB based on ONNX initializer sizes.
+ Used when auto-split is triggered or forced.
+ Default: 1024
+
  disable_group_convolution: Optional[bool]
  Disable GroupConvolution and replace it with SeparableConvolution for
  output to saved_model format.
@@ -2968,6 +3010,7 @@ The above differences often cannot be dealt with by simply converting the model
  14. [nobuco](https://github.com/AlexanderLutsenko/nobuco)
  15. [onnx2torch](https://github.com/ENOT-AutoDL/onnx2torch)
  16. [ai-edge-torch](https://github.com/google-ai-edge/ai-edge-torch)
+ 17. [LiteRT.js](https://ai.google.dev/edge/litert/web)

  ## Acknowledgement
  1. https://github.com/onnx/models
{onnx2tf-1.29.19 → onnx2tf-1.29.21}/onnx2tf/__init__.py

@@ -1,3 +1,3 @@
  from onnx2tf.onnx2tf import convert, main

- __version__ = '1.29.19'
+ __version__ = '1.29.21'