onnx2tf 1.26.9.tar.gz → 1.27.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (202)
  1. {onnx2tf-1.26.9/onnx2tf.egg-info → onnx2tf-1.27.1}/PKG-INFO +32 -27
  2. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/README.md +29 -25
  3. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/__init__.py +1 -1
  4. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Add.py +2 -2
  5. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Cast.py +7 -1
  6. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Div.py +2 -2
  7. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/MatMul.py +6 -0
  8. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Mod.py +2 -2
  9. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Mul.py +2 -2
  10. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Sub.py +2 -2
  11. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/utils/common_functions.py +2 -2
  12. {onnx2tf-1.26.9 → onnx2tf-1.27.1/onnx2tf.egg-info}/PKG-INFO +32 -27
  13. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/LICENSE +0 -0
  14. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/LICENSE_onnx-tensorflow +0 -0
  15. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/__main__.py +0 -0
  16. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/onnx2tf.py +0 -0
  17. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Abs.py +0 -0
  18. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Acos.py +0 -0
  19. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Acosh.py +0 -0
  20. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/And.py +0 -0
  21. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/ArgMax.py +0 -0
  22. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/ArgMin.py +0 -0
  23. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Asin.py +0 -0
  24. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Asinh.py +0 -0
  25. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Atan.py +0 -0
  26. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Atanh.py +0 -0
  27. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/AveragePool.py +0 -0
  28. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/BatchNormalization.py +0 -0
  29. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Bernoulli.py +0 -0
  30. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/BitShift.py +0 -0
  31. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Ceil.py +0 -0
  32. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Celu.py +0 -0
  33. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Clip.py +0 -0
  34. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Col2Im.py +0 -0
  35. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Compress.py +0 -0
  36. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Concat.py +0 -0
  37. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/ConcatFromSequence.py +0 -0
  38. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Constant.py +0 -0
  39. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/ConstantOfShape.py +0 -0
  40. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Conv.py +0 -0
  41. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/ConvInteger.py +0 -0
  42. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/ConvTranspose.py +0 -0
  43. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Cos.py +0 -0
  44. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Cosh.py +0 -0
  45. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/CumSum.py +0 -0
  46. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/DepthToSpace.py +0 -0
  47. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/DequantizeLinear.py +0 -0
  48. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Det.py +0 -0
  49. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Dropout.py +0 -0
  50. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/DynamicQuantizeLinear.py +0 -0
  51. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Einsum.py +0 -0
  52. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Elu.py +0 -0
  53. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Equal.py +0 -0
  54. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Erf.py +0 -0
  55. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Exp.py +0 -0
  56. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Expand.py +0 -0
  57. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/EyeLike.py +0 -0
  58. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Flatten.py +0 -0
  59. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Floor.py +0 -0
  60. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/FusedConv.py +0 -0
  61. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/GRU.py +0 -0
  62. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Gather.py +0 -0
  63. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/GatherElements.py +0 -0
  64. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/GatherND.py +0 -0
  65. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Gelu.py +0 -0
  66. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Gemm.py +0 -0
  67. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/GlobalAveragePool.py +0 -0
  68. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/GlobalLpPool.py +0 -0
  69. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/GlobalMaxPool.py +0 -0
  70. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Greater.py +0 -0
  71. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/GreaterOrEqual.py +0 -0
  72. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/GridSample.py +0 -0
  73. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/GroupNorm.py +0 -0
  74. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/HammingWindow.py +0 -0
  75. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/HannWindow.py +0 -0
  76. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/HardSigmoid.py +0 -0
  77. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/HardSwish.py +0 -0
  78. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Hardmax.py +0 -0
  79. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Identity.py +0 -0
  80. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/If.py +0 -0
  81. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Input.py +0 -0
  82. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/InstanceNormalization.py +0 -0
  83. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Inverse.py +0 -0
  84. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/IsInf.py +0 -0
  85. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/IsNaN.py +0 -0
  86. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/LRN.py +0 -0
  87. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/LSTM.py +0 -0
  88. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/LayerNormalization.py +0 -0
  89. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/LeakyRelu.py +0 -0
  90. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Less.py +0 -0
  91. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/LessOrEqual.py +0 -0
  92. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Log.py +0 -0
  93. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/LogSoftmax.py +0 -0
  94. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/LpNormalization.py +0 -0
  95. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/MatMulInteger.py +0 -0
  96. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Max.py +0 -0
  97. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/MaxPool.py +0 -0
  98. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/MaxUnpool.py +0 -0
  99. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Mean.py +0 -0
  100. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/MeanVarianceNormalization.py +0 -0
  101. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/MelWeightMatrix.py +0 -0
  102. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Min.py +0 -0
  103. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Mish.py +0 -0
  104. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Multinomial.py +0 -0
  105. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Neg.py +0 -0
  106. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/NonMaxSuppression.py +0 -0
  107. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/NonZero.py +0 -0
  108. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Not.py +0 -0
  109. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/OneHot.py +0 -0
  110. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/OptionalGetElement.py +0 -0
  111. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/OptionalHasElement.py +0 -0
  112. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Or.py +0 -0
  113. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/PRelu.py +0 -0
  114. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Pad.py +0 -0
  115. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Pow.py +0 -0
  116. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/QLinearAdd.py +0 -0
  117. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/QLinearConcat.py +0 -0
  118. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/QLinearConv.py +0 -0
  119. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/QLinearLeakyRelu.py +0 -0
  120. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/QLinearMatMul.py +0 -0
  121. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/QLinearMul.py +0 -0
  122. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/QLinearSigmoid.py +0 -0
  123. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/QLinearSoftmax.py +0 -0
  124. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/QuantizeLinear.py +0 -0
  125. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/RNN.py +0 -0
  126. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/RandomNormal.py +0 -0
  127. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/RandomNormalLike.py +0 -0
  128. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/RandomUniform.py +0 -0
  129. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/RandomUniformLike.py +0 -0
  130. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Range.py +0 -0
  131. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Reciprocal.py +0 -0
  132. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/ReduceL1.py +0 -0
  133. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/ReduceL2.py +0 -0
  134. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/ReduceLogSum.py +0 -0
  135. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/ReduceLogSumExp.py +0 -0
  136. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/ReduceMax.py +0 -0
  137. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/ReduceMean.py +0 -0
  138. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/ReduceMin.py +0 -0
  139. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/ReduceProd.py +0 -0
  140. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/ReduceSum.py +0 -0
  141. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/ReduceSumSquare.py +0 -0
  142. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Relu.py +0 -0
  143. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Reshape.py +0 -0
  144. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Resize.py +0 -0
  145. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/ReverseSequence.py +0 -0
  146. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/RoiAlign.py +0 -0
  147. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Round.py +0 -0
  148. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/STFT.py +0 -0
  149. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/ScaleAndTranslate.py +0 -0
  150. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Scatter.py +0 -0
  151. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/ScatterElements.py +0 -0
  152. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/ScatterND.py +0 -0
  153. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Selu.py +0 -0
  154. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/SequenceAt.py +0 -0
  155. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/SequenceConstruct.py +0 -0
  156. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/SequenceEmpty.py +0 -0
  157. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/SequenceErase.py +0 -0
  158. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/SequenceInsert.py +0 -0
  159. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/SequenceLength.py +0 -0
  160. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Shape.py +0 -0
  161. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Shrink.py +0 -0
  162. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Sigmoid.py +0 -0
  163. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Sign.py +0 -0
  164. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Sin.py +0 -0
  165. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Sinh.py +0 -0
  166. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Size.py +0 -0
  167. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Slice.py +0 -0
  168. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Softmax.py +0 -0
  169. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Softplus.py +0 -0
  170. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Softsign.py +0 -0
  171. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/SpaceToDepth.py +0 -0
  172. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Split.py +0 -0
  173. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/SplitToSequence.py +0 -0
  174. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Sqrt.py +0 -0
  175. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Squeeze.py +0 -0
  176. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/StringNormalizer.py +0 -0
  177. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Sum.py +0 -0
  178. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Tan.py +0 -0
  179. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Tanh.py +0 -0
  180. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/ThresholdedRelu.py +0 -0
  181. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Tile.py +0 -0
  182. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/TopK.py +0 -0
  183. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Transpose.py +0 -0
  184. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Trilu.py +0 -0
  185. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Unique.py +0 -0
  186. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Unsqueeze.py +0 -0
  187. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Upsample.py +0 -0
  188. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Where.py +0 -0
  189. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Xor.py +0 -0
  190. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/_Loop.py +0 -0
  191. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/__Loop.py +0 -0
  192. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/__init__.py +0 -0
  193. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/utils/__init__.py +0 -0
  194. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/utils/enums.py +0 -0
  195. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/utils/logging.py +0 -0
  196. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf.egg-info/SOURCES.txt +0 -0
  197. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf.egg-info/dependency_links.txt +0 -0
  198. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf.egg-info/entry_points.txt +0 -0
  199. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf.egg-info/top_level.txt +0 -0
  200. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/setup.cfg +0 -0
  201. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/setup.py +0 -0
  202. {onnx2tf-1.26.9 → onnx2tf-1.27.1}/tests/test_model_convert.py +0 -0

{onnx2tf-1.26.9/onnx2tf.egg-info → onnx2tf-1.27.1}/PKG-INFO (+32 -27)
@@ -1,6 +1,6 @@
- Metadata-Version: 2.2
+ Metadata-Version: 2.4
  Name: onnx2tf
- Version: 1.26.9
+ Version: 1.27.1
  Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
  Home-page: https://github.com/PINTO0309/onnx2tf
  Author: Katsuya Hyodo
@@ -18,6 +18,7 @@ Dynamic: description
  Dynamic: description-content-type
  Dynamic: home-page
  Dynamic: license
+ Dynamic: license-file
  Dynamic: platform
  Dynamic: requires-python
  Dynamic: summary
@@ -280,14 +281,16 @@ Video speed is adjusted approximately 50 times slower than actual speed.

  ## Environment
  - Linux / Windows
- - onnx==1.16.1
+ - onnx==1.17.0
  - onnxruntime==1.18.1
  - onnx-simplifier==0.4.33 or 0.4.30 `(onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] (op_type:Slice, node name: /xxxx/Slice): [ShapeInferenceError] Inferred shape and existing shape differ in rank: (x) vs (y))`
  - onnx_graphsurgeon
  - simple_onnx_processing_tools
- - tensorflow==2.17.0, Special bugs: [#436](https://github.com/PINTO0309/onnx2tf/issues/436)
+ - tensorflow==2.19.0, Special bugs: [#436](https://github.com/PINTO0309/onnx2tf/issues/436)
+ - tf-keras==2.19.0
+ - ai-edge-litert==1.2.0
  - psutil==5.9.5
- - ml_dtypes==0.3.2
+ - ml_dtypes==0.5.1
  - flatbuffers-compiler (Optional, Only when using the `-coion` option. Executable file named `flatc`.)
  - flatbuffers>=23.1.21
  ```bash
@@ -331,7 +334,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- ghcr.io/pinto0309/onnx2tf:1.26.9
+ ghcr.io/pinto0309/onnx2tf:1.27.1

  or

@@ -339,11 +342,11 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- docker.io/pinto0309/onnx2tf:1.26.9
+ docker.io/pinto0309/onnx2tf:1.27.1

  or

- pip install -U onnx==1.16.1 \
+ pip install -U onnx==1.17.0 \
  && pip install -U nvidia-pyindex \
  && pip install -U onnx-graphsurgeon \
  && pip install -U onnxruntime==1.18.1 \
@@ -351,13 +354,14 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  && pip install -U simple_onnx_processing_tools \
  && pip install -U sne4onnx>=1.0.13 \
  && pip install -U sng4onnx>=1.0.4 \
- && pip install -U tensorflow==2.17.0 \
+ && pip install -U ai_edge_litert==1.2.0 \
+ && pip install -U tensorflow==2.19.0 \
  && pip install -U protobuf==3.20.3 \
  && pip install -U onnx2tf \
  && pip install -U h5py==3.11.0 \
  && pip install -U psutil==5.9.5 \
- && pip install -U ml_dtypes==0.3.2 \
- && pip install -U tf-keras~=2.16 \
+ && pip install -U ml_dtypes==0.5.1 \
+ && pip install -U tf-keras==2.19.0 \
  && pip install flatbuffers>=23.5.26

  or
@@ -381,8 +385,9 @@ or
  && sudo chmod +x flatc \
  && sudo mv flatc /usr/bin/
  !pip install -U pip \
- && pip install tensorflow==2.17.0 \
- && pip install -U onnx==1.16.1 \
+ && pip install tensorflow==2.19.0 \
+ && pip install ai_edge_litert==1.2.0 \
+ && pip install -U onnx==1.17.0 \
  && python -m pip install onnx_graphsurgeon \
  --index-url https://pypi.ngc.nvidia.com \
  && pip install -U onnxruntime==1.18.1 \
@@ -392,8 +397,8 @@ or
  && pip install -U protobuf==3.20.3 \
  && pip install -U h5py==3.11.0 \
  && pip install -U psutil==5.9.5 \
- && pip install -U ml_dtypes==0.3.2 \
- && pip install -U tf-keras~=2.16 \
+ && pip install -U ml_dtypes==0.5.1 \
+ && pip install -U tf-keras==2.19.0 \
  && pip install flatbuffers>=23.5.26
  ```

@@ -608,7 +613,7 @@ import onnxruntime
  import numpy as np
  import onnx2tf
  import tensorflow as tf
- from tensorflow.lite.python import interpreter as tflite_interpreter
+ from ai_edge_litert.interpreter import Interpreter

  class Model(torch.nn.Module):
  def forward(self, x, y):
@@ -647,7 +652,7 @@ onnx2tf.convert(
  )

  # Now, test the newer TFLite model
- interpreter = tf.lite.Interpreter(model_path="model.tf/model_float32.tflite")
+ interpreter = Interpreter(model_path="model.tf/model_float32.tflite")
  tf_lite_model = interpreter.get_signature_runner()
  inputs = {
  'x': np.asarray([10], dtype=np.int64),
@@ -1061,10 +1066,10 @@ Now, let's try inference with the TFLite runtime instead of the TensorFlow runti
  import time
  import numpy as np
  np.random.seed(0)
- import tensorflow as tf
+ from ai_edge_litert.interpreter import Interpreter

  # Load TFLite model
- interpreter = tf.lite.Interpreter(model_path="./saved_model/prelu_check_float32.tflite")
+ interpreter = Interpreter(model_path="./saved_model/prelu_check_float32.tflite")
  interpreter.allocate_tensors()
  tensor_shape = (256, 20)
  input_data = {'waveform': np.random.randn(*tensor_shape).astype(np.float32)}
@@ -1232,10 +1237,10 @@ You can use `signature_runner` to handle dynamic input tensors by performing inf
  - `test.py` - Batch size: `5`
  ```python
  import numpy as np
- import tensorflow as tf
+ from ai_edge_litert.interpreter import Interpreter
  from pprint import pprint

- interpreter = tf.lite.Interpreter(model_path="saved_model/osnet_x0_25_msmt17_float32.tflite")
+ interpreter = Interpreter(model_path="saved_model/osnet_x0_25_msmt17_float32.tflite")
  tf_lite_model = interpreter.get_signature_runner()
  inputs = {
  'images': np.ones([5,256,128,3], dtype=np.float32),
@@ -1263,10 +1268,10 @@ You can use `signature_runner` to handle dynamic input tensors by performing inf
  - `test.py` - Batch size: `3`
  ```python
  import numpy as np
- import tensorflow as tf
+ from ai_edge_litert.interpreter import Interpreter
  from pprint import pprint

- interpreter = tf.lite.Interpreter(model_path="saved_model/osnet_x0_25_msmt17_float32.tflite")
+ interpreter = Interpreter(model_path="saved_model/osnet_x0_25_msmt17_float32.tflite")
  tf_lite_model = interpreter.get_signature_runner()
  inputs = {
  'images': np.ones([3,256,128,3], dtype=np.float32),
@@ -1350,11 +1355,11 @@ The relationship between the ONNX before conversion and the TFLite file after co
  Use the generated TFLite file to inference and ensure that it always contains fixed value output.

  ```python
- import tensorflow as tf
+ from ai_edge_litert.interpreter import Interpreter
  import numpy as np
  from pprint import pprint

- interpreter = tf.lite.Interpreter(model_path="saved_model/toy_with_constant_float32.tflite")
+ interpreter = Interpreter(model_path="saved_model/toy_with_constant_float32.tflite")
  interpreter.allocate_tensors()

  input_details = interpreter.get_input_details()
@@ -1508,7 +1513,7 @@ See: https://github.com/tensorflow/tfjs/tree/master/tfjs-converter
  When converting to CoreML, process as follows. The `-k` option is for conversion while maintaining the input channel order in ONNX's NCHW format.

  ```bash
- pip install coremltools
+ pip install coremltools==8.2

  onnx2tf -i mobilenetv2-12.onnx -k input -ois input:1,3,224,224 -osd
  ```
@@ -1521,7 +1526,7 @@ model = ct.convert(
  model=FOLDER_PATH,
  source='tensorflow',
  )
- model.save(f'{FOLDER_PATH}/model.mlmodel')
+ model.save(f'{FOLDER_PATH}/model.mlpackage')
  ```

  See: https://github.com/apple/coremltools

{onnx2tf-1.26.9 → onnx2tf-1.27.1}/README.md (+29 -25)
@@ -256,14 +256,16 @@ Video speed is adjusted approximately 50 times slower than actual speed.

  ## Environment
  - Linux / Windows
- - onnx==1.16.1
+ - onnx==1.17.0
  - onnxruntime==1.18.1
  - onnx-simplifier==0.4.33 or 0.4.30 `(onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] (op_type:Slice, node name: /xxxx/Slice): [ShapeInferenceError] Inferred shape and existing shape differ in rank: (x) vs (y))`
  - onnx_graphsurgeon
  - simple_onnx_processing_tools
- - tensorflow==2.17.0, Special bugs: [#436](https://github.com/PINTO0309/onnx2tf/issues/436)
+ - tensorflow==2.19.0, Special bugs: [#436](https://github.com/PINTO0309/onnx2tf/issues/436)
+ - tf-keras==2.19.0
+ - ai-edge-litert==1.2.0
  - psutil==5.9.5
- - ml_dtypes==0.3.2
+ - ml_dtypes==0.5.1
  - flatbuffers-compiler (Optional, Only when using the `-coion` option. Executable file named `flatc`.)
  - flatbuffers>=23.1.21
  ```bash
@@ -307,7 +309,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- ghcr.io/pinto0309/onnx2tf:1.26.9
+ ghcr.io/pinto0309/onnx2tf:1.27.1

  or

@@ -315,11 +317,11 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- docker.io/pinto0309/onnx2tf:1.26.9
+ docker.io/pinto0309/onnx2tf:1.27.1

  or

- pip install -U onnx==1.16.1 \
+ pip install -U onnx==1.17.0 \
  && pip install -U nvidia-pyindex \
  && pip install -U onnx-graphsurgeon \
  && pip install -U onnxruntime==1.18.1 \
@@ -327,13 +329,14 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  && pip install -U simple_onnx_processing_tools \
  && pip install -U sne4onnx>=1.0.13 \
  && pip install -U sng4onnx>=1.0.4 \
- && pip install -U tensorflow==2.17.0 \
+ && pip install -U ai_edge_litert==1.2.0 \
+ && pip install -U tensorflow==2.19.0 \
  && pip install -U protobuf==3.20.3 \
  && pip install -U onnx2tf \
  && pip install -U h5py==3.11.0 \
  && pip install -U psutil==5.9.5 \
- && pip install -U ml_dtypes==0.3.2 \
- && pip install -U tf-keras~=2.16 \
+ && pip install -U ml_dtypes==0.5.1 \
+ && pip install -U tf-keras==2.19.0 \
  && pip install flatbuffers>=23.5.26

  or
@@ -357,8 +360,9 @@ or
  && sudo chmod +x flatc \
  && sudo mv flatc /usr/bin/
  !pip install -U pip \
- && pip install tensorflow==2.17.0 \
- && pip install -U onnx==1.16.1 \
+ && pip install tensorflow==2.19.0 \
+ && pip install ai_edge_litert==1.2.0 \
+ && pip install -U onnx==1.17.0 \
  && python -m pip install onnx_graphsurgeon \
  --index-url https://pypi.ngc.nvidia.com \
  && pip install -U onnxruntime==1.18.1 \
@@ -368,8 +372,8 @@ or
  && pip install -U protobuf==3.20.3 \
  && pip install -U h5py==3.11.0 \
  && pip install -U psutil==5.9.5 \
- && pip install -U ml_dtypes==0.3.2 \
- && pip install -U tf-keras~=2.16 \
+ && pip install -U ml_dtypes==0.5.1 \
+ && pip install -U tf-keras==2.19.0 \
  && pip install flatbuffers>=23.5.26
  ```

@@ -584,7 +588,7 @@ import onnxruntime
  import numpy as np
  import onnx2tf
  import tensorflow as tf
- from tensorflow.lite.python import interpreter as tflite_interpreter
+ from ai_edge_litert.interpreter import Interpreter

  class Model(torch.nn.Module):
  def forward(self, x, y):
@@ -623,7 +627,7 @@ onnx2tf.convert(
  )

  # Now, test the newer TFLite model
- interpreter = tf.lite.Interpreter(model_path="model.tf/model_float32.tflite")
+ interpreter = Interpreter(model_path="model.tf/model_float32.tflite")
  tf_lite_model = interpreter.get_signature_runner()
  inputs = {
  'x': np.asarray([10], dtype=np.int64),
@@ -1037,10 +1041,10 @@ Now, let's try inference with the TFLite runtime instead of the TensorFlow runti
  import time
  import numpy as np
  np.random.seed(0)
- import tensorflow as tf
+ from ai_edge_litert.interpreter import Interpreter

  # Load TFLite model
- interpreter = tf.lite.Interpreter(model_path="./saved_model/prelu_check_float32.tflite")
+ interpreter = Interpreter(model_path="./saved_model/prelu_check_float32.tflite")
  interpreter.allocate_tensors()
  tensor_shape = (256, 20)
  input_data = {'waveform': np.random.randn(*tensor_shape).astype(np.float32)}
@@ -1208,10 +1212,10 @@ You can use `signature_runner` to handle dynamic input tensors by performing inf
  - `test.py` - Batch size: `5`
  ```python
  import numpy as np
- import tensorflow as tf
+ from ai_edge_litert.interpreter import Interpreter
  from pprint import pprint

- interpreter = tf.lite.Interpreter(model_path="saved_model/osnet_x0_25_msmt17_float32.tflite")
+ interpreter = Interpreter(model_path="saved_model/osnet_x0_25_msmt17_float32.tflite")
  tf_lite_model = interpreter.get_signature_runner()
  inputs = {
  'images': np.ones([5,256,128,3], dtype=np.float32),
@@ -1239,10 +1243,10 @@ You can use `signature_runner` to handle dynamic input tensors by performing inf
  - `test.py` - Batch size: `3`
  ```python
  import numpy as np
- import tensorflow as tf
+ from ai_edge_litert.interpreter import Interpreter
  from pprint import pprint

- interpreter = tf.lite.Interpreter(model_path="saved_model/osnet_x0_25_msmt17_float32.tflite")
+ interpreter = Interpreter(model_path="saved_model/osnet_x0_25_msmt17_float32.tflite")
  tf_lite_model = interpreter.get_signature_runner()
  inputs = {
  'images': np.ones([3,256,128,3], dtype=np.float32),
@@ -1326,11 +1330,11 @@ The relationship between the ONNX before conversion and the TFLite file after co
  Use the generated TFLite file to inference and ensure that it always contains fixed value output.

  ```python
- import tensorflow as tf
+ from ai_edge_litert.interpreter import Interpreter
  import numpy as np
  from pprint import pprint

- interpreter = tf.lite.Interpreter(model_path="saved_model/toy_with_constant_float32.tflite")
+ interpreter = Interpreter(model_path="saved_model/toy_with_constant_float32.tflite")
  interpreter.allocate_tensors()

  input_details = interpreter.get_input_details()
@@ -1484,7 +1488,7 @@ See: https://github.com/tensorflow/tfjs/tree/master/tfjs-converter
  When converting to CoreML, process as follows. The `-k` option is for conversion while maintaining the input channel order in ONNX's NCHW format.

  ```bash
- pip install coremltools
+ pip install coremltools==8.2

  onnx2tf -i mobilenetv2-12.onnx -k input -ois input:1,3,224,224 -osd
  ```
@@ -1497,7 +1501,7 @@ model = ct.convert(
  model=FOLDER_PATH,
  source='tensorflow',
  )
- model.save(f'{FOLDER_PATH}/model.mlmodel')
+ model.save(f'{FOLDER_PATH}/model.mlpackage')
  ```

  See: https://github.com/apple/coremltools
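
Across both documentation diffs above, the only change to the inference snippets is the interpreter import: `tf.lite.Interpreter` is replaced by `ai_edge_litert.interpreter.Interpreter`, while the surrounding calls (`allocate_tensors`, `get_signature_runner`, `get_input_details`, and so on) stay the same. Below is a minimal sketch of the migrated pattern; the model path is illustrative and a float32 input is assumed.

```python
import numpy as np
from ai_edge_litert.interpreter import Interpreter  # previously: tf.lite.Interpreter

# Illustrative path; any TFLite file produced by onnx2tf should load the same way.
interpreter = Interpreter(model_path="saved_model/model_float32.tflite")
interpreter.allocate_tensors()

input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Feed random data shaped like the model's first input (assumed float32), then run it.
dummy = np.random.randn(*input_details[0]['shape']).astype(np.float32)
interpreter.set_tensor(input_details[0]['index'], dummy)
interpreter.invoke()
print(interpreter.get_tensor(output_details[0]['index']).shape)
```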

{onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/__init__.py (+1 -1)
@@ -1,3 +1,3 @@
  from onnx2tf.onnx2tf import convert, main

- __version__ = '1.26.9'
+ __version__ = '1.27.1'

{onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Add.py (+2 -2)
@@ -130,9 +130,9 @@ def make_node(
  )

  # Workaround for ConvInteger
- if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64]:
+ if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64, tf.float16]:
  input_tensor_2 = tf.cast(input_tensor_2, dtype=tf.float32)
- elif input_tensor_1.dtype in [tf.int32, tf.int64] and input_tensor_2.dtype == tf.float32:
+ elif input_tensor_1.dtype in [tf.int32, tf.int64, tf.float16] and input_tensor_2.dtype == tf.float32:
  input_tensor_1 = tf.cast(input_tensor_1, dtype=tf.float32)

  # Disable unnecessary Transpose
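
This hunk (repeated in Div, Mod, Mul and Sub below) extends the existing operand-dtype workaround so that a float16 operand meeting a float32 operand is also promoted to float32 before the elementwise op is emitted. A standalone sketch of that alignment logic follows; `_align_binary_operands` is a hypothetical helper name, not part of onnx2tf.

```python
import tensorflow as tf

def _align_binary_operands(x: tf.Tensor, y: tf.Tensor):
    """Sketch of the converter's workaround: if one operand is float32 and
    the other is int32/int64/float16, cast the latter up to float32 so the
    elementwise op runs in a single dtype."""
    promote = (tf.int32, tf.int64, tf.float16)
    if x.dtype == tf.float32 and y.dtype in promote:
        y = tf.cast(y, tf.float32)
    elif x.dtype in promote and y.dtype == tf.float32:
        x = tf.cast(x, tf.float32)
    return x, y

a = tf.constant([1.0, 2.0], dtype=tf.float32)
b = tf.constant([0.5, 0.25], dtype=tf.float16)
a, b = _align_binary_operands(a, b)
print(tf.math.add(a, b).dtype)  # tf.float32
```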

{onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Cast.py (+7 -1)
@@ -96,10 +96,16 @@ def make_node(
  tf_layers_dict[graph_node_output.name].pop('nhwc')

  # Suppression of FlexCast generation
- # Float32 -> Float64
+ # Float64 -> Float32
+ # Float16 -> Float32
  if input_tensor.dtype == tf.float32 \
  and to == tf.float64:
  to = tf.float32
+ elif isinstance(graph_node.inputs[0], gs.Variable) \
+ and hasattr(graph_node.inputs[0], "dtype") \
+ and graph_node.inputs[0].dtype == np.float32 \
+ and to == tf.float16:
+ to = tf.float32

  # Generation of TF OP
  tf_layers_dict[graph_node_output.name]['tf_node'] = \
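
The Cast.py change adds a second suppression path: alongside the existing Float64 case, a Cast whose graph input is a float32 variable and whose target is float16 is now kept at float32, which is how the converter avoids emitting a Flex Cast here. A simplified sketch of the decision is shown below; the real code inspects the onnx-graphsurgeon `gs.Variable` input, whereas the sketch takes a bare dtype argument.

```python
import tensorflow as tf

def resolve_cast_target(input_dtype, to):
    """Simplified sketch: rewrite Cast targets that the converter treats as
    FlexCast risks when the incoming tensor is float32."""
    if input_dtype == tf.float32 and to == tf.float64:
        return tf.float32  # existing Float64 suppression
    if input_dtype == tf.float32 and to == tf.float16:
        return tf.float32  # new Float16 suppression in 1.27.x
    return to

print(resolve_cast_target(tf.float32, tf.float64))  # float32
print(resolve_cast_target(tf.float32, tf.float16))  # float32
print(resolve_cast_target(tf.int32, tf.float16))    # float16 (unchanged)
```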

{onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Div.py (+2 -2)
@@ -116,9 +116,9 @@ def make_node(
  )

  # Workaround for ConvInteger
- if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64]:
+ if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64, tf.float16]:
  input_tensor_2 = tf.cast(input_tensor_2, dtype=tf.float32)
- elif input_tensor_1.dtype in [tf.int32, tf.int64] and input_tensor_2.dtype == tf.float32:
+ elif input_tensor_1.dtype in [tf.int32, tf.int64, tf.float16] and input_tensor_2.dtype == tf.float32:
  input_tensor_1 = tf.cast(input_tensor_1, dtype=tf.float32)

  # Disable unnecessary Transpose

{onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/MatMul.py (+6 -0)
@@ -128,6 +128,12 @@ def make_node(
  output_dtype = NUMPY_DTYPES_TO_TF_DTYPES[dtype] \
  if isinstance(dtype, np.dtype) else dtype

+ # Workaround for Float16
+ if input_tensor_1.dtype == tf.float32 and output_dtype in [tf.int32, tf.int64, tf.float16]:
+ output_dtype = tf.float32
+ elif output_dtype and input_tensor_2.dtype == tf.float32:
+ output_dtype = tf.float32
+
  # Shape Unmatch Error Mitigation Measures
  # Search for and transpose shapes that do not cause shape unmatch errors
  min_abs_err = sys.maxsize
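
MatMul.py gains a complementary guard on the requested output dtype: when the left operand is float32 and the ONNX node asks for an int32/int64/float16 result, or when the right operand is float32 and any output dtype is requested, the result is kept in float32. A sketch of just that decision; `resolve_matmul_output_dtype` is a hypothetical helper name.

```python
import tensorflow as tf

def resolve_matmul_output_dtype(lhs_dtype, rhs_dtype, requested):
    """Sketch of the new MatMul guard: keep the result in float32 when a
    float32 operand would otherwise be forced into an int32/int64/float16
    output (or when the right operand is float32)."""
    if lhs_dtype == tf.float32 and requested in (tf.int32, tf.int64, tf.float16):
        return tf.float32
    if requested is not None and rhs_dtype == tf.float32:
        return tf.float32
    return requested

print(resolve_matmul_output_dtype(tf.float32, tf.float16, tf.float16))  # float32
```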

{onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Mod.py (+2 -2)
@@ -115,9 +115,9 @@ def make_node(
  )

  # Workaround for ConvInteger
- if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64]:
+ if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64, tf.float16]:
  input_tensor_2 = tf.cast(input_tensor_2, dtype=tf.float32)
- elif input_tensor_1.dtype in [tf.int32, tf.int64] and input_tensor_2.dtype == tf.float32:
+ elif input_tensor_1.dtype in [tf.int32, tf.int64, tf.float16] and input_tensor_2.dtype == tf.float32:
  input_tensor_1 = tf.cast(input_tensor_1, dtype=tf.float32)

  # Disable unnecessary Transpose

{onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Mul.py (+2 -2)
@@ -120,9 +120,9 @@ def make_node(
  )

  # Workaround for ConvInteger
- if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64]:
+ if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64, tf.float16]:
  input_tensor_2 = tf.cast(input_tensor_2, dtype=tf.float32)
- elif input_tensor_1.dtype in [tf.int32, tf.int64] and input_tensor_2.dtype == tf.float32:
+ elif input_tensor_1.dtype in [tf.int32, tf.int64, tf.float16] and input_tensor_2.dtype == tf.float32:
  input_tensor_1 = tf.cast(input_tensor_1, dtype=tf.float32)

  # Disable unnecessary Transpose

{onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/ops/Sub.py (+2 -2)
@@ -114,9 +114,9 @@ def make_node(
  )

  # Workaround for ConvInteger
- if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64]:
+ if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64, tf.float16]:
  input_tensor_2 = tf.cast(input_tensor_2, dtype=tf.float32)
- elif input_tensor_1.dtype in [tf.int32, tf.int64] and input_tensor_2.dtype == tf.float32:
+ elif input_tensor_1.dtype in [tf.int32, tf.int64, tf.float16] and input_tensor_2.dtype == tf.float32:
  input_tensor_1 = tf.cast(input_tensor_1, dtype=tf.float32)

  # Disable unnecessary Transpose

{onnx2tf-1.26.9 → onnx2tf-1.27.1}/onnx2tf/utils/common_functions.py (+2 -2)
@@ -16,6 +16,7 @@ import subprocess
  import numpy as np
  np.random.seed(0)
  import tensorflow as tf
+ from ai_edge_litert.interpreter import Interpreter
  import tf_keras
  from tensorflow.python.keras.layers import Lambda
  from tensorflow.python.keras.utils import conv_utils
@@ -4143,8 +4144,7 @@ def weights_export(
  Path to file in hdf5 format to save the extracted weights
  """
  import h5py
- from tensorflow.lite.python import interpreter as interpreter_wrapper
- interpreter = interpreter_wrapper.Interpreter(
+ interpreter = Interpreter(
  model_path=extract_target_tflite_file_path,
  )
  interpreter.allocate_tensors()
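
In `common_functions.py` the weight-extraction path now opens the TFLite file with the LiteRT interpreter instead of `tensorflow.lite.python.interpreter`. Below is a small sketch of opening a TFLite file with that interpreter and enumerating its tensors, assuming the LiteRT `Interpreter` exposes the same `get_tensor_details()` API as `tf.lite.Interpreter`; the h5py export performed by `weights_export` is omitted and the path is illustrative.

```python
from ai_edge_litert.interpreter import Interpreter

def list_tflite_tensors(tflite_path: str):
    """Return (name, shape, dtype) for every tensor in a TFLite file."""
    interpreter = Interpreter(model_path=tflite_path)
    interpreter.allocate_tensors()
    return [
        (d['name'], tuple(d['shape']), d['dtype'])
        for d in interpreter.get_tensor_details()
    ]

for name, shape, dtype in list_tflite_tensors("saved_model/model_float32.tflite"):
    print(name, shape, dtype)
```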

{onnx2tf-1.26.9 → onnx2tf-1.27.1/onnx2tf.egg-info}/PKG-INFO (+32 -27)
@@ -1,6 +1,6 @@
- Metadata-Version: 2.2
+ Metadata-Version: 2.4
  Name: onnx2tf
- Version: 1.26.9
+ Version: 1.27.1
  Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
  Home-page: https://github.com/PINTO0309/onnx2tf
  Author: Katsuya Hyodo
@@ -18,6 +18,7 @@ Dynamic: description
  Dynamic: description-content-type
  Dynamic: home-page
  Dynamic: license
+ Dynamic: license-file
  Dynamic: platform
  Dynamic: requires-python
  Dynamic: summary
@@ -280,14 +281,16 @@ Video speed is adjusted approximately 50 times slower than actual speed.

  ## Environment
  - Linux / Windows
- - onnx==1.16.1
+ - onnx==1.17.0
  - onnxruntime==1.18.1
  - onnx-simplifier==0.4.33 or 0.4.30 `(onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] (op_type:Slice, node name: /xxxx/Slice): [ShapeInferenceError] Inferred shape and existing shape differ in rank: (x) vs (y))`
  - onnx_graphsurgeon
  - simple_onnx_processing_tools
- - tensorflow==2.17.0, Special bugs: [#436](https://github.com/PINTO0309/onnx2tf/issues/436)
+ - tensorflow==2.19.0, Special bugs: [#436](https://github.com/PINTO0309/onnx2tf/issues/436)
+ - tf-keras==2.19.0
+ - ai-edge-litert==1.2.0
  - psutil==5.9.5
- - ml_dtypes==0.3.2
+ - ml_dtypes==0.5.1
  - flatbuffers-compiler (Optional, Only when using the `-coion` option. Executable file named `flatc`.)
  - flatbuffers>=23.1.21
  ```bash
@@ -331,7 +334,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- ghcr.io/pinto0309/onnx2tf:1.26.9
+ ghcr.io/pinto0309/onnx2tf:1.27.1

  or

@@ -339,11 +342,11 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- docker.io/pinto0309/onnx2tf:1.26.9
+ docker.io/pinto0309/onnx2tf:1.27.1

  or

- pip install -U onnx==1.16.1 \
+ pip install -U onnx==1.17.0 \
  && pip install -U nvidia-pyindex \
  && pip install -U onnx-graphsurgeon \
  && pip install -U onnxruntime==1.18.1 \
@@ -351,13 +354,14 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  && pip install -U simple_onnx_processing_tools \
  && pip install -U sne4onnx>=1.0.13 \
  && pip install -U sng4onnx>=1.0.4 \
- && pip install -U tensorflow==2.17.0 \
+ && pip install -U ai_edge_litert==1.2.0 \
+ && pip install -U tensorflow==2.19.0 \
  && pip install -U protobuf==3.20.3 \
  && pip install -U onnx2tf \
  && pip install -U h5py==3.11.0 \
  && pip install -U psutil==5.9.5 \
- && pip install -U ml_dtypes==0.3.2 \
- && pip install -U tf-keras~=2.16 \
+ && pip install -U ml_dtypes==0.5.1 \
+ && pip install -U tf-keras==2.19.0 \
  && pip install flatbuffers>=23.5.26

  or
@@ -381,8 +385,9 @@ or
  && sudo chmod +x flatc \
  && sudo mv flatc /usr/bin/
  !pip install -U pip \
- && pip install tensorflow==2.17.0 \
- && pip install -U onnx==1.16.1 \
+ && pip install tensorflow==2.19.0 \
+ && pip install ai_edge_litert==1.2.0 \
+ && pip install -U onnx==1.17.0 \
  && python -m pip install onnx_graphsurgeon \
  --index-url https://pypi.ngc.nvidia.com \
  && pip install -U onnxruntime==1.18.1 \
@@ -392,8 +397,8 @@ or
  && pip install -U protobuf==3.20.3 \
  && pip install -U h5py==3.11.0 \
  && pip install -U psutil==5.9.5 \
- && pip install -U ml_dtypes==0.3.2 \
- && pip install -U tf-keras~=2.16 \
+ && pip install -U ml_dtypes==0.5.1 \
+ && pip install -U tf-keras==2.19.0 \
  && pip install flatbuffers>=23.5.26
  ```

@@ -608,7 +613,7 @@ import onnxruntime
  import numpy as np
  import onnx2tf
  import tensorflow as tf
- from tensorflow.lite.python import interpreter as tflite_interpreter
+ from ai_edge_litert.interpreter import Interpreter

  class Model(torch.nn.Module):
  def forward(self, x, y):
@@ -647,7 +652,7 @@ onnx2tf.convert(
  )

  # Now, test the newer TFLite model
- interpreter = tf.lite.Interpreter(model_path="model.tf/model_float32.tflite")
+ interpreter = Interpreter(model_path="model.tf/model_float32.tflite")
  tf_lite_model = interpreter.get_signature_runner()
  inputs = {
  'x': np.asarray([10], dtype=np.int64),
@@ -1061,10 +1066,10 @@ Now, let's try inference with the TFLite runtime instead of the TensorFlow runti
  import time
  import numpy as np
  np.random.seed(0)
- import tensorflow as tf
+ from ai_edge_litert.interpreter import Interpreter

  # Load TFLite model
- interpreter = tf.lite.Interpreter(model_path="./saved_model/prelu_check_float32.tflite")
+ interpreter = Interpreter(model_path="./saved_model/prelu_check_float32.tflite")
  interpreter.allocate_tensors()
  tensor_shape = (256, 20)
  input_data = {'waveform': np.random.randn(*tensor_shape).astype(np.float32)}
@@ -1232,10 +1237,10 @@ You can use `signature_runner` to handle dynamic input tensors by performing inf
  - `test.py` - Batch size: `5`
  ```python
  import numpy as np
- import tensorflow as tf
+ from ai_edge_litert.interpreter import Interpreter
  from pprint import pprint

- interpreter = tf.lite.Interpreter(model_path="saved_model/osnet_x0_25_msmt17_float32.tflite")
+ interpreter = Interpreter(model_path="saved_model/osnet_x0_25_msmt17_float32.tflite")
  tf_lite_model = interpreter.get_signature_runner()
  inputs = {
  'images': np.ones([5,256,128,3], dtype=np.float32),
@@ -1263,10 +1268,10 @@ You can use `signature_runner` to handle dynamic input tensors by performing inf
  - `test.py` - Batch size: `3`
  ```python
  import numpy as np
- import tensorflow as tf
+ from ai_edge_litert.interpreter import Interpreter
  from pprint import pprint

- interpreter = tf.lite.Interpreter(model_path="saved_model/osnet_x0_25_msmt17_float32.tflite")
+ interpreter = Interpreter(model_path="saved_model/osnet_x0_25_msmt17_float32.tflite")
  tf_lite_model = interpreter.get_signature_runner()
  inputs = {
  'images': np.ones([3,256,128,3], dtype=np.float32),
@@ -1350,11 +1355,11 @@ The relationship between the ONNX before conversion and the TFLite file after co
  Use the generated TFLite file to inference and ensure that it always contains fixed value output.

  ```python
- import tensorflow as tf
+ from ai_edge_litert.interpreter import Interpreter
  import numpy as np
  from pprint import pprint

- interpreter = tf.lite.Interpreter(model_path="saved_model/toy_with_constant_float32.tflite")
+ interpreter = Interpreter(model_path="saved_model/toy_with_constant_float32.tflite")
  interpreter.allocate_tensors()

  input_details = interpreter.get_input_details()
@@ -1508,7 +1513,7 @@ See: https://github.com/tensorflow/tfjs/tree/master/tfjs-converter
  When converting to CoreML, process as follows. The `-k` option is for conversion while maintaining the input channel order in ONNX's NCHW format.

  ```bash
- pip install coremltools
+ pip install coremltools==8.2

  onnx2tf -i mobilenetv2-12.onnx -k input -ois input:1,3,224,224 -osd
  ```
@@ -1521,7 +1526,7 @@ model = ct.convert(
  model=FOLDER_PATH,
  source='tensorflow',
  )
- model.save(f'{FOLDER_PATH}/model.mlmodel')
+ model.save(f'{FOLDER_PATH}/model.mlpackage')
  ```

  See: https://github.com/apple/coremltools