onnx2tf 1.26.8.tar.gz → 1.27.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (202)
  1. {onnx2tf-1.26.8/onnx2tf.egg-info → onnx2tf-1.27.0}/PKG-INFO +27 -23
  2. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/README.md +26 -22
  3. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/__init__.py +1 -1
  4. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/ReduceL1.py +2 -4
  5. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/ReduceL2.py +6 -6
  6. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/utils/common_functions.py +2 -2
  7. {onnx2tf-1.26.8 → onnx2tf-1.27.0/onnx2tf.egg-info}/PKG-INFO +27 -23
  8. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/LICENSE +0 -0
  9. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/LICENSE_onnx-tensorflow +0 -0
  10. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/__main__.py +0 -0
  11. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/onnx2tf.py +0 -0
  12. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Abs.py +0 -0
  13. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Acos.py +0 -0
  14. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Acosh.py +0 -0
  15. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Add.py +0 -0
  16. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/And.py +0 -0
  17. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/ArgMax.py +0 -0
  18. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/ArgMin.py +0 -0
  19. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Asin.py +0 -0
  20. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Asinh.py +0 -0
  21. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Atan.py +0 -0
  22. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Atanh.py +0 -0
  23. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/AveragePool.py +0 -0
  24. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/BatchNormalization.py +0 -0
  25. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Bernoulli.py +0 -0
  26. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/BitShift.py +0 -0
  27. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Cast.py +0 -0
  28. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Ceil.py +0 -0
  29. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Celu.py +0 -0
  30. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Clip.py +0 -0
  31. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Col2Im.py +0 -0
  32. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Compress.py +0 -0
  33. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Concat.py +0 -0
  34. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/ConcatFromSequence.py +0 -0
  35. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Constant.py +0 -0
  36. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/ConstantOfShape.py +0 -0
  37. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Conv.py +0 -0
  38. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/ConvInteger.py +0 -0
  39. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/ConvTranspose.py +0 -0
  40. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Cos.py +0 -0
  41. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Cosh.py +0 -0
  42. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/CumSum.py +0 -0
  43. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/DepthToSpace.py +0 -0
  44. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/DequantizeLinear.py +0 -0
  45. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Det.py +0 -0
  46. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Div.py +0 -0
  47. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Dropout.py +0 -0
  48. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/DynamicQuantizeLinear.py +0 -0
  49. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Einsum.py +0 -0
  50. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Elu.py +0 -0
  51. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Equal.py +0 -0
  52. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Erf.py +0 -0
  53. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Exp.py +0 -0
  54. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Expand.py +0 -0
  55. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/EyeLike.py +0 -0
  56. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Flatten.py +0 -0
  57. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Floor.py +0 -0
  58. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/FusedConv.py +0 -0
  59. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/GRU.py +0 -0
  60. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Gather.py +0 -0
  61. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/GatherElements.py +0 -0
  62. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/GatherND.py +0 -0
  63. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Gelu.py +0 -0
  64. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Gemm.py +0 -0
  65. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/GlobalAveragePool.py +0 -0
  66. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/GlobalLpPool.py +0 -0
  67. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/GlobalMaxPool.py +0 -0
  68. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Greater.py +0 -0
  69. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/GreaterOrEqual.py +0 -0
  70. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/GridSample.py +0 -0
  71. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/GroupNorm.py +0 -0
  72. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/HammingWindow.py +0 -0
  73. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/HannWindow.py +0 -0
  74. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/HardSigmoid.py +0 -0
  75. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/HardSwish.py +0 -0
  76. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Hardmax.py +0 -0
  77. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Identity.py +0 -0
  78. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/If.py +0 -0
  79. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Input.py +0 -0
  80. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/InstanceNormalization.py +0 -0
  81. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Inverse.py +0 -0
  82. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/IsInf.py +0 -0
  83. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/IsNaN.py +0 -0
  84. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/LRN.py +0 -0
  85. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/LSTM.py +0 -0
  86. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/LayerNormalization.py +0 -0
  87. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/LeakyRelu.py +0 -0
  88. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Less.py +0 -0
  89. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/LessOrEqual.py +0 -0
  90. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Log.py +0 -0
  91. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/LogSoftmax.py +0 -0
  92. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/LpNormalization.py +0 -0
  93. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/MatMul.py +0 -0
  94. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/MatMulInteger.py +0 -0
  95. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Max.py +0 -0
  96. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/MaxPool.py +0 -0
  97. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/MaxUnpool.py +0 -0
  98. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Mean.py +0 -0
  99. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/MeanVarianceNormalization.py +0 -0
  100. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/MelWeightMatrix.py +0 -0
  101. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Min.py +0 -0
  102. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Mish.py +0 -0
  103. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Mod.py +0 -0
  104. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Mul.py +0 -0
  105. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Multinomial.py +0 -0
  106. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Neg.py +0 -0
  107. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/NonMaxSuppression.py +0 -0
  108. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/NonZero.py +0 -0
  109. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Not.py +0 -0
  110. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/OneHot.py +0 -0
  111. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/OptionalGetElement.py +0 -0
  112. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/OptionalHasElement.py +0 -0
  113. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Or.py +0 -0
  114. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/PRelu.py +0 -0
  115. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Pad.py +0 -0
  116. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Pow.py +0 -0
  117. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/QLinearAdd.py +0 -0
  118. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/QLinearConcat.py +0 -0
  119. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/QLinearConv.py +0 -0
  120. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/QLinearLeakyRelu.py +0 -0
  121. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/QLinearMatMul.py +0 -0
  122. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/QLinearMul.py +0 -0
  123. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/QLinearSigmoid.py +0 -0
  124. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/QLinearSoftmax.py +0 -0
  125. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/QuantizeLinear.py +0 -0
  126. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/RNN.py +0 -0
  127. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/RandomNormal.py +0 -0
  128. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/RandomNormalLike.py +0 -0
  129. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/RandomUniform.py +0 -0
  130. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/RandomUniformLike.py +0 -0
  131. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Range.py +0 -0
  132. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Reciprocal.py +0 -0
  133. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/ReduceLogSum.py +0 -0
  134. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/ReduceLogSumExp.py +0 -0
  135. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/ReduceMax.py +0 -0
  136. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/ReduceMean.py +0 -0
  137. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/ReduceMin.py +0 -0
  138. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/ReduceProd.py +0 -0
  139. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/ReduceSum.py +0 -0
  140. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/ReduceSumSquare.py +0 -0
  141. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Relu.py +0 -0
  142. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Reshape.py +0 -0
  143. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Resize.py +0 -0
  144. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/ReverseSequence.py +0 -0
  145. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/RoiAlign.py +0 -0
  146. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Round.py +0 -0
  147. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/STFT.py +0 -0
  148. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/ScaleAndTranslate.py +0 -0
  149. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Scatter.py +0 -0
  150. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/ScatterElements.py +0 -0
  151. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/ScatterND.py +0 -0
  152. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Selu.py +0 -0
  153. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/SequenceAt.py +0 -0
  154. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/SequenceConstruct.py +0 -0
  155. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/SequenceEmpty.py +0 -0
  156. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/SequenceErase.py +0 -0
  157. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/SequenceInsert.py +0 -0
  158. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/SequenceLength.py +0 -0
  159. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Shape.py +0 -0
  160. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Shrink.py +0 -0
  161. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Sigmoid.py +0 -0
  162. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Sign.py +0 -0
  163. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Sin.py +0 -0
  164. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Sinh.py +0 -0
  165. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Size.py +0 -0
  166. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Slice.py +0 -0
  167. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Softmax.py +0 -0
  168. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Softplus.py +0 -0
  169. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Softsign.py +0 -0
  170. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/SpaceToDepth.py +0 -0
  171. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Split.py +0 -0
  172. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/SplitToSequence.py +0 -0
  173. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Sqrt.py +0 -0
  174. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Squeeze.py +0 -0
  175. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/StringNormalizer.py +0 -0
  176. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Sub.py +0 -0
  177. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Sum.py +0 -0
  178. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Tan.py +0 -0
  179. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Tanh.py +0 -0
  180. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/ThresholdedRelu.py +0 -0
  181. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Tile.py +0 -0
  182. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/TopK.py +0 -0
  183. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Transpose.py +0 -0
  184. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Trilu.py +0 -0
  185. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Unique.py +0 -0
  186. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Unsqueeze.py +0 -0
  187. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Upsample.py +0 -0
  188. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Where.py +0 -0
  189. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/Xor.py +0 -0
  190. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/_Loop.py +0 -0
  191. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/__Loop.py +0 -0
  192. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/__init__.py +0 -0
  193. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/utils/__init__.py +0 -0
  194. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/utils/enums.py +0 -0
  195. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/utils/logging.py +0 -0
  196. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf.egg-info/SOURCES.txt +0 -0
  197. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf.egg-info/dependency_links.txt +0 -0
  198. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf.egg-info/entry_points.txt +0 -0
  199. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf.egg-info/top_level.txt +0 -0
  200. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/setup.cfg +0 -0
  201. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/setup.py +0 -0
  202. {onnx2tf-1.26.8 → onnx2tf-1.27.0}/tests/test_model_convert.py +0 -0
{onnx2tf-1.26.8/onnx2tf.egg-info → onnx2tf-1.27.0}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: onnx2tf
- Version: 1.26.8
+ Version: 1.27.0
  Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
  Home-page: https://github.com/PINTO0309/onnx2tf
  Author: Katsuya Hyodo
@@ -285,9 +285,11 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  - onnx-simplifier==0.4.33 or 0.4.30 `(onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] (op_type:Slice, node name: /xxxx/Slice): [ShapeInferenceError] Inferred shape and existing shape differ in rank: (x) vs (y))`
  - onnx_graphsurgeon
  - simple_onnx_processing_tools
- - tensorflow==2.17.0, Special bugs: [#436](https://github.com/PINTO0309/onnx2tf/issues/436)
+ - tensorflow==2.19.0, Special bugs: [#436](https://github.com/PINTO0309/onnx2tf/issues/436)
+ - tf-keras==2.19.0
+ - ai-edge-litert==1.2.0
  - psutil==5.9.5
- - ml_dtypes==0.3.2
+ - ml_dtypes==0.5.1
  - flatbuffers-compiler (Optional, Only when using the `-coion` option. Executable file named `flatc`.)
  - flatbuffers>=23.1.21
  ```bash
@@ -331,7 +333,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- ghcr.io/pinto0309/onnx2tf:1.26.8
+ ghcr.io/pinto0309/onnx2tf:1.27.0

  or

@@ -339,11 +341,11 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- docker.io/pinto0309/onnx2tf:1.26.8
+ docker.io/pinto0309/onnx2tf:1.27.0

  or

- pip install -U onnx==1.16.1 \
+ pip install -U onnx==1.17.0 \
  && pip install -U nvidia-pyindex \
  && pip install -U onnx-graphsurgeon \
  && pip install -U onnxruntime==1.18.1 \
@@ -351,13 +353,14 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  && pip install -U simple_onnx_processing_tools \
  && pip install -U sne4onnx>=1.0.13 \
  && pip install -U sng4onnx>=1.0.4 \
- && pip install -U tensorflow==2.17.0 \
+ && pip install -U ai_edge_litert==1.2.0 \
+ && pip install -U tensorflow==2.19.0 \
  && pip install -U protobuf==3.20.3 \
  && pip install -U onnx2tf \
  && pip install -U h5py==3.11.0 \
  && pip install -U psutil==5.9.5 \
- && pip install -U ml_dtypes==0.3.2 \
- && pip install -U tf-keras~=2.16 \
+ && pip install -U ml_dtypes==0.5.1 \
+ && pip install -U tf-keras==2.19.0 \
  && pip install flatbuffers>=23.5.26

  or
@@ -381,8 +384,9 @@ or
  && sudo chmod +x flatc \
  && sudo mv flatc /usr/bin/
  !pip install -U pip \
- && pip install tensorflow==2.17.0 \
- && pip install -U onnx==1.16.1 \
+ && pip install tensorflow==2.19.0 \
+ && pip install ai_edge_litert==1.2.0 \
+ && pip install -U onnx==1.17.0 \
  && python -m pip install onnx_graphsurgeon \
  --index-url https://pypi.ngc.nvidia.com \
  && pip install -U onnxruntime==1.18.1 \
@@ -392,8 +396,8 @@ or
  && pip install -U protobuf==3.20.3 \
  && pip install -U h5py==3.11.0 \
  && pip install -U psutil==5.9.5 \
- && pip install -U ml_dtypes==0.3.2 \
- && pip install -U tf-keras~=2.16 \
+ && pip install -U ml_dtypes==0.5.1 \
+ && pip install -U tf-keras==2.19.0 \
  && pip install flatbuffers>=23.5.26
  ```

@@ -608,7 +612,7 @@ import onnxruntime
  import numpy as np
  import onnx2tf
  import tensorflow as tf
- from tensorflow.lite.python import interpreter as tflite_interpreter
+ from ai_edge_litert.interpreter import Interpreter

  class Model(torch.nn.Module):
      def forward(self, x, y):
@@ -647,7 +651,7 @@ onnx2tf.convert(
  )

  # Now, test the newer TFLite model
- interpreter = tf.lite.Interpreter(model_path="model.tf/model_float32.tflite")
+ interpreter = Interpreter(model_path="model.tf/model_float32.tflite")
  tf_lite_model = interpreter.get_signature_runner()
  inputs = {
      'x': np.asarray([10], dtype=np.int64),
@@ -1061,10 +1065,10 @@ Now, let's try inference with the TFLite runtime instead of the TensorFlow runti
  import time
  import numpy as np
  np.random.seed(0)
- import tensorflow as tf
+ from ai_edge_litert.interpreter import Interpreter

  # Load TFLite model
- interpreter = tf.lite.Interpreter(model_path="./saved_model/prelu_check_float32.tflite")
+ interpreter = Interpreter(model_path="./saved_model/prelu_check_float32.tflite")
  interpreter.allocate_tensors()
  tensor_shape = (256, 20)
  input_data = {'waveform': np.random.randn(*tensor_shape).astype(np.float32)}
@@ -1232,10 +1236,10 @@ You can use `signature_runner` to handle dynamic input tensors by performing inf
  - `test.py` - Batch size: `5`
  ```python
  import numpy as np
- import tensorflow as tf
+ from ai_edge_litert.interpreter import Interpreter
  from pprint import pprint

- interpreter = tf.lite.Interpreter(model_path="saved_model/osnet_x0_25_msmt17_float32.tflite")
+ interpreter = Interpreter(model_path="saved_model/osnet_x0_25_msmt17_float32.tflite")
  tf_lite_model = interpreter.get_signature_runner()
  inputs = {
      'images': np.ones([5,256,128,3], dtype=np.float32),
@@ -1263,10 +1267,10 @@ You can use `signature_runner` to handle dynamic input tensors by performing inf
  - `test.py` - Batch size: `3`
  ```python
  import numpy as np
- import tensorflow as tf
+ from ai_edge_litert.interpreter import Interpreter
  from pprint import pprint

- interpreter = tf.lite.Interpreter(model_path="saved_model/osnet_x0_25_msmt17_float32.tflite")
+ interpreter = Interpreter(model_path="saved_model/osnet_x0_25_msmt17_float32.tflite")
  tf_lite_model = interpreter.get_signature_runner()
  inputs = {
      'images': np.ones([3,256,128,3], dtype=np.float32),
@@ -1350,11 +1354,11 @@ The relationship between the ONNX before conversion and the TFLite file after co
  Use the generated TFLite file to inference and ensure that it always contains fixed value output.

  ```python
- import tensorflow as tf
+ from ai_edge_litert.interpreter import Interpreter
  import numpy as np
  from pprint import pprint

- interpreter = tf.lite.Interpreter(model_path="saved_model/toy_with_constant_float32.tflite")
+ interpreter = Interpreter(model_path="saved_model/toy_with_constant_float32.tflite")
  interpreter.allocate_tensors()

  input_details = interpreter.get_input_details()
{onnx2tf-1.26.8 → onnx2tf-1.27.0}/README.md
@@ -261,9 +261,11 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  - onnx-simplifier==0.4.33 or 0.4.30 `(onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] (op_type:Slice, node name: /xxxx/Slice): [ShapeInferenceError] Inferred shape and existing shape differ in rank: (x) vs (y))`
  - onnx_graphsurgeon
  - simple_onnx_processing_tools
- - tensorflow==2.17.0, Special bugs: [#436](https://github.com/PINTO0309/onnx2tf/issues/436)
+ - tensorflow==2.19.0, Special bugs: [#436](https://github.com/PINTO0309/onnx2tf/issues/436)
+ - tf-keras==2.19.0
+ - ai-edge-litert==1.2.0
  - psutil==5.9.5
- - ml_dtypes==0.3.2
+ - ml_dtypes==0.5.1
  - flatbuffers-compiler (Optional, Only when using the `-coion` option. Executable file named `flatc`.)
  - flatbuffers>=23.1.21
  ```bash
@@ -307,7 +309,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- ghcr.io/pinto0309/onnx2tf:1.26.8
+ ghcr.io/pinto0309/onnx2tf:1.27.0

  or

@@ -315,11 +317,11 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- docker.io/pinto0309/onnx2tf:1.26.8
+ docker.io/pinto0309/onnx2tf:1.27.0

  or

- pip install -U onnx==1.16.1 \
+ pip install -U onnx==1.17.0 \
  && pip install -U nvidia-pyindex \
  && pip install -U onnx-graphsurgeon \
  && pip install -U onnxruntime==1.18.1 \
@@ -327,13 +329,14 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  && pip install -U simple_onnx_processing_tools \
  && pip install -U sne4onnx>=1.0.13 \
  && pip install -U sng4onnx>=1.0.4 \
- && pip install -U tensorflow==2.17.0 \
+ && pip install -U ai_edge_litert==1.2.0 \
+ && pip install -U tensorflow==2.19.0 \
  && pip install -U protobuf==3.20.3 \
  && pip install -U onnx2tf \
  && pip install -U h5py==3.11.0 \
  && pip install -U psutil==5.9.5 \
- && pip install -U ml_dtypes==0.3.2 \
- && pip install -U tf-keras~=2.16 \
+ && pip install -U ml_dtypes==0.5.1 \
+ && pip install -U tf-keras==2.19.0 \
  && pip install flatbuffers>=23.5.26

  or
@@ -357,8 +360,9 @@ or
  && sudo chmod +x flatc \
  && sudo mv flatc /usr/bin/
  !pip install -U pip \
- && pip install tensorflow==2.17.0 \
- && pip install -U onnx==1.16.1 \
+ && pip install tensorflow==2.19.0 \
+ && pip install ai_edge_litert==1.2.0 \
+ && pip install -U onnx==1.17.0 \
  && python -m pip install onnx_graphsurgeon \
  --index-url https://pypi.ngc.nvidia.com \
  && pip install -U onnxruntime==1.18.1 \
@@ -368,8 +372,8 @@ or
  && pip install -U protobuf==3.20.3 \
  && pip install -U h5py==3.11.0 \
  && pip install -U psutil==5.9.5 \
- && pip install -U ml_dtypes==0.3.2 \
- && pip install -U tf-keras~=2.16 \
+ && pip install -U ml_dtypes==0.5.1 \
+ && pip install -U tf-keras==2.19.0 \
  && pip install flatbuffers>=23.5.26
  ```

@@ -584,7 +588,7 @@ import onnxruntime
  import numpy as np
  import onnx2tf
  import tensorflow as tf
- from tensorflow.lite.python import interpreter as tflite_interpreter
+ from ai_edge_litert.interpreter import Interpreter

  class Model(torch.nn.Module):
      def forward(self, x, y):
@@ -623,7 +627,7 @@ onnx2tf.convert(
  )

  # Now, test the newer TFLite model
- interpreter = tf.lite.Interpreter(model_path="model.tf/model_float32.tflite")
+ interpreter = Interpreter(model_path="model.tf/model_float32.tflite")
  tf_lite_model = interpreter.get_signature_runner()
  inputs = {
      'x': np.asarray([10], dtype=np.int64),
@@ -1037,10 +1041,10 @@ Now, let's try inference with the TFLite runtime instead of the TensorFlow runti
  import time
  import numpy as np
  np.random.seed(0)
- import tensorflow as tf
+ from ai_edge_litert.interpreter import Interpreter

  # Load TFLite model
- interpreter = tf.lite.Interpreter(model_path="./saved_model/prelu_check_float32.tflite")
+ interpreter = Interpreter(model_path="./saved_model/prelu_check_float32.tflite")
  interpreter.allocate_tensors()
  tensor_shape = (256, 20)
  input_data = {'waveform': np.random.randn(*tensor_shape).astype(np.float32)}
@@ -1208,10 +1212,10 @@ You can use `signature_runner` to handle dynamic input tensors by performing inf
  - `test.py` - Batch size: `5`
  ```python
  import numpy as np
- import tensorflow as tf
+ from ai_edge_litert.interpreter import Interpreter
  from pprint import pprint

- interpreter = tf.lite.Interpreter(model_path="saved_model/osnet_x0_25_msmt17_float32.tflite")
+ interpreter = Interpreter(model_path="saved_model/osnet_x0_25_msmt17_float32.tflite")
  tf_lite_model = interpreter.get_signature_runner()
  inputs = {
      'images': np.ones([5,256,128,3], dtype=np.float32),
@@ -1239,10 +1243,10 @@ You can use `signature_runner` to handle dynamic input tensors by performing inf
  - `test.py` - Batch size: `3`
  ```python
  import numpy as np
- import tensorflow as tf
+ from ai_edge_litert.interpreter import Interpreter
  from pprint import pprint

- interpreter = tf.lite.Interpreter(model_path="saved_model/osnet_x0_25_msmt17_float32.tflite")
+ interpreter = Interpreter(model_path="saved_model/osnet_x0_25_msmt17_float32.tflite")
  tf_lite_model = interpreter.get_signature_runner()
  inputs = {
      'images': np.ones([3,256,128,3], dtype=np.float32),
@@ -1326,11 +1330,11 @@ The relationship between the ONNX before conversion and the TFLite file after co
  Use the generated TFLite file to inference and ensure that it always contains fixed value output.

  ```python
- import tensorflow as tf
+ from ai_edge_litert.interpreter import Interpreter
  import numpy as np
  from pprint import pprint

- interpreter = tf.lite.Interpreter(model_path="saved_model/toy_with_constant_float32.tflite")
+ interpreter = Interpreter(model_path="saved_model/toy_with_constant_float32.tflite")
  interpreter.allocate_tensors()

  input_details = interpreter.get_input_details()
{onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/__init__.py
@@ -1,3 +1,3 @@
  from onnx2tf.onnx2tf import convert, main

- __version__ = '1.26.8'
+ __version__ = '1.27.0'
{onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/ReduceL1.py
@@ -298,12 +298,10 @@ def make_node(
  # Generation of TF OP
  axes = list(axes) if axes is not None else None
  tf_layers_dict[graph_node_output.name]['tf_node'] = \
-     tf.norm(
-         tensor=input_tensor,
-         ord=1,
+     tf.reduce_sum(
+         tf.abs(input_tensor),
          axis=axes if len(axes) > 1 else axes[0],
          keepdims=keepdims,
-         name=graph_node.name,
      )

  # Post-process transpose
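A note on this change: when `tf.norm` is given a pair of axes it treats the input as a batch of matrices, so `ord=1` computes the matrix 1-norm (maximum absolute column sum), whereas ONNX `ReduceL1` is always an elementwise sum of absolute values, which `tf.reduce_sum(tf.abs(x))` expresses directly. Below is a minimal sketch, not part of the diff, checking the new lowering against a NumPy reference; the tensor shape and axes are illustrative.

```python
import numpy as np
import tensorflow as tf

x = np.random.randn(2, 3, 4).astype(np.float32)
axes = [1, 2]      # illustrative reduction axes
keepdims = True

# New lowering from this release: elementwise sum of absolute values.
l1_new = tf.reduce_sum(
    tf.abs(x),
    axis=axes if len(axes) > 1 else axes[0],
    keepdims=keepdims,
)

# ONNX ReduceL1 reference semantics: sum(|x|) over the given axes.
l1_ref = np.sum(np.abs(x), axis=tuple(axes), keepdims=keepdims)

np.testing.assert_allclose(l1_new.numpy(), l1_ref, rtol=1e-5)
```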
{onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/ops/ReduceL2.py
@@ -297,12 +297,12 @@ def make_node(
  # Generation of TF OP
  axes = list(axes) if axes is not None else None
  tf_layers_dict[graph_node_output.name]['tf_node'] = \
-     tf.norm(
-         tensor=input_tensor,
-         ord=2,
-         axis=axes if len(axes) > 1 else axes[0],
-         keepdims=keepdims,
-         name=graph_node.name,
+     tf.sqrt(
+         tf.reduce_sum(
+             tf.square(input_tensor),
+             axis=axes,
+             keepdims=keepdims
+         )
      )

  # Post-process transpose
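ONNX `ReduceL2` is defined as `sqrt(sum(x^2))` over the reduction axes, and `tf.reduce_sum` accepts a list of axes directly, so the single-axis unwrapping used in `ReduceL1.py` is not needed here. A minimal sketch, not part of the diff, checking the new lowering against a NumPy reference (shape and axes are illustrative):

```python
import numpy as np
import tensorflow as tf

x = np.random.randn(2, 3, 4).astype(np.float32)
axes = [1, 2]      # illustrative reduction axes
keepdims = True

# New lowering from this release: square, sum over the axes, then sqrt.
l2_new = tf.sqrt(
    tf.reduce_sum(
        tf.square(x),
        axis=axes,
        keepdims=keepdims,
    )
)

# ONNX ReduceL2 reference semantics: sqrt(sum(x^2)) over the given axes.
l2_ref = np.sqrt(np.sum(np.square(x), axis=tuple(axes), keepdims=keepdims))

np.testing.assert_allclose(l2_new.numpy(), l2_ref, rtol=1e-5)
```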
{onnx2tf-1.26.8 → onnx2tf-1.27.0}/onnx2tf/utils/common_functions.py
@@ -16,6 +16,7 @@ import subprocess
  import numpy as np
  np.random.seed(0)
  import tensorflow as tf
+ from ai_edge_litert.interpreter import Interpreter
  import tf_keras
  from tensorflow.python.keras.layers import Lambda
  from tensorflow.python.keras.utils import conv_utils
@@ -4143,8 +4144,7 @@ def weights_export(
      Path to file in hdf5 format to save the extracted weights
  """
  import h5py
- from tensorflow.lite.python import interpreter as interpreter_wrapper
- interpreter = interpreter_wrapper.Interpreter(
+ interpreter = Interpreter(
      model_path=extract_target_tflite_file_path,
  )
  interpreter.allocate_tensors()
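As the hunks above show, `ai-edge-litert` exposes the interpreter that previously lived under `tf.lite`, with the same `allocate_tensors`/`get_input_details`/`set_tensor`/`invoke`/`get_signature_runner` surface, so this release only swaps the import. A minimal usage sketch under that assumption; the model path is a placeholder, and a single float32 input is assumed:

```python
import numpy as np
from ai_edge_litert.interpreter import Interpreter  # was: tf.lite.Interpreter

# "model_float32.tflite" is a placeholder path, not a file shipped with onnx2tf.
interpreter = Interpreter(model_path="model_float32.tflite")
interpreter.allocate_tensors()

# Feed a zero tensor of the declared input shape (assumes one float32 input).
input_details = interpreter.get_input_details()
interpreter.set_tensor(
    input_details[0]['index'],
    np.zeros(input_details[0]['shape'], dtype=np.float32),
)
interpreter.invoke()
output = interpreter.get_tensor(interpreter.get_output_details()[0]['index'])
print(output.shape)
```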
{onnx2tf-1.26.8 → onnx2tf-1.27.0/onnx2tf.egg-info}/PKG-INFO
(identical diff to the PKG-INFO diff shown above)