onnx2tf 1.27.0.tar.gz → 1.27.2.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (202)
  1. {onnx2tf-1.27.0/onnx2tf.egg-info → onnx2tf-1.27.2}/PKG-INFO +8 -7
  2. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/README.md +5 -5
  3. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/__init__.py +1 -1
  4. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Add.py +2 -2
  5. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Cast.py +7 -1
  6. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Div.py +2 -2
  7. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/MatMul.py +6 -0
  8. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Mod.py +2 -2
  9. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Mul.py +2 -2
  10. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Split.py +42 -1
  11. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Sub.py +2 -2
  12. {onnx2tf-1.27.0 → onnx2tf-1.27.2/onnx2tf.egg-info}/PKG-INFO +8 -7
  13. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/LICENSE +0 -0
  14. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/LICENSE_onnx-tensorflow +0 -0
  15. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/__main__.py +0 -0
  16. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/onnx2tf.py +0 -0
  17. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Abs.py +0 -0
  18. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Acos.py +0 -0
  19. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Acosh.py +0 -0
  20. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/And.py +0 -0
  21. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/ArgMax.py +0 -0
  22. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/ArgMin.py +0 -0
  23. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Asin.py +0 -0
  24. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Asinh.py +0 -0
  25. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Atan.py +0 -0
  26. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Atanh.py +0 -0
  27. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/AveragePool.py +0 -0
  28. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/BatchNormalization.py +0 -0
  29. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Bernoulli.py +0 -0
  30. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/BitShift.py +0 -0
  31. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Ceil.py +0 -0
  32. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Celu.py +0 -0
  33. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Clip.py +0 -0
  34. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Col2Im.py +0 -0
  35. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Compress.py +0 -0
  36. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Concat.py +0 -0
  37. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/ConcatFromSequence.py +0 -0
  38. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Constant.py +0 -0
  39. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/ConstantOfShape.py +0 -0
  40. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Conv.py +0 -0
  41. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/ConvInteger.py +0 -0
  42. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/ConvTranspose.py +0 -0
  43. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Cos.py +0 -0
  44. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Cosh.py +0 -0
  45. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/CumSum.py +0 -0
  46. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/DepthToSpace.py +0 -0
  47. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/DequantizeLinear.py +0 -0
  48. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Det.py +0 -0
  49. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Dropout.py +0 -0
  50. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/DynamicQuantizeLinear.py +0 -0
  51. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Einsum.py +0 -0
  52. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Elu.py +0 -0
  53. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Equal.py +0 -0
  54. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Erf.py +0 -0
  55. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Exp.py +0 -0
  56. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Expand.py +0 -0
  57. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/EyeLike.py +0 -0
  58. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Flatten.py +0 -0
  59. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Floor.py +0 -0
  60. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/FusedConv.py +0 -0
  61. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/GRU.py +0 -0
  62. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Gather.py +0 -0
  63. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/GatherElements.py +0 -0
  64. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/GatherND.py +0 -0
  65. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Gelu.py +0 -0
  66. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Gemm.py +0 -0
  67. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/GlobalAveragePool.py +0 -0
  68. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/GlobalLpPool.py +0 -0
  69. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/GlobalMaxPool.py +0 -0
  70. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Greater.py +0 -0
  71. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/GreaterOrEqual.py +0 -0
  72. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/GridSample.py +0 -0
  73. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/GroupNorm.py +0 -0
  74. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/HammingWindow.py +0 -0
  75. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/HannWindow.py +0 -0
  76. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/HardSigmoid.py +0 -0
  77. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/HardSwish.py +0 -0
  78. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Hardmax.py +0 -0
  79. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Identity.py +0 -0
  80. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/If.py +0 -0
  81. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Input.py +0 -0
  82. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/InstanceNormalization.py +0 -0
  83. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Inverse.py +0 -0
  84. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/IsInf.py +0 -0
  85. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/IsNaN.py +0 -0
  86. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/LRN.py +0 -0
  87. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/LSTM.py +0 -0
  88. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/LayerNormalization.py +0 -0
  89. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/LeakyRelu.py +0 -0
  90. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Less.py +0 -0
  91. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/LessOrEqual.py +0 -0
  92. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Log.py +0 -0
  93. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/LogSoftmax.py +0 -0
  94. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/LpNormalization.py +0 -0
  95. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/MatMulInteger.py +0 -0
  96. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Max.py +0 -0
  97. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/MaxPool.py +0 -0
  98. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/MaxUnpool.py +0 -0
  99. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Mean.py +0 -0
  100. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/MeanVarianceNormalization.py +0 -0
  101. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/MelWeightMatrix.py +0 -0
  102. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Min.py +0 -0
  103. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Mish.py +0 -0
  104. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Multinomial.py +0 -0
  105. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Neg.py +0 -0
  106. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/NonMaxSuppression.py +0 -0
  107. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/NonZero.py +0 -0
  108. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Not.py +0 -0
  109. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/OneHot.py +0 -0
  110. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/OptionalGetElement.py +0 -0
  111. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/OptionalHasElement.py +0 -0
  112. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Or.py +0 -0
  113. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/PRelu.py +0 -0
  114. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Pad.py +0 -0
  115. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Pow.py +0 -0
  116. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/QLinearAdd.py +0 -0
  117. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/QLinearConcat.py +0 -0
  118. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/QLinearConv.py +0 -0
  119. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/QLinearLeakyRelu.py +0 -0
  120. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/QLinearMatMul.py +0 -0
  121. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/QLinearMul.py +0 -0
  122. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/QLinearSigmoid.py +0 -0
  123. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/QLinearSoftmax.py +0 -0
  124. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/QuantizeLinear.py +0 -0
  125. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/RNN.py +0 -0
  126. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/RandomNormal.py +0 -0
  127. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/RandomNormalLike.py +0 -0
  128. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/RandomUniform.py +0 -0
  129. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/RandomUniformLike.py +0 -0
  130. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Range.py +0 -0
  131. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Reciprocal.py +0 -0
  132. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/ReduceL1.py +0 -0
  133. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/ReduceL2.py +0 -0
  134. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/ReduceLogSum.py +0 -0
  135. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/ReduceLogSumExp.py +0 -0
  136. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/ReduceMax.py +0 -0
  137. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/ReduceMean.py +0 -0
  138. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/ReduceMin.py +0 -0
  139. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/ReduceProd.py +0 -0
  140. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/ReduceSum.py +0 -0
  141. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/ReduceSumSquare.py +0 -0
  142. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Relu.py +0 -0
  143. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Reshape.py +0 -0
  144. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Resize.py +0 -0
  145. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/ReverseSequence.py +0 -0
  146. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/RoiAlign.py +0 -0
  147. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Round.py +0 -0
  148. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/STFT.py +0 -0
  149. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/ScaleAndTranslate.py +0 -0
  150. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Scatter.py +0 -0
  151. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/ScatterElements.py +0 -0
  152. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/ScatterND.py +0 -0
  153. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Selu.py +0 -0
  154. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/SequenceAt.py +0 -0
  155. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/SequenceConstruct.py +0 -0
  156. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/SequenceEmpty.py +0 -0
  157. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/SequenceErase.py +0 -0
  158. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/SequenceInsert.py +0 -0
  159. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/SequenceLength.py +0 -0
  160. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Shape.py +0 -0
  161. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Shrink.py +0 -0
  162. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Sigmoid.py +0 -0
  163. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Sign.py +0 -0
  164. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Sin.py +0 -0
  165. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Sinh.py +0 -0
  166. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Size.py +0 -0
  167. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Slice.py +0 -0
  168. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Softmax.py +0 -0
  169. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Softplus.py +0 -0
  170. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Softsign.py +0 -0
  171. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/SpaceToDepth.py +0 -0
  172. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/SplitToSequence.py +0 -0
  173. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Sqrt.py +0 -0
  174. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Squeeze.py +0 -0
  175. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/StringNormalizer.py +0 -0
  176. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Sum.py +0 -0
  177. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Tan.py +0 -0
  178. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Tanh.py +0 -0
  179. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/ThresholdedRelu.py +0 -0
  180. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Tile.py +0 -0
  181. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/TopK.py +0 -0
  182. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Transpose.py +0 -0
  183. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Trilu.py +0 -0
  184. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Unique.py +0 -0
  185. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Unsqueeze.py +0 -0
  186. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Upsample.py +0 -0
  187. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Where.py +0 -0
  188. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/Xor.py +0 -0
  189. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/_Loop.py +0 -0
  190. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/__Loop.py +0 -0
  191. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/ops/__init__.py +0 -0
  192. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/utils/__init__.py +0 -0
  193. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/utils/common_functions.py +0 -0
  194. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/utils/enums.py +0 -0
  195. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf/utils/logging.py +0 -0
  196. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf.egg-info/SOURCES.txt +0 -0
  197. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf.egg-info/dependency_links.txt +0 -0
  198. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf.egg-info/entry_points.txt +0 -0
  199. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/onnx2tf.egg-info/top_level.txt +0 -0
  200. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/setup.cfg +0 -0
  201. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/setup.py +0 -0
  202. {onnx2tf-1.27.0 → onnx2tf-1.27.2}/tests/test_model_convert.py +0 -0

PKG-INFO
@@ -1,6 +1,6 @@
- Metadata-Version: 2.2
+ Metadata-Version: 2.4
  Name: onnx2tf
- Version: 1.27.0
+ Version: 1.27.2
  Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
  Home-page: https://github.com/PINTO0309/onnx2tf
  Author: Katsuya Hyodo
@@ -18,6 +18,7 @@ Dynamic: description
  Dynamic: description-content-type
  Dynamic: home-page
  Dynamic: license
+ Dynamic: license-file
  Dynamic: platform
  Dynamic: requires-python
  Dynamic: summary
@@ -280,7 +281,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.

  ## Environment
  - Linux / Windows
- - onnx==1.16.1
+ - onnx==1.17.0
  - onnxruntime==1.18.1
  - onnx-simplifier==0.4.33 or 0.4.30 `(onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] (op_type:Slice, node name: /xxxx/Slice): [ShapeInferenceError] Inferred shape and existing shape differ in rank: (x) vs (y))`
  - onnx_graphsurgeon
@@ -333,7 +334,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- ghcr.io/pinto0309/onnx2tf:1.27.0
+ ghcr.io/pinto0309/onnx2tf:1.27.2

  or

@@ -341,7 +342,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- docker.io/pinto0309/onnx2tf:1.27.0
+ docker.io/pinto0309/onnx2tf:1.27.2

  or

@@ -1512,7 +1513,7 @@ See: https://github.com/tensorflow/tfjs/tree/master/tfjs-converter
  When converting to CoreML, process as follows. The `-k` option is for conversion while maintaining the input channel order in ONNX's NCHW format.

  ```bash
- pip install coremltools
+ pip install coremltools==8.2

  onnx2tf -i mobilenetv2-12.onnx -k input -ois input:1,3,224,224 -osd
  ```
@@ -1525,7 +1526,7 @@ model = ct.convert(
  model=FOLDER_PATH,
  source='tensorflow',
  )
- model.save(f'{FOLDER_PATH}/model.mlmodel')
+ model.save(f'{FOLDER_PATH}/model.mlpackage')
  ```

  See: https://github.com/apple/coremltools

README.md
@@ -256,7 +256,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.

  ## Environment
  - Linux / Windows
- - onnx==1.16.1
+ - onnx==1.17.0
  - onnxruntime==1.18.1
  - onnx-simplifier==0.4.33 or 0.4.30 `(onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] (op_type:Slice, node name: /xxxx/Slice): [ShapeInferenceError] Inferred shape and existing shape differ in rank: (x) vs (y))`
  - onnx_graphsurgeon
@@ -309,7 +309,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- ghcr.io/pinto0309/onnx2tf:1.27.0
+ ghcr.io/pinto0309/onnx2tf:1.27.2

  or

@@ -317,7 +317,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- docker.io/pinto0309/onnx2tf:1.27.0
+ docker.io/pinto0309/onnx2tf:1.27.2

  or

@@ -1488,7 +1488,7 @@ See: https://github.com/tensorflow/tfjs/tree/master/tfjs-converter
  When converting to CoreML, process as follows. The `-k` option is for conversion while maintaining the input channel order in ONNX's NCHW format.

  ```bash
- pip install coremltools
+ pip install coremltools==8.2

  onnx2tf -i mobilenetv2-12.onnx -k input -ois input:1,3,224,224 -osd
  ```
@@ -1501,7 +1501,7 @@ model = ct.convert(
  model=FOLDER_PATH,
  source='tensorflow',
  )
- model.save(f'{FOLDER_PATH}/model.mlmodel')
+ model.save(f'{FOLDER_PATH}/model.mlpackage')
  ```

  See: https://github.com/apple/coremltools
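The CoreML section in both README copies now pins `coremltools==8.2` and saves the converted model as `model.mlpackage` instead of `model.mlmodel`. Recent coremltools releases convert to the ML Program backend by default, and an ML Program is stored as an ML Package directory, so the old `.mlmodel` extension no longer matches what `ct.convert` returns. Below is a hedged reconstruction of the snippet around the lines visible in the hunk; the `import` line and the value of `FOLDER_PATH` are assumptions, not shown in the diff.

```python
import coremltools as ct

# Hypothetical path: the TensorFlow saved_model directory emitted by onnx2tf with -osd.
FOLDER_PATH = 'saved_model'

model = ct.convert(
    model=FOLDER_PATH,
    source='tensorflow',
)
# An ML Program model is packaged as an .mlpackage directory; saving it with the
# legacy .mlmodel extension is rejected by newer coremltools versions.
model.save(f'{FOLDER_PATH}/model.mlpackage')
```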

onnx2tf/__init__.py
@@ -1,3 +1,3 @@
  from onnx2tf.onnx2tf import convert, main

- __version__ = '1.27.0'
+ __version__ = '1.27.2'

onnx2tf/ops/Add.py
@@ -130,9 +130,9 @@ def make_node(
  )

  # Workaround for ConvInteger
- if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64]:
+ if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64, tf.float16]:
  input_tensor_2 = tf.cast(input_tensor_2, dtype=tf.float32)
- elif input_tensor_1.dtype in [tf.int32, tf.int64] and input_tensor_2.dtype == tf.float32:
+ elif input_tensor_1.dtype in [tf.int32, tf.int64, tf.float16] and input_tensor_2.dtype == tf.float32:
  input_tensor_1 = tf.cast(input_tensor_1, dtype=tf.float32)

  # Disable unnecessary Transpose
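The same two-line dtype guard is applied to each of the elementwise binary handlers touched in this release (Add, Div, Mod, Mul, Sub in the file list above): when one operand is float32 and the other is int32, int64, or now float16, the non-float32 operand is cast up to float32 before the TF op is emitted. A minimal standalone sketch of that promotion rule; the helper name and tensors are illustrative, not onnx2tf code.

```python
import tensorflow as tf

def harmonize_binary_dtypes(x: tf.Tensor, y: tf.Tensor):
    # Promote the int32/int64/float16 operand to float32 whenever the other
    # operand is float32, so the elementwise op sees a single dtype.
    promotable = (tf.int32, tf.int64, tf.float16)
    if x.dtype == tf.float32 and y.dtype in promotable:
        y = tf.cast(y, tf.float32)
    elif x.dtype in promotable and y.dtype == tf.float32:
        x = tf.cast(x, tf.float32)
    return x, y

a = tf.constant([1.0, 2.0], dtype=tf.float32)
b = tf.constant([3.0, 4.0], dtype=tf.float16)
a, b = harmonize_binary_dtypes(a, b)
print(tf.add(a, b))  # float32 result; no mixed-dtype kernel needed
```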

onnx2tf/ops/Cast.py
@@ -96,10 +96,16 @@ def make_node(
  tf_layers_dict[graph_node_output.name].pop('nhwc')

  # Suppression of FlexCast generation
- # Float32 -> Float64
+ # Float64 -> Float32
+ # Float16 -> Float32
  if input_tensor.dtype == tf.float32 \
  and to == tf.float64:
  to = tf.float32
+ elif isinstance(graph_node.inputs[0], gs.Variable) \
+ and hasattr(graph_node.inputs[0], "dtype") \
+ and graph_node.inputs[0].dtype == np.float32 \
+ and to == tf.float16:
+ to = tf.float32

  # Generation of TF OP
  tf_layers_dict[graph_node_output.name]['tf_node'] = \
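The Cast handler already rewrote float32 → float64 casts to a plain float32 cast to keep TFLite from emitting a Flex CAST; this release adds the analogous float32 → float16 suppression when the ONNX-side input is a float32 variable. A reduced sketch of the rewrite rule, collapsing the ONNX-side and TF-side dtype checks into a single argument (not the converter's real function):

```python
import tensorflow as tf

def suppress_flex_cast_target(input_dtype: tf.DType, to: tf.DType) -> tf.DType:
    # Cast targets that the hunk's comment associates with FlexCast generation
    # are redirected back to float32 when the source is already float32.
    if input_dtype == tf.float32 and to in (tf.float64, tf.float16):
        return tf.float32
    return to

assert suppress_flex_cast_target(tf.float32, tf.float16) == tf.float32
assert suppress_flex_cast_target(tf.float32, tf.float64) == tf.float32
assert suppress_flex_cast_target(tf.float32, tf.int64) == tf.int64  # untouched
```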

onnx2tf/ops/Div.py
@@ -116,9 +116,9 @@ def make_node(
  )

  # Workaround for ConvInteger
- if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64]:
+ if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64, tf.float16]:
  input_tensor_2 = tf.cast(input_tensor_2, dtype=tf.float32)
- elif input_tensor_1.dtype in [tf.int32, tf.int64] and input_tensor_2.dtype == tf.float32:
+ elif input_tensor_1.dtype in [tf.int32, tf.int64, tf.float16] and input_tensor_2.dtype == tf.float32:
  input_tensor_1 = tf.cast(input_tensor_1, dtype=tf.float32)

  # Disable unnecessary Transpose

onnx2tf/ops/MatMul.py
@@ -128,6 +128,12 @@ def make_node(
  output_dtype = NUMPY_DTYPES_TO_TF_DTYPES[dtype] \
  if isinstance(dtype, np.dtype) else dtype

+ # Workaround for Float16
+ if input_tensor_1.dtype == tf.float32 and output_dtype in [tf.int32, tf.int64, tf.float16]:
+ output_dtype = tf.float32
+ elif output_dtype and input_tensor_2.dtype == tf.float32:
+ output_dtype = tf.float32
+
  # Shape Unmatch Error Mitigation Measures
  # Search for and transpose shapes that do not cause shape unmatch errors
  min_abs_err = sys.maxsize
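MatMul gains a separate guard on the requested output dtype: when an operand is float32 and the requested output is int32, int64, or float16, the output dtype is forced back to float32. For floating-point operands `tf.matmul` returns the operands' dtype, so a lower-precision result can only come from a cast applied after the product. A quick illustration of that constraint (not the converter's code path):

```python
import tensorflow as tf

a = tf.random.normal([2, 3], dtype=tf.float32)
b = tf.random.normal([3, 4], dtype=tf.float32)

c = tf.matmul(a, b)          # float32 in, float32 out
assert c.dtype == tf.float32

# A float16 result has to be produced by an explicit downcast afterwards,
# which is why the converter pins the output dtype to float32 at the matmul itself.
c_fp16 = tf.cast(c, tf.float16)
assert c_fp16.dtype == tf.float16
```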

onnx2tf/ops/Mod.py
@@ -115,9 +115,9 @@ def make_node(
  )

  # Workaround for ConvInteger
- if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64]:
+ if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64, tf.float16]:
  input_tensor_2 = tf.cast(input_tensor_2, dtype=tf.float32)
- elif input_tensor_1.dtype in [tf.int32, tf.int64] and input_tensor_2.dtype == tf.float32:
+ elif input_tensor_1.dtype in [tf.int32, tf.int64, tf.float16] and input_tensor_2.dtype == tf.float32:
  input_tensor_1 = tf.cast(input_tensor_1, dtype=tf.float32)

  # Disable unnecessary Transpose

onnx2tf/ops/Mul.py
@@ -120,9 +120,9 @@ def make_node(
  )

  # Workaround for ConvInteger
- if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64]:
+ if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64, tf.float16]:
  input_tensor_2 = tf.cast(input_tensor_2, dtype=tf.float32)
- elif input_tensor_1.dtype in [tf.int32, tf.int64] and input_tensor_2.dtype == tf.float32:
+ elif input_tensor_1.dtype in [tf.int32, tf.int64, tf.float16] and input_tensor_2.dtype == tf.float32:
  input_tensor_1 = tf.cast(input_tensor_1, dtype=tf.float32)

  # Disable unnecessary Transpose

onnx2tf/ops/Split.py
@@ -158,15 +158,56 @@ def make_node(
  if idx == axis:
  end_.append(split_idx + 1)
  elif input_tensor_shape[idx] is None:
- end_.append(-1)
+ end_.append(0)
  else:
  end_.append(input_tensor_shape[idx])

+ begin_mask_ = np.sum([2**idx if idx != axis else 0 for idx in range(input_tensor_rank)])
+ end_mask_ = np.sum([2**idx if idx != axis else 0 for idx in range(input_tensor_rank)])
+
  splited_tensors.append(
  tf.strided_slice(
  input_=input_tensor,
  begin=begin_,
  end=end_,
+ begin_mask=begin_mask_,
+ end_mask=end_mask_,
+ )
+ )
+ elif isinstance(split, np.ndarray) \
+ and len(list(split)) > 1 \
+ and np.prod(split) != 1 \
+ and np.all(split == split[0]) \
+ and isinstance(input_tensor_shape[axis], int) \
+ and input_tensor_shape[axis] == np.sum(split):
+ # strided_slice - Slice everything in same size
+ # Suppression of FlexSplitV generation
+ # https://github.com/PINTO0309/onnx2tf/issues/751
+ splited_tensors = []
+ split_size = split[0]
+ for split_idx in range(len(list(split))):
+ begin_ = [
+ split_size * split_idx if idx == axis else 0 for idx in range(input_tensor_rank)
+ ]
+ end_ = []
+ for idx in range(input_tensor_rank):
+ if idx == axis:
+ end_.append(split_size * split_idx + split_size)
+ elif input_tensor_shape[idx] is None:
+ end_.append(0)
+ else:
+ end_.append(input_tensor_shape[idx])
+
+ begin_mask_ = np.sum([2**idx if idx != axis else 0 for idx in range(input_tensor_rank)])
+ end_mask_ = np.sum([2**idx if idx != axis else 0 for idx in range(input_tensor_rank)])
+
+ splited_tensors.append(
+ tf.strided_slice(
+ input_=input_tensor,
+ begin=begin_,
+ end=end_,
+ begin_mask=begin_mask_,
+ end_mask=end_mask_,
  )
  )
  elif isinstance(split, np.ndarray) \
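The new Split branch handles equal-sized static splits with one `tf.strided_slice` per chunk instead of `tf.split`, whose lowering the hunk's comment and the linked issue (#751) associate with Flex `SplitV` generation. The `begin_mask`/`end_mask` values set a bit for every axis except the split axis, so the `0` placeholders written for those axes (including unknown dimensions) are ignored and the full extent is taken. A self-contained sketch of the masking arithmetic with made-up shapes; the names and values are illustrative:

```python
import numpy as np
import tensorflow as tf

# Rank-4 tensor split into 3 equal chunks of size 2 along the last axis.
x = tf.reshape(tf.range(2 * 4 * 4 * 6, dtype=tf.float32), [2, 4, 4, 6])
rank, axis, split_size, num_splits = 4, 3, 2, 3

# Mask bit i means "ignore begin/end on axis i and take its full extent".
# For rank=4, axis=3 this is 2**0 + 2**1 + 2**2 = 7.
begin_mask = int(np.sum([2**i if i != axis else 0 for i in range(rank)]))
end_mask = begin_mask

chunks = []
for k in range(num_splits):
    begin = [split_size * k if i == axis else 0 for i in range(rank)]
    end = [split_size * k + split_size if i == axis else 0 for i in range(rank)]
    chunks.append(
        tf.strided_slice(x, begin=begin, end=end,
                         begin_mask=begin_mask, end_mask=end_mask)
    )

# Same result as tf.split, but expressed with the builtin STRIDED_SLICE op.
for chunk, ref in zip(chunks, tf.split(x, num_or_size_splits=num_splits, axis=axis)):
    tf.debugging.assert_equal(chunk, ref)
```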

onnx2tf/ops/Sub.py
@@ -114,9 +114,9 @@ def make_node(
  )

  # Workaround for ConvInteger
- if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64]:
+ if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64, tf.float16]:
  input_tensor_2 = tf.cast(input_tensor_2, dtype=tf.float32)
- elif input_tensor_1.dtype in [tf.int32, tf.int64] and input_tensor_2.dtype == tf.float32:
+ elif input_tensor_1.dtype in [tf.int32, tf.int64, tf.float16] and input_tensor_2.dtype == tf.float32:
  input_tensor_1 = tf.cast(input_tensor_1, dtype=tf.float32)

  # Disable unnecessary Transpose

onnx2tf.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
- Metadata-Version: 2.2
+ Metadata-Version: 2.4
  Name: onnx2tf
- Version: 1.27.0
+ Version: 1.27.2
  Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
  Home-page: https://github.com/PINTO0309/onnx2tf
  Author: Katsuya Hyodo
@@ -18,6 +18,7 @@ Dynamic: description
  Dynamic: description-content-type
  Dynamic: home-page
  Dynamic: license
+ Dynamic: license-file
  Dynamic: platform
  Dynamic: requires-python
  Dynamic: summary
@@ -280,7 +281,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.

  ## Environment
  - Linux / Windows
- - onnx==1.16.1
+ - onnx==1.17.0
  - onnxruntime==1.18.1
  - onnx-simplifier==0.4.33 or 0.4.30 `(onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] (op_type:Slice, node name: /xxxx/Slice): [ShapeInferenceError] Inferred shape and existing shape differ in rank: (x) vs (y))`
  - onnx_graphsurgeon
@@ -333,7 +334,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- ghcr.io/pinto0309/onnx2tf:1.27.0
+ ghcr.io/pinto0309/onnx2tf:1.27.2

  or

@@ -341,7 +342,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- docker.io/pinto0309/onnx2tf:1.27.0
+ docker.io/pinto0309/onnx2tf:1.27.2

  or

@@ -1512,7 +1513,7 @@ See: https://github.com/tensorflow/tfjs/tree/master/tfjs-converter
  When converting to CoreML, process as follows. The `-k` option is for conversion while maintaining the input channel order in ONNX's NCHW format.

  ```bash
- pip install coremltools
+ pip install coremltools==8.2

  onnx2tf -i mobilenetv2-12.onnx -k input -ois input:1,3,224,224 -osd
  ```
@@ -1525,7 +1526,7 @@ model = ct.convert(
  model=FOLDER_PATH,
  source='tensorflow',
  )
- model.save(f'{FOLDER_PATH}/model.mlmodel')
+ model.save(f'{FOLDER_PATH}/model.mlpackage')
  ```

  See: https://github.com/apple/coremltools