onnx2tf 1.29.22.tar.gz → 1.29.24.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (220)
  1. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/PKG-INFO +16 -9
  2. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/README.md +15 -8
  3. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/__init__.py +1 -1
  4. onnx2tf-1.29.24/onnx2tf/ops/CenterCropPad.py +192 -0
  5. onnx2tf-1.29.24/onnx2tf/ops/GroupNormalization.py +234 -0
  6. onnx2tf-1.29.24/onnx2tf/ops/Optional.py +127 -0
  7. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/OptionalGetElement.py +3 -13
  8. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/OptionalHasElement.py +3 -13
  9. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/PRelu.py +44 -11
  10. onnx2tf-1.29.24/onnx2tf/ops/TfIdfVectorizer.py +431 -0
  11. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/pyproject.toml +2 -2
  12. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/__main__.py +0 -0
  13. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/onnx2tf.py +0 -0
  14. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Abs.py +0 -0
  15. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Acos.py +0 -0
  16. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Acosh.py +0 -0
  17. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Add.py +0 -0
  18. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/AffineGrid.py +0 -0
  19. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/And.py +0 -0
  20. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/ArgMax.py +0 -0
  21. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/ArgMin.py +0 -0
  22. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Asin.py +0 -0
  23. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Asinh.py +0 -0
  24. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Atan.py +0 -0
  25. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Atanh.py +0 -0
  26. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Attention.py +0 -0
  27. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/AveragePool.py +0 -0
  28. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/BatchNormalization.py +0 -0
  29. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Bernoulli.py +0 -0
  30. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/BitShift.py +0 -0
  31. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/BitwiseAnd.py +0 -0
  32. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/BitwiseNot.py +0 -0
  33. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/BitwiseOr.py +0 -0
  34. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/BitwiseXor.py +0 -0
  35. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/BlackmanWindow.py +0 -0
  36. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Cast.py +0 -0
  37. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Ceil.py +0 -0
  38. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Celu.py +0 -0
  39. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Clip.py +0 -0
  40. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Col2Im.py +0 -0
  41. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Compress.py +0 -0
  42. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Concat.py +0 -0
  43. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/ConcatFromSequence.py +0 -0
  44. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Constant.py +0 -0
  45. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/ConstantOfShape.py +0 -0
  46. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Conv.py +0 -0
  47. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/ConvInteger.py +0 -0
  48. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/ConvTranspose.py +0 -0
  49. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Cos.py +0 -0
  50. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Cosh.py +0 -0
  51. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/CumProd.py +0 -0
  52. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/CumSum.py +0 -0
  53. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/DFT.py +0 -0
  54. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/DeformConv.py +0 -0
  55. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/DepthToSpace.py +0 -0
  56. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/DequantizeLinear.py +0 -0
  57. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Det.py +0 -0
  58. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Div.py +0 -0
  59. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Dropout.py +0 -0
  60. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/DynamicQuantizeLinear.py +0 -0
  61. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Einsum.py +0 -0
  62. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Elu.py +0 -0
  63. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Equal.py +0 -0
  64. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Erf.py +0 -0
  65. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Exp.py +0 -0
  66. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Expand.py +0 -0
  67. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/EyeLike.py +0 -0
  68. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Flatten.py +0 -0
  69. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Floor.py +0 -0
  70. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/FusedConv.py +0 -0
  71. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/GRU.py +0 -0
  72. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Gather.py +0 -0
  73. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/GatherElements.py +0 -0
  74. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/GatherND.py +0 -0
  75. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Gelu.py +0 -0
  76. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Gemm.py +0 -0
  77. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/GlobalAveragePool.py +0 -0
  78. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/GlobalLpPool.py +0 -0
  79. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/GlobalMaxPool.py +0 -0
  80. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Greater.py +0 -0
  81. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/GreaterOrEqual.py +0 -0
  82. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/GridSample.py +0 -0
  83. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/GroupNorm.py +0 -0
  84. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/HammingWindow.py +0 -0
  85. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/HannWindow.py +0 -0
  86. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/HardSigmoid.py +0 -0
  87. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/HardSwish.py +0 -0
  88. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Hardmax.py +0 -0
  89. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Identity.py +0 -0
  90. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/If.py +0 -0
  91. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/ImageDecoder.py +0 -0
  92. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Input.py +0 -0
  93. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/InstanceNormalization.py +0 -0
  94. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Inverse.py +0 -0
  95. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/IsInf.py +0 -0
  96. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/IsNaN.py +0 -0
  97. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/LRN.py +0 -0
  98. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/LSTM.py +0 -0
  99. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/LayerNormalization.py +0 -0
  100. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/LeakyRelu.py +0 -0
  101. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Less.py +0 -0
  102. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/LessOrEqual.py +0 -0
  103. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Log.py +0 -0
  104. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/LogSoftmax.py +0 -0
  105. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Loop.py +0 -0
  106. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/LpNormalization.py +0 -0
  107. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/LpPool.py +0 -0
  108. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/MatMul.py +0 -0
  109. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/MatMulInteger.py +0 -0
  110. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Max.py +0 -0
  111. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/MaxPool.py +0 -0
  112. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/MaxRoiPool.py +0 -0
  113. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/MaxUnpool.py +0 -0
  114. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Mean.py +0 -0
  115. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/MeanVarianceNormalization.py +0 -0
  116. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/MelWeightMatrix.py +0 -0
  117. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Min.py +0 -0
  118. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Mish.py +0 -0
  119. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Mod.py +0 -0
  120. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Mul.py +0 -0
  121. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Multinomial.py +0 -0
  122. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Neg.py +0 -0
  123. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/NegativeLogLikelihoodLoss.py +0 -0
  124. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/NonMaxSuppression.py +0 -0
  125. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/NonZero.py +0 -0
  126. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Not.py +0 -0
  127. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/OneHot.py +0 -0
  128. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Or.py +0 -0
  129. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Pad.py +0 -0
  130. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Pow.py +0 -0
  131. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/QLinearAdd.py +0 -0
  132. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/QLinearConcat.py +0 -0
  133. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/QLinearConv.py +0 -0
  134. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/QLinearLeakyRelu.py +0 -0
  135. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/QLinearMatMul.py +0 -0
  136. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/QLinearMul.py +0 -0
  137. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/QLinearSigmoid.py +0 -0
  138. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/QLinearSoftmax.py +0 -0
  139. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/QuantizeLinear.py +0 -0
  140. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/RMSNormalization.py +0 -0
  141. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/RNN.py +0 -0
  142. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/RandomNormal.py +0 -0
  143. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/RandomNormalLike.py +0 -0
  144. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/RandomUniform.py +0 -0
  145. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/RandomUniformLike.py +0 -0
  146. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Range.py +0 -0
  147. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Reciprocal.py +0 -0
  148. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/ReduceL1.py +0 -0
  149. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/ReduceL2.py +0 -0
  150. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/ReduceLogSum.py +0 -0
  151. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/ReduceLogSumExp.py +0 -0
  152. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/ReduceMax.py +0 -0
  153. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/ReduceMean.py +0 -0
  154. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/ReduceMin.py +0 -0
  155. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/ReduceProd.py +0 -0
  156. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/ReduceSum.py +0 -0
  157. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/ReduceSumSquare.py +0 -0
  158. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/RegexFullMatch.py +0 -0
  159. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Relu.py +0 -0
  160. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Reshape.py +0 -0
  161. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Resize.py +0 -0
  162. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/ReverseSequence.py +0 -0
  163. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/RoiAlign.py +0 -0
  164. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/RotaryEmbedding.py +0 -0
  165. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Round.py +0 -0
  166. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/STFT.py +0 -0
  167. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/ScaleAndTranslate.py +0 -0
  168. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Scan.py +0 -0
  169. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Scatter.py +0 -0
  170. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/ScatterElements.py +0 -0
  171. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/ScatterND.py +0 -0
  172. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Selu.py +0 -0
  173. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/SequenceAt.py +0 -0
  174. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/SequenceConstruct.py +0 -0
  175. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/SequenceEmpty.py +0 -0
  176. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/SequenceErase.py +0 -0
  177. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/SequenceInsert.py +0 -0
  178. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/SequenceLength.py +0 -0
  179. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Shape.py +0 -0
  180. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Shrink.py +0 -0
  181. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Sigmoid.py +0 -0
  182. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Sign.py +0 -0
  183. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Sin.py +0 -0
  184. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Sinh.py +0 -0
  185. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Size.py +0 -0
  186. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Slice.py +0 -0
  187. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Softmax.py +0 -0
  188. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/SoftmaxCrossEntropyLoss.py +0 -0
  189. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Softplus.py +0 -0
  190. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Softsign.py +0 -0
  191. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/SpaceToDepth.py +0 -0
  192. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Split.py +0 -0
  193. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/SplitToSequence.py +0 -0
  194. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Sqrt.py +0 -0
  195. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Squeeze.py +0 -0
  196. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/StringConcat.py +0 -0
  197. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/StringNormalizer.py +0 -0
  198. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/StringSplit.py +0 -0
  199. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Sub.py +0 -0
  200. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Sum.py +0 -0
  201. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Tan.py +0 -0
  202. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Tanh.py +0 -0
  203. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/TensorScatter.py +0 -0
  204. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/ThresholdedRelu.py +0 -0
  205. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Tile.py +0 -0
  206. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/TopK.py +0 -0
  207. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Transpose.py +0 -0
  208. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Trilu.py +0 -0
  209. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Unique.py +0 -0
  210. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Unsqueeze.py +0 -0
  211. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Upsample.py +0 -0
  212. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Where.py +0 -0
  213. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/Xor.py +0 -0
  214. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/ops/__init__.py +0 -0
  215. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/utils/__init__.py +0 -0
  216. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/utils/common_functions.py +0 -0
  217. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/utils/enums.py +0 -0
  218. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/utils/iterative_json_optimizer.py +0 -0
  219. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/utils/json_auto_generator.py +0 -0
  220. {onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/utils/logging.py +0 -0
{onnx2tf-1.29.22 → onnx2tf-1.29.24}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: onnx2tf
- Version: 1.29.22
+ Version: 1.29.24
  Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
  Keywords: onnx,tensorflow,tflite,keras,deep-learning,machine-learning
  Author: Katsuya Hyodo
@@ -122,7 +122,7 @@ https://github.com/PINTO0309/onnx2tf/wiki/model_status
  |Cast|:heavy_check_mark:|
  |Ceil|:heavy_check_mark:|
  |Celu|:heavy_check_mark:|
- |CenterCropPad|**Help wanted**|
+ |CenterCropPad|:heavy_check_mark:|
  |Clip|:heavy_check_mark:|
  |Col2Im|:white_check_mark:|
  |Compress|:heavy_check_mark:|
@@ -166,7 +166,7 @@ https://github.com/PINTO0309/onnx2tf/wiki/model_status
  |GreaterOrEqual|:heavy_check_mark:|
  |Greater|:heavy_check_mark:|
  |GridSample|:white_check_mark:|
- |GroupNormalization|**Help wanted**|
+ |GroupNormalization|:heavy_check_mark:|
  |GRU|:heavy_check_mark:|
  |HammingWindow|:white_check_mark:|
  |HannWindow|:white_check_mark:|
@@ -210,7 +210,7 @@ https://github.com/PINTO0309/onnx2tf/wiki/model_status
  |NegativeLogLikelihoodLoss|:heavy_check_mark:|
  |NonMaxSuppression|:heavy_check_mark:|
  |NonZero|:heavy_check_mark:|
- |Optional|**Help wanted**|
+ |Optional|:heavy_check_mark:|
  |OptionalGetElement|:heavy_check_mark:|
  |OptionalHasElement|:heavy_check_mark:|
  |Not|:heavy_check_mark:|
@@ -291,7 +291,7 @@ https://github.com/PINTO0309/onnx2tf/wiki/model_status
  |Tan|:heavy_check_mark:|
  |Tanh|:heavy_check_mark:|
  |TensorScatter|:heavy_check_mark:|
- |TfIdfVectorizer|**Help wanted**|
+ |TfIdfVectorizer|:white_check_mark:|
  |ThresholdedRelu|:heavy_check_mark:|
  |Tile|:heavy_check_mark:|
  |TopK|:heavy_check_mark:|
@@ -365,7 +365,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- ghcr.io/pinto0309/onnx2tf:1.29.22
+ ghcr.io/pinto0309/onnx2tf:1.29.24

  or

@@ -373,7 +373,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- docker.io/pinto0309/onnx2tf:1.29.22
+ docker.io/pinto0309/onnx2tf:1.29.24

  or

@@ -493,13 +493,20 @@ onnx2tf -i resnet18-v1-7.onnx -v info
  # without input OP name.
  # Note that if there are multiple input OPs, the zero dimension of all input OPs is
  # forced to be rewritten.
- # The `-ois` option allows undefined dimensions in all dimensions, including
- # the zero dimensionality, to be overwritten to a static shape, but requires
+ # The `-sh/--shape-hints` option provides shape hints for input tensors with undefined
+ # dimensions, significantly improving the conversion success rate for models with dynamic
+ # input shapes. Specifying this option in combination with the `-b` option will further
+ # improve the success rate of model conversion. The `-sh` option does not change ONNX
+ # input OPs to static shapes.
+ # The `-ois/--overwrite_input_shape` option allows undefined dimensions in all dimensions,
+ # including the zero dimensionality, to be overwritten to a static shape, but requires
  # the input OP name to be specified.
  # e.g. -ois data1:1,3,224,224 data2:1,255 data3:1,224,6
  wget https://github.com/PINTO0309/onnx2tf/releases/download/0.0.2/resnet18-v1-7.onnx
  onnx2tf -i resnet18-v1-7.onnx -b 1
  or
+ onnx2tf -i resnet18-v1-7.onnx -sh data:1,3,224,224 -b 1
+ or
  onnx2tf -i resnet18-v1-7.onnx -ois data:1,3,224,224

  # Suppress automatic transposition of input OPs from NCW, NCHW, NCDHW to NWC, NHWC, NDHWC.
{onnx2tf-1.29.22 → onnx2tf-1.29.24}/README.md
@@ -80,7 +80,7 @@ https://github.com/PINTO0309/onnx2tf/wiki/model_status
  |Cast|:heavy_check_mark:|
  |Ceil|:heavy_check_mark:|
  |Celu|:heavy_check_mark:|
- |CenterCropPad|**Help wanted**|
+ |CenterCropPad|:heavy_check_mark:|
  |Clip|:heavy_check_mark:|
  |Col2Im|:white_check_mark:|
  |Compress|:heavy_check_mark:|
@@ -124,7 +124,7 @@ https://github.com/PINTO0309/onnx2tf/wiki/model_status
  |GreaterOrEqual|:heavy_check_mark:|
  |Greater|:heavy_check_mark:|
  |GridSample|:white_check_mark:|
- |GroupNormalization|**Help wanted**|
+ |GroupNormalization|:heavy_check_mark:|
  |GRU|:heavy_check_mark:|
  |HammingWindow|:white_check_mark:|
  |HannWindow|:white_check_mark:|
@@ -168,7 +168,7 @@ https://github.com/PINTO0309/onnx2tf/wiki/model_status
  |NegativeLogLikelihoodLoss|:heavy_check_mark:|
  |NonMaxSuppression|:heavy_check_mark:|
  |NonZero|:heavy_check_mark:|
- |Optional|**Help wanted**|
+ |Optional|:heavy_check_mark:|
  |OptionalGetElement|:heavy_check_mark:|
  |OptionalHasElement|:heavy_check_mark:|
  |Not|:heavy_check_mark:|
@@ -249,7 +249,7 @@ https://github.com/PINTO0309/onnx2tf/wiki/model_status
  |Tan|:heavy_check_mark:|
  |Tanh|:heavy_check_mark:|
  |TensorScatter|:heavy_check_mark:|
- |TfIdfVectorizer|**Help wanted**|
+ |TfIdfVectorizer|:white_check_mark:|
  |ThresholdedRelu|:heavy_check_mark:|
  |Tile|:heavy_check_mark:|
  |TopK|:heavy_check_mark:|
@@ -323,7 +323,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- ghcr.io/pinto0309/onnx2tf:1.29.22
+ ghcr.io/pinto0309/onnx2tf:1.29.24

  or

@@ -331,7 +331,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- docker.io/pinto0309/onnx2tf:1.29.22
+ docker.io/pinto0309/onnx2tf:1.29.24

  or

@@ -451,13 +451,20 @@ onnx2tf -i resnet18-v1-7.onnx -v info
  # without input OP name.
  # Note that if there are multiple input OPs, the zero dimension of all input OPs is
  # forced to be rewritten.
- # The `-ois` option allows undefined dimensions in all dimensions, including
- # the zero dimensionality, to be overwritten to a static shape, but requires
+ # The `-sh/--shape-hints` option provides shape hints for input tensors with undefined
+ # dimensions, significantly improving the conversion success rate for models with dynamic
+ # input shapes. Specifying this option in combination with the `-b` option will further
+ # improve the success rate of model conversion. The `-sh` option does not change ONNX
+ # input OPs to static shapes.
+ # The `-ois/--overwrite_input_shape` option allows undefined dimensions in all dimensions,
+ # including the zero dimensionality, to be overwritten to a static shape, but requires
  # the input OP name to be specified.
  # e.g. -ois data1:1,3,224,224 data2:1,255 data3:1,224,6
  wget https://github.com/PINTO0309/onnx2tf/releases/download/0.0.2/resnet18-v1-7.onnx
  onnx2tf -i resnet18-v1-7.onnx -b 1
  or
+ onnx2tf -i resnet18-v1-7.onnx -sh data:1,3,224,224 -b 1
+ or
  onnx2tf -i resnet18-v1-7.onnx -ois data:1,3,224,224

  # Suppress automatic transposition of input OPs from NCW, NCHW, NCDHW to NWC, NHWC, NDHWC.
{onnx2tf-1.29.22 → onnx2tf-1.29.24}/onnx2tf/__init__.py
@@ -1,3 +1,3 @@
  from onnx2tf.onnx2tf import convert, main

- __version__ = '1.29.22'
+ __version__ = '1.29.24'
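The package also exposes a programmatic entry point alongside the CLI (the `convert, main` import shown above). A minimal sketch of calling it from Python, assuming the keyword argument names `input_onnx_file_path` and `output_folder_path` as documented in the upstream README (not shown in this diff; verify against your installed version):

from onnx2tf import convert

# Roughly equivalent to the CLI invocations shown in the README diff above.
# Parameter names are assumed from the upstream README, not from this diff.
convert(
    input_onnx_file_path="resnet18-v1-7.onnx",
    output_folder_path="saved_model",
)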
onnx2tf-1.29.24/onnx2tf/ops/CenterCropPad.py (new file)
@@ -0,0 +1,192 @@
+ import random
+ random.seed(0)
+ import numpy as np
+ np.random.seed(0)
+ import tensorflow as tf
+ import tf_keras
+ import onnx_graphsurgeon as gs
+ from onnx2tf.utils.common_functions import (
+     get_constant_or_variable,
+     print_node_info,
+     inverted_operation_enable_disable,
+     make_tf_node_info,
+     get_replacement_parameter,
+     pre_process_transpose,
+     post_process_transpose,
+     convert_axis,
+ )
+
+
+ @print_node_info
+ @inverted_operation_enable_disable
+ @get_replacement_parameter
+ def make_node(
+     *,
+     graph_node: gs.Node,
+     tf_layers_dict: dict,
+     **kwargs: dict,
+ ):
+     """CenterCropPad
+
+     Parameters
+     ----------
+     graph_node: gs.Node
+         graph_surgeon Node
+
+     tf_layers_dict: dict
+         optype, shape, dtype, tensorflow graph
+     """
+     before_op_output_shape_trans_1 = \
+         tf_layers_dict.get(graph_node.inputs[0].name, {}).get('before_op_output_shape_trans', True)
+     before_op_output_shape_trans = \
+         before_op_output_shape_trans_1
+
+     graph_node_input = get_constant_or_variable(
+         graph_node.inputs[0],
+         before_op_output_shape_trans,
+     )
+     graph_node_shape = get_constant_or_variable(
+         graph_node.inputs[1],
+         False,
+     )
+     graph_node_output: gs.Variable = graph_node.outputs[0]
+     shape = graph_node_output.shape
+     dtype = graph_node_output.dtype
+
+     input_tensor = tf_layers_dict[graph_node_input.name]['tf_node'] \
+         if isinstance(graph_node_input, gs.Variable) else graph_node_input
+     target_shape = tf_layers_dict[graph_node_shape.name]['tf_node'] \
+         if isinstance(graph_node_shape, gs.Variable) else graph_node_shape
+
+     # Preserving Graph Structure (Dict)
+     tf_layers_dict[graph_node_output.name] = {
+         'optype': graph_node.op,
+         'shape': shape,
+         'dtype': dtype,
+         'nhwc': tf_layers_dict[graph_node_input.name]['nhwc'] \
+             if isinstance(graph_node_input, gs.Variable) \
+             and 'nhwc' in tf_layers_dict[graph_node_input.name].keys() else False
+     }
+
+     # Pre-process transpose
+     input_tensor = pre_process_transpose(
+         value_before_transpose=input_tensor,
+         param_target='inputs',
+         param_name=graph_node.inputs[0].name,
+         **kwargs,
+     )
+
+     input_rank = input_tensor.shape.rank
+     if input_rank is None:
+         input_rank = tf.rank(input_tensor)
+
+     axes = graph_node.attrs.get('axes', None)
+     if isinstance(axes, np.ndarray):
+         axes = axes.tolist()
+
+     if axes is None:
+         if isinstance(input_rank, int):
+             axes_list = list(range(input_rank))
+             if before_op_output_shape_trans:
+                 axes_list = [
+                     convert_axis(
+                         axis=axis,
+                         tensor_rank=input_rank,
+                         before_op_output_shape_trans=before_op_output_shape_trans,
+                     ) for axis in axes_list
+                 ]
+             axes_tensor = tf.constant(axes_list, dtype=tf.int32)
+         else:
+             rank_t = tf.cast(input_rank, tf.int32)
+             axes_tensor = tf.range(rank_t)
+             if before_op_output_shape_trans:
+                 axes_tensor = tf.where(
+                     tf.equal(axes_tensor, 0),
+                     0,
+                     tf.where(tf.equal(axes_tensor, 1), rank_t - 1, axes_tensor - 1),
+                 )
+     else:
+         if not isinstance(axes, list):
+             axes = [axes]
+         if isinstance(input_rank, int):
+             axes_conv = [
+                 convert_axis(
+                     axis=axis,
+                     tensor_rank=input_rank,
+                     before_op_output_shape_trans=before_op_output_shape_trans,
+                 ) for axis in axes
+             ]
+             axes_tensor = tf.constant(axes_conv, dtype=tf.int32)
+         else:
+             axes_tensor = tf.convert_to_tensor(axes, dtype=tf.int32)
+             if before_op_output_shape_trans:
+                 rank_t = tf.cast(input_rank, tf.int32)
+                 axes_tensor = tf.where(axes_tensor < 0, axes_tensor + rank_t, axes_tensor)
+                 axes_tensor = tf.where(
+                     tf.equal(axes_tensor, 0),
+                     0,
+                     tf.where(tf.equal(axes_tensor, 1), rank_t - 1, axes_tensor - 1),
+                 )
+
+     if isinstance(target_shape, list):
+         target_shape = tf.constant(np.asarray(target_shape, dtype=np.int32))
+     elif isinstance(target_shape, np.ndarray):
+         target_shape = tf.convert_to_tensor(target_shape.astype(np.int32))
+     else:
+         target_shape = tf.cast(target_shape, tf.int32)
+
+     input_shape = tf.shape(input_tensor, out_type=tf.int32)
+     target_shape_full = tf.tensor_scatter_nd_update(
+         input_shape,
+         tf.expand_dims(axes_tensor, axis=1),
+         target_shape,
+     )
+
+     diff = target_shape_full - input_shape
+
+     pad_before = tf.where(diff > 0, tf.math.floordiv(diff, 2), 0)
+     pad_after = tf.where(diff > 0, diff - tf.math.floordiv(diff, 2), 0)
+     crop_before = tf.where(diff < 0, tf.math.floordiv(-diff, 2), 0)
+     crop_after = tf.where(diff < 0, (-diff) - tf.math.floordiv(-diff, 2), 0)
+
+     begin = crop_before
+     size = input_shape - crop_before - crop_after
+     cropped = tf.slice(input_tensor, begin, size)
+
+     paddings = tf.stack([pad_before, pad_after], axis=1)
+     if input_tensor.dtype == tf.string:
+         pad_value = tf.constant('', dtype=tf.string)
+     else:
+         pad_value = tf.cast(0, input_tensor.dtype)
+
+     tf_layers_dict[graph_node_output.name]['tf_node'] = \
+         tf.pad(
+             tensor=cropped,
+             paddings=paddings,
+             constant_values=pad_value,
+             name=graph_node.name,
+         )
+
+     # Post-process transpose
+     tf_layers_dict[graph_node_output.name]['tf_node'] = post_process_transpose(
+         value_before_transpose=tf_layers_dict[graph_node_output.name]['tf_node'],
+         param_target='outputs',
+         param_name=graph_node.outputs[0].name,
+         **kwargs,
+     )
+
+     # Generation of Debug Info
+     tf_layers_dict[graph_node_output.name]['tf_node_info'] = \
+         make_tf_node_info(
+             node_info={
+                 'tf_op_type': 'CenterCropPad',
+                 'tf_inputs': {
+                     'input': input_tensor,
+                     'shape': target_shape,
+                     'axes': axes,
+                 },
+                 'tf_outputs': {
+                     'output': tf_layers_dict[graph_node_output.name]['tf_node'],
+                 },
+             }
+         )
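For readers skimming the new converter above, a minimal standalone sketch of the same crop/pad arithmetic in plain TensorFlow (this is an illustration for this note, not code from the package; the helper name center_crop_pad is hypothetical): dimensions larger than the target are center-cropped, smaller ones are zero-padded, and when the difference is odd the extra element lands on the trailing side, matching the floordiv split used in the converter.

import tensorflow as tf

def center_crop_pad(x: tf.Tensor, target_shape) -> tf.Tensor:
    # Hypothetical reference helper, not part of onnx2tf: crop dimensions that are
    # larger than the target and zero-pad dimensions that are smaller, both centered.
    input_shape = tf.shape(x, out_type=tf.int32)
    target = tf.convert_to_tensor(target_shape, dtype=tf.int32)
    diff = target - input_shape
    crop_before = tf.where(diff < 0, tf.math.floordiv(-diff, 2), 0)
    crop_after = tf.where(diff < 0, (-diff) - tf.math.floordiv(-diff, 2), 0)
    pad_before = tf.where(diff > 0, tf.math.floordiv(diff, 2), 0)
    pad_after = tf.where(diff > 0, diff - tf.math.floordiv(diff, 2), 0)
    cropped = tf.slice(x, crop_before, input_shape - crop_before - crop_after)
    return tf.pad(cropped, tf.stack([pad_before, pad_after], axis=1))

x = tf.reshape(tf.range(20, dtype=tf.float32), [4, 5])
print(center_crop_pad(x, [2, 7]).shape)  # (2, 7): rows center-cropped 4->2, cols zero-padded 5->7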
onnx2tf-1.29.24/onnx2tf/ops/GroupNormalization.py (new file)
@@ -0,0 +1,234 @@
+ import random
+ random.seed(0)
+ import numpy as np
+ np.random.seed(0)
+ import tensorflow as tf
+ import tf_keras
+ import onnx_graphsurgeon as gs
+ from onnx2tf.utils.common_functions import (
+     get_constant_or_variable,
+     print_node_info,
+     inverted_operation_enable_disable,
+     make_tf_node_info,
+     get_replacement_parameter,
+     pre_process_transpose,
+     post_process_transpose,
+     transpose_with_flexing_deterrence,
+ )
+ from onnx2tf.utils.enums import NUMPY_DTYPES_TO_TF_DTYPES
+
+
+ @print_node_info
+ @inverted_operation_enable_disable
+ @get_replacement_parameter
+ def make_node(
+     *,
+     graph_node: gs.Node,
+     tf_layers_dict: dict,
+     **kwargs: dict,
+ ):
+     """GroupNormalization
+
+     Parameters
+     ----------
+     graph_node: gs.Node
+         graph_surgeon Node
+
+     tf_layers_dict: dict
+         optype, shape, dtype, tensorflow graph
+     """
+     before_op_output_shape_trans_1 = \
+         tf_layers_dict.get(graph_node.inputs[0].name, {}).get('before_op_output_shape_trans', True)
+     before_op_output_shape_trans_2 = \
+         tf_layers_dict.get(graph_node.inputs[1].name, {}).get('before_op_output_shape_trans', True)
+     before_op_output_shape_trans_3 = \
+         tf_layers_dict.get(graph_node.inputs[2].name, {}).get('before_op_output_shape_trans', True)
+     before_op_output_shape_trans = \
+         before_op_output_shape_trans_1 \
+         and before_op_output_shape_trans_2 \
+         and before_op_output_shape_trans_3
+
+     graph_node_input = get_constant_or_variable(
+         graph_node.inputs[0],
+         before_op_output_shape_trans,
+     )
+     input_tensor = tf_layers_dict[graph_node_input.name]['tf_node'] \
+         if isinstance(graph_node_input, gs.Variable) else graph_node_input
+
+     # Pre-process transpose
+     input_tensor = pre_process_transpose(
+         value_before_transpose=input_tensor,
+         param_target='inputs',
+         param_name=graph_node.inputs[0].name,
+         **kwargs,
+     )
+
+     scale = get_constant_or_variable(
+         graph_node.inputs[1],
+         before_op_output_shape_trans \
+             if graph_node.inputs[1].shape is not None and len(graph_node.inputs[1].shape) != 1 else False,
+         is_bias=True,
+     )
+     scale_dtype = NUMPY_DTYPES_TO_TF_DTYPES[scale.dtype] \
+         if isinstance(scale.dtype, np.dtype) else scale.dtype
+     scale = tf.convert_to_tensor(scale, dtype=scale_dtype) \
+         if isinstance(scale, np.ndarray) else scale
+
+     bias = get_constant_or_variable(
+         graph_node.inputs[2],
+         before_op_output_shape_trans \
+             if graph_node.inputs[2].shape is not None and len(graph_node.inputs[2].shape) != 1 else False,
+         is_bias=True,
+     )
+     bias_dtype = NUMPY_DTYPES_TO_TF_DTYPES[bias.dtype] \
+         if isinstance(bias.dtype, np.dtype) else bias.dtype
+     bias = tf.convert_to_tensor(bias, dtype=bias_dtype) \
+         if isinstance(bias, np.ndarray) else bias
+
+     graph_node_output: gs.Variable = graph_node.outputs[0]
+     shape = graph_node_output.shape
+     dtype = graph_node_output.dtype
+
+     epsilon = graph_node.attrs.get('epsilon', 1e-05)
+     num_groups = int(graph_node.attrs.get('num_groups', 1))
+     stash_type = int(graph_node.attrs.get('stash_type', 1))
+     opset = kwargs.get('opset', None)
+
+     # Preserving Graph Structure (Dict)
+     tf_layers_dict[graph_node_output.name] = {
+         'optype': graph_node.op,
+         'shape': shape,
+         'dtype': dtype,
+         'nhwc': tf_layers_dict[graph_node_input.name]['nhwc'] \
+             if isinstance(graph_node_input, gs.Variable) \
+             and 'nhwc' in tf_layers_dict[graph_node_input.name].keys() else False
+     }
+
+     input_rank = input_tensor.shape.rank
+     if input_rank is None:
+         input_rank = tf.rank(input_tensor)
+
+     channel_axis = -1 if before_op_output_shape_trans else 1
+     channel_axis_idx = channel_axis
+     if isinstance(input_rank, int):
+         channel_axis_idx = channel_axis if channel_axis >= 0 else input_rank + channel_axis
+
+     internal_perm = None
+     internal_inverse_perm = None
+     if isinstance(input_rank, int) and channel_axis_idx != (input_rank - 1):
+         perm = [i for i in range(input_rank) if i != channel_axis_idx] + [channel_axis_idx]
+         internal_perm = perm
+         internal_inverse_perm = [0] * input_rank
+         for i, p in enumerate(perm):
+             internal_inverse_perm[p] = i
+     elif not isinstance(input_rank, int) and channel_axis != -1:
+         rank_t = tf.cast(input_rank, tf.int32)
+         perm = tf.concat([
+             tf.range(channel_axis),
+             tf.range(channel_axis + 1, rank_t),
+             [channel_axis],
+         ], axis=0)
+         internal_perm = perm
+         internal_inverse_perm = tf.argsort(perm)
+
+     x = input_tensor
+     if internal_perm is not None:
+         x = transpose_with_flexing_deterrence(
+             input_tensor=x,
+             perm=internal_perm,
+             **kwargs,
+         )
+
+     input_dtype = x.dtype
+     calc_dtype = tf.float32 if stash_type == 1 else input_dtype
+     x = tf.cast(x, calc_dtype)
+
+     x_shape = tf.shape(x, out_type=tf.int32)
+     channels = x_shape[-1]
+     group_size = tf.math.floordiv(channels, num_groups)
+
+     group_shape = tf.stack([num_groups, group_size], axis=0)
+     new_shape = tf.concat([x_shape[:-1], group_shape], axis=0)
+     x_grouped = tf.reshape(x, new_shape)
+
+     rank_with_group = tf.rank(x_grouped)
+     spatial_axes = tf.range(1, rank_with_group - 2)
+     reduce_axes = tf.concat(
+         [spatial_axes, tf.expand_dims(rank_with_group - 1, axis=0)],
+         axis=0,
+     )
+
+     mean, variance = tf.nn.moments(x_grouped, axes=reduce_axes, keepdims=True)
+     x_norm = (x_grouped - mean) * tf.math.rsqrt(variance + tf.cast(epsilon, calc_dtype))
+     x_norm = tf.cast(x_norm, input_dtype)
+
+     if opset is not None and opset < 21:
+         rank_with_group = x_grouped.shape.rank
+         if rank_with_group is not None:
+             scale_shape = [1] * (rank_with_group - 2) + [num_groups, 1]
+             scale_group = tf.reshape(scale, scale_shape)
+             bias_group = tf.reshape(bias, scale_shape)
+         else:
+             rank_with_group = tf.rank(x_grouped)
+             prefix_ones = tf.fill([rank_with_group - 2], 1)
+             scale_shape = tf.concat(
+                 [prefix_ones, tf.constant([num_groups, 1], dtype=tf.int32)],
+                 axis=0,
+             )
+             scale_group = tf.reshape(scale, scale_shape)
+             bias_group = tf.reshape(bias, scale_shape)
+         x_norm = x_norm * tf.cast(scale_group, input_dtype) + tf.cast(bias_group, input_dtype)
+
+     x_norm = tf.reshape(x_norm, x_shape)
+
+     if opset is None or opset >= 21:
+         rank_out = x_norm.shape.rank
+         if rank_out is not None:
+             scale_reshape = tf.reshape(scale, [1] * (rank_out - 1) + [-1])
+             bias_reshape = tf.reshape(bias, [1] * (rank_out - 1) + [-1])
+         else:
+             rank_out = tf.rank(x_norm)
+             prefix_ones = tf.fill([rank_out - 1], 1)
+             scale_shape = tf.concat(
+                 [prefix_ones, tf.constant([-1], dtype=tf.int32)],
+                 axis=0,
+             )
+             scale_reshape = tf.reshape(scale, scale_shape)
+             bias_reshape = tf.reshape(bias, scale_shape)
+         x_norm = x_norm * tf.cast(scale_reshape, input_dtype) + tf.cast(bias_reshape, input_dtype)
+
+     if internal_inverse_perm is not None:
+         x_norm = transpose_with_flexing_deterrence(
+             input_tensor=x_norm,
+             perm=internal_inverse_perm,
+             **kwargs,
+         )
+
+     tf_layers_dict[graph_node_output.name]['tf_node'] = x_norm
+
+     # Post-process transpose
+     tf_layers_dict[graph_node_output.name]['tf_node'] = post_process_transpose(
+         value_before_transpose=tf_layers_dict[graph_node_output.name]['tf_node'],
+         param_target='outputs',
+         param_name=graph_node.outputs[0].name,
+         **kwargs,
+     )
+
+     # Generation of Debug Info
+     tf_layers_dict[graph_node_output.name]['tf_node_info'] = \
+         make_tf_node_info(
+             node_info={
+                 'tf_op_type': 'GroupNormalization',
+                 'tf_inputs': {
+                     'x': input_tensor,
+                     'scale': scale,
+                     'bias': bias,
+                     'num_groups': num_groups,
+                     'epsilon': epsilon,
+                     'stash_type': stash_type,
+                 },
+                 'tf_outputs': {
+                     'output': tf_layers_dict[graph_node_output.name]['tf_node'],
+                 },
+             }
+         )
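As a rough sanity check on the grouped-moments approach used in the converter above, here is a minimal channels-last sketch (an illustration for this note, not code from the package; the helper name group_norm_nhwc and the toy shapes are hypothetical, and it mirrors the per-channel scale/bias path used for opset 21 and later, not the per-group path for older opsets): the channel axis is reshaped into (num_groups, group_size), mean and variance are taken over the spatial axes plus the within-group axis, and per-channel scale and bias are applied after reshaping back.

import tensorflow as tf

def group_norm_nhwc(x, scale, bias, num_groups, epsilon=1e-5):
    # Hypothetical NHWC reference, not part of onnx2tf; scale/bias are per-channel [C].
    n, h, w, c = x.shape
    xg = tf.reshape(x, [n, h, w, num_groups, c // num_groups])
    # Normalize over H, W and the within-group channel axis, per (batch, group).
    mean, var = tf.nn.moments(xg, axes=[1, 2, 4], keepdims=True)
    xg = (xg - mean) * tf.math.rsqrt(var + epsilon)
    x = tf.reshape(xg, [n, h, w, c])
    return x * tf.reshape(scale, [1, 1, 1, c]) + tf.reshape(bias, [1, 1, 1, c])

x = tf.random.normal([2, 8, 8, 6])
y = group_norm_nhwc(x, scale=tf.ones([6]), bias=tf.zeros([6]), num_groups=3)
print(y.shape)  # (2, 8, 8, 6)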