onnx2tf 1.29.23__tar.gz → 1.29.24__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/PKG-INFO +7 -7
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/README.md +6 -6
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/__init__.py +1 -1
- onnx2tf-1.29.24/onnx2tf/ops/CenterCropPad.py +192 -0
- onnx2tf-1.29.24/onnx2tf/ops/GroupNormalization.py +234 -0
- onnx2tf-1.29.24/onnx2tf/ops/Optional.py +127 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/OptionalGetElement.py +3 -13
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/OptionalHasElement.py +3 -13
- onnx2tf-1.29.24/onnx2tf/ops/TfIdfVectorizer.py +431 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/pyproject.toml +1 -1
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/__main__.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/onnx2tf.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Abs.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Acos.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Acosh.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Add.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/AffineGrid.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/And.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/ArgMax.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/ArgMin.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Asin.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Asinh.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Atan.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Atanh.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Attention.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/AveragePool.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/BatchNormalization.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Bernoulli.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/BitShift.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/BitwiseAnd.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/BitwiseNot.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/BitwiseOr.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/BitwiseXor.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/BlackmanWindow.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Cast.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Ceil.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Celu.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Clip.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Col2Im.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Compress.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Concat.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/ConcatFromSequence.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Constant.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/ConstantOfShape.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Conv.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/ConvInteger.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/ConvTranspose.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Cos.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Cosh.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/CumProd.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/CumSum.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/DFT.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/DeformConv.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/DepthToSpace.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/DequantizeLinear.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Det.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Div.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Dropout.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/DynamicQuantizeLinear.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Einsum.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Elu.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Equal.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Erf.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Exp.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Expand.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/EyeLike.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Flatten.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Floor.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/FusedConv.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/GRU.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Gather.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/GatherElements.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/GatherND.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Gelu.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Gemm.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/GlobalAveragePool.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/GlobalLpPool.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/GlobalMaxPool.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Greater.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/GreaterOrEqual.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/GridSample.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/GroupNorm.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/HammingWindow.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/HannWindow.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/HardSigmoid.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/HardSwish.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Hardmax.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Identity.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/If.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/ImageDecoder.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Input.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/InstanceNormalization.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Inverse.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/IsInf.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/IsNaN.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/LRN.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/LSTM.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/LayerNormalization.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/LeakyRelu.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Less.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/LessOrEqual.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Log.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/LogSoftmax.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Loop.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/LpNormalization.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/LpPool.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/MatMul.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/MatMulInteger.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Max.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/MaxPool.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/MaxRoiPool.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/MaxUnpool.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Mean.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/MeanVarianceNormalization.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/MelWeightMatrix.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Min.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Mish.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Mod.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Mul.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Multinomial.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Neg.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/NegativeLogLikelihoodLoss.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/NonMaxSuppression.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/NonZero.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Not.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/OneHot.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Or.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/PRelu.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Pad.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Pow.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/QLinearAdd.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/QLinearConcat.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/QLinearConv.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/QLinearLeakyRelu.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/QLinearMatMul.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/QLinearMul.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/QLinearSigmoid.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/QLinearSoftmax.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/QuantizeLinear.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/RMSNormalization.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/RNN.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/RandomNormal.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/RandomNormalLike.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/RandomUniform.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/RandomUniformLike.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Range.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Reciprocal.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/ReduceL1.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/ReduceL2.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/ReduceLogSum.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/ReduceLogSumExp.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/ReduceMax.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/ReduceMean.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/ReduceMin.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/ReduceProd.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/ReduceSum.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/ReduceSumSquare.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/RegexFullMatch.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Relu.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Reshape.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Resize.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/ReverseSequence.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/RoiAlign.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/RotaryEmbedding.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Round.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/STFT.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/ScaleAndTranslate.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Scan.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Scatter.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/ScatterElements.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/ScatterND.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Selu.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/SequenceAt.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/SequenceConstruct.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/SequenceEmpty.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/SequenceErase.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/SequenceInsert.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/SequenceLength.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Shape.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Shrink.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Sigmoid.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Sign.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Sin.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Sinh.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Size.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Slice.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Softmax.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/SoftmaxCrossEntropyLoss.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Softplus.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Softsign.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/SpaceToDepth.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Split.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/SplitToSequence.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Sqrt.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Squeeze.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/StringConcat.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/StringNormalizer.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/StringSplit.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Sub.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Sum.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Tan.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Tanh.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/TensorScatter.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/ThresholdedRelu.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Tile.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/TopK.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Transpose.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Trilu.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Unique.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Unsqueeze.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Upsample.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Where.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/Xor.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/ops/__init__.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/utils/__init__.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/utils/common_functions.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/utils/enums.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/utils/iterative_json_optimizer.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/utils/json_auto_generator.py +0 -0
- {onnx2tf-1.29.23 → onnx2tf-1.29.24}/onnx2tf/utils/logging.py +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: onnx2tf
|
|
3
|
-
Version: 1.29.
|
|
3
|
+
Version: 1.29.24
|
|
4
4
|
Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
|
|
5
5
|
Keywords: onnx,tensorflow,tflite,keras,deep-learning,machine-learning
|
|
6
6
|
Author: Katsuya Hyodo
|
|
@@ -122,7 +122,7 @@ https://github.com/PINTO0309/onnx2tf/wiki/model_status
|
|
|
122
122
|
|Cast|:heavy_check_mark:|
|
|
123
123
|
|Ceil|:heavy_check_mark:|
|
|
124
124
|
|Celu|:heavy_check_mark:|
|
|
125
|
-
|CenterCropPad
|
|
125
|
+
|CenterCropPad|:heavy_check_mark:|
|
|
126
126
|
|Clip|:heavy_check_mark:|
|
|
127
127
|
|Col2Im|:white_check_mark:|
|
|
128
128
|
|Compress|:heavy_check_mark:|
|
|
@@ -166,7 +166,7 @@ https://github.com/PINTO0309/onnx2tf/wiki/model_status
|
|
|
166
166
|
|GreaterOrEqual|:heavy_check_mark:|
|
|
167
167
|
|Greater|:heavy_check_mark:|
|
|
168
168
|
|GridSample|:white_check_mark:|
|
|
169
|
-
|GroupNormalization
|
|
169
|
+
|GroupNormalization|:heavy_check_mark:|
|
|
170
170
|
|GRU|:heavy_check_mark:|
|
|
171
171
|
|HammingWindow|:white_check_mark:|
|
|
172
172
|
|HannWindow|:white_check_mark:|
|
|
@@ -210,7 +210,7 @@ https://github.com/PINTO0309/onnx2tf/wiki/model_status
|
|
|
210
210
|
|NegativeLogLikelihoodLoss|:heavy_check_mark:|
|
|
211
211
|
|NonMaxSuppression|:heavy_check_mark:|
|
|
212
212
|
|NonZero|:heavy_check_mark:|
|
|
213
|
-
|Optional
|
|
213
|
+
|Optional|:heavy_check_mark:|
|
|
214
214
|
|OptionalGetElement|:heavy_check_mark:|
|
|
215
215
|
|OptionalHasElement|:heavy_check_mark:|
|
|
216
216
|
|Not|:heavy_check_mark:|
|
|
@@ -291,7 +291,7 @@ https://github.com/PINTO0309/onnx2tf/wiki/model_status
|
|
|
291
291
|
|Tan|:heavy_check_mark:|
|
|
292
292
|
|Tanh|:heavy_check_mark:|
|
|
293
293
|
|TensorScatter|:heavy_check_mark:|
|
|
294
|
-
|TfIdfVectorizer
|
|
294
|
+
|TfIdfVectorizer|:white_check_mark:|
|
|
295
295
|
|ThresholdedRelu|:heavy_check_mark:|
|
|
296
296
|
|Tile|:heavy_check_mark:|
|
|
297
297
|
|TopK|:heavy_check_mark:|
|
|
@@ -365,7 +365,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
|
|
|
365
365
|
docker run --rm -it \
|
|
366
366
|
-v `pwd`:/workdir \
|
|
367
367
|
-w /workdir \
|
|
368
|
-
ghcr.io/pinto0309/onnx2tf:1.29.
|
|
368
|
+
ghcr.io/pinto0309/onnx2tf:1.29.24
|
|
369
369
|
|
|
370
370
|
or
|
|
371
371
|
|
|
@@ -373,7 +373,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
|
|
|
373
373
|
docker run --rm -it \
|
|
374
374
|
-v `pwd`:/workdir \
|
|
375
375
|
-w /workdir \
|
|
376
|
-
docker.io/pinto0309/onnx2tf:1.29.
|
|
376
|
+
docker.io/pinto0309/onnx2tf:1.29.24
|
|
377
377
|
|
|
378
378
|
or
|
|
379
379
|
|
|
@@ -80,7 +80,7 @@ https://github.com/PINTO0309/onnx2tf/wiki/model_status
|
|
|
80
80
|
|Cast|:heavy_check_mark:|
|
|
81
81
|
|Ceil|:heavy_check_mark:|
|
|
82
82
|
|Celu|:heavy_check_mark:|
|
|
83
|
-
|CenterCropPad
|
|
83
|
+
|CenterCropPad|:heavy_check_mark:|
|
|
84
84
|
|Clip|:heavy_check_mark:|
|
|
85
85
|
|Col2Im|:white_check_mark:|
|
|
86
86
|
|Compress|:heavy_check_mark:|
|
|
@@ -124,7 +124,7 @@ https://github.com/PINTO0309/onnx2tf/wiki/model_status
|
|
|
124
124
|
|GreaterOrEqual|:heavy_check_mark:|
|
|
125
125
|
|Greater|:heavy_check_mark:|
|
|
126
126
|
|GridSample|:white_check_mark:|
|
|
127
|
-
|GroupNormalization
|
|
127
|
+
|GroupNormalization|:heavy_check_mark:|
|
|
128
128
|
|GRU|:heavy_check_mark:|
|
|
129
129
|
|HammingWindow|:white_check_mark:|
|
|
130
130
|
|HannWindow|:white_check_mark:|
|
|
@@ -168,7 +168,7 @@ https://github.com/PINTO0309/onnx2tf/wiki/model_status
|
|
|
168
168
|
|NegativeLogLikelihoodLoss|:heavy_check_mark:|
|
|
169
169
|
|NonMaxSuppression|:heavy_check_mark:|
|
|
170
170
|
|NonZero|:heavy_check_mark:|
|
|
171
|
-
|Optional
|
|
171
|
+
|Optional|:heavy_check_mark:|
|
|
172
172
|
|OptionalGetElement|:heavy_check_mark:|
|
|
173
173
|
|OptionalHasElement|:heavy_check_mark:|
|
|
174
174
|
|Not|:heavy_check_mark:|
|
|
@@ -249,7 +249,7 @@ https://github.com/PINTO0309/onnx2tf/wiki/model_status
|
|
|
249
249
|
|Tan|:heavy_check_mark:|
|
|
250
250
|
|Tanh|:heavy_check_mark:|
|
|
251
251
|
|TensorScatter|:heavy_check_mark:|
|
|
252
|
-
|TfIdfVectorizer
|
|
252
|
+
|TfIdfVectorizer|:white_check_mark:|
|
|
253
253
|
|ThresholdedRelu|:heavy_check_mark:|
|
|
254
254
|
|Tile|:heavy_check_mark:|
|
|
255
255
|
|TopK|:heavy_check_mark:|
|
|
@@ -323,7 +323,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
|
|
|
323
323
|
docker run --rm -it \
|
|
324
324
|
-v `pwd`:/workdir \
|
|
325
325
|
-w /workdir \
|
|
326
|
-
ghcr.io/pinto0309/onnx2tf:1.29.
|
|
326
|
+
ghcr.io/pinto0309/onnx2tf:1.29.24
|
|
327
327
|
|
|
328
328
|
or
|
|
329
329
|
|
|
@@ -331,7 +331,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
|
|
|
331
331
|
docker run --rm -it \
|
|
332
332
|
-v `pwd`:/workdir \
|
|
333
333
|
-w /workdir \
|
|
334
|
-
docker.io/pinto0309/onnx2tf:1.29.
|
|
334
|
+
docker.io/pinto0309/onnx2tf:1.29.24
|
|
335
335
|
|
|
336
336
|
or
|
|
337
337
|
|
|
@@ -0,0 +1,192 @@
|
|
|
1
|
+
import random
|
|
2
|
+
random.seed(0)
|
|
3
|
+
import numpy as np
|
|
4
|
+
np.random.seed(0)
|
|
5
|
+
import tensorflow as tf
|
|
6
|
+
import tf_keras
|
|
7
|
+
import onnx_graphsurgeon as gs
|
|
8
|
+
from onnx2tf.utils.common_functions import (
|
|
9
|
+
get_constant_or_variable,
|
|
10
|
+
print_node_info,
|
|
11
|
+
inverted_operation_enable_disable,
|
|
12
|
+
make_tf_node_info,
|
|
13
|
+
get_replacement_parameter,
|
|
14
|
+
pre_process_transpose,
|
|
15
|
+
post_process_transpose,
|
|
16
|
+
convert_axis,
|
|
17
|
+
)
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
@print_node_info
|
|
21
|
+
@inverted_operation_enable_disable
|
|
22
|
+
@get_replacement_parameter
|
|
23
|
+
def make_node(
|
|
24
|
+
*,
|
|
25
|
+
graph_node: gs.Node,
|
|
26
|
+
tf_layers_dict: dict,
|
|
27
|
+
**kwargs: dict,
|
|
28
|
+
):
|
|
29
|
+
"""CenterCropPad
|
|
30
|
+
|
|
31
|
+
Parameters
|
|
32
|
+
----------
|
|
33
|
+
graph_node: gs.Node
|
|
34
|
+
graph_surgeon Node
|
|
35
|
+
|
|
36
|
+
tf_layers_dict: dict
|
|
37
|
+
optype, shape, dtype, tensorflow graph
|
|
38
|
+
"""
|
|
39
|
+
before_op_output_shape_trans_1 = \
|
|
40
|
+
tf_layers_dict.get(graph_node.inputs[0].name, {}).get('before_op_output_shape_trans', True)
|
|
41
|
+
before_op_output_shape_trans = \
|
|
42
|
+
before_op_output_shape_trans_1
|
|
43
|
+
|
|
44
|
+
graph_node_input = get_constant_or_variable(
|
|
45
|
+
graph_node.inputs[0],
|
|
46
|
+
before_op_output_shape_trans,
|
|
47
|
+
)
|
|
48
|
+
graph_node_shape = get_constant_or_variable(
|
|
49
|
+
graph_node.inputs[1],
|
|
50
|
+
False,
|
|
51
|
+
)
|
|
52
|
+
graph_node_output: gs.Variable = graph_node.outputs[0]
|
|
53
|
+
shape = graph_node_output.shape
|
|
54
|
+
dtype = graph_node_output.dtype
|
|
55
|
+
|
|
56
|
+
input_tensor = tf_layers_dict[graph_node_input.name]['tf_node'] \
|
|
57
|
+
if isinstance(graph_node_input, gs.Variable) else graph_node_input
|
|
58
|
+
target_shape = tf_layers_dict[graph_node_shape.name]['tf_node'] \
|
|
59
|
+
if isinstance(graph_node_shape, gs.Variable) else graph_node_shape
|
|
60
|
+
|
|
61
|
+
# Preserving Graph Structure (Dict)
|
|
62
|
+
tf_layers_dict[graph_node_output.name] = {
|
|
63
|
+
'optype': graph_node.op,
|
|
64
|
+
'shape': shape,
|
|
65
|
+
'dtype': dtype,
|
|
66
|
+
'nhwc': tf_layers_dict[graph_node_input.name]['nhwc'] \
|
|
67
|
+
if isinstance(graph_node_input, gs.Variable) \
|
|
68
|
+
and 'nhwc' in tf_layers_dict[graph_node_input.name].keys() else False
|
|
69
|
+
}
|
|
70
|
+
|
|
71
|
+
# Pre-process transpose
|
|
72
|
+
input_tensor = pre_process_transpose(
|
|
73
|
+
value_before_transpose=input_tensor,
|
|
74
|
+
param_target='inputs',
|
|
75
|
+
param_name=graph_node.inputs[0].name,
|
|
76
|
+
**kwargs,
|
|
77
|
+
)
|
|
78
|
+
|
|
79
|
+
input_rank = input_tensor.shape.rank
|
|
80
|
+
if input_rank is None:
|
|
81
|
+
input_rank = tf.rank(input_tensor)
|
|
82
|
+
|
|
83
|
+
axes = graph_node.attrs.get('axes', None)
|
|
84
|
+
if isinstance(axes, np.ndarray):
|
|
85
|
+
axes = axes.tolist()
|
|
86
|
+
|
|
87
|
+
if axes is None:
|
|
88
|
+
if isinstance(input_rank, int):
|
|
89
|
+
axes_list = list(range(input_rank))
|
|
90
|
+
if before_op_output_shape_trans:
|
|
91
|
+
axes_list = [
|
|
92
|
+
convert_axis(
|
|
93
|
+
axis=axis,
|
|
94
|
+
tensor_rank=input_rank,
|
|
95
|
+
before_op_output_shape_trans=before_op_output_shape_trans,
|
|
96
|
+
) for axis in axes_list
|
|
97
|
+
]
|
|
98
|
+
axes_tensor = tf.constant(axes_list, dtype=tf.int32)
|
|
99
|
+
else:
|
|
100
|
+
rank_t = tf.cast(input_rank, tf.int32)
|
|
101
|
+
axes_tensor = tf.range(rank_t)
|
|
102
|
+
if before_op_output_shape_trans:
|
|
103
|
+
axes_tensor = tf.where(
|
|
104
|
+
tf.equal(axes_tensor, 0),
|
|
105
|
+
0,
|
|
106
|
+
tf.where(tf.equal(axes_tensor, 1), rank_t - 1, axes_tensor - 1),
|
|
107
|
+
)
|
|
108
|
+
else:
|
|
109
|
+
if not isinstance(axes, list):
|
|
110
|
+
axes = [axes]
|
|
111
|
+
if isinstance(input_rank, int):
|
|
112
|
+
axes_conv = [
|
|
113
|
+
convert_axis(
|
|
114
|
+
axis=axis,
|
|
115
|
+
tensor_rank=input_rank,
|
|
116
|
+
before_op_output_shape_trans=before_op_output_shape_trans,
|
|
117
|
+
) for axis in axes
|
|
118
|
+
]
|
|
119
|
+
axes_tensor = tf.constant(axes_conv, dtype=tf.int32)
|
|
120
|
+
else:
|
|
121
|
+
axes_tensor = tf.convert_to_tensor(axes, dtype=tf.int32)
|
|
122
|
+
if before_op_output_shape_trans:
|
|
123
|
+
rank_t = tf.cast(input_rank, tf.int32)
|
|
124
|
+
axes_tensor = tf.where(axes_tensor < 0, axes_tensor + rank_t, axes_tensor)
|
|
125
|
+
axes_tensor = tf.where(
|
|
126
|
+
tf.equal(axes_tensor, 0),
|
|
127
|
+
0,
|
|
128
|
+
tf.where(tf.equal(axes_tensor, 1), rank_t - 1, axes_tensor - 1),
|
|
129
|
+
)
|
|
130
|
+
|
|
131
|
+
if isinstance(target_shape, list):
|
|
132
|
+
target_shape = tf.constant(np.asarray(target_shape, dtype=np.int32))
|
|
133
|
+
elif isinstance(target_shape, np.ndarray):
|
|
134
|
+
target_shape = tf.convert_to_tensor(target_shape.astype(np.int32))
|
|
135
|
+
else:
|
|
136
|
+
target_shape = tf.cast(target_shape, tf.int32)
|
|
137
|
+
|
|
138
|
+
input_shape = tf.shape(input_tensor, out_type=tf.int32)
|
|
139
|
+
target_shape_full = tf.tensor_scatter_nd_update(
|
|
140
|
+
input_shape,
|
|
141
|
+
tf.expand_dims(axes_tensor, axis=1),
|
|
142
|
+
target_shape,
|
|
143
|
+
)
|
|
144
|
+
|
|
145
|
+
diff = target_shape_full - input_shape
|
|
146
|
+
|
|
147
|
+
pad_before = tf.where(diff > 0, tf.math.floordiv(diff, 2), 0)
|
|
148
|
+
pad_after = tf.where(diff > 0, diff - tf.math.floordiv(diff, 2), 0)
|
|
149
|
+
crop_before = tf.where(diff < 0, tf.math.floordiv(-diff, 2), 0)
|
|
150
|
+
crop_after = tf.where(diff < 0, (-diff) - tf.math.floordiv(-diff, 2), 0)
|
|
151
|
+
|
|
152
|
+
begin = crop_before
|
|
153
|
+
size = input_shape - crop_before - crop_after
|
|
154
|
+
cropped = tf.slice(input_tensor, begin, size)
|
|
155
|
+
|
|
156
|
+
paddings = tf.stack([pad_before, pad_after], axis=1)
|
|
157
|
+
if input_tensor.dtype == tf.string:
|
|
158
|
+
pad_value = tf.constant('', dtype=tf.string)
|
|
159
|
+
else:
|
|
160
|
+
pad_value = tf.cast(0, input_tensor.dtype)
|
|
161
|
+
|
|
162
|
+
tf_layers_dict[graph_node_output.name]['tf_node'] = \
|
|
163
|
+
tf.pad(
|
|
164
|
+
tensor=cropped,
|
|
165
|
+
paddings=paddings,
|
|
166
|
+
constant_values=pad_value,
|
|
167
|
+
name=graph_node.name,
|
|
168
|
+
)
|
|
169
|
+
|
|
170
|
+
# Post-process transpose
|
|
171
|
+
tf_layers_dict[graph_node_output.name]['tf_node'] = post_process_transpose(
|
|
172
|
+
value_before_transpose=tf_layers_dict[graph_node_output.name]['tf_node'],
|
|
173
|
+
param_target='outputs',
|
|
174
|
+
param_name=graph_node.outputs[0].name,
|
|
175
|
+
**kwargs,
|
|
176
|
+
)
|
|
177
|
+
|
|
178
|
+
# Generation of Debug Info
|
|
179
|
+
tf_layers_dict[graph_node_output.name]['tf_node_info'] = \
|
|
180
|
+
make_tf_node_info(
|
|
181
|
+
node_info={
|
|
182
|
+
'tf_op_type': 'CenterCropPad',
|
|
183
|
+
'tf_inputs': {
|
|
184
|
+
'input': input_tensor,
|
|
185
|
+
'shape': target_shape,
|
|
186
|
+
'axes': axes,
|
|
187
|
+
},
|
|
188
|
+
'tf_outputs': {
|
|
189
|
+
'output': tf_layers_dict[graph_node_output.name]['tf_node'],
|
|
190
|
+
},
|
|
191
|
+
}
|
|
192
|
+
)
|
|
@@ -0,0 +1,234 @@
|
|
|
1
|
+
import random
|
|
2
|
+
random.seed(0)
|
|
3
|
+
import numpy as np
|
|
4
|
+
np.random.seed(0)
|
|
5
|
+
import tensorflow as tf
|
|
6
|
+
import tf_keras
|
|
7
|
+
import onnx_graphsurgeon as gs
|
|
8
|
+
from onnx2tf.utils.common_functions import (
|
|
9
|
+
get_constant_or_variable,
|
|
10
|
+
print_node_info,
|
|
11
|
+
inverted_operation_enable_disable,
|
|
12
|
+
make_tf_node_info,
|
|
13
|
+
get_replacement_parameter,
|
|
14
|
+
pre_process_transpose,
|
|
15
|
+
post_process_transpose,
|
|
16
|
+
transpose_with_flexing_deterrence,
|
|
17
|
+
)
|
|
18
|
+
from onnx2tf.utils.enums import NUMPY_DTYPES_TO_TF_DTYPES
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
@print_node_info
@inverted_operation_enable_disable
@get_replacement_parameter
def make_node(
    *,
    graph_node: gs.Node,
    tf_layers_dict: dict,
    **kwargs: dict,
):
    """GroupNormalization

    Lower an ONNX GroupNormalization node to TensorFlow ops.
    The channel axis is moved to the last position (when it is not
    already there), channels are reshaped into
    ``[..., num_groups, group_size]``, per-group mean/variance are
    computed with ``tf.nn.moments``, and the normalized result is
    scaled, shifted, and restored to the original layout.

    Parameters
    ----------
    graph_node: gs.Node
        graph_surgeon Node

    tf_layers_dict: dict
        optype, shape, dtype, tensorflow graph
    """
    # The NCHW->NHWC pre-transposition is only considered applied when
    # every input of this node was already transposed upstream.
    before_op_output_shape_trans_1 = \
        tf_layers_dict.get(graph_node.inputs[0].name, {}).get('before_op_output_shape_trans', True)
    before_op_output_shape_trans_2 = \
        tf_layers_dict.get(graph_node.inputs[1].name, {}).get('before_op_output_shape_trans', True)
    before_op_output_shape_trans_3 = \
        tf_layers_dict.get(graph_node.inputs[2].name, {}).get('before_op_output_shape_trans', True)
    before_op_output_shape_trans = \
        before_op_output_shape_trans_1 \
        and before_op_output_shape_trans_2 \
        and before_op_output_shape_trans_3

    graph_node_input = get_constant_or_variable(
        graph_node.inputs[0],
        before_op_output_shape_trans,
    )
    input_tensor = tf_layers_dict[graph_node_input.name]['tf_node'] \
        if isinstance(graph_node_input, gs.Variable) else graph_node_input

    # Pre-process transpose
    input_tensor = pre_process_transpose(
        value_before_transpose=input_tensor,
        param_target='inputs',
        param_name=graph_node.inputs[0].name,
        **kwargs,
    )

    # scale (input 1): 1-D constants are never pre-transposed.
    scale = get_constant_or_variable(
        graph_node.inputs[1],
        before_op_output_shape_trans \
            if graph_node.inputs[1].shape is not None and len(graph_node.inputs[1].shape) != 1 else False,
        is_bias=True,
    )
    scale_dtype = NUMPY_DTYPES_TO_TF_DTYPES[scale.dtype] \
        if isinstance(scale.dtype, np.dtype) else scale.dtype
    scale = tf.convert_to_tensor(scale, dtype=scale_dtype) \
        if isinstance(scale, np.ndarray) else scale

    # bias (input 2): same handling as scale.
    bias = get_constant_or_variable(
        graph_node.inputs[2],
        before_op_output_shape_trans \
            if graph_node.inputs[2].shape is not None and len(graph_node.inputs[2].shape) != 1 else False,
        is_bias=True,
    )
    bias_dtype = NUMPY_DTYPES_TO_TF_DTYPES[bias.dtype] \
        if isinstance(bias.dtype, np.dtype) else bias.dtype
    bias = tf.convert_to_tensor(bias, dtype=bias_dtype) \
        if isinstance(bias, np.ndarray) else bias

    graph_node_output: gs.Variable = graph_node.outputs[0]
    shape = graph_node_output.shape
    dtype = graph_node_output.dtype

    # ONNX attributes (with ONNX defaults where omitted).
    epsilon = graph_node.attrs.get('epsilon', 1e-05)
    num_groups = int(graph_node.attrs.get('num_groups', 1))
    stash_type = int(graph_node.attrs.get('stash_type', 1))
    opset = kwargs.get('opset', None)

    # Preserving Graph Structure (Dict)
    tf_layers_dict[graph_node_output.name] = {
        'optype': graph_node.op,
        'shape': shape,
        'dtype': dtype,
        'nhwc': tf_layers_dict[graph_node_input.name]['nhwc'] \
            if isinstance(graph_node_input, gs.Variable) \
                and 'nhwc' in tf_layers_dict[graph_node_input.name].keys() else False
    }

    # input_rank stays a Python int when the static rank is known,
    # otherwise it becomes a scalar tensor (dynamic-rank fallback).
    input_rank = input_tensor.shape.rank
    if input_rank is None:
        input_rank = tf.rank(input_tensor)

    # Channels sit on the last axis in NHWC-transposed graphs, on axis 1
    # (NCHW) otherwise.
    channel_axis = -1 if before_op_output_shape_trans else 1
    channel_axis_idx = channel_axis
    if isinstance(input_rank, int):
        channel_axis_idx = channel_axis if channel_axis >= 0 else input_rank + channel_axis

    # Build a permutation that moves the channel axis to the end, plus
    # its inverse to restore the original layout after normalization.
    internal_perm = None
    internal_inverse_perm = None
    if isinstance(input_rank, int) and channel_axis_idx != (input_rank - 1):
        # Static rank: compute perm/inverse-perm as plain Python lists.
        perm = [i for i in range(input_rank) if i != channel_axis_idx] + [channel_axis_idx]
        internal_perm = perm
        internal_inverse_perm = [0] * input_rank
        for i, p in enumerate(perm):
            internal_inverse_perm[p] = i
    elif not isinstance(input_rank, int) and channel_axis != -1:
        # Dynamic rank: build the permutation with TF ops instead.
        rank_t = tf.cast(input_rank, tf.int32)
        perm = tf.concat([
            tf.range(channel_axis),
            tf.range(channel_axis + 1, rank_t),
            [channel_axis],
        ], axis=0)
        internal_perm = perm
        internal_inverse_perm = tf.argsort(perm)

    x = input_tensor
    if internal_perm is not None:
        x = transpose_with_flexing_deterrence(
            input_tensor=x,
            perm=internal_perm,
            **kwargs,
        )

    # stash_type == 1 requests float32 intermediate statistics; the
    # result is cast back to the input dtype after normalization.
    input_dtype = x.dtype
    calc_dtype = tf.float32 if stash_type == 1 else input_dtype
    x = tf.cast(x, calc_dtype)

    # Split the (last) channel axis into [num_groups, group_size].
    x_shape = tf.shape(x, out_type=tf.int32)
    channels = x_shape[-1]
    group_size = tf.math.floordiv(channels, num_groups)

    group_shape = tf.stack([num_groups, group_size], axis=0)
    new_shape = tf.concat([x_shape[:-1], group_shape], axis=0)
    x_grouped = tf.reshape(x, new_shape)

    # Reduce over all spatial axes (1 .. rank-3) plus the trailing
    # group_size axis; keepdims so mean/variance broadcast back.
    rank_with_group = tf.rank(x_grouped)
    spatial_axes = tf.range(1, rank_with_group - 2)
    reduce_axes = tf.concat(
        [spatial_axes, tf.expand_dims(rank_with_group - 1, axis=0)],
        axis=0,
    )

    mean, variance = tf.nn.moments(x_grouped, axes=reduce_axes, keepdims=True)
    x_norm = (x_grouped - mean) * tf.math.rsqrt(variance + tf.cast(epsilon, calc_dtype))
    x_norm = tf.cast(x_norm, input_dtype)

    # opset < 21: scale/bias are applied per group here — reshaped to
    # broadcast as [..., num_groups, 1] over the grouped tensor.
    # (presumably because pre-21 GroupNormalization declares num_groups
    # elements for scale/bias — confirm against the ONNX spec)
    if opset is not None and opset < 21:
        rank_with_group = x_grouped.shape.rank
        if rank_with_group is not None:
            scale_shape = [1] * (rank_with_group - 2) + [num_groups, 1]
            scale_group = tf.reshape(scale, scale_shape)
            bias_group = tf.reshape(bias, scale_shape)
        else:
            # Dynamic rank: build the broadcast shape with TF ops.
            rank_with_group = tf.rank(x_grouped)
            prefix_ones = tf.fill([rank_with_group - 2], 1)
            scale_shape = tf.concat(
                [prefix_ones, tf.constant([num_groups, 1], dtype=tf.int32)],
                axis=0,
            )
            scale_group = tf.reshape(scale, scale_shape)
            bias_group = tf.reshape(bias, scale_shape)
        x_norm = x_norm * tf.cast(scale_group, input_dtype) + tf.cast(bias_group, input_dtype)

    # Merge the [num_groups, group_size] axes back into one channel axis.
    x_norm = tf.reshape(x_norm, x_shape)

    # opset >= 21 (or unknown opset): scale/bias are applied along the
    # full channel axis — reshaped to broadcast as [1, ..., 1, C].
    if opset is None or opset >= 21:
        rank_out = x_norm.shape.rank
        if rank_out is not None:
            scale_reshape = tf.reshape(scale, [1] * (rank_out - 1) + [-1])
            bias_reshape = tf.reshape(bias, [1] * (rank_out - 1) + [-1])
        else:
            # Dynamic rank: build the broadcast shape with TF ops.
            rank_out = tf.rank(x_norm)
            prefix_ones = tf.fill([rank_out - 1], 1)
            scale_shape = tf.concat(
                [prefix_ones, tf.constant([-1], dtype=tf.int32)],
                axis=0,
            )
            scale_reshape = tf.reshape(scale, scale_shape)
            bias_reshape = tf.reshape(bias, scale_shape)
        x_norm = x_norm * tf.cast(scale_reshape, input_dtype) + tf.cast(bias_reshape, input_dtype)

    # Restore the original axis order when the channel axis was moved.
    if internal_inverse_perm is not None:
        x_norm = transpose_with_flexing_deterrence(
            input_tensor=x_norm,
            perm=internal_inverse_perm,
            **kwargs,
        )

    tf_layers_dict[graph_node_output.name]['tf_node'] = x_norm

    # Post-process transpose
    tf_layers_dict[graph_node_output.name]['tf_node'] = post_process_transpose(
        value_before_transpose=tf_layers_dict[graph_node_output.name]['tf_node'],
        param_target='outputs',
        param_name=graph_node.outputs[0].name,
        **kwargs,
    )

    # Generation of Debug Info
    tf_layers_dict[graph_node_output.name]['tf_node_info'] = \
        make_tf_node_info(
            node_info={
                'tf_op_type': 'GroupNormalization',
                'tf_inputs': {
                    'x': input_tensor,
                    'scale': scale,
                    'bias': bias,
                    'num_groups': num_groups,
                    'epsilon': epsilon,
                    'stash_type': stash_type,
                },
                'tf_outputs': {
                    'output': tf_layers_dict[graph_node_output.name]['tf_node'],
                },
            }
        )
|
|
@@ -0,0 +1,127 @@
|
|
|
1
|
+
import random
|
|
2
|
+
random.seed(0)
|
|
3
|
+
import numpy as np
|
|
4
|
+
np.random.seed(0)
|
|
5
|
+
import tensorflow as tf
|
|
6
|
+
import tf_keras
|
|
7
|
+
import onnx_graphsurgeon as gs
|
|
8
|
+
from onnx2tf.utils.common_functions import (
|
|
9
|
+
get_constant_or_variable,
|
|
10
|
+
print_node_info,
|
|
11
|
+
inverted_operation_enable_disable,
|
|
12
|
+
make_tf_node_info,
|
|
13
|
+
get_replacement_parameter,
|
|
14
|
+
)
|
|
15
|
+
from onnx2tf.utils.enums import ONNX_DTYPES_TO_TF_DTYPES
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def _type_proto_to_spec(type_proto):
    """Translate an ONNX TypeProto into a matching tf.TypeSpec.

    Optional types are unwrapped recursively, tensor types become a
    ``tf.TensorSpec`` (unknown dims -> None), and sequences of tensors
    become a rank-1 ``tf.RaggedTensorSpec``.  Returns ``None`` when the
    proto is absent or cannot be mapped.
    """
    if type_proto is None:
        return None

    # Optional wraps another TypeProto: unwrap and recurse.
    if hasattr(type_proto, 'optional_type') and type_proto.HasField('optional_type'):
        return _type_proto_to_spec(type_proto.optional_type.elem_type)

    # Plain tensor: map the ONNX element type, defaulting to float32.
    if hasattr(type_proto, 'tensor_type') and type_proto.HasField('tensor_type'):
        tensor_type = type_proto.tensor_type
        tf_dtype = ONNX_DTYPES_TO_TF_DTYPES.get(tensor_type.elem_type, tf.float32)
        dims = [
            dim.dim_value if dim.HasField('dim_value') else None
            for dim in tensor_type.shape.dim
        ]
        return tf.TensorSpec(shape=dims, dtype=tf_dtype)

    # Sequence of tensors: expose it as a ragged tensor with an
    # unknown leading (sequence-length) dimension.
    if hasattr(type_proto, 'sequence_type') and type_proto.HasField('sequence_type'):
        inner_spec = _type_proto_to_spec(type_proto.sequence_type.elem_type)
        if isinstance(inner_spec, tf.TensorSpec):
            return tf.RaggedTensorSpec(
                shape=[None] + list(inner_spec.shape),
                dtype=inner_spec.dtype,
                ragged_rank=1,
            )
        return None
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
@print_node_info
@inverted_operation_enable_disable
@get_replacement_parameter
def make_node(
    *,
    graph_node: gs.Node,
    tf_layers_dict: dict,
    **kwargs: dict,
):
    """Optional

    Lower an ONNX Optional node: produces an empty
    ``tf.experimental.Optional`` when the node has no input, passes an
    already-optional input through unchanged, and otherwise wraps the
    input value with ``Optional.from_value``.

    Parameters
    ----------
    graph_node: gs.Node
        graph_surgeon Node

    tf_layers_dict: dict
        optype, shape, dtype, tensorflow graph
    """
    # ONNX Optional may legally have zero inputs (empty optional), so
    # the input is only resolved when present and named.
    graph_node_input = None
    input_tensor = None
    if len(graph_node.inputs) >= 1 and graph_node.inputs[0].name != '':
        graph_node_input = get_constant_or_variable(
            graph_node.inputs[0],
            before_op_output_shape_trans=False,
        )
        input_tensor = tf_layers_dict[graph_node_input.name]['tf_node'] \
            if isinstance(graph_node_input, gs.Variable) else graph_node_input

    graph_node_output: gs.Variable = graph_node.outputs[0]
    shape = graph_node_output.shape
    dtype = graph_node_output.dtype

    # Preserving Graph Structure (Dict)
    tf_layers_dict[graph_node_output.name] = {
        'optype': graph_node.op,
        'shape': shape,
        'dtype': dtype,
        'nhwc': tf_layers_dict[graph_node_input.name]['nhwc'] \
            if isinstance(graph_node_input, gs.Variable) \
                and 'nhwc' in tf_layers_dict[graph_node_input.name].keys() else False
    }

    # Generation of TF OP
    if input_tensor is None:
        # No input: build an empty optional whose element spec comes
        # from the node's 'type' attribute (or from dtype when it is a
        # TypeProto-like object), falling back to an unknown-shape
        # float32 spec when neither can be mapped.
        type_proto = graph_node.attrs.get('type', None)
        if type_proto is None and hasattr(dtype, 'HasField'):
            type_proto = dtype
        spec = _type_proto_to_spec(type_proto)
        if spec is None:
            spec = tf.TensorSpec(shape=None, dtype=tf.float32)
        tf_layers_dict[graph_node_output.name]['tf_node'] = \
            tf.experimental.Optional.empty(
                element_spec=spec,
            )
    elif isinstance(input_tensor, tf.experimental.Optional):
        # Input is already an optional: pass it through unchanged.
        tf_layers_dict[graph_node_output.name]['tf_node'] = input_tensor
    else:
        # Wrap a concrete value; numpy constants are converted to
        # tensors first.
        value = input_tensor
        if isinstance(input_tensor, np.ndarray):
            value = tf.convert_to_tensor(input_tensor)
        # NOTE(review): tf.experimental.Optional.from_value is documented
        # as taking only ``value`` — verify that the ``name`` kwarg is
        # accepted by the pinned TensorFlow version.
        tf_layers_dict[graph_node_output.name]['tf_node'] = \
            tf.experimental.Optional.from_value(
                value=value,
                name=graph_node.name,
            )

    # Generation of Debug Info
    tf_layers_dict[graph_node_output.name]['tf_node_info'] = \
        make_tf_node_info(
            node_info={
                'tf_op_type': tf.experimental.Optional,
                'tf_inputs': {
                    'input': input_tensor,
                },
                'tf_outputs': {
                    'output': tf_layers_dict[graph_node_output.name]['tf_node'],
                },
            }
        )
|
|
@@ -63,24 +63,14 @@ def make_node(
|
|
|
63
63
|
)
|
|
64
64
|
|
|
65
65
|
# Generation of TF OP
|
|
66
|
-
if isinstance(input_tensor,
|
|
66
|
+
if isinstance(input_tensor, tf.experimental.Optional):
|
|
67
|
+
optional = input_tensor
|
|
68
|
+
else:
|
|
67
69
|
optional = \
|
|
68
70
|
tf.experimental.Optional.from_value(
|
|
69
71
|
value=tf.convert_to_tensor(input_tensor),
|
|
70
72
|
name=graph_node.name,
|
|
71
73
|
)
|
|
72
|
-
else:
|
|
73
|
-
converted_tenosr = tf.convert_to_tensor(input_tensor)
|
|
74
|
-
spec = None
|
|
75
|
-
if tf_keras.backend.is_keras_tensor(converted_tenosr):
|
|
76
|
-
spec = converted_tenosr.type_spec
|
|
77
|
-
else:
|
|
78
|
-
spec = tf.TensorSpec.from_tensor(converted_tenosr)
|
|
79
|
-
|
|
80
|
-
optional = \
|
|
81
|
-
tf.experimental.Optional.empty(
|
|
82
|
-
element_spec=spec,
|
|
83
|
-
)
|
|
84
74
|
tf_layers_dict[graph_node_output.name]['tf_node'] = optional.get_value()
|
|
85
75
|
|
|
86
76
|
# Post-process transpose
|