onnx2tf 1.29.21__tar.gz → 1.29.23__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/PKG-INFO +12 -5
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/README.md +11 -4
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/__init__.py +1 -1
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/MaxPool.py +54 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/PRelu.py +44 -11
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/ScatterElements.py +50 -1
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Unique.py +71 -11
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/pyproject.toml +2 -2
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/__main__.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/onnx2tf.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Abs.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Acos.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Acosh.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Add.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/AffineGrid.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/And.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/ArgMax.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/ArgMin.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Asin.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Asinh.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Atan.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Atanh.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Attention.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/AveragePool.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/BatchNormalization.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Bernoulli.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/BitShift.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/BitwiseAnd.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/BitwiseNot.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/BitwiseOr.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/BitwiseXor.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/BlackmanWindow.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Cast.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Ceil.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Celu.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Clip.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Col2Im.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Compress.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Concat.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/ConcatFromSequence.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Constant.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/ConstantOfShape.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Conv.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/ConvInteger.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/ConvTranspose.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Cos.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Cosh.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/CumProd.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/CumSum.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/DFT.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/DeformConv.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/DepthToSpace.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/DequantizeLinear.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Det.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Div.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Dropout.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/DynamicQuantizeLinear.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Einsum.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Elu.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Equal.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Erf.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Exp.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Expand.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/EyeLike.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Flatten.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Floor.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/FusedConv.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/GRU.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Gather.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/GatherElements.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/GatherND.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Gelu.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Gemm.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/GlobalAveragePool.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/GlobalLpPool.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/GlobalMaxPool.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Greater.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/GreaterOrEqual.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/GridSample.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/GroupNorm.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/HammingWindow.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/HannWindow.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/HardSigmoid.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/HardSwish.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Hardmax.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Identity.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/If.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/ImageDecoder.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Input.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/InstanceNormalization.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Inverse.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/IsInf.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/IsNaN.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/LRN.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/LSTM.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/LayerNormalization.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/LeakyRelu.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Less.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/LessOrEqual.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Log.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/LogSoftmax.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Loop.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/LpNormalization.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/LpPool.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/MatMul.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/MatMulInteger.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Max.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/MaxRoiPool.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/MaxUnpool.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Mean.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/MeanVarianceNormalization.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/MelWeightMatrix.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Min.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Mish.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Mod.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Mul.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Multinomial.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Neg.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/NegativeLogLikelihoodLoss.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/NonMaxSuppression.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/NonZero.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Not.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/OneHot.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/OptionalGetElement.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/OptionalHasElement.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Or.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Pad.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Pow.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/QLinearAdd.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/QLinearConcat.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/QLinearConv.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/QLinearLeakyRelu.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/QLinearMatMul.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/QLinearMul.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/QLinearSigmoid.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/QLinearSoftmax.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/QuantizeLinear.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/RMSNormalization.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/RNN.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/RandomNormal.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/RandomNormalLike.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/RandomUniform.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/RandomUniformLike.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Range.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Reciprocal.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/ReduceL1.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/ReduceL2.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/ReduceLogSum.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/ReduceLogSumExp.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/ReduceMax.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/ReduceMean.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/ReduceMin.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/ReduceProd.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/ReduceSum.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/ReduceSumSquare.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/RegexFullMatch.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Relu.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Reshape.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Resize.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/ReverseSequence.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/RoiAlign.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/RotaryEmbedding.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Round.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/STFT.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/ScaleAndTranslate.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Scan.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Scatter.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/ScatterND.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Selu.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/SequenceAt.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/SequenceConstruct.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/SequenceEmpty.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/SequenceErase.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/SequenceInsert.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/SequenceLength.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Shape.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Shrink.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Sigmoid.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Sign.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Sin.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Sinh.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Size.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Slice.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Softmax.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/SoftmaxCrossEntropyLoss.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Softplus.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Softsign.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/SpaceToDepth.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Split.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/SplitToSequence.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Sqrt.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Squeeze.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/StringConcat.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/StringNormalizer.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/StringSplit.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Sub.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Sum.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Tan.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Tanh.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/TensorScatter.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/ThresholdedRelu.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Tile.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/TopK.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Transpose.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Trilu.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Unsqueeze.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Upsample.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Where.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/Xor.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/ops/__init__.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/utils/__init__.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/utils/common_functions.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/utils/enums.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/utils/iterative_json_optimizer.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/utils/json_auto_generator.py +0 -0
- {onnx2tf-1.29.21 → onnx2tf-1.29.23}/onnx2tf/utils/logging.py +0 -0
--- onnx2tf-1.29.21/PKG-INFO
+++ onnx2tf-1.29.23/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: onnx2tf
-Version: 1.29.21
+Version: 1.29.23
 Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
 Keywords: onnx,tensorflow,tflite,keras,deep-learning,machine-learning
 Author: Katsuya Hyodo
@@ -365,7 +365,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
 docker run --rm -it \
 -v `pwd`:/workdir \
 -w /workdir \
-ghcr.io/pinto0309/onnx2tf:1.29.
+ghcr.io/pinto0309/onnx2tf:1.29.23

 or

@@ -373,7 +373,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
 docker run --rm -it \
 -v `pwd`:/workdir \
 -w /workdir \
-docker.io/pinto0309/onnx2tf:1.29.
+docker.io/pinto0309/onnx2tf:1.29.23

 or

@@ -493,13 +493,20 @@ onnx2tf -i resnet18-v1-7.onnx -v info
 # without input OP name.
 # Note that if there are multiple input OPs, the zero dimension of all input OPs is
 # forced to be rewritten.
-# The `-
-#
+# The `-sh/--shape-hints` option provides shape hints for input tensors with undefined
+# dimensions, significantly improving the conversion success rate for models with dynamic
+# input shapes. Specifying this option in combination with the `-b` option will further
+# improve the success rate of model conversion. The `-sh` option does not change ONNX
+# input OPs to static shapes.
+# The `-ois/--overwrite_input_shape` option allows undefined dimensions in all dimensions,
+# including the zero dimensionality, to be overwritten to a static shape, but requires
 # the input OP name to be specified.
 # e.g. -ois data1:1,3,224,224 data2:1,255 data3:1,224,6
 wget https://github.com/PINTO0309/onnx2tf/releases/download/0.0.2/resnet18-v1-7.onnx
 onnx2tf -i resnet18-v1-7.onnx -b 1
 or
+onnx2tf -i resnet18-v1-7.onnx -sh data:1,3,224,224 -b 1
+or
 onnx2tf -i resnet18-v1-7.onnx -ois data:1,3,224,224

 # Suppress automatic transposition of input OPs from NCW, NCHW, NCDHW to NWC, NHWC, NDHWC.
--- onnx2tf-1.29.21/README.md
+++ onnx2tf-1.29.23/README.md
@@ -323,7 +323,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
 docker run --rm -it \
 -v `pwd`:/workdir \
 -w /workdir \
-ghcr.io/pinto0309/onnx2tf:1.29.
+ghcr.io/pinto0309/onnx2tf:1.29.23

 or

@@ -331,7 +331,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
 docker run --rm -it \
 -v `pwd`:/workdir \
 -w /workdir \
-docker.io/pinto0309/onnx2tf:1.29.
+docker.io/pinto0309/onnx2tf:1.29.23

 or

@@ -451,13 +451,20 @@ onnx2tf -i resnet18-v1-7.onnx -v info
 # without input OP name.
 # Note that if there are multiple input OPs, the zero dimension of all input OPs is
 # forced to be rewritten.
-# The `-
-#
+# The `-sh/--shape-hints` option provides shape hints for input tensors with undefined
+# dimensions, significantly improving the conversion success rate for models with dynamic
+# input shapes. Specifying this option in combination with the `-b` option will further
+# improve the success rate of model conversion. The `-sh` option does not change ONNX
+# input OPs to static shapes.
+# The `-ois/--overwrite_input_shape` option allows undefined dimensions in all dimensions,
+# including the zero dimensionality, to be overwritten to a static shape, but requires
 # the input OP name to be specified.
 # e.g. -ois data1:1,3,224,224 data2:1,255 data3:1,224,6
 wget https://github.com/PINTO0309/onnx2tf/releases/download/0.0.2/resnet18-v1-7.onnx
 onnx2tf -i resnet18-v1-7.onnx -b 1
 or
+onnx2tf -i resnet18-v1-7.onnx -sh data:1,3,224,224 -b 1
+or
 onnx2tf -i resnet18-v1-7.onnx -ois data:1,3,224,224

 # Suppress automatic transposition of input OPs from NCW, NCHW, NCDHW to NWC, NHWC, NDHWC.
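The new `-sh/--shape-hints` text above describes filling undefined input dimensions without freezing the ONNX input to a static shape (unlike `-ois`). A minimal, hypothetical sketch of that idea; `apply_shape_hint` is an illustrative helper, not part of onnx2tf:

```python
# Hypothetical helper, for illustration only: fill undefined (None) dims of an
# ONNX-style input shape from a user-supplied hint, leaving known dims untouched.
def apply_shape_hint(onnx_shape, hint):
    return [h if d is None else d for d, h in zip(onnx_shape, hint)]

# A model declared with a fully dynamic NCHW input:
print(apply_shape_hint([None, 3, None, None], [1, 3, 224, 224]))  # [1, 3, 224, 224]
# A dimension that is already fixed is kept even if the hint disagrees:
print(apply_shape_hint([None, 3, None, None], [1, 4, 640, 640]))  # [1, 3, 640, 640]
```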
--- onnx2tf-1.29.21/onnx2tf/ops/MaxPool.py
+++ onnx2tf-1.29.23/onnx2tf/ops/MaxPool.py
@@ -1,6 +1,7 @@
 import random
 random.seed(0)
 import numpy as np
+import itertools
 np.random.seed(0)
 import tensorflow as tf
 import tf_keras
@@ -119,6 +120,59 @@ def make_node(
         **kwargs,
     )

+    # Guard: brute-force axis alignment between NCHW and NHWC when batch dim mismatches.
+    # Only trigger when shapes are fully known to avoid destabilizing existing behavior.
+    def _shape_matches(shape_a, shape_b):
+        if shape_a is None or shape_b is None or len(shape_a) != len(shape_b):
+            return False
+        for dim_a, dim_b in zip(shape_a, shape_b):
+            if dim_a is None or dim_b is None:
+                continue
+            if dim_a != dim_b:
+                return False
+        return True
+
+    def _best_perm_to_match(cur_shape, target_shape):
+        rank = len(cur_shape)
+        best_perm = None
+        best_cost = None
+        for perm in itertools.permutations(range(rank)):
+            permuted = [cur_shape[i] for i in perm]
+            if not _shape_matches(permuted, target_shape):
+                continue
+            cost = sum(abs(i - perm[i]) for i in range(rank))
+            if best_cost is None or cost < best_cost:
+                best_cost = cost
+                best_perm = perm
+        return best_perm
+
+    try:
+        current_shape = input_tensor.shape.as_list()
+    except Exception:
+        current_shape = None
+
+    if onnx_input_shape is not None and current_shape is not None:
+        onnx_shape = [
+            dim if isinstance(dim, int) else None for dim in onnx_input_shape
+        ]
+        cur_shape = [
+            dim if isinstance(dim, int) else None for dim in current_shape
+        ]
+        if len(onnx_shape) in (3, 4, 5) \
+            and len(cur_shape) == len(onnx_shape) \
+            and None not in onnx_shape \
+            and None not in cur_shape:
+            expected_shape = [onnx_shape[0]] + onnx_shape[2:] + [onnx_shape[1]]
+            if cur_shape[0] != onnx_shape[0] \
+                and not _shape_matches(cur_shape, expected_shape):
+                perm = _best_perm_to_match(cur_shape, expected_shape)
+                if perm is not None:
+                    input_tensor = transpose_with_flexing_deterrence(
+                        input_tensor=input_tensor,
+                        perm=list(perm),
+                        **kwargs,
+                    )
+
     filter = None

     auto_pad = graph_node.attrs.get('auto_pad', 'NOTSET')
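The MaxPool guard above searches every axis permutation of the incoming tensor's shape for one that matches the NHWC layout expected from the ONNX NCHW shape, preferring the permutation that displaces the fewest axes. A standalone sketch of that search on plain Python lists (names are illustrative, not the module's own, and `None` handling is omitted):

```python
import itertools

def best_perm_to_match(cur_shape, target_shape):
    # Return the axis permutation of cur_shape that reproduces target_shape,
    # preferring the permutation that moves the fewest axes (same cost metric
    # as the diff above: sum of |i - perm[i]|).
    best_perm, best_cost = None, None
    for perm in itertools.permutations(range(len(cur_shape))):
        if [cur_shape[i] for i in perm] != list(target_shape):
            continue
        cost = sum(abs(i - perm[i]) for i in range(len(cur_shape)))
        if best_cost is None or cost < best_cost:
            best_perm, best_cost = perm, cost
    return best_perm

# ONNX input is NCHW [1, 64, 56, 112]; the expected TF layout is NHWC [1, 56, 112, 64].
onnx_nchw = [1, 64, 56, 112]
expected_nhwc = [onnx_nchw[0]] + onnx_nchw[2:] + [onnx_nchw[1]]
print(best_perm_to_match(onnx_nchw, expected_nhwc))  # (0, 2, 3, 1)
```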
--- onnx2tf-1.29.21/onnx2tf/ops/PRelu.py
+++ onnx2tf-1.29.23/onnx2tf/ops/PRelu.py
@@ -124,22 +124,55 @@ def make_node(
         tf_layers_dict[graph_node_output.name].pop('nhwc')

     # Generation of TF OP
-
-
-
-
-
+    shared_axes = None
+    input_shape = input_tensor.shape
+    slope_shape = slope.shape if hasattr(slope, 'shape') else None
+    if input_shape is not None and slope_shape is not None:
+        input_rank = len(input_shape)
+        if len(slope_shape) == input_rank - 1:
+            shared_axes = [
+                i + 1 for i, dim in enumerate(slope_shape)
+                if dim is not None and dim == 1
+            ]
+        elif len(slope_shape) == 1 and input_rank >= 3:
+            slope_dim = slope_shape[0]
+            channel_axis = None
+            if isinstance(slope_dim, int):
+                if input_shape[1] == slope_dim:
+                    channel_axis = 1
+                elif input_shape[-1] == slope_dim:
+                    channel_axis = input_rank - 1
+            if channel_axis is not None:
+                shared_axes = [ax for ax in range(1, input_rank) if ax != channel_axis]
+
+    if shared_axes is None:
         if slope.shape is not None \
             and len(slope.shape) > 0 \
             and sum([1 if dim is not None and dim == 1 else 0 for dim in slope.shape]) == len(slope.shape):
             shared_axes = [val + 1 for val in range(len(input_tensor.shape) - 1)]
         else:
-
-
-
-
-            shared_axes=
-
+            input_nhwc = tf_layers_dict.get(graph_node_output.name, {}).get('nhwc', False)
+            if input_nhwc:
+                shared_axes = [val + 1 for val in range(len(input_tensor.shape) - 2)]
+            else:
+                shared_axes = [val + 2 for val in range(len(input_tensor.shape) - 2)]
+
+    use_native_prelu = not replace_prelu_to_pseudo_prelu
+    if not use_native_prelu:
+        pos = tf.nn.relu(input_tensor)
+        neg = (input_tensor - abs(input_tensor)) * (slope * 0.5)
+        tf_layers_dict[graph_node_output.name]['tf_node'] = pos + neg
+    else:
+        try:
+            tf_layers_dict[graph_node_output.name]['tf_node'] = \
+                PReLU(
+                    weights=slope,
+                    shared_axes=shared_axes,
+                )(input_tensor)
+        except Exception:
+            pos = tf.nn.relu(input_tensor)
+            neg = (input_tensor - abs(input_tensor)) * (slope * 0.5)
+            tf_layers_dict[graph_node_output.name]['tf_node'] = pos + neg

     # Post-process transpose
     before_trans_shape = tf_layers_dict[graph_node_output.name]['tf_node'].shape
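The pseudo-PReLU fallback above computes `relu(x) + (x - |x|) * slope / 2`, which equals `x` for `x >= 0` and `slope * x` for `x < 0`, i.e. exactly PReLU. A small NumPy check of that identity (illustrative only, not the op handler):

```python
import numpy as np

def pseudo_prelu(x, slope):
    # relu(x) + (x - |x|) * slope / 2:
    #   x >= 0 -> x + 0,  x < 0 -> 0 + (2x) * slope / 2 = slope * x
    pos = np.maximum(x, 0.0)
    neg = (x - np.abs(x)) * (slope * 0.5)
    return pos + neg

x = np.array([-2.0, -0.5, 0.0, 1.5])
slope = 0.25
reference = np.where(x >= 0, x, slope * x)
print(np.allclose(pseudo_prelu(x, slope), reference))  # True
```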
--- onnx2tf-1.29.21/onnx2tf/ops/ScatterElements.py
+++ onnx2tf-1.29.23/onnx2tf/ops/ScatterElements.py
@@ -146,6 +146,48 @@ def make_node(
         axis=axis,
         indices=indices_tensor,
     )
+    indices_rank = None
+    if hasattr(indices_tensor, "shape") and indices_tensor.shape is not None:
+        try:
+            indices_rank = len(indices_tensor.shape)
+        except TypeError:
+            indices_rank = indices_tensor.shape.rank
+    updates_rank = updates_tensor_rank
+    broadcast_shape = None
+    pad_rank = 0
+
+    def _pad_and_broadcast(target_tensor, pad_rank, target_shape):
+        tensor = target_tensor
+        if isinstance(tensor, np.ndarray):
+            tensor = tf.convert_to_tensor(tensor)
+        if pad_rank <= 0:
+            return tf.broadcast_to(tensor, target_shape)
+        tensor_shape = tf.shape(tensor)
+        new_shape = tf.concat(
+            [tf.ones([pad_rank], dtype=tf.int32), tensor_shape],
+            axis=0,
+        )
+        tensor = tf.reshape(tensor, new_shape)
+        return tf.broadcast_to(tensor, target_shape)
+
+    updates_tensor_for_scatter = updates_tensor
+    if indices_rank is not None and updates_rank is not None and indices_rank != updates_rank:
+        if indices_rank > updates_rank:
+            broadcast_shape = tf.shape(indices_tensor)
+            pad_rank = indices_rank - updates_rank
+            updates_tensor_for_scatter = _pad_and_broadcast(
+                updates_tensor,
+                pad_rank,
+                broadcast_shape,
+            )
+        else:
+            broadcast_shape = tf.shape(updates_tensor)
+            pad_rank = updates_rank - indices_rank
+            indices_tensor = _pad_and_broadcast(
+                indices_tensor,
+                pad_rank,
+                broadcast_shape,
+            )
     sparsified_dense_idx_shape = updates_tensor_shape

     if None not in sparsified_dense_idx_shape:
@@ -160,6 +202,13 @@ def make_node(
         ]

         idx_tensors_per_axis = tf.meshgrid(*idx_tensors_per_axis, indexing='ij')
+        if indices_rank is not None \
+            and updates_rank is not None \
+            and indices_rank > updates_rank:
+            idx_tensors_per_axis = [
+                _pad_and_broadcast(idx_tensor, pad_rank, broadcast_shape)
+                for idx_tensor in idx_tensors_per_axis
+            ]
         idx_tensors_per_axis[axis] = indices_tensor
         dim_expanded_idx_tensors_per_axis = [
             tf.expand_dims(idx_tensor, axis=-1)
@@ -194,7 +243,7 @@ def make_node(
         )

         indices = tf.reshape(coordinate, [-1, input_tensor_rank])
-        updates = tf.reshape(
+        updates = tf.reshape(updates_tensor_for_scatter, [-1])
         output = tf.tensor_scatter_nd_update(
             tensor=input_tensor,
             indices=indices,
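The `_pad_and_broadcast` helper above left-pads the lower-rank tensor with size-1 dimensions and broadcasts it to the higher-rank shape so that `indices` and `updates` line up element-wise before the scatter. A minimal standalone TensorFlow sketch of that rank alignment (not the op handler itself):

```python
import tensorflow as tf

def pad_and_broadcast(tensor, pad_rank, target_shape):
    # Left-pad with size-1 dims, then broadcast to the target shape.
    if pad_rank > 0:
        new_shape = tf.concat(
            [tf.ones([pad_rank], dtype=tf.int32), tf.shape(tensor)], axis=0)
        tensor = tf.reshape(tensor, new_shape)
    return tf.broadcast_to(tensor, target_shape)

indices = tf.constant([[0, 2, 1], [1, 0, 2]])   # rank 2, shape (2, 3)
updates = tf.constant([10.0, 20.0, 30.0])       # rank 1, shape (3,)
aligned = pad_and_broadcast(updates, pad_rank=1, target_shape=tf.shape(indices))
print(aligned.shape)  # (2, 3): updates now pair up with indices element-wise
```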
--- onnx2tf-1.29.21/onnx2tf/ops/Unique.py
+++ onnx2tf-1.29.23/onnx2tf/ops/Unique.py
@@ -14,6 +14,7 @@ from onnx2tf.utils.common_functions import (
     make_tf_node_info,
     get_replacement_parameter,
     pre_process_transpose,
+    convert_axis,
 )
 from onnx2tf.utils.logging import Color

@@ -69,8 +70,20 @@
         **kwargs,
     )

+    input_tensor_shape = input_tensor.shape
+    tensor_rank = len(input_tensor_shape) \
+        if input_tensor_shape != tf.TensorShape(None) else 1
+
     axis = graph_node.attrs.get('axis', None)
     sorted = graph_node.attrs.get('sorted', 1)
+    if axis is not None:
+        if isinstance(axis, np.ndarray) and axis.shape == ():
+            axis = int(axis)
+        axis = convert_axis(
+            axis=int(axis),
+            tensor_rank=tensor_rank,
+            before_op_output_shape_trans=before_op_output_shape_trans,
+        )

     # Preserving Graph Structure (Dict)
     for graph_node_output in graph_node_outputs:
@@ -101,17 +114,64 @@ def make_node(

     # tf unique returns unsorted tensor, need to sort if option is enabled
     if sorted:
-        #
-
-
-
-
-
-
-
-
-
-
+        # Sort unique outputs to match ONNX sorted behavior.
+        def _argsort_supported(dtype):
+            return dtype.is_floating or dtype.is_integer or dtype == tf.bool
+
+        y_rank = y.shape.rank
+        axis_ = axis
+        if axis_ is None:
+            axis_ = 0
+        if axis_ < 0 and y_rank is not None:
+            axis_ = axis_ + y_rank
+
+        def _lexsort_perm(flat_2d):
+            if not _argsort_supported(flat_2d.dtype):
+                return None
+            cols = flat_2d.shape[1]
+            if cols is None:
+                return None
+            order = tf.range(tf.shape(flat_2d)[0])
+            for col in reversed(range(cols)):
+                col_vals = tf.gather(flat_2d, order)[:, col]
+                if col_vals.dtype == tf.bool:
+                    col_vals = tf.cast(col_vals, tf.int32)
+                order = tf.gather(order, tf.argsort(col_vals, stable=True))
+            return order
+
+        order = None
+        if y_rank is not None and y_rank == 1:
+            if _argsort_supported(y.dtype):
+                sort_vals = y
+                if sort_vals.dtype == tf.bool:
+                    sort_vals = tf.cast(sort_vals, tf.int32)
+                order = tf.argsort(sort_vals, stable=True)
+        elif y_rank is not None and axis_ is not None and 0 <= axis_ < y_rank:
+            perm = [axis_] + [i for i in range(y_rank) if i != axis_]
+            y_t = tf.transpose(y, perm)
+            flat = tf.reshape(y_t, [tf.shape(y_t)[0], -1])
+            order = _lexsort_perm(flat)
+
+        if order is None:
+            warn_msg = f'' + \
+                Color.YELLOW(f'WARNING:') + ' ' + \
+                f'Unique sort fallback to unsorted due to dynamic shape or unsupported dtype.'
+            print(warn_msg)
+        else:
+            y = tf.gather(y, order, axis=axis_)
+            count = tf.gather(count, order)
+            indices = tf.gather(indices, order)
+            inv_order = tf.argsort(order)
+            inverse_indices = tf.gather(inv_order, inverse_indices)
+
+        if len(graph_node_outputs) >= 1:
+            tf_layers_dict[graph_node_outputs[0].name]['tf_node'] = y
+        if len(graph_node_outputs) >= 2:
+            tf_layers_dict[graph_node_outputs[1].name]['tf_node'] = indices
+        if len(graph_node_outputs) >= 3:
+            tf_layers_dict[graph_node_outputs[2].name]['tf_node'] = inverse_indices
+        if len(graph_node_outputs) >= 4:
+            tf_layers_dict[graph_node_outputs[3].name]['tf_node'] = count

     # Generation of Debug Info
     tf_outputs = {f"output{idx}": value for idx, value in enumerate([y, indices, inverse_indices, count])}
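The multi-dimensional sort path above builds a lexicographic row ordering by applying `tf.argsort(..., stable=True)` column by column from the last column to the first, which is the classic lexsort-by-stable-sort construction. A standalone check of that trick against NumPy's `np.lexsort` (illustrative, not the handler itself):

```python
import numpy as np
import tensorflow as tf

def lexsort_rows(flat_2d):
    # Stable argsort applied from the last column to the first yields a row
    # order that is lexicographic by columns left-to-right.
    order = tf.range(tf.shape(flat_2d)[0])
    for col in reversed(range(flat_2d.shape[1])):
        col_vals = tf.gather(flat_2d, order)[:, col]
        order = tf.gather(order, tf.argsort(col_vals, stable=True))
    return order

rows = tf.constant([[3, 1], [1, 2], [1, 1], [2, 0]])
tf_order = lexsort_rows(rows).numpy()
np_order = np.lexsort(rows.numpy().T[::-1])  # primary key: column 0, then column 1
print(tf_order, np_order)  # both: [2 1 3 0]
```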
--- onnx2tf-1.29.21/pyproject.toml
+++ onnx2tf-1.29.23/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "uv_build"

 [project]
 name = "onnx2tf"
-version = "1.29.21"
+version = "1.29.23"
 description = "Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf)."
 readme = "README.md"
 requires-python = ">=3.10.12"
@@ -56,7 +56,7 @@ Issues = "https://github.com/PINTO0309/onnx2tf/issues"

 [tool.uv]
 override-dependencies = [
-    "onnx==1.19.
+    "onnx==1.19.1",
     "onnxsim==0.4.36",
     "ml-dtypes==0.5.1",
     "numpy==1.26.4",