onnx2tf 1.29.12__tar.gz → 1.29.13__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/PKG-INFO +4 -3
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/README.md +2 -2
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/__init__.py +1 -1
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/AveragePool.py +49 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Expand.py +12 -1
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Slice.py +34 -2
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/utils/common_functions.py +173 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/pyproject.toml +4 -1
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/__main__.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/onnx2tf.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Abs.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Acos.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Acosh.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Add.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/AffineGrid.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/And.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/ArgMax.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/ArgMin.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Asin.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Asinh.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Atan.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Atanh.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Attention.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/BatchNormalization.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Bernoulli.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/BitShift.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/BitwiseAnd.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/BitwiseNot.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/BitwiseOr.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/BitwiseXor.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/BlackmanWindow.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Cast.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Ceil.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Celu.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Clip.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Col2Im.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Compress.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Concat.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/ConcatFromSequence.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Constant.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/ConstantOfShape.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Conv.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/ConvInteger.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/ConvTranspose.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Cos.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Cosh.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/CumProd.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/CumSum.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/DepthToSpace.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/DequantizeLinear.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Det.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Div.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Dropout.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/DynamicQuantizeLinear.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Einsum.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Elu.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Equal.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Erf.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Exp.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/EyeLike.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Flatten.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Floor.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/FusedConv.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/GRU.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Gather.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/GatherElements.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/GatherND.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Gelu.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Gemm.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/GlobalAveragePool.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/GlobalLpPool.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/GlobalMaxPool.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Greater.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/GreaterOrEqual.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/GridSample.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/GroupNorm.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/HammingWindow.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/HannWindow.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/HardSigmoid.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/HardSwish.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Hardmax.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Identity.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/If.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Input.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/InstanceNormalization.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Inverse.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/IsInf.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/IsNaN.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/LRN.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/LSTM.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/LayerNormalization.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/LeakyRelu.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Less.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/LessOrEqual.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Log.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/LogSoftmax.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Loop.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/LpNormalization.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/LpPool.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/MatMul.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/MatMulInteger.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Max.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/MaxPool.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/MaxRoiPool.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/MaxUnpool.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Mean.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/MeanVarianceNormalization.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/MelWeightMatrix.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Min.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Mish.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Mod.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Mul.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Multinomial.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Neg.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/NonMaxSuppression.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/NonZero.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Not.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/OneHot.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/OptionalGetElement.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/OptionalHasElement.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Or.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/PRelu.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Pad.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Pow.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/QLinearAdd.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/QLinearConcat.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/QLinearConv.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/QLinearLeakyRelu.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/QLinearMatMul.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/QLinearMul.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/QLinearSigmoid.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/QLinearSoftmax.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/QuantizeLinear.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/RNN.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/RandomNormal.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/RandomNormalLike.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/RandomUniform.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/RandomUniformLike.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Range.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Reciprocal.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/ReduceL1.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/ReduceL2.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/ReduceLogSum.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/ReduceLogSumExp.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/ReduceMax.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/ReduceMean.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/ReduceMin.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/ReduceProd.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/ReduceSum.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/ReduceSumSquare.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Relu.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Reshape.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Resize.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/ReverseSequence.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/RoiAlign.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Round.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/STFT.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/ScaleAndTranslate.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Scatter.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/ScatterElements.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/ScatterND.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Selu.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/SequenceAt.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/SequenceConstruct.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/SequenceEmpty.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/SequenceErase.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/SequenceInsert.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/SequenceLength.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Shape.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Shrink.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Sigmoid.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Sign.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Sin.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Sinh.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Size.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Softmax.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Softplus.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Softsign.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/SpaceToDepth.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Split.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/SplitToSequence.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Sqrt.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Squeeze.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/StringNormalizer.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Sub.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Sum.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Tan.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Tanh.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/ThresholdedRelu.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Tile.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/TopK.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Transpose.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Trilu.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Unique.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Unsqueeze.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Upsample.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Where.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Xor.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/__init__.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/utils/__init__.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/utils/enums.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/utils/iterative_json_optimizer.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/utils/json_auto_generator.py +0 -0
- {onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/utils/logging.py +0 -0
{onnx2tf-1.29.12 → onnx2tf-1.29.13}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: onnx2tf
-Version: 1.29.12
+Version: 1.29.13
 Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
 Keywords: onnx,tensorflow,tflite,keras,deep-learning,machine-learning
 Author: Katsuya Hyodo
@@ -13,6 +13,7 @@ Classifier: License :: OSI Approved :: MIT License
 Classifier: Operating System :: POSIX :: Linux
 Classifier: Operating System :: Unix
 Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Requires-Dist: requests==2.32.5
@@ -363,7 +364,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
 docker run --rm -it \
 -v `pwd`:/workdir \
 -w /workdir \
-ghcr.io/pinto0309/onnx2tf:1.29.12
+ghcr.io/pinto0309/onnx2tf:1.29.13

 or

@@ -371,7 +372,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
 docker run --rm -it \
 -v `pwd`:/workdir \
 -w /workdir \
-docker.io/pinto0309/onnx2tf:1.29.12
+docker.io/pinto0309/onnx2tf:1.29.13

 or

{onnx2tf-1.29.12 → onnx2tf-1.29.13}/README.md

@@ -322,7 +322,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
 docker run --rm -it \
 -v `pwd`:/workdir \
 -w /workdir \
-ghcr.io/pinto0309/onnx2tf:1.29.12
+ghcr.io/pinto0309/onnx2tf:1.29.13

 or

@@ -330,7 +330,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
 docker run --rm -it \
 -v `pwd`:/workdir \
 -w /workdir \
-docker.io/pinto0309/onnx2tf:1.29.12
+docker.io/pinto0309/onnx2tf:1.29.13

 or

{onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/AveragePool.py

@@ -370,6 +370,12 @@ def make_node(
             paddings=tf_pads,
             mode='CONSTANT',
         )
+        if input_tensor_shape is not None and len(input_tensor_shape) == spatial_size + 2:
+            # Preserve known batch/channel dims since dynamic paddings erase shape info.
+            padded_tensor = tf.ensure_shape(
+                padded_tensor,
+                [input_tensor_shape[0]] + [None] * spatial_size + [input_tensor_shape[-1]],
+            )
     else:
         if auto_pad == 'SAME_LOWER':
             # switch the order of pads
@@ -468,6 +474,49 @@ def make_node(
         print(error_msg)
         raise AssertionError(error_msg)

+    # Dynamic shape compensation for count_include_pad=False with explicit padding.
+    # Use pooled mask to compute valid element counts per window.
+    if not is_known_shape and is_explicit_padding and not count_include_pad:
+        mask = tf.ones_like(input_tensor, dtype=pooled_tensor.dtype)
+        if tf_pads is not None:
+            if tf.is_tensor(tf_pads):
+                mask = tf.pad(
+                    tensor=mask,
+                    paddings=tf_pads,
+                    mode='CONSTANT',
+                )
+            elif tf_pads != [0] * spatial_size * 2:
+                mask = tf.pad(
+                    tensor=mask,
+                    paddings=tf_pads,
+                    mode='CONSTANT',
+                )
+        if len(kernel_shape) == 1:
+            mask_pooled = AveragePooling1D(
+                pool_size=kernel_shape,
+                strides=strides,
+                padding=tf_pad_mode.upper(),
+            )(mask)
+        elif len(kernel_shape) == 2:
+            mask_pooled = AveragePooling2D(
+                pool_size=kernel_shape,
+                strides=strides,
+                padding=tf_pad_mode.upper(),
+            )(mask)
+        else:
+            mask_pooled = AveragePooling3D(
+                pool_size=kernel_shape,
+                strides=strides,
+                padding=tf_pad_mode.upper(),
+            )(mask)
+        kernel_volume = float(np.prod(kernel_shape))
+        count_valid = mask_pooled * tf.cast(kernel_volume, dtype=mask_pooled.dtype)
+        multiplier = tf.math.divide_no_nan(
+            tf.cast(kernel_volume, dtype=mask_pooled.dtype),
+            count_valid,
+        )
+        pooled_tensor = pooled_tensor * multiplier
+
     # tensorflow average pooling needs extra process to get same output with onnx
     # https://github.com/PINTO0309/onnx2tf/issues/124
     if average_multiplier is not None:
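
For readers unfamiliar with the trick above, here is a minimal standalone sketch (not onnx2tf's own code; the function name and the fixed NHWC/2D setup are illustrative assumptions) of how a zero-padded TensorFlow average pool can be rescaled to match ONNX's count_include_pad=0 behaviour: pool a tensor of ones through the same padding and window to count the valid elements per window, then scale each output by kernel_volume / valid_count.

import numpy as np
import tensorflow as tf

def avg_pool2d_count_exclude_pad(x, kernel, strides, pads):
    """x: NHWC float tensor, pads: ((top, bottom), (left, right)) explicit spatial padding."""
    paddings = [[0, 0], list(pads[0]), list(pads[1]), [0, 0]]
    x_pad = tf.pad(x, paddings, mode='CONSTANT')
    # Keep the statically known batch/channel dims (matters when paddings is a dynamic tensor).
    x_pad = tf.ensure_shape(x_pad, [x.shape[0], None, None, x.shape[-1]])
    # Zero-padded average: TensorFlow counts the padded zeros in the divisor.
    pooled = tf.nn.avg_pool2d(x_pad, ksize=kernel, strides=strides, padding='VALID')
    # Pool a mask of ones the same way to learn how many real elements fed each window.
    mask = tf.pad(tf.ones_like(x), paddings, mode='CONSTANT')
    mask_pooled = tf.nn.avg_pool2d(mask, ksize=kernel, strides=strides, padding='VALID')
    kernel_volume = float(np.prod(kernel))
    count_valid = mask_pooled * kernel_volume                      # valid elements per window
    multiplier = tf.math.divide_no_nan(
        tf.cast(kernel_volume, pooled.dtype), count_valid)
    return pooled * multiplier                                     # ONNX count_include_pad=0

x = tf.random.uniform([1, 5, 5, 3])
y = avg_pool2d_count_exclude_pad(x, kernel=(3, 3), strides=(1, 1), pads=((1, 1), (1, 1)))
print(y.shape)  # (1, 5, 5, 3)

tf.math.divide_no_nan keeps windows that would contain only padding at zero instead of producing Inf/NaN, which is why the diff uses it rather than a plain division.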
{onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Expand.py

@@ -48,6 +48,7 @@ def make_node(
         tf_layers_dict.get(graph_node.inputs[0].name, {}).get('before_op_output_shape_trans', True)
     before_op_output_shape_trans_2 = \
         tf_layers_dict.get(graph_node.inputs[1].name, {}).get('before_op_output_shape_trans', True)
+    # Data layout follows input[0]; shape vector (input[1]) should align to it.
     before_op_output_shape_trans = \
         before_op_output_shape_trans_1 \
         and before_op_output_shape_trans_2
@@ -58,7 +59,7 @@ def make_node(
     )
     graph_node_input_2 = get_constant_or_variable(
         graph_node.inputs[1],
-
+        before_op_output_shape_trans_1,
     )
     graph_node_output: gs.Variable = graph_node.outputs[0]
     shape = graph_node_output.shape
@@ -106,6 +107,16 @@ def make_node(
         **kwargs,
     )

+    # If shape is dynamic (Tensor) and input was transposed to NHWC/NWC/NDHWC,
+    # align the shape vector order to TensorFlow's layout.
+    if before_op_output_shape_trans_1 \
+        and tf.is_tensor(input_tensor_shape) \
+        and input_tensor_rank > 2:
+        shape_rank = input_tensor_shape.shape.rank
+        if shape_rank == 1 or shape_rank is None:
+            perm = [0] + list(range(2, input_tensor_rank)) + [1]
+            input_tensor_shape = tf.gather(input_tensor_shape, perm)
+
     tf_type = None
     if \
         (
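
A rough standalone sketch of the shape-vector reordering idea above (illustrative names, not the converter's helper): once the data tensor has been transposed from NCHW to NHWC, an Expand target shape supplied at runtime in NCHW order has to be permuted the same way before it is handed to tf.broadcast_to.

import tensorflow as tf

def align_expand_shape(shape_nchw, rank):
    # Move the channel entry (index 1) to the end: NCW/NCHW/NCDHW -> NWC/NHWC/NDHWC order.
    perm = [0] + list(range(2, rank)) + [1]
    return tf.gather(shape_nchw, perm)

shape_nchw = tf.constant([1, 3, 32, 32], dtype=tf.int64)   # ONNX-order target shape
shape_nhwc = align_expand_shape(shape_nchw, rank=4)
print(shape_nhwc.numpy())                                  # [ 1 32 32  3]

x = tf.zeros([1, 1, 1, 3])                                 # NHWC tensor with broadcastable dims
print(tf.broadcast_to(x, shape_nhwc).shape)                # (1, 32, 32, 3)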
{onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/ops/Slice.py

@@ -434,7 +434,23 @@ def make_node(
         dtype=tf.int32,
     )
     if hasattr(begin_mask_, '_inferred_value') and begin_mask_._inferred_value == [None]:
-
+        axes_list = None
+        if axes is not None:
+            if isinstance(axes, (list, tuple)):
+                axes_list = list(axes)
+            elif isinstance(axes, np.ndarray):
+                axes_list = axes.tolist() if axes.ndim > 0 else [int(axes)]
+            elif tf.is_tensor(axes):
+                if hasattr(axes, 'numpy'):
+                    axes_list = axes.numpy().tolist()
+                elif hasattr(axes, '_inferred_value') and axes._inferred_value not in (None, [None]):
+                    axes_list = list(axes._inferred_value)
+        if axes_list is not None:
+            begin_mask_ = sum(
+                1 << axis for axis in range(input_tensor_rank) if axis not in axes_list
+            )
+        else:
+            begin_mask_ = 0

     ##### end_mask
     end_bit_mask = tf.constant([2**idx for idx in range(input_tensor_rank)], dtype=tf.int32)
@@ -446,7 +462,23 @@ def make_node(
         dtype=tf.int32,
     )
     if hasattr(end_mask_, '_inferred_value') and end_mask_._inferred_value == [None]:
-
+        axes_list = None
+        if axes is not None:
+            if isinstance(axes, (list, tuple)):
+                axes_list = list(axes)
+            elif isinstance(axes, np.ndarray):
+                axes_list = axes.tolist() if axes.ndim > 0 else [int(axes)]
+            elif tf.is_tensor(axes):
+                if hasattr(axes, 'numpy'):
+                    axes_list = axes.numpy().tolist()
+                elif hasattr(axes, '_inferred_value') and axes._inferred_value not in (None, [None]):
+                    axes_list = list(axes._inferred_value)
+        if axes_list is not None:
+            end_mask_ = sum(
+                1 << axis for axis in range(input_tensor_rank) if axis not in axes_list
+            )
+        else:
+            end_mask_ = 0

     # strided_slice
     tf_layers_dict[graph_node_output.name]['tf_node'] = \
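
To make the bit-mask fallback above concrete: tf.strided_slice treats begin_mask and end_mask as per-axis bit flags, where a set bit means "ignore the begin/end value and take the full range on that axis". The small sketch below (plain Python, illustrative function name) shows the same computation the diff falls back to when the masks cannot be inferred from the graph: set the bit for every axis the ONNX Slice does not touch.

def full_range_mask(input_rank, sliced_axes):
    # Set bit i for every axis i that the ONNX Slice leaves untouched.
    sliced = set(sliced_axes)
    return sum(1 << axis for axis in range(input_rank) if axis not in sliced)

# 4D tensor, slicing only axis 2: bits for axes 0, 1 and 3 are set -> 0b1011 = 11.
mask = full_range_mask(4, [2])
print(mask, format(mask, 'b'))  # 11 1011

When the axes themselves are unknown, the diff's else branch falls back to 0, i.e. no axis is treated as full-range.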
{onnx2tf-1.29.12 → onnx2tf-1.29.13}/onnx2tf/utils/common_functions.py

@@ -2403,6 +2403,179 @@ def shape_unmatched_special_avoidance_workaround(
                 return input_tensor_1, input_tensor_2
         except:
             pass
+
+    def _normalize_shape(shape):
+        if shape is None:
+            return None
+        return [dim if isinstance(dim, int) else None for dim in shape]
+
+    def _broadcastable(shape_a, shape_b):
+        if shape_a is None or shape_b is None:
+            return False
+        if len(shape_a) != len(shape_b):
+            return False
+        for dim_a, dim_b in zip(shape_a[::-1], shape_b[::-1]):
+            if dim_a is None or dim_b is None:
+                continue
+            if dim_a != dim_b and dim_a != 1 and dim_b != 1:
+                return False
+        return True
+
+    def _match_score(shape_a, shape_b):
+        score = 0
+        for dim_a, dim_b in zip(shape_a, shape_b):
+            if dim_a is None or dim_b is None:
+                continue
+            if dim_a == dim_b:
+                score += 1
+        return score
+
+    def _shape_matches(shape_a, shape_b):
+        if shape_a is None or shape_b is None:
+            return False
+        if len(shape_a) != len(shape_b):
+            return False
+        for dim_a, dim_b in zip(shape_a, shape_b):
+            if dim_a is None or dim_b is None:
+                continue
+            if dim_a != dim_b:
+                return False
+        return True
+
+    # Generic layout-alignment for channel-first/last in 3D/4D/5D.
+    # Try a small set of canonical perms and apply the best one if it makes broadcasting possible.
+    try:
+        if hasattr(input_tensor_1, "shape") and hasattr(input_tensor_2, "shape"):
+            input_shape_1 = _normalize_shape(input_tensor_1.shape)
+            input_shape_2 = _normalize_shape(input_tensor_2.shape)
+            if input_shape_1 is not None and input_shape_2 is not None \
+                and len(input_shape_1) == len(input_shape_2) \
+                and len(input_shape_1) in (3, 4, 5):
+                if not _broadcastable(input_shape_1, input_shape_2):
+                    rank = len(input_shape_1)
+                    perm_cf2cl = [0] + list(range(2, rank)) + [1]
+                    perm_cl2cf = [0, rank - 1] + list(range(1, rank - 1))
+                    perms = []
+                    if perm_cf2cl != list(range(rank)):
+                        perms.append(perm_cf2cl)
+                    if perm_cl2cf != list(range(rank)) and perm_cl2cf != perm_cf2cl:
+                        perms.append(perm_cl2cf)
+
+                    onnx_shape_1 = _normalize_shape(
+                        graph_node_input_1.shape if hasattr(graph_node_input_1, "shape") else None
+                    )
+                    onnx_shape_2 = _normalize_shape(
+                        graph_node_input_2.shape if hasattr(graph_node_input_2, "shape") else None
+                    )
+
+                    candidates = []
+                    for idx, (shape, other_shape) in enumerate(
+                        [(input_shape_1, input_shape_2), (input_shape_2, input_shape_1)]
+                    ):
+                        for perm in perms:
+                            permuted = [shape[p] for p in perm]
+                            if _broadcastable(permuted, other_shape):
+                                score = _match_score(permuted, other_shape)
+                                # Prefer transposing the input whose ONNX shape matches current layout.
+                                if idx == 0 and _shape_matches(onnx_shape_1, shape):
+                                    score += 2
+                                if idx == 1 and _shape_matches(onnx_shape_2, shape):
+                                    score += 2
+                                candidates.append((score, idx, perm))
+
+                    if candidates:
+                        candidates.sort(reverse=True)
+                        best_score, best_idx, best_perm = candidates[0]
+                        # Avoid ambiguous ties.
+                        if len(candidates) == 1 or best_score > candidates[1][0]:
+                            if best_idx == 0:
+                                input_tensor_1 = \
+                                    transpose_with_flexing_deterrence(
+                                        input_tensor=input_tensor_1,
+                                        perm=best_perm,
+                                        **kwargs,
+                                    )
+                            else:
+                                input_tensor_2 = \
+                                    transpose_with_flexing_deterrence(
+                                        input_tensor=input_tensor_2,
+                                        perm=best_perm,
+                                        **kwargs,
+                                    )
+    except Exception:
+        pass
+
+    # Heuristic for 3D tensors where one input is (N,1,C) and the other is (N,C,W).
+    # Align by transposing the (N,C,W) tensor to (N,W,C).
+    try:
+        if hasattr(input_tensor_1, "shape") and hasattr(input_tensor_2, "shape"):
+            s1 = list(input_tensor_1.shape)
+            s2 = list(input_tensor_2.shape)
+            if len(s1) == len(s2) == 3:
+                # Normalize unknown dims to None
+                s1 = [dim if isinstance(dim, int) else None for dim in s1]
+                s2 = [dim if isinstance(dim, int) else None for dim in s2]
+                if s1[1] == 1 and s1[2] is not None and s2[1] == s1[2]:
+                    input_tensor_2 = \
+                        transpose_with_flexing_deterrence(
+                            input_tensor=input_tensor_2,
+                            perm=[0, 2, 1],
+                            **kwargs,
+                        )
+                elif s2[1] == 1 and s2[2] is not None and s1[1] == s2[2]:
+                    input_tensor_1 = \
+                        transpose_with_flexing_deterrence(
+                            input_tensor=input_tensor_1,
+                            perm=[0, 2, 1],
+                            **kwargs,
+                        )
+    except Exception:
+        pass
+
+    # Layout mismatch mitigation based on ONNX shapes:
+    # If one input matches ONNX layout and the other matches the transposed layout,
+    # transpose the ONNX-layout input to align with the transposed one.
+    try:
+        if hasattr(input_tensor_1, "shape") and hasattr(input_tensor_2, "shape"):
+            input_shape_1 = list(input_tensor_1.shape)
+            input_shape_2 = list(input_tensor_2.shape)
+            if len(input_shape_1) == len(input_shape_2) and len(input_shape_1) in (3, 4, 5):
+                onnx_shape_1 = None
+                onnx_shape_2 = None
+                if hasattr(graph_node_input_1, "shape") and graph_node_input_1.shape is not None:
+                    onnx_shape_1 = [
+                        dim if not isinstance(dim, str) else None for dim in graph_node_input_1.shape
+                    ]
+                if hasattr(graph_node_input_2, "shape") and graph_node_input_2.shape is not None:
+                    onnx_shape_2 = [
+                        dim if not isinstance(dim, str) else None for dim in graph_node_input_2.shape
+                    ]
+                if onnx_shape_1 is not None and onnx_shape_2 is not None:
+                    perm = [0] + list(range(2, len(input_shape_1))) + [1]
+                    permuted_onnx_shape_1 = [onnx_shape_1[p] for p in perm]
+                    permuted_onnx_shape_2 = [onnx_shape_2[p] for p in perm]
+
+                    in1_matches_onnx = _shape_matches(input_shape_1, onnx_shape_1)
+                    in1_matches_perm = _shape_matches(input_shape_1, permuted_onnx_shape_1)
+                    in2_matches_onnx = _shape_matches(input_shape_2, onnx_shape_2)
+                    in2_matches_perm = _shape_matches(input_shape_2, permuted_onnx_shape_2)
+
+                    if in1_matches_perm and in2_matches_onnx and not in2_matches_perm:
+                        input_tensor_2 = \
+                            transpose_with_flexing_deterrence(
+                                input_tensor=input_tensor_2,
+                                perm=perm,
+                                **kwargs,
+                            )
+                    elif in2_matches_perm and in1_matches_onnx and not in1_matches_perm:
+                        input_tensor_1 = \
+                            transpose_with_flexing_deterrence(
+                                input_tensor=input_tensor_1,
+                                perm=perm,
+                                **kwargs,
+                            )
+    except Exception:
+        pass
     # At least one True value for same_input_shape_as_onnx
     # At least one True value in nhwc_flags
     # same_input_shape_as_onnx == True and nhwc_flags == False and 3D or 4D or 5D tensor is NHWC transposed
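
A simplified, standalone sketch of the alignment heuristic introduced above (helper and variable names here are illustrative; the real code additionally scores candidate permutations against the original ONNX shapes and transposes via transpose_with_flexing_deterrence rather than a plain tf.transpose): treat unknown dimensions as wildcards, test NumPy-style broadcastability, and if two equal-rank tensors only become broadcast-compatible after a channel-first/channel-last permutation, transpose one of them.

import tensorflow as tf

def broadcastable(shape_a, shape_b):
    # None acts as a wildcard for an unknown dimension.
    if len(shape_a) != len(shape_b):
        return False
    for a, b in zip(shape_a[::-1], shape_b[::-1]):
        if a is None or b is None:
            continue
        if a != b and a != 1 and b != 1:
            return False
    return True

def align_layouts(t1, t2):
    s1 = [d if isinstance(d, int) else None for d in t1.shape]
    s2 = [d if isinstance(d, int) else None for d in t2.shape]
    if len(s1) == len(s2) and len(s1) in (3, 4, 5) and not broadcastable(s1, s2):
        rank = len(s1)
        perm = [0] + list(range(2, rank)) + [1]     # channel-first -> channel-last
        if broadcastable([s2[p] for p in perm], s1):
            t2 = tf.transpose(t2, perm)
    return t1, t2

a = tf.zeros([1, 8, 8, 4])      # NHWC activation
b = tf.zeros([1, 4, 1, 1])      # NCHW-shaped operand that cannot broadcast as-is
a, b = align_layouts(a, b)
print(b.shape)                  # (1, 1, 1, 4) -> now broadcastable with a

This mirrors the first of the three try blocks in the hunk; the other two handle the (N,1,C) versus (N,C,W) special case and the ONNX-shape-based layout check in the same spirit.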
{onnx2tf-1.29.12 → onnx2tf-1.29.13}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "uv_build"

 [project]
 name = "onnx2tf"
-version = "1.29.12"
+version = "1.29.13"
 description = "Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf)."
 readme = "README.md"
 requires-python = ">=3.10.12"
@@ -21,6 +21,7 @@ classifiers = [
     "Operating System :: POSIX :: Linux",
     "Operating System :: Unix",
     "Programming Language :: Python :: 3",
+    "Programming Language :: Python :: 3.10",
     "Programming Language :: Python :: 3.11",
     "Topic :: Scientific/Engineering :: Artificial Intelligence",
 ]
@@ -57,6 +58,8 @@ Issues = "https://github.com/PINTO0309/onnx2tf/issues"
 override-dependencies = [
     "onnx==1.19.0",
     "onnxsim==0.4.36",
+    "ml-dtypes==0.5.1",
+    "numpy==1.26.4",
 ]

 [tool.uv.build-backend]
All files listed above with +0 -0 are unchanged between 1.29.12 and 1.29.13.