onnx2tf 1.29.20__tar.gz → 1.29.21__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/PKG-INFO +29 -8
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/README.md +27 -6
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/__init__.py +1 -1
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/onnx2tf.py +31 -2
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/utils/common_functions.py +99 -2
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/pyproject.toml +2 -2
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/__main__.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Abs.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Acos.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Acosh.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Add.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/AffineGrid.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/And.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/ArgMax.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/ArgMin.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Asin.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Asinh.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Atan.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Atanh.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Attention.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/AveragePool.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/BatchNormalization.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Bernoulli.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/BitShift.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/BitwiseAnd.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/BitwiseNot.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/BitwiseOr.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/BitwiseXor.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/BlackmanWindow.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Cast.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Ceil.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Celu.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Clip.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Col2Im.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Compress.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Concat.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/ConcatFromSequence.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Constant.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/ConstantOfShape.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Conv.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/ConvInteger.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/ConvTranspose.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Cos.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Cosh.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/CumProd.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/CumSum.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/DFT.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/DeformConv.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/DepthToSpace.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/DequantizeLinear.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Det.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Div.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Dropout.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/DynamicQuantizeLinear.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Einsum.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Elu.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Equal.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Erf.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Exp.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Expand.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/EyeLike.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Flatten.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Floor.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/FusedConv.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/GRU.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Gather.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/GatherElements.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/GatherND.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Gelu.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Gemm.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/GlobalAveragePool.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/GlobalLpPool.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/GlobalMaxPool.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Greater.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/GreaterOrEqual.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/GridSample.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/GroupNorm.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/HammingWindow.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/HannWindow.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/HardSigmoid.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/HardSwish.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Hardmax.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Identity.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/If.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/ImageDecoder.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Input.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/InstanceNormalization.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Inverse.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/IsInf.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/IsNaN.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/LRN.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/LSTM.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/LayerNormalization.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/LeakyRelu.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Less.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/LessOrEqual.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Log.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/LogSoftmax.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Loop.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/LpNormalization.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/LpPool.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/MatMul.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/MatMulInteger.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Max.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/MaxPool.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/MaxRoiPool.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/MaxUnpool.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Mean.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/MeanVarianceNormalization.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/MelWeightMatrix.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Min.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Mish.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Mod.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Mul.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Multinomial.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Neg.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/NegativeLogLikelihoodLoss.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/NonMaxSuppression.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/NonZero.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Not.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/OneHot.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/OptionalGetElement.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/OptionalHasElement.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Or.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/PRelu.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Pad.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Pow.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/QLinearAdd.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/QLinearConcat.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/QLinearConv.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/QLinearLeakyRelu.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/QLinearMatMul.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/QLinearMul.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/QLinearSigmoid.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/QLinearSoftmax.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/QuantizeLinear.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/RMSNormalization.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/RNN.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/RandomNormal.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/RandomNormalLike.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/RandomUniform.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/RandomUniformLike.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Range.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Reciprocal.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/ReduceL1.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/ReduceL2.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/ReduceLogSum.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/ReduceLogSumExp.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/ReduceMax.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/ReduceMean.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/ReduceMin.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/ReduceProd.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/ReduceSum.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/ReduceSumSquare.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/RegexFullMatch.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Relu.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Reshape.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Resize.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/ReverseSequence.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/RoiAlign.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/RotaryEmbedding.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Round.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/STFT.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/ScaleAndTranslate.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Scan.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Scatter.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/ScatterElements.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/ScatterND.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Selu.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/SequenceAt.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/SequenceConstruct.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/SequenceEmpty.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/SequenceErase.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/SequenceInsert.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/SequenceLength.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Shape.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Shrink.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Sigmoid.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Sign.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Sin.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Sinh.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Size.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Slice.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Softmax.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/SoftmaxCrossEntropyLoss.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Softplus.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Softsign.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/SpaceToDepth.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Split.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/SplitToSequence.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Sqrt.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Squeeze.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/StringConcat.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/StringNormalizer.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/StringSplit.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Sub.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Sum.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Tan.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Tanh.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/TensorScatter.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/ThresholdedRelu.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Tile.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/TopK.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Transpose.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Trilu.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Unique.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Unsqueeze.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Upsample.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Where.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/Xor.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/ops/__init__.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/utils/__init__.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/utils/enums.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/utils/iterative_json_optimizer.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/utils/json_auto_generator.py +0 -0
- {onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/utils/logging.py +0 -0
{onnx2tf-1.29.20 → onnx2tf-1.29.21}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: onnx2tf
-Version: 1.29.20
+Version: 1.29.21
 Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
 Keywords: onnx,tensorflow,tflite,keras,deep-learning,machine-learning
 Author: Katsuya Hyodo
@@ -18,7 +18,7 @@ Classifier: Programming Language :: Python :: 3.11
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Requires-Dist: requests==2.32.5
 Requires-Dist: numpy==1.26.4
-Requires-Dist: onnx==1.19.0
+Requires-Dist: onnx==1.19.1
 Requires-Dist: onnxruntime==1.23.0
 Requires-Dist: opencv-python==4.11.0.86
 Requires-Dist: onnxsim==0.4.36
@@ -365,7 +365,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
 docker run --rm -it \
 -v `pwd`:/workdir \
 -w /workdir \
-ghcr.io/pinto0309/onnx2tf:1.29.20
+ghcr.io/pinto0309/onnx2tf:1.29.21

 or

@@ -373,18 +373,18 @@ Video speed is adjusted approximately 50 times slower than actual speed.
 docker run --rm -it \
 -v `pwd`:/workdir \
 -w /workdir \
-docker.io/pinto0309/onnx2tf:1.29.20
+docker.io/pinto0309/onnx2tf:1.29.21

 or

-pip install -U onnx==1.19.0 \
+pip install -U onnx==1.19.1 \
 && pip install -U onnx-graphsurgeon==0.5.8 \
 && pip install -U onnxruntime==1.23.0 \
 && pip install -U onnxsim==0.4.36 \
 && pip install -U onnxoptimizer==0.4.2 \
 && pip install -U simple_onnx_processing_tools==1.1.32 \
-&& pip install -U sne4onnx \
-&& pip install -U sng4onnx \
+&& pip install -U sne4onnx==1.0.15 \
+&& pip install -U sng4onnx==1.0.5 \
 && pip install -U ai_edge_litert==1.2.0 \
 && pip install -U tensorflow==2.19.0 \
 && pip install -U protobuf==3.20.3 \
@@ -630,7 +630,7 @@ After many upgrades, the need for JSON parameter correction has become much less

 `-ois` an option to overwrite the input OP to a static size if it has undefined dimensions. `-cotof` option checks the accuracy of all OPs one by one. `-cotoa` is the error value of the threshold for determining an accuracy error. If there are undefined dimensions in the input OP, it is better to fix them to the static geometry to improve the accuracy of the accuracy measurement.

-Also, you can use the `-cind` option to specify custom input for `-cotof`, instead of using the default dummy input. Otherwise, all input values will be set to 1. For more information about the `-cind` option, please refer to [here](#cli-parameter).
+Also, you can use the `-cind` option to specify custom input for `-cotof`, instead of using the default dummy input. Otherwise, all input values will be set to 1. You can override the dummy input values with `--value_hints` (scalar only, `*:default` supported). For more information about the `-cind` option, please refer to [here](#cli-parameter).

 The `-cotof` option only compares the original ONNX and converted TensorFlow (Keras) models at Float32 precision, not at Float16 or INT8 precision.

@@ -644,6 +644,10 @@ onnx2tf -i mobilenetv2-12.onnx -b 1 -cotof -cotoa 1e-1
 or

 onnx2tf -i mobilenetv2-12.onnx -cotof -cotoa 1e-1 -cind "input" "/your/path/x.npy"
+
+or
+
+onnx2tf -i mobilenetv2-12.onnx -cotof -cotoa 1e-1 --value_hints "input:0.5" "*:1.0"
 ```
 

@@ -1826,6 +1830,14 @@ optional arguments:
   A value of 1 or more must be specified.
   Numerical values other than dynamic dimensions are ignored.

+  -vh VALUE_HINTS [VALUE_HINTS ...], \
+  --value_hints VALUE_HINTS [VALUE_HINTS ...]
+    Value hints for dummy inference input tensors.
+    The format is
+    "input_name_1:value" "input_name_2:value" "*:default_value"
+    "*" applies to all inputs not explicitly specified.
+    Values are scalar only.
+
   -nlt, --no_large_tensor
     Suppresses constant bloat caused by Tile OP when optimizing models in onnxsim.
     See: https://github.com/daquexian/onnx-simplifier/issues/178
@@ -2157,6 +2169,7 @@ convert(
     batch_size: Union[int, NoneType] = None,
     overwrite_input_shape: Union[List[str], NoneType] = None,
     shape_hints: Union[List[str], NoneType] = None,
+    value_hints: Union[List[str], NoneType] = None,
     no_large_tensor: Optional[bool] = False,
     output_nms_with_dynamic_tensor: Optional[bool] = False,
     switch_nms_version: Optional[str] = 'v4',
@@ -2377,6 +2390,13 @@ convert(
     A value of 1 or more must be specified.
     Numerical values other than dynamic dimensions are ignored.

+  value_hints: Optional[List[str]]
+    Value hints for dummy inference input tensors.
+    The format is
+    ['input_name_1:value', 'input_name_2:value', '*:default_value']
+    "*" applies to all inputs not explicitly specified.
+    Values are scalar only.
+
   no_large_tensor: Optional[bool]
     Suppresses constant bloat caused by Tile OP when optimizing models in onnxsim.
     See: https://github.com/daquexian/onnx-simplifier/issues/178
@@ -3032,6 +3052,7 @@ The above differences often cannot be dealt with by simply converting the model
 14. [nobuco](https://github.com/AlexanderLutsenko/nobuco)
 15. [onnx2torch](https://github.com/ENOT-AutoDL/onnx2torch)
 16. [ai-edge-torch](https://github.com/google-ai-edge/ai-edge-torch)
+17. [LiteRT.js](https://ai.google.dev/edge/litert/web)

 ## Acknowledgement
 1. https://github.com/onnx/models
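The same `--value_hints` capability is exposed through the Python API via the new `value_hints` argument of `convert()` shown in the hunks above. A minimal sketch of a call, using only parameters that appear in this diff; the model path and hint values are the same placeholders used in the README example:

```python
import onnx2tf

# Sketch only: "input" is the example input tensor name from the README above.
# A named hint wins over the "*" fallback; inputs with no hint at all stay all-ones.
onnx2tf.convert(
    input_onnx_file_path='mobilenetv2-12.onnx',
    value_hints=['input:0.5', '*:1.0'],  # one scalar per input, '*' = default for the rest
)
```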
{onnx2tf-1.29.20 → onnx2tf-1.29.21}/README.md

@@ -323,7 +323,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
 docker run --rm -it \
 -v `pwd`:/workdir \
 -w /workdir \
-ghcr.io/pinto0309/onnx2tf:1.29.20
+ghcr.io/pinto0309/onnx2tf:1.29.21

 or

@@ -331,18 +331,18 @@ Video speed is adjusted approximately 50 times slower than actual speed.
 docker run --rm -it \
 -v `pwd`:/workdir \
 -w /workdir \
-docker.io/pinto0309/onnx2tf:1.29.20
+docker.io/pinto0309/onnx2tf:1.29.21

 or

-pip install -U onnx==1.19.0 \
+pip install -U onnx==1.19.1 \
 && pip install -U onnx-graphsurgeon==0.5.8 \
 && pip install -U onnxruntime==1.23.0 \
 && pip install -U onnxsim==0.4.36 \
 && pip install -U onnxoptimizer==0.4.2 \
 && pip install -U simple_onnx_processing_tools==1.1.32 \
-&& pip install -U sne4onnx \
-&& pip install -U sng4onnx \
+&& pip install -U sne4onnx==1.0.15 \
+&& pip install -U sng4onnx==1.0.5 \
 && pip install -U ai_edge_litert==1.2.0 \
 && pip install -U tensorflow==2.19.0 \
 && pip install -U protobuf==3.20.3 \
@@ -588,7 +588,7 @@ After many upgrades, the need for JSON parameter correction has become much less

 `-ois` an option to overwrite the input OP to a static size if it has undefined dimensions. `-cotof` option checks the accuracy of all OPs one by one. `-cotoa` is the error value of the threshold for determining an accuracy error. If there are undefined dimensions in the input OP, it is better to fix them to the static geometry to improve the accuracy of the accuracy measurement.

-Also, you can use the `-cind` option to specify custom input for `-cotof`, instead of using the default dummy input. Otherwise, all input values will be set to 1. For more information about the `-cind` option, please refer to [here](#cli-parameter).
+Also, you can use the `-cind` option to specify custom input for `-cotof`, instead of using the default dummy input. Otherwise, all input values will be set to 1. You can override the dummy input values with `--value_hints` (scalar only, `*:default` supported). For more information about the `-cind` option, please refer to [here](#cli-parameter).

 The `-cotof` option only compares the original ONNX and converted TensorFlow (Keras) models at Float32 precision, not at Float16 or INT8 precision.

@@ -602,6 +602,10 @@ onnx2tf -i mobilenetv2-12.onnx -b 1 -cotof -cotoa 1e-1
 or

 onnx2tf -i mobilenetv2-12.onnx -cotof -cotoa 1e-1 -cind "input" "/your/path/x.npy"
+
+or
+
+onnx2tf -i mobilenetv2-12.onnx -cotof -cotoa 1e-1 --value_hints "input:0.5" "*:1.0"
 ```
 

@@ -1784,6 +1788,14 @@ optional arguments:
   A value of 1 or more must be specified.
   Numerical values other than dynamic dimensions are ignored.

+  -vh VALUE_HINTS [VALUE_HINTS ...], \
+  --value_hints VALUE_HINTS [VALUE_HINTS ...]
+    Value hints for dummy inference input tensors.
+    The format is
+    "input_name_1:value" "input_name_2:value" "*:default_value"
+    "*" applies to all inputs not explicitly specified.
+    Values are scalar only.
+
   -nlt, --no_large_tensor
     Suppresses constant bloat caused by Tile OP when optimizing models in onnxsim.
     See: https://github.com/daquexian/onnx-simplifier/issues/178
@@ -2115,6 +2127,7 @@ convert(
     batch_size: Union[int, NoneType] = None,
     overwrite_input_shape: Union[List[str], NoneType] = None,
     shape_hints: Union[List[str], NoneType] = None,
+    value_hints: Union[List[str], NoneType] = None,
     no_large_tensor: Optional[bool] = False,
     output_nms_with_dynamic_tensor: Optional[bool] = False,
     switch_nms_version: Optional[str] = 'v4',
@@ -2335,6 +2348,13 @@ convert(
     A value of 1 or more must be specified.
     Numerical values other than dynamic dimensions are ignored.

+  value_hints: Optional[List[str]]
+    Value hints for dummy inference input tensors.
+    The format is
+    ['input_name_1:value', 'input_name_2:value', '*:default_value']
+    "*" applies to all inputs not explicitly specified.
+    Values are scalar only.
+
   no_large_tensor: Optional[bool]
     Suppresses constant bloat caused by Tile OP when optimizing models in onnxsim.
     See: https://github.com/daquexian/onnx-simplifier/issues/178
@@ -2990,6 +3010,7 @@ The above differences often cannot be dealt with by simply converting the model
 14. [nobuco](https://github.com/AlexanderLutsenko/nobuco)
 15. [onnx2torch](https://github.com/ENOT-AutoDL/onnx2torch)
 16. [ai-edge-torch](https://github.com/google-ai-edge/ai-edge-torch)
+17. [LiteRT.js](https://ai.google.dev/edge/litert/web)

 ## Acknowledgement
 1. https://github.com/onnx/models
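In terms of the generated dummy tensors, a value hint only changes the fill value: without a hint the accuracy checker feeds all-ones, with a hint it feeds the requested scalar at the same shape and dtype. A standalone NumPy sketch of that behaviour (the shape and dtype below are illustrative, not taken from a specific model):

```python
import numpy as np

input_size = (1, 3, 224, 224)  # illustrative static input shape
input_dtype = np.float32

# Default dummy input (no hint): all ones, as in previous releases.
ones_input = np.ones(input_size, dtype=input_dtype)

# With --value_hints "input:0.5": same shape and dtype, filled with the scalar hint.
hinted_input = np.full(input_size, 0.5, dtype=input_dtype)

print(ones_input.mean(), hinted_input.mean())  # 1.0 0.5
```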
{onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/onnx2tf.py

@@ -43,6 +43,7 @@ from typing import Optional, List, Any, Dict
 from argparse import ArgumentParser

 import importlib
+import onnx2tf.utils.common_functions as common_functions
 from onnx2tf.utils.common_functions import (
     dummy_onnx_inference,
     dummy_tf_inference,
@@ -639,6 +640,7 @@ def convert(
     batch_size: Optional[int] = None,
     overwrite_input_shape: Optional[List[str]] = None,
     shape_hints: Optional[List[str]] = None,
+    value_hints: Optional[List[str]] = None,
     no_large_tensor: Optional[bool] = False,
     output_nms_with_dynamic_tensor: Optional[bool] = False,
     switch_nms_version: Optional[str] = 'v4',
@@ -850,6 +852,15 @@ def convert(
         A value of 1 or more must be specified.\n
         Numerical values other than dynamic dimensions are ignored.

+    value_hints: Optional[List[str]]
+        Value hints for dummy inference input tensors.\n
+        The format is\n
+        ["input_name_1:value","input_name_2:value","*:default_value"].\n
+        "*" applies to all inputs not explicitly specified.\n
+        Values are scalar only.\n
+        e.g.\n
+        ['input0:0.5','mask:0','*:1.0']\n
+
     no_large_tensor: Optional[bool]
         Suppresses constant bloat caused by Tile OP when optimizing models in onnxsim.\n
         See: https://github.com/daquexian/onnx-simplifier/issues/178
@@ -1110,6 +1121,8 @@ def convert(
     if verbosity is None:
         verbosity = 'debug'
     set_log_level('error' if non_verbose else verbosity)
+    common_functions.set_dummy_shape_hints(shape_hints)
+    common_functions.set_dummy_value_hints(value_hints)

     # Either designation required
     if not input_onnx_file_path and not onnx_graph:
@@ -1326,6 +1339,10 @@ def convert(
             'Failed to optimize the onnx file.'
         )

+    has_external_data = False
+    if input_onnx_file_path and os.path.exists(input_onnx_file_path):
+        has_external_data = check_has_external_data(input_onnx_file_path)
+
     # Automatic generation of each OP name - sng4onnx
     if not not_use_opname_auto_generate:
         info('')
@@ -1357,9 +1374,7 @@ def convert(

     # Loading Graphs
     # onnx_graph If specified, onnx_graph is processed first
-    has_external_data = False
     if not onnx_graph:
-        has_external_data = check_has_external_data(input_onnx_file_path)
         onnx_graph = onnx.load(input_onnx_file_path)

     if not auto_split_model and onnx_graph is not None:
@@ -1686,6 +1701,7 @@ def convert(
         'batch_size': batch_size,
         'overwrite_input_shape': overwrite_input_shape,
         'shape_hints': shape_hints,
+        'value_hints': value_hints,
         'no_large_tensor': no_large_tensor,
         'output_nms_with_dynamic_tensor': output_nms_with_dynamic_tensor,
         'switch_nms_version': switch_nms_version,
@@ -3874,6 +3890,18 @@ def main():
             'Only applied to dynamic dimensions in inputs. \n' +
             'Only used when -cotof or -coto are specified.'
     )
+    parser.add_argument(
+        '-vh',
+        '--value_hints',
+        type=str,
+        nargs='+',
+        help=\
+            'Value hints for dummy inference input tensors. \n' +
+            'The format is\n' +
+            '"input_name_1:value" "input_name_2:value" "*:default_value". \n' +
+            '"*" applies to all inputs not explicitly specified. \n' +
+            'Values are scalar only.'
+    )
     parser.add_argument(
         '-nlt',
         '--no_large_tensor',
@@ -4359,6 +4387,7 @@ def main():
         batch_size=args.batch_size,
         overwrite_input_shape=args.overwrite_input_shape,
         shape_hints=args.shape_hints,
+        value_hints=args.value_hints,
         no_large_tensor=args.no_large_tensor,
         output_nms_with_dynamic_tensor=args.output_nms_with_dynamic_tensor,
         switch_nms_version=args.switch_nms_version,
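`convert()` registers the hints once, through `common_functions.set_dummy_shape_hints()` and `set_dummy_value_hints()`, so that `dummy_onnx_inference()` and `dummy_tf_inference()` can fall back to them whenever they are called without an explicit `value_hints` argument. A simplified sketch of that module-level default pattern (names shortened; this is not the shipped onnx2tf code):

```python
from typing import List, Optional

_DEFAULT_VALUE_HINTS: Optional[List[str]] = None


def set_dummy_value_hints(value_hints: Optional[List[str]]) -> None:
    # Called once from the top-level entry point, e.g. convert().
    global _DEFAULT_VALUE_HINTS
    _DEFAULT_VALUE_HINTS = value_hints


def dummy_inference(value_hints: Optional[List[str]] = None) -> Optional[List[str]]:
    # Deep call sites do not need the argument threaded through:
    # they inherit the module-level default when nothing explicit is passed.
    if value_hints is None:
        value_hints = _DEFAULT_VALUE_HINTS
    return value_hints


set_dummy_value_hints(['input:0.5', '*:1.0'])
print(dummy_inference())           # ['input:0.5', '*:1.0']
print(dummy_inference(['x:2.0']))  # an explicit argument still wins
```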
{onnx2tf-1.29.20 → onnx2tf-1.29.21}/onnx2tf/utils/common_functions.py

@@ -1,4 +1,5 @@
 import math
+import ast
 import os
 import io
 import re
@@ -45,6 +46,59 @@ from onnx2tf.utils.enums import (
 INF_INDEX_VALUE: int = 4294967296
 ONNX_INF_INDEX_VALUE = sys.maxsize # 9223372036854775807

+_DEFAULT_DUMMY_SHAPE_HINTS: Optional[List[str]] = None
+_DEFAULT_DUMMY_VALUE_HINTS: Optional[List[str]] = None
+
+
+def set_dummy_shape_hints(shape_hints: Optional[List[str]]) -> None:
+    global _DEFAULT_DUMMY_SHAPE_HINTS
+    _DEFAULT_DUMMY_SHAPE_HINTS = shape_hints
+
+
+def set_dummy_value_hints(value_hints: Optional[List[str]]) -> None:
+    global _DEFAULT_DUMMY_VALUE_HINTS
+    _DEFAULT_DUMMY_VALUE_HINTS = value_hints
+
+
+def _parse_value_hint_scalar(value: str) -> Optional[Any]:
+    try:
+        parsed = ast.literal_eval(value)
+    except Exception:
+        try:
+            parsed = float(value)
+        except Exception:
+            return None
+    if isinstance(parsed, (list, tuple, dict, set, np.ndarray)):
+        return None
+    if isinstance(parsed, (int, float, bool, np.number)):
+        return parsed
+    return None
+
+
+def _parse_value_hints(
+    value_hints: Optional[List[str]]
+) -> Tuple[Dict[str, Any], Optional[Any], bool]:
+    if not value_hints:
+        return {}, None, False
+    hints: Dict[str, Any] = {}
+    default_value: Optional[Any] = None
+    for hint in value_hints:
+        if not isinstance(hint, str):
+            continue
+        parts = hint.split(':', 1)
+        if len(parts) != 2:
+            continue
+        input_name, value_str = parts[0], parts[1]
+        parsed_value = _parse_value_hint_scalar(value_str)
+        if parsed_value is None:
+            warn(f'Invalid --value_hints entry ignored: {hint}')
+            continue
+        if input_name == '*':
+            default_value = parsed_value
+        else:
+            hints[input_name] = parsed_value
+    return hints, default_value, default_value is not None
+


@@ -3854,6 +3908,7 @@ def dummy_onnx_inference(
     enable_ort_output_memmap: bool = False,
     ort_output_memmap_dir: Optional[str] = None,
     shape_hints: Optional[List[str]] = None,
+    value_hints: Optional[List[str]] = None,
     input_datas_for_validation: Optional[Dict[str, np.ndarray]] = None,
 ) -> List[np.ndarray]:
     """Perform inference on ONNX subgraphs with an all-1 dummy tensor.
@@ -3891,6 +3946,10 @@
         Directory to store memmap files. If not specified, a temporary
         directory is created and removed on exit.

+    value_hints: Optional[List[str]]
+        Value hints for dummy inference input tensors.
+        Format: ["input_name:value", "*:default_value"].
+
     input_datas_for_validation: Optional[Dict[str, np.ndarray]]
         Optional dict to be filled with the input tensors used for inference.

@@ -3899,6 +3958,11 @@
     outputs: List[np.ndarray]
         Results of inference using dummy tensor
     """
+    if shape_hints is None:
+        shape_hints = _DEFAULT_DUMMY_SHAPE_HINTS
+    if value_hints is None:
+        value_hints = _DEFAULT_DUMMY_VALUE_HINTS
+
     # Separate onnx at specified output_names position
     domain: str = onnx_graph.domain
     ir_version: int = onnx_graph.ir_version
@@ -4053,6 +4117,7 @@
         name: tuple(size) for name, size in zip(input_names, input_sizes)
     }
     input_datas = {}
+    value_hints_dict, default_value, has_default = _parse_value_hints(value_hints)

     # -cid
     if custom_input_op_name_np_data_path:
@@ -4086,7 +4151,17 @@

     else:
         for input_name, input_size, input_dtype in zip(input_names, input_sizes, input_dtypes):
-            if test_data_nhwc is None:
+            hint_value = value_hints_dict.get(
+                input_name,
+                default_value if has_default else None,
+            )
+            if hint_value is not None:
+                input_datas[input_name] = np.full(
+                    input_size,
+                    hint_value,
+                    dtype=input_dtype,
+                )
+            elif test_data_nhwc is None:
                 input_datas[input_name] = np.ones(
                     input_size,
                     dtype=input_dtype,
@@ -4245,6 +4320,7 @@ def dummy_tf_inference(
     verification_datas: Optional[List[np.ndarray]] = None,
     custom_input_op_name_np_data_path: Optional[str] = None,
     shape_hints: Optional[List[str]] = None,
+    value_hints: Optional[List[str]] = None,
     input_datas_for_validation: Optional[Dict[str, np.ndarray]] = None,
     prefilled_input_datas: Optional[Dict[str, np.ndarray]] = None,
     keep_shape_absolutely_input_names: Optional[List[str]] = None,
@@ -4269,6 +4345,11 @@

     custom_input_op_name_np_data_path
         Path to Numpy file for custom data used for dummy inference
+
+    value_hints: Optional[List[str]]
+        Value hints for dummy inference input tensors.
+        Format: ["input_name:value", "*:default_value"].
+
     input_datas_for_validation: Optional[Dict[str, np.ndarray]]
         Optional dict to be filled with the input tensors used for inference.

@@ -4278,6 +4359,11 @@
         Results of inference using dummy tensor.
         Dict of tensorflow node and corresponding ndarray output.
     """
+    if shape_hints is None:
+        shape_hints = _DEFAULT_DUMMY_SHAPE_HINTS
+    if value_hints is None:
+        value_hints = _DEFAULT_DUMMY_VALUE_HINTS
+
     input_names: List[str] = [inp.name for inp in inputs]
     input_sizes: List[int] = [inp.shape for inp in inputs]
     input_size_map = {name: size for name, size in zip(input_names, input_sizes)}
@@ -4353,6 +4439,7 @@

     input_dtypes: List[Any] = [inp.dtype for inp in inputs]
     input_datas = {}
+    value_hints_dict, default_value, has_default = _parse_value_hints(value_hints)

     # -cid
     if custom_input_op_name_np_data_path:
@@ -4409,7 +4496,17 @@
     else:
         if verification_datas is None:
             for input_name, input_size, input_dtype in zip(input_names, input_sizes, input_dtypes):
-                if test_data_nhwc is None:
+                hint_value = value_hints_dict.get(
+                    input_name,
+                    default_value if has_default else None,
+                )
+                if hint_value is not None:
+                    input_datas[input_name] = np.full(
+                        input_size,
+                        hint_value,
+                        dtype=TF_DTYPES_TO_NUMPY_DTYPES[input_dtype],
+                    )
+                elif test_data_nhwc is None:
                     input_datas[input_name] = np.ones(
                         input_size,
                         dtype=TF_DTYPES_TO_NUMPY_DTYPES[input_dtype],
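The parsing rules introduced above are: each hint is split on the first `:`, the value must evaluate to a scalar (`ast.literal_eval`, falling back to `float`), malformed or non-scalar entries are skipped with a warning, and the `*` entry becomes the fallback for inputs without a named hint. A self-contained illustration of those rules; it mirrors `_parse_value_hints` but is a simplified re-implementation, not the shipped function:

```python
import ast
from typing import Any, Dict, List, Optional, Tuple


def parse_value_hints(value_hints: Optional[List[str]]) -> Tuple[Dict[str, Any], Optional[Any], bool]:
    hints: Dict[str, Any] = {}
    default_value: Optional[Any] = None
    for hint in value_hints or []:
        name, sep, raw = hint.partition(':')
        if not sep:
            continue  # no ':' separator -> malformed, ignored
        try:
            value = ast.literal_eval(raw)
        except Exception:
            try:
                value = float(raw)
            except Exception:
                continue  # not a scalar literal -> ignored
        if not isinstance(value, (int, float, bool)):
            continue  # lists/dicts/strings are rejected: scalar only
        if name == '*':
            default_value = value
        else:
            hints[name] = value
    return hints, default_value, default_value is not None


# '*' supplies the value for every input that has no named hint.
print(parse_value_hints(['input0:0.5', 'mask:0', '*:1.0', 'bad_entry']))
# -> ({'input0': 0.5, 'mask': 0}, 1.0, True)
```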
{onnx2tf-1.29.20 → onnx2tf-1.29.21}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "uv_build"

 [project]
 name = "onnx2tf"
-version = "1.29.20"
+version = "1.29.21"
 description = "Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf)."
 readme = "README.md"
 requires-python = ">=3.10.12"
@@ -28,7 +28,7 @@ classifiers = [
 dependencies = [
     "requests==2.32.5",
     "numpy==1.26.4",
-    "onnx==1.19.0",
+    "onnx==1.19.1",
     "onnxruntime==1.23.0",
     "opencv-python==4.11.0.86",
     "onnxsim==0.4.36",
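If you want to confirm that an existing environment matches the new pins before converting, a quick check with the standard library (package names and versions taken from the dependency list and version bump above):

```python
from importlib.metadata import version

# Expected versions are the pins declared in this release.
for pkg, pinned in [('onnx', '1.19.1'), ('onnxruntime', '1.23.0'), ('onnx2tf', '1.29.21')]:
    print(f'{pkg}: installed {version(pkg)}, expected {pinned}')
```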
All remaining files listed above with +0 -0 are unchanged between 1.29.20 and 1.29.21.