onnx2tf 1.25.15.tar.gz → 1.26.1.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {onnx2tf-1.25.15/onnx2tf.egg-info → onnx2tf-1.26.1}/PKG-INFO +22 -12
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/README.md +21 -11
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/__init__.py +1 -1
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/onnx2tf.py +46 -18
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Conv.py +12 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/utils/common_functions.py +13 -6
- {onnx2tf-1.25.15 → onnx2tf-1.26.1/onnx2tf.egg-info}/PKG-INFO +22 -12
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/LICENSE +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/LICENSE_onnx-tensorflow +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/__main__.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Abs.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Acos.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Acosh.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Add.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/And.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/ArgMax.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/ArgMin.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Asin.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Asinh.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Atan.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Atanh.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/AveragePool.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/BatchNormalization.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Bernoulli.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/BitShift.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Cast.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Ceil.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Celu.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Clip.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Col2Im.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Compress.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Concat.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/ConcatFromSequence.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Constant.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/ConstantOfShape.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/ConvInteger.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/ConvTranspose.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Cos.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Cosh.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/CumSum.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/DepthToSpace.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/DequantizeLinear.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Det.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Div.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Dropout.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/DynamicQuantizeLinear.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Einsum.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Elu.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Equal.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Erf.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Exp.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Expand.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/EyeLike.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Flatten.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Floor.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/FusedConv.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/GRU.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Gather.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/GatherElements.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/GatherND.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Gelu.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Gemm.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/GlobalAveragePool.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/GlobalLpPool.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/GlobalMaxPool.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Greater.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/GreaterOrEqual.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/GridSample.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/GroupNorm.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/HammingWindow.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/HannWindow.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/HardSigmoid.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/HardSwish.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Hardmax.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Identity.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/If.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Input.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/InstanceNormalization.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Inverse.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/IsInf.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/IsNaN.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/LRN.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/LSTM.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/LayerNormalization.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/LeakyRelu.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Less.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/LessOrEqual.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Log.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/LogSoftmax.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/LpNormalization.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/MatMul.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/MatMulInteger.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Max.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/MaxPool.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/MaxUnpool.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Mean.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/MeanVarianceNormalization.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/MelWeightMatrix.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Min.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Mish.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Mod.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Mul.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Multinomial.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Neg.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/NonMaxSuppression.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/NonZero.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Not.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/OneHot.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/OptionalGetElement.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/OptionalHasElement.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Or.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/PRelu.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Pad.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Pow.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/QLinearAdd.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/QLinearConcat.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/QLinearConv.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/QLinearLeakyRelu.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/QLinearMatMul.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/QLinearMul.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/QLinearSigmoid.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/QLinearSoftmax.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/QuantizeLinear.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/RNN.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/RandomNormal.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/RandomNormalLike.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/RandomUniform.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/RandomUniformLike.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Range.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Reciprocal.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/ReduceL1.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/ReduceL2.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/ReduceLogSum.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/ReduceLogSumExp.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/ReduceMax.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/ReduceMean.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/ReduceMin.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/ReduceProd.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/ReduceSum.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/ReduceSumSquare.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Relu.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Reshape.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Resize.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/ReverseSequence.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/RoiAlign.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Round.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/STFT.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/ScaleAndTranslate.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Scatter.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/ScatterElements.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/ScatterND.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Selu.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/SequenceAt.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/SequenceConstruct.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/SequenceEmpty.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/SequenceErase.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/SequenceInsert.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/SequenceLength.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Shape.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Shrink.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Sigmoid.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Sign.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Sin.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Sinh.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Size.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Slice.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Softmax.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Softplus.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Softsign.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/SpaceToDepth.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Split.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/SplitToSequence.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Sqrt.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Squeeze.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/StringNormalizer.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Sub.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Sum.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Tan.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Tanh.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/ThresholdedRelu.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Tile.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/TopK.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Transpose.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Trilu.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Unique.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Unsqueeze.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Upsample.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Where.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Xor.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/_Loop.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/__Loop.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/__init__.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/utils/__init__.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/utils/enums.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/utils/logging.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf.egg-info/SOURCES.txt +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf.egg-info/dependency_links.txt +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf.egg-info/entry_points.txt +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf.egg-info/top_level.txt +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/setup.cfg +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/setup.py +0 -0
- {onnx2tf-1.25.15 → onnx2tf-1.26.1}/tests/test_model_convert.py +0 -0
{onnx2tf-1.25.15/onnx2tf.egg-info → onnx2tf-1.26.1}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: onnx2tf
-Version: 1.25.15
+Version: 1.26.1
 Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
 Home-page: https://github.com/PINTO0309/onnx2tf
 Author: Katsuya Hyodo
@@ -314,7 +314,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
   docker run --rm -it \
   -v `pwd`:/workdir \
   -w /workdir \
-  ghcr.io/pinto0309/onnx2tf:1.25.15
+  ghcr.io/pinto0309/onnx2tf:1.26.1
 
 or
 
@@ -322,7 +322,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
   docker run --rm -it \
   -v `pwd`:/workdir \
   -w /workdir \
-  docker.io/pinto0309/onnx2tf:1.25.15
+  docker.io/pinto0309/onnx2tf:1.26.1
 
 or
 
@@ -1529,7 +1529,8 @@ usage: onnx2tf
   [-oiqt]
   [-qt {per-channel,per-tensor}]
   [-cind INPUT_NAME NUMPY_FILE_PATH MEAN STD]
-  [-
+  [-iqd {int8,uint8,float32}]
+  [-oqd {int8,uint8,float32}]
   [-nuo]
   [-nuonag]
   [-b BATCH_SIZE]
@@ -1686,9 +1687,13 @@ optional arguments:
     and {input_op_name}, {numpy_file_path}, {mean}, and {std} must all be entered.
     Otherwise, an error will occur during the -oiqt stage.
 
-  -
-    Input
-    "int8"(default) or "uint8"
+  -iqd {int8,uint8,float32}, --input_quant_dtype {int8,uint8,float32}
+    Input dtypes when doing Full INT8 Quantization.
+    "int8"(default) or "uint8" or "float32"
+
+  -oqd {int8,uint8,float32}, --output_quant_dtype {int8,uint8,float32}
+    Output dtypes when doing Full INT8 Quantization.
+    "int8"(default) or "uint8" or "float32"
 
   -nuo, --not_use_onnxsim
     No optimization by onnx-simplifier is performed.
@@ -2008,7 +2013,8 @@ convert(
   output_integer_quantized_tflite: Optional[bool] = False,
   quant_type: Optional[str] = 'per-channel',
   custom_input_op_name_np_data_path: Optional[List] = None,
-
+  input_quant_dtype: Optional[str] = 'int8',
+  output_quant_dtype: Optional[str] = 'int8',
   not_use_onnxsim: Optional[bool] = False,
   not_use_opname_auto_generate: Optional[bool] = False,
   batch_size: Union[int, NoneType] = None,
@@ -2172,9 +2178,13 @@ convert(
     and {input_op_name}, {numpy_file_path}, {mean}, and {std} must all be entered.
     Otherwise, an error will occur during the -oiqt stage.
 
-
-    Input
-    "int8"(default) or "uint8"
+  input_quant_dtype: Optional[str]
+    Input dtypes when doing Full INT8 Quantization.
+    "int8"(default) or "uint8" or "float32"
+
+  output_quant_dtype: Optional[str]
+    Output dtypes when doing Full INT8 Quantization.
+    "int8"(default) or "uint8" or "float32"
 
   not_use_onnxsim: Optional[bool]
     No optimization by onnx-simplifier is performed.
@@ -2600,7 +2610,7 @@ Do not submit an issue that only contains an amount of information that cannot b
 |14|Unsqueeze|1. "param_target": "inputs"<br>`pre_process_transpose_perm`: Transpose is applied to the tensor before the Unsqueeze operation with the perm specified as pre-processing.<br>2. "param_target": "outputs"<br>`post_process_transpose_perm`: Transpose is applied to the tensor after the Unsqueeze operation with the perm specified as post-processing.<br>3. "param_target": "op"<br>`new_shape`: Specifies directly the shape after Unsqueeze processing.<br>{<br> "op_name": "/backbone/backbone.1/Unsqueeze_1",<br> "param_target": "op",<br> "new_shape": [1,15,15,1]<br>}|
 |15|Reshape|1. "param_target": "inputs"<br>`values`: Value of `shape`<br>`pre_process_transpose_perm`: Transpose is applied to the tensor before the Reshape operation with the perm specified as pre-processing.<br>2. "param_target": "outputs"<br>`post_process_transpose_perm`: Transpose is applied to the tensor after the Reshape operation with the perm specified as post-processing.|
 |16|Resize|1. "param_target": "attributes"<br>`coordinate_transformation_mode`: Value of `coordinate_transformation_mode`<br>`extrapolation_value`: Value of `extrapolation_value`<br>`mode`: Value of `mode`<br>2. "param_target": "inputs"<br>`values`: Value of `roi` or `scales` or `sizes`. `scales`=`[scale_h,scale_w]`,`sizes`=`[h,w]`<br>`pre_process_transpose_perm`: Transpose is applied to the tensor before the Resize operation with the perm specified as pre-processing.<br>3. "param_target": "outputs"<br>`post_process_transpose_perm`: Transpose is applied to the tensor after the Resize operation with the perm specified as post-processing.|
-|17|Slice|`Slice` implements special replacements separately ignore all automatic conversions and generate `tf.strided_slice` directly by specifying all parameters of `tf.strided_slice` directly.<br>https://www.tensorflow.org/api_docs/python/tf/strided_slice<br>See [replace_slice.json](https://github.com/PINTO0309/onnx2tf/blob/main/replace_slice.json) for a sample description.<br><br>1. "param_target": "op"<br>`begin`: Value of `begin`<br>`end`: Value of `end`<br>`strides`: Value of `strides`<br>`begin_mask`: Value of `begin_mask`<br>`end_mask`: Value of `end_mask`<br>`ellipsis_mask`: Value of `ellipsis_mask`<br>`new_axis_mask`: Value of `new_axis_mask`<br>`shrink_axis_mask`: Value of `shrink_axis_mask`<br>{<br> "op_name": "/Slice",<br> "param_target": "op",<br> "begin": [0,0,1,0],<br> "end": [0,0,0,0],<br> "end_mask": 15<br>}|
+|17|Slice|`Slice` implements special replacements separately ignore all automatic conversions and generate `tf.strided_slice` directly by specifying all parameters of `tf.strided_slice` directly.<br>https://www.tensorflow.org/api_docs/python/tf/strided_slice<br>See [json_samples/replace_slice.json](https://github.com/PINTO0309/onnx2tf/blob/main/json_samples/replace_slice.json) for a sample description.<br><br>1. "param_target": "op"<br>`begin`: Value of `begin`<br>`end`: Value of `end`<br>`strides`: Value of `strides`<br>`begin_mask`: Value of `begin_mask`<br>`end_mask`: Value of `end_mask`<br>`ellipsis_mask`: Value of `ellipsis_mask`<br>`new_axis_mask`: Value of `new_axis_mask`<br>`shrink_axis_mask`: Value of `shrink_axis_mask`<br>{<br> "op_name": "/Slice",<br> "param_target": "op",<br> "begin": [0,0,1,0],<br> "end": [0,0,0,0],<br> "end_mask": 15<br>}|
 |18|Softmax|1. "param_target": "attributes"<br>`axis`: Value of `axis`. The transpositions corresponding to the specified axis are extrapolated before and after `Softmax`.<br>2. "param_target": "inputs"<br>`values`: Value of `tensor`|
 |19|Split|1. "param_target": "inputs"<br>`values`: Value of `split`<br>2. "param_target": "attributes"<br>`axis`: Value of `axis`.<br>`num_outputs`: Value of `num_outputs`.|
 |20|Sub|1. "param_target": "inputs"<br>`values`: Value of `input`<br>`pre_process_transpose_perm`: Transpose is applied to the tensor before the Sub operation with the perm specified as pre-processing.<br>2. "param_target": "outputs"<br>`post_process_transpose_perm`: Transpose is applied to the tensor after the Sub operation with the perm specified as post-processing.|
{onnx2tf-1.25.15 → onnx2tf-1.26.1}/README.md

@@ -299,7 +299,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
   docker run --rm -it \
   -v `pwd`:/workdir \
   -w /workdir \
-  ghcr.io/pinto0309/onnx2tf:1.25.15
+  ghcr.io/pinto0309/onnx2tf:1.26.1
 
 or
 
@@ -307,7 +307,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
   docker run --rm -it \
   -v `pwd`:/workdir \
   -w /workdir \
-  docker.io/pinto0309/onnx2tf:1.25.15
+  docker.io/pinto0309/onnx2tf:1.26.1
 
 or
 
@@ -1514,7 +1514,8 @@ usage: onnx2tf
   [-oiqt]
   [-qt {per-channel,per-tensor}]
   [-cind INPUT_NAME NUMPY_FILE_PATH MEAN STD]
-  [-
+  [-iqd {int8,uint8,float32}]
+  [-oqd {int8,uint8,float32}]
   [-nuo]
   [-nuonag]
   [-b BATCH_SIZE]
@@ -1671,9 +1672,13 @@ optional arguments:
     and {input_op_name}, {numpy_file_path}, {mean}, and {std} must all be entered.
     Otherwise, an error will occur during the -oiqt stage.
 
-  -
-    Input
-    "int8"(default) or "uint8"
+  -iqd {int8,uint8,float32}, --input_quant_dtype {int8,uint8,float32}
+    Input dtypes when doing Full INT8 Quantization.
+    "int8"(default) or "uint8" or "float32"
+
+  -oqd {int8,uint8,float32}, --output_quant_dtype {int8,uint8,float32}
+    Output dtypes when doing Full INT8 Quantization.
+    "int8"(default) or "uint8" or "float32"
 
   -nuo, --not_use_onnxsim
     No optimization by onnx-simplifier is performed.
@@ -1993,7 +1998,8 @@ convert(
   output_integer_quantized_tflite: Optional[bool] = False,
   quant_type: Optional[str] = 'per-channel',
   custom_input_op_name_np_data_path: Optional[List] = None,
-
+  input_quant_dtype: Optional[str] = 'int8',
+  output_quant_dtype: Optional[str] = 'int8',
   not_use_onnxsim: Optional[bool] = False,
   not_use_opname_auto_generate: Optional[bool] = False,
   batch_size: Union[int, NoneType] = None,
@@ -2157,9 +2163,13 @@ convert(
     and {input_op_name}, {numpy_file_path}, {mean}, and {std} must all be entered.
     Otherwise, an error will occur during the -oiqt stage.
 
-
-    Input
-    "int8"(default) or "uint8"
+  input_quant_dtype: Optional[str]
+    Input dtypes when doing Full INT8 Quantization.
+    "int8"(default) or "uint8" or "float32"
+
+  output_quant_dtype: Optional[str]
+    Output dtypes when doing Full INT8 Quantization.
+    "int8"(default) or "uint8" or "float32"
 
   not_use_onnxsim: Optional[bool]
     No optimization by onnx-simplifier is performed.
@@ -2585,7 +2595,7 @@ Do not submit an issue that only contains an amount of information that cannot b
 |14|Unsqueeze|1. "param_target": "inputs"<br>`pre_process_transpose_perm`: Transpose is applied to the tensor before the Unsqueeze operation with the perm specified as pre-processing.<br>2. "param_target": "outputs"<br>`post_process_transpose_perm`: Transpose is applied to the tensor after the Unsqueeze operation with the perm specified as post-processing.<br>3. "param_target": "op"<br>`new_shape`: Specifies directly the shape after Unsqueeze processing.<br>{<br> "op_name": "/backbone/backbone.1/Unsqueeze_1",<br> "param_target": "op",<br> "new_shape": [1,15,15,1]<br>}|
 |15|Reshape|1. "param_target": "inputs"<br>`values`: Value of `shape`<br>`pre_process_transpose_perm`: Transpose is applied to the tensor before the Reshape operation with the perm specified as pre-processing.<br>2. "param_target": "outputs"<br>`post_process_transpose_perm`: Transpose is applied to the tensor after the Reshape operation with the perm specified as post-processing.|
 |16|Resize|1. "param_target": "attributes"<br>`coordinate_transformation_mode`: Value of `coordinate_transformation_mode`<br>`extrapolation_value`: Value of `extrapolation_value`<br>`mode`: Value of `mode`<br>2. "param_target": "inputs"<br>`values`: Value of `roi` or `scales` or `sizes`. `scales`=`[scale_h,scale_w]`,`sizes`=`[h,w]`<br>`pre_process_transpose_perm`: Transpose is applied to the tensor before the Resize operation with the perm specified as pre-processing.<br>3. "param_target": "outputs"<br>`post_process_transpose_perm`: Transpose is applied to the tensor after the Resize operation with the perm specified as post-processing.|
-|17|Slice|`Slice` implements special replacements separately ignore all automatic conversions and generate `tf.strided_slice` directly by specifying all parameters of `tf.strided_slice` directly.<br>https://www.tensorflow.org/api_docs/python/tf/strided_slice<br>See [replace_slice.json](https://github.com/PINTO0309/onnx2tf/blob/main/replace_slice.json) for a sample description.<br><br>1. "param_target": "op"<br>`begin`: Value of `begin`<br>`end`: Value of `end`<br>`strides`: Value of `strides`<br>`begin_mask`: Value of `begin_mask`<br>`end_mask`: Value of `end_mask`<br>`ellipsis_mask`: Value of `ellipsis_mask`<br>`new_axis_mask`: Value of `new_axis_mask`<br>`shrink_axis_mask`: Value of `shrink_axis_mask`<br>{<br> "op_name": "/Slice",<br> "param_target": "op",<br> "begin": [0,0,1,0],<br> "end": [0,0,0,0],<br> "end_mask": 15<br>}|
+|17|Slice|`Slice` implements special replacements separately ignore all automatic conversions and generate `tf.strided_slice` directly by specifying all parameters of `tf.strided_slice` directly.<br>https://www.tensorflow.org/api_docs/python/tf/strided_slice<br>See [json_samples/replace_slice.json](https://github.com/PINTO0309/onnx2tf/blob/main/json_samples/replace_slice.json) for a sample description.<br><br>1. "param_target": "op"<br>`begin`: Value of `begin`<br>`end`: Value of `end`<br>`strides`: Value of `strides`<br>`begin_mask`: Value of `begin_mask`<br>`end_mask`: Value of `end_mask`<br>`ellipsis_mask`: Value of `ellipsis_mask`<br>`new_axis_mask`: Value of `new_axis_mask`<br>`shrink_axis_mask`: Value of `shrink_axis_mask`<br>{<br> "op_name": "/Slice",<br> "param_target": "op",<br> "begin": [0,0,1,0],<br> "end": [0,0,0,0],<br> "end_mask": 15<br>}|
 |18|Softmax|1. "param_target": "attributes"<br>`axis`: Value of `axis`. The transpositions corresponding to the specified axis are extrapolated before and after `Softmax`.<br>2. "param_target": "inputs"<br>`values`: Value of `tensor`|
 |19|Split|1. "param_target": "inputs"<br>`values`: Value of `split`<br>2. "param_target": "attributes"<br>`axis`: Value of `axis`.<br>`num_outputs`: Value of `num_outputs`.|
 |20|Sub|1. "param_target": "inputs"<br>`values`: Value of `input`<br>`pre_process_transpose_perm`: Transpose is applied to the tensor before the Sub operation with the perm specified as pre-processing.<br>2. "param_target": "outputs"<br>`post_process_transpose_perm`: Transpose is applied to the tensor after the Sub operation with the perm specified as post-processing.|
{onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/onnx2tf.py

@@ -72,7 +72,8 @@ def convert(
     output_integer_quantized_tflite: Optional[bool] = False,
     quant_type: Optional[str] = 'per-channel',
     custom_input_op_name_np_data_path: Optional[List] = None,
-
+    input_quant_dtype: Optional[str] = 'int8',
+    output_quant_dtype: Optional[str] = 'int8',
     not_use_onnxsim: Optional[bool] = False,
     not_use_opname_auto_generate: Optional[bool] = False,
     batch_size: Optional[int] = None,
@@ -221,9 +222,13 @@ def convert(
             ["input2","input2.npy",[0.3],[0.07]],\n
         ]
 
-
-        Input
-        "int8"(default) or "uint8"
+    input_quant_dtype: Optional[str]
+        Input dtypes when doing Full INT8 Quantization.\n
+        "int8"(default) or "uint8" or "float32"
+
+    output_quant_dtype: Optional[str]
+        Output dtypes when doing Full INT8 Quantization.\n
+        "int8"(default) or "uint8" or "float32"
 
     not_use_onnxsim: Optional[bool]
         No optimization by onnx-simplifier is performed.\n
@@ -1693,15 +1698,27 @@ def convert(
         converter._experimental_disable_per_channel = disable_per_channel
         converter.unfold_batchmatmul = enable_batchmatmul_unfold
         converter.representative_dataset = representative_dataset_gen
-
-
-
-
-
+        inf_type_input = None
+        inf_type_output = None
+        if input_quant_dtype == 'int8':
+            inf_type_input = tf.int8
+        elif input_quant_dtype == 'uint8':
+            inf_type_input = tf.uint8
+        elif input_quant_dtype == 'float32':
+            inf_type_input = tf.float32
+        else:
+            inf_type_input = tf.int8
+
+        if output_quant_dtype == 'int8':
+            inf_type_output = tf.int8
+        elif output_quant_dtype == 'uint8':
+            inf_type_output = tf.uint8
+        elif output_quant_dtype == 'float32':
+            inf_type_output = tf.float32
         else:
-
-            converter.inference_input_type =
-            converter.inference_output_type =
+            inf_type_output = tf.int8
+        converter.inference_input_type = inf_type_input
+        converter.inference_output_type = inf_type_output
         tflite_model = converter.convert()
         with open(f'{output_folder_path}/{output_file_name}_full_integer_quant.tflite', 'wb') as w:
             w.write(tflite_model)
@@ -2128,14 +2145,24 @@ def main():
             'Otherwise, an error will occur during the -oiqt stage.'
     )
     parser.add_argument(
-        '-
-        '--
+        '-iqd',
+        '--input_quant_dtype',
+        type=str,
+        choices=['int8', 'uint8', 'float32'],
+        default='int8',
+        help=\
+            'Input dtypes when doing Full INT8 Quantization. \n' +
+            '"int8"(default) or "uint8" or "float32"'
+    )
+    parser.add_argument(
+        '-oqd',
+        '--output_quant_dtype',
         type=str,
-        choices=['int8', 'uint8'],
+        choices=['int8', 'uint8', 'float32'],
         default='int8',
         help=\
-            '
-            '"int8"(default) or "uint8"'
+            'Output dtypes when doing Full INT8 Quantization. \n' +
+            '"int8"(default) or "uint8" or "float32"'
     )
     parser.add_argument(
         '-nuo',
@@ -2584,7 +2611,8 @@ def main():
         output_integer_quantized_tflite=args.output_integer_quantized_tflite,
         quant_type=args.quant_type,
         custom_input_op_name_np_data_path=custom_params,
-
+        input_quant_dtype=args.input_quant_dtype,
+        output_quant_dtype=args.output_quant_dtype,
         not_use_onnxsim=args.not_use_onnxsim,
         not_use_opname_auto_generate=args.not_use_opname_auto_generate,
         batch_size=args.batch_size,
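Taken together, the onnx2tf.py hunks replace the previous single quantization-dtype option with separate input and output controls (`-iqd`/`--input_quant_dtype` and `-oqd`/`--output_quant_dtype`, both defaulting to `'int8'` and now also accepting `'float32'`), and map each string to the matching `tf.int8`/`tf.uint8`/`tf.float32` inference type on the TFLite converter. A hedged usage sketch of the new Python API parameters follows; the file paths are placeholders and `input_onnx_file_path` is assumed from onnx2tf's usual API rather than from this diff.

```python
# Hedged sketch of the new per-direction quantization dtypes (onnx2tf >= 1.26.x).
# "model.onnx" / "saved_model" are placeholder paths; input_onnx_file_path is an
# assumed argument name and not part of this diff.
from onnx2tf import convert

convert(
    input_onnx_file_path='model.onnx',
    output_folder_path='saved_model',
    output_integer_quantized_tflite=True,   # -oiqt: also emit full-integer TFLite
    quant_type='per-channel',
    input_quant_dtype='uint8',              # new: dtype of the quantized model's inputs
    output_quant_dtype='float32',           # new: dtype of the quantized model's outputs
)
```

The equivalent CLI flags are `-iqd` and `-oqd`, as documented in the README hunks above.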
{onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/ops/Conv.py

@@ -14,6 +14,7 @@ from tensorflow.python.keras.layers import (
 )
 import onnx_graphsurgeon as gs
 from onnx2tf.utils.common_functions import (
+    get_replacement_parameter,
     get_constant_or_variable,
     get_weights_constant_or_variable,
     get_padding_as_op,
@@ -24,6 +25,7 @@ from onnx2tf.utils.common_functions import (
     transpose_with_flexing_deterrence,
     get_tf_model_inputs,
     onnx_tf_tensor_validation,
+    post_process_transpose,
 )
 from typing import Any, Dict
 from onnx2tf.utils.logging import *
@@ -33,6 +35,7 @@ INF_INDEX_VALUE: int = 4294967296
 
 @print_node_info
 @inverted_operation_enable_disable
+@get_replacement_parameter
 def make_node(
     *,
     graph_node: gs.Node,
@@ -932,6 +935,15 @@ def make_node(
         dilations,
     )
 
+    # Post-process transpose
+    tf_layers_dict[graph_node_output.name]['tf_node'] = \
+        post_process_transpose(
+            value_before_transpose=tf_layers_dict[graph_node_output.name]['tf_node'],
+            param_target='outputs',
+            param_name=graph_node.outputs[0].name,
+            **kwargs,
+        )
+
     # Generation of Debug Info
     tf_layers_dict[graph_node_output.name]['tf_node_info'] = \
         make_tf_node_info(
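The Conv.py hunks register `Conv` with the parameter-replacement machinery: the node is decorated with `@get_replacement_parameter`, and `post_process_transpose` is applied to its output, so a replacement entry with `"param_target": "outputs"` and `post_process_transpose_perm` can now rewrite a Conv output's layout. Below is a hedged sketch of such an entry; the `format_version`/`operations` wrapper, the op and tensor names, and the perm value are illustrative assumptions, not taken from this diff.

```python
# Hedged sketch: a parameter-replacement entry that transposes a Conv output,
# matching the post_process_transpose hook added in this diff. All names, the
# perm value, and the format_version/operations wrapper are assumptions for
# illustration only.
import json

replacement = {
    "format_version": 1,
    "operations": [
        {
            "op_name": "/backbone/Conv",                  # hypothetical ONNX op name
            "param_target": "outputs",
            "param_name": "/backbone/Conv_output_0",      # hypothetical output tensor name
            "post_process_transpose_perm": [0, 3, 1, 2],  # example permutation
        }
    ],
}

with open("replace_conv.json", "w") as f:
    json.dump(replacement, f, indent=2)
```

The resulting file would be supplied through onnx2tf's parameter-replacement-file option, in the same way as the other ops listed in the README replacement table.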
{onnx2tf-1.25.15 → onnx2tf-1.26.1}/onnx2tf/utils/common_functions.py

@@ -5857,11 +5857,18 @@ def correction_process_for_accuracy_errors(
         onnx_output_same_shape_counts = collections.Counter(onnx_output_shape)
         if sum([1 if dim > 1 and cnt > 1 else 0 for dim, cnt in onnx_output_same_shape_counts.items()]) >= 1:
             # Generate dummy op
-            dummy_op =
-
-
-
-
+            dummy_op = None
+            tensor_2_candidate_for_transpositions = list(itertools.permutations(range(len(input_tensor_2.shape))))
+            for tensor_2_candidate_for_transposition in tensor_2_candidate_for_transpositions:
+                try:
+                    dummy_op = tf_func(
+                        input_tensor_1,
+                        tf.transpose(a=input_tensor_2, perm=tensor_2_candidate_for_transposition),
+                    )
+                    break
+                except Exception as ex:
+                    pass
+            if dummy_op is not None and dummy_op.shape != tf.TensorShape(None):
                 tf_output_shape = [dim if dim is not None else -1 for dim in dummy_op.shape]
                 number_of_dim_other_than_1 = sum([1 if i != 1 else 0 for i in onnx_output_shape])
                 # Processing continues only if there are two or more dimensions other than 1
@@ -5889,7 +5896,7 @@ def correction_process_for_accuracy_errors(
         tensor_1_candidate_for_transpositions = \
             obtaining_an_inverted_pattern_for_brute_force_validation(tensor_shape=validation_data_1.shape)
         tensor_2_candidate_for_transpositions = \
-
+            list(itertools.permutations(range(len(validation_data_2.shape))))
         for tensor_1_candidate_for_transposition in tensor_1_candidate_for_transpositions:
             for tensor_2_candidate_for_transposition in tensor_2_candidate_for_transpositions:
                 try:
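The common_functions.py change swaps a single fixed dummy-op construction for a brute-force search: every permutation of the second operand's axes is tried until `tf_func` accepts the operands, so the output-shape estimation survives NCHW/NHWC axis mismatches. A self-contained sketch of that idea follows; it is illustrative only, not the library's actual helper.

```python
# Standalone sketch of the brute-force permutation search used above
# (illustrative only, not onnx2tf's actual helper).
import itertools
import tensorflow as tf

def first_working_binary_op(tf_func, tensor_1, tensor_2):
    """Apply tf_func to tensor_1 and a transposed tensor_2, trying every axis
    permutation of tensor_2 until one succeeds; return None if none does."""
    for perm in itertools.permutations(range(len(tensor_2.shape))):
        try:
            return tf_func(tensor_1, tf.transpose(a=tensor_2, perm=perm))
        except Exception:
            continue
    return None

# NHWC vs NCHW operands that only line up after transposing the second one.
a = tf.zeros([1, 32, 32, 3])   # NHWC
b = tf.zeros([1, 3, 32, 32])   # NCHW
dummy = first_working_binary_op(tf.math.multiply, a, b)
print(None if dummy is None else dummy.shape)   # (1, 32, 32, 3)
```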
{onnx2tf-1.25.15 → onnx2tf-1.26.1/onnx2tf.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: onnx2tf
-Version: 1.25.15
+Version: 1.26.1
 Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
 Home-page: https://github.com/PINTO0309/onnx2tf
 Author: Katsuya Hyodo
@@ -314,7 +314,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
   docker run --rm -it \
   -v `pwd`:/workdir \
   -w /workdir \
-  ghcr.io/pinto0309/onnx2tf:1.25.15
+  ghcr.io/pinto0309/onnx2tf:1.26.1
 
 or
 
@@ -322,7 +322,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
   docker run --rm -it \
   -v `pwd`:/workdir \
   -w /workdir \
-  docker.io/pinto0309/onnx2tf:1.25.15
+  docker.io/pinto0309/onnx2tf:1.26.1
 
 or
 
@@ -1529,7 +1529,8 @@ usage: onnx2tf
   [-oiqt]
   [-qt {per-channel,per-tensor}]
   [-cind INPUT_NAME NUMPY_FILE_PATH MEAN STD]
-  [-
+  [-iqd {int8,uint8,float32}]
+  [-oqd {int8,uint8,float32}]
   [-nuo]
   [-nuonag]
  [-b BATCH_SIZE]
@@ -1686,9 +1687,13 @@ optional arguments:
     and {input_op_name}, {numpy_file_path}, {mean}, and {std} must all be entered.
     Otherwise, an error will occur during the -oiqt stage.
 
-  -
-    Input
-    "int8"(default) or "uint8"
+  -iqd {int8,uint8,float32}, --input_quant_dtype {int8,uint8,float32}
+    Input dtypes when doing Full INT8 Quantization.
+    "int8"(default) or "uint8" or "float32"
+
+  -oqd {int8,uint8,float32}, --output_quant_dtype {int8,uint8,float32}
+    Output dtypes when doing Full INT8 Quantization.
+    "int8"(default) or "uint8" or "float32"
 
   -nuo, --not_use_onnxsim
     No optimization by onnx-simplifier is performed.
@@ -2008,7 +2013,8 @@ convert(
   output_integer_quantized_tflite: Optional[bool] = False,
   quant_type: Optional[str] = 'per-channel',
   custom_input_op_name_np_data_path: Optional[List] = None,
-
+  input_quant_dtype: Optional[str] = 'int8',
+  output_quant_dtype: Optional[str] = 'int8',
   not_use_onnxsim: Optional[bool] = False,
   not_use_opname_auto_generate: Optional[bool] = False,
   batch_size: Union[int, NoneType] = None,
@@ -2172,9 +2178,13 @@ convert(
     and {input_op_name}, {numpy_file_path}, {mean}, and {std} must all be entered.
     Otherwise, an error will occur during the -oiqt stage.
 
-
-    Input
-    "int8"(default) or "uint8"
+  input_quant_dtype: Optional[str]
+    Input dtypes when doing Full INT8 Quantization.
+    "int8"(default) or "uint8" or "float32"
+
+  output_quant_dtype: Optional[str]
+    Output dtypes when doing Full INT8 Quantization.
+    "int8"(default) or "uint8" or "float32"
 
   not_use_onnxsim: Optional[bool]
     No optimization by onnx-simplifier is performed.
@@ -2600,7 +2610,7 @@ Do not submit an issue that only contains an amount of information that cannot b
 |14|Unsqueeze|1. "param_target": "inputs"<br>`pre_process_transpose_perm`: Transpose is applied to the tensor before the Unsqueeze operation with the perm specified as pre-processing.<br>2. "param_target": "outputs"<br>`post_process_transpose_perm`: Transpose is applied to the tensor after the Unsqueeze operation with the perm specified as post-processing.<br>3. "param_target": "op"<br>`new_shape`: Specifies directly the shape after Unsqueeze processing.<br>{<br> "op_name": "/backbone/backbone.1/Unsqueeze_1",<br> "param_target": "op",<br> "new_shape": [1,15,15,1]<br>}|
 |15|Reshape|1. "param_target": "inputs"<br>`values`: Value of `shape`<br>`pre_process_transpose_perm`: Transpose is applied to the tensor before the Reshape operation with the perm specified as pre-processing.<br>2. "param_target": "outputs"<br>`post_process_transpose_perm`: Transpose is applied to the tensor after the Reshape operation with the perm specified as post-processing.|
 |16|Resize|1. "param_target": "attributes"<br>`coordinate_transformation_mode`: Value of `coordinate_transformation_mode`<br>`extrapolation_value`: Value of `extrapolation_value`<br>`mode`: Value of `mode`<br>2. "param_target": "inputs"<br>`values`: Value of `roi` or `scales` or `sizes`. `scales`=`[scale_h,scale_w]`,`sizes`=`[h,w]`<br>`pre_process_transpose_perm`: Transpose is applied to the tensor before the Resize operation with the perm specified as pre-processing.<br>3. "param_target": "outputs"<br>`post_process_transpose_perm`: Transpose is applied to the tensor after the Resize operation with the perm specified as post-processing.|
-|17|Slice|`Slice` implements special replacements separately ignore all automatic conversions and generate `tf.strided_slice` directly by specifying all parameters of `tf.strided_slice` directly.<br>https://www.tensorflow.org/api_docs/python/tf/strided_slice<br>See [replace_slice.json](https://github.com/PINTO0309/onnx2tf/blob/main/replace_slice.json) for a sample description.<br><br>1. "param_target": "op"<br>`begin`: Value of `begin`<br>`end`: Value of `end`<br>`strides`: Value of `strides`<br>`begin_mask`: Value of `begin_mask`<br>`end_mask`: Value of `end_mask`<br>`ellipsis_mask`: Value of `ellipsis_mask`<br>`new_axis_mask`: Value of `new_axis_mask`<br>`shrink_axis_mask`: Value of `shrink_axis_mask`<br>{<br> "op_name": "/Slice",<br> "param_target": "op",<br> "begin": [0,0,1,0],<br> "end": [0,0,0,0],<br> "end_mask": 15<br>}|
+|17|Slice|`Slice` implements special replacements separately ignore all automatic conversions and generate `tf.strided_slice` directly by specifying all parameters of `tf.strided_slice` directly.<br>https://www.tensorflow.org/api_docs/python/tf/strided_slice<br>See [json_samples/replace_slice.json](https://github.com/PINTO0309/onnx2tf/blob/main/json_samples/replace_slice.json) for a sample description.<br><br>1. "param_target": "op"<br>`begin`: Value of `begin`<br>`end`: Value of `end`<br>`strides`: Value of `strides`<br>`begin_mask`: Value of `begin_mask`<br>`end_mask`: Value of `end_mask`<br>`ellipsis_mask`: Value of `ellipsis_mask`<br>`new_axis_mask`: Value of `new_axis_mask`<br>`shrink_axis_mask`: Value of `shrink_axis_mask`<br>{<br> "op_name": "/Slice",<br> "param_target": "op",<br> "begin": [0,0,1,0],<br> "end": [0,0,0,0],<br> "end_mask": 15<br>}|
 |18|Softmax|1. "param_target": "attributes"<br>`axis`: Value of `axis`. The transpositions corresponding to the specified axis are extrapolated before and after `Softmax`.<br>2. "param_target": "inputs"<br>`values`: Value of `tensor`|
 |19|Split|1. "param_target": "inputs"<br>`values`: Value of `split`<br>2. "param_target": "attributes"<br>`axis`: Value of `axis`.<br>`num_outputs`: Value of `num_outputs`.|
 |20|Sub|1. "param_target": "inputs"<br>`values`: Value of `input`<br>`pre_process_transpose_perm`: Transpose is applied to the tensor before the Sub operation with the perm specified as pre-processing.<br>2. "param_target": "outputs"<br>`post_process_transpose_perm`: Transpose is applied to the tensor after the Sub operation with the perm specified as post-processing.|