onnx2tf 1.28.2__tar.gz → 1.28.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/PKG-INFO +20 -4
- onnx2tf-1.28.2/onnx2tf.egg-info/PKG-INFO → onnx2tf-1.28.3/README.md +19 -28
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/__init__.py +1 -1
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/onnx2tf.py +78 -59
- onnx2tf-1.28.2/README.md → onnx2tf-1.28.3/onnx2tf.egg-info/PKG-INFO +44 -3
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/LICENSE +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/LICENSE_onnx-tensorflow +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/__main__.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Abs.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Acos.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Acosh.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Add.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/And.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/ArgMax.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/ArgMin.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Asin.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Asinh.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Atan.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Atanh.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/AveragePool.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/BatchNormalization.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Bernoulli.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/BitShift.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Cast.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Ceil.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Celu.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Clip.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Col2Im.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Compress.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Concat.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/ConcatFromSequence.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Constant.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/ConstantOfShape.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Conv.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/ConvInteger.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/ConvTranspose.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Cos.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Cosh.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/CumSum.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/DepthToSpace.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/DequantizeLinear.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Det.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Div.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Dropout.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/DynamicQuantizeLinear.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Einsum.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Elu.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Equal.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Erf.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Exp.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Expand.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/EyeLike.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Flatten.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Floor.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/FusedConv.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/GRU.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Gather.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/GatherElements.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/GatherND.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Gelu.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Gemm.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/GlobalAveragePool.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/GlobalLpPool.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/GlobalMaxPool.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Greater.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/GreaterOrEqual.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/GridSample.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/GroupNorm.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/HammingWindow.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/HannWindow.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/HardSigmoid.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/HardSwish.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Hardmax.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Identity.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/If.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Input.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/InstanceNormalization.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Inverse.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/IsInf.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/IsNaN.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/LRN.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/LSTM.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/LayerNormalization.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/LeakyRelu.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Less.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/LessOrEqual.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Log.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/LogSoftmax.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/LpNormalization.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/MatMul.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/MatMulInteger.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Max.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/MaxPool.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/MaxUnpool.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Mean.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/MeanVarianceNormalization.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/MelWeightMatrix.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Min.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Mish.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Mod.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Mul.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Multinomial.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Neg.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/NonMaxSuppression.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/NonZero.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Not.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/OneHot.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/OptionalGetElement.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/OptionalHasElement.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Or.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/PRelu.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Pad.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Pow.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/QLinearAdd.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/QLinearConcat.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/QLinearConv.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/QLinearLeakyRelu.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/QLinearMatMul.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/QLinearMul.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/QLinearSigmoid.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/QLinearSoftmax.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/QuantizeLinear.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/RNN.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/RandomNormal.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/RandomNormalLike.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/RandomUniform.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/RandomUniformLike.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Range.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Reciprocal.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/ReduceL1.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/ReduceL2.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/ReduceLogSum.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/ReduceLogSumExp.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/ReduceMax.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/ReduceMean.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/ReduceMin.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/ReduceProd.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/ReduceSum.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/ReduceSumSquare.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Relu.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Reshape.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Resize.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/ReverseSequence.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/RoiAlign.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Round.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/STFT.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/ScaleAndTranslate.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Scatter.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/ScatterElements.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/ScatterND.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Selu.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/SequenceAt.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/SequenceConstruct.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/SequenceEmpty.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/SequenceErase.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/SequenceInsert.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/SequenceLength.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Shape.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Shrink.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Sigmoid.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Sign.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Sin.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Sinh.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Size.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Slice.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Softmax.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Softplus.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Softsign.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/SpaceToDepth.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Split.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/SplitToSequence.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Sqrt.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Squeeze.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/StringNormalizer.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Sub.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Sum.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Tan.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Tanh.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/ThresholdedRelu.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Tile.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/TopK.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Transpose.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Trilu.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Unique.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Unsqueeze.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Upsample.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Where.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/Xor.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/_Loop.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/__Loop.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/ops/__init__.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/utils/__init__.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/utils/common_functions.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/utils/enums.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/utils/iterative_json_optimizer.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/utils/json_auto_generator.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf/utils/logging.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf.egg-info/SOURCES.txt +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf.egg-info/dependency_links.txt +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf.egg-info/entry_points.txt +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/onnx2tf.egg-info/top_level.txt +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/setup.cfg +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/setup.py +0 -0
- {onnx2tf-1.28.2 → onnx2tf-1.28.3}/tests/test_model_convert.py +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: onnx2tf
|
|
3
|
-
Version: 1.28.
|
|
3
|
+
Version: 1.28.3
|
|
4
4
|
Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
|
|
5
5
|
Home-page: https://github.com/PINTO0309/onnx2tf
|
|
6
6
|
Author: Katsuya Hyodo
|
|
@@ -334,7 +334,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
|
|
|
334
334
|
docker run --rm -it \
|
|
335
335
|
-v `pwd`:/workdir \
|
|
336
336
|
-w /workdir \
|
|
337
|
-
ghcr.io/pinto0309/onnx2tf:1.28.
|
|
337
|
+
ghcr.io/pinto0309/onnx2tf:1.28.3
|
|
338
338
|
|
|
339
339
|
or
|
|
340
340
|
|
|
@@ -342,7 +342,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
|
|
|
342
342
|
docker run --rm -it \
|
|
343
343
|
-v `pwd`:/workdir \
|
|
344
344
|
-w /workdir \
|
|
345
|
-
docker.io/pinto0309/onnx2tf:1.28.
|
|
345
|
+
docker.io/pinto0309/onnx2tf:1.28.3
|
|
346
346
|
|
|
347
347
|
or
|
|
348
348
|
|
|
@@ -501,7 +501,7 @@ onnx2tf -i resnet18-v1-7.onnx -otfv1pb
|
|
|
501
501
|
# Automatic JSON generation only
|
|
502
502
|
# Generates an optimal parameter replacement JSON file for model conversion.
|
|
503
503
|
# The JSON file is saved to {model_name}_auto.json when conversion errors occur
|
|
504
|
-
# or accuracy issues are detected.
|
|
504
|
+
# or accuracy issues are detected and the feature is explicitly enabled.
|
|
505
505
|
onnx2tf -i model.onnx -agj
|
|
506
506
|
|
|
507
507
|
# Accuracy validation only (no JSON generation)
|
|
@@ -514,6 +514,11 @@ onnx2tf -i model.onnx -cotof
|
|
|
514
514
|
# to validate the model accuracy. This ensures the best possible conversion accuracy.
|
|
515
515
|
onnx2tf -i model.onnx -agj -cotof
|
|
516
516
|
|
|
517
|
+
# Accuracy validation with opt-in JSON generation on error
|
|
518
|
+
# Generates a parameter replacement JSON only when accuracy errors greater than 1e-2
|
|
519
|
+
# are detected during validation.
|
|
520
|
+
onnx2tf -i model.onnx -cotof -agje
|
|
521
|
+
|
|
517
522
|
# INT8 Quantization, Full INT8 Quantization
|
|
518
523
|
# INT8 Quantization with INT16 activation, Full INT8 Quantization with INT16 activation
|
|
519
524
|
# Dynamic Range Quantization
|
|
@@ -2051,6 +2056,11 @@ optional arguments:
|
|
|
2051
2056
|
WARNING: This option performs an exhaustive search to find the optimal conversion patterns,
|
|
2052
2057
|
which can take a very long time depending on the model complexity.
|
|
2053
2058
|
|
|
2059
|
+
-agje, --auto_generate_json_on_error
|
|
2060
|
+
Attempts to generate a parameter replacement JSON when accuracy validation finds errors
|
|
2061
|
+
greater than 1e-2. Useful for quickly capturing fixes during -cotof runs.
|
|
2062
|
+
Disabled by default to avoid unexpected file generation.
|
|
2063
|
+
|
|
2054
2064
|
-dms, --disable_model_save
|
|
2055
2065
|
Does not save the converted model. For CIs RAM savings.
|
|
2056
2066
|
|
|
@@ -2121,6 +2131,7 @@ convert(
|
|
|
2121
2131
|
mvn_epsilon: Union[float, NoneType] = 0.0000000001,
|
|
2122
2132
|
param_replacement_file: Optional[str] = '',
|
|
2123
2133
|
auto_generate_json: Optional[bool] = False,
|
|
2134
|
+
auto_generate_json_on_error: Optional[bool] = False,
|
|
2124
2135
|
check_gpu_delegate_compatibility: Optional[bool] = False,
|
|
2125
2136
|
check_onnx_tf_outputs_elementwise_close: Optional[bool] = False,
|
|
2126
2137
|
check_onnx_tf_outputs_elementwise_close_full: Optional[bool] = False,
|
|
@@ -2474,6 +2485,11 @@ convert(
|
|
|
2474
2485
|
the generated JSON is used to re-evaluate accuracy.
|
|
2475
2486
|
Default: False
|
|
2476
2487
|
|
|
2488
|
+
auto_generate_json_on_error: Optional[bool]
|
|
2489
|
+
When accuracy validation detects errors greater than 1e-2, attempts to generate
|
|
2490
|
+
a parameter replacement JSON as a best-effort fix.
|
|
2491
|
+
Default: False
|
|
2492
|
+
|
|
2477
2493
|
check_gpu_delegate_compatibility: Optional[bool]
|
|
2478
2494
|
Run TFLite ModelAnalyzer on the generated Float16 tflite model
|
|
2479
2495
|
to check if the model can be supported by GPU Delegate.
|
|
@@ -1,28 +1,3 @@
|
|
|
1
|
-
Metadata-Version: 2.4
|
|
2
|
-
Name: onnx2tf
|
|
3
|
-
Version: 1.28.2
|
|
4
|
-
Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
|
|
5
|
-
Home-page: https://github.com/PINTO0309/onnx2tf
|
|
6
|
-
Author: Katsuya Hyodo
|
|
7
|
-
Author-email: rmsdh122@yahoo.co.jp
|
|
8
|
-
License: MIT License
|
|
9
|
-
Platform: linux
|
|
10
|
-
Platform: unix
|
|
11
|
-
Requires-Python: >=3.10
|
|
12
|
-
Description-Content-Type: text/markdown
|
|
13
|
-
License-File: LICENSE
|
|
14
|
-
License-File: LICENSE_onnx-tensorflow
|
|
15
|
-
Dynamic: author
|
|
16
|
-
Dynamic: author-email
|
|
17
|
-
Dynamic: description
|
|
18
|
-
Dynamic: description-content-type
|
|
19
|
-
Dynamic: home-page
|
|
20
|
-
Dynamic: license
|
|
21
|
-
Dynamic: license-file
|
|
22
|
-
Dynamic: platform
|
|
23
|
-
Dynamic: requires-python
|
|
24
|
-
Dynamic: summary
|
|
25
|
-
|
|
26
1
|
# onnx2tf
|
|
27
2
|
Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in [onnx-tensorflow](https://github.com/onnx/onnx-tensorflow) ([onnx-tf](https://pypi.org/project/onnx-tf/)). I don't need a Star, but give me a pull request. Since I am adding challenging model optimizations and fixing bugs almost daily, I frequently embed potential bugs that would otherwise break through CI's regression testing. Therefore, if you encounter new problems, I recommend that you try a package that is a few versions older, or try the latest package that will be released in a few days.
|
|
28
3
|
|
|
@@ -334,7 +309,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
|
|
|
334
309
|
docker run --rm -it \
|
|
335
310
|
-v `pwd`:/workdir \
|
|
336
311
|
-w /workdir \
|
|
337
|
-
ghcr.io/pinto0309/onnx2tf:1.28.
|
|
312
|
+
ghcr.io/pinto0309/onnx2tf:1.28.3
|
|
338
313
|
|
|
339
314
|
or
|
|
340
315
|
|
|
@@ -342,7 +317,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
|
|
|
342
317
|
docker run --rm -it \
|
|
343
318
|
-v `pwd`:/workdir \
|
|
344
319
|
-w /workdir \
|
|
345
|
-
docker.io/pinto0309/onnx2tf:1.28.
|
|
320
|
+
docker.io/pinto0309/onnx2tf:1.28.3
|
|
346
321
|
|
|
347
322
|
or
|
|
348
323
|
|
|
@@ -501,7 +476,7 @@ onnx2tf -i resnet18-v1-7.onnx -otfv1pb
|
|
|
501
476
|
# Automatic JSON generation only
|
|
502
477
|
# Generates an optimal parameter replacement JSON file for model conversion.
|
|
503
478
|
# The JSON file is saved to {model_name}_auto.json when conversion errors occur
|
|
504
|
-
# or accuracy issues are detected.
|
|
479
|
+
# or accuracy issues are detected and the feature is explicitly enabled.
|
|
505
480
|
onnx2tf -i model.onnx -agj
|
|
506
481
|
|
|
507
482
|
# Accuracy validation only (no JSON generation)
|
|
@@ -514,6 +489,11 @@ onnx2tf -i model.onnx -cotof
|
|
|
514
489
|
# to validate the model accuracy. This ensures the best possible conversion accuracy.
|
|
515
490
|
onnx2tf -i model.onnx -agj -cotof
|
|
516
491
|
|
|
492
|
+
# Accuracy validation with opt-in JSON generation on error
|
|
493
|
+
# Generates a parameter replacement JSON only when accuracy errors greater than 1e-2
|
|
494
|
+
# are detected during validation.
|
|
495
|
+
onnx2tf -i model.onnx -cotof -agje
|
|
496
|
+
|
|
517
497
|
# INT8 Quantization, Full INT8 Quantization
|
|
518
498
|
# INT8 Quantization with INT16 activation, Full INT8 Quantization with INT16 activation
|
|
519
499
|
# Dynamic Range Quantization
|
|
@@ -2051,6 +2031,11 @@ optional arguments:
|
|
|
2051
2031
|
WARNING: This option performs an exhaustive search to find the optimal conversion patterns,
|
|
2052
2032
|
which can take a very long time depending on the model complexity.
|
|
2053
2033
|
|
|
2034
|
+
-agje, --auto_generate_json_on_error
|
|
2035
|
+
Attempts to generate a parameter replacement JSON when accuracy validation finds errors
|
|
2036
|
+
greater than 1e-2. Useful for quickly capturing fixes during -cotof runs.
|
|
2037
|
+
Disabled by default to avoid unexpected file generation.
|
|
2038
|
+
|
|
2054
2039
|
-dms, --disable_model_save
|
|
2055
2040
|
Does not save the converted model. For CIs RAM savings.
|
|
2056
2041
|
|
|
@@ -2121,6 +2106,7 @@ convert(
|
|
|
2121
2106
|
mvn_epsilon: Union[float, NoneType] = 0.0000000001,
|
|
2122
2107
|
param_replacement_file: Optional[str] = '',
|
|
2123
2108
|
auto_generate_json: Optional[bool] = False,
|
|
2109
|
+
auto_generate_json_on_error: Optional[bool] = False,
|
|
2124
2110
|
check_gpu_delegate_compatibility: Optional[bool] = False,
|
|
2125
2111
|
check_onnx_tf_outputs_elementwise_close: Optional[bool] = False,
|
|
2126
2112
|
check_onnx_tf_outputs_elementwise_close_full: Optional[bool] = False,
|
|
@@ -2474,6 +2460,11 @@ convert(
|
|
|
2474
2460
|
the generated JSON is used to re-evaluate accuracy.
|
|
2475
2461
|
Default: False
|
|
2476
2462
|
|
|
2463
|
+
auto_generate_json_on_error: Optional[bool]
|
|
2464
|
+
When accuracy validation detects errors greater than 1e-2, attempts to generate
|
|
2465
|
+
a parameter replacement JSON as a best-effort fix.
|
|
2466
|
+
Default: False
|
|
2467
|
+
|
|
2477
2468
|
check_gpu_delegate_compatibility: Optional[bool]
|
|
2478
2469
|
Run TFLite ModelAnalyzer on the generated Float16 tflite model
|
|
2479
2470
|
to check if the model can be supported by GPU Delegate.
|
|
@@ -109,6 +109,7 @@ def convert(
|
|
|
109
109
|
replace_to_pseudo_operators: List[str] = None,
|
|
110
110
|
param_replacement_file: Optional[str] = '',
|
|
111
111
|
auto_generate_json: Optional[bool] = False,
|
|
112
|
+
auto_generate_json_on_error: Optional[bool] = False,
|
|
112
113
|
check_gpu_delegate_compatibility: Optional[bool] = False,
|
|
113
114
|
check_onnx_tf_outputs_elementwise_close: Optional[bool] = False,
|
|
114
115
|
check_onnx_tf_outputs_elementwise_close_full: Optional[bool] = False,
|
|
@@ -445,6 +446,12 @@ def convert(
|
|
|
445
446
|
which can take a very long time depending on the model complexity.\n
|
|
446
447
|
Default: False
|
|
447
448
|
|
|
449
|
+
auto_generate_json_on_error: Optional[bool]
|
|
450
|
+
When accuracy validation detects errors greater than the allowed threshold, automatically\n
|
|
451
|
+
generate a parameter replacement JSON as a best-effort fix.\n
|
|
452
|
+
This is now opt-in and requires explicitly enabling the feature.\n
|
|
453
|
+
Default: False
|
|
454
|
+
|
|
448
455
|
check_gpu_delegate_compatibility: Optional[bool]
|
|
449
456
|
Run TFLite ModelAnalyzer on the generated Float16 tflite model\n
|
|
450
457
|
to check if the model can be supported by GPU Delegate.
|
|
@@ -1200,24 +1207,24 @@ def convert(
|
|
|
1200
1207
|
error_onnx_op_name = graph_node.name if 'graph_node' in locals() else None
|
|
1201
1208
|
# Attach it to the exception for later use
|
|
1202
1209
|
ex.onnx_op_name = error_onnx_op_name
|
|
1203
|
-
|
|
1210
|
+
|
|
1204
1211
|
# If no replacement file was provided, try to generate one automatically
|
|
1205
1212
|
if not param_replacement_file and input_onnx_file_path:
|
|
1206
1213
|
info('')
|
|
1207
1214
|
info(Color.REVERSE(f'Attempting automatic JSON generation due to conversion error'), '=' * 30)
|
|
1208
1215
|
if error_onnx_op_name:
|
|
1209
1216
|
info(f'Error occurred at ONNX operation: {error_onnx_op_name}')
|
|
1210
|
-
|
|
1217
|
+
|
|
1211
1218
|
# Try iterative JSON generation with multiple attempts
|
|
1212
1219
|
max_attempts = 3
|
|
1213
1220
|
attempt = 0
|
|
1214
1221
|
successful_conversion = False
|
|
1215
1222
|
best_json = None
|
|
1216
|
-
|
|
1223
|
+
|
|
1217
1224
|
while attempt < max_attempts and not successful_conversion:
|
|
1218
1225
|
attempt += 1
|
|
1219
1226
|
info(f'\nJSON generation attempt {attempt}/{max_attempts}')
|
|
1220
|
-
|
|
1227
|
+
|
|
1221
1228
|
try:
|
|
1222
1229
|
# Generate JSON with unlimited mode for exhaustive search
|
|
1223
1230
|
auto_json = generate_auto_replacement_json(
|
|
@@ -1230,28 +1237,28 @@ def convert(
|
|
|
1230
1237
|
max_iterations=attempt * 3, # Increase iterations with each attempt
|
|
1231
1238
|
unlimited_mode=True, # Enable unlimited mode
|
|
1232
1239
|
)
|
|
1233
|
-
|
|
1240
|
+
|
|
1234
1241
|
if auto_json.get('operations'):
|
|
1235
1242
|
best_json = auto_json
|
|
1236
|
-
|
|
1243
|
+
|
|
1237
1244
|
# Save temporary JSON
|
|
1238
1245
|
temp_json_path = os.path.join(output_folder_path, f'_temp_attempt_{attempt}.json')
|
|
1239
1246
|
with open(temp_json_path, 'w') as f:
|
|
1240
1247
|
json.dump(auto_json, f, indent=2)
|
|
1241
|
-
|
|
1248
|
+
|
|
1242
1249
|
info(f'Testing generated JSON with {len(auto_json["operations"])} operations...')
|
|
1243
|
-
|
|
1250
|
+
|
|
1244
1251
|
# Try to re-run just the problematic operation with the JSON
|
|
1245
1252
|
# This is a simplified test - in practice we'd need to re-run the full conversion
|
|
1246
1253
|
# For now, we'll assume the JSON might work and save it
|
|
1247
|
-
|
|
1254
|
+
|
|
1248
1255
|
# Clean up temp file
|
|
1249
1256
|
if os.path.exists(temp_json_path):
|
|
1250
1257
|
os.remove(temp_json_path)
|
|
1251
|
-
|
|
1258
|
+
|
|
1252
1259
|
except Exception as json_ex:
|
|
1253
1260
|
error(f"Error in attempt {attempt}: {type(json_ex).__name__}: {str(json_ex)}")
|
|
1254
|
-
|
|
1261
|
+
|
|
1255
1262
|
# Save the best JSON we generated
|
|
1256
1263
|
if best_json and best_json.get('operations'):
|
|
1257
1264
|
json_path = save_auto_replacement_json(
|
|
@@ -2065,23 +2072,22 @@ def convert(
|
|
|
2065
2072
|
rtol=check_onnx_tf_outputs_elementwise_close_rtol,
|
|
2066
2073
|
atol=check_onnx_tf_outputs_elementwise_close_atol,
|
|
2067
2074
|
)
|
|
2068
|
-
|
|
2069
|
-
#
|
|
2070
|
-
|
|
2071
|
-
|
|
2072
|
-
|
|
2073
|
-
|
|
2074
|
-
|
|
2075
|
-
|
|
2076
|
-
|
|
2077
|
-
max_abs_err
|
|
2078
|
-
|
|
2079
|
-
|
|
2080
|
-
|
|
2081
|
-
|
|
2082
|
-
|
|
2083
|
-
|
|
2084
|
-
if has_significant_errors:
|
|
2075
|
+
|
|
2076
|
+
# Inspect validation errors for optional auto JSON generation on error
|
|
2077
|
+
max_error_found = 0.0
|
|
2078
|
+
has_significant_errors = False
|
|
2079
|
+
error_count = 0
|
|
2080
|
+
for (onnx_name, tf_name), checked_value in check_results.items():
|
|
2081
|
+
matched_flg = checked_value[1]
|
|
2082
|
+
max_abs_err = checked_value[2]
|
|
2083
|
+
if (matched_flg == 0 or matched_flg is False) and isinstance(max_abs_err, (int, float, np.float32, np.float64)):
|
|
2084
|
+
if max_abs_err > 1e-2:
|
|
2085
|
+
has_significant_errors = True
|
|
2086
|
+
error_count += 1
|
|
2087
|
+
max_error_found = max(max_error_found, max_abs_err)
|
|
2088
|
+
|
|
2089
|
+
if has_significant_errors and not auto_generate_json:
|
|
2090
|
+
if auto_generate_json_on_error and not param_replacement_file and input_onnx_file_path:
|
|
2085
2091
|
info('')
|
|
2086
2092
|
info(Color.REVERSE(f'Attempting automatic JSON generation due to accuracy errors > 1e-2'), '=' * 25)
|
|
2087
2093
|
info(f'Found {error_count} operations with errors > 1e-2')
|
|
@@ -2106,9 +2112,14 @@ def convert(
|
|
|
2106
2112
|
)
|
|
2107
2113
|
else:
|
|
2108
2114
|
warn(
|
|
2109
|
-
|
|
2115
|
+
'Accuracy errors > 1e-2 found but automatic JSON generation could not find a solution.'
|
|
2110
2116
|
)
|
|
2111
|
-
|
|
2117
|
+
elif not auto_generate_json_on_error:
|
|
2118
|
+
warn(
|
|
2119
|
+
'Accuracy validation found errors > 1e-2. Automatic JSON generation on error is disabled by default.\n' +
|
|
2120
|
+
'Re-run with --auto_generate_json_on_error or provide a parameter replacement JSON file.'
|
|
2121
|
+
)
|
|
2122
|
+
|
|
2112
2123
|
for (onnx_output_name, tf_output_name), checked_value in check_results.items():
|
|
2113
2124
|
validated_onnx_tensor: np.ndarray = checked_value[0]
|
|
2114
2125
|
matched_flg: int = checked_value[1]
|
|
@@ -2143,34 +2154,34 @@ def convert(
|
|
|
2143
2154
|
if auto_generate_json:
|
|
2144
2155
|
# Store the generated JSON path for later use
|
|
2145
2156
|
generated_json_path = None
|
|
2146
|
-
|
|
2157
|
+
|
|
2147
2158
|
# Check if -cotof was already executed and we have check_results
|
|
2148
2159
|
if check_onnx_tf_outputs_elementwise_close_full and 'check_results' in locals():
|
|
2149
2160
|
# We already have validation results from -cotof
|
|
2150
2161
|
info('')
|
|
2151
2162
|
info(Color.REVERSE(f'Auto JSON generation started (using -cotof results)'), '=' * 35)
|
|
2152
|
-
|
|
2163
|
+
|
|
2153
2164
|
# Check if any errors exist
|
|
2154
2165
|
all_matched = True
|
|
2155
2166
|
max_error = 0.0
|
|
2156
2167
|
error_count = 0
|
|
2157
|
-
|
|
2168
|
+
|
|
2158
2169
|
for (onnx_name, tf_name), checked_value in check_results.items():
|
|
2159
2170
|
matched_flg = checked_value[1]
|
|
2160
2171
|
max_abs_err = checked_value[2]
|
|
2161
|
-
|
|
2172
|
+
|
|
2162
2173
|
if matched_flg == 0: # Unmatched
|
|
2163
2174
|
all_matched = False
|
|
2164
2175
|
if isinstance(max_abs_err, (int, float, np.float32, np.float64)):
|
|
2165
2176
|
max_error = max(max_error, max_abs_err)
|
|
2166
2177
|
error_count += 1
|
|
2167
|
-
|
|
2178
|
+
|
|
2168
2179
|
if all_matched:
|
|
2169
2180
|
info(Color.GREEN('All outputs already match! No JSON generation needed.'))
|
|
2170
2181
|
else:
|
|
2171
2182
|
info(f'Found {error_count} outputs with errors, max error: {max_error:.6f}')
|
|
2172
2183
|
info('Generating optimal JSON...')
|
|
2173
|
-
|
|
2184
|
+
|
|
2174
2185
|
# Generate auto replacement JSON
|
|
2175
2186
|
auto_json = generate_auto_replacement_json(
|
|
2176
2187
|
onnx_graph=gs.import_onnx(onnx_graph),
|
|
@@ -2183,7 +2194,7 @@ def convert(
|
|
|
2183
2194
|
target_accuracy=check_onnx_tf_outputs_elementwise_close_atol,
|
|
2184
2195
|
unlimited_mode=True,
|
|
2185
2196
|
)
|
|
2186
|
-
|
|
2197
|
+
|
|
2187
2198
|
if auto_json.get('operations'):
|
|
2188
2199
|
# Save the JSON
|
|
2189
2200
|
generated_json_path = save_auto_replacement_json(
|
|
@@ -2192,17 +2203,17 @@ def convert(
|
|
|
2192
2203
|
output_dir=output_folder_path,
|
|
2193
2204
|
)
|
|
2194
2205
|
info(f'Generated JSON with {len(auto_json["operations"])} operations: {generated_json_path}')
|
|
2195
|
-
|
|
2206
|
+
|
|
2196
2207
|
# If both -cotof and -agj are specified, re-run validation with the generated JSON
|
|
2197
2208
|
info('')
|
|
2198
2209
|
info(Color.REVERSE(f'Re-running validation with auto-generated JSON'), '=' * 35)
|
|
2199
|
-
|
|
2210
|
+
|
|
2200
2211
|
# TODO: In a full implementation, we would need to:
|
|
2201
2212
|
# 1. Re-run the entire conversion with the generated JSON
|
|
2202
2213
|
# 2. Re-validate the outputs
|
|
2203
2214
|
# 3. Display the new validation results
|
|
2204
2215
|
# For now, we just inform the user
|
|
2205
|
-
|
|
2216
|
+
|
|
2206
2217
|
info(Color.GREEN(f'\nAuto-generated JSON saved to: {generated_json_path}'))
|
|
2207
2218
|
info(
|
|
2208
2219
|
f'To see the validation results with the generated JSON, please re-run with:\n' +
|
|
@@ -2210,7 +2221,7 @@ def convert(
|
|
|
2210
2221
|
)
|
|
2211
2222
|
else:
|
|
2212
2223
|
warn('No viable parameter replacements found.')
|
|
2213
|
-
|
|
2224
|
+
|
|
2214
2225
|
else:
|
|
2215
2226
|
# -agj is specified but -cotof is not, so we need to run our own validation
|
|
2216
2227
|
try:
|
|
@@ -2222,16 +2233,16 @@ def convert(
|
|
|
2222
2233
|
f'you must install onnxruntime and sne4onnx. pip install sne4onnx onnxruntime'
|
|
2223
2234
|
)
|
|
2224
2235
|
sys.exit(1)
|
|
2225
|
-
|
|
2236
|
+
|
|
2226
2237
|
info('')
|
|
2227
2238
|
info(Color.REVERSE(f'Auto JSON generation started'), '=' * 50)
|
|
2228
2239
|
info(
|
|
2229
2240
|
'Searching for optimal parameter replacement JSON to achieve minimum error...'
|
|
2230
2241
|
)
|
|
2231
|
-
|
|
2242
|
+
|
|
2232
2243
|
# Run validation for final outputs only
|
|
2233
2244
|
ops_output_names = output_names
|
|
2234
|
-
|
|
2245
|
+
|
|
2235
2246
|
# Rebuild model for validation
|
|
2236
2247
|
outputs = [
|
|
2237
2248
|
layer_info['tf_node'] \
|
|
@@ -2246,13 +2257,13 @@ def convert(
|
|
|
2246
2257
|
and hasattr(layer_info['tf_node'], 'numpy')
|
|
2247
2258
|
]
|
|
2248
2259
|
validation_model = tf_keras.Model(inputs=inputs, outputs=outputs)
|
|
2249
|
-
|
|
2260
|
+
|
|
2250
2261
|
# Exclude output OPs not subject to validation
|
|
2251
2262
|
ops_output_names = [
|
|
2252
2263
|
ops_output_name for ops_output_name in ops_output_names \
|
|
2253
2264
|
if ops_output_name not in exclude_output_names
|
|
2254
2265
|
]
|
|
2255
|
-
|
|
2266
|
+
|
|
2256
2267
|
# Initial accuracy check
|
|
2257
2268
|
try:
|
|
2258
2269
|
# ONNX dummy inference
|
|
@@ -2266,7 +2277,7 @@ def convert(
|
|
|
2266
2277
|
use_cuda=use_cuda,
|
|
2267
2278
|
shape_hints=shape_hints,
|
|
2268
2279
|
)
|
|
2269
|
-
|
|
2280
|
+
|
|
2270
2281
|
# TF dummy inference
|
|
2271
2282
|
tf_tensor_infos: Dict[Any] = \
|
|
2272
2283
|
dummy_tf_inference(
|
|
@@ -2279,13 +2290,13 @@ def convert(
|
|
|
2279
2290
|
keep_ncw_or_nchw_or_ncdhw_input_names=keep_ncw_or_nchw_or_ncdhw_input_names,
|
|
2280
2291
|
keep_nwc_or_nhwc_or_ndhwc_input_names=keep_nwc_or_nhwc_or_ndhwc_input_names,
|
|
2281
2292
|
)
|
|
2282
|
-
|
|
2293
|
+
|
|
2283
2294
|
# Validation
|
|
2284
2295
|
onnx_tensor_infos = {
|
|
2285
2296
|
output_name: dummy_onnx_output \
|
|
2286
2297
|
for output_name, dummy_onnx_output in zip(ops_output_names, dummy_onnx_outputs)
|
|
2287
2298
|
}
|
|
2288
|
-
|
|
2299
|
+
|
|
2289
2300
|
input_names = [k.name for k in inputs]
|
|
2290
2301
|
for k, v in tf_layers_dict.items():
|
|
2291
2302
|
if 'tf_node_info' in v:
|
|
@@ -2296,34 +2307,34 @@ def convert(
|
|
|
2296
2307
|
for k, v in tf_layers_dict.items() \
|
|
2297
2308
|
if k not in input_names and not hasattr(v['tf_node'], 'numpy') and k in onnx_tensor_infos
|
|
2298
2309
|
}
|
|
2299
|
-
|
|
2310
|
+
|
|
2300
2311
|
agj_check_results = onnx_tf_tensor_validation(
|
|
2301
2312
|
output_pairs=onnx_tf_output_pairs,
|
|
2302
2313
|
rtol=0.0,
|
|
2303
2314
|
atol=1e-4,
|
|
2304
2315
|
)
|
|
2305
|
-
|
|
2316
|
+
|
|
2306
2317
|
# Check if all outputs match
|
|
2307
2318
|
all_matched = True
|
|
2308
2319
|
max_error = 0.0
|
|
2309
2320
|
error_count = 0
|
|
2310
|
-
|
|
2321
|
+
|
|
2311
2322
|
for (onnx_name, tf_name), checked_value in agj_check_results.items():
|
|
2312
2323
|
matched_flg = checked_value[1]
|
|
2313
2324
|
max_abs_err = checked_value[2]
|
|
2314
|
-
|
|
2325
|
+
|
|
2315
2326
|
if matched_flg == 0: # Unmatched
|
|
2316
2327
|
all_matched = False
|
|
2317
2328
|
if isinstance(max_abs_err, (int, float, np.float32, np.float64)):
|
|
2318
2329
|
max_error = max(max_error, max_abs_err)
|
|
2319
2330
|
error_count += 1
|
|
2320
|
-
|
|
2331
|
+
|
|
2321
2332
|
if all_matched:
|
|
2322
2333
|
info(Color.GREEN('All outputs already match! No JSON generation needed.'))
|
|
2323
2334
|
else:
|
|
2324
2335
|
info(f'Initial validation: {error_count} outputs have errors, max error: {max_error:.6f}')
|
|
2325
2336
|
info('Generating optimal JSON...')
|
|
2326
|
-
|
|
2337
|
+
|
|
2327
2338
|
# Generate auto replacement JSON
|
|
2328
2339
|
auto_json = generate_auto_replacement_json(
|
|
2329
2340
|
onnx_graph=gs.import_onnx(onnx_graph),
|
|
@@ -2336,7 +2347,7 @@ def convert(
|
|
|
2336
2347
|
target_accuracy=1e-4,
|
|
2337
2348
|
unlimited_mode=True,
|
|
2338
2349
|
)
|
|
2339
|
-
|
|
2350
|
+
|
|
2340
2351
|
if auto_json.get('operations'):
|
|
2341
2352
|
# Save the JSON
|
|
2342
2353
|
generated_json_path = save_auto_replacement_json(
|
|
@@ -2345,7 +2356,7 @@ def convert(
|
|
|
2345
2356
|
output_dir=output_folder_path,
|
|
2346
2357
|
)
|
|
2347
2358
|
info(f'Generated JSON with {len(auto_json["operations"])} operations: {generated_json_path}')
|
|
2348
|
-
|
|
2359
|
+
|
|
2349
2360
|
info(Color.GREEN(f'\nAuto-generated JSON saved to: {generated_json_path}'))
|
|
2350
2361
|
info(
|
|
2351
2362
|
f'Please re-run the conversion with: -prf {generated_json_path}\n' +
|
|
@@ -2353,7 +2364,7 @@ def convert(
|
|
|
2353
2364
|
)
|
|
2354
2365
|
else:
|
|
2355
2366
|
warn('No viable parameter replacements found.')
|
|
2356
|
-
|
|
2367
|
+
|
|
2357
2368
|
except Exception as ex:
|
|
2358
2369
|
warn(
|
|
2359
2370
|
f'Auto JSON generation failed: {ex}'
|
|
@@ -2970,6 +2981,14 @@ def main():
|
|
|
2970
2981
|
'The search stops when the final output OP accuracy check shows "Matches". ' +
|
|
2971
2982
|
'Cannot be used together with -cotof. When -cotof is specified, JSON auto-generation is disabled.'
|
|
2972
2983
|
)
|
|
2984
|
+
parser.add_argument(
|
|
2985
|
+
'-agje',
|
|
2986
|
+
'--auto_generate_json_on_error',
|
|
2987
|
+
action='store_true',
|
|
2988
|
+
help=\
|
|
2989
|
+
'Attempts to generate a parameter replacement JSON when accuracy validation detects errors ' +
|
|
2990
|
+
'greater than 1e-2. Requires -cotof to collect accuracy metrics. Disabled by default.'
|
|
2991
|
+
)
|
|
2973
2992
|
parser.add_argument(
|
|
2974
2993
|
'-dms',
|
|
2975
2994
|
'--disable_model_save',
|
|
@@ -3077,6 +3096,7 @@ def main():
|
|
|
3077
3096
|
replace_to_pseudo_operators=args.replace_to_pseudo_operators,
|
|
3078
3097
|
param_replacement_file=args.param_replacement_file,
|
|
3079
3098
|
auto_generate_json=args.auto_generate_json,
|
|
3099
|
+
auto_generate_json_on_error=args.auto_generate_json_on_error,
|
|
3080
3100
|
check_gpu_delegate_compatibility=args.check_gpu_delegate_compatibility,
|
|
3081
3101
|
check_onnx_tf_outputs_elementwise_close=args.check_onnx_tf_outputs_elementwise_close,
|
|
3082
3102
|
check_onnx_tf_outputs_elementwise_close_full=args.check_onnx_tf_outputs_elementwise_close_full,
|
|
@@ -3092,4 +3112,3 @@ def main():
|
|
|
3092
3112
|
|
|
3093
3113
|
if __name__ == '__main__':
|
|
3094
3114
|
main()
|
|
3095
|
-
|
|
@@ -1,3 +1,28 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: onnx2tf
|
|
3
|
+
Version: 1.28.3
|
|
4
|
+
Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
|
|
5
|
+
Home-page: https://github.com/PINTO0309/onnx2tf
|
|
6
|
+
Author: Katsuya Hyodo
|
|
7
|
+
Author-email: rmsdh122@yahoo.co.jp
|
|
8
|
+
License: MIT License
|
|
9
|
+
Platform: linux
|
|
10
|
+
Platform: unix
|
|
11
|
+
Requires-Python: >=3.10
|
|
12
|
+
Description-Content-Type: text/markdown
|
|
13
|
+
License-File: LICENSE
|
|
14
|
+
License-File: LICENSE_onnx-tensorflow
|
|
15
|
+
Dynamic: author
|
|
16
|
+
Dynamic: author-email
|
|
17
|
+
Dynamic: description
|
|
18
|
+
Dynamic: description-content-type
|
|
19
|
+
Dynamic: home-page
|
|
20
|
+
Dynamic: license
|
|
21
|
+
Dynamic: license-file
|
|
22
|
+
Dynamic: platform
|
|
23
|
+
Dynamic: requires-python
|
|
24
|
+
Dynamic: summary
|
|
25
|
+
|
|
1
26
|
# onnx2tf
|
|
2
27
|
Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in [onnx-tensorflow](https://github.com/onnx/onnx-tensorflow) ([onnx-tf](https://pypi.org/project/onnx-tf/)). I don't need a Star, but give me a pull request. Since I am adding challenging model optimizations and fixing bugs almost daily, I frequently embed potential bugs that would otherwise break through CI's regression testing. Therefore, if you encounter new problems, I recommend that you try a package that is a few versions older, or try the latest package that will be released in a few days.
|
|
3
28
|
|
|
@@ -309,7 +334,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
|
|
|
309
334
|
docker run --rm -it \
|
|
310
335
|
-v `pwd`:/workdir \
|
|
311
336
|
-w /workdir \
|
|
312
|
-
ghcr.io/pinto0309/onnx2tf:1.28.2
|
|
337
|
+
ghcr.io/pinto0309/onnx2tf:1.28.3
|
|
313
338
|
|
|
314
339
|
or
|
|
315
340
|
|
|
@@ -317,7 +342,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
|
|
|
317
342
|
docker run --rm -it \
|
|
318
343
|
-v `pwd`:/workdir \
|
|
319
344
|
-w /workdir \
|
|
320
|
-
docker.io/pinto0309/onnx2tf:1.28.2
|
|
345
|
+
docker.io/pinto0309/onnx2tf:1.28.3
|
|
321
346
|
|
|
322
347
|
or
|
|
323
348
|
|
|
@@ -476,7 +501,7 @@ onnx2tf -i resnet18-v1-7.onnx -otfv1pb
|
|
|
476
501
|
# Automatic JSON generation only
|
|
477
502
|
# Generates an optimal parameter replacement JSON file for model conversion.
|
|
478
503
|
# The JSON file is saved to {model_name}_auto.json when conversion errors occur
|
|
479
|
-
# or accuracy issues are detected.
|
|
504
|
+
# or accuracy issues are detected and the feature is explicitly enabled.
|
|
480
505
|
onnx2tf -i model.onnx -agj
|
|
481
506
|
|
|
482
507
|
# Accuracy validation only (no JSON generation)
|
|
@@ -489,6 +514,11 @@ onnx2tf -i model.onnx -cotof
|
|
|
489
514
|
# to validate the model accuracy. This ensures the best possible conversion accuracy.
|
|
490
515
|
onnx2tf -i model.onnx -agj -cotof
|
|
491
516
|
|
|
517
|
+
# Accuracy validation with opt-in JSON generation on error
|
|
518
|
+
# Generates a parameter replacement JSON only when accuracy errors greater than 1e-2
|
|
519
|
+
# are detected during validation.
|
|
520
|
+
onnx2tf -i model.onnx -cotof -agje
|
|
521
|
+
|
|
492
522
|
# INT8 Quantization, Full INT8 Quantization
|
|
493
523
|
# INT8 Quantization with INT16 activation, Full INT8 Quantization with INT16 activation
|
|
494
524
|
# Dynamic Range Quantization
|
|
@@ -2026,6 +2056,11 @@ optional arguments:
|
|
|
2026
2056
|
WARNING: This option performs an exhaustive search to find the optimal conversion patterns,
|
|
2027
2057
|
which can take a very long time depending on the model complexity.
|
|
2028
2058
|
|
|
2059
|
+
-agje, --auto_generate_json_on_error
|
|
2060
|
+
Attempts to generate a parameter replacement JSON when accuracy validation finds errors
|
|
2061
|
+
greater than 1e-2. Useful for quickly capturing fixes during -cotof runs.
|
|
2062
|
+
Disabled by default to avoid unexpected file generation.
|
|
2063
|
+
|
|
2029
2064
|
-dms, --disable_model_save
|
|
2030
2065
|
Does not save the converted model. For CIs RAM savings.
|
|
2031
2066
|
|
|
@@ -2096,6 +2131,7 @@ convert(
|
|
|
2096
2131
|
mvn_epsilon: Union[float, NoneType] = 0.0000000001,
|
|
2097
2132
|
param_replacement_file: Optional[str] = '',
|
|
2098
2133
|
auto_generate_json: Optional[bool] = False,
|
|
2134
|
+
auto_generate_json_on_error: Optional[bool] = False,
|
|
2099
2135
|
check_gpu_delegate_compatibility: Optional[bool] = False,
|
|
2100
2136
|
check_onnx_tf_outputs_elementwise_close: Optional[bool] = False,
|
|
2101
2137
|
check_onnx_tf_outputs_elementwise_close_full: Optional[bool] = False,
|
|
@@ -2449,6 +2485,11 @@ convert(
|
|
|
2449
2485
|
the generated JSON is used to re-evaluate accuracy.
|
|
2450
2486
|
Default: False
|
|
2451
2487
|
|
|
2488
|
+
auto_generate_json_on_error: Optional[bool]
|
|
2489
|
+
When accuracy validation detects errors greater than 1e-2, attempts to generate
|
|
2490
|
+
a parameter replacement JSON as a best-effort fix.
|
|
2491
|
+
Default: False
|
|
2492
|
+
|
|
2452
2493
|
check_gpu_delegate_compatibility: Optional[bool]
|
|
2453
2494
|
Run TFLite ModelAnalyzer on the generated Float16 tflite model
|
|
2454
2495
|
to check if the model can be supported by GPU Delegate.
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|