onnx2tf 1.27.10.tar.gz → 1.28.1.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {onnx2tf-1.27.10/onnx2tf.egg-info → onnx2tf-1.28.1}/PKG-INFO +40 -4
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/README.md +39 -3
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/__init__.py +1 -1
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/onnx2tf.py +398 -19
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/utils/common_functions.py +3 -2
- onnx2tf-1.28.1/onnx2tf/utils/iterative_json_optimizer.py +258 -0
- onnx2tf-1.28.1/onnx2tf/utils/json_auto_generator.py +1505 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1/onnx2tf.egg-info}/PKG-INFO +40 -4
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf.egg-info/SOURCES.txt +2 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/LICENSE +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/LICENSE_onnx-tensorflow +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/__main__.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Abs.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Acos.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Acosh.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Add.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/And.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/ArgMax.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/ArgMin.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Asin.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Asinh.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Atan.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Atanh.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/AveragePool.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/BatchNormalization.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Bernoulli.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/BitShift.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Cast.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Ceil.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Celu.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Clip.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Col2Im.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Compress.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Concat.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/ConcatFromSequence.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Constant.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/ConstantOfShape.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Conv.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/ConvInteger.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/ConvTranspose.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Cos.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Cosh.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/CumSum.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/DepthToSpace.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/DequantizeLinear.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Det.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Div.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Dropout.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/DynamicQuantizeLinear.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Einsum.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Elu.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Equal.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Erf.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Exp.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Expand.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/EyeLike.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Flatten.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Floor.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/FusedConv.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/GRU.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Gather.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/GatherElements.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/GatherND.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Gelu.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Gemm.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/GlobalAveragePool.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/GlobalLpPool.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/GlobalMaxPool.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Greater.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/GreaterOrEqual.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/GridSample.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/GroupNorm.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/HammingWindow.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/HannWindow.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/HardSigmoid.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/HardSwish.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Hardmax.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Identity.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/If.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Input.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/InstanceNormalization.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Inverse.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/IsInf.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/IsNaN.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/LRN.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/LSTM.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/LayerNormalization.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/LeakyRelu.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Less.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/LessOrEqual.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Log.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/LogSoftmax.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/LpNormalization.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/MatMul.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/MatMulInteger.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Max.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/MaxPool.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/MaxUnpool.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Mean.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/MeanVarianceNormalization.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/MelWeightMatrix.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Min.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Mish.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Mod.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Mul.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Multinomial.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Neg.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/NonMaxSuppression.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/NonZero.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Not.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/OneHot.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/OptionalGetElement.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/OptionalHasElement.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Or.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/PRelu.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Pad.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Pow.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/QLinearAdd.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/QLinearConcat.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/QLinearConv.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/QLinearLeakyRelu.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/QLinearMatMul.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/QLinearMul.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/QLinearSigmoid.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/QLinearSoftmax.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/QuantizeLinear.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/RNN.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/RandomNormal.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/RandomNormalLike.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/RandomUniform.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/RandomUniformLike.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Range.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Reciprocal.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/ReduceL1.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/ReduceL2.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/ReduceLogSum.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/ReduceLogSumExp.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/ReduceMax.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/ReduceMean.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/ReduceMin.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/ReduceProd.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/ReduceSum.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/ReduceSumSquare.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Relu.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Reshape.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Resize.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/ReverseSequence.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/RoiAlign.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Round.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/STFT.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/ScaleAndTranslate.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Scatter.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/ScatterElements.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/ScatterND.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Selu.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/SequenceAt.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/SequenceConstruct.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/SequenceEmpty.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/SequenceErase.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/SequenceInsert.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/SequenceLength.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Shape.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Shrink.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Sigmoid.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Sign.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Sin.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Sinh.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Size.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Slice.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Softmax.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Softplus.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Softsign.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/SpaceToDepth.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Split.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/SplitToSequence.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Sqrt.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Squeeze.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/StringNormalizer.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Sub.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Sum.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Tan.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Tanh.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/ThresholdedRelu.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Tile.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/TopK.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Transpose.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Trilu.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Unique.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Unsqueeze.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Upsample.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Where.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/Xor.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/_Loop.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/__Loop.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/ops/__init__.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/utils/__init__.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/utils/enums.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf/utils/logging.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf.egg-info/dependency_links.txt +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf.egg-info/entry_points.txt +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/onnx2tf.egg-info/top_level.txt +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/setup.cfg +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/setup.py +0 -0
- {onnx2tf-1.27.10 → onnx2tf-1.28.1}/tests/test_model_convert.py +0 -0
PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: onnx2tf
-Version: 1.27.10
+Version: 1.28.1
 Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
 Home-page: https://github.com/PINTO0309/onnx2tf
 Author: Katsuya Hyodo
@@ -32,7 +32,7 @@ Incidentally, I have never used this tool in practice myself since I started wor
 <img src="https://user-images.githubusercontent.com/33194443/193840307-fa69eace-05a9-4d93-9c5d-999cf88af28e.png" />
 </p>
 
-[](https://pepy.tech/project/onnx2tf)  [](https://img.shields.io/badge/Python-3.8-2BAF2B) [](https://pypi.org/project/onnx2tf/) [](https://github.com/PINTO0309/onnx2tf/actions?query=workflow%3ACodeQL)  [](https://doi.org/10.5281/zenodo.7230085)
+[](https://pepy.tech/project/onnx2tf)  [](https://img.shields.io/badge/Python-3.8-2BAF2B) [](https://pypi.org/project/onnx2tf/) [](https://github.com/PINTO0309/onnx2tf/actions?query=workflow%3ACodeQL)  [](https://doi.org/10.5281/zenodo.7230085) [](https://deepwiki.com/PINTO0309/onnx2tf)
 
 ## Note
 - The torch.script-based `torch.onnx.export` has already been moved to maintenance mode, and we recommend moving to the FX graph-based `torch.onnx.dynamo_export` starting with PyTorch v2.2.0.
@@ -334,7 +334,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
 docker run --rm -it \
 -v `pwd`:/workdir \
 -w /workdir \
-ghcr.io/pinto0309/onnx2tf:1.27.10
+ghcr.io/pinto0309/onnx2tf:1.28.1
 
 or
 
@@ -342,7 +342,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
 docker run --rm -it \
 -v `pwd`:/workdir \
 -w /workdir \
-docker.io/pinto0309/onnx2tf:1.27.10
+docker.io/pinto0309/onnx2tf:1.28.1
 
 or
 
@@ -498,6 +498,22 @@ onnx2tf -i resnet18-v1-7.onnx -okv3
 wget https://github.com/PINTO0309/onnx2tf/releases/download/0.0.2/resnet18-v1-7.onnx
 onnx2tf -i resnet18-v1-7.onnx -otfv1pb
 
+# Automatic JSON generation only
+# Generates an optimal parameter replacement JSON file for model conversion.
+# The JSON file is saved to {model_name}_auto.json when conversion errors occur
+# or accuracy issues are detected.
+onnx2tf -i model.onnx -agj
+
+# Accuracy validation only (no JSON generation)
+# Validates the accuracy between ONNX and TensorFlow outputs without generating
+# any parameter replacement JSON file.
+onnx2tf -i model.onnx -cotof
+
+# Accuracy validation + automatic JSON generation
+# First generates an optimal parameter replacement JSON file, then uses it
+# to validate the model accuracy. This ensures the best possible conversion accuracy.
+onnx2tf -i model.onnx -agj -cotof
+
 # INT8 Quantization, Full INT8 Quantization
 # INT8 Quantization with INT16 activation, Full INT8 Quantization with INT16 activation
 # Dynamic Range Quantization
@@ -1590,6 +1606,7 @@ usage: onnx2tf
 [-coton]
 [-cotor CHECK_ONNX_TF_OUTPUTS_ELEMENTWISE_CLOSE_RTOL]
 [-cotoa CHECK_ONNX_TF_OUTPUTS_ELEMENTWISE_CLOSE_ATOL]
+[-agj]
 [-dms]
 [-uc]
 [-n]
@@ -2025,6 +2042,15 @@ optional arguments:
 The absolute tolerance parameter.
 Default: 1e-4
 
+-agj, --auto_generate_json
+Automatically generates a parameter replacement JSON file that achieves minimal error
+when converting the model. This option explores various parameter combinations to find
+the best settings that result in successful conversion and highest accuracy.
+The search stops when the final output OP accuracy check shows "Matches".
+When used together with -cotof, the generated JSON is used to re-evaluate accuracy.
+WARNING: This option performs an exhaustive search to find the optimal conversion patterns,
+which can take a very long time depending on the model complexity.
+
 -dms, --disable_model_save
 Does not save the converted model. For CIs RAM savings.
 
@@ -2094,6 +2120,7 @@ convert(
 replace_to_pseudo_operators: List[str] = None,
 mvn_epsilon: Union[float, NoneType] = 0.0000000001,
 param_replacement_file: Optional[str] = '',
+auto_generate_json: Optional[bool] = False,
 check_gpu_delegate_compatibility: Optional[bool] = False,
 check_onnx_tf_outputs_elementwise_close: Optional[bool] = False,
 check_onnx_tf_outputs_elementwise_close_full: Optional[bool] = False,
@@ -2438,6 +2465,15 @@ convert(
 param_replacement_file: Optional[str]
 Parameter replacement file path. (.json)
 
+auto_generate_json: Optional[bool]
+Automatically generates a parameter replacement JSON file that achieves minimal error
+when converting the model. This option explores various parameter combinations to find
+the best settings that result in successful conversion and highest accuracy.
+The search stops when the final output OP accuracy check shows "Matches".
+When used together with check_onnx_tf_outputs_elementwise_close_full,
+the generated JSON is used to re-evaluate accuracy.
+Default: False
+
 check_gpu_delegate_compatibility: Optional[bool]
 Run TFLite ModelAnalyzer on the generated Float16 tflite model
 to check if the model can be supported by GPU Delegate.
README.md

@@ -7,7 +7,7 @@ Incidentally, I have never used this tool in practice myself since I started wor
 <img src="https://user-images.githubusercontent.com/33194443/193840307-fa69eace-05a9-4d93-9c5d-999cf88af28e.png" />
 </p>
 
-[](https://pepy.tech/project/onnx2tf)  [](https://img.shields.io/badge/Python-3.8-2BAF2B) [](https://pypi.org/project/onnx2tf/) [](https://github.com/PINTO0309/onnx2tf/actions?query=workflow%3ACodeQL)  [](https://doi.org/10.5281/zenodo.7230085)
+[](https://pepy.tech/project/onnx2tf)  [](https://img.shields.io/badge/Python-3.8-2BAF2B) [](https://pypi.org/project/onnx2tf/) [](https://github.com/PINTO0309/onnx2tf/actions?query=workflow%3ACodeQL)  [](https://doi.org/10.5281/zenodo.7230085) [](https://deepwiki.com/PINTO0309/onnx2tf)
 
 ## Note
 - The torch.script-based `torch.onnx.export` has already been moved to maintenance mode, and we recommend moving to the FX graph-based `torch.onnx.dynamo_export` starting with PyTorch v2.2.0.
@@ -309,7 +309,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
 docker run --rm -it \
 -v `pwd`:/workdir \
 -w /workdir \
-ghcr.io/pinto0309/onnx2tf:1.27.10
+ghcr.io/pinto0309/onnx2tf:1.28.1
 
 or
 
@@ -317,7 +317,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
 docker run --rm -it \
 -v `pwd`:/workdir \
 -w /workdir \
-docker.io/pinto0309/onnx2tf:1.27.10
+docker.io/pinto0309/onnx2tf:1.28.1
 
 or
 
@@ -473,6 +473,22 @@ onnx2tf -i resnet18-v1-7.onnx -okv3
 wget https://github.com/PINTO0309/onnx2tf/releases/download/0.0.2/resnet18-v1-7.onnx
 onnx2tf -i resnet18-v1-7.onnx -otfv1pb
 
+# Automatic JSON generation only
+# Generates an optimal parameter replacement JSON file for model conversion.
+# The JSON file is saved to {model_name}_auto.json when conversion errors occur
+# or accuracy issues are detected.
+onnx2tf -i model.onnx -agj
+
+# Accuracy validation only (no JSON generation)
+# Validates the accuracy between ONNX and TensorFlow outputs without generating
+# any parameter replacement JSON file.
+onnx2tf -i model.onnx -cotof
+
+# Accuracy validation + automatic JSON generation
+# First generates an optimal parameter replacement JSON file, then uses it
+# to validate the model accuracy. This ensures the best possible conversion accuracy.
+onnx2tf -i model.onnx -agj -cotof
+
 # INT8 Quantization, Full INT8 Quantization
 # INT8 Quantization with INT16 activation, Full INT8 Quantization with INT16 activation
 # Dynamic Range Quantization
@@ -1565,6 +1581,7 @@ usage: onnx2tf
 [-coton]
 [-cotor CHECK_ONNX_TF_OUTPUTS_ELEMENTWISE_CLOSE_RTOL]
 [-cotoa CHECK_ONNX_TF_OUTPUTS_ELEMENTWISE_CLOSE_ATOL]
+[-agj]
 [-dms]
 [-uc]
 [-n]
@@ -2000,6 +2017,15 @@ optional arguments:
 The absolute tolerance parameter.
 Default: 1e-4
 
+-agj, --auto_generate_json
+Automatically generates a parameter replacement JSON file that achieves minimal error
+when converting the model. This option explores various parameter combinations to find
+the best settings that result in successful conversion and highest accuracy.
+The search stops when the final output OP accuracy check shows "Matches".
+When used together with -cotof, the generated JSON is used to re-evaluate accuracy.
+WARNING: This option performs an exhaustive search to find the optimal conversion patterns,
+which can take a very long time depending on the model complexity.
+
 -dms, --disable_model_save
 Does not save the converted model. For CIs RAM savings.
 
@@ -2069,6 +2095,7 @@ convert(
 replace_to_pseudo_operators: List[str] = None,
 mvn_epsilon: Union[float, NoneType] = 0.0000000001,
 param_replacement_file: Optional[str] = '',
+auto_generate_json: Optional[bool] = False,
 check_gpu_delegate_compatibility: Optional[bool] = False,
 check_onnx_tf_outputs_elementwise_close: Optional[bool] = False,
 check_onnx_tf_outputs_elementwise_close_full: Optional[bool] = False,
@@ -2413,6 +2440,15 @@ convert(
 param_replacement_file: Optional[str]
 Parameter replacement file path. (.json)
 
+auto_generate_json: Optional[bool]
+Automatically generates a parameter replacement JSON file that achieves minimal error
+when converting the model. This option explores various parameter combinations to find
+the best settings that result in successful conversion and highest accuracy.
+The search stops when the final output OP accuracy check shows "Matches".
+When used together with check_onnx_tf_outputs_elementwise_close_full,
+the generated JSON is used to re-evaluate accuracy.
+Default: False
+
 check_gpu_delegate_compatibility: Optional[bool]
 Run TFLite ModelAnalyzer on the generated Float16 tflite model
 to check if the model can be supported by GPU Delegate.
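For reference, a minimal Python sketch of the same combination shown by the `-agj -cotof` CLI example, driven through the `convert()` keyword arguments added in this release. Only `auto_generate_json` and `check_onnx_tf_outputs_elementwise_close_full` come from the diff above; the input/output parameter names are assumed from the existing onnx2tf API and may differ in your installed version.

```python
# Hedged usage sketch of the new auto_generate_json option via the Python API.
# auto_generate_json / check_onnx_tf_outputs_elementwise_close_full are documented
# in this diff; input_onnx_file_path and output_folder_path are assumed parameter
# names from the pre-existing onnx2tf API.
from onnx2tf import convert

convert(
    input_onnx_file_path="model.onnx",    # assumed existing parameter name
    output_folder_path="saved_model",     # assumed existing parameter name
    auto_generate_json=True,              # new in 1.28.x: search for an optimal replacement JSON
    check_onnx_tf_outputs_elementwise_close_full=True,  # re-validate accuracy with the generated JSON
)
```

As with the CLI flag, the warning in the help text applies: the search is exhaustive and can take a very long time on complex models.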