tico 0.1.0.dev250701__py3-none-any.whl → 0.1.0.dev250703__py3-none-any.whl
This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- tico/__init__.py +1 -1
- tico/interpreter/infer.py +33 -27
- tico/serialize/operators/op_conv2d.py +16 -32
- tico/serialize/operators/op_depthwise_conv2d.py +22 -37
- tico/serialize/operators/op_transpose_conv.py +16 -32
- tico/utils/installed_packages.py +35 -0
- tico/utils/padding.py +59 -19
- {tico-0.1.0.dev250701.dist-info → tico-0.1.0.dev250703.dist-info}/METADATA +1 -1
- {tico-0.1.0.dev250701.dist-info → tico-0.1.0.dev250703.dist-info}/RECORD +13 -12
- {tico-0.1.0.dev250701.dist-info → tico-0.1.0.dev250703.dist-info}/LICENSE +0 -0
- {tico-0.1.0.dev250701.dist-info → tico-0.1.0.dev250703.dist-info}/WHEEL +0 -0
- {tico-0.1.0.dev250701.dist-info → tico-0.1.0.dev250703.dist-info}/entry_points.txt +0 -0
- {tico-0.1.0.dev250701.dist-info → tico-0.1.0.dev250703.dist-info}/top_level.txt +0 -0
tico/__init__.py
CHANGED
@@ -21,7 +21,7 @@ from tico.config import CompileConfigV1, get_default_config
 from tico.utils.convert import convert, convert_from_exported_program, convert_from_pt2
 
 # THIS LINE IS AUTOMATICALLY GENERATED BY setup.py
-__version__ = "0.1.0.dev250701"
+__version__ = "0.1.0.dev250703"
 
 MINIMUM_SUPPORTED_VERSION = "2.5.0"
 SECURE_TORCH_VERSION = "2.6.0"
tico/interpreter/infer.py
CHANGED
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Any
+from typing import Any, Sequence
 
 import numpy as np
 import torch
@@ -20,40 +20,46 @@ from circle_schema import circle
 
 from tico.interpreter.interpreter import Interpreter
 from tico.serialize.circle_mapping import np_dtype_from_circle_dtype, to_circle_dtype
+from tico.utils.installed_packages import is_dynamic_cache_available
 
 
-def
-    """
-    l = []
-    for value in inputs:
-        if value == None:
-            continue
+def flatten_and_convert(inputs: Sequence) -> tuple:
+    result = []  # type: ignore[var-annotated]
+    for item in inputs:
+        if item is None:
+            continue
+
+        # 1. recursion on list and tuple
+        if isinstance(item, (list, tuple)):
+            result.extend(flatten_and_convert(item))
+            continue
+
+        # 2. handle DynamicCache
+        if is_dynamic_cache_available():
+            from transformers.cache_utils import DynamicCache
+
+            if isinstance(item, DynamicCache):
+                # NOTE The tensor order is: key_in → key_out → value_in → value_out
+                #
+                # Refer to https://github.com/huggingface/transformers/blob/3457e8e73e4f5532cc69059682b1ba4484d7e7e8/src/transformers/cache_utils.py#L557
+                # ```py
+                # self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2)
+                # self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2)
+                # ```
+                result.extend(item.key_cache)
+                result.extend(item.value_cache)
+                continue
+
+        # 3. Convert to tensors
+        result.append(item if isinstance(item, torch.Tensor) else torch.tensor(item))
+
+    return tuple(result)
 
 
 def infer(circle_binary: bytes, *args: Any, **kwargs: Any) -> Any:
     # When converting a model, it is assumed that the order of keyword arguments is maintained.
-    user_inputs =
-    # Cast them to torch.Tensor to make it simple.
-    user_inputs = tuple(
-        torch.tensor(user_input) if type(user_input) != torch.Tensor else user_input
-        for user_input in user_inputs
-    )
+    raw_inputs = args + tuple(kwargs.values())
+    user_inputs = flatten_and_convert(raw_inputs)
 
     # Get input spec from circle binary.
     model = circle.Model.Model.GetRootAsModel(circle_binary, 0)
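For context, a usage sketch of the new input-flattening helper. The input values below are invented; the import path and behavior follow the diff above.

```python
# Hedged sketch of flatten_and_convert from tico/interpreter/infer.py.
import torch

from tico.interpreter.infer import flatten_and_convert

raw_inputs = (torch.ones(1, 3), 2.0, None, (torch.zeros(2), [4, 5]))
flat = flatten_and_convert(raw_inputs)

# None entries are dropped, nesting is flattened recursively, and
# non-tensor values are wrapped with torch.tensor().
assert all(isinstance(t, torch.Tensor) for t in flat)
print([tuple(t.shape) for t in flat])  # [(1, 3), (), (2,), (), ()]
```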
tico/serialize/operators/op_conv2d.py
CHANGED
@@ -26,7 +26,7 @@ from tico.serialize.operators.node_visitor import NodeVisitor, register_node_visitor
 from tico.serialize.operators.utils import create_builtin_operator, get_op_index
 from tico.serialize.quant_param import QPARAM_KEY, QuantParam
 from tico.utils.define import define_pad_node
-from tico.utils.padding import
+from tico.utils.padding import identify_padding
 from tico.utils.validate_args_kwargs import Conv2DArgs
 
 
@@ -111,53 +111,39 @@ class Conv2dVisitor(NodeVisitor):
 
         assert groups == 1, "Only support group 1 conv2d"
 
-        input_dtype: int = extract_circle_dtype(input_)
         input_shape = list(extract_shape(input_))
+        output_shape = list(extract_shape(node))
+        weight_shape = list(extract_shape(weight))
         assert len(input_shape) == 4, len(input_shape)
-        output_shape = extract_shape(node)
         assert len(output_shape) == 4, len(output_shape)
+        assert len(weight_shape) == 4, len(weight_shape)
 
-        weight_shape = list(extract_shape(weight))
-
-        if is_valid_padding(padding):
-            conv2d_padding_type = VALID
-        elif is_same_padding(padding, input_shape, output_shape) and stride == [1, 1]:
-            conv2d_padding_type = SAME
-        else:
-            assert isinstance(padding, list) and len(padding) == 2
-            # when data_foramt is "NHWC", padding should be [[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]
+        pad_decision = identify_padding(padding, input_shape, output_shape, stride)
+
+        conv_input: torch.fx.Node | circle.Tensor.TensorT = input_
+        if pad_decision.explicit_pad_hw is not None:
+            pad_h, pad_w = pad_decision.explicit_pad_hw
             paddings = torch.tensor(
                 [
                     [0, 0],
-                    [
-                    [
+                    [pad_h, pad_h],
+                    [pad_w, pad_w],
                     [0, 0],
                 ],
                 dtype=torch.int32,
             )
             pad_output_shape = [
                 input_shape[0],
-                input_shape[1],
-                input_shape[2],
+                input_shape[1] + pad_h * 2,
+                input_shape[2] + pad_w * 2,
                 input_shape[3],
             ]
-            # Add (pad_top+pad_bottom) to pad_output_shape_h
-            pad_output_shape[1] += padding[0] * 2
-            # Add (pad_left+pad_Right) to pad_output_shape_w
-            pad_output_shape[2] += padding[1] * 2
             # create padded output tensor
-            input_qparam: Optional[QuantParam] = (
-                input_.meta[QPARAM_KEY] if QPARAM_KEY in input_.meta else None
-            )
+            input_qparam: Optional[QuantParam] = input_.meta.get(QPARAM_KEY)
             pad_output = self.graph.add_tensor_from_scratch(
                 prefix=f"{node.name}_input_pad_output",
                 shape=pad_output_shape,
-                dtype=
+                dtype=extract_circle_dtype(input_),
                 qparam=input_qparam,
                 source_node=node,
             )
@@ -170,13 +156,11 @@ class Conv2dVisitor(NodeVisitor):
 
         if bias is None:
             # luci-interpreter can't run no bias conv. Let's add zero vector for bias.
-            out_channel = weight_shape[0]
-            bias = [0.0] * out_channel  # type: ignore[assignment]
+            bias = [0.0] * weight_shape[0]  # type: ignore[assignment]
 
         # Conv2D
         conv2d_operator = self.define_conv2d_node(
+            pad_decision.conv_padding_type,  # 'SAME'(0) or 'VALID'(1)
             stride,
             dilation,
             [conv_input, weight, bias],
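To make the new control flow concrete, here is a small standalone sketch of the NHWC Pad bookkeeping the serializer performs when `identify_padding` returns an explicit pad. The shapes and pad values are invented for illustration.

```python
# Sketch (assumed NHWC shapes) of the paddings tensor and padded output shape
# built when pad_decision.explicit_pad_hw is set.
import torch

pad_h, pad_w = 2, 3           # pretend pad_decision.explicit_pad_hw == (2, 3)
input_shape = [1, 16, 16, 8]  # NHWC

# Circle's Pad expects [[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]].
paddings = torch.tensor(
    [[0, 0], [pad_h, pad_h], [pad_w, pad_w], [0, 0]],
    dtype=torch.int32,
)

pad_output_shape = [
    input_shape[0],
    input_shape[1] + pad_h * 2,
    input_shape[2] + pad_w * 2,
    input_shape[3],
]
print(pad_output_shape)  # [1, 20, 22, 8]
```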
tico/serialize/operators/op_depthwise_conv2d.py
CHANGED
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Dict, List, TYPE_CHECKING
+from typing import Dict, List, Optional, TYPE_CHECKING
 
 if TYPE_CHECKING:
     import torch._ops
@@ -24,8 +24,9 @@ from tico.serialize.circle_mapping import extract_circle_dtype, extract_shape
 from tico.serialize.operators.hashable_opcode import OpCode
 from tico.serialize.operators.node_visitor import NodeVisitor, register_node_visitor
 from tico.serialize.operators.utils import create_builtin_operator, get_op_index
+from tico.serialize.quant_param import QPARAM_KEY, QuantParam
 from tico.utils.define import define_pad_node
-from tico.utils.padding import
+from tico.utils.padding import identify_padding
 from tico.utils.validate_args_kwargs import Conv2DArgs
 
 
@@ -114,63 +115,49 @@ class DepthwiseConv2dVisitor(NodeVisitor):
         dilation = args.dilation
         groups = args.groups
 
-        input_dtype: int = extract_circle_dtype(input_)
         input_shape = list(extract_shape(input_))  # OHWI
-        assert len(input_shape) == 4, len(input_shape)
-
         output_shape = list(extract_shape(node))  # OHWI
-        assert len(output_shape) == 4, len(output_shape)
-
         weight_shape = list(extract_shape(weight))  # 1HWO
-        assert (
-        )
+        assert len(input_shape) == 4, len(input_shape)
+        assert len(output_shape) == 4, len(output_shape)
+        assert len(weight_shape) == 4
         assert weight_shape[0] == 1
         assert weight_shape[3] == output_shape[3]
         assert input_shape[3] == groups
+        assert (
+            weight_shape[3] % groups == 0
+        ), "Depthwise convolution requires output channel to be divisible by groups"
 
         depthMultiplier = weight_shape[3] // input_shape[3]
         assert weight_shape[3] % input_shape[3] == 0, "depthMultiplier must be integer"
 
-        if is_valid_padding(padding):
-            dconv2d_padding_type = VALID
-        elif is_same_padding(padding, input_shape, output_shape):
-            dconv2d_padding_type = SAME
-        else:
-            assert isinstance(padding, list) and len(padding) == 2
-            # when data_format is "NHWC", padding should be [[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]
+        pad_decision = identify_padding(padding, input_shape, output_shape, stride)
+
+        conv_input: torch.fx.Node | circle.Tensor.TensorT = input_
+        if pad_decision.explicit_pad_hw is not None:
+            pad_h, pad_w = pad_decision.explicit_pad_hw
             paddings = torch.tensor(
                 [
                     [0, 0],
-                    [
-                    [
+                    [pad_h, pad_h],
+                    [pad_w, pad_w],
                     [0, 0],
                 ],
                 dtype=torch.int32,
             )
             pad_output_shape = [
                 input_shape[0],
-                input_shape[1],
-                input_shape[2],
+                input_shape[1] + pad_h * 2,
+                input_shape[2] + pad_w * 2,
                 input_shape[3],
             ]
-            # Add (pad_top+pad_bottom) to pad_output_shape_h
-            pad_output_shape[1] += padding[0] * 2
-            # Add (pad_left+pad_Right) to pad_output_shape_w
-            pad_output_shape[2] += padding[1] * 2
             # create padded output tensor
+            input_qparam: Optional[QuantParam] = input_.meta.get(QPARAM_KEY)
             pad_output = self.graph.add_tensor_from_scratch(
                 prefix=f"{node.name}_input_pad_output",
                 shape=pad_output_shape,
-                dtype=
+                dtype=extract_circle_dtype(input_),
+                qparam=input_qparam,
                 source_node=node,
             )
             # CirclePad
@@ -182,13 +169,11 @@ class DepthwiseConv2dVisitor(NodeVisitor):
 
         if bias is None:
             # luci-interpreter can't run no bias conv. Let's add zero vector for bias.
-            out_channel = weight_shape[3]
-            bias = [0.0] * out_channel  # type: ignore[assignment]
+            bias = [0.0] * weight_shape[3]  # type: ignore[assignment]
 
         # DConv2D
         dconv2d_operator = self.define_dconv_node(
+            pad_decision.conv_padding_type,
            stride,
            dilation,
            depthMultiplier,
tico/serialize/operators/op_transpose_conv.py
CHANGED
@@ -30,7 +30,7 @@ from tico.serialize.operators.node_visitor import NodeVisitor, register_node_visitor
 from tico.serialize.operators.utils import create_builtin_operator, get_op_index
 from tico.serialize.quant_param import QPARAM_KEY, QuantParam
 from tico.utils.define import define_pad_node
-from tico.utils.padding import
+from tico.utils.padding import identify_padding
 from tico.utils.validate_args_kwargs import ConvTranspose2DArgs
 
 
@@ -82,53 +82,39 @@ class TransposeConvVisitor(NodeVisitor):
 
         assert groups == 1, "Only support group 1"
 
-        input_dtype: int = extract_circle_dtype(input_)
         input_shape = list(extract_shape(input_))
+        output_shape = list(extract_shape(node))
+        weight_shape = list(extract_shape(weight))
         assert len(input_shape) == 4, len(input_shape)
-        output_shape = extract_shape(node)
         assert len(output_shape) == 4, len(output_shape)
+        assert len(weight_shape) == 4, len(weight_shape)
 
-        weight_shape = list(extract_shape(weight))
-
-        if is_valid_padding(padding):
-            tconv2d_padding_type = VALID
-        elif is_same_padding(padding, input_shape, output_shape) and stride == [1, 1]:
-            tconv2d_padding_type = SAME
-        else:
-            assert isinstance(padding, list) and len(padding) == 2
-            # when data_foramt is "NHWC", padding should be [[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]
+        pad_decision = identify_padding(padding, input_shape, output_shape, stride)
+
+        conv_input: torch.fx.Node | circle.Tensor.TensorT = input_
+        if pad_decision.explicit_pad_hw is not None:
+            pad_h, pad_w = pad_decision.explicit_pad_hw
             paddings = torch.tensor(
                 [
                     [0, 0],
-                    [
-                    [
+                    [pad_h, pad_h],
+                    [pad_w, pad_w],
                     [0, 0],
                 ],
                 dtype=torch.int32,
            )
            pad_output_shape = [
                input_shape[0],
-               input_shape[1],
-               input_shape[2],
+               input_shape[1] + pad_h * 2,
+               input_shape[2] + pad_w * 2,
                input_shape[3],
            ]
-           # Add (pad_top+pad_bottom) to pad_output_shape_h
-           pad_output_shape[1] += padding[0] * 2
-           # Add (pad_left+pad_Right) to pad_output_shape_w
-           pad_output_shape[2] += padding[1] * 2
            # create padded output tensor
-           input_qparam: Optional[QuantParam] = (
-               input_.meta[QPARAM_KEY] if QPARAM_KEY in input_.meta else None
-           )
+           input_qparam: Optional[QuantParam] = input_.meta.get(QPARAM_KEY)
            pad_output = self.graph.add_tensor_from_scratch(
                prefix=f"{node.name}_input_pad_output",
                shape=pad_output_shape,
-               dtype=
+               dtype=extract_circle_dtype(input_),
                qparam=input_qparam,
                source_node=node,
            )
@@ -141,9 +127,7 @@ class TransposeConvVisitor(NodeVisitor):
 
         if bias is None:
             # luci-interpreter can't run no bias conv. Let's add zero vector for bias.
-            out_channel = weight_shape[0]
-            bias = [0.0] * out_channel  # type: ignore[assignment]
+            bias = [0.0] * weight_shape[0]  # type: ignore[assignment]
 
         # First arguemnt is output shape of tconv.
         assert output_shape[0] == input_shape[0]
@@ -156,7 +140,7 @@ class TransposeConvVisitor(NodeVisitor):
 
         # TConv2D
         tconv2d_operator = self.define_transpose_conv_node(
+            pad_decision.conv_padding_type,  # 'SAME'(0) or 'VALID'(1)
             stride,
             [tconv_output_tensor, weight, conv_input, bias],
             [node],
tico/utils/installed_packages.py
ADDED
@@ -0,0 +1,35 @@
+# Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+##############################
+#### Transformers Package ####
+##############################
+
+
+def is_transformers_installed():
+    try:
+        import transformers
+
+        return True
+    except ImportError:
+        return False
+
+
+def is_dynamic_cache_available():
+    try:
+        from transformers.cache_utils import DynamicCache
+
+        return True
+    except ImportError:
+        return False
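A brief usage sketch of these optional-dependency guards; the print statement is illustrative only.

```python
# Sketch of how the availability checks are intended to be used.
from tico.utils.installed_packages import (
    is_dynamic_cache_available,
    is_transformers_installed,
)

if is_transformers_installed():
    print("transformers is importable")

if is_dynamic_cache_available():
    # Only now is it safe to import and handle DynamicCache objects.
    from transformers.cache_utils import DynamicCache
```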
tico/utils/padding.py
CHANGED
@@ -12,36 +12,76 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from enum import IntEnum
+from typing import NamedTuple, Optional, Sequence, Tuple, Union
+
 import torch
 
 from tico.utils.errors import InvalidArgumentError
 
-SAME = 0
-VALID = 1
 
+PaddingValue = Union[str, Sequence[int]]  # "same" | "valid" | [pad_h, pad_w]
+
+
+class ConvPadding(IntEnum):
+    SAME = 0  # auto-pad, HW out == HW in
+    VALID = 1  # no implicit padding
 
-def is_valid_padding(padding: str | list):
-    if isinstance(padding, str):
-        return padding == "valid"
 
+class ConvPaddingInfo(NamedTuple):
+    """
+    Result of padding analysis.
+    """
 
+    conv_padding_type: ConvPadding
+    explicit_pad_hw: Optional[Tuple[int, int]]  # None -> no extra Pad() op needed
 
 
-def
-    padding:
+def identify_padding(
+    padding: PaddingValue,
+    input_shape: Sequence[int],
+    output_shape: Sequence[int],
+    stride: Sequence[int],
+) -> ConvPaddingInfo:
+    """
+    Normalizes all PyTorch `padding` variants to a single decision.
+
+    Rules
+    -----
+    1. "valid" or [0, 0] → VALID, no Pad().
+    2. "same" or the shapes already match (stride==1) → SAME, no Pad().
+    3. Any other 2-element list → VALID + explicit Pad().
+
+    TODO The following SAME padding check assumes stride == 1.
+    For stride > 1, Conv2D and TransposeConv2D require different formulas
+    to determine the SAME padding. Update this logic to handle general
+    stride values correctly for both cases.
+    """
+    # ─── 1. String form ────────────────────────────────────────────────────
     if isinstance(padding, str):
+        pad = padding.lower()
+        if pad == "valid":
+            return ConvPaddingInfo(ConvPadding.VALID, None)
+        if pad == "same":
+            return ConvPaddingInfo(ConvPadding.SAME, None)
+        raise InvalidArgumentError(f"Unknown padding string: {padding}")
+
+    # ─── 2. List / tuple form ─────────────────────────────────────────────
+    if not (isinstance(padding, (list, tuple)) and len(padding) == 2):
+        raise InvalidArgumentError(
+            "Padding must be 'valid', 'same', or a [pad_h, pad_w] list"
+        )
 
+    pad_h, pad_w = padding
+    # [0, 0] → VALID
+    if pad_h == 0 and pad_w == 0:
+        return ConvPaddingInfo(ConvPadding.VALID, None)
 
+    # SAME heuristic: output H/W already match input when stride is 1
+    hw_in = tuple(input_shape[1:3])
+    hw_out = tuple(output_shape[1:3])
+    if hw_in == hw_out and stride == [1, 1]:
+        return ConvPaddingInfo(ConvPadding.SAME, None)
 
+    # Anything else = explicit symmetric padding
+    return ConvPaddingInfo(ConvPadding.VALID, (pad_h, pad_w))
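A usage sketch of `identify_padding` covering the three rules in its docstring; the NHWC shapes are invented for illustration.

```python
# Hedged sketch exercising identify_padding's three outcomes.
from tico.utils.padding import ConvPadding, identify_padding

nhwc_in = [1, 16, 16, 8]

# 1. "valid" (or [0, 0]) -> VALID, no extra Pad operator.
info = identify_padding("valid", nhwc_in, nhwc_in, [1, 1])
assert info.conv_padding_type == ConvPadding.VALID and info.explicit_pad_hw is None

# 2. Non-zero padding, but H/W unchanged with stride 1 -> SAME, no extra Pad.
info = identify_padding([1, 1], nhwc_in, nhwc_in, [1, 1])
assert info.conv_padding_type == ConvPadding.SAME and info.explicit_pad_hw is None

# 3. Anything else -> VALID plus an explicit symmetric (pad_h, pad_w) Pad.
info = identify_padding([2, 3], nhwc_in, [1, 18, 20, 8], [1, 1])
assert info.conv_padding_type == ConvPadding.VALID
assert info.explicit_pad_hw == (2, 3)
```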
{tico-0.1.0.dev250701.dist-info → tico-0.1.0.dev250703.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-tico/__init__.py,sha256=
+tico/__init__.py,sha256=P6Y7D-gIsRLMIUA9w5OkdqONBxv5lRR70wvkWFHh1wY,1743
 tico/pt2_to_circle.py,sha256=gu3MD4Iqc0zMZcCZ2IT8oGbyj21CTSbT3Rgd9s2B_9A,2767
 tico/config/__init__.py,sha256=xZzCXjZ84qE-CsBi-dfaL05bqpQ3stKKfTXhnrJRyVs,142
 tico/config/base.py,sha256=anwOiJFkUxUi7Cef573JgQcjk6S-FSi6O_TLjYASW-g,1244
@@ -57,7 +57,7 @@ tico/experimental/quantization/passes/propagate_qparam_forward.py,sha256=RhUHGCR
 tico/experimental/quantization/passes/quantize_bias.py,sha256=ZQ3rETYStpW28JUbODRixbq5sDEOiIOB_qWA-Jzuu-Y,4337
 tico/experimental/quantization/passes/remove_weight_dequant_op.py,sha256=Klc_9-94tl0_AuAToKOjsWED_YPk5RB67eum0ddPX7o,6588
 tico/interpreter/__init__.py,sha256=IO6FP_xYbGy0dW0HL26GXD3ouxARaxCK7bz9dn4blPQ,26
-tico/interpreter/infer.py,sha256=
+tico/interpreter/infer.py,sha256=1ZFe3DVMR2mlwBosoedqoL0-CGN_01CKLgMgxuw62KA,4861
 tico/interpreter/interpreter.py,sha256=tGbluCbrehTCqBu8mtGDNzby_ieJ2ry8_RH_eC0CQxk,3828
 tico/passes/__init__.py,sha256=IO6FP_xYbGy0dW0HL26GXD3ouxARaxCK7bz9dn4blPQ,26
 tico/passes/cast_aten_where_arg_type.py,sha256=ybtGj1L7_2zGyfb_G-_y1N1mRgKHVq6fBZc-9-fH9sA,7229
@@ -115,11 +115,11 @@ tico/serialize/operators/op_cat.py,sha256=XDYOh0XAyrM0TlxVm6Sa0OFFGrKk7aSDcGXC-h
 tico/serialize/operators/op_clamp.py,sha256=ZRAsXLGsZqJEh4wXxESEpRJkRtUuJWTDgAem6lr9_5I,4298
 tico/serialize/operators/op_clone.py,sha256=vzDYJ8TS3tc2BAyd_z8nt5VqT1inpymSseMEhd9dva0,2394
 tico/serialize/operators/op_constant_pad_nd.py,sha256=OpP4AP-d1IFcWZolNa-o9ZxzXJQkMdG9WQ66soX3s-E,2675
-tico/serialize/operators/op_conv2d.py,sha256=
+tico/serialize/operators/op_conv2d.py,sha256=UfYk5xnA9PqVYyjU9dUCSW0CiCmcEK3LnlnFh0WY4Gg,6599
 tico/serialize/operators/op_copy.py,sha256=vaianLQ19-2ZQZ-MdQ07YuOPeFeo_HAx2a0Qfn7I5Kk,6122
 tico/serialize/operators/op_cos.py,sha256=N12bNyuTQIxRnD0eHRPdFVzRQPMy1NFM4iM8oQ4lYzw,2034
 tico/serialize/operators/op_cumsum.py,sha256=3fmOf1mIeCX1uhTBcSJmRGXejzLtO8UwaI1eEQDC6nA,3798
-tico/serialize/operators/op_depthwise_conv2d.py,sha256=
+tico/serialize/operators/op_depthwise_conv2d.py,sha256=wH1SFjhWJdJrb8xi2qCiCeSWNxlL8IjEwALGCxTQxbc,7034
 tico/serialize/operators/op_dequantize_per_channel.py,sha256=aPcVxjdgvfSFoLnv9NL-RxO5vZYj8ulqriMP5LHIWs0,3133
 tico/serialize/operators/op_dequantize_per_tensor.py,sha256=u9aK_Xle9rDN0EHLE0YrCTlXY4Q53Ch9Di4qmx7ynps,2304
 tico/serialize/operators/op_div.py,sha256=WjeM2Ux7TyGlSNx2aVC783JvcL0xnY6FBYo1Q_kdb5Q,2201
@@ -175,7 +175,7 @@ tico/serialize/operators/op_sub.py,sha256=yZskQJF0ylXVk02Uid8djPNIWDJ-0uHJar4UYh
 tico/serialize/operators/op_sum.py,sha256=B5aSwQMhyoBe2JYdE5nVQ3QeVDSzL-yuZZujsG08OdQ,2294
 tico/serialize/operators/op_tanh.py,sha256=rs7FsbQeUQ7Ak8RoQV9ymNGXHXRObojfY_SiqJiyqdA,1846
 tico/serialize/operators/op_to_copy.py,sha256=a8T0uPMavMO_md1a-4_0dlvDHyZS_xew0qB6xjf69rI,3934
-tico/serialize/operators/op_transpose_conv.py,sha256=
+tico/serialize/operators/op_transpose_conv.py,sha256=YDObXXaHNOD7yjO1ccaB_NCfc5-L76ClvT3pduL8E90,5631
 tico/serialize/operators/op_unsqueeze.py,sha256=ZHhfVXSWEiwb2VDYX5uhxbGQyzZjKT7CrbBpVGxVHBU,2310
 tico/serialize/operators/op_view.py,sha256=5EMww-ve17Vm9XPuV03Tn7vJsjpU2J8U4d_FOrlm9_o,2546
 tico/serialize/operators/op_where.py,sha256=doE81GSwygrPBm3JIfN9w7kKXxeIYKxgk0eoY22QIcg,2845
@@ -186,9 +186,10 @@ tico/utils/define.py,sha256=Ypgp7YffM4pgPl4Zh6TmogSn1OxGBMRw_e09qYGflZk,1467
 tico/utils/diff_graph.py,sha256=_eDGGPDPYQD4b--MXX0DLoVgSt_wLfNPt47UlolLLR4,5272
 tico/utils/errors.py,sha256=f3csJjgbXG9W1aHhqEcou008Aor19W57X8oT5Hx8w1M,954
 tico/utils/graph.py,sha256=Y6aODsnc_-9l61oanknb7K1jqJ8B35iPypOKkM0Qkk0,9149
+tico/utils/installed_packages.py,sha256=J0FTwnkCGs0MxRWoCMYAqiwH7Z0GWFDLV--x-IndSp4,1017
 tico/utils/logging.py,sha256=IlbBWscsaHidI0dNqro1HEXAbIcbkR3BD5ukLy2m95k,1286
 tico/utils/model.py,sha256=Uqc92AnJXQ2pbvctS2z2F3Ku3yNrwXZ9O33hZVis7is,1250
-tico/utils/padding.py,sha256=
+tico/utils/padding.py,sha256=jyNhGmlLZfruWZ6n5hll8RZOFg85iCZP8OJqnHGS97g,3293
 tico/utils/passes.py,sha256=kGmDe__5cPaO6i5EDAoXSVe6yXEoX9hAny4ROb3ZEmQ,2409
 tico/utils/register_custom_op.py,sha256=3-Yl6iYmx1qQA2igNHt4hYhQhQMkdPb7gF50LIY8yvc,27350
 tico/utils/serialize.py,sha256=AQXMBOLu-Kg2Rn-qbqsAtHndjZAZIavlKA0QFgJREHM,1420
@@ -199,9 +200,9 @@ tico/utils/mx/__init__.py,sha256=IO6FP_xYbGy0dW0HL26GXD3ouxARaxCK7bz9dn4blPQ,26
 tico/utils/mx/elemwise_ops.py,sha256=V6glyAHsVR1joqpsgnNytatCD_ew92xNWZ19UFDoMTA,10281
 tico/utils/mx/formats.py,sha256=uzNWyu-1onUlwQfX5cZ6fZSUfHMRqorper7_T1k3jfk,3404
 tico/utils/mx/mx_ops.py,sha256=RcfUTYVi-wilGB2sC35OeARdwDqnixv7dG5iyZ-fQT8,8555
-tico-0.1.0.
-tico-0.1.0.
-tico-0.1.0.
-tico-0.1.0.
-tico-0.1.0.
-tico-0.1.0.
+tico-0.1.0.dev250703.dist-info/LICENSE,sha256=kp4JLII7bzRhPb0CPD5XTDZMh22BQ7h3k3B7t8TiSbw,12644
+tico-0.1.0.dev250703.dist-info/METADATA,sha256=b1B8214FjIYVG6Z4kL5NDM0EN-jksdYbAtiViQ-hGtU,8846
+tico-0.1.0.dev250703.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
+tico-0.1.0.dev250703.dist-info/entry_points.txt,sha256=kBKYSS_IYrSXmUYevmmepqIVPScq5vF8ulQRu3I_Zf0,59
+tico-0.1.0.dev250703.dist-info/top_level.txt,sha256=oqs7UPoNSKZEwqsX8B-KAWdQwfAa7i60pbxW_Jk7P3w,5
+tico-0.1.0.dev250703.dist-info/RECORD,,
{tico-0.1.0.dev250701.dist-info → tico-0.1.0.dev250703.dist-info}/LICENSE
File without changes
{tico-0.1.0.dev250701.dist-info → tico-0.1.0.dev250703.dist-info}/WHEEL
File without changes
{tico-0.1.0.dev250701.dist-info → tico-0.1.0.dev250703.dist-info}/entry_points.txt
File without changes
{tico-0.1.0.dev250701.dist-info → tico-0.1.0.dev250703.dist-info}/top_level.txt
File without changes