ai-edge-torch-nightly 0.5.0.dev20250504__py3-none-any.whl → 0.5.0.dev20250505__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ai_edge_torch/examples/__init__.py +14 -0
- ai_edge_torch/examples/selfie_segmentation/__init__.py +14 -0
- ai_edge_torch/examples/selfie_segmentation/model.py +584 -0
- ai_edge_torch/version.py +1 -1
- {ai_edge_torch_nightly-0.5.0.dev20250504.dist-info → ai_edge_torch_nightly-0.5.0.dev20250505.dist-info}/METADATA +1 -1
- {ai_edge_torch_nightly-0.5.0.dev20250504.dist-info → ai_edge_torch_nightly-0.5.0.dev20250505.dist-info}/RECORD +9 -6
- {ai_edge_torch_nightly-0.5.0.dev20250504.dist-info → ai_edge_torch_nightly-0.5.0.dev20250505.dist-info}/LICENSE +0 -0
- {ai_edge_torch_nightly-0.5.0.dev20250504.dist-info → ai_edge_torch_nightly-0.5.0.dev20250505.dist-info}/WHEEL +0 -0
- {ai_edge_torch_nightly-0.5.0.dev20250504.dist-info → ai_edge_torch_nightly-0.5.0.dev20250505.dist-info}/top_level.txt +0 -0
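The substantive change in this nightly is the new selfie-segmentation example package. As a rough orientation (not part of the diff itself), the sketch below shows how such an example module would typically be lowered with the ai_edge_torch converter; it assumes the standard ai_edge_torch.convert()/export() API, a 256x256 RGB input inferred from the pooling kernel sizes in model.py, and a hypothetical checkpoint path.

# Sketch only: converting the new example model with ai-edge-torch.
# Assumptions: ai_edge_torch.convert()/export() as documented for the package,
# a 256x256 RGB input (inferred from the model's pooling kernels), and a
# hypothetical checkpoint path.
import torch
import ai_edge_torch
from ai_edge_torch.examples.selfie_segmentation import model as selfie_model

net = selfie_model.SelfieSegmentation().eval()
# net.load_from_pth("selfie_segmentation.pth")  # hypothetical checkpoint path

sample_args = (torch.randn(1, 3, 256, 256),)
edge_model = ai_edge_torch.convert(net, sample_args)  # trace and lower the module
edge_model.export("selfie_segmentation.tflite")  # write the .tflite flatbuffer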
ai_edge_torch/examples/__init__.py
ADDED
@@ -0,0 +1,14 @@
+# Copyright 2025 The AI Edge Torch Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
ai_edge_torch/examples/selfie_segmentation/__init__.py
ADDED
@@ -0,0 +1,14 @@
+# Copyright 2025 The AI Edge Torch Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
ai_edge_torch/examples/selfie_segmentation/model.py
ADDED
@@ -0,0 +1,584 @@
+# Copyright 2025 The AI Edge Torch Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""MediaPipe Selfie segmentation model ported to PyTorch.
+
+# pylint: disable=line-too-long
+First published in:
+-
+https://ai.google.dev/edge/mediapipe/solutions/vision/image_segmenter#selfie-model
+
+Model Card
+-
+https://storage.googleapis.com/mediapipe-assets/Model%20Card%20MediaPipe%20Selfie%20Segmentation.pdf
+# pylint: enable=line-too-long
+"""
+
+import torch
+from torch import nn
+
+
+def _DepthwiseConv2D(**kwargs):
+  """Short-cut for creating a depthwise convolution."""
+  return nn.Conv2d(groups=kwargs["in_channels"], **kwargs)
+
+
+class SelfieSegmentation(nn.Module):
+  """Selfie segmentation model."""
+
+  def __init__(self):
+    super(SelfieSegmentation, self).__init__()
+    self.conv2d = nn.Conv2d(
+        in_channels=3,
+        out_channels=16,
+        kernel_size=(3, 3),
+        stride=(2, 2),
+        padding=1,
+    )
+    self.hardswish = nn.Hardswish()
+    self.conv2d_1 = nn.Conv2d(
+        in_channels=16,
+        out_channels=16,
+        kernel_size=(1, 1),
+    )
+    self.relu = nn.ReLU()
+    self.depthwise_conv2d = _DepthwiseConv2D(
+        in_channels=16,
+        out_channels=16,
+        kernel_size=(3, 3),
+        stride=(2, 2),
+        padding=1,
+    )
+    self.average_pooling2d = nn.AvgPool2d(
+        kernel_size=(64, 64),
+        stride=(64, 64),
+        padding=0,
+    )
+    self.conv2d_2 = nn.Conv2d(
+        in_channels=16,
+        out_channels=8,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.conv2d_3 = nn.Conv2d(
+        in_channels=8,
+        out_channels=16,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.activation = nn.Sigmoid()
+    self.conv2d_4 = nn.Conv2d(
+        in_channels=16,
+        out_channels=16,
+        kernel_size=(1, 1),
+    )
+    self.conv2d_5 = nn.Conv2d(
+        in_channels=16,
+        out_channels=72,
+        kernel_size=(1, 1),
+    )
+    self.depthwise_conv2d_1 = _DepthwiseConv2D(
+        in_channels=72,
+        out_channels=72,
+        kernel_size=(3, 3),
+        stride=(2, 2),
+        padding=1,
+    )
+    self.conv2d_6 = nn.Conv2d(
+        in_channels=72,
+        out_channels=24,
+        kernel_size=(1, 1),
+    )
+    self.conv2d_7 = nn.Conv2d(
+        in_channels=24,
+        out_channels=88,
+        kernel_size=(1, 1),
+    )
+    self.depthwise_conv2d_2 = _DepthwiseConv2D(
+        in_channels=88,
+        out_channels=88,
+        kernel_size=(3, 3),
+        stride=(1, 1),
+        padding=1,
+    )
+    self.conv2d_8 = nn.Conv2d(
+        in_channels=88,
+        out_channels=24,
+        kernel_size=(1, 1),
+    )
+    self.conv2d_9 = nn.Conv2d(
+        in_channels=24,
+        out_channels=96,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.depthwise_conv2d_3 = _DepthwiseConv2D(
+        in_channels=96,
+        out_channels=96,
+        kernel_size=(5, 5),
+        stride=(2, 2),
+        padding=2,
+    )
+    self.average_pooling2d_1 = nn.AvgPool2d(
+        kernel_size=(16, 16), stride=(16, 16), padding=0
+    )
+    self.conv2d_10 = nn.Conv2d(
+        in_channels=96,
+        out_channels=24,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.conv2d_11 = nn.Conv2d(
+        in_channels=24,
+        out_channels=96,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.conv2d_12 = nn.Conv2d(
+        in_channels=96,
+        out_channels=32,
+        kernel_size=(1, 1),
+    )
+    self.conv2d_13 = nn.Conv2d(
+        in_channels=32,
+        out_channels=128,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.depthwise_conv2d_4 = _DepthwiseConv2D(
+        in_channels=128,
+        out_channels=128,
+        kernel_size=(5, 5),
+        stride=(1, 1),
+        padding=2,
+    )
+    self.average_pooling2d_2 = nn.AvgPool2d(
+        kernel_size=(16, 16), stride=(16, 16), padding=0
+    )
+    self.conv2d_14 = nn.Conv2d(
+        in_channels=128,
+        out_channels=32,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.conv2d_15 = nn.Conv2d(
+        in_channels=32,
+        out_channels=128,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.conv2d_16 = nn.Conv2d(
+        in_channels=128,
+        out_channels=32,
+        kernel_size=(1, 1),
+    )
+    self.conv2d_17 = nn.Conv2d(
+        in_channels=32,
+        out_channels=128,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.depthwise_conv2d_5 = _DepthwiseConv2D(
+        in_channels=128,
+        out_channels=128,
+        kernel_size=(5, 5),
+        stride=(1, 1),
+        padding=2,
+    )
+    self.average_pooling2d_3 = nn.AvgPool2d(
+        kernel_size=(16, 16), stride=(16, 16), padding=0
+    )
+    self.conv2d_18 = nn.Conv2d(
+        in_channels=128,
+        out_channels=32,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.conv2d_19 = nn.Conv2d(
+        in_channels=32,
+        out_channels=128,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.conv2d_20 = nn.Conv2d(
+        in_channels=128,
+        out_channels=32,
+        kernel_size=(1, 1),
+    )
+    self.conv2d_21 = nn.Conv2d(
+        in_channels=32,
+        out_channels=96,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.depthwise_conv2d_6 = _DepthwiseConv2D(
+        in_channels=96,
+        out_channels=96,
+        kernel_size=(5, 5),
+        stride=(1, 1),
+        padding=2,
+    )
+    self.average_pooling2d_4 = nn.AvgPool2d(
+        kernel_size=(16, 16), stride=(16, 16), padding=0
+    )
+    self.conv2d_22 = nn.Conv2d(
+        in_channels=96,
+        out_channels=24,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.conv2d_23 = nn.Conv2d(
+        in_channels=24,
+        out_channels=96,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.conv2d_24 = nn.Conv2d(
+        in_channels=96,
+        out_channels=32,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.conv2d_25 = nn.Conv2d(
+        in_channels=32,
+        out_channels=96,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.depthwise_conv2d_7 = _DepthwiseConv2D(
+        in_channels=96,
+        out_channels=96,
+        kernel_size=(5, 5),
+        stride=(1, 1),
+        padding=2,
+    )
+    self.average_pooling2d_5 = nn.AvgPool2d(
+        kernel_size=(16, 16), stride=(16, 16), padding=0
+    )
+    self.conv2d_26 = nn.Conv2d(
+        in_channels=96,
+        out_channels=24,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.conv2d_27 = nn.Conv2d(
+        in_channels=24,
+        out_channels=96,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.conv2d_28 = nn.Conv2d(
+        in_channels=96,
+        out_channels=32,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.average_pooling2d_6 = nn.AvgPool2d(
+        kernel_size=(16, 16), stride=(16, 16), padding=0
+    )
+    self.conv2d_29 = nn.Conv2d(
+        in_channels=32,
+        out_channels=128,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.conv2d_30 = nn.Conv2d(
+        in_channels=32,
+        out_channels=128,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.conv2d_31 = nn.Conv2d(
+        in_channels=128,
+        out_channels=24,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.average_pooling2d_7 = nn.AvgPool2d(
+        kernel_size=(32, 32), stride=(32, 32), padding=0
+    )
+    self.conv2d_32 = nn.Conv2d(
+        in_channels=24,
+        out_channels=24,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.conv2d_33 = nn.Conv2d(
+        in_channels=24,
+        out_channels=24,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.conv2d_34 = nn.Conv2d(
+        in_channels=24,
+        out_channels=24,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.depthwise_conv2d_8 = _DepthwiseConv2D(
+        in_channels=24,
+        out_channels=24,
+        kernel_size=(3, 3),
+        stride=(1, 1),
+        padding=1,
+    )
+    self.conv2d_35 = nn.Conv2d(
+        in_channels=24,
+        out_channels=16,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.average_pooling2d_8 = nn.AvgPool2d(
+        kernel_size=(64, 64), stride=(64, 64), padding=0
+    )
+    self.conv2d_36 = nn.Conv2d(
+        in_channels=16,
+        out_channels=16,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.conv2d_37 = nn.Conv2d(
+        in_channels=16,
+        out_channels=16,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.conv2d_38 = nn.Conv2d(
+        in_channels=16,
+        out_channels=16,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.depthwise_conv2d_9 = _DepthwiseConv2D(
+        in_channels=16,
+        out_channels=16,
+        kernel_size=(3, 3),
+        stride=(1, 1),
+        padding=1,
+    )
+    self.conv2d_39 = nn.Conv2d(
+        in_channels=16,
+        out_channels=16,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.average_pooling2d_9 = nn.AvgPool2d(
+        kernel_size=(128, 128), stride=(128, 128), padding=0
+    )
+    self.conv2d_40 = nn.Conv2d(
+        in_channels=16,
+        out_channels=16,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.conv2d_41 = nn.Conv2d(
+        in_channels=16,
+        out_channels=16,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.conv2d_42 = nn.Conv2d(
+        in_channels=16,
+        out_channels=16,
+        kernel_size=(1, 1),
+        stride=(1, 1),
+        padding=0,
+    )
+    self.depthwise_conv2d_10 = _DepthwiseConv2D(
+        in_channels=16,
+        out_channels=16,
+        kernel_size=(3, 3),
+        stride=(1, 1),
+        padding=1,
+    )
+    self.segment = nn.ConvTranspose2d(16, 1, 2, 2, 0)
+    self.up_sampling2d = nn.Upsample(scale_factor=2, mode="bilinear")
+
+  def forward(self, image):
+    conv2d = self.conv2d(image)
+    h_swish = self.hardswish(conv2d)
+    conv2d_1 = self.conv2d_1(h_swish)
+    re_lu = self.relu(conv2d_1)
+    depthwise_conv2d = self.depthwise_conv2d(re_lu)
+    re_lu_1 = self.relu(depthwise_conv2d)
+    average_pooling2d = self.average_pooling2d(re_lu_1)
+    conv2d_2 = self.conv2d_2(average_pooling2d)
+    re_lu_2 = self.relu(conv2d_2)
+    conv2d_3 = self.conv2d_3(re_lu_2)
+    activation = self.activation(conv2d_3)
+    multiply = re_lu_1 * activation
+    conv2d_4 = self.conv2d_4(multiply)
+    conv2d_5 = self.conv2d_5(conv2d_4)
+    re_lu_3 = self.relu(conv2d_5)
+    depthwise_conv2d_1 = self.depthwise_conv2d_1(re_lu_3)
+    re_lu_4 = self.relu(depthwise_conv2d_1)
+    conv2d_6 = self.conv2d_6(re_lu_4)
+    conv2d_7 = self.conv2d_7(conv2d_6)
+    re_lu_5 = self.relu(conv2d_7)
+    depthwise_conv2d_2 = self.depthwise_conv2d_2(re_lu_5)
+    re_lu_6 = self.relu(depthwise_conv2d_2)
+    conv2d_8 = self.conv2d_8(re_lu_6)
+    add = conv2d_8 + conv2d_6
+    conv2d_9 = self.conv2d_9(add)
+    h_swish_1 = self.hardswish(conv2d_9)
+    depthwise_conv2d_3 = self.depthwise_conv2d_3(h_swish_1)
+    h_swish_2 = self.hardswish(depthwise_conv2d_3)
+    average_pooling2d_1 = self.average_pooling2d_1(h_swish_2)
+    conv2d_10 = self.conv2d_10(average_pooling2d_1)
+    re_lu_7 = self.relu(conv2d_10)
+    conv2d_11 = self.conv2d_11(re_lu_7)
+    activation_1 = self.activation(conv2d_11)
+    multiply_1 = h_swish_2 * activation_1
+    conv2d_12 = self.conv2d_12(multiply_1)
+    conv2d_13 = self.conv2d_13(conv2d_12)
+    h_swish_3 = self.hardswish(conv2d_13)
+    depthwise_conv2d_4 = self.depthwise_conv2d_4(h_swish_3)
+    h_swish_4 = self.hardswish(depthwise_conv2d_4)
+    average_pooling2d_2 = self.average_pooling2d_2(h_swish_4)
+    conv2d_14 = self.conv2d_14(average_pooling2d_2)
+    re_lu_8 = self.relu(conv2d_14)
+    conv2d_15 = self.conv2d_15(re_lu_8)
+    activation_2 = self.activation(conv2d_15)
+    multiply_2 = h_swish_4 * activation_2
+    conv2d_16 = self.conv2d_16(multiply_2)
+    add_1 = conv2d_16 + conv2d_12
+    conv2d_17 = self.conv2d_17(add_1)
+    h_swish_5 = self.hardswish(conv2d_17)
+    depthwise_conv2d_5 = self.depthwise_conv2d_5(h_swish_5)
+    h_swish_6 = self.hardswish(depthwise_conv2d_5)
+    average_pooling2d_3 = self.average_pooling2d_3(h_swish_6)
+    conv2d_18 = self.conv2d_18(average_pooling2d_3)
+    re_lu_9 = self.relu(conv2d_18)
+    conv2d_19 = self.conv2d_19(re_lu_9)
+    activation_3 = self.activation(conv2d_19)
+    multiply_3 = h_swish_6 * activation_3
+    conv2d_20 = self.conv2d_20(multiply_3)
+    add_2 = conv2d_20 + add_1
+    conv2d_21 = self.conv2d_21(add_2)
+    h_swish_7 = self.hardswish(conv2d_21)
+    depthwise_conv2d_6 = self.depthwise_conv2d_6(h_swish_7)
+    h_swish_8 = self.hardswish(depthwise_conv2d_6)
+    average_pooling2d_4 = self.average_pooling2d_4(h_swish_8)
+    conv2d_22 = self.conv2d_22(average_pooling2d_4)
+    re_lu_10 = self.relu(conv2d_22)
+    conv2d_23 = self.conv2d_23(re_lu_10)
+    activation_4 = self.activation(conv2d_23)
+    multiply_4 = h_swish_8 * activation_4
+    conv2d_24 = self.conv2d_24(multiply_4)
+    add_3 = conv2d_24 + add_2
+    conv2d_25 = self.conv2d_25(add_3)
+    h_swish_9 = self.hardswish(conv2d_25)
+    depthwise_conv2d_7 = self.depthwise_conv2d_7(h_swish_9)
+    h_swish_10 = self.hardswish(depthwise_conv2d_7)
+    average_pooling2d_5 = self.average_pooling2d_5(h_swish_10)
+    conv2d_26 = self.conv2d_26(average_pooling2d_5)
+    re_lu_11 = self.relu(conv2d_26)
+    conv2d_27 = self.conv2d_27(re_lu_11)
+    activation_5 = self.activation(conv2d_27)
+    multiply_5 = h_swish_10 * activation_5
+    conv2d_28 = self.conv2d_28(multiply_5)
+    add_4 = conv2d_28 + add_3
+    average_pooling2d_6 = self.average_pooling2d_6(add_4)
+    conv2d_29 = self.conv2d_29(add_4)
+    conv2d_30 = self.conv2d_30(average_pooling2d_6)
+    re_lu_12 = self.relu(conv2d_29)
+    activation_6 = self.activation(conv2d_30)
+    multiply_6 = re_lu_12 * activation_6
+    up_sampling2d = self.up_sampling2d(multiply_6)
+    conv2d_31 = self.conv2d_31(up_sampling2d)
+    add_5 = add + conv2d_31
+    average_pooling2d_7 = self.average_pooling2d_7(add_5)
+    conv2d_32 = self.conv2d_32(average_pooling2d_7)
+    re_lu_13 = self.relu(conv2d_32)
+    conv2d_33 = self.conv2d_33(re_lu_13)
+    activation_7 = self.activation(conv2d_33)
+    multiply_7 = add * activation_7
+    add_6 = multiply_7 + conv2d_31
+    conv2d_34 = self.conv2d_34(add_6)
+    re_lu_14 = self.relu(conv2d_34)
+    depthwise_conv2d_8 = self.depthwise_conv2d_8(re_lu_14)
+    re_lu_15 = self.relu(depthwise_conv2d_8)
+    add_7 = re_lu_14 + re_lu_15
+    up_sampling2d_1 = self.up_sampling2d(add_7)
+    conv2d_35 = self.conv2d_35(up_sampling2d_1)
+    add_8 = conv2d_4 + conv2d_35
+    average_pooling2d_8 = self.average_pooling2d_8(add_8)
+    conv2d_36 = self.conv2d_36(average_pooling2d_8)
+    re_lu_16 = self.relu(conv2d_36)
+    conv2d_37 = self.conv2d_37(re_lu_16)
+    activation_8 = self.activation(conv2d_37)
+    multiply_8 = conv2d_4 + activation_8
+    add_9 = multiply_8 + conv2d_35
+    conv2d_38 = self.conv2d_38(add_9)
+    re_lu_17 = self.relu(conv2d_38)
+    depthwise_conv2d_9 = self.depthwise_conv2d_9(re_lu_17)
+    re_lu_18 = self.relu(depthwise_conv2d_9)
+    add_10 = re_lu_17 + re_lu_18
+    up_sampling2d_2 = self.up_sampling2d(add_10)
+    conv2d_39 = self.conv2d_39(up_sampling2d_2)
+    add_11 = h_swish + conv2d_39
+    average_pooling2d_9 = self.average_pooling2d_9(add_11)
+    conv2d_40 = self.conv2d_40(average_pooling2d_9)
+    re_lu_19 = self.relu(conv2d_40)
+    conv2d_41 = self.conv2d_41(re_lu_19)
+    activation_9 = self.activation(conv2d_41)
+    multiply_9 = h_swish * activation_9
+    add_12 = multiply_9 + conv2d_39
+    conv2d_42 = self.conv2d_42(add_12)
+    re_lu_20 = self.relu(conv2d_42)
+    depthwise_conv2d_10 = self.depthwise_conv2d_10(re_lu_20)
+    re_lu_21 = self.relu(depthwise_conv2d_10)
+    add_13 = re_lu_20 + re_lu_21
+    segment = self.segment(add_13)
+    return self.activation(segment)
+
+  def load_from_pth(self, pth_path: str):
+    """Loads the model from a pth file.
+
+    Args:
+      pth_path: The path to the pth file to load the model from.
+    """
+    self.load_state_dict(torch.load(pth_path))
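For readers skimming the graph above, a minimal shape smoke test (a sketch, not part of the wheel) is shown below. It assumes a 256x256 RGB input, which is the resolution implied by the 64x64, 32x32, 16x16 and 128x128 average-pooling kernels, and checks that the network returns a single-channel sigmoid mask at the input resolution.

# Minimal smoke test for SelfieSegmentation (sketch, assuming a 256x256 input).
import torch
from ai_edge_torch.examples.selfie_segmentation.model import SelfieSegmentation

net = SelfieSegmentation().eval()
with torch.no_grad():
  mask = net(torch.randn(1, 3, 256, 256))  # NCHW float input
assert mask.shape == (1, 1, 256, 256)  # per-pixel probabilities in [0, 1]

With randomly initialized weights the mask is meaningless; a checkpoint would need to be loaded via load_from_pth for real segmentation output.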
ai_edge_torch/version.py
CHANGED
(version string bumped to 0.5.0.dev20250505; hunk body not captured in this extract)

{ai_edge_torch_nightly-0.5.0.dev20250504.dist-info → ai_edge_torch_nightly-0.5.0.dev20250505.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ai-edge-torch-nightly
-Version: 0.5.0.dev20250504
+Version: 0.5.0.dev20250505
 Summary: Supporting PyTorch models with the Google AI Edge TFLite runtime.
 Home-page: https://github.com/google-ai-edge/ai-edge-torch
 Keywords: On-Device ML,AI,Google,TFLite,PyTorch,LLMs,GenAI
{ai_edge_torch_nightly-0.5.0.dev20250504.dist-info → ai_edge_torch_nightly-0.5.0.dev20250505.dist-info}/RECORD
CHANGED
@@ -2,7 +2,7 @@ ai_edge_torch/__init__.py,sha256=8sPR_5uXJA4NEE0nIwNdSl-ADOJEoR8hAgYvBQDY70Y,120
 ai_edge_torch/_config.py,sha256=AiqhbcheF7j_ozIGDLC89k1we95aVgFDa-tR6h7UI0s,2529
 ai_edge_torch/conftest.py,sha256=r0GTrhMRhlmOGrrkvumHN8hkmyug6WvF60vWq8wRIBI,758
 ai_edge_torch/model.py,sha256=wxjSFq_rBSxSqbUE8E8EJTCkgvgaRLjq_ZuAM-IZpCU,5606
-ai_edge_torch/version.py,sha256=
+ai_edge_torch/version.py,sha256=UlppWeDH59T0hsvTeCBslgOAFD7YHxrj6bow0oDL4Z8,706
 ai_edge_torch/_convert/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
 ai_edge_torch/_convert/conversion.py,sha256=QVugYVfbyaeBgSKKbhFzHG5oXA7t3M-40JcpcdSu6W8,5436
 ai_edge_torch/_convert/conversion_utils.py,sha256=Sr8qXVcTwc-ZnZmK7yxVrIOOp1S_vNrwzC0zUvLTI2o,2160
@@ -36,6 +36,9 @@ ai_edge_torch/debug/utils.py,sha256=vOAL4t6Lj47uhKapfEsc_WHmvwew3eKO9hSJyzvPXnU,
 ai_edge_torch/debug/test/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
 ai_edge_torch/debug/test/test_culprit.py,sha256=fRN-8jJicawJ2mhPRQNAQUZ8AdGg-s0tYMXyhnLAlWw,3875
 ai_edge_torch/debug/test/test_search_model.py,sha256=-RuU0QsjqkfzZF2IbeA55MoeVOawhbgiSEu96PmioPE,1668
+ai_edge_torch/examples/__init__.py,sha256=JaAnrFoXTl3RJX97XspklkTyqOHVyAgRJsZtzNDd10c,671
+ai_edge_torch/examples/selfie_segmentation/__init__.py,sha256=JaAnrFoXTl3RJX97XspklkTyqOHVyAgRJsZtzNDd10c,671
+ai_edge_torch/examples/selfie_segmentation/model.py,sha256=5otCH1MzNgSP0fikYq53hgiO1F0ZN1SCVzOIo7cVAcA,17136
 ai_edge_torch/experimental/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
 ai_edge_torch/fx_infra/__init__.py,sha256=APjkSqEfwDxcnI8k53rGi3Ef-G2L-M8fdaPGpxXtuiI,1347
 ai_edge_torch/fx_infra/_canonicalize_pass.py,sha256=GDRoDdPVQw--QQFTT5J_C3TVuphL31m6K6F1-67SE4s,1097
@@ -248,8 +251,8 @@ ai_edge_torch/testing/__init__.py,sha256=_yGgvnBZWb7T3IN3mc4x1sS4vM96HZwM8pwIcPG
 ai_edge_torch/testing/export.py,sha256=k5mGDGzwc23Z4zaIVDs8CNh-oOt64gsf9MS9NjhbPy4,3293
 ai_edge_torch/testing/model_coverage/__init__.py,sha256=5P8J6Zk5YYtDvTBucFvB9NGSRI7Gw_24WnrbhXgycEE,765
 ai_edge_torch/testing/model_coverage/model_coverage.py,sha256=UPB448aMDUyC0HNYVqio2rcJPnDN0tBQMP08J6vPYew,4718
-ai_edge_torch_nightly-0.5.0.
-ai_edge_torch_nightly-0.5.0.
-ai_edge_torch_nightly-0.5.0.
-ai_edge_torch_nightly-0.5.0.
-ai_edge_torch_nightly-0.5.0.
+ai_edge_torch_nightly-0.5.0.dev20250505.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+ai_edge_torch_nightly-0.5.0.dev20250505.dist-info/METADATA,sha256=nKqsuOLL8rs0NeA5dY1wYk69KHwbYRUpUWTtnAT0ELU,2051
+ai_edge_torch_nightly-0.5.0.dev20250505.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+ai_edge_torch_nightly-0.5.0.dev20250505.dist-info/top_level.txt,sha256=5KXRaF2hwkApYxf7Y8y_tVb9aulGTlbOoNdbx1aKRkE,14
+ai_edge_torch_nightly-0.5.0.dev20250505.dist-info/RECORD,,