mttf-1.0.2-py3-none-any.whl → mttf-1.0.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mttf might be problematic.
- mt/tf/version.py +1 -1
- {mttf-1.0.2.dist-info → mttf-1.0.3.dist-info}/METADATA +1 -1
- {mttf-1.0.2.dist-info → mttf-1.0.3.dist-info}/RECORD +12 -13
- mt/tf/keras_applications/efficientnet_v2.py +0 -1297
- {mttf-1.0.2.data → mttf-1.0.3.data}/scripts/dmt_pipi.sh +0 -0
- {mttf-1.0.2.data → mttf-1.0.3.data}/scripts/dmt_twineu.sh +0 -0
- {mttf-1.0.2.data → mttf-1.0.3.data}/scripts/twine_trusted.py +0 -0
- {mttf-1.0.2.data → mttf-1.0.3.data}/scripts/wml_nexus.py +0 -0
- {mttf-1.0.2.data → mttf-1.0.3.data}/scripts/wml_pipi.sh +0 -0
- {mttf-1.0.2.data → mttf-1.0.3.data}/scripts/wml_twineu.sh +0 -0
- {mttf-1.0.2.dist-info → mttf-1.0.3.dist-info}/LICENSE +0 -0
- {mttf-1.0.2.dist-info → mttf-1.0.3.dist-info}/WHEEL +0 -0
- {mttf-1.0.2.dist-info → mttf-1.0.3.dist-info}/top_level.txt +0 -0
mt/tf/version.py CHANGED

{mttf-1.0.2.dist-info → mttf-1.0.3.dist-info}/RECORD CHANGED

@@ -2,9 +2,8 @@ mt/tf/__init__.py,sha256=M8xiJNdrAUJZgiZTOQOdfkehjO-CYzGpoxh5HVGBkms,338
 mt/tf/init.py,sha256=Hbp0-daCDTUEkQiDwNpbv2qqP7tr9qCyBFUafMklkos,1298
 mt/tf/mttf_version.py,sha256=ha53i-H9pE-crufFttUECgXHwPvam07zMKzApUts1Gs,206
 mt/tf/utils.py,sha256=Copl5VM0PpuFUchK-AcBuGO6QitDwHcEs4FruZb2GAI,2460
-mt/tf/version.py,sha256=
+mt/tf/version.py,sha256=KUbBWU0KLSk6dWDvmoQItmb-bx2Va7p7yrB93zIS5Q0,206
 mt/tf/keras_applications/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-mt/tf/keras_applications/efficientnet_v2.py,sha256=DYXP567HNUp2SO0_pXUfzO9-WqlTiGmXApAawkKaetc,39204
 mt/tf/keras_applications/mobilenet_v3_split.py,sha256=GDEBHo-blR1Q3N8R89USZ8zDP0nq_oLzPNAnoIgkzgo,19305
 mt/tf/keras_applications/mobilevit.py,sha256=FR1eMN4xg-yZ8Orr4ALOYmzCmkoBu7cVgTaK5sc4gsc,9806
 mt/tf/keras_layers/__init__.py,sha256=fSfhKmDz4mIHUYXgRrditWY_aAkgWGM_KjmAilOauXg,578
@@ -20,14 +19,14 @@ mt/tfc/__init__.py,sha256=WnGNywMCwmmhWQaGqconT5f9n6IE5jDGflbD92E5iH0,8108
 mt/tfg/__init__.py,sha256=6Ly2QImAyQTsg_ZszuAuK_L2n56v89Cix9yYmMVk0CM,304
 mt/tfp/__init__.py,sha256=AQkGCkmDRwswEt3qoOSpxe-fZekx78sHHBs2ZVz33gc,383
 mt/tfp/real_nvp.py,sha256=U9EmkXGqFcvtS2yeh5_RgbKlVKKlGFGklAb7Voyazz4,4440
-mttf-1.0.
-mttf-1.0.
-mttf-1.0.
-mttf-1.0.
-mttf-1.0.
-mttf-1.0.
-mttf-1.0.
-mttf-1.0.
-mttf-1.0.
-mttf-1.0.
-mttf-1.0.
+mttf-1.0.3.data/scripts/dmt_pipi.sh,sha256=VG5FvmtuR7_v_zhd52hI0mbyiiPCW_TWIpB_Oce0zm4,145
+mttf-1.0.3.data/scripts/dmt_twineu.sh,sha256=ER8Z1it7hVdPtkZ0TA0SkzwqIQSUlFa_sO95H07ImTI,159
+mttf-1.0.3.data/scripts/twine_trusted.py,sha256=ZEvTqT5ydAiOJBPQaUOMsbxX7qcGCjgGQeglzOwYLFI,2196
+mttf-1.0.3.data/scripts/wml_nexus.py,sha256=geZih8al6iJhkMRYhIO7lG_oHIGYTddl0EGBPgNsVR4,1377
+mttf-1.0.3.data/scripts/wml_pipi.sh,sha256=wDX4_7oKcYbxyKJz-wWvbkUY0NEuBuf0MpBfpmjZk60,133
+mttf-1.0.3.data/scripts/wml_twineu.sh,sha256=FPBvsXM-81P11G2BCJtxVUBervfHQYYrXf0GjZeMt7w,146
+mttf-1.0.3.dist-info/LICENSE,sha256=e_JtcszdGZ2ZGfjcymTGrcxFj_9XPicZOVtnsrPvruk,1070
+mttf-1.0.3.dist-info/METADATA,sha256=990sEHjRNTJFZywaHis0ABqBhzfh23gQ1iU3rsAi4Us,416
+mttf-1.0.3.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+mttf-1.0.3.dist-info/top_level.txt,sha256=WcqGFu9cV7iMZg09iam8eNxUvGpLSKKF2Iubf6SJVOo,3
+mttf-1.0.3.dist-info/RECORD,,
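Each RECORD row above follows the standard wheel convention (PEP 376/427): `path,sha256=<digest>,<size>`, where the digest is the URL-safe base64-encoded SHA-256 of the file with trailing `=` padding stripped, and the RECORD file itself is listed with empty hash and size fields. A minimal sketch of how one might re-check these rows against a downloaded wheel — `verify_wheel_record` is a hypothetical helper name, not part of mttf:

import base64
import csv
import hashlib
import zipfile

def verify_wheel_record(wheel_path):
    # Hypothetical helper: re-check every RECORD row of a wheel archive.
    with zipfile.ZipFile(wheel_path) as wheel:
        record_name = next(
            n for n in wheel.namelist() if n.endswith(".dist-info/RECORD"))
        rows = csv.reader(
            wheel.read(record_name).decode("utf-8").splitlines())
        for path, hash_spec, size in rows:
            if not hash_spec:  # the RECORD row itself carries no hash/size
                continue
            algo, _, expected = hash_spec.partition("=")
            data = wheel.read(path)
            actual = base64.urlsafe_b64encode(
                hashlib.new(algo, data).digest()).decode().rstrip("=")
            if actual != expected or len(data) != int(size):
                print(f"MISMATCH: {path}")

verify_wheel_record("mttf-1.0.3-py3-none-any.whl")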
mt/tf/keras_applications/efficientnet_v2.py DELETED

@@ -1,1297 +0,0 @@
-# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-# pylint: disable=invalid-name
-# pylint: disable=missing-docstring
-"""EfficientNet V2 models for Keras.
-
-Reference:
-- [EfficientNetV2: Smaller Models and Faster Training](
-    https://arxiv.org/abs/2104.00298) (ICML 2021)
-"""
-
-import copy
-import math
-
-from keras import backend
-from keras import layers
-from keras.applications import imagenet_utils
-from keras.engine import training
-from keras.utils import data_utils
-from keras.utils import layer_utils
-import tensorflow.compat.v2 as tf
-# pylint: disable=g-direct-tensorflow-import
-from tensorflow.python.util.tf_export import keras_export
-
-BASE_WEIGHTS_PATH = "https://storage.googleapis.com/tensorflow/keras-applications/efficientnet_v2/"
-
-WEIGHTS_HASHES = {
-    "b0": ("21ecbf6da12460d5c40bb2f29ceb2188",
-           "893217f2bb855e2983157299931e43ff"),
-    "b1": ("069f0534ff22adf035c89e2d9547a9dc",
-           "0e80663031ca32d657f9caa404b6ec37"),
-    "b2": ("424e49f28180edbde1e94797771950a7",
-           "1dfe2e7a5d45b6632553a8961ea609eb"),
-    "b3": ("1f1fc43bd98a6e4fd8fdfd551e02c7a0",
-           "f6abf7b5849ac99a89b50dd3fd532856"),
-    "-s": ("e1d88a8495beba45748fedd0cecbe016",
-           "af0682fb74e8c54910f2d4393339c070"),
-    "-m": ("a3bf6aa3276309f4fc6a34aa114c95cd",
-           "1b8dc055df72dde80d614482840fe342"),
-    "-l": ("27e6d408b53c7ebc868fefa357689935",
-           "b0b66b5c863aef5b46e8608fe1711615"),
-}
-
-DEFAULT_BLOCKS_ARGS = {
-    "efficientnetv2-s": [{
-        "kernel_size": 3,
-        "num_repeat": 2,
-        "input_filters": 24,
-        "output_filters": 24,
-        "expand_ratio": 1,
-        "se_ratio": 0.0,
-        "strides": 1,
-        "conv_type": 1,
-    }, {
-        "kernel_size": 3,
-        "num_repeat": 4,
-        "input_filters": 24,
-        "output_filters": 48,
-        "expand_ratio": 4,
-        "se_ratio": 0.0,
-        "strides": 2,
-        "conv_type": 1,
-    }, {
-        "conv_type": 1,
-        "expand_ratio": 4,
-        "input_filters": 48,
-        "kernel_size": 3,
-        "num_repeat": 4,
-        "output_filters": 64,
-        "se_ratio": 0,
-        "strides": 2,
-    }, {
-        "conv_type": 0,
-        "expand_ratio": 4,
-        "input_filters": 64,
-        "kernel_size": 3,
-        "num_repeat": 6,
-        "output_filters": 128,
-        "se_ratio": 0.25,
-        "strides": 2,
-    }, {
-        "conv_type": 0,
-        "expand_ratio": 6,
-        "input_filters": 128,
-        "kernel_size": 3,
-        "num_repeat": 9,
-        "output_filters": 160,
-        "se_ratio": 0.25,
-        "strides": 1,
-    }, {
-        "conv_type": 0,
-        "expand_ratio": 6,
-        "input_filters": 160,
-        "kernel_size": 3,
-        "num_repeat": 15,
-        "output_filters": 256,
-        "se_ratio": 0.25,
-        "strides": 2,
-    }],
-    "efficientnetv2-m": [
-        {
-            "kernel_size": 3,
-            "num_repeat": 3,
-            "input_filters": 24,
-            "output_filters": 24,
-            "expand_ratio": 1,
-            "se_ratio": 0,
-            "strides": 1,
-            "conv_type": 1,
-        },
-        {
-            "kernel_size": 3,
-            "num_repeat": 5,
-            "input_filters": 24,
-            "output_filters": 48,
-            "expand_ratio": 4,
-            "se_ratio": 0,
-            "strides": 2,
-            "conv_type": 1,
-        },
-        {
-            "kernel_size": 3,
-            "num_repeat": 5,
-            "input_filters": 48,
-            "output_filters": 80,
-            "expand_ratio": 4,
-            "se_ratio": 0,
-            "strides": 2,
-            "conv_type": 1,
-        },
-        {
-            "kernel_size": 3,
-            "num_repeat": 7,
-            "input_filters": 80,
-            "output_filters": 160,
-            "expand_ratio": 4,
-            "se_ratio": 0.25,
-            "strides": 2,
-            "conv_type": 0,
-        },
-        {
-            "kernel_size": 3,
-            "num_repeat": 14,
-            "input_filters": 160,
-            "output_filters": 176,
-            "expand_ratio": 6,
-            "se_ratio": 0.25,
-            "strides": 1,
-            "conv_type": 0,
-        },
-        {
-            "kernel_size": 3,
-            "num_repeat": 18,
-            "input_filters": 176,
-            "output_filters": 304,
-            "expand_ratio": 6,
-            "se_ratio": 0.25,
-            "strides": 2,
-            "conv_type": 0,
-        },
-        {
-            "kernel_size": 3,
-            "num_repeat": 5,
-            "input_filters": 304,
-            "output_filters": 512,
-            "expand_ratio": 6,
-            "se_ratio": 0.25,
-            "strides": 1,
-            "conv_type": 0,
-        },
-    ],
-    "efficientnetv2-l": [
-        {
-            "kernel_size": 3,
-            "num_repeat": 4,
-            "input_filters": 32,
-            "output_filters": 32,
-            "expand_ratio": 1,
-            "se_ratio": 0,
-            "strides": 1,
-            "conv_type": 1,
-        },
-        {
-            "kernel_size": 3,
-            "num_repeat": 7,
-            "input_filters": 32,
-            "output_filters": 64,
-            "expand_ratio": 4,
-            "se_ratio": 0,
-            "strides": 2,
-            "conv_type": 1,
-        },
-        {
-            "kernel_size": 3,
-            "num_repeat": 7,
-            "input_filters": 64,
-            "output_filters": 96,
-            "expand_ratio": 4,
-            "se_ratio": 0,
-            "strides": 2,
-            "conv_type": 1,
-        },
-        {
-            "kernel_size": 3,
-            "num_repeat": 10,
-            "input_filters": 96,
-            "output_filters": 192,
-            "expand_ratio": 4,
-            "se_ratio": 0.25,
-            "strides": 2,
-            "conv_type": 0,
-        },
-        {
-            "kernel_size": 3,
-            "num_repeat": 19,
-            "input_filters": 192,
-            "output_filters": 224,
-            "expand_ratio": 6,
-            "se_ratio": 0.25,
-            "strides": 1,
-            "conv_type": 0,
-        },
-        {
-            "kernel_size": 3,
-            "num_repeat": 25,
-            "input_filters": 224,
-            "output_filters": 384,
-            "expand_ratio": 6,
-            "se_ratio": 0.25,
-            "strides": 2,
-            "conv_type": 0,
-        },
-        {
-            "kernel_size": 3,
-            "num_repeat": 7,
-            "input_filters": 384,
-            "output_filters": 640,
-            "expand_ratio": 6,
-            "se_ratio": 0.25,
-            "strides": 1,
-            "conv_type": 0,
-        },
-    ],
-    "efficientnetv2-b0": [
-        {
-            "kernel_size": 3,
-            "num_repeat": 1,
-            "input_filters": 32,
-            "output_filters": 16,
-            "expand_ratio": 1,
-            "se_ratio": 0,
-            "strides": 1,
-            "conv_type": 1,
-        },
-        {
-            "kernel_size": 3,
-            "num_repeat": 2,
-            "input_filters": 16,
-            "output_filters": 32,
-            "expand_ratio": 4,
-            "se_ratio": 0,
-            "strides": 2,
-            "conv_type": 1,
-        },
-        {
-            "kernel_size": 3,
-            "num_repeat": 2,
-            "input_filters": 32,
-            "output_filters": 48,
-            "expand_ratio": 4,
-            "se_ratio": 0,
-            "strides": 2,
-            "conv_type": 1,
-        },
-        {
-            "kernel_size": 3,
-            "num_repeat": 3,
-            "input_filters": 48,
-            "output_filters": 96,
-            "expand_ratio": 4,
-            "se_ratio": 0.25,
-            "strides": 2,
-            "conv_type": 0,
-        },
-        {
-            "kernel_size": 3,
-            "num_repeat": 5,
-            "input_filters": 96,
-            "output_filters": 112,
-            "expand_ratio": 6,
-            "se_ratio": 0.25,
-            "strides": 1,
-            "conv_type": 0,
-        },
-        {
-            "kernel_size": 3,
-            "num_repeat": 8,
-            "input_filters": 112,
-            "output_filters": 192,
-            "expand_ratio": 6,
-            "se_ratio": 0.25,
-            "strides": 2,
-            "conv_type": 0,
-        },
-    ],
-    "efficientnetv2-b1": [
-        {
-            "kernel_size": 3,
-            "num_repeat": 1,
-            "input_filters": 32,
-            "output_filters": 16,
-            "expand_ratio": 1,
-            "se_ratio": 0,
-            "strides": 1,
-            "conv_type": 1,
-        },
-        {
-            "kernel_size": 3,
-            "num_repeat": 2,
-            "input_filters": 16,
-            "output_filters": 32,
-            "expand_ratio": 4,
-            "se_ratio": 0,
-            "strides": 2,
-            "conv_type": 1,
-        },
-        {
-            "kernel_size": 3,
-            "num_repeat": 2,
-            "input_filters": 32,
-            "output_filters": 48,
-            "expand_ratio": 4,
-            "se_ratio": 0,
-            "strides": 2,
-            "conv_type": 1,
-        },
-        {
-            "kernel_size": 3,
-            "num_repeat": 3,
-            "input_filters": 48,
-            "output_filters": 96,
-            "expand_ratio": 4,
-            "se_ratio": 0.25,
-            "strides": 2,
-            "conv_type": 0,
-        },
-        {
-            "kernel_size": 3,
-            "num_repeat": 5,
-            "input_filters": 96,
-            "output_filters": 112,
-            "expand_ratio": 6,
-            "se_ratio": 0.25,
-            "strides": 1,
-            "conv_type": 0,
-        },
-        {
-            "kernel_size": 3,
-            "num_repeat": 8,
-            "input_filters": 112,
-            "output_filters": 192,
-            "expand_ratio": 6,
-            "se_ratio": 0.25,
-            "strides": 2,
-            "conv_type": 0,
-        },
-    ],
-    "efficientnetv2-b2": [
-        {
-            "kernel_size": 3,
-            "num_repeat": 1,
-            "input_filters": 32,
-            "output_filters": 16,
-            "expand_ratio": 1,
-            "se_ratio": 0,
-            "strides": 1,
-            "conv_type": 1,
-        },
-        {
-            "kernel_size": 3,
-            "num_repeat": 2,
-            "input_filters": 16,
-            "output_filters": 32,
-            "expand_ratio": 4,
-            "se_ratio": 0,
-            "strides": 2,
-            "conv_type": 1,
-        },
-        {
-            "kernel_size": 3,
-            "num_repeat": 2,
-            "input_filters": 32,
-            "output_filters": 48,
-            "expand_ratio": 4,
-            "se_ratio": 0,
-            "strides": 2,
-            "conv_type": 1,
-        },
-        {
-            "kernel_size": 3,
-            "num_repeat": 3,
-            "input_filters": 48,
-            "output_filters": 96,
-            "expand_ratio": 4,
-            "se_ratio": 0.25,
-            "strides": 2,
-            "conv_type": 0,
-        },
-        {
-            "kernel_size": 3,
-            "num_repeat": 5,
-            "input_filters": 96,
-            "output_filters": 112,
-            "expand_ratio": 6,
-            "se_ratio": 0.25,
-            "strides": 1,
-            "conv_type": 0,
-        },
-        {
-            "kernel_size": 3,
-            "num_repeat": 8,
-            "input_filters": 112,
-            "output_filters": 192,
-            "expand_ratio": 6,
-            "se_ratio": 0.25,
-            "strides": 2,
-            "conv_type": 0,
-        },
-    ],
-    "efficientnetv2-b3": [
-        {
-            "kernel_size": 3,
-            "num_repeat": 1,
-            "input_filters": 32,
-            "output_filters": 16,
-            "expand_ratio": 1,
-            "se_ratio": 0,
-            "strides": 1,
-            "conv_type": 1,
-        },
-        {
-            "kernel_size": 3,
-            "num_repeat": 2,
-            "input_filters": 16,
-            "output_filters": 32,
-            "expand_ratio": 4,
-            "se_ratio": 0,
-            "strides": 2,
-            "conv_type": 1,
-        },
-        {
-            "kernel_size": 3,
-            "num_repeat": 2,
-            "input_filters": 32,
-            "output_filters": 48,
-            "expand_ratio": 4,
-            "se_ratio": 0,
-            "strides": 2,
-            "conv_type": 1,
-        },
-        {
-            "kernel_size": 3,
-            "num_repeat": 3,
-            "input_filters": 48,
-            "output_filters": 96,
-            "expand_ratio": 4,
-            "se_ratio": 0.25,
-            "strides": 2,
-            "conv_type": 0,
-        },
-        {
-            "kernel_size": 3,
-            "num_repeat": 5,
-            "input_filters": 96,
-            "output_filters": 112,
-            "expand_ratio": 6,
-            "se_ratio": 0.25,
-            "strides": 1,
-            "conv_type": 0,
-        },
-        {
-            "kernel_size": 3,
-            "num_repeat": 8,
-            "input_filters": 112,
-            "output_filters": 192,
-            "expand_ratio": 6,
-            "se_ratio": 0.25,
-            "strides": 2,
-            "conv_type": 0,
-        },
-    ],
-}
-
-CONV_KERNEL_INITIALIZER = {
-    "class_name": "VarianceScaling",
-    "config": {
-        "scale": 2.0,
-        "mode": "fan_out",
-        "distribution": "truncated_normal"
-    }
-}
-
-DENSE_KERNEL_INITIALIZER = {
-    "class_name": "VarianceScaling",
-    "config": {
-        "scale": 1. / 3.,
-        "mode": "fan_out",
-        "distribution": "uniform"
-    }
-}
-
-BASE_DOCSTRING = """Instantiates the {name} architecture.
-
-  Reference:
-  - [EfficientNetV2: Smaller Models and Faster Training](
-      https://arxiv.org/abs/2104.00298) (ICML 2021)
-
-  This function returns a Keras image classification model,
-  optionally loaded with weights pre-trained on ImageNet.
-
-  For image classification use cases, see
-  [this page for detailed examples](
-    https://keras.io/api/applications/#usage-examples-for-image-classification-models).
-
-  For transfer learning use cases, make sure to read the
-  [guide to transfer learning & fine-tuning](
-    https://keras.io/guides/transfer_learning/).
-
-  Note: each Keras Application expects a specific kind of input preprocessing.
-  For EfficientNetV2, by default input preprocessing is included as a part of the
-  model (as a `Rescaling` layer), and thus
-  `tf.keras.applications.efficientnet_v2.preprocess_input` is actually a
-  pass-through function. In this use case, EfficientNetV2 models expect their inputs
-  to be float tensors of pixels with values in the [0-255] range.
-  At the same time, preprocessing as a part of the model (i.e. `Rescaling`
-  layer) can be disabled by setting `include_preprocessing` argument to False.
-  With preprocessing disabled EfficientNetV2 models expect their inputs to be float
-  tensors of pixels with values in the [-1, 1] range.
-
-  Args:
-    include_top: Boolean, whether to include the fully-connected
-        layer at the top of the network. Defaults to True.
-    weights: One of `None` (random initialization),
-        `"imagenet"` (pre-training on ImageNet),
-        or the path to the weights file to be loaded. Defaults to `"imagenet"`.
-    input_tensor: Optional Keras tensor
-        (i.e. output of `layers.Input()`)
-        to use as image input for the model.
-    input_shape: Optional shape tuple, only to be specified
-        if `include_top` is False.
-        It should have exactly 3 inputs channels.
-    pooling: Optional pooling mode for feature extraction
-        when `include_top` is `False`. Defaults to None.
-        - `None` means that the output of the model will be
-            the 4D tensor output of the
-            last convolutional layer.
-        - `"avg"` means that global average pooling
-            will be applied to the output of the
-            last convolutional layer, and thus
-            the output of the model will be a 2D tensor.
-        - `"max"` means that global max pooling will
-            be applied.
-    classes: Optional number of classes to classify images
-        into, only to be specified if `include_top` is True, and
-        if no `weights` argument is specified. Defaults to 1000 (number of
-        ImageNet classes).
-    classifier_activation: A string or callable. The activation function to use
-        on the `"top"` layer. Ignored unless `include_top=True`. Set
-        `classifier_activation=None` to return the logits of the "top" layer.
-        Defaults to `"softmax"`.
-        When loading pretrained weights, `classifier_activation` can only
-        be `None` or `"softmax"`.
-
-  Returns:
-    A `keras.Model` instance.
-"""
-
-
-def round_filters(filters, width_coefficient, min_depth, depth_divisor):
-  """Round number of filters based on depth multiplier."""
-  filters *= width_coefficient
-  minimum_depth = min_depth or depth_divisor
-  new_filters = max(
-      minimum_depth,
-      int(filters + depth_divisor / 2) // depth_divisor * depth_divisor,
-  )
-  return int(new_filters)
-
-
-def round_repeats(repeats, depth_coefficient):
-  """Round number of repeats based on depth multiplier."""
-  return int(math.ceil(depth_coefficient * repeats))
-
-
-def MBConvBlock(
-    input_filters: int,
-    output_filters: int,
-    expand_ratio=1,
-    kernel_size=3,
-    strides=1,
-    se_ratio=0.0,
-    bn_momentum=0.9,
-    activation="swish",
-    survival_probability: float = 0.8,
-    name=None,
-):
-  """MBConv block: Mobile Inverted Residual Bottleneck."""
-  bn_axis = 3 if backend.image_data_format() == "channels_last" else 1
-
-  if name is None:
-    name = backend.get_uid("block0")
-
-  def apply(inputs):
-    # Expansion phase
-    filters = input_filters * expand_ratio
-    if expand_ratio != 1:
-      x = layers.Conv2D(
-          filters=filters,
-          kernel_size=1,
-          strides=1,
-          kernel_initializer=CONV_KERNEL_INITIALIZER,
-          padding="same",
-          data_format="channels_last",
-          use_bias=False,
-          name=name + "expand_conv",
-      )(inputs)
-      x = layers.BatchNormalization(
-          axis=bn_axis,
-          momentum=bn_momentum,
-          name=name + "expand_bn",
-      )(x)
-      x = layers.Activation(activation, name=name + "expand_activation")(x)
-    else:
-      x = inputs
-
-    # Depthwise conv
-    x = layers.DepthwiseConv2D(
-        kernel_size=kernel_size,
-        strides=strides,
-        depthwise_initializer=CONV_KERNEL_INITIALIZER,
-        padding="same",
-        data_format="channels_last",
-        use_bias=False,
-        name=name + "dwconv2",
-    )(x)
-    x = layers.BatchNormalization(
-        axis=bn_axis, momentum=bn_momentum, name=name + "bn")(x)
-    x = layers.Activation(activation, name=name + "activation")(x)
-
-    # Squeeze and excite
-    if 0 < se_ratio <= 1:
-      filters_se = max(1, int(input_filters * se_ratio))
-      se = layers.GlobalAveragePooling2D(name=name + "se_squeeze")(x)
-      if bn_axis == 1:
-        se_shape = (filters, 1, 1)
-      else:
-        se_shape = (1, 1, filters)
-      se = layers.Reshape(se_shape, name=name + "se_reshape")(se)
-
-      se = layers.Conv2D(
-          filters_se,
-          1,
-          padding="same",
-          activation=activation,
-          kernel_initializer=CONV_KERNEL_INITIALIZER,
-          name=name + "se_reduce",
-      )(se)
-      se = layers.Conv2D(
-          filters,
-          1,
-          padding="same",
-          activation="sigmoid",
-          kernel_initializer=CONV_KERNEL_INITIALIZER,
-          name=name + "se_expand",
-      )(se)
-
-      x = layers.multiply([x, se], name=name + "se_excite")
-
-    # Output phase
-    x = layers.Conv2D(
-        filters=output_filters,
-        kernel_size=1,
-        strides=1,
-        kernel_initializer=CONV_KERNEL_INITIALIZER,
-        padding="same",
-        data_format="channels_last",
-        use_bias=False,
-        name=name + "project_conv",
-    )(x)
-    x = layers.BatchNormalization(
-        axis=bn_axis, momentum=bn_momentum, name=name + "project_bn")(x)
-
-    if strides == 1 and input_filters == output_filters:
-      if survival_probability:
-        x = layers.Dropout(
-            survival_probability,
-            noise_shape=(None, 1, 1, 1),
-            name=name + "drop",
-        )(x)
-      x = layers.add([x, inputs], name=name + "add")
-    return x
-
-  return apply
-
-
-def FusedMBConvBlock(
-    input_filters: int,
-    output_filters: int,
-    expand_ratio=1,
-    kernel_size=3,
-    strides=1,
-    se_ratio=0.0,
-    bn_momentum=0.9,
-    activation="swish",
-    survival_probability: float = 0.8,
-    name=None,
-):
-  """Fused MBConv Block: Fusing the proj conv1x1 and depthwise_conv into a conv2d."""
-  bn_axis = 3 if backend.image_data_format() == "channels_last" else 1
-
-  if name is None:
-    name = backend.get_uid("block0")
-
-  def apply(inputs):
-    filters = input_filters * expand_ratio
-    if expand_ratio != 1:
-      x = layers.Conv2D(
-          filters,
-          kernel_size=kernel_size,
-          strides=strides,
-          kernel_initializer=CONV_KERNEL_INITIALIZER,
-          data_format="channels_last",
-          padding="same",
-          use_bias=False,
-          name=name + "expand_conv",
-      )(inputs)
-      x = layers.BatchNormalization(
-          axis=bn_axis, momentum=bn_momentum, name=name + "expand_bn")(x)
-      x = layers.Activation(
-          activation=activation, name=name + "expand_activation")(x)
-    else:
-      x = inputs
-
-    # Squeeze and excite
-    if 0 < se_ratio <= 1:
-      filters_se = max(1, int(input_filters * se_ratio))
-      se = layers.GlobalAveragePooling2D(name=name + "se_squeeze")(x)
-      if bn_axis == 1:
-        se_shape = (filters, 1, 1)
-      else:
-        se_shape = (1, 1, filters)
-
-      se = layers.Reshape(se_shape, name=name + "se_reshape")(se)
-
-      se = layers.Conv2D(
-          filters_se,
-          1,
-          padding="same",
-          activation=activation,
-          kernel_initializer=CONV_KERNEL_INITIALIZER,
-          name=name + "se_reduce",
-      )(se)
-      se = layers.Conv2D(
-          filters,
-          1,
-          padding="same",
-          activation="sigmoid",
-          kernel_initializer=CONV_KERNEL_INITIALIZER,
-          name=name + "se_expand",
-      )(se)
-
-      x = layers.multiply([x, se], name=name + "se_excite")
-
-    # Output phase:
-    x = layers.Conv2D(
-        output_filters,
-        kernel_size=1 if expand_ratio != 1 else kernel_size,
-        strides=1 if expand_ratio != 1 else strides,
-        kernel_initializer=CONV_KERNEL_INITIALIZER,
-        padding="same",
-        use_bias=False,
-        name=name + "project_conv",
-    )(x)
-    x = layers.BatchNormalization(
-        axis=bn_axis, momentum=bn_momentum, name=name + "project_bn")(x)
-    if expand_ratio == 1:
-      x = layers.Activation(
-          activation=activation, name=name + "project_activation")(x)
-
-    # Residual:
-    if strides == 1 and input_filters == output_filters:
-      if survival_probability:
-        x = layers.Dropout(
-            survival_probability,
-            noise_shape=(None, 1, 1, 1),
-            name=name + "drop",
-        )(x)
-      x = layers.add([x, inputs], name=name + "add")
-    return x
-
-  return apply
-
-
-def EfficientNetV2(
-    width_coefficient,
-    depth_coefficient,
-    default_size,
-    dropout_rate=0.2,
-    drop_connect_rate=0.2,
-    depth_divisor=8,
-    min_depth=8,
-    bn_momentum=0.9,
-    activation="swish",
-    blocks_args="default",
-    model_name="efficientnetv2",
-    include_top=True,
-    weights="imagenet",
-    input_tensor=None,
-    input_shape=None,
-    pooling=None,
-    classes=1000,
-    classifier_activation="softmax",
-    include_preprocessing=True,
-):
-  """Instantiates the EfficientNetV2 architecture using given scaling coefficients.
-
-  Args:
-    width_coefficient: float, scaling coefficient for network width.
-    depth_coefficient: float, scaling coefficient for network depth.
-    default_size: integer, default input image size.
-    dropout_rate: float, dropout rate before final classifier layer.
-    drop_connect_rate: float, dropout rate at skip connections.
-    depth_divisor: integer, a unit of network width.
-    min_depth: integer, minimum number of filters.
-    bn_momentum: float. Momentum parameter for Batch Normalization layers.
-    activation: activation function.
-    blocks_args: list of dicts, parameters to construct block modules.
-    model_name: string, model name.
-    include_top: whether to include the fully-connected layer at the top of the
-      network.
-    weights: one of `None` (random initialization), `"imagenet"` (pre-training
-      on ImageNet), or the path to the weights file to be loaded.
-    input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) or
-      numpy array to use as image input for the model.
-    input_shape: optional shape tuple, only to be specified if `include_top` is
-      False. It should have exactly 3 inputs channels.
-    pooling: optional pooling mode for feature extraction when `include_top` is
-      `False`. - `None` means that the output of the model will be the 4D tensor
-      output of the last convolutional layer. - "avg" means that global average
-      pooling will be applied to the output of the last convolutional layer, and
-      thus the output of the model will be a 2D tensor. - `"max"` means that
-      global max pooling will be applied.
-    classes: optional number of classes to classify images into, only to be
-      specified if `include_top` is True, and if no `weights` argument is
-      specified.
-    classifier_activation: A string or callable. The activation function to use
-      on the `"top"` layer. Ignored unless `include_top=True`. Set
-      `classifier_activation=None` to return the logits of the `"top"` layer.
-    include_preprocessing: Boolean, whether to include the preprocessing layer
-      (`Rescaling`) at the bottom of the network. Defaults to `True`.
-
-  Returns:
-    A `keras.Model` instance.
-
-  Raises:
-    ValueError: in case of invalid argument for `weights`,
-      or invalid input shape.
-    ValueError: if `classifier_activation` is not `"softmax"` or `None` when
-      using a pretrained top layer.
-  """
-
-  if blocks_args == "default":
-    blocks_args = DEFAULT_BLOCKS_ARGS[model_name]
-
-  if not (weights in {"imagenet", None} or tf.io.gfile.exists(weights)):
-    raise ValueError("The `weights` argument should be either "
-                     "`None` (random initialization), `imagenet` "
-                     "(pre-training on ImageNet), "
-                     "or the path to the weights file to be loaded."
-                     f"Received: weights={weights}")
-
-  if weights == "imagenet" and include_top and classes != 1000:
-    raise ValueError("If using `weights` as `'imagenet'` with `include_top`"
-                     " as true, `classes` should be 1000"
-                     f"Received: classes={classes}")
-
-  # Determine proper input shape
-  input_shape = imagenet_utils.obtain_input_shape(
-      input_shape,
-      default_size=default_size,
-      min_size=32,
-      data_format=backend.image_data_format(),
-      require_flatten=include_top,
-      weights=weights)
-
-  if input_tensor is None:
-    img_input = layers.Input(shape=input_shape)
-  else:
-    if not backend.is_keras_tensor(input_tensor):
-      img_input = layers.Input(tensor=input_tensor, shape=input_shape)
-    else:
-      img_input = input_tensor
-
-  bn_axis = 3 if backend.image_data_format() == "channels_last" else 1
-
-  x = img_input
-
-  if include_preprocessing:
-    # Apply original V1 preprocessing for Bx variants
-    # if number of channels allows it
-    num_channels = input_shape[bn_axis - 1]
-    if model_name.split("-")[-1].startswith("b") and num_channels == 3:
-      x = layers.Rescaling(scale=1. / 255)(x)
-      x = layers.Normalization(
-          mean=[0.485, 0.456, 0.406],
-          variance=[0.229**2, 0.224**2, 0.225**2],
-          axis=bn_axis,
-      )(x)
-    else:
-      x = layers.Rescaling(scale=1. / 128.0, offset=-1)(x)
-
-  # Build stem
-  stem_filters = round_filters(
-      filters=blocks_args[0]["input_filters"],
-      width_coefficient=width_coefficient,
-      min_depth=min_depth,
-      depth_divisor=depth_divisor,
-  )
-  x = layers.Conv2D(
-      filters=stem_filters,
-      kernel_size=3,
-      strides=2,
-      kernel_initializer=CONV_KERNEL_INITIALIZER,
-      padding="same",
-      use_bias=False,
-      name="stem_conv",
-  )(x)
-  x = layers.BatchNormalization(
-      axis=bn_axis,
-      momentum=bn_momentum,
-      name="stem_bn",
-  )(x)
-  x = layers.Activation(activation, name="stem_activation")(x)
-
-  # Build blocks
-  blocks_args = copy.deepcopy(blocks_args)
-  b = 0
-  blocks = float(sum(args["num_repeat"] for args in blocks_args))
-
-  for (i, args) in enumerate(blocks_args):
-    assert args["num_repeat"] > 0
-
-    # Update block input and output filters based on depth multiplier.
-    args["input_filters"] = round_filters(
-        filters=args["input_filters"],
-        width_coefficient=width_coefficient,
-        min_depth=min_depth,
-        depth_divisor=depth_divisor)
-    args["output_filters"] = round_filters(
-        filters=args["output_filters"],
-        width_coefficient=width_coefficient,
-        min_depth=min_depth,
-        depth_divisor=depth_divisor)
-
-    # Determine which conv type to use:
-    block = {0: MBConvBlock, 1: FusedMBConvBlock}[args.pop("conv_type")]
-    repeats = round_repeats(
-        repeats=args.pop("num_repeat"), depth_coefficient=depth_coefficient)
-    for j in range(repeats):
-      # The first block needs to take care of stride and filter size increase.
-      if j > 0:
-        args["strides"] = 1
-        args["input_filters"] = args["output_filters"]
-
-      x = block(
-          activation=activation,
-          bn_momentum=bn_momentum,
-          survival_probability=drop_connect_rate * b / blocks,
-          name="block{}{}_".format(i + 1, chr(j + 97)),
-          **args,
-      )(x)
-
-  # Build top
-  top_filters = round_filters(
-      filters=1280,
-      width_coefficient=width_coefficient,
-      min_depth=min_depth,
-      depth_divisor=depth_divisor)
-  x = layers.Conv2D(
-      filters=top_filters,
-      kernel_size=1,
-      strides=1,
-      kernel_initializer=CONV_KERNEL_INITIALIZER,
-      padding="same",
-      data_format="channels_last",
-      use_bias=False,
-      name="top_conv",
-  )(x)
-  x = layers.BatchNormalization(
-      axis=bn_axis,
-      momentum=bn_momentum,
-      name="top_bn",
-  )(x)
-  x = layers.Activation(activation=activation, name="top_activation")(x)
-
-  if include_top:
-    x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
-    if dropout_rate > 0:
-      x = layers.Dropout(dropout_rate, name="top_dropout")(x)
-    imagenet_utils.validate_activation(classifier_activation, weights)
-    x = layers.Dense(
-        classes,
-        activation=classifier_activation,
-        kernel_initializer=DENSE_KERNEL_INITIALIZER,
-        bias_initializer=tf.constant_initializer(0),
-        name="predictions")(x)
-  else:
-    if pooling == "avg":
-      x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
-    elif pooling == "max":
-      x = layers.GlobalMaxPooling2D(name="max_pool")(x)
-
-  # Ensure that the model takes into account
-  # any potential predecessors of `input_tensor`.
-  if input_tensor is not None:
-    inputs = layer_utils.get_source_inputs(input_tensor)
-  else:
-    inputs = img_input
-
-  # Create model.
-  model = training.Model(inputs, x, name=model_name)
-
-  # Load weights.
-  if weights == "imagenet":
-    if include_top:
-      file_suffix = ".h5"
-      file_hash = WEIGHTS_HASHES[model_name[-2:]][0]
-    else:
-      file_suffix = "_notop.h5"
-      file_hash = WEIGHTS_HASHES[model_name[-2:]][1]
-    file_name = model_name + file_suffix
-    weights_path = data_utils.get_file(
-        file_name,
-        BASE_WEIGHTS_PATH + file_name,
-        cache_subdir="models",
-        file_hash=file_hash)
-    model.load_weights(weights_path)
-  elif weights is not None:
-    model.load_weights(weights)
-
-  return model
-
-
-@keras_export("keras.applications.efficientnet_v2.EfficientNetV2B0",
-              "keras.applications.EfficientNetV2B0")
-def EfficientNetV2B0(
-    include_top=True,
-    weights="imagenet",
-    input_tensor=None,
-    input_shape=None,
-    pooling=None,
-    classes=1000,
-    classifier_activation="softmax",
-    include_preprocessing=True,
-):
-  return EfficientNetV2(
-      width_coefficient=1.0,
-      depth_coefficient=1.0,
-      default_size=224,
-      model_name="efficientnetv2-b0",
-      include_top=include_top,
-      weights=weights,
-      input_tensor=input_tensor,
-      input_shape=input_shape,
-      pooling=pooling,
-      classes=classes,
-      classifier_activation=classifier_activation,
-      include_preprocessing=include_preprocessing)
-
-
-@keras_export("keras.applications.efficientnet_v2.EfficientNetV2B1",
-              "keras.applications.EfficientNetV2B1")
-def EfficientNetV2B1(
-    include_top=True,
-    weights="imagenet",
-    input_tensor=None,
-    input_shape=None,
-    pooling=None,
-    classes=1000,
-    classifier_activation="softmax",
-    include_preprocessing=True,
-):
-  return EfficientNetV2(
-      width_coefficient=1.0,
-      depth_coefficient=1.1,
-      default_size=240,
-      model_name="efficientnetv2-b1",
-      include_top=include_top,
-      weights=weights,
-      input_tensor=input_tensor,
-      input_shape=input_shape,
-      pooling=pooling,
-      classes=classes,
-      classifier_activation=classifier_activation,
-      include_preprocessing=include_preprocessing,
-  )
-
-
-@keras_export("keras.applications.efficientnet_v2.EfficientNetV2B2",
-              "keras.applications.EfficientNetV2B2")
-def EfficientNetV2B2(
-    include_top=True,
-    weights="imagenet",
-    input_tensor=None,
-    input_shape=None,
-    pooling=None,
-    classes=1000,
-    classifier_activation="softmax",
-    include_preprocessing=True,
-):
-  return EfficientNetV2(
-      width_coefficient=1.1,
-      depth_coefficient=1.2,
-      default_size=260,
-      model_name="efficientnetv2-b2",
-      include_top=include_top,
-      weights=weights,
-      input_tensor=input_tensor,
-      input_shape=input_shape,
-      pooling=pooling,
-      classes=classes,
-      classifier_activation=classifier_activation,
-      include_preprocessing=include_preprocessing,
-  )
-
-
-@keras_export("keras.applications.efficientnet_v2.EfficientNetV2B3",
-              "keras.applications.EfficientNetV2B3")
-def EfficientNetV2B3(
-    include_top=True,
-    weights="imagenet",
-    input_tensor=None,
-    input_shape=None,
-    pooling=None,
-    classes=1000,
-    classifier_activation="softmax",
-    include_preprocessing=True,
-):
-  return EfficientNetV2(
-      width_coefficient=1.2,
-      depth_coefficient=1.4,
-      default_size=300,
-      model_name="efficientnetv2-b3",
-      include_top=include_top,
-      weights=weights,
-      input_tensor=input_tensor,
-      input_shape=input_shape,
-      pooling=pooling,
-      classes=classes,
-      classifier_activation=classifier_activation,
-      include_preprocessing=include_preprocessing,
-  )
-
-
-@keras_export("keras.applications.efficientnet_v2.EfficientNetV2S",
-              "keras.applications.EfficientNetV2S")
-def EfficientNetV2S(
-    include_top=True,
-    weights="imagenet",
-    input_tensor=None,
-    input_shape=None,
-    pooling=None,
-    classes=1000,
-    classifier_activation="softmax",
-    include_preprocessing=True,
-):
-  return EfficientNetV2(
-      width_coefficient=1.0,
-      depth_coefficient=1.0,
-      default_size=384,
-      model_name="efficientnetv2-s",
-      include_top=include_top,
-      weights=weights,
-      input_tensor=input_tensor,
-      input_shape=input_shape,
-      pooling=pooling,
-      classes=classes,
-      classifier_activation=classifier_activation,
-      include_preprocessing=include_preprocessing,
-  )
-
-
-@keras_export("keras.applications.efficientnet_v2.EfficientNetV2M",
-              "keras.applications.EfficientNetV2M")
-def EfficientNetV2M(
-    include_top=True,
-    weights="imagenet",
-    input_tensor=None,
-    input_shape=None,
-    pooling=None,
-    classes=1000,
-    classifier_activation="softmax",
-    include_preprocessing=True,
-):
-  return EfficientNetV2(
-      width_coefficient=1.0,
-      depth_coefficient=1.0,
-      default_size=480,
-      model_name="efficientnetv2-m",
-      include_top=include_top,
-      weights=weights,
-      input_tensor=input_tensor,
-      input_shape=input_shape,
-      pooling=pooling,
-      classes=classes,
-      classifier_activation=classifier_activation,
-      include_preprocessing=include_preprocessing,
-  )
-
-
-@keras_export("keras.applications.efficientnet_v2.EfficientNetV2L",
-              "keras.applications.EfficientNetV2L")
-def EfficientNetV2L(
-    include_top=True,
-    weights="imagenet",
-    input_tensor=None,
-    input_shape=None,
-    pooling=None,
-    classes=1000,
-    classifier_activation="softmax",
-    include_preprocessing=True,
-):
-  return EfficientNetV2(
-      width_coefficient=1.0,
-      depth_coefficient=1.0,
-      default_size=480,
-      model_name="efficientnetv2-l",
-      include_top=include_top,
-      weights=weights,
-      input_tensor=input_tensor,
-      input_shape=input_shape,
-      pooling=pooling,
-      classes=classes,
-      classifier_activation=classifier_activation,
-      include_preprocessing=include_preprocessing,
-  )
-
-
-EfficientNetV2B0.__doc__ = BASE_DOCSTRING.format(name="EfficientNetV2B0")
-EfficientNetV2B1.__doc__ = BASE_DOCSTRING.format(name="EfficientNetV2B1")
-EfficientNetV2B2.__doc__ = BASE_DOCSTRING.format(name="EfficientNetV2B2")
-EfficientNetV2B3.__doc__ = BASE_DOCSTRING.format(name="EfficientNetV2B3")
-EfficientNetV2S.__doc__ = BASE_DOCSTRING.format(name="EfficientNetV2S")
-EfficientNetV2M.__doc__ = BASE_DOCSTRING.format(name="EfficientNetV2M")
-EfficientNetV2L.__doc__ = BASE_DOCSTRING.format(name="EfficientNetV2L")
-
-
-@keras_export("keras.applications.efficientnet_v2.preprocess_input")
-def preprocess_input(x, data_format=None):  # pylint: disable=unused-argument
-  """A placeholder method for backward compatibility.
-
-  The preprocessing logic has been included in the EfficientNetV2 model
-  implementation. Users are no longer required to call this method to normalize
-  the input data. This method does nothing and only kept as a placeholder to
-  align the API surface between old and new version of model.
-
-  Args:
-    x: A floating point `numpy.array` or a `tf.Tensor`.
-    data_format: Optional data format of the image tensor/array. Defaults to
-      None, in which case the global setting
-      `tf.keras.backend.image_data_format()` is used (unless you changed it, it
-      defaults to "channels_last").{mode}
-
-  Returns:
-    Unchanged `numpy.array` or `tf.Tensor`.
-  """
-  return x
-
-
-@keras_export("keras.applications.efficientnet_v2.decode_predictions")
-def decode_predictions(preds, top=5):
-  return imagenet_utils.decode_predictions(preds, top=top)
-
-
-decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
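The module removed above was a vendored copy of the upstream Keras implementation (note the `keras_export` paths in the deleted code). Assuming a TensorFlow release that ships EfficientNetV2 natively (2.8 or later), the same constructors remain importable directly; a minimal sketch:

import tensorflow as tf

# Same constructor the deleted module exported as
# "keras.applications.EfficientNetV2B0"; assumes TF >= 2.8.
model = tf.keras.applications.EfficientNetV2B0(
    include_top=True,
    weights="imagenet",  # or None for random initialization
)
model.summary()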
{mttf-1.0.2.data → mttf-1.0.3.data}/scripts/dmt_pipi.sh RENAMED
File without changes

{mttf-1.0.2.data → mttf-1.0.3.data}/scripts/dmt_twineu.sh RENAMED
File without changes

{mttf-1.0.2.data → mttf-1.0.3.data}/scripts/twine_trusted.py RENAMED
File without changes

{mttf-1.0.2.data → mttf-1.0.3.data}/scripts/wml_nexus.py RENAMED
File without changes

{mttf-1.0.2.data → mttf-1.0.3.data}/scripts/wml_pipi.sh RENAMED
File without changes

{mttf-1.0.2.data → mttf-1.0.3.data}/scripts/wml_twineu.sh RENAMED
File without changes

{mttf-1.0.2.dist-info → mttf-1.0.3.dist-info}/LICENSE RENAMED
File without changes

{mttf-1.0.2.dist-info → mttf-1.0.3.dist-info}/WHEEL RENAMED
File without changes

{mttf-1.0.2.dist-info → mttf-1.0.3.dist-info}/top_level.txt RENAMED
File without changes