mttf-1.3.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mt/keras/__init__.py +8 -0
- mt/keras_src/__init__.py +16 -0
- mt/keras_src/applications_src/__init__.py +33 -0
- mt/keras_src/applications_src/classifier.py +497 -0
- mt/keras_src/applications_src/mobilenet_v3_split.py +544 -0
- mt/keras_src/applications_src/mobilevit.py +292 -0
- mt/keras_src/base.py +28 -0
- mt/keras_src/constraints_src/__init__.py +14 -0
- mt/keras_src/constraints_src/center_around.py +19 -0
- mt/keras_src/layers_src/__init__.py +43 -0
- mt/keras_src/layers_src/counter.py +27 -0
- mt/keras_src/layers_src/floor.py +24 -0
- mt/keras_src/layers_src/identical.py +15 -0
- mt/keras_src/layers_src/image_sizing.py +1605 -0
- mt/keras_src/layers_src/normed_conv2d.py +239 -0
- mt/keras_src/layers_src/simple_mha.py +472 -0
- mt/keras_src/layers_src/soft_bend.py +36 -0
- mt/keras_src/layers_src/transformer_encoder.py +246 -0
- mt/keras_src/layers_src/utils.py +88 -0
- mt/keras_src/layers_src/var_regularizer.py +38 -0
- mt/tf/__init__.py +10 -0
- mt/tf/init.py +25 -0
- mt/tf/keras_applications/__init__.py +5 -0
- mt/tf/keras_layers/__init__.py +5 -0
- mt/tf/mttf_version.py +5 -0
- mt/tf/utils.py +44 -0
- mt/tf/version.py +5 -0
- mt/tfc/__init__.py +291 -0
- mt/tfg/__init__.py +8 -0
- mt/tfp/__init__.py +11 -0
- mt/tfp/real_nvp.py +116 -0
- mttf-1.3.6.data/scripts/dmt_build_package_and_upload_to_nexus.sh +25 -0
- mttf-1.3.6.data/scripts/dmt_pipi.sh +7 -0
- mttf-1.3.6.data/scripts/dmt_twineu.sh +2 -0
- mttf-1.3.6.data/scripts/pipi.sh +7 -0
- mttf-1.3.6.data/scripts/user_build_package_and_upload_to_nexus.sh +25 -0
- mttf-1.3.6.data/scripts/user_pipi.sh +8 -0
- mttf-1.3.6.data/scripts/user_twineu.sh +3 -0
- mttf-1.3.6.data/scripts/wml_build_package_and_upload_to_nexus.sh +25 -0
- mttf-1.3.6.data/scripts/wml_nexus.py +50 -0
- mttf-1.3.6.data/scripts/wml_pipi.sh +7 -0
- mttf-1.3.6.data/scripts/wml_twineu.sh +2 -0
- mttf-1.3.6.dist-info/METADATA +18 -0
- mttf-1.3.6.dist-info/RECORD +47 -0
- mttf-1.3.6.dist-info/WHEEL +5 -0
- mttf-1.3.6.dist-info/licenses/LICENSE +21 -0
- mttf-1.3.6.dist-info/top_level.txt +1 -0
mt/keras/__init__.py
ADDED
mt/keras_src/__init__.py
ADDED
@@ -0,0 +1,16 @@
+from packaging.version import Version
+
+from .base import keras_version, keras_source
+
+if keras_source == "tf_keras":
+    from tf_keras import *
+elif keras_source == "keras":
+    from keras import *
+elif keras_source == "tensorflow.keras":
+    from tensorflow.keras import *
+else:
+    raise ImportError(f"Unknown value '{keras_source}' for variable 'keras_source'.")
+
+d_modelFileFormats = {"H5": ".h5", "TF": ".tf"}
+if Version(keras_version) >= Version("2.15"):
+    d_modelFileFormats["Keras"] = ".keras"
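Note: the dispatch above makes mt.keras_src a thin alias over whichever Keras distribution is installed, and d_modelFileFormats maps a format label to its file extension, gaining the native ".keras" entry only when the detected Keras is 2.15 or newer. A minimal usage sketch (not part of the diff; assumes mttf and one of the supported Keras distributions are installed):

# Illustrative only: pick the newest save format the installed Keras supports.
from mt.keras_src import d_modelFileFormats, keras_version

ext = d_modelFileFormats.get("Keras", d_modelFileFormats["H5"])
print(f"Keras {keras_version}: models will be saved with the '{ext}' extension")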
mt/keras_src/applications_src/__init__.py
ADDED
@@ -0,0 +1,33 @@
+from .. import applications as _applications
+
+for _x, _y in _applications.__dict__.items():
+    if _x.startswith("_"):
+        continue
+    globals()[_x] = _y
+__doc__ = _applications.__doc__
+
+from .mobilenet_v3_split import (
+    MobileNetV3Input,
+    MobileNetV3Parser,
+    MobileNetV3SmallBlock,
+    MobileNetV3LargeBlock,
+    MobileNetV3Mixer,
+    MobileNetV3Output,
+    MobileNetV3Split,
+)
+
+from .mobilevit import create_mobilevit
+from .classifier import create_classifier_block
+
+
+__api__ = [
+    "MobileNetV3Input",
+    "MobileNetV3Parser",
+    "MobileNetV3SmallBlock",
+    "MobileNetV3LargeBlock",
+    "MobileNetV3Mixer",
+    "MobileNetV3Output",
+    "MobileNetV3Split",
+    "create_mobilevit",
+    "create_classifier_block",
+]
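Note: the loop above re-exports every public name of Keras's applications module into this package, so stock backbones and the custom split models resolve from a single namespace. A small sketch of the effect (not part of the diff; assumes a Keras distribution recent enough to ship MobileNetV3Small):

# Illustrative only: one name comes from keras.applications, the other from this package.
from mt.keras_src.applications_src import MobileNetV3Small  # re-exported
from mt.keras_src.applications_src import MobileNetV3Split  # defined in mobilenet_v3_split.py

print(MobileNetV3Small.__module__, MobileNetV3Split.__module__)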
mt/keras_src/applications_src/classifier.py
ADDED
@@ -0,0 +1,497 @@
+"""Standard classifier from a feature vector.
+"""
+
+from mt import tp, tfc, logg
+from .. import models, layers, regularizers
+
+from ..constraints_src import CenterAround
+
+
+def create_classifier_block(
+    input_dim: int,
+    n_classes: int,
+    name: str = "dense_classifier",
+    params: tfc.ClassifierParams = tfc.ClassifierParams(),
+    logger: tp.Optional[logg.IndentedLoggerAdapter] = None,
+):
+    """Creates a standard classifier block.
+
+    Parameters
+    ----------
+    input_dim : int
+        feature dimensionality of the input tensor
+    n_classes : int
+        number of output classes
+    name : str, optional
+        the name of the classifier block
+    params : mt.tfc.ClassifierParams
+        parameters for creating the classifier block
+    logger : mt.logg.IndentedLoggerAdapter, optional
+        logger for debugging purposes
+
+    Returns
+    -------
+    model : tensorflow.keras.models.Model
+        an uninitialised model without any compilation details representing the classifier block.
+        The model returns `bv_logits` and `bv_probs`.
+    """
+
+    msg = f"Creating a classifier block of {n_classes} classes"
+    with logg.scoped_info(msg, logger=logger):
+        name_scope = tfc.NameScope(name)
+
+        x = bv_feats = layers.Input(shape=(input_dim,), name=name_scope("input"))
+
+        x = layers.LayerNormalization(name=name_scope("prenorm"))(x)
+
+        # dropout, optional
+        dropout = getattr(params, "dropout", None)
+        if dropout is not None and dropout > 0 and dropout < 1:
+            logg.info(f"Using dropout {dropout}.", logger=logger)
+            x = layers.Dropout(dropout, name=name_scope("dropout"))(x)
+
+        # Object classification branch
+        # MT-TODO: currently l2_coeff does not take into account batch size. In order to be truly
+        # independent of batch size, number of classes and feature dimensionality, the l2 coeff
+        # should be l2_coeff / bv_feats.shape[1] / n_classes / batch_size. So we need to pass the
+        # batch size to the function as an additional argument.
+        l2_coeff = getattr(params, "l2_coeff", None)
+        if l2_coeff is not None:
+            logg.info(
+                "Using param 'l2_coeff' for kernel and bias regularizers.",
+                logger=logger,
+            )
+            logg.info(f"l2_coeff: {l2_coeff}", logger=logger)
+            l2 = l2_coeff / bv_feats.shape[1] / n_classes
+            logg.info(f"kernel_l2: {l2}", logger=logger)
+            kernel_regularizer = regularizers.l2(l2)
+            l2 = l2_coeff / n_classes
+            logg.info(f"bias_l2: {l2}", logger=logger)
+            bias_regularizer = regularizers.l2(l2)
+        else:
+            kernel_regularizer = None
+            bias_regularizer = None
+
+        # zero mean logit biases
+        if getattr(params, "zero_mean_logit_biases", False):
+            logg.info("Logit biases are constrained to have zero mean.", logger=logger)
+            bias_constraint = CenterAround()
+        else:
+            bias_constraint = None
+
+        # dense layer
+        bv_logits = x = layers.Dense(
+            n_classes,
+            name=name_scope("logits"),
+            kernel_regularizer=kernel_regularizer,
+            bias_regularizer=bias_regularizer,
+            bias_constraint=bias_constraint,
+        )(x)
+
+        bv_probs = x = layers.Softmax(name=name_scope("probs"))(x)
+
+    # model
+    model = models.Model(bv_feats, [bv_logits, bv_probs], name=name)
+
+    return model
+
+
+def MobileNetV3SmallBlock(
+    block_id: int,  # only 0 to 3 are accepted here
+    input_tensor,  # input tensor for the block
+    alpha=1.0,
+    minimalistic=False,
+):
+    """Prepares a MobileNetV3Small downsampling block."""
+
+    def depth(d):
+        return _depth(d * alpha)
+
+    if minimalistic:
+        kernel = 3
+        activation = relu
+        se_ratio = None
+    else:
+        kernel = 5
+        activation = hard_swish
+        se_ratio = 0.25
+
+    x = input_tensor
+    if block_id == 0:
+        x = _inverted_res_block(x, 1, depth(16), 3, 2, se_ratio, relu, 0)
+    elif block_id == 1:
+        x = _inverted_res_block(x, 72.0 / 16, depth(24), 3, 2, None, relu, 1)
+        x = _inverted_res_block(x, 88.0 / 24, depth(24), 3, 1, None, relu, 2)
+    elif block_id == 2:
+        x = _inverted_res_block(x, 4, depth(40), kernel, 2, se_ratio, activation, 3)
+        x = _inverted_res_block(x, 6, depth(40), kernel, 1, se_ratio, activation, 4)
+        x = _inverted_res_block(x, 6, depth(40), kernel, 1, se_ratio, activation, 5)
+        x = _inverted_res_block(x, 3, depth(48), kernel, 1, se_ratio, activation, 6)
+        x = _inverted_res_block(x, 3, depth(48), kernel, 1, se_ratio, activation, 7)
+    else:
+        x = _inverted_res_block(x, 6, depth(96), kernel, 2, se_ratio, activation, 8)
+        x = _inverted_res_block(x, 6, depth(96), kernel, 1, se_ratio, activation, 9)
+        x = _inverted_res_block(x, 6, depth(96), kernel, 1, se_ratio, activation, 10)
+
+    # Create model.
+    model = models.Model(input_tensor, x, name=f"MobileNetV3SmallBlock{block_id}")
+
+    return model
+
+
+def MobileNetV3LargeBlock(
+    block_id: int,  # only 0 to 4 are accepted here. 4 is only available as of 2023/05/15
+    input_tensor,  # input tensor for the block
+    alpha=1.0,
+    minimalistic=False,
+):
+    """Prepares a MobileNetV3Large downsampling block."""
+
+    def depth(d):
+        return _depth(d * alpha)
+
+    if minimalistic:
+        kernel = 3
+        activation = relu
+        se_ratio = None
+    else:
+        kernel = 5
+        activation = hard_swish
+        se_ratio = 0.25
+
+    x = input_tensor
+    if block_id == 0:
+        x = _inverted_res_block(x, 1, depth(16), 3, 1, None, relu, 0)
+        x = _inverted_res_block(x, 4, depth(24), 3, 2, None, relu, 1)
+        x = _inverted_res_block(x, 3, depth(24), 3, 1, None, relu, 2)
+    elif block_id == 1:
+        x = _inverted_res_block(x, 3, depth(40), kernel, 2, se_ratio, relu, 3)
+        x = _inverted_res_block(x, 3, depth(40), kernel, 1, se_ratio, relu, 4)
+        x = _inverted_res_block(x, 3, depth(40), kernel, 1, se_ratio, relu, 5)
+    elif block_id == 2:
+        x = _inverted_res_block(x, 6, depth(80), 3, 2, None, activation, 6)
+        x = _inverted_res_block(x, 2.5, depth(80), 3, 1, None, activation, 7)
+        x = _inverted_res_block(x, 2.3, depth(80), 3, 1, None, activation, 8)
+        x = _inverted_res_block(x, 2.3, depth(80), 3, 1, None, activation, 9)
+        x = _inverted_res_block(x, 6, depth(112), 3, 1, se_ratio, activation, 10)
+        x = _inverted_res_block(x, 6, depth(112), 3, 1, se_ratio, activation, 11)
+    elif block_id == 3:
+        x = _inverted_res_block(x, 6, depth(160), kernel, 2, se_ratio, activation, 12)
+        x = _inverted_res_block(x, 6, depth(160), kernel, 1, se_ratio, activation, 13)
+        x = _inverted_res_block(x, 6, depth(160), kernel, 1, se_ratio, activation, 14)
+    else:
+        x = _inverted_res_block(x, 6, depth(320), kernel, 2, se_ratio, activation, 15)
+        x = _inverted_res_block(x, 6, depth(320), kernel, 1, se_ratio, activation, 16)
+        x = _inverted_res_block(x, 6, depth(320), kernel, 1, se_ratio, activation, 17)
+
+    # Create model.
+    model = models.Model(input_tensor, x, name=f"MobileNetV3LargeBlock{block_id}")
+
+    return model
+
+
+def MobileNetV3Mixer(
+    input_tensor,
+    params: tfc.MobileNetV3MixerParams,
+    last_point_ch,
+    alpha=1.0,
+    model_type: str = "Large",  # only 'Small' or 'Large' are accepted
+    minimalistic=False,
+):
+    """Prepares a MobileNetV3 mixer block."""
+
+    x = input_tensor
+    channel_axis = 1 if backend.image_data_format() == "channels_first" else -1
+
+    if params.variant == "mobilenet":
+
+        if minimalistic:
+            kernel = 3
+            activation = relu
+            se_ratio = None
+        else:
+            kernel = 5
+            activation = hard_swish
+            se_ratio = 0.25
+
+        last_conv_ch = _depth(backend.int_shape(x)[channel_axis] * 6)
+
+        # if the width multiplier is greater than 1 we
+        # increase the number of output channels
+        if alpha > 1.0:
+            last_point_ch = _depth(last_point_ch * alpha)
+        x = layers.Conv2D(
+            last_conv_ch, kernel_size=1, padding="same", use_bias=False, name="Conv_1"
+        )(x)
+        x = layers.BatchNormalization(
+            axis=channel_axis, epsilon=1e-3, momentum=0.999, name="Conv_1/BatchNorm"
+        )(x)
+        x = activation(x)
+        x = layers.GlobalAveragePooling2D()(x)
+        if channel_axis == 1:
+            x = layers.Reshape((last_conv_ch, 1, 1))(x)
+        else:
+            x = layers.Reshape((1, 1, last_conv_ch))(x)
+        x = layers.Conv2D(
+            last_point_ch, kernel_size=1, padding="same", use_bias=True, name="Conv_2"
+        )(x)
+        x = activation(x)
+    elif params.variant == "maxpool":
+        x = layers.GlobalMaxPool2D()(x)
+    elif params.variant == "mhapool":
+        if backend.image_data_format() == "channels_first":
+            raise tfc.ModelSyntaxError(
+                "Mixer variant 'mhapool' requires channels_last image data format."
+            )
+
+        mhapool_params = params.mhapool_cascade_params
+        if not isinstance(mhapool_params, tfc.MHAPool2DCascadeParams):
+            raise tfc.ModelSyntaxError(
+                "Parameter 'params.mhapool_cascade_params' is not of type "
+                "mt.tfc.MHAPool2DCascadeParams. Got: {}.".format(type(mhapool_params))
+            )
+
+        from ..layers_src import MHAPool2D
+
+        n_heads = mhapool_params.n_heads
+        k = 0
+        outputs = []
+        while True:
+            h = x.shape[1]
+            w = x.shape[2]
+
+            if h <= 1 and w <= 1:
+                break
+
+            c = x.shape[3]
+            key_dim = (c + n_heads - 1) // n_heads
+            value_dim = int(key_dim * mhapool_params.expansion_factor)
+            k += 1
+            block_name = f"MHAPool2DCascade_block{k}"
+            if k > mhapool_params.max_num_pooling_layers:  # GlobalMaxPool2D
+                x = layers.GlobalMaxPooling2D(
+                    keepdims=True, name=block_name + "/GlobalMaxPool"
+                )(x)
+            else:  # MHAPool2D
+                x = layers.LayerNormalization()(x)
+                if h <= 2 and w <= 2:
+                    activation = mhapool_params.final_activation
+                else:
+                    activation = mhapool_params.activation
+                x = MHAPool2D(
+                    n_heads,
+                    key_dim,
+                    value_dim=value_dim,
+                    pooling=mhapool_params.pooling,
+                    dropout=mhapool_params.dropout,
+                    name=block_name + "/MHAPool",
+                )(x)
+
+            if mhapool_params.output_all:
+                outputs.append(x)
+            else:
+                outputs = [x]
+    else:
+        raise tfc.ModelSyntaxError(
+            "Unknown mixer variant: '{}'.".format(params.variant)
+        )
+
+    # Create model.
+    model = models.Model(
+        input_tensor, outputs, name="MobileNetV3{}Mixer".format(model_type)
+    )
+
+    return model
+
+
+def MobileNetV3Output(
+    input_tensor,
+    model_type: str = "Large",  # only 'Small' or 'Large' are accepted
+    include_top=True,
+    classes=1000,
+    pooling=None,
+    dropout_rate=0.2,
+    classifier_activation="softmax",
+):
+    """Prepares a MobileNetV3 output block."""
+
+    x = input_tensor
+    if include_top:
+        if dropout_rate > 0:
+            x = layers.Dropout(dropout_rate)(x)
+        x = layers.Conv2D(classes, kernel_size=1, padding="same", name="Logits")(x)
+        x = layers.Flatten()(x)
+        x = layers.Activation(activation=classifier_activation, name="Predictions")(x)
+    else:
+        if pooling == "avg":
+            x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
+        elif pooling == "max":
+            x = layers.GlobalMaxPooling2D(name="max_pool")(x)
+        else:
+            return None
+
+    # Create model.
+    model = models.Model(input_tensor, x, name=f"MobileNetV3{model_type}Output")
+
+    return model
+
+
+def MobileNetV3Split(
+    input_shape=None,
+    alpha: float = 1.0,
+    model_type: str = "Large",
+    max_n_blocks: int = 6,
+    minimalistic: bool = False,
+    mixer_params: tp.Optional[tfc.MobileNetV3MixerParams] = None,
+    include_top: bool = True,
+    pooling=None,
+    classes: int = 1000,
+    dropout_rate: float = 0.2,
+    classifier_activation="softmax",
+    output_all: bool = False,
+    name: tp.Optional[str] = None,
+):
+    """Prepares a model of submodels that is equivalent to a MobileNetV3 model.
+
+    Parameters
+    ----------
+    input_shape : tuple
+        Optional shape tuple, to be specified if you would like to use a model with an input image
+        resolution that is not (224, 224, 3). It should have exactly 3 input channels. You can
+        also omit this option if you would like to infer input_shape from an input_tensor. If you
+        choose to include both input_tensor and input_shape, input_shape will be used if they
+        match; if the shapes do not match, an error is raised. E.g.
+        `(160, 160, 3)` would be one valid value.
+    alpha : float
+        controls the width of the network. This is known as the depth multiplier in the MobileNetV3
+        paper, but the name is kept for consistency with MobileNetV1 in Keras.
+        - If `alpha` < 1.0, proportionally decreases the number
+          of filters in each layer.
+        - If `alpha` > 1.0, proportionally increases the number
+          of filters in each layer.
+        - If `alpha` = 1, default number of filters from the paper
+          are used at each layer.
+    model_type : {'Small', 'Large'}
+        whether it is the small variant or the large variant
+    max_n_blocks : int
+        the maximum number of blocks in the backbone. It is further constrained by the actual
+        maximum number of blocks that the variant can implement.
+    minimalistic : bool
+        In addition to large and small models, this module also contains so-called minimalistic
+        models. These models have the same per-layer dimensions as MobileNetV3; however, they do
+        not utilize any of the advanced blocks (squeeze-and-excite units, hard-swish, and 5x5
+        convolutions). While these models are less efficient on CPU, they are much more
+        performant on GPU/DSP.
+    mixer_params : mt.tfc.MobileNetV3MixerParams, optional
+        parameters for defining the mixer block
+    include_top : bool, default True
+        whether to include the fully-connected layer at the top of the network. Only valid if
+        `mixer_params` is not null.
+    pooling : str, optional
+        Optional pooling mode for feature extraction when `include_top` is False and
+        `mixer_params` is not null.
+        - `None` means that the output of the model will be the 4D tensor output of the last
+          convolutional block.
+        - `avg` means that global average pooling will be applied to the output of the last
+          convolutional block, and thus the output of the model will be a 2D tensor.
+        - `max` means that global max pooling will be applied.
+    classes : int, optional
+        Optional number of classes to classify images into, only to be specified if `mixer_params`
+        is not null and `include_top` is True.
+    dropout_rate : float
+        fraction of the input units to drop on the last layer. Only to be specified if
+        `mixer_params` is not null and `include_top` is True.
+    classifier_activation : object
+        A `str` or callable. The activation function to use on the "top" layer. Ignored unless
+        `mixer_params` is not null and `include_top` is True. Set `classifier_activation=None` to
+        return the logits of the "top" layer. When loading pretrained weights,
+        `classifier_activation` can only be `None` or `"softmax"`.
+    output_all : bool
+        If True, the model returns the output tensor of every submodel other than the input layer.
+        Otherwise, it returns the output tensor of the last submodel.
+    name : str, optional
+        model name, if any. Defaults to 'MobileNetV3LargeSplit' or 'MobileNetV3SmallSplit'.
+
+    Returns
+    -------
+    tensorflow.keras.Model
+        the output MobileNetV3 model, split into submodels
+    """
+
+    input_layer = MobileNetV3Input(input_shape=input_shape)
+    input_block = MobileNetV3Parser(
+        input_layer,
+        model_type=model_type,
+        minimalistic=minimalistic,
+    )
+    x = input_block(input_layer)
+    outputs = [x]
+
+    num_blocks = 5 if model_type == "Large" else 4
+    if num_blocks > max_n_blocks:
+        num_blocks = max_n_blocks
+    for i in range(num_blocks):
+        if model_type == "Large":
+            block = MobileNetV3LargeBlock(i, x, alpha=alpha, minimalistic=minimalistic)
+        else:
+            block = MobileNetV3SmallBlock(i, x, alpha=alpha, minimalistic=minimalistic)
+        x = block(x)
+        if output_all:
+            outputs.append(x)
+        else:
+            outputs = [x]
+
+    if mixer_params is not None:
+        if not isinstance(mixer_params, tfc.MobileNetV3MixerParams):
+            raise tfc.ModelSyntaxError(
+                "Argument 'mixer_params' is not an instance of "
+                "mt.tfc.MobileNetV3MixerParams. Got: {}.".format(type(mixer_params))
+            )
+
+        if model_type == "Large":
+            last_point_ch = 1280
+        else:
+            last_point_ch = 1024
+        mixer_block = MobileNetV3Mixer(
+            x,
+            mixer_params,
+            last_point_ch,
+            alpha=alpha,
+            model_type=model_type,
+            minimalistic=minimalistic,
+        )
+        x = mixer_block(x)
+        if output_all:
+            if isinstance(x, (list, tuple)):
+                outputs.extend(x)
+            else:
+                outputs.append(x)
+        else:
+            if isinstance(x, (list, tuple)):
+                outputs = [x[-1]]
+            else:
+                outputs = [x]
+
+        output_block = MobileNetV3Output(
+            x,
+            model_type=model_type,
+            include_top=include_top,
+            classes=classes,
+            pooling=pooling,
+            dropout_rate=dropout_rate,
+            classifier_activation=classifier_activation,
+        )
+        if output_block is not None:
+            x = output_block(x)
+            if output_all:
+                outputs.append(x)
+            else:
+                outputs = [x]
+
+    # Create model.
+    if name is None:
+        name = f"MobileNetV3{model_type}Split"
+    model = models.Model(input_layer, outputs, name=name)
+
+    return model
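Note: taken together, MobileNetV3Split builds the backbone as a chain of submodels and create_classifier_block supplies a head that returns both logits and probabilities. An end-to-end sketch (not part of the diff; the pooling step, input shape and class count are illustrative choices, and mttf plus a Keras distribution are assumed installed):

# Illustrative only: split backbone + two-output classifier head.
from mt.keras_src import layers, models
from mt.keras_src.applications_src import MobileNetV3Split, create_classifier_block

# Without mixer_params, the split model ends in a 4D feature map.
backbone = MobileNetV3Split(input_shape=(224, 224, 3), model_type="Small")

# Pool the feature map, then attach the head built by create_classifier_block.
feats = layers.GlobalAveragePooling2D()(backbone.output)
head = create_classifier_block(input_dim=int(feats.shape[-1]), n_classes=10)
logits, probs = head(feats)

model = models.Model(backbone.input, [logits, probs])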