mttf 1.3.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47) hide show
  1. mt/keras/__init__.py +8 -0
  2. mt/keras_src/__init__.py +16 -0
  3. mt/keras_src/applications_src/__init__.py +33 -0
  4. mt/keras_src/applications_src/classifier.py +497 -0
  5. mt/keras_src/applications_src/mobilenet_v3_split.py +544 -0
  6. mt/keras_src/applications_src/mobilevit.py +292 -0
  7. mt/keras_src/base.py +28 -0
  8. mt/keras_src/constraints_src/__init__.py +14 -0
  9. mt/keras_src/constraints_src/center_around.py +19 -0
  10. mt/keras_src/layers_src/__init__.py +43 -0
  11. mt/keras_src/layers_src/counter.py +27 -0
  12. mt/keras_src/layers_src/floor.py +24 -0
  13. mt/keras_src/layers_src/identical.py +15 -0
  14. mt/keras_src/layers_src/image_sizing.py +1605 -0
  15. mt/keras_src/layers_src/normed_conv2d.py +239 -0
  16. mt/keras_src/layers_src/simple_mha.py +472 -0
  17. mt/keras_src/layers_src/soft_bend.py +36 -0
  18. mt/keras_src/layers_src/transformer_encoder.py +246 -0
  19. mt/keras_src/layers_src/utils.py +88 -0
  20. mt/keras_src/layers_src/var_regularizer.py +38 -0
  21. mt/tf/__init__.py +10 -0
  22. mt/tf/init.py +25 -0
  23. mt/tf/keras_applications/__init__.py +5 -0
  24. mt/tf/keras_layers/__init__.py +5 -0
  25. mt/tf/mttf_version.py +5 -0
  26. mt/tf/utils.py +44 -0
  27. mt/tf/version.py +5 -0
  28. mt/tfc/__init__.py +291 -0
  29. mt/tfg/__init__.py +8 -0
  30. mt/tfp/__init__.py +11 -0
  31. mt/tfp/real_nvp.py +116 -0
  32. mttf-1.3.6.data/scripts/dmt_build_package_and_upload_to_nexus.sh +25 -0
  33. mttf-1.3.6.data/scripts/dmt_pipi.sh +7 -0
  34. mttf-1.3.6.data/scripts/dmt_twineu.sh +2 -0
  35. mttf-1.3.6.data/scripts/pipi.sh +7 -0
  36. mttf-1.3.6.data/scripts/user_build_package_and_upload_to_nexus.sh +25 -0
  37. mttf-1.3.6.data/scripts/user_pipi.sh +8 -0
  38. mttf-1.3.6.data/scripts/user_twineu.sh +3 -0
  39. mttf-1.3.6.data/scripts/wml_build_package_and_upload_to_nexus.sh +25 -0
  40. mttf-1.3.6.data/scripts/wml_nexus.py +50 -0
  41. mttf-1.3.6.data/scripts/wml_pipi.sh +7 -0
  42. mttf-1.3.6.data/scripts/wml_twineu.sh +2 -0
  43. mttf-1.3.6.dist-info/METADATA +18 -0
  44. mttf-1.3.6.dist-info/RECORD +47 -0
  45. mttf-1.3.6.dist-info/WHEEL +5 -0
  46. mttf-1.3.6.dist-info/licenses/LICENSE +21 -0
  47. mttf-1.3.6.dist-info/top_level.txt +1 -0
@@ -0,0 +1,544 @@
1
+ # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ # pylint: disable=invalid-name
16
+ # pylint: disable=missing-function-docstring
17
+ """MobileNet v3 models split into 5 submodels.
18
+
19
+ The MobileNetV3 model is split into 5 parts:
20
+
21
+ - The input parser block that downsamples once (:func:`MobileNetV3Parser`).
22
+ - Block 0 to 3 that downsample once for each block (:func:`MobileNetV3LargeBlock`
23
+ or :func:`MobileNetV3SmallBlock`). As of 2023/05/15, there's a possibility to have block 4
24
+ for MobileNetV3Large.
25
+ - The mixer block that turns the downsampled grid into a (1,1,feat_dim) batch
26
+ (:func:`MobileNetV3Mixer`).
27
+ - Optionally the output block that may or may not contain the classification head
28
+ (:func:`MobileNetV3Output`).
29
+
30
+ Input arguments follow those of MobileNetV3. One can also use :func:`MobileNetV3Split` to create
31
+ a model of submodels that is theoretically equivalent to the original MobileNetV3 model. However,
32
+ no pre-trained weights exist.
33
+ """
34
+
35
+
36
+ from mt import tp, tfc
37
+ from .. import keras_source
38
+
39
+
40
+ if keras_source == "tf_keras":
41
+ from tf_keras.src.applications.mobilenet_v3 import (
42
+ relu,
43
+ hard_swish,
44
+ _depth,
45
+ _inverted_res_block,
46
+ )
47
+ from tf_keras import backend, models, layers
48
+ elif keras_source == "keras":
49
+ try:
50
+ from keras.applications.mobilenet_v3 import (
51
+ relu,
52
+ hard_swish,
53
+ _depth,
54
+ _inverted_res_block,
55
+ )
56
+ except ImportError:
57
+ from keras.src.applications.mobilenet_v3 import (
58
+ relu,
59
+ hard_swish,
60
+ _depth,
61
+ _inverted_res_block,
62
+ )
63
+ from keras import backend, models
64
+
65
+ try:
66
+ from keras.layers import VersionAwareLayers
67
+
68
+ layers = VersionAwareLayers()
69
+ except ImportError:
70
+ from keras import layers
71
+ elif keras_source == "tensorflow.keras":
72
+ from tensorflow.keras.src.applications.mobilenet_v3 import (
73
+ relu,
74
+ hard_swish,
75
+ _depth,
76
+ _inverted_res_block,
77
+ )
78
+ from tensorflow.python.keras import backend, models
79
+
80
+ try:
81
+ from tensorflow.keras.layers import VersionAwareLayers
82
+
83
+ layers = VersionAwareLayers()
84
+ except ImportError:
85
+ from tensorflow.keras import layers
86
+ else:
87
+ raise ImportError(f"Unknown value '{keras_source}' for variable 'keras_source'.")
88
+
89
+ from tensorflow.python.platform import tf_logging as logging
90
+
91
+
92
def MobileNetV3Input(
    input_shape=None,
):
    """Prepares a MobileNetV3 input layer.

    Defaults to a dynamic ``(None, None, 3)`` RGB shape when no shape is
    given, and rejects any fixed spatial size smaller than 32x32.
    """

    # Default to an arbitrary-size RGB input when nothing is specified.
    shape = (None, None, 3) if input_shape is None else input_shape

    # Locate the spatial axes according to the image data format.
    if backend.image_data_format() == "channels_last":
        row_axis, col_axis = 0, 1
    else:
        row_axis, col_axis = 1, 2
    rows, cols = shape[row_axis], shape[col_axis]

    # Only validate when both spatial dims are known (truthy).
    if rows and cols and min(rows, cols) < 32:
        raise tfc.ModelSyntaxError(
            f"Input size must be at least 32x32; got `input_shape={shape}`"
        )

    return layers.Input(shape=shape)
114
+
115
+
116
def MobileNetV3Parser(
    img_input,
    model_type: str = "Large",  # only 'Small' or 'Large' are accepted
    minimalistic=False,
):
    """Prepares a MobileNetV3 parser block.

    Rescales pixel values from [0, 255] to [-1, 1] and applies the stem
    convolution, downsampling the input once.
    """

    bn_axis = 1 if backend.image_data_format() == "channels_first" else -1
    # Minimalistic models avoid hard-swish for CPU friendliness.
    act = relu if minimalistic else hard_swish

    out = layers.Rescaling(scale=1.0 / 127.5, offset=-1.0)(img_input)
    out = layers.Conv2D(
        16, kernel_size=3, strides=(2, 2), padding="same", use_bias=False, name="Conv"
    )(out)
    out = layers.BatchNormalization(
        axis=bn_axis, epsilon=1e-3, momentum=0.999, name="Conv/BatchNorm"
    )(out)
    out = act(out)

    # Create model.
    return models.Model(img_input, out, name=f"MobileNetV3{model_type}Parser")
144
+
145
+
146
def MobileNetV3SmallBlock(
    block_id: int,  # only 0 to 3 are accepted here
    input_tensor,  # input tensor for the block
    alpha=1.0,
    minimalistic=False,
):
    """Prepares a MobileNetV3Small downsampling block.

    Each block downsamples once; the layer settings mirror the corresponding
    slice of the stock MobileNetV3Small inverted-residual stack.
    """

    def depth(ch):
        return _depth(ch * alpha)

    if minimalistic:
        kernel, activation, se_ratio = 3, relu, None
    else:
        kernel, activation, se_ratio = 5, hard_swish, 0.25

    x = input_tensor
    if block_id == 0:
        x = _inverted_res_block(x, 1, depth(16), 3, 2, se_ratio, relu, 0)
    elif block_id == 1:
        for expansion, stride, idx in ((72.0 / 16, 2, 1), (88.0 / 24, 1, 2)):
            x = _inverted_res_block(x, expansion, depth(24), 3, stride, None, relu, idx)
    elif block_id == 2:
        for expansion, ch, stride, idx in (
            (4, 40, 2, 3),
            (6, 40, 1, 4),
            (6, 40, 1, 5),
            (3, 48, 1, 6),
            (3, 48, 1, 7),
        ):
            x = _inverted_res_block(
                x, expansion, depth(ch), kernel, stride, se_ratio, activation, idx
            )
    else:
        for stride, idx in ((2, 8), (1, 9), (1, 10)):
            x = _inverted_res_block(
                x, 6, depth(96), kernel, stride, se_ratio, activation, idx
            )

    # Create model.
    return models.Model(input_tensor, x, name=f"MobileNetV3SmallBlock{block_id}")
187
+
188
+
189
def MobileNetV3LargeBlock(
    block_id: int,  # only 0 to 4 are accepted here. 4 is only available as of 2023/05/15
    input_tensor,  # input tensor for the block
    alpha=1.0,
    minimalistic=False,
):
    """Prepares a MobileNetV3Large downsampling block.

    Each block downsamples once; the layer settings mirror the corresponding
    slice of the stock MobileNetV3Large inverted-residual stack, with an
    extra depth-320 block available as block 4.
    """

    def depth(ch):
        return _depth(ch * alpha)

    if minimalistic:
        kernel, activation, se_ratio = 3, relu, None
    else:
        kernel, activation, se_ratio = 5, hard_swish, 0.25

    x = input_tensor
    if block_id == 0:
        x = _inverted_res_block(x, 1, depth(16), 3, 1, None, relu, 0)
        x = _inverted_res_block(x, 4, depth(24), 3, 2, None, relu, 1)
        x = _inverted_res_block(x, 3, depth(24), 3, 1, None, relu, 2)
    elif block_id == 1:
        for stride, idx in ((2, 3), (1, 4), (1, 5)):
            x = _inverted_res_block(x, 3, depth(40), kernel, stride, se_ratio, relu, idx)
    elif block_id == 2:
        for expansion, ch, ratio, idx in (
            (6, 80, None, 6),
            (2.5, 80, None, 7),
            (2.3, 80, None, 8),
            (2.3, 80, None, 9),
            (6, 112, se_ratio, 10),
            (6, 112, se_ratio, 11),
        ):
            stride = 2 if idx == 6 else 1
            x = _inverted_res_block(
                x, expansion, depth(ch), 3, stride, ratio, activation, idx
            )
    elif block_id == 3:
        for stride, idx in ((2, 12), (1, 13), (1, 14)):
            x = _inverted_res_block(
                x, 6, depth(160), kernel, stride, se_ratio, activation, idx
            )
    else:
        for stride, idx in ((2, 15), (1, 16), (1, 17)):
            x = _inverted_res_block(
                x, 6, depth(320), kernel, stride, se_ratio, activation, idx
            )

    # Create model.
    return models.Model(input_tensor, x, name=f"MobileNetV3LargeBlock{block_id}")
238
+
239
+
240
def MobileNetV3Mixer(
    input_tensor,
    params: tfc.MobileNetV3MixerParams,
    last_point_ch,
    alpha=1.0,
    model_type: str = "Large",  # only 'Small' or 'Large' are accepted
    minimalistic=False,
):
    """Prepares a MobileNetV3 mixer block.

    Turns the downsampled feature grid into a (1, 1, feat_dim) batch using
    one of three variants selected by ``params.variant``: the original
    MobileNetV3 head ('mobilenet'), a global max-pool ('maxpool'), or a
    cascade of multi-head-attention pooling layers ('mhapool').

    Parameters
    ----------
    input_tensor : tensor
        input tensor for the block
    params : mt.tfc.MobileNetV3MixerParams
        parameters defining the mixer variant and its options
    last_point_ch : int
        number of channels of the last pointwise conv ('mobilenet' variant)
    alpha : float
        the MobileNetV3 width multiplier
    model_type : {'Small', 'Large'}
        whether it is the small variant or the large variant
    minimalistic : bool
        whether to use the minimalistic settings (relu instead of hard-swish)

    Returns
    -------
    keras model
        the mixer submodel

    Raises
    ------
    mt.tfc.ModelSyntaxError
        on invalid parameters or an unknown variant
    """

    x = input_tensor
    channel_axis = 1 if backend.image_data_format() == "channels_first" else -1

    if params.variant == "mobilenet":
        # Only the activation depends on 'minimalistic' in this head.
        activation = relu if minimalistic else hard_swish

        last_conv_ch = _depth(backend.int_shape(x)[channel_axis] * 6)

        # if the width multiplier is greater than 1 we
        # increase the number of output channels
        if alpha > 1.0:
            last_point_ch = _depth(last_point_ch * alpha)
        x = layers.Conv2D(
            last_conv_ch, kernel_size=1, padding="same", use_bias=False, name="Conv_1"
        )(x)
        x = layers.BatchNormalization(
            axis=channel_axis, epsilon=1e-3, momentum=0.999, name="Conv_1/BatchNorm"
        )(x)
        x = activation(x)
        x = layers.GlobalAveragePooling2D()(x)
        if channel_axis == 1:
            x = layers.Reshape((last_conv_ch, 1, 1))(x)
        else:
            x = layers.Reshape((1, 1, last_conv_ch))(x)
        x = layers.Conv2D(
            last_point_ch, kernel_size=1, padding="same", use_bias=True, name="Conv_2"
        )(x)
        x = activation(x)
        outputs = [x]  # BUGFIX: 'outputs' was never assigned in this branch
    elif params.variant == "maxpool":
        # BUGFIX: GlobalMaxPool2D is a layer class and must be instantiated
        # before being applied; the original code passed the tensor to the
        # constructor (where it would be taken as 'data_format').
        x = layers.GlobalMaxPool2D()(x)
        outputs = [x]  # BUGFIX: 'outputs' was never assigned in this branch
    elif params.variant == "mhapool":
        if backend.image_data_format() == "channels_first":
            raise tfc.ModelSyntaxError(
                "Mixer variant 'mhapool' requires channels_last image data format."
            )

        mhapool_params = params.mhapool_cascade_params
        if not isinstance(mhapool_params, tfc.MHAPool2DCascadeParams):
            raise tfc.ModelSyntaxError(
                "Parameter 'params.mhapool_cascade_params' is not of type "
                "mt.tfc.MHAPool2DCascadeParams. Got: {}.".format(type(mhapool_params))
            )

        from ..layers_src import MHAPool2D

        n_heads = mhapool_params.n_heads
        k = 0
        outputs = []
        # Repeatedly pool until the spatial grid collapses to 1x1.
        # NOTE(review): assumes static (known) spatial dims — 'h <= 1' would
        # fail on dynamic shapes; confirm callers always fix the input size.
        while True:
            h = x.shape[1]
            w = x.shape[2]

            if h <= 1 and w <= 1:
                break

            c = x.shape[3]
            key_dim = (c + n_heads - 1) // n_heads
            value_dim = int(key_dim * mhapool_params.expansion_factor)
            k += 1
            block_name = f"MHAPool2DCascade_block{k}"
            if k > mhapool_params.max_num_pooling_layers:  # GlobalMaxPool2D
                x = layers.GlobalMaxPooling2D(
                    keepdims=True, name=block_name + "/GlobalMaxPool"
                )(x)
            else:  # MHAPool2D
                x = layers.LayerNormalization()(x)
                # The last pooling step may use a different activation.
                if h <= 2 and w <= 2:
                    activation = mhapool_params.final_activation
                else:
                    activation = mhapool_params.activation
                x = MHAPool2D(
                    n_heads,
                    key_dim,
                    value_dim=value_dim,
                    pooling=mhapool_params.pooling,
                    dropout=mhapool_params.dropout,
                    name=block_name + "/MHAPool",
                )(x)

            if mhapool_params.output_all:
                outputs.append(x)
            else:
                outputs = [x]
    else:
        raise tfc.ModelSyntaxError(
            "Unknown mixer variant: '{}'.".format(params.variant)
        )

    # Create model.
    model = models.Model(
        input_tensor, outputs, name="MobileNetV3{}Mixer".format(model_type)
    )

    return model
352
+
353
+
354
def MobileNetV3Output(
    input_tensor,
    model_type: str = "Large",  # only 'Small' or 'Large' are accepted
    include_top=True,
    classes=1000,
    pooling=None,
    dropout_rate=0.2,
    classifier_activation="softmax",
):
    """Prepares a MobileNetV3 output block.

    Builds either the classification head (when ``include_top`` is True) or a
    global pooling layer. Returns None when ``include_top`` is False and
    ``pooling`` is neither 'avg' nor 'max', signalling that no output block
    is needed.
    """

    out = input_tensor
    if include_top:
        # Classification head: dropout -> 1x1 conv logits -> activation.
        if dropout_rate > 0:
            out = layers.Dropout(dropout_rate)(out)
        out = layers.Conv2D(classes, kernel_size=1, padding="same", name="Logits")(out)
        out = layers.Flatten()(out)
        out = layers.Activation(activation=classifier_activation, name="Predictions")(out)
    elif pooling == "avg":
        out = layers.GlobalAveragePooling2D(name="avg_pool")(out)
    elif pooling == "max":
        out = layers.GlobalMaxPooling2D(name="max_pool")(out)
    else:
        # No head requested: the caller is expected to skip this block.
        return None

    # Create model.
    return models.Model(input_tensor, out, name=f"MobileNetV3{model_type}Output")
384
+
385
+
386
def MobileNetV3Split(
    input_shape=None,
    alpha: float = 1.0,
    model_type: str = "Large",
    max_n_blocks: int = 6,
    minimalistic: bool = False,
    mixer_params: tp.Optional[tfc.MobileNetV3MixerParams] = None,
    include_top: bool = True,
    pooling=None,
    classes: int = 1000,
    dropout_rate: float = 0.2,
    classifier_activation="softmax",
    output_all: bool = False,
    name: tp.Optional[str] = None,
):
    """Prepares a model of submodels which is equivalent to a MobileNetV3 model.

    Parameters
    ----------
    input_shape : tuple
        Optional shape tuple, to be specified if you would like to use a model with an input
        image resolution that is not (224, 224, 3). It should have exactly 3 input channels.
        You can also omit this option to get an input of dynamic spatial size. E.g.
        `(160, 160, 3)` would be one valid value.
    alpha : float
        controls the width of the network. This is known as the depth multiplier in the
        MobileNetV3 paper, but the name is kept for consistency with MobileNetV1 in Keras.
        - If `alpha` < 1.0, proportionally decreases the number of filters in each layer.
        - If `alpha` > 1.0, proportionally increases the number of filters in each layer.
        - If `alpha` = 1, default number of filters from the paper are used at each layer.
    model_type : {'Small', 'Large'}
        whether it is the small variant or the large variant
    max_n_blocks : int
        the maximum number of blocks in the backbone. It is further constrained by the actual
        maximum number of blocks that the variant can implement.
    minimalistic : bool
        In addition to large and small models this module also contains so-called minimalistic
        models; these models have the same per-layer dimensions characteristic as MobilenetV3
        however, they do not utilize any of the advanced blocks (squeeze-and-excite units,
        hard-swish, and 5x5 convolutions). While these models are less efficient on CPU, they
        are much more performant on GPU/DSP.
    mixer_params : mt.tfc.MobileNetV3MixerParams, optional
        parameters for defining the mixer block
    include_top : bool, default True
        whether to include the fully-connected layer at the top of the network. Only valid if
        `mixer_params` is not null.
    pooling : str, optional
        Optional pooling mode for feature extraction when `include_top` is False and
        `mixer_params` is not null.
        - `None` means that the output of the model will be the 4D tensor output of the last
          convolutional block.
        - `avg` means that global average pooling will be applied to the output of the last
          convolutional block, and thus the output of the model will be a 2D tensor.
        - `max` means that global max pooling will be applied.
    classes : int, optional
        Optional number of classes to classify images into, only to be specified if
        `mixer_params` is not null and `include_top` is True.
    dropout_rate : float
        fraction of the input units to drop on the last layer. Only to be specified if
        `mixer_params` is not null and `include_top` is True.
    classifier_activation : object
        A `str` or callable. The activation function to use on the "top" layer. Ignored unless
        `mixer_params` is not null and `include_top` is True. Set `classifier_activation=None`
        to return the logits of the "top" layer. When loading pretrained weights,
        `classifier_activation` can only be `None` or `"softmax"`.
    output_all : bool
        If True, the model returns the output tensor of every submodel other than the input
        layer. Otherwise, it returns the output tensor of the last submodel.
    name : str, optional
        model name, if any. Default to 'MobileNetV3LargeSplit' or 'MobileNetV3SmallSplit'.

    Returns
    -------
    tensorflow.keras.Model
        the output MobileNetV3 model split into submodels
    """

    input_layer = MobileNetV3Input(input_shape=input_shape)
    input_block = MobileNetV3Parser(
        input_layer,
        model_type=model_type,
        minimalistic=minimalistic,
    )
    x = input_block(input_layer)
    outputs = [x]

    # Large has 5 downsampling blocks (0..4), Small has 4 (0..3); both are
    # further capped by max_n_blocks.
    num_blocks = 5 if model_type == "Large" else 4
    if num_blocks > max_n_blocks:
        num_blocks = max_n_blocks
    for i in range(num_blocks):
        if model_type == "Large":
            block = MobileNetV3LargeBlock(i, x, alpha=alpha, minimalistic=minimalistic)
        else:
            block = MobileNetV3SmallBlock(i, x, alpha=alpha, minimalistic=minimalistic)
        x = block(x)
        if output_all:
            outputs.append(x)
        else:
            outputs = [x]

    if mixer_params is not None:
        if not isinstance(mixer_params, tfc.MobileNetV3MixerParams):
            raise tfc.ModelSyntaxError(
                "Argument 'mixer_params' is not an instance of "
                "mt.tfc.MobileNetV3MixerParams. Got: {}.".format(type(mixer_params))
            )

        last_point_ch = 1280 if model_type == "Large" else 1024
        mixer_block = MobileNetV3Mixer(
            x,
            mixer_params,
            last_point_ch,
            alpha=alpha,
            model_type=model_type,
            minimalistic=minimalistic,
        )
        x = mixer_block(x)
        # The mixer may return a list of tensors (e.g. the 'mhapool' variant
        # with output_all=True). Record them in 'outputs' and, as a BUGFIX,
        # normalise 'x' back to a single tensor: previously a list 'x' was
        # fed directly into MobileNetV3Output, which expects one tensor.
        if isinstance(x, (list, tuple)):
            if output_all:
                outputs.extend(x)
            else:
                outputs = [x[-1]]
            x = x[-1]
        else:
            if output_all:
                outputs.append(x)
            else:
                outputs = [x]

        output_block = MobileNetV3Output(
            x,
            model_type=model_type,
            include_top=include_top,
            classes=classes,
            pooling=pooling,
            dropout_rate=dropout_rate,
            classifier_activation=classifier_activation,
        )
        # MobileNetV3Output returns None when no head is requested.
        if output_block is not None:
            x = output_block(x)
            if output_all:
                outputs.append(x)
            else:
                outputs = [x]

    # Create model.
    if name is None:
        # BUGFIX: was 'MobilenetV3{model_type}Split' (lowercase 'n'),
        # inconsistent with the documented default and the rest of the file.
        name = f"MobileNetV3{model_type}Split"
    model = models.Model(input_layer, outputs, name=name)

    return model