mttf 0.31.202309060229__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mttf has been flagged as possibly problematic.

mt/tf/mttf_version.py CHANGED
@@ -1,11 +1,5 @@
- VERSION_YEAR = 2023
- VERSION_MONTH = int('09')
- VERSION_DAY = int('06')
- VERSION_HOUR = int('02')
- VERSION_MINUTE = int('29')
- MAJOR_VERSION = 0
- MINOR_VERSION = 31
- PATCH_VERSION = 202309060229
- version_date = '2023/09/06 02:29'
+ MAJOR_VERSION = 1
+ MINOR_VERSION = 0
+ PATCH_VERSION = 0
  version = '{}.{}.{}'.format(MAJOR_VERSION, MINOR_VERSION, PATCH_VERSION)
- __all__ = ['MAJOR_VERSION', 'MINOR_VERSION', 'PATCH_VERSION', 'version_date', 'version']
+ __all__ = ['MAJOR_VERSION', 'MINOR_VERSION', 'PATCH_VERSION', 'version']
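The version scheme switches from timestamp-based (0.31.202309060229) to semantic versioning, and version_date is dropped from the module's exports. A quick sanity check, not part of the package, that PEP 440 still orders the new release after the old one, so pip treats 1.0.0 as an upgrade:

# Sanity check: PEP 440 orders 1.0.0 after the timestamp-based 0.x release.
from packaging.version import Version  # third-party 'packaging' library

old = Version("0.31.202309060229")
new = Version("1.0.0")
assert new > old  # the first release segment (1 > 0) decides the comparison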
@@ -0,0 +1,2 @@
+ #/!bin/bash
+ wml_nexus.py pip3 install --pre --trusted-host localhost --extra-index https://localhost:5443/repository/minhtri-pypi-dev/simple/ $@
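The added helper (per the RECORD below, most likely dmt_pipi.sh) wraps pip3 with a private Nexus index; note the malformed shebang (#/!bin/bash) and the unquoted $@ in the published script. A rough, hypothetical Python sketch of what it does, with the URL taken from the script itself:

# Hypothetical sketch of the helper's effect (not shipped in the wheel).
import subprocess
import sys

def pip_install_dev(*packages: str) -> None:
    # Mirrors: pip3 install --pre --trusted-host localhost --extra-index-url <nexus> ...
    subprocess.check_call([
        sys.executable, "-m", "pip", "install", "--pre",
        "--trusted-host", "localhost",
        "--extra-index-url",
        "https://localhost:5443/repository/minhtri-pypi-dev/simple/",
        *packages,
    ])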
@@ -1,12 +1,12 @@
  Metadata-Version: 2.1
  Name: mttf
- Version: 0.31.202309060229
+ Version: 1.0.0
  Summary: A package to detect and monkey-patch TensorFlow, for Minh-Tri Pham
  Home-page: https://github.com/inteplus/mttf
  Author: ['Minh-Tri Pham']
  Project-URL: Documentation, https://mtdoc.readthedocs.io/en/latest/mt.tf/mt.tf.html
  Project-URL: Source Code, https://github.com/inteplus/mttf
  License-File: LICENSE
- Requires-Dist: mtbase (>=4.6)
- Requires-Dist: mtnet (>=0.1)
+ Requires-Dist: mtbase (>=4.26)
+ Requires-Dist: mtnet (>=0.2)
 
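Both dependency floors are raised (mtbase >=4.6 to >=4.26, mtnet >=0.1 to >=0.2). A small sketch, assuming the third-party packaging library is available, for checking whether an existing environment already meets the new constraints:

# Check installed versions against the bumped requirements.
from importlib.metadata import PackageNotFoundError, version
from packaging.specifiers import SpecifierSet

REQUIRES = {"mtbase": SpecifierSet(">=4.26"), "mtnet": SpecifierSet(">=0.2")}

for name, spec in REQUIRES.items():
    try:
        installed = version(name)
    except PackageNotFoundError:
        print(f"{name}: not installed (needs {spec})")
        continue
    status = "ok" if installed in spec else f"needs {spec}"
    print(f"{name} {installed}: {status}")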
@@ -1,14 +1,11 @@
  mt/tf/__init__.py,sha256=M8xiJNdrAUJZgiZTOQOdfkehjO-CYzGpoxh5HVGBkms,338
  mt/tf/init.py,sha256=faUpNjMZ9fab5wGwNtPgGB9fwPRNh86yZsTmXb26dAk,2867
- mt/tf/mttf_version.py,sha256=lUdwptqi_VNmhWN8y5pLZa99-uH9Ks_E9j3RcMB08SM,397
+ mt/tf/mttf_version.py,sha256=ha53i-H9pE-crufFttUECgXHwPvam07zMKzApUts1Gs,206
  mt/tf/utils.py,sha256=Copl5VM0PpuFUchK-AcBuGO6QitDwHcEs4FruZb2GAI,2460
  mt/tf/keras_applications/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  mt/tf/keras_applications/efficientnet_v2.py,sha256=DYXP567HNUp2SO0_pXUfzO9-WqlTiGmXApAawkKaetc,39204
- mt/tf/keras_applications/mobilenet_v3.py,sha256=wwAKizlhWm2YNzIk0DyH2UwYDE6Xk0DUuEU8KWNr08M,22568
  mt/tf/keras_applications/mobilenet_v3_split.py,sha256=GDEBHo-blR1Q3N8R89USZ8zDP0nq_oLzPNAnoIgkzgo,19305
  mt/tf/keras_applications/mobilevit.py,sha256=FR1eMN4xg-yZ8Orr4ALOYmzCmkoBu7cVgTaK5sc4gsc,9806
- mt/tf/keras_engine/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- mt/tf/keras_engine/hdf5_format.py,sha256=f3DK63dAzif2szAPbHVtcohiUB3tC_JVgd3lu4cUhPQ,33446
  mt/tf/keras_layers/__init__.py,sha256=fSfhKmDz4mIHUYXgRrditWY_aAkgWGM_KjmAilOauXg,578
  mt/tf/keras_layers/counter.py,sha256=KsM25lRvxQZZUz0GLbeUNoPLwVoLF1MK0tQPn9-dVWQ,858
  mt/tf/keras_layers/floor.py,sha256=cSP-2B3o50ffTFMHZy-C-r7jWA6OFLMyaxTz9kAlsiM,513
@@ -18,19 +15,18 @@ mt/tf/keras_layers/normed_conv2d.py,sha256=UVV5x6jpjGeOqx6DlSbvdV1O7nO_64-wKT3rX
  mt/tf/keras_layers/simple_mha.py,sha256=Xv4lyFxDWRKYLmsPUUNJeZQYi8rLya7HlJsaXGbmV0E,19710
  mt/tf/keras_layers/utils.py,sha256=PzlrGrtgPMzTaNuY3p3QwnVSqsMnZ6NIbRSEqXEamGA,2828
  mt/tf/keras_layers/var_regularizer.py,sha256=Afe5Mpd8TynsXVD06RtEfFatuSKmruKcNze4_C2E2po,1108
- mt/tf/keras_optimizers/lr_extra.py,sha256=We0k3Uj2kBb18_V1Cw0wuAofLZoSuSekdpCBCQ6WoEM,3804
  mt/tfc/__init__.py,sha256=WnGNywMCwmmhWQaGqconT5f9n6IE5jDGflbD92E5iH0,8108
  mt/tfg/__init__.py,sha256=6Ly2QImAyQTsg_ZszuAuK_L2n56v89Cix9yYmMVk0CM,304
  mt/tfp/__init__.py,sha256=AQkGCkmDRwswEt3qoOSpxe-fZekx78sHHBs2ZVz33gc,383
  mt/tfp/real_nvp.py,sha256=U9EmkXGqFcvtS2yeh5_RgbKlVKKlGFGklAb7Voyazz4,4440
- mttf-0.31.202309060229.data/scripts/dmt_pipi.sh,sha256=jJn_6h8VZNbZJhJ-XO5WWioXyPc0u3UHBTHTmx7vymM,139
- mttf-0.31.202309060229.data/scripts/dmt_twineu.sh,sha256=ER8Z1it7hVdPtkZ0TA0SkzwqIQSUlFa_sO95H07ImTI,159
- mttf-0.31.202309060229.data/scripts/twine_trusted.py,sha256=ZEvTqT5ydAiOJBPQaUOMsbxX7qcGCjgGQeglzOwYLFI,2196
- mttf-0.31.202309060229.data/scripts/wml_nexus.py,sha256=geZih8al6iJhkMRYhIO7lG_oHIGYTddl0EGBPgNsVR4,1377
- mttf-0.31.202309060229.data/scripts/wml_pipi.sh,sha256=wDX4_7oKcYbxyKJz-wWvbkUY0NEuBuf0MpBfpmjZk60,133
- mttf-0.31.202309060229.data/scripts/wml_twineu.sh,sha256=FPBvsXM-81P11G2BCJtxVUBervfHQYYrXf0GjZeMt7w,146
- mttf-0.31.202309060229.dist-info/LICENSE,sha256=e_JtcszdGZ2ZGfjcymTGrcxFj_9XPicZOVtnsrPvruk,1070
- mttf-0.31.202309060229.dist-info/METADATA,sha256=ch0tKJBm0dmsZyusYhYkbqZ9JMgSDY6vu1uBqPHP1Qg,431
- mttf-0.31.202309060229.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
- mttf-0.31.202309060229.dist-info/top_level.txt,sha256=WcqGFu9cV7iMZg09iam8eNxUvGpLSKKF2Iubf6SJVOo,3
- mttf-0.31.202309060229.dist-info/RECORD,,
+ mttf-1.0.0.data/scripts/dmt_pipi.sh,sha256=VG5FvmtuR7_v_zhd52hI0mbyiiPCW_TWIpB_Oce0zm4,145
+ mttf-1.0.0.data/scripts/dmt_twineu.sh,sha256=ER8Z1it7hVdPtkZ0TA0SkzwqIQSUlFa_sO95H07ImTI,159
+ mttf-1.0.0.data/scripts/twine_trusted.py,sha256=ZEvTqT5ydAiOJBPQaUOMsbxX7qcGCjgGQeglzOwYLFI,2196
+ mttf-1.0.0.data/scripts/wml_nexus.py,sha256=geZih8al6iJhkMRYhIO7lG_oHIGYTddl0EGBPgNsVR4,1377
+ mttf-1.0.0.data/scripts/wml_pipi.sh,sha256=wDX4_7oKcYbxyKJz-wWvbkUY0NEuBuf0MpBfpmjZk60,133
+ mttf-1.0.0.data/scripts/wml_twineu.sh,sha256=FPBvsXM-81P11G2BCJtxVUBervfHQYYrXf0GjZeMt7w,146
+ mttf-1.0.0.dist-info/LICENSE,sha256=e_JtcszdGZ2ZGfjcymTGrcxFj_9XPicZOVtnsrPvruk,1070
+ mttf-1.0.0.dist-info/METADATA,sha256=-MktgEiCR4O_1QDuhjucJLk1IIfnC7mlGYpitwWrQY8,420
+ mttf-1.0.0.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
+ mttf-1.0.0.dist-info/top_level.txt,sha256=WcqGFu9cV7iMZg09iam8eNxUvGpLSKKF2Iubf6SJVOo,3
+ mttf-1.0.0.dist-info/RECORD,,
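For reference, the sha256= values in RECORD follow the wheel spec (PEP 376/427): a sha256 digest, urlsafe-base64-encoded with trailing padding stripped. Any entry above can be re-verified against an unpacked wheel with a few lines:

# Recompute a RECORD-style hash for a file extracted from the wheel.
import base64
import hashlib

def record_hash(path: str) -> str:
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# record_hash("mt/tf/mttf_version.py") on the 1.0.0 wheel should print
# "sha256=ha53i-H9pE-crufFttUECgXHwPvam07zMKzApUts1Gs"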
mt/tf/keras_applications/mobilenet_v3.py DELETED
@@ -1,582 +0,0 @@
- # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- # ==============================================================================
- # pylint: disable=invalid-name
- # pylint: disable=missing-function-docstring
- """MobileNet v3 models for Keras."""
-
- from tensorflow.python.keras import backend
- from tensorflow.python.keras import models
- from tensorflow.python.keras.applications import imagenet_utils
- from tensorflow.python.keras.layers import VersionAwareLayers
- from tensorflow.python.keras.utils import data_utils
- from tensorflow.python.keras.utils import layer_utils
- from tensorflow.python.lib.io import file_io
- from tensorflow.python.platform import tf_logging as logging
- from tensorflow.python.util.tf_export import keras_export
-
-
- # TODO(scottzhu): Change this to the GCS path.
- BASE_WEIGHT_PATH = ('https://storage.googleapis.com/tensorflow/'
-                     'keras-applications/mobilenet_v3/')
- WEIGHTS_HASHES = {
-     'large_224_0.75_float': ('765b44a33ad4005b3ac83185abf1d0eb',
-                              'e7b4d1071996dd51a2c2ca2424570e20'),
-     'large_224_1.0_float': ('59e551e166be033d707958cf9e29a6a7',
-                             '037116398e07f018c0005ffcb0406831'),
-     'large_minimalistic_224_1.0_float': ('675e7b876c45c57e9e63e6d90a36599c',
-                                          'a2c33aed672524d1d0b4431808177695'),
-     'small_224_0.75_float': ('cb65d4e5be93758266aa0a7f2c6708b7',
-                              '4d2fe46f1c1f38057392514b0df1d673'),
-     'small_224_1.0_float': ('8768d4c2e7dee89b9d02b2d03d65d862',
-                             'be7100780f875c06bcab93d76641aa26'),
-     'small_minimalistic_224_1.0_float': ('99cd97fb2fcdad2bf028eb838de69e37',
-                                          '20d4e357df3f7a6361f3a288857b1051'),
- }
-
- layers = VersionAwareLayers()
-
-
- BASE_DOCSTRING = """Instantiates the {name} architecture.
-
-   Reference:
-   - [Searching for MobileNetV3](
-       https://arxiv.org/pdf/1905.02244.pdf) (ICCV 2019)
-
-   The following table describes the performance of MobileNets v3:
-   ------------------------------------------------------------------------
-   MACs stands for Multiply Adds
-
-   |Classification Checkpoint|MACs(M)|Parameters(M)|Top1 Accuracy|Pixel1 CPU(ms)|
-   |---|---|---|---|---|
-   | mobilenet_v3_large_1.0_224 | 217 | 5.4 | 75.6 | 51.2 |
-   | mobilenet_v3_large_0.75_224 | 155 | 4.0 | 73.3 | 39.8 |
-   | mobilenet_v3_large_minimalistic_1.0_224 | 209 | 3.9 | 72.3 | 44.1 |
-   | mobilenet_v3_small_1.0_224 | 66 | 2.9 | 68.1 | 15.8 |
-   | mobilenet_v3_small_0.75_224 | 44 | 2.4 | 65.4 | 12.8 |
-   | mobilenet_v3_small_minimalistic_1.0_224 | 65 | 2.0 | 61.9 | 12.2 |
-
-   For image classification use cases, see
-   [this page for detailed examples](
-   https://keras.io/api/applications/#usage-examples-for-image-classification-models).
-
-   For transfer learning use cases, make sure to read the
-   [guide to transfer learning & fine-tuning](
-   https://keras.io/guides/transfer_learning/).
-
-   Note: each Keras Application expects a specific kind of input preprocessing.
-   For ModelNetV3, input preprocessing is included as part of the model
-   (as a `Rescaling` layer), and thus
-   `tf.keras.applications.mobilenet_v3.preprocess_input` is actually a
-   pass-through function. ModelNetV3 models expect their inputs to be float
-   tensors of pixels with values in the [0-255] range.
-
-   Args:
-     input_shape: Optional shape tuple, to be specified if you would
-       like to use a model with an input image resolution that is not
-       (224, 224, 3).
-       It should have exactly 3 inputs channels (224, 224, 3).
-       You can also omit this option if you would like
-       to infer input_shape from an input_tensor.
-       If you choose to include both input_tensor and input_shape then
-       input_shape will be used if they match, if the shapes
-       do not match then we will throw an error.
-       E.g. `(160, 160, 3)` would be one valid value.
-     alpha: controls the width of the network. This is known as the
-       depth multiplier in the MobileNetV3 paper, but the name is kept for
-       consistency with MobileNetV1 in Keras.
-       - If `alpha` < 1.0, proportionally decreases the number
-         of filters in each layer.
-       - If `alpha` > 1.0, proportionally increases the number
-         of filters in each layer.
-       - If `alpha` = 1, default number of filters from the paper
-         are used at each layer.
-     minimalistic: In addition to large and small models this module also
-       contains so-called minimalistic models, these models have the same
-       per-layer dimensions characteristic as MobilenetV3 however, they don't
-       utilize any of the advanced blocks (squeeze-and-excite units, hard-swish,
-       and 5x5 convolutions). While these models are less efficient on CPU, they
-       are much more performant on GPU/DSP.
-     include_top: Boolean, whether to include the fully-connected
-       layer at the top of the network. Defaults to `True`.
-     weights: String, one of `None` (random initialization),
-       'imagenet' (pre-training on ImageNet),
-       or the path to the weights file to be loaded.
-     input_tensor: Optional Keras tensor (i.e. output of
-       `layers.Input()`)
-       to use as image input for the model.
-     pooling: String, optional pooling mode for feature extraction
-       when `include_top` is `False`.
-       - `None` means that the output of the model
-         will be the 4D tensor output of the
-         last convolutional block.
-       - `avg` means that global average pooling
-         will be applied to the output of the
-         last convolutional block, and thus
-         the output of the model will be a
-         2D tensor.
-       - `max` means that global max pooling will
-         be applied.
-     classes: Integer, optional number of classes to classify images
-       into, only to be specified if `include_top` is True, and
-       if no `weights` argument is specified.
-     dropout_rate: fraction of the input units to drop on the last layer.
-     classifier_activation: A `str` or callable. The activation function to use
-       on the "top" layer. Ignored unless `include_top=True`. Set
-       `classifier_activation=None` to return the logits of the "top" layer.
-       When loading pretrained weights, `classifier_activation` can only
-       be `None` or `"softmax"`.
-
-   Call arguments:
-     inputs: A floating point `numpy.array` or a `tf.Tensor`, 4D with 3 color
-       channels, with values in the range [0, 255].
-
-   Returns:
-     A `keras.Model` instance.
- """
-
-
- def MobileNetV3(stack_fn,
-                 last_point_ch,
-                 input_shape=None,
-                 alpha=1.0,
-                 model_type='large',
-                 minimalistic=False,
-                 include_top=True,
-                 weights='imagenet',
-                 input_tensor=None,
-                 classes=1000,
-                 pooling=None,
-                 dropout_rate=0.2,
-                 classifier_activation='softmax'):
-   if not (weights in {'imagenet', None} or file_io.file_exists_v2(weights)):
-     raise ValueError('The `weights` argument should be either '
-                      '`None` (random initialization), `imagenet` '
-                      '(pre-training on ImageNet), '
-                      'or the path to the weights file to be loaded.')
-
-   if weights == 'imagenet' and include_top and classes != 1000:
-     raise ValueError('If using `weights` as `"imagenet"` with `include_top` '
-                      'as true, `classes` should be 1000')
-
-   # Determine proper input shape and default size.
-   # If both input_shape and input_tensor are used, they should match
-   if input_shape is not None and input_tensor is not None:
-     try:
-       is_input_t_tensor = backend.is_keras_tensor(input_tensor)
-     except ValueError:
-       try:
-         is_input_t_tensor = backend.is_keras_tensor(
-             layer_utils.get_source_inputs(input_tensor))
-       except ValueError:
-         raise ValueError('input_tensor: ', input_tensor,
-                          'is not type input_tensor')
-     if is_input_t_tensor:
-       if backend.image_data_format() == 'channels_first':
-         if backend.int_shape(input_tensor)[1] != input_shape[1]:
-           raise ValueError('input_shape: ', input_shape, 'and input_tensor: ',
-                            input_tensor,
-                            'do not meet the same shape requirements')
-       else:
-         if backend.int_shape(input_tensor)[2] != input_shape[1]:
-           raise ValueError('input_shape: ', input_shape, 'and input_tensor: ',
-                            input_tensor,
-                            'do not meet the same shape requirements')
-     else:
-       raise ValueError('input_tensor specified: ', input_tensor,
-                        'is not a keras tensor')
-
-   # If input_shape is None, infer shape from input_tensor
-   if input_shape is None and input_tensor is not None:
-
-     try:
-       backend.is_keras_tensor(input_tensor)
-     except ValueError:
-       raise ValueError('input_tensor: ', input_tensor, 'is type: ',
-                        type(input_tensor), 'which is not a valid type')
-
-     if backend.is_keras_tensor(input_tensor):
-       if backend.image_data_format() == 'channels_first':
-         rows = backend.int_shape(input_tensor)[2]
-         cols = backend.int_shape(input_tensor)[3]
-         input_shape = (3, cols, rows)
-       else:
-         rows = backend.int_shape(input_tensor)[1]
-         cols = backend.int_shape(input_tensor)[2]
-         input_shape = (cols, rows, 3)
-   # If input_shape is None and input_tensor is None using standart shape
-   if input_shape is None and input_tensor is None:
-     input_shape = (None, None, 3)
-
-   if backend.image_data_format() == 'channels_last':
-     row_axis, col_axis = (0, 1)
-   else:
-     row_axis, col_axis = (1, 2)
-   rows = input_shape[row_axis]
-   cols = input_shape[col_axis]
-   if rows and cols and (rows < 32 or cols < 32):
-     raise ValueError('Input size must be at least 32x32; got `input_shape=' +
-                      str(input_shape) + '`')
-   if weights == 'imagenet':
-     if (not minimalistic and alpha not in [0.75, 1.0]
-         or minimalistic and alpha != 1.0):
-       raise ValueError('If imagenet weights are being loaded, '
-                        'alpha can be one of `0.75`, `1.0` for non minimalistic'
-                        ' or `1.0` for minimalistic only.')
-
-     if rows != cols or rows != 224:
-       logging.warning('`input_shape` is undefined or non-square, '
-                       'or `rows` is not 224.'
-                       ' Weights for input shape (224, 224) will be'
-                       ' loaded as the default.')
-
-   if input_tensor is None:
-     img_input = layers.Input(shape=input_shape)
-   else:
-     if not backend.is_keras_tensor(input_tensor):
-       img_input = layers.Input(tensor=input_tensor, shape=input_shape)
-     else:
-       img_input = input_tensor
-
-   channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
-
-   if minimalistic:
-     kernel = 3
-     activation = relu
-     se_ratio = None
-   else:
-     kernel = 5
-     activation = hard_swish
-     se_ratio = 0.25
-
-   x = img_input
-   x = layers.Rescaling(scale=1. / 127.5, offset=-1.)(x)
-   x = layers.Conv2D(
-       16,
-       kernel_size=3,
-       strides=(2, 2),
-       padding='same',
-       use_bias=False,
-       name='Conv')(x)
-   x = layers.BatchNormalization(
-       axis=channel_axis, epsilon=1e-3,
-       momentum=0.999, name='Conv/BatchNorm')(x)
-   x = activation(x)
-
-   x = stack_fn(x, kernel, activation, se_ratio)
-
-   last_conv_ch = _depth(backend.int_shape(x)[channel_axis] * 6)
-
-   # if the width multiplier is greater than 1 we
-   # increase the number of output channels
-   if alpha > 1.0:
-     last_point_ch = _depth(last_point_ch * alpha)
-   x = layers.Conv2D(
-       last_conv_ch,
-       kernel_size=1,
-       padding='same',
-       use_bias=False,
-       name='Conv_1')(x)
-   x = layers.BatchNormalization(
-       axis=channel_axis, epsilon=1e-3,
-       momentum=0.999, name='Conv_1/BatchNorm')(x)
-   x = activation(x)
-   x = layers.GlobalAveragePooling2D()(x)
-   if channel_axis == 1:
-     x = layers.Reshape((last_conv_ch, 1, 1))(x)
-   else:
-     x = layers.Reshape((1, 1, last_conv_ch))(x)
-   x = layers.Conv2D(
-       last_point_ch,
-       kernel_size=1,
-       padding='same',
-       use_bias=True,
-       name='Conv_2')(x)
-   x = activation(x)
-
-   if include_top:
-     if dropout_rate > 0:
-       x = layers.Dropout(dropout_rate)(x)
-     x = layers.Conv2D(classes, kernel_size=1, padding='same', name='Logits')(x)
-     x = layers.Flatten()(x)
-     imagenet_utils.validate_activation(classifier_activation, weights)
-     x = layers.Activation(activation=classifier_activation,
-                           name='Predictions')(x)
-   else:
-     if pooling == 'avg':
-       x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
-     elif pooling == 'max':
-       x = layers.GlobalMaxPooling2D(name='max_pool')(x)
-   # Ensure that the model takes into account
-   # any potential predecessors of `input_tensor`.
-   if input_tensor is not None:
-     inputs = layer_utils.get_source_inputs(input_tensor)
-   else:
-     inputs = img_input
-
-   # Create model.
-   model = models.Model(inputs, x, name='MobilenetV3' + model_type)
-
-   # Load weights.
-   if weights == 'imagenet':
-     model_name = '{}{}_224_{}_float'.format(
-         model_type, '_minimalistic' if minimalistic else '', str(alpha))
-     if include_top:
-       file_name = 'weights_mobilenet_v3_' + model_name + '.h5'
-       file_hash = WEIGHTS_HASHES[model_name][0]
-     else:
-       file_name = 'weights_mobilenet_v3_' + model_name + '_no_top.h5'
-       file_hash = WEIGHTS_HASHES[model_name][1]
-     weights_path = data_utils.get_file(
-         file_name,
-         BASE_WEIGHT_PATH + file_name,
-         cache_subdir='models',
-         file_hash=file_hash)
-     model.load_weights(weights_path)
-   elif weights is not None:
-     model.load_weights(weights)
-
-   return model
-
-
- @keras_export('keras.applications.MobileNetV3Small')
- def MobileNetV3Small(input_shape=None,
-                      alpha=1.0,
-                      minimalistic=False,
-                      include_top=True,
-                      weights='imagenet',
-                      input_tensor=None,
-                      classes=1000,
-                      pooling=None,
-                      dropout_rate=0.2,
-                      classifier_activation='softmax'):
-
-   def stack_fn(x, kernel, activation, se_ratio):
-
-     def depth(d):
-       return _depth(d * alpha)
-
-     x = _inverted_res_block(x, 1, depth(16), 3, 2, se_ratio, relu, 0)
-     x = _inverted_res_block(x, 72. / 16, depth(24), 3, 2, None, relu, 1)
-     x = _inverted_res_block(x, 88. / 24, depth(24), 3, 1, None, relu, 2)
-     x = _inverted_res_block(x, 4, depth(40), kernel, 2, se_ratio, activation, 3)
-     x = _inverted_res_block(x, 6, depth(40), kernel, 1, se_ratio, activation, 4)
-     x = _inverted_res_block(x, 6, depth(40), kernel, 1, se_ratio, activation, 5)
-     x = _inverted_res_block(x, 3, depth(48), kernel, 1, se_ratio, activation, 6)
-     x = _inverted_res_block(x, 3, depth(48), kernel, 1, se_ratio, activation, 7)
-     x = _inverted_res_block(x, 6, depth(96), kernel, 2, se_ratio, activation, 8)
-     x = _inverted_res_block(x, 6, depth(96), kernel, 1, se_ratio, activation, 9)
-     x = _inverted_res_block(x, 6, depth(96), kernel, 1, se_ratio, activation,
-                             10)
-     return x
-
-   return MobileNetV3(stack_fn, 1024, input_shape, alpha, 'small', minimalistic,
-                      include_top, weights, input_tensor, classes, pooling,
-                      dropout_rate, classifier_activation)
-
-
- @keras_export('keras.applications.MobileNetV3Large')
- def MobileNetV3Large(input_shape=None,
-                      alpha=1.0,
-                      minimalistic=False,
-                      include_top=True,
-                      weights='imagenet',
-                      input_tensor=None,
-                      classes=1000,
-                      pooling=None,
-                      dropout_rate=0.2,
-                      classifier_activation='softmax'):
-
-   def stack_fn(x, kernel, activation, se_ratio):
-
-     def depth(d):
-       return _depth(d * alpha)
-
-     x = _inverted_res_block(x, 1, depth(16), 3, 1, None, relu, 0)
-     x = _inverted_res_block(x, 4, depth(24), 3, 2, None, relu, 1)
-     x = _inverted_res_block(x, 3, depth(24), 3, 1, None, relu, 2)
-     x = _inverted_res_block(x, 3, depth(40), kernel, 2, se_ratio, relu, 3)
-     x = _inverted_res_block(x, 3, depth(40), kernel, 1, se_ratio, relu, 4)
-     x = _inverted_res_block(x, 3, depth(40), kernel, 1, se_ratio, relu, 5)
-     x = _inverted_res_block(x, 6, depth(80), 3, 2, None, activation, 6)
-     x = _inverted_res_block(x, 2.5, depth(80), 3, 1, None, activation, 7)
-     x = _inverted_res_block(x, 2.3, depth(80), 3, 1, None, activation, 8)
-     x = _inverted_res_block(x, 2.3, depth(80), 3, 1, None, activation, 9)
-     x = _inverted_res_block(x, 6, depth(112), 3, 1, se_ratio, activation, 10)
-     x = _inverted_res_block(x, 6, depth(112), 3, 1, se_ratio, activation, 11)
-     x = _inverted_res_block(x, 6, depth(160), kernel, 2, se_ratio, activation,
-                             12)
-     x = _inverted_res_block(x, 6, depth(160), kernel, 1, se_ratio, activation,
-                             13)
-     x = _inverted_res_block(x, 6, depth(160), kernel, 1, se_ratio, activation,
-                             14)
-     return x
-
-   return MobileNetV3(stack_fn, 1280, input_shape, alpha, 'large', minimalistic,
-                      include_top, weights, input_tensor, classes, pooling,
-                      dropout_rate, classifier_activation)
-
-
- MobileNetV3Small.__doc__ = BASE_DOCSTRING.format(name='MobileNetV3Small')
- MobileNetV3Large.__doc__ = BASE_DOCSTRING.format(name='MobileNetV3Large')
-
-
- def relu(x):
-   return layers.ReLU()(x)
-
-
- def hard_sigmoid(x):
-   return layers.ReLU(6.)(x + 3.) * (1. / 6.)
-
-
- def hard_swish(x):
-   return layers.Multiply()([hard_sigmoid(x), x])
-
-
- # This function is taken from the original tf repo.
- # It ensures that all layers have a channel number that is divisible by 8
- # It can be seen here:
- # https://github.com/tensorflow/models/blob/master/research/
- # slim/nets/mobilenet/mobilenet.py
-
-
- def _depth(v, divisor=8, min_value=None):
-   if min_value is None:
-     min_value = divisor
-   new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
-   # Make sure that round down does not go down by more than 10%.
-   if new_v < 0.9 * v:
-     new_v += divisor
-   return new_v
-
-
- def _se_block(inputs, filters, se_ratio, prefix):
-   x = layers.GlobalAveragePooling2D(name=prefix + 'squeeze_excite/AvgPool')(
-       inputs)
-   if backend.image_data_format() == 'channels_first':
-     x = layers.Reshape((filters, 1, 1))(x)
-   else:
-     x = layers.Reshape((1, 1, filters))(x)
-   x = layers.Conv2D(
-       _depth(filters * se_ratio),
-       kernel_size=1,
-       padding='same',
-       name=prefix + 'squeeze_excite/Conv')(
-           x)
-   x = layers.ReLU(name=prefix + 'squeeze_excite/Relu')(x)
-   x = layers.Conv2D(
-       filters,
-       kernel_size=1,
-       padding='same',
-       name=prefix + 'squeeze_excite/Conv_1')(
-           x)
-   x = hard_sigmoid(x)
-   x = layers.Multiply(name=prefix + 'squeeze_excite/Mul')([inputs, x])
-   return x
-
-
- def _inverted_res_block(x, expansion, filters, kernel_size, stride, se_ratio,
-                         activation, block_id):
-   channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
-   shortcut = x
-   prefix = 'expanded_conv/'
-   infilters = backend.int_shape(x)[channel_axis]
-   if block_id:
-     # Expand
-     prefix = 'expanded_conv_{}/'.format(block_id)
-     x = layers.Conv2D(
-         _depth(infilters * expansion),
-         kernel_size=1,
-         padding='same',
-         use_bias=False,
-         name=prefix + 'expand')(
-             x)
-     x = layers.BatchNormalization(
-         axis=channel_axis,
-         epsilon=1e-3,
-         momentum=0.999,
-         name=prefix + 'expand/BatchNorm')(
-             x)
-     x = activation(x)
-
-   if stride == 2:
-     x = layers.ZeroPadding2D(
-         padding=imagenet_utils.correct_pad(x, kernel_size),
-         name=prefix + 'depthwise/pad')(
-             x)
-   x = layers.DepthwiseConv2D(
-       kernel_size,
-       strides=stride,
-       padding='same' if stride == 1 else 'valid',
-       use_bias=False,
-       name=prefix + 'depthwise')(
-           x)
-   x = layers.BatchNormalization(
-       axis=channel_axis,
-       epsilon=1e-3,
-       momentum=0.999,
-       name=prefix + 'depthwise/BatchNorm')(
-           x)
-   x = activation(x)
-
-   if se_ratio:
-     x = _se_block(x, _depth(infilters * expansion), se_ratio, prefix)
-
-   x = layers.Conv2D(
-       filters,
-       kernel_size=1,
-       padding='same',
-       use_bias=False,
-       name=prefix + 'project')(
-           x)
-   x = layers.BatchNormalization(
-       axis=channel_axis,
-       epsilon=1e-3,
-       momentum=0.999,
-       name=prefix + 'project/BatchNorm')(
-           x)
-
-   if stride == 1 and infilters == filters:
-     x = layers.Add(name=prefix + 'Add')([shortcut, x])
-   return x
-
-
- @keras_export('keras.applications.mobilenet_v3.preprocess_input')
- def preprocess_input(x, data_format=None):  # pylint: disable=unused-argument
-   """A placeholder method for backward compatibility.
-
-   The preprocessing logic has been included in the mobilenet_v3 model
-   implementation. Users are no longer required to call this method to normalize
-   the input data. This method does nothing and only kept as a placeholder to
-   align the API surface between old and new version of model.
-
-   Args:
-     x: A floating point `numpy.array` or a `tf.Tensor`.
-     data_format: Optional data format of the image tensor/array. Defaults to
-       None, in which case the global setting
-       `tf.keras.backend.image_data_format()` is used (unless you changed it,
-       it defaults to "channels_last").{mode}
-
-   Returns:
-     Unchanged `numpy.array` or `tf.Tensor`.
-   """
-   return x
-
-
- @keras_export('keras.applications.mobilenet_v3.decode_predictions')
- def decode_predictions(preds, top=5):
-   return imagenet_utils.decode_predictions(preds, top=top)
-
-
- decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
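With this vendored MobileNet v3 copy gone in 1.0.0 (along with mt/tf/keras_engine/hdf5_format.py and mt/tf/keras_optimizers/lr_extra.py, per the RECORD diff), downstream code that imported it directly will break. A hedged migration sketch, assuming TensorFlow 2.4+ where tf.keras ships its own MobileNetV3 implementations:

# Hypothetical shim: prefer the vendored module if present, else fall back
# to the upstream Keras implementation (available in tf.keras since TF 2.4).
try:
    from mt.tf.keras_applications.mobilenet_v3 import MobileNetV3Small  # pre-1.0.0
except ImportError:
    from tensorflow.keras.applications import MobileNetV3Small

model = MobileNetV3Small(weights="imagenet")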