mttf 1.1.15-py3-none-any.whl → 1.1.17-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (34)
  1. mt/keras/__init__.py +3 -0
  2. mt/{keras/applications → keras_src/applications_src}/__init__.py +1 -1
  3. mt/{keras/applications → keras_src/applications_src}/mobilenet_v3_split.py +4 -5
  4. mt/{keras/applications → keras_src/applications_src}/mobilevit.py +2 -1
  5. mt/{keras/layers → keras_src/layers_src}/__init__.py +1 -1
  6. mt/{keras/layers → keras_src/layers_src}/counter.py +1 -1
  7. mt/{keras/layers → keras_src/layers_src}/floor.py +1 -1
  8. mt/{keras/layers → keras_src/layers_src}/identical.py +1 -1
  9. mt/{keras/layers → keras_src/layers_src}/image_sizing.py +2 -3
  10. mt/{keras/layers → keras_src/layers_src}/normed_conv2d.py +1 -1
  11. mt/{keras/layers → keras_src/layers_src}/simple_mha.py +1 -2
  12. mt/{keras/layers → keras_src/layers_src}/utils.py +7 -9
  13. mt/{keras/layers → keras_src/layers_src}/var_regularizer.py +1 -1
  14. mt/tf/init.py +2 -2
  15. mt/tf/keras_applications/__init__.py +5 -0
  16. mt/tf/utils.py +5 -59
  17. mt/tf/version.py +1 -1
  18. mt/tfc/__init__.py +58 -0
  19. {mttf-1.1.15.dist-info → mttf-1.1.17.dist-info}/METADATA +1 -1
  20. mttf-1.1.17.dist-info/RECORD +37 -0
  21. mt/tf/keras_applications/mobilenet_v3_split.py +0 -555
  22. mt/tf/keras_applications/mobilevit.py +0 -323
  23. mttf-1.1.15.dist-info/RECORD +0 -39
  24. /mt/{keras/base → keras_src}/__init__.py +0 -0
  25. /mt/{keras/base → keras_src}/base.py +0 -0
  26. {mttf-1.1.15.data → mttf-1.1.17.data}/scripts/dmt_pipi.sh +0 -0
  27. {mttf-1.1.15.data → mttf-1.1.17.data}/scripts/dmt_twineu.sh +0 -0
  28. {mttf-1.1.15.data → mttf-1.1.17.data}/scripts/pipi.sh +0 -0
  29. {mttf-1.1.15.data → mttf-1.1.17.data}/scripts/wml_nexus.py +0 -0
  30. {mttf-1.1.15.data → mttf-1.1.17.data}/scripts/wml_pipi.sh +0 -0
  31. {mttf-1.1.15.data → mttf-1.1.17.data}/scripts/wml_twineu.sh +0 -0
  32. {mttf-1.1.15.dist-info → mttf-1.1.17.dist-info}/WHEEL +0 -0
  33. {mttf-1.1.15.dist-info → mttf-1.1.17.dist-info}/licenses/LICENSE +0 -0
  34. {mttf-1.1.15.dist-info → mttf-1.1.17.dist-info}/top_level.txt +0 -0
--- a/mt/tf/keras_applications/mobilevit.py
+++ /dev/null
@@ -1,323 +0,0 @@
- # pylint: disable=invalid-name
- # pylint: disable=missing-function-docstring
- """MobileViT model.
-
- Most of the code here has been ripped and updated off from the following
- `Keras tutorial <https://keras.io/examples/vision/mobilevit/>`_. Please refer
- to the `MobileViT ICLR2022 paper <https://arxiv.org/abs/2110.02178>`_ for more details.
-
- The paper authors' code is `here <https://github.com/apple/ml-cvnets>`_.
- """
-
-
- from mt import tp, tfc, tf
-
-
- try:
-     from tensorflow.keras.applications.mobilenet_v3 import _inverted_res_block
- except ImportError:
-     try:
-         from keras.applications.mobilenet_v3 import _inverted_res_block
-     except ImportError:
-         try:
-             from keras.src.applications.mobilenet_v3 import _inverted_res_block
-         except ImportError:
-             from .mobilenet_v3_split import _inverted_res_block
-
-
- try:
-     import keras
-     from keras import backend
-     from keras import models
-     from keras.layers import VersionAwareLayers
- except ImportError:
-     try:
-         from tensorflow import keras
-         from tensorflow.keras import backend
-         from tensorflow.keras import models
-         from tensorflow.keras.layers import VersionAwareLayers
-     except ImportError:
-         from tensorflow.python import keras
-         from tensorflow.python.keras import backend
-         from tensorflow.python.keras import models
-         from tensorflow.python.keras.layers import VersionAwareLayers
-
-
- from .mobilenet_v3_split import MobileNetV3Input
-
-
- layers = VersionAwareLayers()
-
-
- def conv_block(x, filters=16, kernel_size=3, strides=2):
-     conv_layer = layers.Conv2D(
-         filters, kernel_size, strides=strides, activation=tf.nn.swish, padding="same"
-     )
-     return conv_layer(x)
-
-
- # Reference: https://git.io/JKgtC
-
-
- def inverted_residual_block(
-     x, expanded_channels, output_channels, strides=1, block_id=0
- ):
-     if block_id == 0:
-         raise NotImplementedError(
-             "Zero block id for _inverted_res_block() is not implemented in MobileViT."
-         )
-
-     channel_axis = 1 if backend.image_data_format() == "channels_first" else -1
-     infilters = backend.int_shape(x)[channel_axis]
-
-     m = _inverted_res_block(
-         x,
-         expanded_channels // infilters,  # expansion
-         output_channels,  # filters
-         3,  # kernel_size
-         strides,  # stride
-         0,  # se_ratio
-         tf.nn.swish,  # activation
-         block_id,
-     )
-
-     return m
-
-
- # Reference:
- # https://keras.io/examples/vision/image_classification_with_vision_transformer/
-
-
- def mlp(x, hidden_units, dropout_rate):
-     for units in hidden_units:
-         x = layers.Dense(units, activation=tf.nn.swish)(x)
-         x = layers.Dropout(dropout_rate)(x)
-     return x
-
-
- def transformer_block(x, transformer_layers, projection_dim, num_heads=2):
-     for _ in range(transformer_layers):
-         # Layer normalization 1.
-         x1 = layers.LayerNormalization(epsilon=1e-6)(x)
-         # Create a multi-head attention layer.
-         attention_output = layers.MultiHeadAttention(
-             num_heads=num_heads, key_dim=projection_dim, dropout=0.1
-         )(x1, x1)
-         # Skip connection 1.
-         x2 = layers.Add()([attention_output, x])
-         # Layer normalization 2.
-         x3 = layers.LayerNormalization(epsilon=1e-6)(x2)
-         # MLP.
-         x3 = mlp(
-             x3,
-             hidden_units=[x.shape[-1] * 2, x.shape[-1]],
-             dropout_rate=0.1,
-         )
-         # Skip connection 2.
-         x = layers.Add()([x3, x2])
-
-     return x
-
-
- def mobilevit_block(x, num_blocks, projection_dim, strides=1):
-     cell_size = 2  # 2x2 for the Transformer block
-
-     # Local projection with convolutions.
-     local_features = conv_block(x, filters=projection_dim, strides=strides)
-     local_features = conv_block(
-         local_features, filters=projection_dim, kernel_size=1, strides=strides
-     )
-
-     if x.shape[1] % cell_size != 0:
-         raise tfc.ModelSyntaxError(
-             "Input tensor must have height divisible by {}. Got {}.".format(
-                 cell_size, x.shape
-             )
-         )
-
-     if x.shape[2] % cell_size != 0:
-         raise tfc.ModelSyntaxError(
-             "Input tensor must have width divisible by {}. Got {}.".format(
-                 cell_size, x.shape
-             )
-         )
-
-     # Unfold into patches and then pass through Transformers.
-     z = local_features  # (B,H,W,C)
-     z = layers.Reshape(
-         (
-             z.shape[1] // cell_size,
-             cell_size,
-             z.shape[2] // cell_size,
-             cell_size,
-             projection_dim,
-         )
-     )(
-         z
-     )  # (B,H/P,P,W/P,P,C)
-     z = tf.transpose(z, perm=[0, 2, 4, 1, 3, 5])  # (B,P,P,H/P,W/P,C)
-     non_overlapping_patches = layers.Reshape(
-         (cell_size * cell_size, z.shape[3] * z.shape[4], projection_dim)
-     )(
-         z
-     )  # (B,P*P,H*W/(P*P),C)
-     global_features = transformer_block(
-         non_overlapping_patches, num_blocks, projection_dim
-     )
-
-     # Fold into conv-like feature-maps.
-     z = layers.Reshape(
-         (
-             cell_size,
-             cell_size,
-             x.shape[1] // cell_size,
-             x.shape[2] // cell_size,
-             projection_dim,
-         )
-     )(
-         global_features
-     )  # (B,P,P,H/P,W/P,C)
-     z = tf.transpose(z, perm=[0, 3, 1, 4, 2, 5])  # (B,H/P,P,W/P,P,C)
-     folded_feature_map = layers.Reshape((x.shape[1], x.shape[2], projection_dim))(z)
-
-     # Apply point-wise conv -> concatenate with the input features.
-     folded_feature_map = conv_block(
-         folded_feature_map, filters=x.shape[-1], kernel_size=1, strides=strides
-     )
-     local_global_features = layers.Concatenate(axis=-1)([x, folded_feature_map])
-
-     # Fuse the local and global features using a convolution layer.
-     local_global_features = conv_block(
-         local_global_features, filters=projection_dim, strides=strides
-     )
-
-     return local_global_features
-
-
- def create_mobilevit(
-     input_shape=None,
-     model_type: str = "XXS",
-     output_all: bool = False,
-     name: tp.Optional[str] = None,
- ):
-     """Prepares a model of submodels which is equivalent to a MobileNetV3 model.
-
-     Parameters
-     ----------
-     input_shape : tuple
-         Optional shape tuple, to be specified if you would like to use a model with an input image
-         resolution that is not (224, 224, 3). It should have exactly 3 inputs channels
-         (224, 224, 3). You can also omit this option if you would like to infer input_shape from an
-         input_tensor. If you choose to include both input_tensor and input_shape then input_shape
-         will be used if they match, if the shapes do not match then we will throw an error. E.g.
-         `(160, 160, 3)` would be one valid value.
-     model_type : {'XXS', 'XS', 'S'}
-         one of the 3 variants introduced in the paper
-     output_all : bool
-         If True, the model returns the output tensor of every block before down-sampling, other
-         than the input layer. Otherwise, it returns the output tensor of the last block.
-     name : str, optional
-         model name, if any. Default to 'MobileViT<model_type>'.
-
-     Returns
-     -------
-     tensorflow.keras.Model
-         the output MobileViT model
-     """
-
-     model_type_id = ["XXS", "XS", "S"].index(model_type)
-
-     expansion_factor = 2 if model_type_id == 0 else 4
-
-     inputs = MobileNetV3Input(input_shape=input_shape)
-     x = layers.Rescaling(scale=1.0 / 255)(inputs)
-
-     # Initial conv-stem -> MV2 block.
-     x = conv_block(x, filters=16)
-     x = inverted_residual_block(
-         x,
-         expanded_channels=16 * expansion_factor,
-         output_channels=16 if model_type_id == 0 else 32,
-         block_id=1,
-     )
-     outputs = [x]
-
-     # Downsampling with MV2 block.
-     output_channels = [24, 48, 64][model_type_id]
-     x = inverted_residual_block(
-         x,
-         expanded_channels=16 * expansion_factor,
-         output_channels=output_channels,
-         strides=2,
-         block_id=2,
-     )
-     x = inverted_residual_block(
-         x,
-         expanded_channels=24 * expansion_factor,
-         output_channels=output_channels,
-         block_id=3,
-     )
-     x = inverted_residual_block(
-         x,
-         expanded_channels=24 * expansion_factor,
-         output_channels=output_channels,
-         block_id=4,
-     )
-     if output_all:
-         outputs.append(x)
-     else:
-         outputs = [x]
-
-     # First MV2 -> MobileViT block.
-     output_channels = [48, 64, 96][model_type_id]
-     projection_dim = [64, 96, 144][model_type_id]
-     x = inverted_residual_block(
-         x,
-         expanded_channels=48 * expansion_factor,
-         output_channels=output_channels,
-         strides=2,
-         block_id=5,
-     )
-     x = mobilevit_block(x, num_blocks=2, projection_dim=projection_dim)
-     if output_all:
-         outputs.append(x)
-     else:
-         outputs = [x]
-
-     # Second MV2 -> MobileViT block.
-     output_channels = [64, 80, 128][model_type_id]
-     projection_dim = [80, 120, 192][model_type_id]
-     x = inverted_residual_block(
-         x,
-         expanded_channels=64 * expansion_factor,
-         output_channels=output_channels,
-         strides=2,
-         block_id=6,
-     )
-     x = mobilevit_block(x, num_blocks=4, projection_dim=projection_dim)
-     if output_all:
-         outputs.append(x)
-     else:
-         outputs = [x]
-
-     # Third MV2 -> MobileViT block.
-     output_channels = [80, 96, 160][model_type_id]
-     projection_dim = [96, 144, 240][model_type_id]
-     x = inverted_residual_block(
-         x,
-         expanded_channels=80 * expansion_factor,
-         output_channels=output_channels,
-         strides=2,
-         block_id=7,
-     )
-     x = mobilevit_block(x, num_blocks=3, projection_dim=projection_dim)
-     filters = [320, 384, 640][model_type_id]
-     x = conv_block(x, filters=filters, kernel_size=1, strides=1)
-     if output_all:
-         outputs.append(x)
-     else:
-         outputs = [x]
-
-     if name is None:
-         name = "MobileViT{}".format(model_type)
-     return keras.Model(inputs, outputs, name=name)
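
The bulk of the removed mobilevit.py is the stock Keras MobileViT recipe; the one subtle step is the unfold/fold pair in mobilevit_block, which rearranges the feature map into non-overlapping patches for the Transformer and then back. A minimal standalone sketch of that round trip (illustrative only, not part of the package; shapes follow the (B,H,W,C) comments in the removed code):

    import tensorflow as tf

    P = 2                                # cell_size in the removed mobilevit_block
    x = tf.random.uniform((1, 8, 8, 5))  # (B, H, W, C), with H and W divisible by P
    B, H, W, C = x.shape

    # Unfold: (B,H,W,C) -> (B,H/P,P,W/P,P,C) -> (B,P,P,H/P,W/P,C) -> (B,P*P,H*W/(P*P),C)
    z = tf.reshape(x, (B, H // P, P, W // P, P, C))
    z = tf.transpose(z, perm=[0, 2, 4, 1, 3, 5])
    patches = tf.reshape(z, (B, P * P, (H // P) * (W // P), C))

    # Fold: undo the flatten, then the transpose (perm [0,3,1,4,2,5] is the
    # inverse of [0,2,4,1,3,5]), then the initial reshape.
    z = tf.reshape(patches, (B, P, P, H // P, W // P, C))
    z = tf.transpose(z, perm=[0, 3, 1, 4, 2, 5])
    y = tf.reshape(z, (B, H, W, C))

    assert bool(tf.reduce_all(x == y))   # the round trip is lossless

The losslessness of this rearrangement is what lets the block fold the Transformer output back into a conv-like feature map before the point-wise convolution and concatenation with the input.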
--- a/mttf-1.1.15.dist-info/RECORD
+++ /dev/null
@@ -1,39 +0,0 @@
- mt/keras/__init__.py,sha256=oM2xsSZTuGSUvFJSpntk8r7bzGcTytNvn-Apv_SBFh4,70
- mt/keras/applications/__init__.py,sha256=oHvyjPR5QGhcHxQyPryt_heTIcPzDl0c18GMOylYmRI,659
- mt/keras/applications/mobilenet_v3_split.py,sha256=HQ6wd6CotEngHOE_yuV8KlU6O-7WFxEcU_okW2QkquQ,19939
- mt/keras/applications/mobilevit.py,sha256=v-XzmFSwWAjN_nsp2ys0QINtoqcYD-qvNrSbKHkreQs,8914
- mt/keras/base/__init__.py,sha256=vIvuf3gjbpXC-rGpXLfIApLRUV6w1GUzVK8YJOgWLyk,327
- mt/keras/base/base.py,sha256=_B2sSUMlHOtGSAqQD1p5YD0raEDL4W0Bh3uKD6BXOJM,807
- mt/keras/layers/__init__.py,sha256=7TMhZig3bHtyDpiwnoXcwgAxw6e5xkAy664Z6qLQUrY,750
- mt/keras/layers/counter.py,sha256=J3__IXbaa7zp72a5P8FFi0bfftTHwa1xzzCwxCIU2gc,856
- mt/keras/layers/floor.py,sha256=4mSpmTrhM7VqTK85POkjC3OhaTZUNUF9knO7gTbSGtc,512
- mt/keras/layers/identical.py,sha256=AIqC36PxU9sXyF9rZuQ-5ObjnIjBiSIMHIb5MwqVdmY,361
- mt/keras/layers/image_sizing.py,sha256=LeWwyFwQSYrDq_2fjOVi1YiRn7GcwMzrLUCLR8zcaKI,58530
- mt/keras/layers/normed_conv2d.py,sha256=dFqeuNS0WyzrGIP3wiaKl_iSFmTgTPg-w3nCvn2X4d0,10680
- mt/keras/layers/simple_mha.py,sha256=ecGL5Sy8mbpj6F8hAAN2h-7Dt0TiAOEM7o6SyVrVTp4,19225
- mt/keras/layers/utils.py,sha256=lk9y0Sl4_w69JtFXKrKlWIgWBJx5lz9WrQi2LszvaZE,2834
- mt/keras/layers/var_regularizer.py,sha256=EZ8ueXrObfu-oo4qixM0UkULe-C03kdNQppQ_NmtYSA,1108
- mt/tf/__init__.py,sha256=M8xiJNdrAUJZgiZTOQOdfkehjO-CYzGpoxh5HVGBkms,338
- mt/tf/init.py,sha256=BUpw33uyA_DmeJjrN2fX4MIs8MynKxkwgc2oTGTqssU,1294
- mt/tf/mttf_version.py,sha256=ha53i-H9pE-crufFttUECgXHwPvam07zMKzApUts1Gs,206
- mt/tf/utils.py,sha256=Copl5VM0PpuFUchK-AcBuGO6QitDwHcEs4FruZb2GAI,2460
- mt/tf/version.py,sha256=Mj53oE1ekCr-5b7nNz38RqfI1eDogXGshIO3l_nD-Lg,207
- mt/tf/keras_applications/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- mt/tf/keras_applications/mobilenet_v3_split.py,sha256=1oPB3EX3k3c7iju9Ksuw9xyv32-mOKPs3uy2Mk5tLd8,19716
- mt/tf/keras_applications/mobilevit.py,sha256=VsKB_U-f9jwUEjpd0eq-YXL4rDsuAbKQ0yIzkbMfLzw,9949
- mt/tf/keras_layers/__init__.py,sha256=NsuFD-kSuy6cVV3Kl7ab95tw4g7x4Igv3cF-Ky3VuCo,124
- mt/tfc/__init__.py,sha256=XFXAbmEi5uTAuZj797cBqw9ZBnVy6ptxP0TTzVauMNk,8446
- mt/tfg/__init__.py,sha256=6Ly2QImAyQTsg_ZszuAuK_L2n56v89Cix9yYmMVk0CM,304
- mt/tfp/__init__.py,sha256=AQkGCkmDRwswEt3qoOSpxe-fZekx78sHHBs2ZVz33gc,383
- mt/tfp/real_nvp.py,sha256=U9EmkXGqFcvtS2yeh5_RgbKlVKKlGFGklAb7Voyazz4,4440
- mttf-1.1.15.data/scripts/dmt_pipi.sh,sha256=NNsj4P332unHMqU4mAFjU9PQvxQ8TK5XQ42LC29IZY8,510
- mttf-1.1.15.data/scripts/dmt_twineu.sh,sha256=KZhcYwuCW0c36tWcOgCe7uxJmS08rz-J6YNY76Exy4M,193
- mttf-1.1.15.data/scripts/pipi.sh,sha256=kdo96bdaKq2QIa52Z4XFSiGPcbDm09SAU9cju6I2Lxo,289
- mttf-1.1.15.data/scripts/wml_nexus.py,sha256=kW0ju8_kdXc4jOjhdzKiMsFuO1MNpHmu87skrhu9SEg,1492
- mttf-1.1.15.data/scripts/wml_pipi.sh,sha256=CuidIcbuxyXSBNQqYRhCcSC8QbBaSGnQX0KAIFaIvKA,499
- mttf-1.1.15.data/scripts/wml_twineu.sh,sha256=av1JLN765oOWC5LPkv2eSWIVof26y60601tMGkuYdb8,180
- mttf-1.1.15.dist-info/licenses/LICENSE,sha256=e_JtcszdGZ2ZGfjcymTGrcxFj_9XPicZOVtnsrPvruk,1070
- mttf-1.1.15.dist-info/METADATA,sha256=0bESNNs2pyhob_uCHnK1Q0uMh8c1kAWOF3ffAJsQQzI,568
- mttf-1.1.15.dist-info/WHEEL,sha256=wXxTzcEDnjrTwFYjLPcsW_7_XihufBwmpiBeiXNBGEA,91
- mttf-1.1.15.dist-info/top_level.txt,sha256=WcqGFu9cV7iMZg09iam8eNxUvGpLSKKF2Iubf6SJVOo,3
- mttf-1.1.15.dist-info/RECORD,,
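
For reference, each row above follows the standard wheel RECORD format: path, then sha256= followed by the urlsafe-base64 digest with padding stripped, then size in bytes; the RECORD file itself is listed with empty hash and size fields. A minimal sketch of checking one row (verify_record_row is a hypothetical helper, not part of mttf; real RECORD files are CSV, so paths containing commas would need proper quoting):

    import base64
    import hashlib
    import pathlib

    def verify_record_row(row: str, root: str = ".") -> bool:
        # One row looks like: 'mt/tf/version.py,sha256=Mj53oE1...,207'
        path, hash_spec, size = row.rsplit(",", 2)
        if not hash_spec:  # the RECORD file itself carries no hash or size
            return True
        data = (pathlib.Path(root) / path).read_bytes()
        algo, _, expected = hash_spec.partition("=")
        digest = base64.urlsafe_b64encode(hashlib.new(algo, data).digest())
        return digest.rstrip(b"=").decode() == expected and len(data) == int(size)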