mttf 1.2.13__py3-none-any.whl → 1.2.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mttf might be problematic. Click here for more details.

@@ -0,0 +1,495 @@
1
+ """Standard classifier from a feature vector.
2
+ """
3
+
4
+ from mt import tp, tfc, logg
5
+ from .. import keras_source
6
+ from .. import models, layers, regularizers
7
+
8
+ from ..constraints_src import CenterAround
9
+
10
+
11
def create_classifier_block(
    bv_feats,
    n_classes: int,
    name: str = "dense_classifier",
    params: tp.Optional[tfc.ClassifierParams] = None,
    logger: tp.Optional[logg.IndentedLoggerAdapter] = None,
):
    """Creates a standard classifier block.

    Parameters
    ----------
    bv_feats : tensorflow.Tensor
        a 2D tensor of shape `(B,D)` where `B` is the batch size and `D` is the feature
        dimensionality
    n_classes : int
        number of output classes
    name : str, optional
        the name of the classifier block
    params : mt.tfc.ClassifierParams, optional
        parameters for creating the classifier block. If None, a default-constructed
        :class:`mt.tfc.ClassifierParams` is used. (The previous mutable-default-argument
        form is preserved behaviourally.)
    logger : mt.logg.IndentedLoggerAdapter, optional
        logger for debugging purposes

    Returns
    -------
    model : tensorflow.keras.models.Model
        an uninitialised model without any compilation details representing the classifier block.
        The model returns `bv_logits` and `bv_probs`.
    """

    if params is None:  # avoid a shared mutable default argument
        params = tfc.ClassifierParams()

    msg = f"Creating a classifier block of {n_classes} classes"
    with logg.scoped_info(msg, logger=logger):
        x = bv_feats
        name_scope = tfc.NameScope(name)

        # dropout, optional
        dropout = getattr(params, "dropout", None)
        if dropout is not None and 0 < dropout < 1:
            x = layers.Dropout(dropout)(x)

        # Object classification branch
        # MT-TODO: currently l2_coeff does not take into account batch size. In order to be truly
        # independent of batch size, number of classes and feature dimensionality, the l2 coeff
        # should be l2_coeff / bv_feats.shape[1] / n_classes / batch_size. So we need to pass the
        # batch size to the function as an additional argument.
        l2_coeff = getattr(params, "l2_coeff", None)
        if l2_coeff is not None:
            logg.info(
                "Using param 'l2_coeff' for kernel and bias regularizers.",
                logger=logger,
            )
            logg.info(f"l2_coeff: {l2_coeff}", logger=logger)
            # kernel l2 is normalised by both the feature dimensionality and the class count
            l2 = l2_coeff / bv_feats.shape[1] / n_classes
            logg.info(f"kernel_l2: {l2}", logger=logger)
            kernel_regularizer = regularizers.l2(l2)
            # bias l2 is normalised by the class count only
            l2 = l2_coeff / n_classes
            logg.info(f"bias_l2: {l2}", logger=logger)
            bias_regularizer = regularizers.l2(l2)
        else:
            kernel_regularizer = None
            bias_regularizer = None

        # zero mean logit biases
        if getattr(params, "zero_mean_logit_biases", False):
            # BUGFIX: typo "constrainted" -> "constrained" in the log message
            logg.info("Logit biases are constrained to have zero mean.", logger=logger)
            bias_constraint = CenterAround()
        else:
            bias_constraint = None

        # dense layer
        # BUGFIX: the original referenced bare `Dense`, which is not in scope in this
        # module; use the imported `layers` namespace instead.
        bv_logits = x = layers.Dense(
            n_classes,
            name=name_scope("logits"),
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            bias_constraint=bias_constraint,
        )(x)

        # BUGFIX: the original referenced `keras.layers.Softmax` but no `keras` name is
        # bound in this module; `layers.Softmax` is the in-scope equivalent.
        bv_probs = x = layers.Softmax(name=name_scope("probs"))(x)

        # model
        model = models.Model(bv_feats, [bv_logits, bv_probs], name=name)

    return model
95
+
96
+
97
def MobileNetV3SmallBlock(
    block_id: int,  # only 0 to 3 are accepted here
    input_tensor,  # input tensor for the block
    alpha=1.0,
    minimalistic=False,
):
    """Prepares a MobileNetV3Small downsampling block.

    Builds the inverted-residual stack corresponding to `block_id` of the
    MobileNetV3-Small architecture on top of `input_tensor` and wraps it in a
    Keras model named ``MobileNetV3SmallBlock{block_id}``.
    """

    def depth(d):
        # scale a channel count by the width multiplier
        return _depth(d * alpha)

    # minimalistic models drop squeeze-excite, hard-swish and 5x5 kernels
    if minimalistic:
        kernel, activation, se_ratio = 3, relu, None
    else:
        kernel, activation, se_ratio = 5, hard_swish, 0.25

    # (expansion, channels, kernel, stride, se_ratio, activation, layer_index)
    if block_id == 0:
        specs = [(1, 16, 3, 2, se_ratio, relu, 0)]
    elif block_id == 1:
        specs = [
            (72.0 / 16, 24, 3, 2, None, relu, 1),
            (88.0 / 24, 24, 3, 1, None, relu, 2),
        ]
    elif block_id == 2:
        specs = [
            (4, 40, kernel, 2, se_ratio, activation, 3),
            (6, 40, kernel, 1, se_ratio, activation, 4),
            (6, 40, kernel, 1, se_ratio, activation, 5),
            (3, 48, kernel, 1, se_ratio, activation, 6),
            (3, 48, kernel, 1, se_ratio, activation, 7),
        ]
    else:
        specs = [
            (6, 96, kernel, 2, se_ratio, activation, 8),
            (6, 96, kernel, 1, se_ratio, activation, 9),
            (6, 96, kernel, 1, se_ratio, activation, 10),
        ]

    x = input_tensor
    for expansion, channels, k, stride, se, act, idx in specs:
        x = _inverted_res_block(x, expansion, depth(channels), k, stride, se, act, idx)

    # Create model.
    return models.Model(input_tensor, x, name=f"MobileNetV3SmallBlock{block_id}")
138
+
139
+
140
def MobileNetV3LargeBlock(
    block_id: int,  # only 0 to 4 are accepted here. 4 is only available as of 2023/05/15
    input_tensor,  # input tensor for the block
    alpha=1.0,
    minimalistic=False,
):
    """Prepares a MobileNetV3Large downsampling block.

    Builds the inverted-residual stack corresponding to `block_id` of the
    MobileNetV3-Large architecture on top of `input_tensor` and wraps it in a
    Keras model named ``MobileNetV3LargeBlock{block_id}``.
    """

    def depth(d):
        # scale a channel count by the width multiplier
        return _depth(d * alpha)

    # minimalistic models drop squeeze-excite, hard-swish and 5x5 kernels
    if minimalistic:
        kernel, activation, se_ratio = 3, relu, None
    else:
        kernel, activation, se_ratio = 5, hard_swish, 0.25

    # (expansion, channels, kernel, stride, se_ratio, activation, layer_index)
    if block_id == 0:
        specs = [
            (1, 16, 3, 1, None, relu, 0),
            (4, 24, 3, 2, None, relu, 1),
            (3, 24, 3, 1, None, relu, 2),
        ]
    elif block_id == 1:
        specs = [
            (3, 40, kernel, 2, se_ratio, relu, 3),
            (3, 40, kernel, 1, se_ratio, relu, 4),
            (3, 40, kernel, 1, se_ratio, relu, 5),
        ]
    elif block_id == 2:
        specs = [
            (6, 80, 3, 2, None, activation, 6),
            (2.5, 80, 3, 1, None, activation, 7),
            (2.3, 80, 3, 1, None, activation, 8),
            (2.3, 80, 3, 1, None, activation, 9),
            (6, 112, 3, 1, se_ratio, activation, 10),
            (6, 112, 3, 1, se_ratio, activation, 11),
        ]
    elif block_id == 3:
        specs = [
            (6, 160, kernel, 2, se_ratio, activation, 12),
            (6, 160, kernel, 1, se_ratio, activation, 13),
            (6, 160, kernel, 1, se_ratio, activation, 14),
        ]
    else:
        specs = [
            (6, 320, kernel, 2, se_ratio, activation, 15),
            (6, 320, kernel, 1, se_ratio, activation, 16),
            (6, 320, kernel, 1, se_ratio, activation, 17),
        ]

    x = input_tensor
    for expansion, channels, k, stride, se, act, idx in specs:
        x = _inverted_res_block(x, expansion, depth(channels), k, stride, se, act, idx)

    # Create model.
    return models.Model(input_tensor, x, name=f"MobileNetV3LargeBlock{block_id}")
189
+
190
+
191
def MobileNetV3Mixer(
    input_tensor,
    params: tfc.MobileNetV3MixerParams,
    last_point_ch,
    alpha=1.0,
    model_type: str = "Large",  # only 'Small' or 'Large' are accepted
    minimalistic=False,
):
    """Prepares a MobileNetV3 mixer block.

    Parameters
    ----------
    input_tensor : tensorflow.Tensor
        the 4D feature tensor to mix/pool
    params : mt.tfc.MobileNetV3MixerParams
        mixer parameters; `params.variant` selects one of 'mobilenet', 'maxpool'
        or 'mhapool'
    last_point_ch : int
        number of output channels of the final pointwise convolution ('mobilenet'
        variant only)
    alpha : float
        the width multiplier; values above 1.0 also widen `last_point_ch`
    model_type : {'Small', 'Large'}
        used only to name the returned model
    minimalistic : bool
        if True, the 'mobilenet' variant uses relu instead of hard-swish

    Returns
    -------
    tensorflow.keras.models.Model
        a model mapping `input_tensor` to the mixed output tensor(s)

    Raises
    ------
    mt.tfc.ModelSyntaxError
        if the variant is unknown or misconfigured
    """

    x = input_tensor
    channel_axis = 1 if backend.image_data_format() == "channels_first" else -1

    if params.variant == "mobilenet":
        # minimalistic models use relu everywhere instead of hard-swish
        activation = relu if minimalistic else hard_swish

        last_conv_ch = _depth(backend.int_shape(x)[channel_axis] * 6)

        # if the width multiplier is greater than 1 we
        # increase the number of output channels
        if alpha > 1.0:
            last_point_ch = _depth(last_point_ch * alpha)
        x = layers.Conv2D(
            last_conv_ch, kernel_size=1, padding="same", use_bias=False, name="Conv_1"
        )(x)
        x = layers.BatchNormalization(
            axis=channel_axis, epsilon=1e-3, momentum=0.999, name="Conv_1/BatchNorm"
        )(x)
        x = activation(x)
        x = layers.GlobalAveragePooling2D()(x)
        if channel_axis == 1:
            x = layers.Reshape((last_conv_ch, 1, 1))(x)
        else:
            x = layers.Reshape((1, 1, last_conv_ch))(x)
        x = layers.Conv2D(
            last_point_ch, kernel_size=1, padding="same", use_bias=True, name="Conv_2"
        )(x)
        x = activation(x)
        # BUGFIX: `outputs` was never assigned on this branch, so building the model
        # below raised NameError.
        outputs = x
    elif params.variant == "maxpool":
        # BUGFIX: the layer must be instantiated first and then applied to the tensor;
        # the original passed `x` to the layer constructor. Also assign `outputs`.
        x = layers.GlobalMaxPool2D()(x)
        outputs = x
    elif params.variant == "mhapool":
        if backend.image_data_format() == "channels_first":
            raise tfc.ModelSyntaxError(
                "Mixer variant 'mhapool' requires channels_last image data format."
            )

        mhapool_params = params.mhapool_cascade_params
        if not isinstance(mhapool_params, tfc.MHAPool2DCascadeParams):
            raise tfc.ModelSyntaxError(
                "Parameter 'params.mhapool_cascade_params' is not of type "
                "mt.tfc.MHAPool2DCascadeParams. Got: {}.".format(type(mhapool_params))
            )

        from ..layers_src import MHAPool2D

        n_heads = mhapool_params.n_heads
        k = 0
        outputs = []
        # repeatedly pool until the spatial extent collapses to 1x1
        while True:
            h = x.shape[1]
            w = x.shape[2]

            if h <= 1 and w <= 1:
                break

            c = x.shape[3]
            # distribute channels over heads, rounding up
            key_dim = (c + n_heads - 1) // n_heads
            value_dim = int(key_dim * mhapool_params.expansion_factor)
            k += 1
            block_name = f"MHAPool2DCascade_block{k}"
            if k > mhapool_params.max_num_pooling_layers:  # GlobalMaxPool2D
                x = layers.GlobalMaxPooling2D(
                    keepdims=True, name=block_name + "/GlobalMaxPool"
                )(x)
            else:  # MHAPool2D
                x = layers.LayerNormalization()(x)
                if h <= 2 and w <= 2:
                    activation = mhapool_params.final_activation
                else:
                    activation = mhapool_params.activation
                # NOTE(review): `activation` is computed above but never passed to
                # MHAPool2D — possibly an omission (activation=activation); confirm
                # against the MHAPool2D layer signature before changing behaviour.
                x = MHAPool2D(
                    n_heads,
                    key_dim,
                    value_dim=value_dim,
                    pooling=mhapool_params.pooling,
                    dropout=mhapool_params.dropout,
                    name=block_name + "/MHAPool",
                )(x)

            if mhapool_params.output_all:
                outputs.append(x)
            else:
                outputs = [x]
    else:
        raise tfc.ModelSyntaxError(
            "Unknown mixer variant: '{}'.".format(params.variant)
        )

    # Create model.
    model = models.Model(
        input_tensor, outputs, name="MobileNetV3{}Mixer".format(model_type)
    )

    return model
303
+
304
+
305
def MobileNetV3Output(
    input_tensor,
    model_type: str = "Large",  # only 'Small' or 'Large' are accepted
    include_top=True,
    classes=1000,
    pooling=None,
    dropout_rate=0.2,
    classifier_activation="softmax",
):
    """Prepares a MobileNetV3 output block.

    Returns a Keras model mapping `input_tensor` to either the classification
    head (when `include_top` is True) or a global pooling of the features
    (when `pooling` is 'avg' or 'max'). Returns None when there is nothing to
    add (no top and no pooling requested).
    """

    x = input_tensor
    if include_top:
        # classification head: optional dropout, 1x1 conv logits, softmax
        if dropout_rate > 0:
            x = layers.Dropout(dropout_rate)(x)
        x = layers.Conv2D(classes, kernel_size=1, padding="same", name="Logits")(x)
        x = layers.Flatten()(x)
        x = layers.Activation(activation=classifier_activation, name="Predictions")(x)
    elif pooling == "avg":
        x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
    elif pooling == "max":
        x = layers.GlobalMaxPooling2D(name="max_pool")(x)
    else:
        # no head and no pooling: signal the caller that no block is needed
        return None

    # Create model.
    return models.Model(input_tensor, x, name=f"MobileNetV3{model_type}Output")
335
+
336
+
337
def MobileNetV3Split(
    input_shape=None,
    alpha: float = 1.0,
    model_type: str = "Large",
    max_n_blocks: int = 6,
    minimalistic: bool = False,
    mixer_params: tp.Optional[tfc.MobileNetV3MixerParams] = None,
    include_top: bool = True,
    pooling=None,
    classes: int = 1000,
    dropout_rate: float = 0.2,
    classifier_activation="softmax",
    output_all: bool = False,
    name: tp.Optional[str] = None,
):
    """Prepares a model of submodels which is equivalent to a MobileNetV3 model.

    The returned model chains an input parser, a sequence of downsampling
    backbone blocks, an optional mixer block and an optional output block,
    each of which is itself a Keras model.

    Parameters
    ----------
    input_shape : tuple
        Optional shape tuple, to be specified if you would like to use a model with an
        input image resolution that is not (224, 224, 3). It should have exactly 3 input
        channels, e.g. `(160, 160, 3)` would be one valid value.
    alpha : float
        controls the width of the network, known as the depth multiplier in the
        MobileNetV3 paper. Values below 1.0 proportionally decrease, and values above
        1.0 proportionally increase, the number of filters in each layer; 1.0 uses the
        default filter counts from the paper.
    model_type : {'Small', 'Large'}
        whether it is the small variant or the large variant
    max_n_blocks : int
        the maximum number of blocks in the backbone. It is further constrained by the
        actual maximum number of blocks that the variant can implement.
    minimalistic : bool
        if True, use the minimalistic variants which share per-layer dimensions with
        MobileNetV3 but avoid squeeze-and-excite units, hard-swish and 5x5
        convolutions. Less efficient on CPU but more performant on GPU/DSP.
    mixer_params : mt.tfc.MobileNetV3MixerParams, optional
        parameters for defining the mixer block
    include_top : bool, default True
        whether to include the fully-connected layer at the top of the network. Only
        valid if `mixer_params` is not null.
    pooling : str, optional
        Optional pooling mode for feature extraction when `include_top` is False and
        `mixer_params` is not null: `None` (4D tensor output), `'avg'` (global average
        pooling, 2D output) or `'max'` (global max pooling).
    classes : int, optional
        Optional number of classes to classify images into, only to be specified if
        `mixer_params` is not null and `include_top` is True.
    dropout_rate : float
        fraction of the input units to drop on the last layer. Only to be specified if
        `mixer_params` is not null and `include_top` is True.
    classifier_activation : object
        A `str` or callable activation for the "top" layer; ignored unless
        `mixer_params` is not null and `include_top` is True. `None` returns logits.
        When loading pretrained weights, only `None` or `"softmax"` is valid.
    output_all : bool
        If True, the model returns the output tensor of every submodel other than the
        input layer. Otherwise, it returns the output tensor of the last submodel.
    name : str, optional
        model name, if any. Default to 'MobileNetV3LargeSplit' or
        'MobileNetV3SmallSplit'.

    Returns
    -------
    tensorflow.keras.Model
        the output MobileNetV3 model split into submodels
    """

    input_layer = MobileNetV3Input(input_shape=input_shape)
    parser_block = MobileNetV3Parser(
        input_layer,
        model_type=model_type,
        minimalistic=minimalistic,
    )
    x = parser_block(input_layer)
    outputs = [x]

    # backbone: the Large variant has 5 downsampling blocks, the Small one 4,
    # further capped by the caller-supplied maximum
    num_blocks = min(5 if model_type == "Large" else 4, max_n_blocks)
    if model_type == "Large":
        make_block = MobileNetV3LargeBlock
    else:
        make_block = MobileNetV3SmallBlock
    for block_id in range(num_blocks):
        backbone_block = make_block(block_id, x, alpha=alpha, minimalistic=minimalistic)
        x = backbone_block(x)
        outputs = outputs + [x] if output_all else [x]

    if mixer_params is not None:
        if not isinstance(mixer_params, tfc.MobileNetV3MixerParams):
            raise tfc.ModelSyntaxError(
                "Argument 'mixer_params' is not an instance of "
                "mt.tfc.MobileNetV3MixerParams. Got: {}.".format(type(mixer_params))
            )

        last_point_ch = 1280 if model_type == "Large" else 1024
        mixer_block = MobileNetV3Mixer(
            x,
            mixer_params,
            last_point_ch,
            alpha=alpha,
            model_type=model_type,
            minimalistic=minimalistic,
        )
        x = mixer_block(x)
        # the mixer may return a single tensor or a list/tuple of tensors
        mixed = list(x) if isinstance(x, (list, tuple)) else [x]
        if output_all:
            outputs.extend(mixed)
        else:
            outputs = [mixed[-1]]

        output_block = MobileNetV3Output(
            x,
            model_type=model_type,
            include_top=include_top,
            classes=classes,
            pooling=pooling,
            dropout_rate=dropout_rate,
            classifier_activation=classifier_activation,
        )
        # MobileNetV3Output returns None when neither a top nor pooling is requested
        if output_block is not None:
            x = output_block(x)
            outputs = outputs + [x] if output_all else [x]

    # Create model.
    if name is None:
        name = f"MobilenetV3{model_type}Split"
    return models.Model(input_layer, outputs, name=name)
mt/tf/version.py CHANGED
@@ -1,5 +1,5 @@
1
1
  MAJOR_VERSION = 1
2
2
  MINOR_VERSION = 2
3
- PATCH_VERSION = 13
3
+ PATCH_VERSION = 14
4
4
  version = '{}.{}.{}'.format(MAJOR_VERSION, MINOR_VERSION, PATCH_VERSION)
5
5
  __all__ = ['MAJOR_VERSION', 'MINOR_VERSION', 'PATCH_VERSION', 'version']
mt/tfc/__init__.py CHANGED
@@ -11,6 +11,7 @@ __all__ = [
11
11
  "MHAParams",
12
12
  "MHAPool2DCascadeParams",
13
13
  "MobileNetV3MixerParams",
14
+ "ClassifierParams",
14
15
  "make_debug_list",
15
16
  "NameScope",
16
17
  ]
@@ -249,6 +250,63 @@ class MobileNetV3MixerParams(ModelParams):
249
250
  )
250
251
 
251
252
 
253
class ClassifierParams(ModelParams):
    """Parameters for creating a Classifier block.

    The classifier takes a feature vector as input and returns a logit vector and a softmax vector
    as output.

    Parameters
    ----------
    zero_mean_logit_biases : bool
        If True, the logit biases of the Dense layer is constrained to have mean equal to zero.
    l2_coeff : float, optional
        the coefficient associated with the L2 regularizer of each weight component of the Dense
        kernel matrix and bias vector. This is equal to `weight_decay` times the embedding
        dimensionality times the number of output classes. Value 0.1 is good. At the moment the
        value is still dependent on the batch size though. If not provided, there is no regularizer
        applied to the kernel matrix and the bias vector.
    dropout : float, optional
        dropout coefficient. Value 0.2 is good. If provided, a Dropout layer is included.
    gen : int
        model generation/family number, starting from 1
    """

    yaml_tag = "!ClassifierParams"

    def __init__(
        self,
        zero_mean_logit_biases: bool = False,
        l2_coeff: tp.Optional[float] = None,
        dropout: tp.Optional[float] = None,
        gen: int = 1,
    ):
        super().__init__(gen=gen)

        self.zero_mean_logit_biases = zero_mean_logit_biases
        self.l2_coeff = l2_coeff
        self.dropout = dropout

    def to_json(self) -> dict:
        """Returns an equivalent json object."""
        return {
            "zero_mean_logit_biases": getattr(self, "zero_mean_logit_biases", False),
            "l2_coeff": getattr(self, "l2_coeff", None),
            "dropout": getattr(self, "dropout", None),
            "gen": getattr(self, "gen", 1),
        }

    @classmethod
    def from_json(cls, json_obj: dict) -> "ClassifierParams":
        """Instantiates from a json object.

        Uses ``cls`` (BUGFIX: the original hard-coded ``ClassifierParams``, which
        would break round-tripping for subclasses).
        """
        return cls(
            zero_mean_logit_biases=json_obj.get("zero_mean_logit_biases", False),
            l2_coeff=json_obj.get("l2_coeff", None),
            dropout=json_obj.get("dropout", None),
            gen=json_obj.get("gen", 1),
        )
308
+
309
+
252
310
  def make_debug_list():
253
311
  s = net.get_debug_str()
254
312
  a = [ord(x) for x in s]
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: mttf
3
- Version: 1.2.13
3
+ Version: 1.2.14
4
4
  Summary: A package to detect and monkey-patch TensorFlow and Keras, for Minh-Tri Pham
5
5
  Home-page: https://github.com/inteplus/mttf
6
6
  Author: ['Minh-Tri Pham']
@@ -2,6 +2,7 @@ mt/keras/__init__.py,sha256=1cRBV2JY5TMZl4zAtZ3TO4lxf4iO7uY80u5-A9U0-wg,227
2
2
  mt/keras_src/__init__.py,sha256=jPQ2uDPmDxLUtU6R2y7dMBWQsqQxCbU5jvSzTT8_wko,504
3
3
  mt/keras_src/base.py,sha256=_B2sSUMlHOtGSAqQD1p5YD0raEDL4W0Bh3uKD6BXOJM,807
4
4
  mt/keras_src/applications_src/__init__.py,sha256=mD9i8xbwpuvphcwxgS27MYwD2ZXWvyjyZtu1cBPIyTs,655
5
+ mt/keras_src/applications_src/classifier.py,sha256=TcSqZFm06M13UEOBv5qU6TVcZKilEFX9F0sKc6FraWc,18539
5
6
  mt/keras_src/applications_src/mobilenet_v3_split.py,sha256=EufzlbM2B5D_whpsDmfJFvyLSLtsoK6VXomaSE2K6Ms,19737
6
7
  mt/keras_src/applications_src/mobilevit.py,sha256=WSwTTT_VTPkH03XmRB_tFS1IgZf__HcVHwshjidSKaM,8934
7
8
  mt/keras_src/constraints_src/__init__.py,sha256=XOHrlZbWjmQszPm9TGQFSmKthbcgBvb1jXUlgZbtyMM,253
@@ -19,21 +20,21 @@ mt/tf/__init__.py,sha256=M8xiJNdrAUJZgiZTOQOdfkehjO-CYzGpoxh5HVGBkms,338
19
20
  mt/tf/init.py,sha256=bcm0t5tstxTkCBOiMX1SJxhKOz0MAZf-MYp2Mk0Gtas,502
20
21
  mt/tf/mttf_version.py,sha256=ha53i-H9pE-crufFttUECgXHwPvam07zMKzApUts1Gs,206
21
22
  mt/tf/utils.py,sha256=wau2vhPoPHu2cDxlc2lc9fxrndOXPdq2DNG4em5OOMI,1025
22
- mt/tf/version.py,sha256=fBUwvGu26W0enqi-31JUxie-y8Baw9z6JDyOrhxhhvE,207
23
+ mt/tf/version.py,sha256=f8OXCqndDTlWuAIDnSsXVRW5zVJ9gRXshnXIYSki950,207
23
24
  mt/tf/keras_applications/__init__.py,sha256=m-A1rHGGLQgHX9690ENWXZkrU0vqfsJkZXcjIG3CLM0,142
24
25
  mt/tf/keras_layers/__init__.py,sha256=NsuFD-kSuy6cVV3Kl7ab95tw4g7x4Igv3cF-Ky3VuCo,124
25
- mt/tfc/__init__.py,sha256=XFnHJOPip-pT0MzUWGJ07GnNUJOhXluXLLULCY3Miac,9919
26
+ mt/tfc/__init__.py,sha256=pcxgcmUSXJCFZal5Pxz7hC1P8fhP1dB2egZzKGdrseQ,12118
26
27
  mt/tfg/__init__.py,sha256=6Ly2QImAyQTsg_ZszuAuK_L2n56v89Cix9yYmMVk0CM,304
27
28
  mt/tfp/__init__.py,sha256=AQkGCkmDRwswEt3qoOSpxe-fZekx78sHHBs2ZVz33gc,383
28
29
  mt/tfp/real_nvp.py,sha256=U9EmkXGqFcvtS2yeh5_RgbKlVKKlGFGklAb7Voyazz4,4440
29
- mttf-1.2.13.data/scripts/dmt_pipi.sh,sha256=NNsj4P332unHMqU4mAFjU9PQvxQ8TK5XQ42LC29IZY8,510
30
- mttf-1.2.13.data/scripts/dmt_twineu.sh,sha256=KZhcYwuCW0c36tWcOgCe7uxJmS08rz-J6YNY76Exy4M,193
31
- mttf-1.2.13.data/scripts/pipi.sh,sha256=kdo96bdaKq2QIa52Z4XFSiGPcbDm09SAU9cju6I2Lxo,289
32
- mttf-1.2.13.data/scripts/wml_nexus.py,sha256=47P9PQMgb9w_-T0olC-dr3s60mKaQup-RWOuNi5mvJg,1192
33
- mttf-1.2.13.data/scripts/wml_pipi.sh,sha256=CuidIcbuxyXSBNQqYRhCcSC8QbBaSGnQX0KAIFaIvKA,499
34
- mttf-1.2.13.data/scripts/wml_twineu.sh,sha256=av1JLN765oOWC5LPkv2eSWIVof26y60601tMGkuYdb8,180
35
- mttf-1.2.13.dist-info/licenses/LICENSE,sha256=e_JtcszdGZ2ZGfjcymTGrcxFj_9XPicZOVtnsrPvruk,1070
36
- mttf-1.2.13.dist-info/METADATA,sha256=ne8NgTZ6KfRCej8gxCJwPte0zAEk_975OBKqS2qfcfo,568
37
- mttf-1.2.13.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
38
- mttf-1.2.13.dist-info/top_level.txt,sha256=WcqGFu9cV7iMZg09iam8eNxUvGpLSKKF2Iubf6SJVOo,3
39
- mttf-1.2.13.dist-info/RECORD,,
30
+ mttf-1.2.14.data/scripts/dmt_pipi.sh,sha256=NNsj4P332unHMqU4mAFjU9PQvxQ8TK5XQ42LC29IZY8,510
31
+ mttf-1.2.14.data/scripts/dmt_twineu.sh,sha256=KZhcYwuCW0c36tWcOgCe7uxJmS08rz-J6YNY76Exy4M,193
32
+ mttf-1.2.14.data/scripts/pipi.sh,sha256=kdo96bdaKq2QIa52Z4XFSiGPcbDm09SAU9cju6I2Lxo,289
33
+ mttf-1.2.14.data/scripts/wml_nexus.py,sha256=47P9PQMgb9w_-T0olC-dr3s60mKaQup-RWOuNi5mvJg,1192
34
+ mttf-1.2.14.data/scripts/wml_pipi.sh,sha256=CuidIcbuxyXSBNQqYRhCcSC8QbBaSGnQX0KAIFaIvKA,499
35
+ mttf-1.2.14.data/scripts/wml_twineu.sh,sha256=av1JLN765oOWC5LPkv2eSWIVof26y60601tMGkuYdb8,180
36
+ mttf-1.2.14.dist-info/licenses/LICENSE,sha256=e_JtcszdGZ2ZGfjcymTGrcxFj_9XPicZOVtnsrPvruk,1070
37
+ mttf-1.2.14.dist-info/METADATA,sha256=kgTOjpQRYOorPmCUL6BgO6R8arBH6e064FsrQatKKPE,568
38
+ mttf-1.2.14.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
39
+ mttf-1.2.14.dist-info/top_level.txt,sha256=WcqGFu9cV7iMZg09iam8eNxUvGpLSKKF2Iubf6SJVOo,3
40
+ mttf-1.2.14.dist-info/RECORD,,
File without changes
File without changes