mttf 1.3.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. mt/keras/__init__.py +8 -0
  2. mt/keras_src/__init__.py +16 -0
  3. mt/keras_src/applications_src/__init__.py +33 -0
  4. mt/keras_src/applications_src/classifier.py +497 -0
  5. mt/keras_src/applications_src/mobilenet_v3_split.py +544 -0
  6. mt/keras_src/applications_src/mobilevit.py +292 -0
  7. mt/keras_src/base.py +28 -0
  8. mt/keras_src/constraints_src/__init__.py +14 -0
  9. mt/keras_src/constraints_src/center_around.py +19 -0
  10. mt/keras_src/layers_src/__init__.py +43 -0
  11. mt/keras_src/layers_src/counter.py +27 -0
  12. mt/keras_src/layers_src/floor.py +24 -0
  13. mt/keras_src/layers_src/identical.py +15 -0
  14. mt/keras_src/layers_src/image_sizing.py +1605 -0
  15. mt/keras_src/layers_src/normed_conv2d.py +239 -0
  16. mt/keras_src/layers_src/simple_mha.py +472 -0
  17. mt/keras_src/layers_src/soft_bend.py +36 -0
  18. mt/keras_src/layers_src/transformer_encoder.py +246 -0
  19. mt/keras_src/layers_src/utils.py +88 -0
  20. mt/keras_src/layers_src/var_regularizer.py +38 -0
  21. mt/tf/__init__.py +10 -0
  22. mt/tf/init.py +25 -0
  23. mt/tf/keras_applications/__init__.py +5 -0
  24. mt/tf/keras_layers/__init__.py +5 -0
  25. mt/tf/mttf_version.py +5 -0
  26. mt/tf/utils.py +44 -0
  27. mt/tf/version.py +5 -0
  28. mt/tfc/__init__.py +291 -0
  29. mt/tfg/__init__.py +8 -0
  30. mt/tfp/__init__.py +11 -0
  31. mt/tfp/real_nvp.py +116 -0
  32. mttf-1.3.6.data/scripts/dmt_build_package_and_upload_to_nexus.sh +25 -0
  33. mttf-1.3.6.data/scripts/dmt_pipi.sh +7 -0
  34. mttf-1.3.6.data/scripts/dmt_twineu.sh +2 -0
  35. mttf-1.3.6.data/scripts/pipi.sh +7 -0
  36. mttf-1.3.6.data/scripts/user_build_package_and_upload_to_nexus.sh +25 -0
  37. mttf-1.3.6.data/scripts/user_pipi.sh +8 -0
  38. mttf-1.3.6.data/scripts/user_twineu.sh +3 -0
  39. mttf-1.3.6.data/scripts/wml_build_package_and_upload_to_nexus.sh +25 -0
  40. mttf-1.3.6.data/scripts/wml_nexus.py +50 -0
  41. mttf-1.3.6.data/scripts/wml_pipi.sh +7 -0
  42. mttf-1.3.6.data/scripts/wml_twineu.sh +2 -0
  43. mttf-1.3.6.dist-info/METADATA +18 -0
  44. mttf-1.3.6.dist-info/RECORD +47 -0
  45. mttf-1.3.6.dist-info/WHEEL +5 -0
  46. mttf-1.3.6.dist-info/licenses/LICENSE +21 -0
  47. mttf-1.3.6.dist-info/top_level.txt +1 -0
@@ -0,0 +1,239 @@
1
+ from .. import layers, initializers, regularizers, constraints
2
+ from .counter import Counter
3
+
4
+
5
class NormedConv2D(layers.Layer):
    """A block of Conv2D without activation, followed by LayerNormalization, then activation.

    This layer represents the following block:

    .. code-block:: python

        x = input_tensor
        count = Counter()(x)  # number of training batches seen so far
        x = tf.keras.layers.Conv2D(activation=None, use_bias=False, ...)(x)
        y = tf.keras.layers.LayerNormalization(axis=-1, scale=True, ...)(x)
        alpha = growth_rate / (1.0 + count)
        z = alpha * x + (1 - alpha) * y  # fades from conv output to normed output
        return tf.keras.layers.Activation(...)(z)

    It operates as a Conv2D layer whose kernel responses are normalized before being activated.
    The bias vector of the convolution is omitted and replaced by the beta vector of the
    normalization. The activation of the convolution is done explicitly via an activation layer.

    Parameters
    ----------
    filters : int
        Integer, the dimensionality of the output space (i.e. the number of output filters in
        the convolution). Passed as-is to :class:`tensorflow.keras.layers.Conv2D`.
    kernel_size : int or tuple or list
        An integer or tuple/list of 2 integers, specifying the height and width of the 2D
        convolution window. Can be a single integer to specify the same value for all spatial
        dimensions. Passed as-is to :class:`tensorflow.keras.layers.Conv2D`.
    strides : int or tuple or list
        An integer or tuple/list of 2 integers, specifying the strides of the convolution along
        the height and width. Can be a single integer to specify the same value for all spatial
        dimensions. Specifying any ``stride value != 1`` is incompatible with specifying any
        ``dilation_rate value != 1``. Passed as-is to :class:`tensorflow.keras.layers.Conv2D`.
    padding : {"valid", "same"}
        'valid' means no padding. 'same' results in padding with zeros evenly to the left/right
        or up/down of the input. When ``padding="same"`` and ``strides=1``, the output has the
        same size as the input. Passed as-is to :class:`tensorflow.keras.layers.Conv2D`.
    data_format : {"channels_last", "channels_first", None}
        A string, one of 'channels_last' (default) or 'channels_first'. The ordering of the
        dimensions in the inputs. channels_last corresponds to inputs with shape
        ``(batch_size, height, width, channels)`` while channels_first corresponds to inputs
        with shape ``(batch_size, channels, height, width)``. It defaults to the
        image_data_format value found in your Keras config file at ``~/.keras/keras.json``. If
        you never set it, then it will be 'channels_last'. Note that the 'channels_first'
        format is currently not supported by TensorFlow on CPU. Passed as-is to
        :class:`tensorflow.keras.layers.Conv2D`.
    dilation_rate : int or tuple or list
        an integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated
        convolution. Can be a single integer to specify the same value for all spatial
        dimensions. Currently, specifying any ``dilation_rate value != 1`` is incompatible with
        specifying any ``stride value != 1``. Passed as-is to
        :class:`tensorflow.keras.layers.Conv2D`.
    groups : int
        A positive integer specifying the number of groups in which the input is split along
        the channel axis. Each group is convolved separately with filters / groups filters. The
        output is the concatenation of all the groups results along the channel axis. Input
        channels and filters must both be divisible by groups. Passed as-is to
        :class:`tensorflow.keras.layers.Conv2D`.
    kernel_initializer : str or object
        Initializer for the kernel weights matrix (see keras.initializers). Defaults to
        'glorot_uniform'. Passed as-is to :class:`tensorflow.keras.layers.Conv2D`.
    kernel_regularizer : str or object
        Regularizer function applied to the kernel weights matrix (see keras.regularizers).
        Passed as-is to :class:`tensorflow.keras.layers.Conv2D`.
    kernel_constraint : str or object
        Constraint function applied to the kernel matrix (see keras.constraints). Passed as-is
        to :class:`tensorflow.keras.layers.Conv2D`.
    epsilon : float
        Small float added to variance to avoid dividing by zero. Defaults to ``1e-3``. Passed
        as-is to :class:`tensorflow.keras.layers.LayerNormalization`.
    center : bool
        If True, add offset of beta to normalized tensor. If False, beta is ignored. Defaults
        to True. Passed as-is to :class:`tensorflow.keras.layers.LayerNormalization`.
    gamma_initializer : str or object
        Initializer for the gamma weight. Defaults to ones. Passed as-is to
        :class:`tensorflow.keras.layers.LayerNormalization`.
    gamma_regularizer : str or object
        Optional regularizer for the gamma weight. None by default. Passed as-is to
        :class:`tensorflow.keras.layers.LayerNormalization`.
    gamma_constraint : str or object
        Optional constraint for the gamma weight. None by default. Passed as-is to
        :class:`tensorflow.keras.layers.LayerNormalization`.
    beta_initializer : str or object
        Initializer for the beta weight. Defaults to zeros. Passed as-is to
        :class:`tensorflow.keras.layers.LayerNormalization`.
    beta_regularizer : str or object
        Optional regularizer for the beta weight. None by default. Passed as-is to
        :class:`tensorflow.keras.layers.LayerNormalization`.
    beta_constraint : str or object
        Optional constraint for the beta weight. None by default. Passed as-is to
        :class:`tensorflow.keras.layers.LayerNormalization`.
    growth_rate : float
        Growth rate for switching from Conv2D output to the normed output. Defaults to 1.0.
    activation : str or object
        Activation function to use. If you don't specify anything, no activation is applied
        (see keras.activations). Passed as-is to :class:`tensorflow.keras.layers.Activation`.

    Input shape
    -----------
    ``4+D`` tensor with shape: ``batch_shape + (channels, rows, cols)`` if
    ``data_format='channels_first'`` or ``4+D`` tensor with shape:
    ``batch_shape + (rows, cols, channels)`` if ``data_format='channels_last'``.

    Output shape
    ------------
    ``4+D`` tensor with shape: ``batch_shape + (filters, new_rows, new_cols)`` if
    ``data_format='channels_first'`` or ``4+D`` tensor with shape:
    ``batch_shape + (new_rows, new_cols, filters)`` if ``data_format='channels_last'``. rows
    and cols values might have changed due to padding.

    Please see the `layer_normalization`_ paper for more details.

    .. _layer_normalization:
       https://arxiv.org/abs/1607.06450
    """

    def __init__(
        self,
        filters,
        kernel_size,
        strides=(1, 1),
        padding="valid",
        data_format=None,
        dilation_rate=(1, 1),
        groups=1,
        kernel_initializer="glorot_uniform",
        kernel_regularizer=None,
        kernel_constraint=None,
        epsilon=0.001,
        center=True,
        gamma_initializer="ones",
        gamma_regularizer=None,
        gamma_constraint=None,
        beta_initializer="zeros",
        beta_regularizer=None,
        beta_constraint=None,
        growth_rate: float = 1.0,
        activation=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Names of all constructor arguments. They are mirrored onto the instance
        # below and re-read by get_config() to make the layer serializable.
        self.keys = [
            "filters",
            "kernel_size",
            "strides",
            "padding",
            "data_format",
            "dilation_rate",
            "groups",
            "kernel_initializer",
            "kernel_regularizer",
            "kernel_constraint",
            "epsilon",
            "center",
            "gamma_initializer",
            "gamma_regularizer",
            "gamma_constraint",
            "beta_initializer",
            "beta_regularizer",
            "beta_constraint",
            "growth_rate",
            "activation",
        ]

        # Mirror each constructor argument as an attribute of the same name.
        for key in self.keys:
            setattr(self, key, locals()[key])

        # Convolution without bias or activation: the bias is replaced by the
        # normalization's beta, and the activation is applied separately below.
        self.conv2d = layers.Conv2D(
            filters,
            kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            groups=groups,
            activation=None,
            use_bias=False,
            kernel_initializer=kernel_initializer,
            kernel_regularizer=kernel_regularizer,
            kernel_constraint=kernel_constraint,
        )

        # Counts training batches; drives the conv-to-normed blend coefficient in call().
        self.counter = Counter()

        self.norm = layers.LayerNormalization(
            axis=-1,
            epsilon=epsilon,
            scale=True,
            center=center,
            gamma_initializer=gamma_initializer,
            gamma_regularizer=gamma_regularizer,
            gamma_constraint=gamma_constraint,
            beta_initializer=beta_initializer,
            beta_regularizer=beta_regularizer,
            beta_constraint=beta_constraint,
        )

        # The activation sub-layer only exists when an activation was requested;
        # call() checks self.activation before touching self.acti.
        if activation is not None:
            self.acti = layers.Activation(activation)

    def call(self, x, training: bool = False):
        # coeff decays from growth_rate towards 0 as the counter grows, so the
        # output gradually switches from the raw conv response to its
        # layer-normalized version over the course of training.
        count = self.counter(x, training=training)
        coeff = self.growth_rate / (1.0 + count)
        y1 = self.conv2d(x, training=training)
        y2 = self.norm(y1)
        z = coeff * y1 + (1 - coeff) * y2
        if self.activation is None:
            return z
        w = self.acti(z, training=training)
        return w

    call.__doc__ = layers.Layer.call.__doc__

    def get_config(self):
        """Returns the config of the layer, with sub-objects serialized."""
        config = {key: getattr(self, key) for key in self.keys}
        # Serialize initializer/regularizer/constraint objects (string specs are
        # kept as-is). Applies identically to the kernel, gamma and beta weights.
        modules = {
            "initializer": initializers,
            "regularizer": regularizers,
            "constraint": constraints,
        }
        for prefix in ("kernel", "gamma", "beta"):
            for suffix, module in modules.items():
                key = f"{prefix}_{suffix}"
                value = config[key]
                if not isinstance(value, str):
                    config[key] = module.serialize(value)
        # Drop unset entries so the config stays minimal.
        config = {key: value for key, value in config.items() if value is not None}
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))