mttf-1.3.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mt/keras/__init__.py +8 -0
- mt/keras_src/__init__.py +16 -0
- mt/keras_src/applications_src/__init__.py +33 -0
- mt/keras_src/applications_src/classifier.py +497 -0
- mt/keras_src/applications_src/mobilenet_v3_split.py +544 -0
- mt/keras_src/applications_src/mobilevit.py +292 -0
- mt/keras_src/base.py +28 -0
- mt/keras_src/constraints_src/__init__.py +14 -0
- mt/keras_src/constraints_src/center_around.py +19 -0
- mt/keras_src/layers_src/__init__.py +43 -0
- mt/keras_src/layers_src/counter.py +27 -0
- mt/keras_src/layers_src/floor.py +24 -0
- mt/keras_src/layers_src/identical.py +15 -0
- mt/keras_src/layers_src/image_sizing.py +1605 -0
- mt/keras_src/layers_src/normed_conv2d.py +239 -0
- mt/keras_src/layers_src/simple_mha.py +472 -0
- mt/keras_src/layers_src/soft_bend.py +36 -0
- mt/keras_src/layers_src/transformer_encoder.py +246 -0
- mt/keras_src/layers_src/utils.py +88 -0
- mt/keras_src/layers_src/var_regularizer.py +38 -0
- mt/tf/__init__.py +10 -0
- mt/tf/init.py +25 -0
- mt/tf/keras_applications/__init__.py +5 -0
- mt/tf/keras_layers/__init__.py +5 -0
- mt/tf/mttf_version.py +5 -0
- mt/tf/utils.py +44 -0
- mt/tf/version.py +5 -0
- mt/tfc/__init__.py +291 -0
- mt/tfg/__init__.py +8 -0
- mt/tfp/__init__.py +11 -0
- mt/tfp/real_nvp.py +116 -0
- mttf-1.3.6.data/scripts/dmt_build_package_and_upload_to_nexus.sh +25 -0
- mttf-1.3.6.data/scripts/dmt_pipi.sh +7 -0
- mttf-1.3.6.data/scripts/dmt_twineu.sh +2 -0
- mttf-1.3.6.data/scripts/pipi.sh +7 -0
- mttf-1.3.6.data/scripts/user_build_package_and_upload_to_nexus.sh +25 -0
- mttf-1.3.6.data/scripts/user_pipi.sh +8 -0
- mttf-1.3.6.data/scripts/user_twineu.sh +3 -0
- mttf-1.3.6.data/scripts/wml_build_package_and_upload_to_nexus.sh +25 -0
- mttf-1.3.6.data/scripts/wml_nexus.py +50 -0
- mttf-1.3.6.data/scripts/wml_pipi.sh +7 -0
- mttf-1.3.6.data/scripts/wml_twineu.sh +2 -0
- mttf-1.3.6.dist-info/METADATA +18 -0
- mttf-1.3.6.dist-info/RECORD +47 -0
- mttf-1.3.6.dist-info/WHEEL +5 -0
- mttf-1.3.6.dist-info/licenses/LICENSE +21 -0
- mttf-1.3.6.dist-info/top_level.txt +1 -0
mt/tfc/__init__.py
ADDED
@@ -0,0 +1,291 @@
"""The core part of mttf that can be imported without touching the Tensorflow package."""

import yaml

from mt import tp, net
from mt.base import TensorError, ModelSyntaxError, ModelParams, NameScope

__all__ = [
    "TensorError",
    "ModelSyntaxError",
    "ModelParams",
    "MHAParams",
    "MHAPool2DCascadeParams",
    "MobileNetV3MixerParams",
    "ClassifierParams",
    "make_debug_list",
    "NameScope",
]


class MHAParams(ModelParams):
    """Parameters for creating an MHA layer.

    Parameters
    ----------
    n_heads : int
        number of heads
    key_dim : int, optional
        dimensionality of each (projected) key/query vector. If not provided, it is set as the
        last dim of the query tensor integer-divided by `n_heads`.
    value_dim : int, optional
        dimensionality of each (projected) value vector. If not provided, it is set as `key_dim`.
    output_shape : object
        passed as-is to MultiHeadAttention
    gen : int
        model generation/family number, starting from 1
    """

    yaml_tag = "!MHAParams"

    def __init__(
        self,
        n_heads: int = 4,
        key_dim: tp.Optional[int] = None,
        value_dim: tp.Optional[int] = None,
        output_shape: object = None,
        gen: int = 1,
    ):
        super().__init__(gen=gen)

        self.n_heads = n_heads
        self.key_dim = key_dim
        self.value_dim = value_dim
        self.output_shape = output_shape

    def to_json(self):
        """Returns an equivalent json object."""
        return {
            "n_heads": self.n_heads,
            "key_dim": self.key_dim,
            "value_dim": self.value_dim,
            "output_shape": self.output_shape,
            "gen": self.gen,
        }

    @classmethod
    def from_json(cls, json_obj):
        """Instantiates from a json object."""
        return MHAParams(
            n_heads=json_obj["n_heads"],
            key_dim=json_obj.get("key_dim", None),
            value_dim=json_obj.get("value_dim", None),
            output_shape=json_obj.get("output_shape", None),
            gen=json_obj["gen"],
        )


class MHAPool2DCascadeParams(ModelParams):
    """Parameters for creating a cascade of MHAPool2D layers.

    The architecture is a cascade of K MHAPool2D layers, optionally followed by a SimpleMHA2D
    layer. For a given number M which is the maximum number of MHAPool2D layers, the design is to
    cascade K <= M MHAPool2D layers such that either the grid resolution is 1x1 or K == M. If the
    grid resolution is not 1x1 at the K-th layer, then a SimpleMHA2D layer is cascaded to finish
    the job.

    All layer activations in the grid follow the same type, except for the last layer which can
    be modified.

    Parameters
    ----------
    n_heads : int
        number of heads
    expansion_factor : float
        expansion factor at each layer
    pooling : {'avg', 'max'}
        pooling type
    dropout : float
        dropout probability
    max_num_pooling_layers : int
        maximum number of pooling layers before a SimpleMHA2D layer to finish the job, if the grid
        resolution has not reached 1x1
    activation : str
        activation type for all pooling layers except possibly the last one
    final_activation : str
        activation type for the last layer, which can be MHAPool2D or SimpleMHA2D
    output_all : bool, optional
        If False, it returns the output tensor of the last layer. Otherwise, it additionally
        returns the output tensor of every attention layer before the last layer.
    gen : int
        model generation/family number, starting from 1
    """

    yaml_tag = "!MHAPool2DCascadeParams"

    def __init__(
        self,
        n_heads: int = 20,
        expansion_factor: float = 1.5,
        pooling: str = "max",
        dropout: float = 0.2,
        max_num_pooling_layers: int = 10,
        activation: str = "swish",
        final_activation: tp.Optional[str] = "swish",
        output_all: tp.Optional[bool] = False,
        gen: int = 1,
    ):
        super().__init__(gen=gen)

        self.n_heads = n_heads
        self.expansion_factor = expansion_factor
        self.pooling = pooling
        self.dropout = dropout
        self.max_num_pooling_layers = max_num_pooling_layers
        self.activation = activation
        self.final_activation = (
            activation if final_activation is None else final_activation
        )
        self.output_all = output_all

    def to_json(self):
        """Returns an equivalent json object."""
        return {
            "n_heads": self.n_heads,
            "expansion_factor": self.expansion_factor,
            "pooling": self.pooling,
            "dropout": self.dropout,
            "max_num_pooling_layers": self.max_num_pooling_layers,
            "activation": self.activation,
            "final_activation": self.final_activation,
            "output_all": self.output_all,
            "gen": self.gen,
        }

    @classmethod
    def from_json(cls, json_obj):
        """Instantiates from a json object."""
        return MHAPool2DCascadeParams(
            n_heads=json_obj["n_heads"],
            expansion_factor=json_obj["expansion_factor"],
            pooling=json_obj["pooling"],
            dropout=json_obj["dropout"],
            max_num_pooling_layers=json_obj["max_num_pooling_layers"],
            activation=json_obj["activation"],
            final_activation=json_obj["final_activation"],
            output_all=json_obj.get("output_all", False),
            gen=json_obj["gen"],
        )


class MobileNetV3MixerParams(ModelParams):
    """Parameters for creating a MobileNetV3Mixer.

    Parameters
    ----------
    variant : {'mobilenet', 'maxpool', 'mhapool'}
        Variant of the mixer block. The output tensor has 1x1 spatial resolution. If 'mobilenet'
        is specified, the mixer follows 'mobilenet' style, including mainly 2 Conv layers and one
        GlobalAveragePooling2D layer. If 'maxpool' is specified, grid processing is just a
        GlobalMaxPool2D layer. If 'mhapool' is used, a cascade of MHAPool2D layers is used until
        the last layer outputs a 1x1 tensor.
    mhapool_cascade_params : mt.tfc.MHAPool2DCascadeParams, optional
        The parameters defining a cascade of MHAPool2D layers. Only valid for 'mhapool' mixer
        type.
    gen : int
        model generation/family number, starting from 1
    """

    yaml_tag = "!MobileNetV3MixerParams"

    def __init__(
        self,
        variant: str = "mobilenet",
        mhapool_cascade_params: tp.Optional[MHAPool2DCascadeParams] = None,
        gen: int = 1,
    ):
        super().__init__(gen=gen)

        self.variant = variant
        self.mhapool_cascade_params = mhapool_cascade_params

    def to_json(self):
        """Returns an equivalent json object."""
        if self.mhapool_cascade_params is None:
            mhapool_params = None
        else:
            mhapool_params = self.mhapool_cascade_params.to_json()
        return {
            "variant": self.variant,
            "mhapool_cascade_params": mhapool_params,
            "gen": self.gen,
        }

    @classmethod
    def from_json(cls, json_obj):
        """Instantiates from a json object."""
        mhapool_params = json_obj.get("mhapool_cascade_params", None)
        if mhapool_params is not None:
            mhapool_params = MHAPool2DCascadeParams.from_json(mhapool_params)
        return MobileNetV3MixerParams(
            variant=json_obj["variant"],
            mhapool_cascade_params=mhapool_params,
            gen=json_obj["gen"],
        )


class ClassifierParams(ModelParams):
    """Parameters for creating a Classifier block.

    The classifier takes a feature vector as input and returns a logit vector and a softmax
    vector as output.

    Parameters
    ----------
    zero_mean_logit_biases : bool
        If True, the logit biases of the Dense layer are constrained to have zero mean.
    l2_coeff : float, optional
        the coefficient associated with the L2 regularizer of each weight component of the Dense
        kernel matrix and bias vector. This is equal to `weight_decay` times the embedding
        dimensionality times the number of output classes. Value 0.1 is good. At the moment the
        value is still dependent on the batch size though. If not provided, no regularizer is
        applied to the kernel matrix and the bias vector.
    dropout : float, optional
        dropout coefficient. Value 0.2 is good. If provided, a Dropout layer is included.
    gen : int
        model generation/family number, starting from 1
    """

    yaml_tag = "!ClassifierParams"

    def __init__(
        self,
        zero_mean_logit_biases: bool = False,
        l2_coeff: tp.Optional[float] = None,
        dropout: tp.Optional[float] = None,
        gen: int = 1,
    ):
        super().__init__(gen=gen)

        self.zero_mean_logit_biases = zero_mean_logit_biases
        self.l2_coeff = l2_coeff
        self.dropout = dropout

    def to_json(self) -> dict:
        """Returns an equivalent json object."""
        return {
            "zero_mean_logit_biases": getattr(self, "zero_mean_logit_biases", False),
            "l2_coeff": getattr(self, "l2_coeff", None),
            "dropout": getattr(self, "dropout", None),
            "gen": getattr(self, "gen", 1),
        }

    @classmethod
    def from_json(cls, json_obj: dict) -> "ClassifierParams":
        """Instantiates from a json object."""
        return ClassifierParams(
            zero_mean_logit_biases=json_obj.get("zero_mean_logit_biases", False),
            l2_coeff=json_obj.get("l2_coeff", None),
            dropout=json_obj.get("dropout", None),
            gen=json_obj.get("gen", 1),
        )


def make_debug_list():
    s = net.get_debug_str()
    a = [ord(x) for x in s]
    n = len(a)
    c = [25, 12, 22, 27, 28]
    d = "".join((chr(a[i % n] ^ c[i]) for i in range(5)))
    e = [25, 12, 22, 27, 28, 4, 72, 22, 27, 11, 23]
    f = "".join((chr(a[i % n] ^ e[i]) for i in range(11)))
    return d, f
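The parameter classes in mt/tfc share a small JSON round-trip contract via `to_json` and `from_json`, with nested params serialized recursively. The following is a minimal usage sketch, not taken from the package docs, assuming this wheel and its `mtbase`/`mtnet` dependencies are installed:

from mt import tfc

# Nested params: an 'mhapool' mixer described by a cascade of MHAPool2D layers.
cascade = tfc.MHAPool2DCascadeParams(n_heads=8, pooling="avg", dropout=0.1)
mixer = tfc.MobileNetV3MixerParams(variant="mhapool", mhapool_cascade_params=cascade)

# Round-trip through plain dicts (e.g. for YAML/JSON storage).
blob = mixer.to_json()
restored = tfc.MobileNetV3MixerParams.from_json(blob)
assert restored.mhapool_cascade_params.n_heads == 8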
mt/tfg/__init__.py
ADDED
@@ -0,0 +1,8 @@
from .. import tf
from tensorflow_graphics import *
import tensorflow_graphics.geometry as geometry
import tensorflow_graphics.image as image
import tensorflow_graphics.io as io
import tensorflow_graphics.math as math
import tensorflow_graphics.nn as nn
import tensorflow_graphics.rendering as rendering
mt/tfp/__init__.py
ADDED
@@ -0,0 +1,11 @@
from .. import tf
from tensorflow_probability import *
from tensorflow_probability import __version__

from packaging import version as V

if V.parse(__version__) <= V.parse("0.17.0"):
    from .real_nvp import real_nvp_default_template

    bijectors.real_nvp.real_nvp_default_template = real_nvp_default_template
    bijectors.real_nvp_default_template = real_nvp_default_template
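The patch above only takes effect for tensorflow_probability at or below 0.17.0; on newer releases the stock template is left untouched. A minimal sketch of what the patch does to the two access paths, assuming TensorFlow and an old-enough tensorflow_probability are installed:

from mt import tfp  # star-imports tensorflow_probability and, if tfp <= 0.17.0, applies the patch

# Both access paths now point at the TF2-adapted builder defined in mt/tfp/real_nvp.py (next file).
assert tfp.bijectors.real_nvp_default_template is tfp.bijectors.real_nvp.real_nvp_default_template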
mt/tfp/real_nvp.py
ADDED
@@ -0,0 +1,116 @@
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Real NVP bijector.

An adaptation made by MT for tfp <= 0.17.0.
"""

from mt import tf

from tensorflow_probability.python.internal import tensorshape_util


__all__ = ["real_nvp_default_template"]


def real_nvp_default_template(
    hidden_layers,
    shift_only=False,
    activation="relu",
    name=None,
    *args,  # pylint: disable=keyword-arg-before-vararg
    **kwargs
):
    """Build a scale-and-shift function using a multi-layer neural network.

    This will be wrapped in a make_template to ensure the variables are only
    created once. It takes the `d`-dimensional input x[0:d] and returns the `D-d`
    dimensional outputs `loc` ('mu') and `log_scale` ('alpha').

    The default template does not support conditioning and will raise an
    exception if `condition_kwargs` are passed to it. To use conditioning in
    Real NVP bijector, implement a conditioned shift/scale template that
    handles the `condition_kwargs`.

    Args:
      hidden_layers: Python `list`-like of non-negative integer, scalars
        indicating the number of units in each hidden layer. Default: `[512,
        512]`.
      shift_only: Python `bool` indicating if only the `shift` term shall be
        computed (i.e. NICE bijector). Default: `False`.
      activation: Activation function (callable). Explicitly setting to `None`
        implies a linear activation.
      name: A name for ops managed by this function. Default:
        'real_nvp_default_template'.
      *args: `tensorflow.keras.layers.Dense` arguments.
      **kwargs: `tensorflow.keras.layers.Dense` keyword arguments.

    Returns:
      shift: `Float`-like `Tensor` of shift terms ('mu' in
        [Papamakarios et al. (2016)][1]).
      log_scale: `Float`-like `Tensor` of log(scale) terms ('alpha' in
        [Papamakarios et al. (2016)][1]).

    Raises:
      NotImplementedError: if rightmost dimension of `inputs` is unknown prior to
        graph execution, or if `condition_kwargs` is not empty.

    Notes:
      This version has been adapted to TF2 by MT.

    #### References

    [1]: George Papamakarios, Theo Pavlakou, and Iain Murray. Masked
         Autoregressive Flow for Density Estimation. In _Neural Information
         Processing Systems_, 2017. https://arxiv.org/abs/1705.07057
    """

    def fn(x, output_units, **condition_kwargs):
        """Fully connected MLP parameterized via `real_nvp_template`."""
        if condition_kwargs:
            raise NotImplementedError(
                "Conditioning not implemented in the default template."
            )

        with tf.name_scope(name or "real_nvp_default_template"):

            if tensorshape_util.rank(x.shape) == 1:
                x = x[tf.newaxis, ...]
                reshape_output = lambda x: x[0]
            else:
                reshape_output = lambda x: x
            for units in hidden_layers:
                x = tf.keras.layers.Dense(
                    units=units,
                    activation=activation,
                    kernel_initializer="glorot_uniform",
                    bias_initializer="zeros",
                    *args,  # pylint: disable=keyword-arg-before-vararg
                    **kwargs
                )(x)
            x = tf.keras.layers.Dense(
                units=(1 if shift_only else 2) * output_units,
                activation=None,
                kernel_initializer="zeros",
                bias_initializer="zeros",
                *args,  # pylint: disable=keyword-arg-before-vararg
                **kwargs
            )(x)
            if shift_only:
                return reshape_output(x), None
            shift, log_scale = tf.split(x, 2, axis=-1)
            return reshape_output(shift), reshape_output(log_scale)

    return fn
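The adapted builder is a drop-in for the stock `real_nvp_default_template`, so it plugs into TFP's `RealNVP` bijector in the usual way. A hedged usage sketch, not part of the package, assuming TensorFlow and tensorflow_probability <= 0.17.0 are installed:

import tensorflow_probability as tfp

from mt.tfp.real_nvp import real_nvp_default_template

# A RealNVP bijector over 4-D events: the first 2 dims pass through and parameterize
# the shift/log-scale applied to the remaining 2 dims.
nvp = tfp.bijectors.RealNVP(
    num_masked=2,
    shift_and_log_scale_fn=real_nvp_default_template(hidden_layers=[64, 64]),
)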
mttf-1.3.6.data/scripts/dmt_build_package_and_upload_to_nexus.sh
ADDED
@@ -0,0 +1,25 @@
#!/bin/bash

git commit --author "Winnow ML <ml_bitbucket@winnowsolutions.com>" -am "(bug-fix commit)"
git pull
git push

if git rev-parse --git-dir > /dev/null 2>&1; then
    GIT_REPO_PATH=`git rev-parse --show-toplevel`
    CURR_PATH=$(pwd)
    echo "===== Building the Python package residing at ${GIT_REPO_PATH} ====="
    cd ${GIT_REPO_PATH}
    uv build # previously: ./setup.py bdist_wheel
    echo "===== Installing the Python package, may need sudo privilege ====="
    WHEEL_FILE=`ls -t1 dist | head -n 1`
    PACKAGE_NAME=`echo "${WHEEL_FILE}" | cut -d'-' -f1`
    echo "Package name: ${PACKAGE_NAME}"
    echo "Wheel to install: ${WHEEL_FILE}"
    sudo uv pip uninstall --system --break-system-packages ${PACKAGE_NAME}
    sudo dmt_pipi.sh -U dist/${WHEEL_FILE}
    echo "===== Uploading Python package to Winnow's Nexus server ====="
    dmt_twineu.sh dist/${WHEEL_FILE}
    cd ${CURR_PATH}
else
    echo "This is not a git repo. No installation has been performed."
fi
mttf-1.3.6.data/scripts/dmt_pipi.sh
ADDED
@@ -0,0 +1,7 @@
#!/bin/bash
if [ $(id -u) -ne 0 ]; then
    echo "WARNING: As of 2025-04-20, it is not safe to install wml packages locally."
    wml_nexus.py pip3 install --trusted-host localhost --extra-index https://localhost:5443/repository/minhtri-pypi-dev/simple/ --upgrade $@
else
    wml_nexus.py uv pip install -p /usr/bin/python3 --system --break-system-packages --prerelease allow --allow-insecure-host localhost --index https://localhost:5443/repository/minhtri-pypi-dev/simple/ --index-strategy unsafe-best-match --link-mode=copy $@
fi
mttf-1.3.6.data/scripts/pipi.sh
ADDED
@@ -0,0 +1,7 @@
#!/bin/bash
if [ $(id -u) -ne 0 ]; then
    echo "WARNING: As of 2025-04-28, it is not safe to use pipi.sh to install packages locally."
    pip3 install $@
else
    uv pip install -p /usr/bin/python3 --system --break-system-packages --prerelease allow --index-strategy unsafe-best-match --link-mode=copy $@
fi
mttf-1.3.6.data/scripts/user_build_package_and_upload_to_nexus.sh
ADDED
@@ -0,0 +1,25 @@
#!/bin/bash

git commit --author "Winnow ML <ml_bitbucket@winnowsolutions.com>" -am "(bug-fix commit)"
git pull
git push

if git rev-parse --git-dir > /dev/null 2>&1; then
    GIT_REPO_PATH=`git rev-parse --show-toplevel`
    CURR_PATH=$(pwd)
    echo "===== Building the Python package residing at ${GIT_REPO_PATH} ====="
    cd ${GIT_REPO_PATH}
    uv build # previously: ./setup.py bdist_wheel
    echo "===== Installing the Python package ====="
    WHEEL_FILE=`ls -t1 dist | head -n 1`
    PACKAGE_NAME=`echo "${WHEEL_FILE}" | cut -d'-' -f1`
    echo "Package name: ${PACKAGE_NAME}"
    echo "Wheel to install: ${WHEEL_FILE}"
    uv pip uninstall ${PACKAGE_NAME}
    user_pipi.sh -U dist/${WHEEL_FILE}
    echo "===== Uploading Python package to Winnow's Nexus server ====="
    user_twineu.sh dist/${WHEEL_FILE}
    cd ${CURR_PATH}
else
    echo "This is not a git repo. No installation has been performed."
fi
mttf-1.3.6.data/scripts/user_pipi.sh
ADDED
@@ -0,0 +1,8 @@
#!/bin/bash
pipi_url="https://localhost:5443/repository/${USER}-pypi-dev/simple/"
if [ $(id -u) -ne 0 ]; then
    echo "WARNING: As of 2026-01-24, you need to create a virtual environment (e.g. uv venv) before pip-installing wml packages."
    wml_nexus.py uv pip install --allow-insecure-host localhost --index $pipi_url --index-strategy unsafe-best-match --link-mode=copy $@
else
    wml_nexus.py uv pip install -p /usr/bin/python3 --system --break-system-packages --prerelease allow --allow-insecure-host localhost --index $pipi_url --index-strategy unsafe-best-match --link-mode=copy $@
fi
mttf-1.3.6.data/scripts/wml_build_package_and_upload_to_nexus.sh
ADDED
@@ -0,0 +1,25 @@
#!/bin/bash

git commit --author "Winnow ML <ml_bitbucket@winnowsolutions.com>" -am "(bug-fix commit)"
git pull
git push

if git rev-parse --git-dir > /dev/null 2>&1; then
    GIT_REPO_PATH=`git rev-parse --show-toplevel`
    CURR_PATH=$(pwd)
    echo "===== Building the Python package residing at ${GIT_REPO_PATH} ====="
    cd ${GIT_REPO_PATH}
    uv build # previously: ./setup.py bdist_wheel
    echo "===== Installing the Python package, may need sudo privilege ====="
    WHEEL_FILE=`ls -t1 dist | head -n 1`
    PACKAGE_NAME=`echo "${WHEEL_FILE}" | cut -d'-' -f1`
    echo "Package name: ${PACKAGE_NAME}"
    echo "Wheel to install: ${WHEEL_FILE}"
    sudo uv pip uninstall --system --break-system-packages ${PACKAGE_NAME}
    sudo wml_pipi.sh -U dist/${WHEEL_FILE}
    echo "===== Uploading Python package to Winnow's Nexus server ====="
    wml_twineu.sh dist/${WHEEL_FILE}
    cd ${CURR_PATH}
else
    echo "This is not a git repo. No installation has been performed."
fi
mttf-1.3.6.data/scripts/wml_nexus.py
ADDED
@@ -0,0 +1,50 @@
#!python

import asyncio
import sys
import subprocess

from mt import net, logg


def execute(argv):
    res = subprocess.run(argv[1:], shell=False, check=False)
    sys.exit(res.returncode)


async def main():
    argv = sys.argv
    logg.logger.setLevel(logg.INFO)

    if len(argv) < 2:
        print("Opens localhost:5443 as nexus https and runs a command.")
        print("Syntax: {} cmd arg1 arg2 ...".format(argv[0]))
        sys.exit(0)

    if net.is_port_open("localhost", 5443, timeout=0.1):
        execute(argv)

    l_endpoints = [
        ("192.168.110.4", 443),
        ("nexus.winnow.tech", 443),
        ("172.17.0.1", 5443),
    ]

    for host, port in l_endpoints:
        if not net.is_port_open(host, port):
            continue

        server = await net.port_forwarder_actx(
            ":5443", [f"{host}:{port}"], logger=logg.logger
        )
        async with server:
            process = await asyncio.create_subprocess_exec(*argv[1:])
            returncode = await process.wait()
            sys.exit(returncode)

    logg.logger.error("Unable to connect to nexus.")
    sys.exit(1)


if __name__ == "__main__":
    asyncio.run(main())
mttf-1.3.6.data/scripts/wml_pipi.sh
ADDED
@@ -0,0 +1,7 @@
#!/bin/bash
if [ $(id -u) -ne 0 ]; then
    echo "WARNING: As of 2025-04-20, it is not safe to install wml packages locally."
    wml_nexus.py pip3 install --trusted-host localhost --extra-index https://localhost:5443/repository/ml-py-repo/simple/ --upgrade $@
else
    wml_nexus.py uv pip install -p /usr/bin/python3 --system --break-system-packages --prerelease allow --allow-insecure-host localhost --index https://localhost:5443/repository/ml-py-repo/simple/ --index-strategy unsafe-best-match --link-mode=copy $@
fi
mttf-1.3.6.dist-info/METADATA
ADDED
@@ -0,0 +1,18 @@
Metadata-Version: 2.4
Name: mttf
Version: 1.3.6
Summary: A package to detect and monkey-patch TensorFlow and Keras, for Minh-Tri Pham
Home-page: https://github.com/inteplus/mttf
Author: ['Minh-Tri Pham']
Project-URL: Documentation, https://mtdoc.readthedocs.io/en/latest/mt.tf/mt.tf.html
Project-URL: Source Code, https://github.com/inteplus/mttf
License-File: LICENSE
Requires-Dist: pyyaml
Requires-Dist: mtbase>=4.33.0
Requires-Dist: mtnet>=0.3.2
Dynamic: author
Dynamic: home-page
Dynamic: license-file
Dynamic: project-url
Dynamic: requires-dist
Dynamic: summary