gfdl 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gfdl/__init__.py +36 -0
- gfdl/activations.py +194 -0
- gfdl/model.py +851 -0
- gfdl/tests/__init__.py +0 -0
- gfdl/tests/test_model.py +518 -0
- gfdl/tests/test_regression.py +142 -0
- gfdl/weights.py +378 -0
- gfdl-0.1.0.dist-info/METADATA +33 -0
- gfdl-0.1.0.dist-info/RECORD +12 -0
- gfdl-0.1.0.dist-info/WHEEL +5 -0
- gfdl-0.1.0.dist-info/licenses/COPYING +29 -0
- gfdl-0.1.0.dist-info/top_level.txt +1 -0
gfdl/__init__.py
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
"""Gradient Free Deep Learning (GFDL) package.

Importing this package verifies that the Python runtime and the core
scientific dependencies satisfy the minimum supported versions, raising
``ImportError`` early instead of failing later with obscure errors.
"""

import platform

import numpy as np
import packaging
import scipy
import sklearn
from packaging.version import Version

from .model import GFDL

__all__ = ["GFDL"]


def _check_min_version(name, found, minimum):
    """
    Parse *found* and raise if it is older than *minimum*.

    Parameters
    ----------
    name : str
        Human-readable dependency name used in the error message.
    found : str
        The version string that is actually installed / running.
    minimum : str
        The minimum supported version string.

    Returns
    -------
    packaging.version.Version
        The parsed installed version.

    Raises
    ------
    ImportError
        If ``found`` is older than ``minimum``.
    """
    found_version = Version(found)
    if found_version < Version(minimum):
        raise ImportError(
            f"{name} {found_version} is installed, but >= {minimum} is required"
        )
    return found_version


# Parsed versions are kept as module attributes (same names as before) so
# callers can still introspect e.g. ``gfdl.numpy_version``.
packaging_version = _check_min_version("packaging", packaging.__version__, "24.0")
python_version = _check_min_version("Python", platform.python_version(), "3.12")
numpy_version = _check_min_version("numpy", np.__version__, "2.0.0")
sklearn_version = _check_min_version("scikit-learn", sklearn.__version__, "1.5.0")
scipy_version = _check_min_version("scipy", scipy.__version__, "1.13.0")
|
gfdl/activations.py
ADDED
|
@@ -0,0 +1,194 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Activation functions for Gradient Free Deep Learning estimators.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import numpy as np
|
|
6
|
+
import scipy
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def relu(z):
    """
    Rectified linear unit: clamp negative entries to zero.

    Parameters
    ----------
    z : array_like
        Input array.

    Returns
    -------
    numpy.ndarray
        Elementwise ``max(z, 0)``; negative values become zero, the rest
        pass through unchanged.
    """
    # Clipping from below at zero is equivalent to np.maximum(0, z).
    return np.clip(z, 0, None)
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def tanh(z):
    """
    Hyperbolic tangent applied elementwise.

    Parameters
    ----------
    z : array_like
        Input array.

    Returns
    -------
    numpy.ndarray
        Elementwise ``tanh(z)``, with values in the open interval (-1, 1).

    See Also
    --------
    numpy.tanh : The hyperbolic tangent function.
    """
    # Thin wrapper kept so all activations share the same call signature.
    result = np.tanh(z)
    return result
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
def sigmoid(z):
    """
    Logistic sigmoid applied elementwise.

    Parameters
    ----------
    z : array_like
        Input array.

    Returns
    -------
    numpy.ndarray
        Elementwise ``1 / (1 + exp(-z))``, with values in (0, 1).

    See Also
    --------
    scipy.special.expit : The logistic sigmoid function.
    """
    # Delegate to SciPy's numerically stable implementation.
    expit = scipy.special.expit
    return expit(z)
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
def identity(z):
    """
    Identity activation: pass the input through untouched.

    Parameters
    ----------
    z : array_like
        Input array.

    Returns
    -------
    numpy.ndarray
        The same object that was passed in, unchanged.
    """
    # No-op by design; exists so "identity"/"linear" fit the registry API.
    return z
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
def softmax(z):
    """
    Softmax applied along the trailing axis.

    Parameters
    ----------
    z : array_like
        Input array.

    Returns
    -------
    numpy.ndarray
        Non-negative values that sum to one along the last axis.

    See Also
    --------
    scipy.special.softmax : The softmax function.
    """
    # axis=-1 normalizes each row/vector independently.
    return scipy.special.softmax(z, axis=-1)
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
def softmin(z):
    """
    Softmin applied along the trailing axis.

    Equivalent to softmax of the negated input, so smaller entries
    receive larger weights.

    Parameters
    ----------
    z : array_like
        Input array. Must support unary negation (e.g. a numpy array).

    Returns
    -------
    numpy.ndarray
        Non-negative values that sum to one along the last axis.

    See Also
    --------
    scipy.special.softmax : The softmax function.
    """
    # Inlined form of softmax(-z); axis=-1 matches the softmax helper.
    return scipy.special.softmax(-z, axis=-1)
|
|
127
|
+
|
|
128
|
+
|
|
129
|
+
def log_sigmoid(z):
    """
    Natural logarithm of the logistic sigmoid, applied elementwise.

    Parameters
    ----------
    z : array_like
        Input array.

    Returns
    -------
    numpy.ndarray
        Elementwise ``log(sigmoid(z))``; values are non-positive.

    See Also
    --------
    scipy.special.log_expit : The log of the logistic sigmoid function.
    """
    # log_expit avoids the overflow/underflow of log(expit(z)).
    return scipy.special.log_expit(z)
|
|
148
|
+
|
|
149
|
+
|
|
150
|
+
def log_softmax(z):
    """
    Natural logarithm of the softmax along the trailing axis.

    Parameters
    ----------
    z : array_like
        Input array.

    Returns
    -------
    numpy.ndarray
        Elementwise ``log(softmax(z))``; values are non-positive.

    See Also
    --------
    scipy.special.log_softmax : The logarithm of the softmax function.
    """
    # Computed directly for numerical stability, not as log(softmax(z)).
    return scipy.special.log_softmax(z, axis=-1)
|
|
169
|
+
|
|
170
|
+
|
|
171
|
+
# Registry of supported activation functions, keyed by their canonical
# lower-case names. "linear" is an alias for "identity" (same function
# object). resolve_activation() performs its lookups against this mapping,
# so insertion order and keys here define the public set of names.
ACTIVATIONS = {
    "relu": relu,
    "tanh": tanh,
    "sigmoid": sigmoid,
    "identity": identity,
    "linear": identity,
    "softmax": softmax,
    "softmin": softmin,
    "log_sigmoid": log_sigmoid,
    "log_softmax": log_softmax,
}
|
|
182
|
+
|
|
183
|
+
|
|
184
|
+
def resolve_activation(activation):
    """
    Resolve an activation name to its canonical form and function.

    Parameters
    ----------
    activation : str
        Activation name; case-insensitive, surrounding whitespace ignored.

    Returns
    -------
    tuple
        ``(name, fn)`` where ``name`` is the stripped, lower-cased key and
        ``fn`` is the corresponding activation callable.

    Raises
    ------
    ValueError
        If the normalized name is not a key of ``ACTIVATIONS``.
    """
    key = activation.strip().lower()
    try:
        func = ACTIVATIONS[key]
    except KeyError as exc:
        # Chain the KeyError so the original lookup failure stays visible.
        options = sorted(ACTIVATIONS.keys())
        raise ValueError(
            f"activation='{activation}' is not supported; choose from {options}"
        ) from exc
    return key, func
|