nnodely 0.14.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44) hide show
  1. mplplots/__init__.py +0 -0
  2. mplplots/plots.py +131 -0
  3. nnodely/__init__.py +42 -0
  4. nnodely/activation.py +85 -0
  5. nnodely/arithmetic.py +203 -0
  6. nnodely/earlystopping.py +81 -0
  7. nnodely/exporter/__init__.py +3 -0
  8. nnodely/exporter/export.py +275 -0
  9. nnodely/exporter/exporter.py +45 -0
  10. nnodely/exporter/reporter.py +48 -0
  11. nnodely/exporter/standardexporter.py +108 -0
  12. nnodely/fir.py +150 -0
  13. nnodely/fuzzify.py +221 -0
  14. nnodely/initializer.py +31 -0
  15. nnodely/input.py +131 -0
  16. nnodely/linear.py +130 -0
  17. nnodely/localmodel.py +82 -0
  18. nnodely/logger.py +94 -0
  19. nnodely/loss.py +30 -0
  20. nnodely/model.py +263 -0
  21. nnodely/modeldef.py +205 -0
  22. nnodely/nnodely.py +1295 -0
  23. nnodely/optimizer.py +91 -0
  24. nnodely/output.py +23 -0
  25. nnodely/parameter.py +103 -0
  26. nnodely/parametricfunction.py +329 -0
  27. nnodely/part.py +201 -0
  28. nnodely/relation.py +149 -0
  29. nnodely/trigonometric.py +67 -0
  30. nnodely/utils.py +101 -0
  31. nnodely/visualizer/__init__.py +4 -0
  32. nnodely/visualizer/dynamicmpl/functionplot.py +34 -0
  33. nnodely/visualizer/dynamicmpl/fuzzyplot.py +31 -0
  34. nnodely/visualizer/dynamicmpl/resultsplot.py +28 -0
  35. nnodely/visualizer/dynamicmpl/trainingplot.py +46 -0
  36. nnodely/visualizer/mplnotebookvisualizer.py +66 -0
  37. nnodely/visualizer/mplvisualizer.py +215 -0
  38. nnodely/visualizer/textvisualizer.py +320 -0
  39. nnodely/visualizer/visualizer.py +84 -0
  40. nnodely-0.14.0.dist-info/LICENSE +21 -0
  41. nnodely-0.14.0.dist-info/METADATA +401 -0
  42. nnodely-0.14.0.dist-info/RECORD +44 -0
  43. nnodely-0.14.0.dist-info/WHEEL +5 -0
  44. nnodely-0.14.0.dist-info/top_level.txt +2 -0
mplplots/__init__.py ADDED
File without changes
mplplots/plots.py ADDED
@@ -0,0 +1,131 @@
1
+ import numpy as np
2
+
3
+ import matplotlib.colors as mcolors
4
+
5
def plot_training(ax, title, key, data_train, data_val = None, last = None):
    """Plot training (and optionally validation) loss curves on axes *ax*.

    :param ax: matplotlib Axes to draw on.
    :param title: plot title; when *last* is given, it is suffixed with the window size.
    :param key: loss name used in the legend and labels.
    :param data_train: sequence of per-epoch training losses.
    :param data_val: optional sequence of per-epoch validation losses.
    :param last: optional number of trailing epochs this view represents (title only).
    """
    if last is not None:
        ax.set_title(f'{title} - epochs last {last}')
    else:
        ax.set_title(f'{title}')

    ax.plot([i + 1 for i in range(len(data_train))], data_train, label=f'Train loss {key}')
    if data_val:
        ax.plot([i + 1 for i in range(len(data_val))], data_val, '-.', label=f'Validation loss {key}')

    ax.set_yscale('log')
    ax.grid(True)
    ax.legend(loc='best')
    ax.set_xlabel('Epochs')
    ax.set_ylabel('Loss')
    # Set plot limits, ignoring non-finite losses: +/-inf is mapped to NaN and the
    # NaN-aware reductions are used. (The previous plain min()/max() propagated the
    # NaN introduced by nan_to_num, producing NaN y-limits whenever a loss diverged.)
    data_train = np.nan_to_num(data_train, nan=np.nan, posinf=np.nan, neginf=np.nan)
    if data_val:
        data_val = np.nan_to_num(data_val, nan=np.nan, posinf=np.nan, neginf=np.nan)
        min_val = min(np.nanmin(data_val), np.nanmin(data_train))
        max_val = max(np.nanmax(data_val), np.nanmax(data_train))
    else:
        min_val = np.nanmin(data_train)
        max_val = np.nanmax(data_train)
    ax.set_ylim(min_val - min_val / 10, max_val + max_val / 10)
31
+
32
+
33
def plot_results(ax, name_data, key, A, B, sample_time):
    """Plot measured (*A*) against predicted (*B*) series for *key* on axes *ax*.

    Both A and B are nested sequences; after transposition each [window, dim]
    entry is a time series sampled every *sample_time* seconds. A Pearson
    correlation between the two series is annotated on the axes.
    """
    ax.set_title(f'{name_data} Data of {key}')
    real = np.transpose(np.array(A))
    pred = np.transpose(np.array(B))
    n_windows, n_dims = real.shape[0], real.shape[1]
    for win in range(n_windows):
        for dim in range(n_dims):
            t_real = np.arange(0, len(real[win, dim]) * sample_time, sample_time)
            ax.plot(t_real, real[win, dim], label='real')
            t_pred = np.arange(0, len(pred[win, dim]) * sample_time, sample_time)
            ax.plot(t_pred, pred[win, dim], '-.', label='prediction')
            correlation = np.corrcoef(real[win, dim], pred[win, dim])[0, 1]
            ax.text(0.05, 0.95, f'Correlation: {correlation:.2f}',
                    transform=ax.transAxes, verticalalignment='top')

    ax.grid(True)
    ax.legend(loc='best')
    ax.set_xlabel('Time (s)')
    ax.set_ylabel(f'Value {key}')

    # min_val = min([min(A), min(B)])
    # max_val = max([max(A), max(B)])
    # plt.ylim(min_val - min_val / 10, max_val + max_val / 10)
55
+
56
+ # # Plot
57
+ # self.fig, self.ax = self.plt.subplots(2*len(output_keys), 2,
58
+ # gridspec_kw={'width_ratios': [5, 1], 'height_ratios': [2, 1]*len(output_keys)})
59
+ # if len(self.ax.shape) == 1:
60
+ # self.ax = np.expand_dims(self.ax, axis=0)
61
+ # #plotsamples = self.prediction.shape[1]s
62
+ # plotsamples = 200
63
+ # for i in range(0, nnodely.prediction.shape[0]):
64
+ # # Zoomed test data
65
+ # self.ax[2*i,0].plot(nnodely.prediction[i], linestyle='dashed')
66
+ # self.ax[2*i,0].plot(nnodely.label[i])
67
+ # self.ax[2*i,0].grid('on')
68
+ # self.ax[2*i,0].set_xlim((performance['max_se_idxs'][i]-plotsamples, performance['max_se_idxs'][i]+plotsamples))
69
+ # self.ax[2*i,0].vlines(performance['max_se_idxs'][i], nnodely.prediction[i][performance['max_se_idxs'][i]], nnodely.label[i][performance['max_se_idxs'][i]],
70
+ # colors='r', linestyles='dashed')
71
+ # self.ax[2*i,0].legend(['predicted', 'test'], prop={'family':'serif'})
72
+ # self.ax[2*i,0].set_title(output_keys[i], family='serif')
73
+ # # Statitics
74
+ # self.ax[2*i,1].axis("off")
75
+ # self.ax[2*i,1].invert_yaxis()
76
+ # if performance:
77
+ # text = "Rmse test: {:3.6f}\nFVU: {:3.6f}".format(#\nAIC: {:3.6f}
78
+ # nnodely.performance['rmse_test'][i],
79
+ # #nnodely.performance['aic'][i],
80
+ # nnodely.performance['fvu'][i])
81
+ # self.ax[2*i,1].text(0, 0, text, family='serif', verticalalignment='top')
82
+ # # test data
83
+ # self.ax[2*i+1,0].plot(nnodely.prediction[i], linestyle='dashed')
84
+ # self.ax[2*i+1,0].plot(nnodely.label[i])
85
+ # self.ax[2*i+1,0].grid('on')
86
+ # self.ax[2*i+1,0].legend(['predicted', 'test'], prop={'family':'serif'})
87
+ # self.ax[2*i+1,0].set_title(output_keys[i], family='serif')
88
+ # # Empty
89
+ # self.ax[2*i+1,1].axis("off")
90
+ # self.fig.tight_layout()
91
+ # self.plt.show()
92
+
93
+
94
def plot_fuzzy(ax, name, x, y, chan_centers):
    """Plot the membership curve of every fuzzy channel on axes *ax*.

    Each channel in *y* is drawn against *x*, with a dashed vertical line at
    the channel center; colors cycle through the Tableau palette.
    """
    palette = list(mcolors.TABLEAU_COLORS.keys())
    n_colors = len(palette)
    for channel in range(len(y)):
        center_color = mcolors.TABLEAU_COLORS[palette[channel % n_colors]]
        ax.axvline(x=chan_centers[channel], color=center_color, linestyle='--')
        ax.plot(x, y[channel], label=f'Channel {int(channel) + 1}', linewidth=2)
    ax.legend(loc='best')
    ax.set_xlabel('Input')
    ax.set_ylabel('Value')
    ax.set_title(f'Function {name}')
105
+
106
+
107
def plot_3d_function(plt, name, x0, x1, params, output, input_names):
    """Surface-plot a function of its first two inputs using pyplot module *plt*.

    Inputs beyond the first two are held at the constant values in *params*
    and annotated as text on the figure.
    """
    fig = plt.figure()
    # Start from a clean canvas before drawing.
    plt.clf()
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(np.array(x0), np.array(x1), np.array(output), cmap='viridis')
    ax.set_xlabel(input_names[0])
    ax.set_ylabel(input_names[1])
    ax.set_zlabel(f'{name} output')
    # Annotate the frozen value of every extra (non-plotted) input.
    for extra in range(len(input_names) - 2):
        fig.text(0.01, 0.9 - 0.05 * extra, f"{input_names[extra + 2]} ={params[extra]}",
                 fontsize=10, color='blue', style='italic')
    plt.title(f'Function {name}')
120
+
121
def plot_2d_function(plt, name, x, params, output, input_names):
    """Line-plot a function of its first input using pyplot module *plt*.

    Inputs beyond the first are held at the constant values in *params*
    and annotated as text on the figure.
    """
    fig = plt.figure()
    # Start from a clean canvas before drawing.
    plt.clf()
    plt.plot(np.array(x), np.array(output), linewidth=2)
    plt.xlabel(input_names[0])
    plt.ylabel(f'{name} output')
    # Annotate the frozen value of every extra (non-plotted) input.
    for extra in range(len(input_names) - 1):
        fig.text(0.01, 0.9 - 0.05 * extra, f"{input_names[extra + 1]} ={params[extra]}",
                 fontsize=10, color='blue', style='italic')
    plt.title(f'Function {name}')
nnodely/__init__.py ADDED
@@ -0,0 +1,42 @@
1
+
2
+ __version__ = '0.14.0'
3
+
4
+ import sys
5
+ major, minor = sys.version_info.major, sys.version_info.minor
6
+
7
+ import logging
8
+ LOG_LEVEL = logging.INFO
9
+
10
+ if major < 3:
11
+ sys.exit("Sorry, Python 2 is not supported. You need Python >= 3.6 for "+__package__+".")
12
+ elif minor < 10:
13
+ sys.exit("Sorry, You need Python >= 3.10 for "+__package__+".")
14
+ else:
15
+ print(f'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>'+
16
+ f' {__package__}_v{__version__} '.center(20, '-')+
17
+ f'<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
18
+
19
+ # Network input, outputs and parameters
20
+ from nnodely.input import Input, State, Connect, ClosedLoop
21
+ from nnodely.parameter import Parameter, Constant
22
+ from nnodely.output import Output
23
+
24
+ # Network elements
25
+ from nnodely.activation import Relu, Tanh
26
+ from nnodely.fir import Fir
27
+ from nnodely.linear import Linear
28
+ from nnodely.arithmetic import Add, Sum, Sub, Mul, Pow, Neg
29
+ from nnodely.trigonometric import Sin, Cos, Tan
30
+ from nnodely.parametricfunction import ParamFun
31
+ from nnodely.fuzzify import Fuzzify
32
+ from nnodely.part import TimePart, TimeSelect, SamplePart, SampleSelect, Part, Select
33
+ from nnodely.localmodel import LocalModel
34
+
35
+ # Main nnodely classes
36
+ from nnodely.nnodely import nnodely, Modely
37
+ from nnodely.visualizer import Visualizer, TextVisualizer, MPLVisualizer, MPLNotebookVisualizer
38
+ from nnodely.optimizer import Optimizer, SGD, Adam
39
+ from nnodely.exporter import Exporter, StandardExporter
40
+
41
+ # Support functions
42
+ from nnodely.initializer import init_negexp, init_lin, init_constant
nnodely/activation.py ADDED
@@ -0,0 +1,85 @@
1
+ import torch.nn as nn
2
+
3
+ from nnodely.relation import Stream, ToStream, toStream
4
+ from nnodely.model import Model
5
+ from nnodely.utils import check
6
+ import torch
7
+
8
+ relu_relation_name = 'ReLU'
9
+ tanh_relation_name = 'Tanh'
10
+ elu_relation_name = 'ELU'
11
+
12
class Relu(Stream, ToStream):
    """
    Relu activation function for the stream object.

    The input is first promoted to a Stream (via toStream); the output keeps
    the input dimension and the node is registered in the relation graph.

    :raises TypeError: if the argument cannot be converted to a Stream.

    Example:
        >>> x = Relu(x)
    """
    def __init__(self, obj:Stream) -> Stream:
        # Promote plain values/constants to a Stream before validation.
        obj = toStream(obj)
        check(type(obj) is Stream, TypeError,
              f"The type of {obj} is {type(obj)} and is not supported for Relu operation.")
        # Unique node name = relation tag + global stream counter.
        super().__init__(relu_relation_name + str(Stream.count),obj.json,obj.dim)
        self.json['Relations'][self.name] = [relu_relation_name,[obj.name]]
24
+
25
class Tanh(Stream, ToStream):
    """
    Tanh activation function for the stream object.

    The input is first promoted to a Stream (via toStream); the output keeps
    the input dimension and the node is registered in the relation graph.

    :raises TypeError: if the argument cannot be converted to a Stream.

    Example:
        >>> x = Tanh(x)
    """
    def __init__(self, obj:Stream) -> Stream:
        # Promote plain values/constants to a Stream before validation.
        obj = toStream(obj)
        check(type(obj) is Stream,TypeError,
              f"The type of {obj} is {type(obj)} and is not supported for Tanh operation.")
        # Unique node name = relation tag + global stream counter.
        super().__init__(tanh_relation_name + str(Stream.count),obj.json,obj.dim)
        self.json['Relations'][self.name] = [tanh_relation_name,[obj.name]]
37
+
38
class ELU(Stream, ToStream):
    """
    ELU activation function for the stream object.

    The input is first promoted to a Stream (via toStream); the output keeps
    the input dimension and the node is registered in the relation graph.

    :raises TypeError: if the argument cannot be converted to a Stream.

    Example:
        >>> x = ELU(x)
    """
    def __init__(self, obj:Stream) -> Stream:
        # Promote plain values/constants to a Stream before validation.
        obj = toStream(obj)
        # Fixed copy-paste: the error message previously referred to "Tanh operation".
        check(type(obj) is Stream,TypeError,
              f"The type of {obj} is {type(obj)} and is not supported for ELU operation.")
        super().__init__(elu_relation_name + str(Stream.count),obj.json,obj.dim)
        self.json['Relations'][self.name] = [elu_relation_name,[obj.name]]
50
+
51
class Tanh_Layer(nn.Module):
    """Torch module applying the elementwise hyperbolic tangent. :noindex:"""
    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Equivalent to torch.tanh(x).
        return x.tanh()

def createTanh(self, *input):
    """
    Factory used by Model to build the Tanh activation layer. :noindex:
    """
    return Tanh_Layer()
63
+
64
class ReLU_Layer(nn.Module):
    """Torch module applying the elementwise rectified linear unit. :noindex:"""
    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Equivalent to torch.relu(x).
        return x.relu()

def createRelu(self, *input):
    """
    Factory used by Model to build the ReLU activation layer. :noindex:
    """
    return ReLU_Layer()
78
+
79
def createELU(self, *input):
    """
    Factory used by Model to build the ELU activation layer. :noindex:
    """
    # alpha=1.0 is torch's default; stated explicitly for clarity.
    return nn.ELU(alpha=1.0)
82
+
83
+ setattr(Model, relu_relation_name, createRelu)
84
+ setattr(Model, tanh_relation_name, createTanh)
85
+ setattr(Model, elu_relation_name, createELU)
nnodely/arithmetic.py ADDED
@@ -0,0 +1,203 @@
1
+ import torch.nn as nn
2
+ import torch
3
+
4
+ from nnodely.relation import ToStream, Stream, toStream
5
+ from nnodely.model import Model
6
+ from nnodely.utils import check, merge
7
+
8
+
9
+ # Binary operators
10
+ add_relation_name = 'Add'
11
+ sub_relation_name = 'Sub'
12
+ mul_relation_name = 'Mul'
13
+ div_relation_name = 'Div'
14
+ pow_relation_name = 'Pow'
15
+
16
+ # Unary operators
17
+ neg_relation_name = 'Neg'
18
+ # square_relation_name = 'Square'
19
+
20
+ # Merge operator
21
+ sum_relation_name = 'Sum'
22
class Add(Stream, ToStream):
    """
    Element-wise addition of two streams (obj1 + obj2).

    Operands must be Streams (or convertible via toStream) with equal
    dimensions, unless one of them is scalar ({'dim': 1}).
    The output dimension is copied from obj1.
    """
    def __init__(self, obj1:Stream, obj2:Stream) -> Stream:
        # Promote plain values/constants to Streams before validation.
        obj1,obj2 = toStream(obj1),toStream(obj2)
        check(type(obj1) is Stream,TypeError,
              f"The type of {obj1} is {type(obj1)} and is not supported for add operation.")
        check(type(obj2) is Stream,TypeError,
              f"The type of {obj2} is {type(obj2)} and is not supported for add operation.")
        check(obj1.dim == obj2.dim or obj1.dim == {'dim':1} or obj2.dim == {'dim':1}, ValueError,
              f"For addition operators (+) the dimension of {obj1.name} = {obj1.dim} must be the same of {obj2.name} = {obj2.dim}.")
        # Merge the two relation graphs and register this node.
        super().__init__(add_relation_name + str(Stream.count),merge(obj1.json,obj2.json),obj1.dim)
        self.json['Relations'][self.name] = [add_relation_name,[obj1.name,obj2.name]]
33
+
34
+ ## TODO: check the scalar dimension, helpful for the offset
35
class Sub(Stream, ToStream):
    """
    Element-wise subtraction of two streams (obj1 - obj2).

    Operands must be Streams (or convertible via toStream) with equal
    dimensions, unless one of them is scalar ({'dim': 1}).
    The output dimension is copied from obj1.
    """
    def __init__(self, obj1:Stream, obj2:Stream) -> Stream:
        # Promote plain values/constants to Streams before validation.
        obj1, obj2 = toStream(obj1), toStream(obj2)
        check(type(obj1) is Stream,TypeError,
              f"The type of {obj1} is {type(obj1)} and is not supported for sub operation.")
        check(type(obj2) is Stream,TypeError,
              f"The type of {obj2} is {type(obj2)} and is not supported for sub operation.")
        check(obj1.dim == obj2.dim or obj1.dim == {'dim':1} or obj2.dim == {'dim':1}, ValueError,
              f"For subtraction operators (-) the dimension of {obj1.name} = {obj1.dim} must be the same of {obj2.name} = {obj2.dim}.")
        # Merge the two relation graphs and register this node.
        super().__init__(sub_relation_name + str(Stream.count),merge(obj1.json,obj2.json),obj1.dim)
        self.json['Relations'][self.name] = [sub_relation_name,[obj1.name,obj2.name]]
46
+
47
class Mul(Stream, ToStream):
    """
    Element-wise multiplication of two streams (obj1 * obj2).

    Operands must be Streams (or convertible via toStream) with equal
    dimensions, unless one of them is scalar ({'dim': 1}).
    The output dimension is copied from obj1.
    """
    def __init__(self, obj1:Stream, obj2:Stream) -> Stream:
        # Promote plain values/constants to Streams before validation.
        obj1, obj2 = toStream(obj1), toStream(obj2)
        check(type(obj1) is Stream, TypeError,
              f"The type of {obj1} is {type(obj1)} and is not supported for mul operation.")
        check(type(obj2) is Stream, TypeError,
              f"The type of {obj2} is {type(obj2)} and is not supported for mul operation.")
        check(obj1.dim == obj2.dim or obj1.dim == {'dim':1} or obj2.dim == {'dim':1}, ValueError,
              f"For multiplication operators (*) the dimension of {obj1.name} = {obj1.dim} must be the same of {obj2.name} = {obj2.dim}.")
        # Merge the two relation graphs and register this node.
        super().__init__(mul_relation_name + str(Stream.count),merge(obj1.json,obj2.json),obj1.dim)
        self.json['Relations'][self.name] = [mul_relation_name,[obj1.name,obj2.name]]
58
+
59
class Div(Stream, ToStream):
    """
    Element-wise division of two streams (obj1 / obj2).

    Operands must be Streams (or convertible via toStream) with equal
    dimensions, unless one of them is scalar ({'dim': 1}).
    The output dimension is copied from obj1.
    """
    def __init__(self, obj1:Stream, obj2:Stream) -> Stream:
        # Promote plain values/constants to Streams before validation.
        obj1, obj2 = toStream(obj1), toStream(obj2)
        check(type(obj1) is Stream, TypeError,
              f"The type of {obj1} is {type(obj1)} and is not supported for div operation.")
        check(type(obj2) is Stream, TypeError,
              f"The type of {obj2} is {type(obj2)} and is not supported for div operation.")
        # Fixed copy-paste: the message previously showed the multiplication symbol (*).
        check(obj1.dim == obj2.dim or obj1.dim == {'dim':1} or obj2.dim == {'dim':1}, ValueError,
              f"For division operators (/) the dimension of {obj1.name} = {obj1.dim} must be the same of {obj2.name} = {obj2.dim}.")
        # Merge the two relation graphs and register this node.
        super().__init__(div_relation_name + str(Stream.count),merge(obj1.json,obj2.json),obj1.dim)
        self.json['Relations'][self.name] = [div_relation_name,[obj1.name,obj2.name]]
70
+
71
class Pow(Stream, ToStream):
    """
    Element-wise power of two streams (obj1 ** obj2).

    Operands must be Streams (or convertible via toStream) with equal
    dimensions, unless one of them is scalar ({'dim': 1}).
    The output dimension is copied from obj1.
    """
    def __init__(self, obj1:Stream, obj2:Stream) -> Stream:
        # Promote plain values/constants to Streams before validation.
        obj1, obj2 = toStream(obj1), toStream(obj2)
        # Fixed copy-paste: the messages previously referred to "exp operation"
        # and "division operators (*)" for this power relation.
        check(type(obj1) is Stream, TypeError,
              f"The type of {obj1} is {type(obj1)} and is not supported for pow operation.")
        check(type(obj2) is Stream, TypeError,
              f"The type of {obj2} is {type(obj2)} and is not supported for pow operation.")
        check(obj1.dim == obj2.dim or obj1.dim == {'dim':1} or obj2.dim == {'dim':1}, ValueError,
              f"For power operators (**) the dimension of {obj1.name} = {obj1.dim} must be the same of {obj2.name} = {obj2.dim}.")
        # Merge the two relation graphs and register this node.
        super().__init__(pow_relation_name + str(Stream.count),merge(obj1.json,obj2.json),obj1.dim)
        self.json['Relations'][self.name] = [pow_relation_name,[obj1.name,obj2.name]]
82
+
83
class Neg(Stream, ToStream):
    """
    Element-wise negation (-obj) of a stream; the output keeps the input dimension.

    :raises TypeError: if the argument cannot be converted to a Stream.
    """
    def __init__(self, obj:Stream) -> Stream:
        # Promote plain values/constants to a Stream before validation.
        obj = toStream(obj)
        check(type(obj) is Stream, TypeError,
              f"The type of {obj} is {type(obj)} and is not supported for neg operation.")
        super().__init__(neg_relation_name+str(Stream.count), obj.json, obj.dim)
        self.json['Relations'][self.name] = [neg_relation_name,[obj.name]]
90
+
91
+ # class Square(Stream, ToStream):
92
+ # def __init__(self, obj:Stream) -> Stream:
93
+ # check(type(obj) is Stream, TypeError,
94
+ # f"The type of {obj.name} is {type(obj)} and is not supported for neg operation.")
95
+ # super().__init__(square_relation_name+str(Stream.count), obj.json, obj.dim)
96
+ # self.json['Relations'][self.name] = [square_relation_name,[obj.name]]
97
+
98
class Sum(Stream, ToStream):
    """
    Sum relation over a stream.

    NOTE(review): the declared output dimension is copied unchanged from the
    input, while the runtime layer (Sum_Layer) reduces along dim 2 — confirm
    the declared dimension is intended.
    """
    def __init__(self, obj:Stream) -> Stream:
        # Promote plain values/constants to a Stream before validation.
        obj = toStream(obj)
        check(type(obj) is Stream, TypeError,
              f"The type of {obj} is {type(obj)} and is not supported for sum operation.")
        super().__init__(sum_relation_name + str(Stream.count),obj.json,obj.dim)
        self.json['Relations'][self.name] = [sum_relation_name,[obj.name]]
105
+
106
class Add_Layer(nn.Module):
    """Element-wise addition of two input tensors. :noindex:"""
    def __init__(self):
        super().__init__()

    def forward(self, *inputs):
        # Equivalent to torch.add(inputs[0], inputs[1]).
        return inputs[0] + inputs[1]

def createAdd(name, *inputs):
    """Factory used by Model to build the Add relation layer. :noindex:"""
    return Add_Layer()
117
+
118
class Sub_Layer(nn.Module):
    """Element-wise subtraction of two input tensors. :noindex:"""
    def __init__(self):
        super().__init__()

    def forward(self, *inputs):
        # Equivalent to torch.add(inputs[0], -inputs[1]).
        return inputs[0] - inputs[1]

def createSub(self, *inputs):
    """Factory used by Model to build the Sub relation layer. :noindex:"""
    return Sub_Layer()
130
+
131
+
132
class Mul_Layer(nn.Module):
    """Element-wise multiplication of two input tensors. :noindex:"""
    def __init__(self):
        super().__init__()

    def forward(self, *inputs):
        # Equivalent to inputs[0] * inputs[1].
        return torch.mul(inputs[0], inputs[1])

def createMul(name, *inputs):
    """Factory used by Model to build the Mul relation layer. :noindex:"""
    return Mul_Layer()
143
+
144
class Div_Layer(nn.Module):
    """Element-wise division of two input tensors. :noindex:"""
    def __init__(self):
        super().__init__()

    def forward(self, *inputs):
        # Equivalent to inputs[0] / inputs[1].
        return torch.div(inputs[0], inputs[1])

def createDiv(name, *inputs):
    """Factory used by Model to build the Div relation layer. :noindex:"""
    return Div_Layer()
155
+
156
class Pow_Layer(nn.Module):
    """Element-wise power: first input raised to the second. :noindex:"""
    def __init__(self):
        super().__init__()

    def forward(self, *inputs):
        # Equivalent to torch.pow(inputs[0], inputs[1]).
        return inputs[0] ** inputs[1]

def createPow(name, *inputs):
    """Factory used by Model to build the Pow relation layer. :noindex:"""
    return Pow_Layer()
167
+
168
class Neg_Layer(nn.Module):
    """Element-wise negation of the input tensor. :noindex:"""
    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Equivalent to -x.
        return torch.neg(x)

def createNeg(self, *inputs):
    """Factory used by Model to build the Neg relation layer. :noindex:"""
    return Neg_Layer()
179
+
180
class Sum_Layer(nn.Module):
    """Reduce the input tensor by summing along dim 2. :noindex:"""
    def __init__(self):
        super().__init__()

    def forward(self, inputs):
        # Equivalent to torch.sum(inputs, dim=2); requires inputs with >= 3 dims.
        return inputs.sum(dim=2)

def createSum(name, *inputs):
    """Factory used by Model to build the Sum relation layer. :noindex:"""
    return Sum_Layer()
191
+
192
+ setattr(Model, add_relation_name, createAdd)
193
+ setattr(Model, sub_relation_name, createSub)
194
+ setattr(Model, mul_relation_name, createMul)
195
+ setattr(Model, div_relation_name, createDiv)
196
+ setattr(Model, pow_relation_name, createPow)
197
+
198
+ setattr(Model, neg_relation_name, createNeg)
199
+ # setattr(Model, square_relation_name, createSquare)
200
+
201
+ setattr(Model, sum_relation_name, createSum)
202
+
203
+
@@ -0,0 +1,81 @@
1
+ # Early stopping functions:
2
+ # The functions return True if the training should stop
3
+
4
+ # "Classical" early stopping based on the validation loss:
5
+ # Stop if the validation loss has not improved for a certain number of epochs
6
def early_stop_patience(train_losses, val_losses, params):
    """Classical early stopping: return True when the best (minimum) loss is
    older than `patience` epochs.

    :param train_losses: dict mapping loss name -> list of per-epoch values.
    :param val_losses: same structure; used instead of training losses when non-empty.
    :param params: optional 'patience' (default 50) and 'error' (which loss key
                   to monitor; otherwise the mean over all keys is used).
    """
    patience = params.get('patience', 50)
    # Prefer validation losses; fall back to training losses when unavailable.
    losses = val_losses if val_losses else train_losses

    if 'error' in params:
        # The user named the loss to monitor.
        history = losses[params['error']]
    else:
        # Monitor the per-epoch mean across every loss key.
        import numpy as np
        keys = list(losses.keys())
        n_epochs = len(losses[keys[0]])
        history = [np.mean([losses[k][i] for k in keys]) for i in range(n_epochs)]

    if len(history) <= patience:
        return False
    # Stop when the minimum is more than `patience` epochs in the past.
    best_epoch = history.index(min(history))
    return best_epoch < len(history) - patience
28
+
29
+
30
def select_best_model(train_losses, val_losses, params):
    """Return True when the most recent epoch achieved the best (lowest) mean loss.

    Validation losses are used when available, training losses otherwise; the
    per-epoch mean across all loss keys is the comparison metric. Ties favor
    the earliest epoch, so a repeated minimum returns False.
    """
    import numpy as np
    losses = val_losses if val_losses else train_losses
    keys = list(losses.keys())
    n_epochs = len(losses[keys[0]])
    history = [np.mean([losses[k][i] for k in keys]) for i in range(n_epochs)]
    return history.index(min(history)) == len(history) - 1
43
+
44
+
45
def mean_stopping(train_losses, val_losses, params):
    """Stop when train/validation losses converge (or the training loss is tiny).

    With validation data: True when, for any loss key, the last training and
    validation values differ by less than `tol`. Without validation data:
    True when any last training loss is below `tol`. Default tol is 0.001.
    """
    tol = params.get('tol', 0.001)
    if not val_losses:
        return any(values[-1] < tol for values in train_losses.values())
    for train_values, val_values in zip(train_losses.values(), val_losses.values()):
        if abs(train_values[-1] - val_values[-1]) < tol:
            return True
    return False
56
+
57
def standard_early_stopping(train_losses, val_losses, params):
    """Stop when losses show an overfitting/stagnation pattern over a window.

    The window size is read from params['tol'] (default 10) — note the key name.
    With validation data: True when, for every loss key, the train/validation
    gap grew strictly at every step of the last `window` epochs. Without
    validation data: True when, for every key, no loss in the last window-1
    epochs dropped below the value `window` epochs ago. Returns False whenever
    a history is still shorter than the window.
    """
    window = params.get('tol', 10)
    if val_losses:
        for train_hist, val_hist in zip(train_losses.values(), val_losses.values()):
            if len(train_hist) <= window and len(val_hist) <= window:
                return False
            # The train/val gap must be strictly increasing across the window.
            gap = 0.0
            for train_value, val_value in zip(train_hist[-window:], val_hist[-window:]):
                diff = abs(train_value - val_value)
                if diff <= gap:
                    return False
                gap = diff
    else:
        for hist in train_losses.values():
            if len(hist) <= window:
                return False
            # No improvement below the loss observed `window` epochs ago.
            reference = hist[-window]
            if any(value < reference for value in hist[-window + 1:]):
                return False
    return True
81
+
@@ -0,0 +1,3 @@
1
+ from nnodely.exporter.export import save_model, load_model, export_python_model
2
+ from nnodely.exporter.exporter import Exporter
3
+ from nnodely.exporter.standardexporter import StandardExporter