nnodely 0.14.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mplplots/__init__.py +0 -0
- mplplots/plots.py +131 -0
- nnodely/__init__.py +42 -0
- nnodely/activation.py +85 -0
- nnodely/arithmetic.py +203 -0
- nnodely/earlystopping.py +81 -0
- nnodely/exporter/__init__.py +3 -0
- nnodely/exporter/export.py +275 -0
- nnodely/exporter/exporter.py +45 -0
- nnodely/exporter/reporter.py +48 -0
- nnodely/exporter/standardexporter.py +108 -0
- nnodely/fir.py +150 -0
- nnodely/fuzzify.py +221 -0
- nnodely/initializer.py +31 -0
- nnodely/input.py +131 -0
- nnodely/linear.py +130 -0
- nnodely/localmodel.py +82 -0
- nnodely/logger.py +94 -0
- nnodely/loss.py +30 -0
- nnodely/model.py +263 -0
- nnodely/modeldef.py +205 -0
- nnodely/nnodely.py +1295 -0
- nnodely/optimizer.py +91 -0
- nnodely/output.py +23 -0
- nnodely/parameter.py +103 -0
- nnodely/parametricfunction.py +329 -0
- nnodely/part.py +201 -0
- nnodely/relation.py +149 -0
- nnodely/trigonometric.py +67 -0
- nnodely/utils.py +101 -0
- nnodely/visualizer/__init__.py +4 -0
- nnodely/visualizer/dynamicmpl/functionplot.py +34 -0
- nnodely/visualizer/dynamicmpl/fuzzyplot.py +31 -0
- nnodely/visualizer/dynamicmpl/resultsplot.py +28 -0
- nnodely/visualizer/dynamicmpl/trainingplot.py +46 -0
- nnodely/visualizer/mplnotebookvisualizer.py +66 -0
- nnodely/visualizer/mplvisualizer.py +215 -0
- nnodely/visualizer/textvisualizer.py +320 -0
- nnodely/visualizer/visualizer.py +84 -0
- nnodely-0.14.0.dist-info/LICENSE +21 -0
- nnodely-0.14.0.dist-info/METADATA +401 -0
- nnodely-0.14.0.dist-info/RECORD +44 -0
- nnodely-0.14.0.dist-info/WHEEL +5 -0
- nnodely-0.14.0.dist-info/top_level.txt +2 -0
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
import matplotlib.pyplot as plt
|
|
2
|
+
|
|
3
|
+
from nnodely.visualizer.textvisualizer import TextVisualizer
|
|
4
|
+
from nnodely.fuzzify import return_fuzzify
|
|
5
|
+
from nnodely.parametricfunction import return_standard_inputs, return_function
|
|
6
|
+
from nnodely.utils import check
|
|
7
|
+
from mplplots import plots
|
|
8
|
+
|
|
9
|
+
class MPLNotebookVisualizer(TextVisualizer):
    """Visualizer that renders training/result/function plots inline with
    matplotlib (notebook-friendly), on top of the textual output of
    :class:`TextVisualizer`.

    NOTE(review): assumes ``self.n4m`` (the nnodely object) is attached by the
    framework before any ``show*`` method is called — confirm against callers.
    """

    def __init__(self, verbose = 1):
        super().__init__(verbose)

    def showEndTraining(self, epoch, train_losses, val_losses):
        """Plot, for every minimizer, the train (and validation, if present)
        loss curves collected over the whole training."""
        for key in self.n4m.model_def['Minimizers'].keys():
            fig = plt.figure()
            ax = fig.add_subplot(111)
            # val_losses is empty when no validation set was used:
            # guard it instead of raising KeyError (fix).
            val_loss = val_losses[key] if val_losses else []
            plots.plot_training(ax, "Training", key, train_losses[key], val_loss)
            plt.show()

    def showResult(self, name_data):
        """Print the textual results table, then plot prediction 'A' vs 'B'
        for every minimizer of the dataset ``name_data``."""
        super().showResult(name_data)
        for key in self.n4m.model_def['Minimizers'].keys():
            fig = plt.figure()
            ax = fig.add_subplot(111)
            plots.plot_results(ax, name_data, key, self.n4m.prediction[name_data][key]['A'],
                               self.n4m.prediction[name_data][key]['B'], self.n4m.model_def['Info']["SampleTime"])
            plt.show()

    def showWeights(self, weights = None):
        # Intentionally no graphical weight plot for the notebook visualizer.
        pass

    def showFunctions(self, functions = None, xlim = None, num_points = 1000):
        """Plot the fuzzify / parametric functions of the model.

        :param functions: iterable of function names to plot; ``None`` plots all.
        :param xlim: x-axis limits forwarded to the sampling helpers.
        :param num_points: number of sample points per input dimension.
        """
        check(self.n4m.neuralized, ValueError, "The model has not been neuralized.")
        for fun, value in self.n4m.model_def['Functions'].items():
            # fix: original did `fun in functions`, which raises TypeError
            # for the default functions=None; None now means "plot all".
            if functions is not None and fun not in functions:
                continue
            if 'functions' in value:
                # Fuzzify function: plot every activation channel.
                x, activ_fun = return_fuzzify(value, xlim, num_points)
                fig = plt.figure()
                ax = fig.add_subplot(111)
                plots.plot_fuzzy(ax, fun, x, activ_fun, value['centers'])
                plt.show()
            elif 'code' in value:  # fix: original tested the literal 'code' (always truthy)
                function_inputs = return_standard_inputs(value, self.n4m.model_def_values, xlim, num_points)
                function_output, function_input_list = return_function(value, function_inputs)
                if value['n_input'] == 2:
                    # Two inputs: sample a grid and draw a 3D surface.
                    x0 = function_inputs[0].reshape(num_points, num_points).tolist()
                    x1 = function_inputs[1].reshape(num_points, num_points).tolist()
                    output = function_output.reshape(num_points, num_points).tolist()
                    params = []
                    for i, _name in enumerate(value['params_and_consts']):
                        params += [function_inputs[i + value['n_input']].tolist()]
                    plots.plot_3d_function(plt, fun, x0, x1, params, output, function_input_list)
                else:
                    # Single input: plain 2D curve.
                    x = function_inputs[0].reshape(num_points).tolist()
                    output = function_output.reshape(num_points).tolist()
                    params = []
                    for i, _name in enumerate(value['params_and_consts']):
                        params += [function_inputs[i + value['n_input']].tolist()]
                    plots.plot_2d_function(plt, fun, x, params, output, function_input_list)
                plt.show()
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
|
|
@@ -0,0 +1,215 @@
|
|
|
1
|
+
import subprocess, json, os, importlib
|
|
2
|
+
|
|
3
|
+
from nnodely.visualizer.textvisualizer import TextVisualizer
|
|
4
|
+
from nnodely.fuzzify import return_fuzzify
|
|
5
|
+
from nnodely.parametricfunction import return_standard_inputs, return_function
|
|
6
|
+
from nnodely.utils import check
|
|
7
|
+
from nnodely.modeldef import ModelDef
|
|
8
|
+
|
|
9
|
+
from nnodely.logger import logging, nnLogger
|
|
10
|
+
log = nnLogger(__name__, logging.INFO)
|
|
11
|
+
|
|
12
|
+
def get_library_path(library_name):
    """Return the directory containing the installed package *library_name*.

    :param library_name: importable module/package name.
    :return: absolute path of the directory holding the package.
    :raises ImportError: if the library cannot be located.
    """
    # fix: `importlib.util` is not guaranteed to be bound by a bare
    # `import importlib`; import the submodule explicitly.
    from importlib import util
    spec = util.find_spec(library_name)
    if spec is None:
        raise ImportError(f"Library {library_name} not found")
    return os.path.dirname(spec.origin)
|
|
17
|
+
|
|
18
|
+
class MPLVisualizer(TextVisualizer):
    """Visualizer that renders plots in detached subprocesses.

    Each plot type (training, results, fuzzy/parametric functions) is shown by
    spawning a dedicated python script and streaming one line of JSON to its
    stdin. Subprocess handles are kept in ``process_training`` (per minimizer),
    ``process_results`` (per dataset, per minimizer) and ``process_function``
    (per function name).
    """

    def __init__(self, verbose = 1):
        super().__init__(verbose)
        import signal
        import sys
        # Paths to the dynamic matplotlib visualizer scripts shipped with nnodely.
        # (fix: the original called get_library_path once for nothing, then
        # three more times; compute it once.)
        base_path = get_library_path('nnodely')
        self.training_visualizer_script = os.path.join(base_path, 'visualizer/dynamicmpl/trainingplot.py')
        self.time_series_visualizer_script = os.path.join(base_path, 'visualizer/dynamicmpl/resultsplot.py')
        self.fuzzy_visualizer_script = os.path.join(base_path, 'visualizer/dynamicmpl/fuzzyplot.py')
        self.function_visualizer_script = os.path.join(base_path, 'visualizer/dynamicmpl/functionplot.py')
        # Handles of the running plot subprocesses.
        self.process_training = {}
        self.process_results = {}
        self.process_function = {}

        def signal_handler(sig, frame):
            # Terminate every plot subprocess before exiting on Ctrl-C.
            for proc in self.process_training.values():
                proc.terminate()
                proc.wait()
            for procs in self.process_results.values():
                for proc in procs.values():
                    proc.terminate()
                    proc.wait()
            self.process_results = {}
            for proc in self.process_function.values():
                proc.terminate()
                proc.wait()  # fix: original referenced self.process_functios (typo -> AttributeError)
            sys.exit()

        signal.signal(signal.SIGINT, signal_handler)

    def showStartTraining(self):
        # The training plot processes are started lazily in showTraining(epoch=0).
        pass

    def showTraining(self, epoch, train_losses, val_losses):
        """Stream the losses of the current epoch to one plot subprocess per minimizer."""
        if epoch == 0:
            # Close any plot left over from a previous training run.
            # (fix: original also stored a useless `{}` placeholder per key.)
            for key in self.process_training.keys():
                if self.process_training[key].poll() is None:
                    self.process_training[key].terminate()
                    self.process_training[key].wait()
            self.process_training = {}
            for key in self.n4m.model_def['Minimizers'].keys():
                self.process_training[key] = subprocess.Popen(['python', self.training_visualizer_script], stdin=subprocess.PIPE, text=True)

        num_of_epochs = self.n4m.run_training_params['num_of_epochs']
        train_dataset = self.n4m.run_training_params['train_dataset']
        validation_dataset = self.n4m.run_training_params['validation_dataset']
        if epoch + 1 <= num_of_epochs:
            for key in self.n4m.model_def['Minimizers'].keys():
                if key not in self.process_training:
                    continue  # plot process already closed (e.g. after a broken pipe)
                if val_losses:
                    val_loss = val_losses[key][epoch]
                else:
                    val_loss = []
                data = {"title": f"Training on {train_dataset} and {validation_dataset}", "key": key, "last": num_of_epochs - (epoch + 1), "epoch": epoch,
                        "train_losses": train_losses[key][epoch], "val_losses": val_loss}
                try:
                    # Send data to the visualizer process
                    self.process_training[key].stdin.write(f"{json.dumps(data)}\n")
                    self.process_training[key].stdin.flush()
                except BrokenPipeError:
                    # The user closed the plot window: tear everything down
                    # and stop writing (fix: original kept iterating over
                    # handles that closeTraining had just invalidated).
                    self.closeTraining()
                    log.warning("The visualizer process has been closed.")
                    break

        if epoch + 1 == num_of_epochs:
            for key in self.n4m.model_def['Minimizers'].keys():
                if key in self.process_training:
                    self.process_training[key].stdin.close()

    def showResult(self, name_data):
        """Print the textual results, then open one results-plot subprocess per minimizer."""
        super().showResult(name_data)
        check(name_data in self.n4m.performance, ValueError, f"Results not available for {name_data}.")
        if name_data in self.process_results:
            # Close stale plots for this dataset.
            for key in self.n4m.model_def['Minimizers'].keys():
                if key in self.process_results[name_data] and self.process_results[name_data][key].poll() is None:
                    self.process_results[name_data][key].terminate()
                    self.process_results[name_data][key].wait()
        self.process_results[name_data] = {}

        for key in self.n4m.model_def['Minimizers'].keys():
            # Start the data visualizer process
            self.process_results[name_data][key] = subprocess.Popen(['python', self.time_series_visualizer_script], stdin=subprocess.PIPE,
                                                                    text=True)
            data = {"name_data": name_data,
                    "key": key,
                    "performance": self.n4m.performance[name_data][key],
                    "prediction_A": self.n4m.prediction[name_data][key]['A'],
                    "prediction_B": self.n4m.prediction[name_data][key]['B'],
                    "sample_time": self.n4m.model_def['Info']["SampleTime"]}
            try:
                # Send data to the visualizer process
                self.process_results[name_data][key].stdin.write(f"{json.dumps(data)}\n")
                self.process_results[name_data][key].stdin.flush()
                self.process_results[name_data][key].stdin.close()
            except BrokenPipeError:
                # fix: original called self.closeResult(self, name_data) -> TypeError
                self.closeResult(name_data)
                log.warning(f"The visualizer {name_data} process has been closed.")

    def showWeights(self, weights = None):
        # Intentionally no graphical weight plot for this visualizer.
        pass

    def showFunctions(self, functions = None, xlim = None, num_points = 1000):
        """Open a plot subprocess for each selected fuzzify / parametric function.

        :param functions: iterable of function names to plot; ``None`` plots all.
        :param xlim: x-axis limits forwarded to the sampling helpers.
        :param num_points: number of sample points per input dimension.
        """
        check(self.n4m.neuralized, ValueError, "The model has not been neuralized.")
        for key, value in self.n4m.model_def['Functions'].items():
            # fix: original did `key in functions`, raising TypeError for the
            # default functions=None; None now means "plot all".
            if functions is not None and key not in functions:
                continue
            # Close a previous plot of the same function, if still alive.
            if key in self.process_function and self.process_function[key].poll() is None:
                self.process_function[key].terminate()
                self.process_function[key].wait()

            if 'functions' in value:
                # Fuzzify function: one curve per activation channel.
                x, activ_fun = return_fuzzify(value, xlim, num_points)
                data = {"name": key,
                        "x": x,
                        "y": activ_fun,
                        "chan_centers": value['centers']}
                # Start the data visualizer process
                self.process_function[key] = subprocess.Popen(['python', self.fuzzy_visualizer_script],
                                                              stdin=subprocess.PIPE,
                                                              text=True)
            elif 'code' in value:  # fix: original tested the literal 'code' (always truthy)
                model_def = ModelDef(self.n4m.model_def)
                model_def.updateParameters(self.n4m.model)
                function_inputs = return_standard_inputs(value, model_def, xlim, num_points)
                function_output, function_input_list = return_function(value, function_inputs)

                data = {"name": key}
                if value['n_input'] == 2:
                    data['x0'] = function_inputs[0].reshape(num_points, num_points).tolist()
                    data['x1'] = function_inputs[1].reshape(num_points, num_points).tolist()
                    data['output'] = function_output.reshape(num_points, num_points).tolist()
                else:
                    data['x0'] = function_inputs[0].reshape(num_points).tolist()
                    data['output'] = function_output.reshape(num_points).tolist()
                data['params'] = []
                # fix: the original loop reused `key` as loop variable, so the
                # subprocess below was stored/written under the wrong name.
                for i, _name in enumerate(value['params_and_consts']):
                    data['params'] += [function_inputs[i + value['n_input']].tolist()]
                data['input_names'] = function_input_list

                # Start the data visualizer process
                self.process_function[key] = subprocess.Popen(['python', self.function_visualizer_script],
                                                              stdin=subprocess.PIPE,
                                                              text=True)
            else:
                continue  # unknown function kind: nothing to plot
            try:
                # Send data to the visualizer process
                self.process_function[key].stdin.write(f"{json.dumps(data)}\n")
                self.process_function[key].stdin.flush()
                self.process_function[key].stdin.close()
            except BrokenPipeError:
                self.closeFunctions()
                log.warning(f"The visualizer {functions} process has been closed.")

    def closeFunctions(self, functions = None):
        """Terminate function-plot subprocesses (all of them when functions is None)."""
        if functions is None:
            for proc in self.process_function.values():
                proc.terminate()
                proc.wait()
            self.process_function = {}
        else:
            for key in functions:
                self.process_function[key].terminate()
                self.process_function[key].wait()
                self.process_function.pop(key)

    def closeTraining(self, minimizer = None):
        """Terminate training-plot subprocesses (all of them when minimizer is None)."""
        if minimizer is None:
            for key in self.n4m.model_def['Minimizers'].keys():
                if key in self.process_training and self.process_training[key].poll() is None:
                    self.process_training[key].terminate()
                    self.process_training[key].wait()
                # fix: original replaced the handle with `{}`, which later
                # breaks .poll()/.stdin accesses; remove the entry instead.
                self.process_training.pop(key, None)
        else:
            self.process_training[minimizer].terminate()
            self.process_training[minimizer].wait()
            self.process_training.pop(minimizer)

    def closeResult(self, name_data = None, minimizer = None):
        """Terminate results-plot subprocesses, optionally restricted to one
        dataset and/or one minimizer."""
        if name_data is None:
            check(minimizer is None, ValueError, "If name_data is None, minimizer must be None.")
            for procs in self.process_results.values():
                for proc in procs.values():
                    proc.terminate()
                    proc.wait()
            self.process_results = {}
        else:
            if minimizer is None:
                for proc in self.process_results[name_data].values():
                    proc.terminate()
                    proc.wait()
                self.process_results[name_data] = {}
            else:
                self.process_results[name_data][minimizer].terminate()
                self.process_results[name_data][minimizer].wait()
                self.process_results[name_data].pop(minimizer)
|
|
211
|
+
|
|
212
|
+
|
|
213
|
+
|
|
214
|
+
|
|
215
|
+
|
|
@@ -0,0 +1,320 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
from pprint import pformat
|
|
3
|
+
|
|
4
|
+
from nnodely.visualizer.visualizer import Visualizer, color, GREEN, RED, BLUE
|
|
5
|
+
|
|
6
|
+
class TextVisualizer(Visualizer):
    """Console visualizer: prints models, datasets, training progress and
    results as colored, fixed-width ASCII tables.

    NOTE(review): assumes ``self.n4m`` (the nnodely object) is attached by the
    framework before any ``show*`` method is called — confirm against callers.
    """

    def __init__(self, verbose=1):
        # verbose: 0 = silent, 1 = standard output, >=2 = detailed output.
        self.verbose = verbose

    # --- low-level printing helpers -------------------------------------

    def __title(self, msg, length = 80):
        # Centered '='-padded section title. (fix: parameter was spelled 'lenght')
        print(color((msg).center(length, '='), GREEN, True))

    def __subtitle(self, msg, length = 80):
        # Centered '-'-padded subsection title.
        print(color((msg).center(length, '-'), GREEN, True))

    def __line(self):
        # Full-width '=' separator.
        print(color('='.center(80, '='), GREEN))

    def __singleline(self):
        # Full-width '-' separator.
        print(color('-'.center(80, '-'), GREEN))

    def __info(self, name, dim =30):
        # Left-justified informational line in blue.
        print(color((name).ljust(dim), BLUE))

    def __paramjson(self, name, value, dim =30):
        # Pretty-print `value` aligned to a `dim`-wide label column.
        lines = pformat(value, width=80 - dim).strip().splitlines()
        aligned = ('\n' + (' ' * dim)).join(x for x in lines)
        print(color((name).ljust(dim) + aligned, GREEN))

    def __param(self, name, value, dim =30):
        # Label + value on one line. (fix: coerce value to str — some callers,
        # e.g. showTrainParams with par['early_stopping'], pass non-strings.)
        print(color((name).ljust(dim) + str(value), GREEN))

    # --- model / dataset inspection -------------------------------------

    def showModel(self, model):
        """Pretty-print the whole model definition."""
        if self.verbose >= 1:
            self.__title(" nnodely Model ")
            print(color(pformat(model), GREEN))
            self.__line()

    def showMinimize(self, variable_name):
        """Print the two sides (A/B) of the minimizer `variable_name`."""
        if self.verbose >= 2:
            self.__title(f" Minimize Error of {variable_name} with {self.n4m.model_def['Minimizers'][variable_name]['loss']} ")
            self.__paramjson(f"Model {self.n4m.model_def['Minimizers'][variable_name]['A'].name}", self.n4m.model_def['Minimizers'][variable_name]['A'].json)
            self.__paramjson(f"Model {self.n4m.model_def['Minimizers'][variable_name]['B'].name}", self.n4m.model_def['Minimizers'][variable_name]['B'].json)
            self.__line()

    def showModelInputWindow(self):
        """Print the backward/forward sample windows of every input and state."""
        if self.verbose >= 2:
            input_ns_backward = {key: value['ns'][0] for key, value in
                                 (self.n4m.model_def['Inputs'] | self.n4m.model_def['States']).items()}
            input_ns_forward = {key: value['ns'][1] for key, value in
                                (self.n4m.model_def['Inputs'] | self.n4m.model_def['States']).items()}
            self.__title(" nnodely Model Input Windows ")
            #self.__paramjson("time_window_backward:",self.n4m.input_tw_backward)
            #self.__paramjson("time_window_forward:",self.n4m.input_tw_forward)
            self.__paramjson("sample_window_backward:", input_ns_backward)
            self.__paramjson("sample_window_forward:", input_ns_forward)
            self.__paramjson("input_n_samples:", self.n4m.input_n_samples)
            self.__param("max_samples [backw, forw]:", f"[{self.n4m.model_def['Info']['ns'][0]},{self.n4m.model_def['Info']['ns'][1]}]")
            self.__param("max_samples total:", f"{self.n4m.max_n_samples}")
            self.__line()

    def showModelRelationSamples(self):
        """Print the number of samples of every relation."""
        if self.verbose >= 2:
            self.__title(" nnodely Model Relation Samples ")
            self.__paramjson("Relation_samples:", self.n4m.relation_samples)
            self.__line()

    def showBuiltModel(self):
        """Pretty-print the built (torch) model."""
        if self.verbose >= 2:
            self.__title(" nnodely Built Model ")
            print(color(pformat(self.n4m.model), GREEN))
            self.__line()

    def showWeights(self, weights = None):
        """Print the model parameters, optionally restricted to `weights`."""
        self.__title(" nnodely Models Weights ")
        for key, param in self.n4m.model.all_parameters.items():
            if weights is None or key in weights:
                self.__paramjson(key, param.tolist())
        self.__line()

    def showWeightsInTrain(self, batch = None, epoch = None, weights = None):
        """Print the parameter values at the end of a batch or an epoch."""
        if self.verbose >= 2:
            par = self.n4m.run_training_params
            dim = len(self.n4m.model_def['Minimizers'])
            COLOR = BLUE
            if epoch is not None:
                print(color('|' + (f"{epoch + 1}/{par['num_of_epochs']}").center(10, ' ') + '|', COLOR), end='')
                print(color((f' Params end epochs {epoch + 1} ').center(20 * (dim + 1) - 1, '-') + '|', COLOR))

            if batch is not None:
                print(color('|' + (f"{batch + 1}").center(10, ' ') + '|', COLOR), end='')
                print(color((f' Params end batch {batch + 1} ').center(20 * (dim + 1) - 1, '-') + '|', COLOR))

            for key, param in self.n4m.model.all_parameters.items():
                if weights is None or key in weights:
                    print(color('|' + (f"{key}").center(10, ' ') + '|', COLOR), end='')
                    print(color((f'{param.tolist()}').center(20 * (dim + 1) - 1, ' ') + '|', COLOR))

            if epoch is not None:
                print(color('|' + (f'').center(10 + 20 * (dim + 1), '-') + '|'))

    def showDataset(self, name):
        """Print size and per-input shape of the loaded dataset `name`."""
        if self.verbose >= 1:
            self.__title(" nnodely Model Dataset ")
            self.__param("Dataset Name:", name)
            self.__param("Number of files:", f'{self.n4m.file_count}')
            self.__param("Total number of samples:", f'{self.n4m.num_of_samples[name]}')
            for key in self.n4m.model_def['Inputs'].keys():
                if key in self.n4m.data[name].keys():
                    self.__param(f"Shape of {key}:", f'{self.n4m.data[name][key].shape}')
            self.__line()

    # --- training progress ----------------------------------------------

    def showStartTraining(self):
        """Print the header row of the training-progress table."""
        if self.verbose >= 1:
            par = self.n4m.run_training_params
            dim = len(self.n4m.model_def['Minimizers'])
            self.__title(" nnodely Training ", 12 + (len(self.n4m.model_def['Minimizers']) + 1) * 20)
            print(color('|' + (f'Epoch').center(10, ' ') + '|'), end='')
            for key in self.n4m.model_def['Minimizers'].keys():
                print(color((f'{key}').center(19, ' ') + '|'), end='')
            print(color((f'Total').center(19, ' ') + '|'))

            print(color('|' + (f' ').center(10, ' ') + '|'), end='')
            for key in self.n4m.model_def['Minimizers'].keys():
                print(color((f'Loss').center(19, ' ') + '|'), end='')
            print(color((f'Loss').center(19, ' ') + '|'))

            # One sub-column per split when a validation set is present.
            print(color('|' + (f' ').center(10, ' ') + '|'), end='')
            for key in self.n4m.model_def['Minimizers'].keys():
                if par['n_samples_val']:
                    print(color((f'train').center(9, ' ') + '|'), end='')
                    print(color((f'val').center(9, ' ') + '|'), end='')
                else:
                    print(color((f'train').center(19, ' ') + '|'), end='')
            if par['n_samples_val']:
                print(color((f'train').center(9, ' ') + '|'), end='')
                print(color((f'val').center(9, ' ') + '|'))
            else:
                print(color((f'train').center(19, ' ') + '|'))

            print(color('|' + (f'').center(10 + 20 * (dim + 1), '-') + '|'))

    def showTraining(self, epoch, train_losses, val_losses):
        """Print one row of the training table for `epoch`; every `show_epoch`
        epochs the row is re-printed in color."""
        if self.verbose >= 1:
            eng = lambda val: np.format_float_scientific(val, precision=3)
            par = self.n4m.run_training_params
            show_epoch = 1 if par['num_of_epochs'] <= 20 else 10
            dim = len(self.n4m.model_def['Minimizers'])
            if epoch < par['num_of_epochs']:
                print('', end='\r')
                print('|' + (f"{epoch + 1}/{par['num_of_epochs']}").center(10, ' ') + '|', end='')
                train_loss = []
                val_loss = []
                for key in self.n4m.model_def['Minimizers'].keys():
                    train_loss.append(train_losses[key][epoch])
                    if val_losses:
                        val_loss.append(val_losses[key][epoch])
                        print((f'{eng(train_losses[key][epoch])}').center(9, ' ') + '|', end='')
                        print((f'{eng(val_losses[key][epoch])}').center(9, ' ') + '|', end='')
                    else:
                        print((f'{eng(train_losses[key][epoch])}').center(19, ' ') + '|', end='')

                if val_losses:
                    print((f'{eng(np.mean(train_loss))}').center(9, ' ') + '|', end='')
                    print((f'{eng(np.mean(val_loss))}').center(9, ' ') + '|', end='')
                else:
                    print((f'{eng(np.mean(train_loss))}').center(19, ' ') + '|', end='')

                if (epoch + 1) % show_epoch == 0:
                    print('', end='\r')
                    print(color('|' + (f"{epoch + 1}/{par['num_of_epochs']}").center(10, ' ') + '|'), end='')
                    for key in self.n4m.model_def['Minimizers'].keys():
                        if val_losses:
                            print(color((f'{eng(train_losses[key][epoch])}').center(9, ' ') + '|'), end='')
                            print(color((f'{eng(val_losses[key][epoch])}').center(9, ' ') + '|'), end='')
                        else:
                            print(color((f'{eng(train_losses[key][epoch])}').center(19, ' ') + '|'), end='')

                    if val_losses:
                        print(color((f'{eng(np.mean(train_loss))}').center(9, ' ') + '|'), end='')
                        print(color((f'{eng(np.mean(val_loss))}').center(9, ' ') + '|'))
                    else:
                        print(color((f'{eng(np.mean(train_loss))}').center(19, ' ') + '|'))

                if epoch + 1 == par['num_of_epochs']:
                    print(color('|' + (f'').center(10 + 20 * (dim + 1), '-') + '|'))

    def showTrainingTime(self, time):
        """Print the total wall-clock training time."""
        if self.verbose >= 1:
            self.__title(" nnodely Training Time ")
            self.__param("Total time of Training:", f'{time}')
            self.__line()

    def showTrainParams(self):
        """Print the effective training hyper-parameters."""
        if self.verbose >= 1:
            self.__title(" nnodely Model Train Parameters ")
            par = self.n4m.run_training_params
            batch_size = par['train_batch_size']
            n_samples = par['n_samples_train']
            n_update = par['update_per_epochs']
            unused_samples = par['unused_samples']

            self.__paramjson("models:", par['models'])
            self.__paramjson("num of epochs:", par['num_of_epochs'])
            self.__param("update per epochs:", f"{n_update}")
            if par['recurrent_train']:
                self.__info("└>(n_samples-batch_size-prediction_samples+1)/(batch_size+step-1)+1")
            else:
                self.__info("└>(n_samples-batch_size)/batch_size+1")

            if par['shuffle_data']:
                self.__param('shuffle data:', str(par['shuffle_data']))

            if 'early_stopping' in par:
                self.__param('early stopping:', par['early_stopping'])
                self.__paramjson('early stopping params:', par['early_stopping_params'])

            if par['recurrent_train']:
                self.__param("prediction samples:", f"{par['prediction_samples']}")
                self.__param("step:", f"{par['step']}")
                self.__paramjson("closed loop:", par['closed_loop'])
                self.__paramjson("connect:", par['connect'])

            self.__param("train dataset:", f"{par['train_dataset']}")
            self.__param("\t- num of samples:", f"{n_samples}")
            self.__param("\t- batch size:", f"{batch_size}")
            self.__param("\t- unused samples:", f"{unused_samples}")
            if par['recurrent_train']:
                self.__info("\t └>n_samples-prediction_samples-update_per_epochs*(batch_size+step-1)")
            else:
                self.__info("\t └>n_samples-update_per_epochs*batch_size")

            if par['n_samples_val']:
                self.__param("val dataset:", f"{par['validation_dataset']}")
                self.__param("val {batch size, samples}:", f"{{{par['val_batch_size']}, {par['n_samples_val']}}}")
            if par['n_samples_test']:
                self.__param("test dataset:", f"{par['test_dataset']}")
                self.__param("test {batch size, samples}:", f"{{{par['test_batch_size']}, {par['n_samples_test']}}}")

            self.__paramjson('minimizers:', par['minimizers'])

            self.__param("optimizer:", par['optimizer'])
            self.__paramjson("optimizer defaults:", self.n4m.run_training_params['optimizer_defaults'])
            if self.n4m.run_training_params['optimizer_params'] is not None:
                self.__paramjson("optimizer params:", self.n4m.run_training_params['optimizer_params'])

            self.__line()

    # --- results ---------------------------------------------------------

    def showResult(self, name_data):
        """Print the per-minimizer loss / FVU / AIC table for dataset `name_data`."""
        eng = lambda val: np.format_float_scientific(val, precision=3)
        if self.verbose >= 1:
            dim_loss = len(max(self.n4m.model_def['Minimizers'].keys(), key=len))
            loss_type_list = set([value["loss"] for ind, (key, value) in enumerate(self.n4m.model_def['Minimizers'].items())])
            self.__title(f" nnodely Model Results for {name_data} ", dim_loss + 2 + (len(loss_type_list) + 2) * 20)
            print(color('|' + (f'Loss').center(dim_loss, ' ') + '|'), end='')
            for loss in loss_type_list:
                print(color((f'{loss}').center(19, ' ') + '|'), end='')
            print(color((f'FVU').center(19, ' ') + '|'), end='')
            print(color((f'AIC').center(19, ' ') + '|'))

            print(color('|' + (f'').center(dim_loss, ' ') + '|'), end='')
            for i in range(len(loss_type_list)):
                print(color((f'small better').center(19, ' ') + '|'), end='')
            print(color((f'small better').center(19, ' ') + '|'), end='')
            print(color((f'lower better').center(19, ' ') + '|'))

            print(color('|' + (f'').center(dim_loss + 20 * (len(loss_type_list) + 2), '-') + '|'))
            for ind, (key, value) in enumerate(self.n4m.model_def['Minimizers'].items()):
                print(color('|' + (f'{key}').center(dim_loss, ' ') + '|'), end='')
                for loss in list(loss_type_list):
                    if value["loss"] == loss:
                        print(color((f'{eng(self.n4m.performance[name_data][key][value["loss"]])}').center(19, ' ') + '|'), end='')
                    else:
                        print(color((f' ').center(19, ' ') + '|'), end='')
                print(color((f'{eng(self.n4m.performance[name_data][key]["fvu"]["total"])}').center(19, ' ') + '|'), end='')
                print(color((f'{eng(self.n4m.performance[name_data][key]["aic"]["value"])}').center(19, ' ') + '|'))

            print(color('|' + (f'').center(dim_loss + 20 * (len(loss_type_list) + 2), '-') + '|'))
            print(color('|' + (f'Total').center(dim_loss, ' ') + '|'), end='')
            print(color((f'{eng(self.n4m.performance[name_data]["total"]["mean_error"])}').center(len(loss_type_list) * 20 - 1, ' ') + '|'), end='')
            print(color((f'{eng(self.n4m.performance[name_data]["total"]["fvu"])}').center(19, ' ') + '|'), end='')
            print(color((f'{eng(self.n4m.performance[name_data]["total"]["aic"])}').center(19, ' ') + '|'))

            print(color('|' + (f'').center(dim_loss + 20 * (len(loss_type_list) + 2), '-') + '|'))

        if self.verbose >= 2:
            # fix: the title was misspelled "Detalied Results"
            self.__title(" Detailed Results ")
            print(color(pformat(self.n4m.performance), GREEN))
            self.__line()

    # --- persistence notifications ---------------------------------------

    def saveModel(self, name, path):
        """Notify that the model `name` was saved to `path`."""
        if self.verbose >= 1:
            self.__title(f" Save {name} ")
            self.__param("Model saved in:", path)
            self.__line()

    def loadModel(self, name, path):
        """Notify that the model `name` was loaded from `path`."""
        if self.verbose >= 1:
            self.__title(f" Load {name} ")
            self.__param("Model loaded from:", path)
            self.__line()

    def exportModel(self, name, path):
        """Notify that the model `name` was exported to `path`."""
        if self.verbose >= 1:
            self.__title(f" Export {name} ")
            self.__param("Model exported in:", path)
            self.__line()

    def importModel(self, name, path):
        """Notify that the model `name` was imported from `path`."""
        if self.verbose >= 1:
            self.__title(f" Import {name} ")
            self.__param("Model imported from:", path)
            self.__line()

    def exportReport(self, name, path):
        """Notify that the report for `name` was exported to `path`."""
        if self.verbose >= 1:
            self.__title(f" Export {name} Report ")
            self.__param("Report exported in:", path)
            self.__line()
|