nnodely 0.14.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mplplots/__init__.py +0 -0
- mplplots/plots.py +131 -0
- nnodely/__init__.py +42 -0
- nnodely/activation.py +85 -0
- nnodely/arithmetic.py +203 -0
- nnodely/earlystopping.py +81 -0
- nnodely/exporter/__init__.py +3 -0
- nnodely/exporter/export.py +275 -0
- nnodely/exporter/exporter.py +45 -0
- nnodely/exporter/reporter.py +48 -0
- nnodely/exporter/standardexporter.py +108 -0
- nnodely/fir.py +150 -0
- nnodely/fuzzify.py +221 -0
- nnodely/initializer.py +31 -0
- nnodely/input.py +131 -0
- nnodely/linear.py +130 -0
- nnodely/localmodel.py +82 -0
- nnodely/logger.py +94 -0
- nnodely/loss.py +30 -0
- nnodely/model.py +263 -0
- nnodely/modeldef.py +205 -0
- nnodely/nnodely.py +1295 -0
- nnodely/optimizer.py +91 -0
- nnodely/output.py +23 -0
- nnodely/parameter.py +103 -0
- nnodely/parametricfunction.py +329 -0
- nnodely/part.py +201 -0
- nnodely/relation.py +149 -0
- nnodely/trigonometric.py +67 -0
- nnodely/utils.py +101 -0
- nnodely/visualizer/__init__.py +4 -0
- nnodely/visualizer/dynamicmpl/functionplot.py +34 -0
- nnodely/visualizer/dynamicmpl/fuzzyplot.py +31 -0
- nnodely/visualizer/dynamicmpl/resultsplot.py +28 -0
- nnodely/visualizer/dynamicmpl/trainingplot.py +46 -0
- nnodely/visualizer/mplnotebookvisualizer.py +66 -0
- nnodely/visualizer/mplvisualizer.py +215 -0
- nnodely/visualizer/textvisualizer.py +320 -0
- nnodely/visualizer/visualizer.py +84 -0
- nnodely-0.14.0.dist-info/LICENSE +21 -0
- nnodely-0.14.0.dist-info/METADATA +401 -0
- nnodely-0.14.0.dist-info/RECORD +44 -0
- nnodely-0.14.0.dist-info/WHEEL +5 -0
- nnodely-0.14.0.dist-info/top_level.txt +2 -0
|
@@ -0,0 +1,275 @@
|
|
|
1
|
+
import sys, os, torch, importlib
|
|
2
|
+
|
|
3
|
+
from torch.fx import symbolic_trace
|
|
4
|
+
|
|
5
|
+
from pprint import PrettyPrinter
|
|
6
|
+
|
|
7
|
+
class JsonPrettyPrinter(PrettyPrinter):
    """Pretty printer that keeps strings on a single line.

    Single quotes inside a string are temporarily swapped for the
    placeholder '_"_' so that ``save_model`` can later turn the outer
    quotes into double quotes (JSON style) without corrupting the
    quotes embedded in the string itself.
    """

    def _format(self, object, *args):
        # Anything that is not a string is formatted exactly as the
        # stock PrettyPrinter would.
        if not isinstance(object, str):
            super()._format(object, *args)
            return
        # Disable line wrapping while the string is printed so it is
        # never split across lines, then restore the configured width.
        saved_width = self._width
        self._width = sys.maxsize
        try:
            super()._format(object.replace('\'', '_"_'), *args)
        finally:
            self._width = saved_width
|
|
19
|
+
def save_model(model, model_path):
    """Serialize *model* (a nested dict) to *model_path* as JSON text.

    The JSON is produced textually from the pretty-printed Python repr:
    single quotes become double quotes, the '_"_' placeholder emitted by
    JsonPrettyPrinter is restored to a single quote, and the Python
    literals None/False/True become their JSON counterparts.

    NOTE(review): these are plain text substitutions, so a value that
    itself contains e.g. the word 'None' would also be rewritten —
    confirm model values never collide with the replaced tokens.
    """
    # Export the dictionary as a JSON file
    with open(model_path, 'w') as json_file:
        text = (JsonPrettyPrinter().pformat(model)
                .replace('\'', '\"')
                .replace('_"_', '\'')
                .replace('None', 'null')
                .replace('False', 'false')
                .replace('True', 'true'))
        json_file.write(text)
|
34
|
+
def load_model(model_path):
    """Read a JSON model definition from *model_path* and return it as a dict."""
    import json
    with open(model_path, 'r', encoding='UTF-8') as fp:
        return json.load(fp)
|
|
40
|
+
def export_python_model(model_def, model, model_path):
    """Write *model* to *model_path* as a standalone Python source file.

    The generated file defines a ``TracerModel(torch.nn.Module)`` whose
    ``forward`` comes from symbolically tracing *model* with ``torch.fx``
    and whose parameters/constants are embedded as literal tensors, so the
    file depends only on ``torch``.  *model_def* is the JSON model
    definition, used to resolve parameter names for Fir/Linear relations
    and to re-emit user-defined Fuzzify / ParamFun helper functions.

    NOTE(review): the indentation of this code and of the string literals
    it writes was destroyed by extraction; it has been reconstructed here
    with standard 4-space levels — verify against the package sources.
    """
    package_name = __package__.split('.')[0]

    # Get the symbolic tracer
    with torch.no_grad():
        trace = symbolic_trace(model)

    ## Standard way to modify the graph
    # # Replace all _tensor_constant variables with their constant values
    # for node in trace.graph.nodes:
    #     if node.op == 'get_attr' and node.target.startswith('_tensor_constant'):
    #         constant_value = getattr(model, node.target).item()
    #         with trace.graph.inserting_after(node):
    #             new_node = trace.graph.create_node('call_function', torch.tensor, (constant_value,))
    #             node.replace_all_uses_with(new_node)
    #         trace.graph.erase_node(node)
    #
    # # Recompile the graph
    # trace.recompile()
    ## Standard way to modify the graph

    # Every 'self.xxx' token referenced by the traced forward(): these are
    # the attributes (parameters/constants) the generated file must recreate.
    attributes = sorted(set([line for line in trace.code.split() if 'self.' in line]))
    saved_functions = []  # helper-function names already written to the file

    with open(model_path, 'w') as file:
        #file.write("import torch.nn as nn\n")
        file.write("import torch\n\n")

        # Re-emit the user-defined helper functions the traced code calls.
        for name in model_def['Functions'].keys():
            if 'Fuzzify' in name:
                # Shared helper used by every fuzzify activation to write
                # one channel of the output tensor.
                if 'slicing' not in saved_functions:
                    #file.write("@torch.fx.wrap\n")
                    file.write(f"def {package_name}_fuzzify_slicing(res, i, x):\n")
                    file.write("    res[:, :, i:i+1] = x\n\n")
                    saved_functions.append('slicing')

                function_name = model_def['Functions'][name]['names']
                function_code = model_def['Functions'][name]['functions']
                if isinstance(function_code, list):
                    for i, fun_code in enumerate(function_code):
                        # 'Rectangular'/'Triangular' are built-in shapes, not user code.
                        if fun_code != 'Rectangular' and fun_code != 'Triangular':
                            if function_name[i] not in saved_functions:
                                # Prefix the name to avoid clashes in the generated module.
                                fun_code = fun_code.replace(f'def {function_name[i]}',
                                                            f'def {package_name}_fuzzify_{function_name[i]}')
                                #file.write("@torch.fx.wrap\n")
                                file.write(fun_code)
                                file.write("\n")
                                saved_functions.append(function_name[i])
                else:
                    if (function_name != 'Rectangular') and (function_name != 'Triangular') and (
                            function_name not in saved_functions):
                        function_code = function_code.replace(f'def {function_name}',
                                                              f'def {package_name}_fuzzify_{function_name}')
                        #file.write("@torch.fx.wrap\n")
                        file.write(function_code)
                        file.write("\n")
                        saved_functions.append(function_name)

            elif 'ParamFun' in name:
                function_name = model_def['Functions'][name]['name']
                # torch.fx.wrap(self.model_def['Functions'][name]['name'])
                if function_name not in saved_functions:
                    code = model_def['Functions'][name]['code']
                    code = code.replace(f'def {function_name}', f'def {package_name}_parametricfunction_{function_name}')
                    file.write(code)
                    file.write("\n")
                    saved_functions.append(function_name)

        # Emit the module skeleton; all tensors are inlined as literals so
        # the generated file is self-contained.
        file.write("class TracerModel(torch.nn.Module):\n")
        file.write("    def __init__(self):\n")
        file.write("        super().__init__()\n")
        file.write("        self.all_parameters = {}\n")
        file.write("        self.all_constants = {}\n")
        for attr in attributes:
            if 'all_constant' in attr:
                key = attr.split('.')[-1]
                file.write(
                    f"        self.all_constants[\"{key}\"] = torch.tensor({model.all_constants[key].tolist()})\n")
                #file.write(f"        {attr} = torch.tensor({getattr(trace, attr.replace('self.', ''))})\n")
            elif 'relation_forward' in attr:
                # attr looks like self.relation_forward.<Relation>.<field>…
                key = attr.split('.')[2]
                if 'Fir' in key or 'Linear' in key:
                    if 'weights' in attr.split('.')[3]:
                        # Relations entry layout: [type, inputs, weight, bias, dropout]
                        param = model_def['Relations'][key][2]
                        value = model.all_parameters[param] #.squeeze(0) if 'Linear' in key else model.all_parameters[param]
                        file.write(
                            f"        self.all_parameters[\"{param}\"] = torch.nn.Parameter(torch.tensor({value.tolist()}), requires_grad=True)\n")
                    elif 'bias' in attr.split('.')[3]:
                        param = model_def['Relations'][key][3]
                        # value = model.all_parameters[param].data.squeeze(0) if 'Linear' in key else model.all_parameters[param].data
                        # value = model.all_parameters[param].data
                        file.write(
                            f"        self.all_parameters[\"{param}\"] = torch.nn.Parameter(torch.tensor({model.all_parameters[param].tolist()}), requires_grad=True)\n")
                    elif 'dropout' in attr.split('.')[3]:
                        param = model_def['Relations'][key][4]
                        file.write(f"        self.{key} = torch.nn.Dropout(p={param})\n")
                    # param = model_def['Relations'][key][2] if 'weights' in attr.split('.')[3] else model_def['Relations'][key][3]
                    # value = model.all_parameters[param].data.squeeze(0) if 'Linear' in key else model.all_parameters[param].data
                    # file.write(f"        self.all_parameters[\"{param}\"] = torch.nn.Parameter(torch.{value}, requires_grad=True)\n")
            elif 'all_parameters' in attr:
                key = attr.split('.')[-1]
                file.write(
                    f"        self.all_parameters[\"{key}\"] = torch.nn.Parameter(torch.tensor({model.all_parameters[key].tolist()}), requires_grad=True)\n")
            elif '_tensor_constant' in attr:
                key = attr.split('.')[-1]
                file.write(
                    f"        {attr} = torch.tensor({getattr(model,key).item()})\n")

        file.write("        self.all_parameters = torch.nn.ParameterDict(self.all_parameters)\n")
        file.write("        self.all_constants = torch.nn.ParameterDict(self.all_constants)\n")
        # Stub the state-handling API so the generated model stays drop-in
        # compatible with callers of the original model.
        file.write("    def init_states(self, state_model, connect = {}, reset_states = False):\n")
        file.write("        pass\n")
        file.write("    def reset_connect_variables(self, connect, values = None, only = True):\n")
        file.write("        pass\n")
        file.write("    def reset_states(self, values = None, only = True):\n")
        file.write("        pass\n")

        # Copy the traced forward() body, skipping the wrapped helper stubs
        # at the top of trace.code (one line per saved function plus one),
        # rewriting attribute accesses to the inlined parameters above.
        for line in trace.code.split("\n")[len(saved_functions) + 1:]:
            if 'self.relation_forward' in line:
                if 'dropout' in line:
                    attribute = line.split()[0]
                    # presumably '<prefix>_<relation>_dropout' — TODO confirm
                    layer = attribute.split('_')[2].capitalize()
                    old_line = f"self.relation_forward.{layer}.dropout"
                    new_line = f"self.{layer}"
                    file.write(f"    {line.replace(old_line, new_line)}\n")
                else:
                    attribute = line.split()[-1]
                    relation = attribute.split('.')[2]
                    relation_type = attribute.split('.')[3]
                    param = model_def['Relations'][relation][2] if 'weights' == relation_type else \
                        model_def['Relations'][relation][3]
                    new_attribute = f'self.all_parameters.{param}'
                    file.write(f"    {line.replace(attribute, new_attribute)}\n")
            else:
                file.write(f"    {line}\n")
|
|
177
|
+
def export_pythononnx_model(input_order, outputs_order, model_path, model_onnx_path):
    """Rewrite the generated python model into an ONNX-exportable variant.

    The file produced by ``export_python_model`` has the signature
    ``forward(self, kwargs)`` and returns a dict; ``torch.onnx.export``
    needs positional tensor inputs and a tuple of outputs.  This function
    textually rewrites the file at *model_path* accordingly and writes the
    result to *model_onnx_path*.

    :param input_order: ordered input names, becoming positional parameters
    :param outputs_order: ordered output names, returned as a tuple
    :param model_path: source python model file (read only)
    :param model_onnx_path: destination file for the rewritten model
    """
    # Define the mapping dictionary input: every kwargs['<name>'] access is
    # mapped to the new positional argument, and the replacement forward()
    # header is built in the requested order.
    trace_mapping_input = {}
    forward = 'def forward(self,'
    for i, key in enumerate(input_order):
        value = f'kwargs[\'{key}\']'
        trace_mapping_input[value] = key
        forward = forward + f' {key}' + (',' if i < len(input_order) - 1 else '')
    forward = forward + '):'
    # Define the mapping dictionary output: the tuple-returning epilogue.
    outputs = '    return ('
    for i, key in enumerate(outputs_order):
        outputs += f'outputs[0][\'{key}\']' + (',' if i < len(outputs_order) - 1 else ')')

    # Open and read the file
    with open(model_path, 'r') as file:
        file_content = file.read()
    # Replace the forward header
    file_content = file_content.replace('def forward(self, kwargs):', forward)
    # Perform the substitution of every kwargs access
    for key, value in trace_mapping_input.items():
        file_content = file_content.replace(key, value)
    # Replace the return statement: find the last occurrence of 'return'
    # and turn it into an assignment to 'outputs'.
    last_return_index = file_content.rfind('return')
    if last_return_index != -1:
        file_content = file_content[:last_return_index] + 'outputs =' + file_content[last_return_index + len('return'):]
    # BUGFIX: without a trailing newline the appended return statement
    # would fuse with the last line and break the generated syntax.
    if not file_content.endswith('\n'):
        file_content += '\n'
    file_content += outputs
    # Write the modified content back to a new file
    with open(model_onnx_path, 'w') as file:
        file.write(file_content)
|
210
|
+
def import_python_model(name, model_folder):
    """Load the generated python model file and return a TracerModel instance.

    *model_folder* is prepended to ``sys.path`` so that *name* (the module
    file name without extension) can be imported; a module already present
    in ``sys.modules`` is reloaded so the freshest generated code is used.
    """
    sys.path.insert(0, model_folder)
    module_name = os.path.basename(name)
    loaded = sys.modules.get(module_name)
    if loaded is not None:
        # Already imported once: reload to pick up a regenerated file.
        module = importlib.reload(loaded)
    else:
        # First import of the generated module.
        module = importlib.import_module(module_name)
    return module.TracerModel()
|
|
221
|
+
def export_onnx_model(model_def, model, input_order, output_order, model_path):
    """Export *model* to an ONNX file at *model_path*.

    One random dummy input of shape (1, ntot, dim) is generated per entry
    of *input_order* from the model definition, and the batch axis of
    every input is declared dynamic.
    """
    input_names = list(input_order)
    dummy_inputs = tuple(
        torch.randn(size=(1, model_def['Inputs'][key]['ntot'], model_def['Inputs'][key]['dim']))
        for key in input_order)
    dynamic_axes = {key: {0: 'batch_size'} for key in input_order}
    output_names = output_order

    torch.onnx.export(
        model,                     # The model to be exported
        dummy_inputs,              # Tuple of inputs matching the forward signature
        model_path,                # File path to save the ONNX model
        export_params=True,        # Store the trained parameters in the model file
        opset_version=12,          # ONNX opset to target
        do_constant_folding=True,  # Optimize constant folding for inference
        input_names=input_names,   # Input names as they will appear in ONNX
        output_names=output_names, # Output names
        dynamic_axes=dynamic_axes
    )
|
|
245
|
+
def import_onnx_model(name, model_folder):
    """Create an onnxruntime inference session for '<model_folder>/<name>.onnx'."""
    import onnxruntime as ort
    return ort.InferenceSession(os.path.join(model_folder, name + '.onnx'))
|
|
249
|
+
|
|
250
|
+
# import numpy as np
|
|
251
|
+
# ort_sess = ort.InferenceSession("./TODO/mauro/net.onnx")
|
|
252
|
+
# prova = {'Vo': np.array([[20.0]], dtype=np.float32),
|
|
253
|
+
# 'VL': np.array([[[50.0], [50.0], [50.2], [30.1], [15.0]]], dtype=np.float32)}
|
|
254
|
+
# # ortvalue = ort.OrtValue.ortvalue_from_numpy(prova)
|
|
255
|
+
# outputs = ort_sess.run(None, prova)
|
|
256
|
+
# # Print Result
|
|
257
|
+
# print(outputs)
|
|
258
|
+
#
|
|
259
|
+
# session = onnxruntime.InferenceSession(onnx_path)
|
|
260
|
+
# # Get input and output names
|
|
261
|
+
# input_names = [item.name for item in session.get_inputs()]
|
|
262
|
+
# output_names = [item.name for item in session.get_outputs()]
|
|
263
|
+
# # input_name = session.get_inputs()#[0].name
|
|
264
|
+
# # output_name = session.get_outputs()[0].name
|
|
265
|
+
#
|
|
266
|
+
# print('input_name: ', input_names)
|
|
267
|
+
# print('output_name: ', output_names)
|
|
268
|
+
#
|
|
269
|
+
# # Run inference
|
|
270
|
+
# result = session.run([output_names], {input_names: data})
|
|
271
|
+
# # Print the result
|
|
272
|
+
# print(result)
|
|
273
|
+
#
|
|
274
|
+
# import onnx
|
|
275
|
+
|
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
import os
|
|
2
|
+
|
|
3
|
+
from datetime import datetime
|
|
4
|
+
|
|
5
|
+
from nnodely.visualizer import Visualizer
|
|
6
|
+
|
|
7
|
+
class Exporter():
    """Base exporter: prepares the workspace folder and holds a visualizer.

    Subclasses (e.g. StandardExporter) override the save/load/export hooks
    below; this base class implements them as no-ops.
    """

    def __init__(self, workspace = None, visualizer = None, save_history = False):
        # Export parameters.
        # BUGFIX: default both attributes so accessors fail with a clear
        # None instead of an AttributeError when no workspace is configured.
        self.workspace = workspace
        self.workspace_folder = None
        if workspace is not None:
            os.makedirs(self.workspace, exist_ok=True)
            if save_history:
                # Keep every export run in its own timestamped sub-folder.
                self.folder = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
                self.workspace_folder = os.path.join(self.workspace, self.folder)
            else:
                self.workspace_folder = self.workspace
            os.makedirs(self.workspace_folder, exist_ok=True)

        # Fall back to the base (silent) visualizer when none is supplied.
        if visualizer is not None:
            self.visualizer = visualizer
        else:
            self.visualizer = Visualizer()

    def saveTorchModel(self, model, name = 'net', model_folder = None):
        """Hook: persist the torch state dict. No-op in the base class."""
        pass

    def loadTorchModel(self, name = 'net', model_folder = None):
        """Hook: load a torch state dict. No-op in the base class."""
        pass

    def saveModel(self, model, name = 'net', model_folder = None):
        """Hook: persist the JSON model definition. No-op in the base class."""
        pass

    def loadModel(self, name = 'net', model_folder = None):
        """Hook: load a JSON model definition. No-op in the base class."""
        pass

    def exportPythonModel(self, name = 'net', model_folder = None):
        """Hook: export the model as a python file. No-op in the base class."""
        pass

    def importPythonModel(self, name = 'net', model_folder = None):
        """Hook: import a previously exported python model. No-op in the base class."""
        pass

    def exportReport(self, name = 'net', model_folder = None):
        """Hook: export a training report. No-op in the base class."""
        pass
|
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
import io
|
|
2
|
+
|
|
3
|
+
import matplotlib.pyplot as plt
|
|
4
|
+
from reportlab.lib.pagesizes import letter
|
|
5
|
+
from reportlab.pdfgen import canvas
|
|
6
|
+
from reportlab.lib.utils import ImageReader
|
|
7
|
+
|
|
8
|
+
from mplplots import plots
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class Reporter:
    """Render the training results of a nnodely object to a PDF report.

    One page per minimizer with its training (and optional validation)
    loss curve, then one page per minimizer with the prediction plots for
    every loaded dataset.  Uses matplotlib for the figures and reportlab
    for PDF assembly.
    """

    def __init__(self, n4m):
        # n4m: the trained nnodely object; this class reads its
        # .model_def, .training and .prediction attributes —
        # TODO(review): confirm the expected structure of those dicts.
        self.n4m = n4m

    def exportReport(self, report_path):
        """Build the PDF report at *report_path* from self.n4m's results."""
        c = canvas.Canvas(report_path, pagesize=letter)
        width, height = letter

        # One page per minimizer: training loss (plus validation if present).
        for key, value in self.n4m.model_def['Minimizers'].items():
            fig = plt.figure(figsize=(10, 5))
            ax = fig.add_subplot(111)
            if 'val' in self.n4m.training[key]:
                plots.plot_training(ax, f"Training Loss of {key}", key, self.n4m.training[key]['train'], self.n4m.training[key]['val'])
            else:
                plots.plot_training(ax, f"Training Loss of {key}", key, self.n4m.training[key]['train'])
            # Render the figure into an in-memory PNG and place it on the page.
            training = io.BytesIO()
            plt.savefig(training, format='png')
            training.seek(0)
            plt.close()
            c.drawString(100, height - 30, f"Training Loss of {key}")
            c.drawImage(ImageReader(training), 50, height - 290, width=500, height=250)
            c.showPage()

        # One page per minimizer: prediction plots, one per dataset,
        # stacked down the page.
        for key in self.n4m.model_def['Minimizers'].keys():
            c.drawString(100, height - 30, f"Prediction of {key}")
            for ind, name_data in enumerate(self.n4m.prediction.keys()):
                fig = plt.figure(figsize=(10, 5))
                ax = fig.add_subplot(111)
                plots.plot_results(ax, name_data, key, self.n4m.prediction[name_data][key]['A'],
                                   self.n4m.prediction[name_data][key]['B'], self.n4m.model_def['Info']["SampleTime"])
                # Add a text box with correlation coefficient
                results = io.BytesIO()
                plt.savefig(results, format='png')
                results.seek(0)
                plt.close()
                # 245pt vertical offset per dataset keeps the images stacked.
                c.drawImage(ImageReader(results), 50, height - 290 - 245*ind, width=500, height=250)
            c.showPage()
        c.save()
|
|
@@ -0,0 +1,108 @@
|
|
|
1
|
+
import os, torch
|
|
2
|
+
|
|
3
|
+
from nnodely.exporter.exporter import Exporter
|
|
4
|
+
from nnodely.exporter.reporter import Reporter
|
|
5
|
+
from nnodely.exporter.export import save_model, load_model, export_python_model, export_pythononnx_model, export_onnx_model, import_python_model, import_onnx_model
|
|
6
|
+
from nnodely.utils import check
|
|
7
|
+
|
|
8
|
+
class StandardExporter(Exporter):
    """Concrete exporter handling JSON, torch, python and ONNX formats."""

    def __init__(self, workspace=None, visualizer=None, save_history=False):
        super().__init__(workspace, visualizer, save_history)

    def getWorkspace(self):
        """Return the folder all exports are written to by default."""
        return self.workspace_folder

    def saveTorchModel(self, model, name = 'net', model_folder = None):
        """Save the torch state_dict of *model* as '<name>.pt'."""
        file_name = name + ".pt"
        model_path = os.path.join(self.workspace_folder, file_name) if model_folder is None else os.path.join(model_folder,file_name)
        torch.save(model.state_dict(), model_path)
        self.visualizer.saveModel('Torch Model', model_path)

    def loadTorchModel(self, model, name = 'net', model_folder = None): #TODO, model = None):
        """Load the state_dict '<name>.pt' into *model* (in place)."""
        file_name = name + ".pt"
        model_path = os.path.join(self.workspace_folder, file_name) if model_folder is None else os.path.join(model_folder,file_name)
        model.load_state_dict(torch.load(model_path))
        self.visualizer.loadModel('Torch Model',model_path)

    def saveModel(self, model_def, name = 'net', model_folder = None):
        """Save the JSON model definition as '<name>.json'."""
        # Combine the folder path and file name to form the complete file path
        model_folder = self.workspace_folder if model_folder is None else model_folder
        file_name = name + ".json"
        model_path = os.path.join(model_folder, file_name)
        save_model(model_def, model_path)
        self.visualizer.saveModel('JSON Model', model_path)

    def loadModel(self, name = 'net', model_folder = None):
        """Load and return the JSON model definition '<name>.json'.

        Raises RuntimeError (via check) when the file is missing or malformed.
        """
        model_folder = self.workspace_folder if model_folder is None else model_folder
        model_def = None
        try:
            file_name = name + ".json"
            model_path = os.path.join(model_folder, file_name)
            model_def = load_model(model_path)
            self.visualizer.loadModel('JSON Model', model_path)
        except Exception as e:
            check(False, RuntimeError, f"The file {model_path} it is not found or not conformed.\n Error: {e}")
        return model_def

    def exportPythonModel(self, model_def, model, name = 'net', model_folder = None):
        """Export the traced *model* as a standalone python file '<name>.py'."""
        file_name = name + ".py"
        model_path = os.path.join(self.workspace_folder, file_name) if model_folder is None else os.path.join(model_folder, file_name)
        ## Export to python file
        export_python_model(model_def.json, model, model_path)
        self.visualizer.exportModel('Python Torch Model', model_path)

    def importPythonModel(self, name = 'net', model_folder = None):
        """Import '<name>.py' and return a fresh TracerModel instance.

        Raises RuntimeError (via check) when the module cannot be imported.
        """
        model = None
        try:
            model_folder = self.workspace_folder if model_folder is None else model_folder
            model = import_python_model(name, model_folder)
            self.visualizer.importModel('Python Torch Model', os.path.join(model_folder,name+'.py'))
        except Exception as e:
            check(False, RuntimeError, f"The module {name} it is not found in the folder {model_folder}.\nError: {e}")
        return model

    def exportONNX(self, model_def, model, inputs_order, outputs_order, name = 'net', model_folder = None, ):
        """Export *model* to ONNX under '<workspace>/onnx/'.

        First exports an ONNX-compatible python model, then converts it to
        a '.onnx' file with torch.onnx.export.  Input/output orderings must
        match the model definition exactly.
        """
        check(set(inputs_order) == set(model_def['Inputs'].keys()), ValueError,
              f'The inputs are not the same as the model inputs ({model_def["Inputs"].keys()}).')
        check(set(outputs_order) == set(model_def['Outputs'].keys()), ValueError,
              f'The outputs are not the same as the model outputs ({model_def["Outputs"].keys()}).')
        file_name = name + ".py"
        model_folder = self.workspace_folder if model_folder is None else model_folder
        model_folder = os.path.join(model_folder, 'onnx')
        os.makedirs(model_folder, exist_ok=True)
        model_path = os.path.join(model_folder, file_name)
        onnx_python_model_path = model_path.replace('.py', '_onnx.py')
        onnx_model_path = model_path.replace('.py', '.onnx')
        ## Export to python file (onnx compatible)
        export_python_model(model_def, model, model_path)
        self.visualizer.exportModel('Python Torch Model', model_path)
        export_pythononnx_model(inputs_order, outputs_order, model_path, onnx_python_model_path)
        self.visualizer.exportModel('Python Onnx Torch Model', onnx_python_model_path)
        ## Export to onnx file (onnx compatible)
        model = import_python_model(file_name.replace('.py', '_onnx'), model_folder)
        export_onnx_model(model_def, model, inputs_order, outputs_order, onnx_model_path)
        self.visualizer.exportModel('Onnx Model', onnx_model_path)

    def importONNX(self, name = 'net', model_folder = None):
        """Create an onnxruntime session for '<name>.onnx' and return it.

        BUGFIX vs the original: 'model' is initialized so the return cannot
        raise UnboundLocalError, and the undefined 'log' reference (NameError
        at runtime) is replaced with check() error reporting, matching
        importPythonModel.
        """
        model = None
        try:
            model_folder = self.workspace_folder if model_folder is None else model_folder
            model = import_onnx_model(name, model_folder)
            self.visualizer.importModel('Onnx Model', os.path.join(model_folder,name+'.py'))
        except Exception as e:
            check(False, RuntimeError, f"The module {name} it is not found in the folder {model_folder}.\nError: {e}")
        return model

    def exportReport(self, n4m, name = 'net', model_folder = None):
        """Render the training report '<name>.pdf' via Reporter."""
        # Combine the folder path and file name to form the complete file path
        model_folder = self.workspace_folder if model_folder is None else model_folder
        file_name = name + ".pdf"
        report_path = os.path.join(model_folder, file_name)
        reporter = Reporter(n4m)
        reporter.exportReport(report_path)
        self.visualizer.exportReport('Training Results', report_path)
+
|
nnodely/fir.py
ADDED
|
@@ -0,0 +1,150 @@
|
|
|
1
|
+
import copy, inspect, textwrap, torch
|
|
2
|
+
|
|
3
|
+
import torch.nn as nn
|
|
4
|
+
|
|
5
|
+
from collections.abc import Callable
|
|
6
|
+
|
|
7
|
+
from nnodely.relation import NeuObj, Stream, AutoToStream
|
|
8
|
+
from nnodely.utils import check, merge, enforce_types
|
|
9
|
+
from nnodely.model import Model
|
|
10
|
+
from nnodely.parameter import Parameter
|
|
11
|
+
from nnodely.input import Input
|
|
12
|
+
|
|
13
|
+
fir_relation_name = 'Fir'
|
|
14
|
+
|
|
15
|
+
class Fir(NeuObj, AutoToStream):
    """Fir relation: linear combination of a scalar input over its time window.

    Calling the object on a Stream returns a Stream of dimension
    {'dim': output_dimension, 'sw': 1}.  The weight (and optional bias)
    can be created fresh, shared by name (str), or supplied as Parameter
    objects; custom initializer functions may be attached to both.
    """

    @enforce_types
    def __init__(self, output_dimension:int|None = None,
                 parameter_init:Callable|None = None,
                 parameter_init_params:dict|None = None,
                 bias_init:Callable|None = None,
                 bias_init_params:dict|None = None,
                 parameter:Parameter|str|None = None,
                 bias:bool|str|Parameter|None = None,
                 dropout:int|float = 0):

        self.relation_name = fir_relation_name
        self.parameter_init = parameter_init
        self.parameter_init_params = parameter_init_params
        self.parameter = parameter
        self.bias_init = bias_init
        self.bias_init_params = bias_init_params
        self.bias = bias
        self.pname = None  # json key of the weight parameter
        self.bname = None  # json key of the bias parameter (if any)
        self.dropout = dropout
        super().__init__('P' + fir_relation_name + str(NeuObj.count))

        # Resolve the weight parameter: fresh, shared by name, or given object.
        if parameter is None:
            self.output_dimension = 1 if output_dimension is None else output_dimension
            self.pname = self.name + 'p'
            self.json['Parameters'][self.pname] = {'dim': self.output_dimension}
        elif type(parameter) is str:
            self.output_dimension = 1 if output_dimension is None else output_dimension
            self.pname = parameter
            self.json['Parameters'][self.pname] = {'dim': self.output_dimension}
        else:
            check(type(parameter) is Parameter, TypeError, 'Input parameter must be of type Parameter')
            check(len(parameter.dim) == 2,ValueError,f"The values of the parameters must be have two dimensions (tw/sample_rate or sw,output_dimension).")
            if output_dimension is None:
                check(type(parameter.dim['dim']) is int, TypeError, 'Dimension of the parameter must be an integer for the Fir')
                self.output_dimension = parameter.dim['dim']
            else:
                self.output_dimension = output_dimension
                check(parameter.dim['dim'] == self.output_dimension, ValueError, 'output_dimension must be equal to dim of the Parameter')
            self.pname = parameter.name
            self.json['Parameters'][self.pname] = copy.deepcopy(parameter.json['Parameters'][parameter.name])

        # Resolve the optional bias the same way (Parameter / name / True).
        if bias is not None:
            check(type(bias) is Parameter or type(bias) is bool or type(bias) is str, TypeError, 'The "bias" must be of type Parameter, bool or str.')
            if type(bias) is Parameter:
                check(type(bias.dim['dim']) is int, ValueError, 'The "bias" dimensions must be an integer.')
                if output_dimension is not None:
                    check(bias.dim['dim'] == output_dimension, ValueError,
                          'output_dimension must be equal to the dim of the "bias".')
                self.bname = bias.name
                self.json['Parameters'][bias.name] = copy.deepcopy(bias.json['Parameters'][bias.name])
            elif type(bias) is str:
                self.bname = bias
                self.json['Parameters'][self.bname] = { 'dim': self.output_dimension }
            else:
                self.bname = self.name + 'b'
                self.json['Parameters'][self.bname] = { 'dim': self.output_dimension }

    def __call__(self, obj:Stream) -> Stream:
        """Apply the Fir relation to a scalar Stream and return the output Stream.

        Raises via check() on non-Stream inputs, non-scalar input dimension,
        or window mismatches with an explicitly supplied Parameter.
        """
        stream_name = fir_relation_name + str(Stream.count)
        check(type(obj) is not Input, TypeError,
              f"The type of {obj.name} is Input not a Stream create a Stream using the functions: tw, sw, z, last, next.")
        check(type(obj) is Stream, TypeError,
              f"The type of {obj} is {type(obj)} and is not supported for Fir operation.")
        check('dim' in obj.dim and obj.dim['dim'] == 1, ValueError, f"Input dimension is {obj.dim['dim']} and not scalar")
        window = 'tw' if 'tw' in obj.dim else ('sw' if 'sw' in obj.dim else None)
        if window:
            if type(self.parameter) is Parameter:
                # An explicit Parameter must already carry a matching window.
                check(window in self.json['Parameters'][self.pname],
                      KeyError,
                      f"The window \'{window}\' of the input is not in the parameter")
                check(self.json['Parameters'][self.pname][window] == obj.dim[window],
                      ValueError,
                      f"The window \'{window}\' of the input must be the same of the parameter")
            else:
                self.json['Parameters'][self.pname][window] = obj.dim[window]
        else:
            if type(self.parameter) is Parameter:
                # BUGFIX: was self.json['Parameters'][self.nampe] — an
                # AttributeError at runtime; the weight entry is keyed by pname.
                cond = 'sw' not in self.json['Parameters'][self.pname] and 'tw' not in self.json['Parameters'][self.pname]
                check(cond, KeyError,'The parameter have a time window and the input no')

        # Serialize the optional initializer functions into the json definition.
        if self.parameter_init is not None:
            check('values' not in self.json['Parameters'][self.pname], ValueError, f"The parameter {self.pname} is already initialized.")
            check(inspect.isfunction(self.parameter_init), ValueError,
                  f"The parameter_init parameter must be a function.")
            code = textwrap.dedent(inspect.getsource(self.parameter_init)).replace('\"', '\'')
            self.json['Parameters'][self.pname]['init_fun'] = {'code' : code, 'name' : self.parameter_init.__name__}
            if self.parameter_init_params is not None:
                self.json['Parameters'][self.pname]['init_fun']['params'] = self.parameter_init_params

        if self.bias_init is not None:
            check(self.bname is not None, ValueError,f"The bias is missing.")
            check('values' not in self.json['Parameters'][self.bname], ValueError, f"The parameter {self.bname} is already initialized.")
            check(inspect.isfunction(self.bias_init), ValueError,
                  f"The bias_init parameter must be a function.")
            code = textwrap.dedent(inspect.getsource(self.bias_init)).replace('\"', '\'')
            self.json['Parameters'][self.bname]['init_fun'] = { 'code' : code, 'name' : self.bias_init.__name__ }
            if self.bias_init_params is not None:
                self.json['Parameters'][self.bname]['init_fun']['params'] = self.bias_init_params

        stream_json = merge(self.json,obj.json)
        stream_json['Relations'][stream_name] = [fir_relation_name, [obj.name], self.pname, self.bname, self.dropout]
        return Stream(stream_name, stream_json,{'dim':self.output_dimension, 'sw': 1})
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
class Fir_Layer(nn.Module):
    """Applies a learned FIR filter to a scalar windowed signal.

    Input shape:  [batch, window, 1]
    Output shape: [batch, 1, output_features]
    """

    def __init__(self, weights, bias=None, dropout=0):
        super(Fir_Layer, self).__init__()
        # Dropout layer only when a positive rate is requested.
        self.dropout = nn.Dropout(p=dropout) if dropout > 0 else None
        self.weights = weights
        self.bias = bias

    def forward(self, x):
        n_batch = x.size(0)
        n_out = self.weights.size(1)
        # Drop the trailing scalar channel: [batch, window, 1] -> [batch, window].
        flat = x.squeeze(-1)
        # Linear combination over the window ([batch, window] @ [window, out]),
        # reshaped to a single-sample window on the output side.
        out = torch.matmul(flat, self.weights).view(n_batch, 1, n_out)
        if self.bias is not None:
            out = out + self.bias
        if self.dropout is not None:
            out = self.dropout(out)
        return out
|
|
147
|
+
def createFir(self, *inputs):
    # Factory registered on Model: inputs are (weights, bias, dropout) as
    # stored in a 'Fir' relation entry of the model definition.
    return Fir_Layer(weights=inputs[0], bias=inputs[1], dropout=inputs[2])

# Expose the factory as Model.Fir so the network builder can instantiate
# the layer from the relation name.
setattr(Model, fir_relation_name, createFir)
|