nnodely 0.14.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44) hide show
  1. mplplots/__init__.py +0 -0
  2. mplplots/plots.py +131 -0
  3. nnodely/__init__.py +42 -0
  4. nnodely/activation.py +85 -0
  5. nnodely/arithmetic.py +203 -0
  6. nnodely/earlystopping.py +81 -0
  7. nnodely/exporter/__init__.py +3 -0
  8. nnodely/exporter/export.py +275 -0
  9. nnodely/exporter/exporter.py +45 -0
  10. nnodely/exporter/reporter.py +48 -0
  11. nnodely/exporter/standardexporter.py +108 -0
  12. nnodely/fir.py +150 -0
  13. nnodely/fuzzify.py +221 -0
  14. nnodely/initializer.py +31 -0
  15. nnodely/input.py +131 -0
  16. nnodely/linear.py +130 -0
  17. nnodely/localmodel.py +82 -0
  18. nnodely/logger.py +94 -0
  19. nnodely/loss.py +30 -0
  20. nnodely/model.py +263 -0
  21. nnodely/modeldef.py +205 -0
  22. nnodely/nnodely.py +1295 -0
  23. nnodely/optimizer.py +91 -0
  24. nnodely/output.py +23 -0
  25. nnodely/parameter.py +103 -0
  26. nnodely/parametricfunction.py +329 -0
  27. nnodely/part.py +201 -0
  28. nnodely/relation.py +149 -0
  29. nnodely/trigonometric.py +67 -0
  30. nnodely/utils.py +101 -0
  31. nnodely/visualizer/__init__.py +4 -0
  32. nnodely/visualizer/dynamicmpl/functionplot.py +34 -0
  33. nnodely/visualizer/dynamicmpl/fuzzyplot.py +31 -0
  34. nnodely/visualizer/dynamicmpl/resultsplot.py +28 -0
  35. nnodely/visualizer/dynamicmpl/trainingplot.py +46 -0
  36. nnodely/visualizer/mplnotebookvisualizer.py +66 -0
  37. nnodely/visualizer/mplvisualizer.py +215 -0
  38. nnodely/visualizer/textvisualizer.py +320 -0
  39. nnodely/visualizer/visualizer.py +84 -0
  40. nnodely-0.14.0.dist-info/LICENSE +21 -0
  41. nnodely-0.14.0.dist-info/METADATA +401 -0
  42. nnodely-0.14.0.dist-info/RECORD +44 -0
  43. nnodely-0.14.0.dist-info/WHEEL +5 -0
  44. nnodely-0.14.0.dist-info/top_level.txt +2 -0
nnodely/part.py ADDED
@@ -0,0 +1,201 @@
1
+ import copy
2
+
3
+ import torch.nn as nn
4
+
5
+ from nnodely.relation import ToStream, Stream
6
+ from nnodely.model import Model
7
+ from nnodely.utils import check, enforce_types
8
+
9
+ part_relation_name = 'Part'
10
+ select_relation_name = 'Select'
11
+ timepart_relation_name = 'TimePart'
12
+ timeselect_relation_name = 'TimeSelect'
13
+ samplepart_relation_name = 'SamplePart'
14
+ sampleselect_relation_name = 'SampleSelect'
15
+
16
class Part(Stream, ToStream):
    """Select the slice [i, j) of the feature (third) dimension of a Stream.

    The resulting Stream has ``dim = j - i``; all window keys are preserved.

    Raises:
        ValueError:  when i >= j (a slice must be non-empty).
        IndexError:  when i or j fall outside [0, obj.dim['dim']].
    """
    @enforce_types
    def __init__(self, obj:Stream, i:int, j:int):
        # Consistency with SamplePart/TimePart: a valid slice needs i < j
        # (otherwise dim['dim'] below would silently become <= 0).
        check(i < j, ValueError, 'i must be smaller than j')
        check(i >= 0 and j > 0 and i < obj.dim['dim'] and j <= obj.dim['dim'],
              IndexError,
              f"i={i} or j={j} are not in the range [0,{obj.dim['dim']}]")
        dim = copy.deepcopy(obj.dim)
        dim['dim'] = j - i
        super().__init__(part_relation_name + str(Stream.count), obj.json, dim)
        self.json['Relations'][self.name] = [part_relation_name, [obj.name], [i, j]]
28
+
29
class Part_Layer(nn.Module):
    """Torch module slicing the feature (third) axis to [i, j)."""
    @enforce_types
    def __init__(self, i:int, j:int):
        super().__init__()
        self.i = i
        self.j = j

    def forward(self, x):
        # Inputs must be at least (batch, window, features).
        assert x.ndim >= 3, 'The Part Relation Works only for 3D inputs'
        lo, hi = self.i, self.j
        return x[:, :, lo:hi]
38
+
39
## Select elements on the third dimension in the range [i,j]
# Factory registered on Model: builds the Part layer from the [i, j] pair
# stored in the JSON relation parameters.
def createPart(self, *inputs):
    return Part_Layer(i=inputs[0][0], j=inputs[0][1])
42
+
43
class Select(Stream, ToStream):
    """Pick the single element i of the feature (third) dimension.

    The resulting Stream has dim = 1; i must lie in [0, obj.dim['dim']).
    """

    @enforce_types
    def __init__(self, obj:Stream, i:int):
        check(0 <= i < obj.dim['dim'],
              IndexError,
              f"i={i} are not in the range [0,{obj.dim['dim']}]")
        dim = copy.deepcopy(obj.dim)
        dim['dim'] = 1
        super().__init__(select_relation_name + str(Stream.count), obj.json, dim)
        self.json['Relations'][self.name] = [select_relation_name, [obj.name], i]
56
+
57
class Select_Layer(nn.Module):
    """Torch module keeping only channel `idx` of the third axis (size-1 slice)."""
    def __init__(self, idx):
        super().__init__()
        self.idx = idx

    def forward(self, x):
        # Slice (not index) so the selected axis is kept with length 1.
        start = self.idx
        return x[:, :, start:start + 1]
65
+
66
## Select an element i on the third dimension
# Factory registered on Model: builds the Select layer from the index stored
# in the JSON relation parameters.
def createSelect(self, *inputs):
    return Select_Layer(idx=inputs[0])
69
+
70
class SamplePart(Stream, ToStream):
    """Extract the samples [i, j) from the sample window of `obj`.

    `i`/`j` use the input's own (possibly negative, past-to-future) 'sw'
    window convention; `offset`, when given, names a sample inside [i, j)
    that the layer subtracts from the whole window before slicing
    (re-referencing, see SamplePart_Layer).
    """
    @enforce_types
    def __init__(self, obj:Stream, i:int, j:int, offset:int|None = None):
        check('sw' in obj.dim, KeyError, 'Input must have a sample window')
        check(i < j, ValueError, 'i must be smaller than j')
        # Inputs/States carry an explicit [backward, forward] window; derived
        # streams only expose the total width, so their range starts at 0.
        all_inputs = obj.json['Inputs'] | obj.json['States']
        if obj.name in all_inputs:
            backward_idx = all_inputs[obj.name]['sw'][0]
            forward_idx = all_inputs[obj.name]['sw'][1]
        else:
            backward_idx = 0
            forward_idx = obj.dim['sw']
        check(i >= backward_idx and i < forward_idx, ValueError, 'i must be in the sample window of the input')
        check(j > backward_idx and j <= forward_idx, ValueError, 'j must be in the sample window of the input')
        dim = copy.deepcopy(obj.dim)
        dim['sw'] = j - i
        super().__init__(samplepart_relation_name + str(Stream.count),obj.json,dim)
        # Relation payload: [name, [input], [i, j]] plus the optional offset.
        rel = [samplepart_relation_name,[obj.name],[i,j]]
        if offset is not None:
            check(i <= offset < j, IndexError,"The offset must be inside the sample window")
            rel.append(offset)
        self.json['Relations'][self.name] = rel
94
+
95
class SamplePart_Layer(nn.Module):
    """Torch module slicing the sample (second) axis to [back, forw).

    When `offset` is not None, the sample at that index is first subtracted
    from every sample (re-referencing the window) before slicing.
    """
    def __init__(self, part, offset):
        super().__init__()
        self.back = part[0]
        self.forw = part[1]
        self.offset = offset

    def forward(self, x):
        if self.offset is None:
            return x[:, self.back:self.forw]
        rebased = x - x[:, self.offset].unsqueeze(1)
        return rebased[:, self.back:self.forw]
105
+
106
def createSamplePart(self, *inputs):
    """Factory for SamplePart_Layer; inputs = ([i, j], offset?) from the JSON relation."""
    offset = inputs[1] if len(inputs) > 1 else None
    return SamplePart_Layer(part=inputs[0], offset=offset)
111
+
112
class SampleSelect(Stream, ToStream):
    """Pick the single sample at index i from the input's sample window.

    The 'sw' key is removed from the resulting dimensions.
    """

    @enforce_types
    def __init__(self, obj:Stream, i:int):
        check('sw' in obj.dim, KeyError, 'Input must have a sample window')
        window = obj.dim['sw']
        check(0 <= i < window, ValueError, 'i must be in the sample window of the input')
        dim = copy.deepcopy(obj.dim)
        del dim['sw']
        super().__init__(sampleselect_relation_name + str(Stream.count), obj.json, dim)
        self.json['Relations'][self.name] = [sampleselect_relation_name, [obj.name], i]
126
+
127
class SampleSelect_Layer(nn.Module):
    """Torch module keeping only sample `idx` of the second axis (size-1 slice)."""
    def __init__(self, idx):
        super().__init__()
        self.idx = idx

    def forward(self, x):
        lo, hi = self.idx, self.idx + 1
        return x[:, lo:hi, :]
135
+
136
# Factory registered on Model: builds the SampleSelect layer from the index
# stored in the JSON relation parameters.
def createSampleSelect(self, *inputs):
    return SampleSelect_Layer(idx=inputs[0])
138
+
139
class TimePart(Stream, ToStream):
    """Extract the time span [i, j) from the time window of `obj`.

    `i`/`j` use the input's own (possibly negative, past-to-future) 'tw'
    window convention; `offset`, when given, marks an instant inside [i, j)
    that the layer subtracts from the whole window before slicing
    (re-referencing, see TimePart_Layer).
    """
    @enforce_types
    def __init__(self, obj:Stream, i:int|float, j:int|float, offset:int|float|None = None):
        check(type(obj) is Stream, TypeError,
              f"The type of {obj} is {type(obj)} and is not supported for TimePart operation.")
        check('tw' in obj.dim, KeyError, 'Input must have a time window')
        check(i < j, ValueError, 'i must be smaller than j')
        # Inputs/States carry an explicit [backward, forward] window; derived
        # streams only expose the total width, so their range starts at 0.
        all_inputs = obj.json['Inputs'] | obj.json['States']
        if obj.name in all_inputs:
            backward_idx = all_inputs[obj.name]['tw'][0]
            forward_idx = all_inputs[obj.name]['tw'][1]
        else:
            backward_idx = 0
            forward_idx = obj.dim['tw']
        check(i >= backward_idx and i < forward_idx, ValueError, 'i must be in the time window of the input')
        check(j > backward_idx and j <= forward_idx, ValueError, 'j must be in the time window of the input')
        dim = copy.deepcopy(obj.dim)
        dim['tw'] = j - i
        super().__init__(timepart_relation_name + str(Stream.count),obj.json,dim)
        # Relation payload: [name, [input], [i, j]] plus the optional offset.
        rel = [timepart_relation_name,[obj.name],[i,j]]
        if offset is not None:
            check(i <= offset < j, IndexError,"The offset must be inside the time window")
            rel.append(offset)
        self.json['Relations'][self.name] = rel
163
+
164
class TimePart_Layer(nn.Module):
    """Torch module slicing the time (second) axis to [back, forw).

    When `offset` is not None, the sample at that index is first subtracted
    from the whole window (re-referencing) before slicing.
    """
    def __init__(self, part, offset):
        super().__init__()
        self.back = part[0]
        self.forw = part[1]
        self.offset = offset

    def forward(self, x):
        if self.offset is None:
            return x[:, self.back:self.forw]
        rebased = x - x[:, self.offset].unsqueeze(1)
        return rebased[:, self.back:self.forw]
174
+
175
def createTimePart(self, *inputs):
    """Factory for TimePart_Layer; inputs = ([back, forw], offset?) from the JSON relation."""
    offset = inputs[1] if len(inputs) > 1 else None
    return TimePart_Layer(part=inputs[0], offset=offset)
180
+
181
class TimeSelect(Stream, ToStream):
    """Pick the single time instant i from the input's time window.

    The 'tw' key is removed from the resulting dimensions.
    """

    @enforce_types
    def __init__(self, obj:Stream, i:int|float):
        check('tw' in obj.dim, KeyError, 'Input must have a time window')
        backward_idx = 0
        forward_idx = obj.dim['tw']
        check(i >= backward_idx and i < forward_idx, ValueError, 'i must be in the time window of the input')
        dim = copy.deepcopy(obj.dim)
        del dim['tw']
        super().__init__(timeselect_relation_name + str(Stream.count),obj.json,dim)
        # NOTE(review): unlike the sibling relations, the JSON entry is only
        # written for exact Stream instances; for a Stream subclass the
        # relation would be silently skipped — confirm this guard is intended.
        if (type(obj) is Stream):
            self.json['Relations'][self.name] = [timeselect_relation_name,[obj.name],i]
194
+
195
# Register the factory functions on Model so the graph builder can resolve
# relation names (e.g. 'Part') stored in the JSON to layer constructors.
setattr(Model, part_relation_name, createPart)
setattr(Model, select_relation_name, createSelect)

setattr(Model, samplepart_relation_name, createSamplePart)
setattr(Model, sampleselect_relation_name, createSampleSelect)

setattr(Model, timepart_relation_name, createTimePart)
# NOTE(review): no factory is registered for timeselect_relation_name
# ('TimeSelect') — confirm the model resolves TimeSelect relations elsewhere.
nnodely/relation.py ADDED
@@ -0,0 +1,149 @@
1
+ import copy
2
+
3
+ import numpy as np
4
+
5
+ from nnodely.utils import check, merge
6
+
7
+ from nnodely.logger import logging, nnLogger
8
+ log = nnLogger(__name__, logging.CRITICAL)
9
+
10
+ MAIN_JSON = {
11
+ 'Info' : {},
12
+ 'Inputs' : {},
13
+ 'States' : {},
14
+ 'Constants': {},
15
+ 'Parameters' : {},
16
+ 'Functions' : {},
17
+ 'Relations': {},
18
+ 'Outputs': {}
19
+ }
20
+
21
+ CHECK_NAMES = True
22
+ NeuObj_names = []
23
+
24
def toStream(obj):
    """Coerce numbers, lists, arrays, Parameters and Constants into a Stream.

    Anything else (in particular an existing Stream) is returned unchanged.
    """
    # Local import breaks the circular dependency with nnodely.parameter.
    from nnodely.parameter import Parameter, Constant
    if type(obj) in (int, float, list, np.ndarray):
        # Raw numeric data first becomes an anonymous Constant.
        obj = Constant('Constant' + str(NeuObj.count), obj)
    if type(obj) in (Parameter, Constant):
        obj = Stream(obj.name, obj.json, obj.dim)
    return obj
32
+
33
+
34
class NeuObj():
    """Base class for named graph objects; tracks a global creation counter.

    Every instance stores a deep copy of either the provided `json` skeleton
    or the empty MAIN_JSON, so objects never share mutable graph state.
    When CHECK_NAMES is enabled, duplicate names raise NameError.
    """
    count = 0

    @classmethod
    def reset_count(cls):
        # Classmethod receives the class, not an instance: name it `cls`.
        NeuObj.count = 0

    def __init__(self, name = '', json = None, dim = 0):
        # `json=None` instead of a mutable `{}` default; `if json:` below
        # treats None and {} identically, so behavior is unchanged.
        NeuObj.count += 1
        if CHECK_NAMES == True:
            # Global duplicate-name guard (can be disabled via CHECK_NAMES).
            check(name not in NeuObj_names, NameError, f"The name {name} is already used change the name of NeuObj.")
            NeuObj_names.append(name)
        self.name = name
        self.dim = dim
        if json:
            self.json = copy.deepcopy(json)
        else:
            self.json = copy.deepcopy(MAIN_JSON)
50
+
51
class Relation():
    """Mixin adding Python arithmetic operator sugar to graph relations.

    Each dunder builds the corresponding nnodely arithmetic relation; imports
    are done lazily inside the methods to avoid a circular import with
    nnodely.arithmetic.  Only left-hand operators are defined, so
    `stream + 2` works while `2 + stream` is not supported (no __radd__ etc.).
    """
    def __add__(self, obj):
        from nnodely.arithmetic import Add
        return Add(self, obj)

    def __sub__(self, obj):
        from nnodely.arithmetic import Sub
        return Sub(self, obj)

    def __truediv__(self, obj):
        from nnodely.arithmetic import Div
        return Div(self, obj)

    def __mul__(self, obj):
        from nnodely.arithmetic import Mul
        return Mul(self, obj)

    def __pow__(self, obj):
        from nnodely.arithmetic import Pow
        return Pow(self, obj)

    def __neg__(self):
        from nnodely.arithmetic import Neg
        return Neg(self)
75
+
76
class Stream(Relation):
    """A node of the computation graph: a name, its graph JSON and its dims.

    Streams are produced by relations and carry a deep copy of the whole
    graph JSON built so far.  The class-level `count` numbers streams
    globally and is used to generate unique relation names.
    """
    count = 0

    @classmethod
    def reset_count(self):
        Stream.count = 0

    def __init__(self, name, json, dim, count = 1):
        # count=0 lets wrappers create a derived view without consuming an id.
        Stream.count += count
        self.name = name
        self.json = copy.deepcopy(json)
        self.dim = dim

    def tw(self, tw, offset = None):
        """Route this stream through a hidden State and take a time window.

        NOTE(review): only integer `tw` is handled; any other type falls
        through and returns None — confirm float windows are handled upstream.
        """
        from nnodely.input import State, Connect
        from nnodely.utils import merge
        s = State(self.name+"_state",dimensions=self.dim['dim'])
        if type(tw) == int:
            out_connect = Connect(self, s)
            win_state = s.tw(tw, offset)
            return Stream(win_state.name, merge(win_state.json, out_connect.json), win_state.dim,0 )

    def sw(self, sw, offset = None):
        """Route this stream through a hidden State and take a sample window.

        NOTE(review): non-int `sw` falls through and returns None.
        """
        from nnodely.input import State, Connect
        from nnodely.utils import merge
        s = State(self.name+"_state",dimensions=self.dim['dim'])
        if type(sw) == int:
            out_connect = Connect(self, s)
            win_state = s.sw(sw, offset)
            return Stream(win_state.name, merge(win_state.json, out_connect.json), win_state.dim,0 )

    def z(self, delay):
        """Route this stream through a hidden State delayed by `delay` steps.

        NOTE(review): non-positive or non-int `delay` falls through and
        returns None.
        """
        from nnodely.input import State, Connect
        from nnodely.utils import merge
        s = State(self.name + "_state",dimensions=self.dim['dim'])
        if type(delay) == int and delay > 0:
            out_connect = Connect(self, s)
            win_state = s.z(delay)
            return Stream(win_state.name, merge(win_state.json, out_connect.json), win_state.dim,0 )

    def connect(self, obj):
        """Feed this stream into the State `obj` directly at every step."""
        from nnodely.input import State
        check(type(obj) is State, TypeError,
              f"The {obj} must be a State and not a {type(obj)}.")
        self.json = merge(self.json, obj.json)
        # NOTE(review): this guard only fails when BOTH keys are present; a
        # state that already has only 'connect' (or only 'closedLoop') passes
        # — confirm whether `and` was intended instead of `or`.
        check('closedLoop' not in self.json['States'][obj.name] or 'connect' not in self.json['States'][obj.name], KeyError,
              f"The state variable {obj.name} is already connected.")
        self.json['States'][obj.name]['connect'] = self.name
        return Stream(self.name, self.json, self.dim,0 )

    def closedLoop(self, obj):
        """Feed this stream back into the State `obj` at the next step."""
        from nnodely.input import State
        check(type(obj) is State, TypeError,
              f"The {obj} must be a State and not a {type(obj)}.")
        self.json = merge(self.json, obj.json)
        # NOTE(review): same both-keys guard as in connect() — see note there.
        check('closedLoop' not in self.json['States'][obj.name] or 'connect' not in self.json['States'][obj.name],
              KeyError,
              f"The state variable {obj.name} is already connected.")
        self.json['States'][obj.name]['closedLoop'] = self.name
        return Stream(self.name, self.json, self.dim,0 )
135
+
136
class ToStream():
    """Mixin that makes `SomeRelation(...)` evaluate to a plain Stream.

    __new__ builds the relation object, runs its __init__ explicitly, then
    returns a Stream copy.  Because the returned object is not an instance of
    `cls`, Python skips the implicit second __init__ call; count=0 since the
    relation's own __init__ already bumped Stream.count.
    """
    def __new__(cls, *args, **kwargs):
        out = super(ToStream,cls).__new__(cls)
        out.__init__(*args, **kwargs)
        return Stream(out.name,out.json,out.dim,0)
141
+
142
class AutoToStream():
    """Mixin letting a block be used as `Block()(stream)` or directly `Block(stream)`.

    When the first constructor argument is already a NeuObj/Stream, a default
    instance is built (no-arg __init__) and immediately applied to it.
    Otherwise the bare instance is returned, and Python then runs __init__
    with the original arguments as usual.
    """
    def __new__(cls, *args, **kwargs):
        if len(args) > 0 and (issubclass(type(args[0]),NeuObj) or type(args[0]) is Stream):
            instance = super().__new__(cls)
            instance.__init__()
            # NOTE(review): only args[0] is forwarded on this shortcut path;
            # extra args/kwargs are dropped — confirm this is intended.
            return instance(args[0])
        instance = super().__new__(cls)
        return instance
@@ -0,0 +1,67 @@
1
+ import torch
2
+ import torch.nn as nn
3
+
4
+ from nnodely.relation import ToStream, Stream, toStream
5
+ from nnodely.model import Model
6
+ from nnodely.utils import check
7
+
8
+ sin_relation_name = 'Sin'
9
+ cos_relation_name = 'Cos'
10
+ tan_relation_name = 'Tan'
11
+
12
class Sin(Stream, ToStream):
    """Element-wise sine relation applied to a Stream (dims unchanged)."""
    def __init__(self, obj:Stream) -> Stream:
        obj = toStream(obj)
        check(type(obj) is Stream, TypeError,
              f"The type of {obj} is {type(obj)} and is not supported for Sin operation.")
        relation_id = sin_relation_name + str(Stream.count)
        super().__init__(relation_id, obj.json, obj.dim)
        self.json['Relations'][self.name] = [sin_relation_name, [obj.name]]
19
+
20
class Cos(Stream, ToStream):
    """Element-wise cosine relation applied to a Stream (dims unchanged)."""
    def __init__(self, obj:Stream) -> Stream:
        obj = toStream(obj)
        check(type(obj) is Stream, TypeError,
              f"The type of {obj} is {type(obj)} and is not supported for Cos operation.")
        relation_id = cos_relation_name + str(Stream.count)
        super().__init__(relation_id, obj.json, obj.dim)
        self.json['Relations'][self.name] = [cos_relation_name, [obj.name]]
27
+
28
class Tan(Stream, ToStream):
    """Element-wise tangent relation applied to a Stream (dims unchanged)."""
    def __init__(self, obj:Stream) -> Stream:
        obj = toStream(obj)
        check(type(obj) is Stream, TypeError,
              f"The type of {obj} is {type(obj)} and is not supported for Tan operation.")
        relation_id = tan_relation_name + str(Stream.count)
        super().__init__(relation_id, obj.json, obj.dim)
        self.json['Relations'][self.name] = [tan_relation_name, [obj.name]]
35
+
36
+
37
class Sin_Layer(nn.Module):
    """Stateless torch module computing element-wise sin."""
    def __init__(self):
        super().__init__()

    def forward(self, x):
        return torch.sin(x)
42
+
43
# Factory registered on Model: the layer is stateless, relation inputs are ignored.
def createSin(self, *inputs):
    return Sin_Layer()
45
+
46
class Cos_Layer(nn.Module):
    """Stateless torch module computing element-wise cos."""
    def __init__(self):
        super().__init__()

    def forward(self, x):
        return torch.cos(x)
51
+
52
# Factory registered on Model: the layer is stateless, relation inputs are ignored.
def createCos(self, *inputs):
    return Cos_Layer()
54
+
55
class Tan_Layer(nn.Module):
    """Stateless torch module computing element-wise tan."""
    def __init__(self):
        super().__init__()

    def forward(self, x):
        return torch.tan(x)
60
+
61
# Factory registered on Model: the layer is stateless, relation inputs are ignored.
def createTan(self, *inputs):
    return Tan_Layer()
63
+
64
+
65
+ setattr(Model, sin_relation_name, createSin)
66
+ setattr(Model, cos_relation_name, createCos)
67
+ setattr(Model, tan_relation_name, createTan)
nnodely/utils.py ADDED
@@ -0,0 +1,101 @@
1
+ import copy, torch, inspect
2
+
3
+ from pprint import pformat
4
+ from functools import wraps
5
+ from typing import get_type_hints
6
+
7
+ from nnodely.logger import logging, nnLogger
8
+ log = nnLogger(__name__, logging.CRITICAL)
9
+
10
+ def enforce_types(func):
11
+ @wraps(func)
12
+ def wrapper(*args, **kwargs):
13
+ hints = get_type_hints(func)
14
+ all_args = kwargs.copy()
15
+ all_args.update(dict(zip(inspect.signature(func).parameters, args)))
16
+
17
+ for arg, arg_type in hints.items():
18
+ if arg in all_args and not isinstance(all_args[arg], arg_type):
19
+ raise TypeError(
20
+ f"Expected argument '{arg}' to be of type {arg_type.__name__}, but got {type(all_args[arg]).__name__}")
21
+
22
+ return func(*args, **kwargs)
23
+
24
+ return wrapper
25
+
26
# Linear interpolation function, operating on batches of input data and returning batches of output data
def linear_interp(x, x_data, y_data):
    """Batched 1-D linear interpolation.

    Args:
        x: query points, tensor of shape (N, 1, 1).
        x_data: x breakpoints sorted ascending, tensor of shape (Q, 1).
        y_data: y values at the breakpoints, tensor of shape (Q, 1).
    Returns:
        Interpolated values at x, tensor of shape (N, 1, 1).
    """
    # Saturate x to the range of x_data (flat extrapolation at the ends).
    x = torch.min(torch.max(x, x_data[0]), x_data[-1])

    # Index of the segment [x_data[idx], x_data[idx+1]] containing each query.
    # (The previous nearest-breakpoint lookup could pick the segment above a
    # query that lies closer to its upper breakpoint, extrapolating the wrong
    # segment backwards for non-collinear data.)
    idx = torch.searchsorted(x_data.squeeze(1), x[:, :, 0], right=True) - 1
    idx = torch.clamp(idx, 0, x_data.shape[0] - 2)

    # Linear interpolation inside the selected segment.
    y = y_data[idx] + (y_data[idx+1] - y_data[idx])/(x_data[idx+1] - x_data[idx])*(x - x_data[idx])
    return y
44
+
45
def tensor_to_list(data):
    """Recursively convert every torch.Tensor inside `data` to plain lists.

    Dicts, lists, tuples and torch ParameterDicts are rebuilt with converted
    values; any other type is returned unchanged.
    """
    if isinstance(data, torch.Tensor):
        # Tensors become (nested) Python lists.
        return data.tolist()
    if isinstance(data, dict):
        # Recurse into dictionaries.
        return {key: tensor_to_list(value) for key, value in data.items()}
    if isinstance(data, list):
        # Recurse into lists.
        return [tensor_to_list(item) for item in data]
    if isinstance(data, tuple):
        # Recurse into tuples, keeping the tuple type.
        return tuple(tensor_to_list(item) for item in data)
    if isinstance(data, torch.nn.modules.container.ParameterDict):
        # ParameterDict is a Module, not a dict: handled separately.
        return {key: tensor_to_list(value) for key, value in data.items()}
    # Any other type is passed through unchanged.
    return data
64
+
65
def merge(source, destination, main = True):
    """Deep-merge the graph dict `source` into `destination` and return the result.

    Only the top-level call (main=True) deep-copies `destination`; the
    recursion then mutates that copy in place.  Two-element 'tw'/'sw' window
    lists are merged by widening ([min backward, max forward]); scalars and
    missing keys are overwritten by the source value.
    """
    if main:
        log.debug("Merge Source")
        log.debug("\n"+pformat(source))
        log.debug("Merge Destination")
        log.debug("\n"+pformat(destination))
        result = copy.deepcopy(destination)
    else:
        result = destination
    for key, value in source.items():
        if isinstance(value, dict):
            # get node or create one
            node = result.setdefault(key, {})
            merge(value, node, False)
        else:
            if key in result and type(result[key]) is list:
                if key == 'tw' or key == 'sw':
                    # Widen the window so it covers both operands.
                    if result[key][0] > value[0]:
                        result[key][0] = value[0]
                    if result[key][1] < value[1]:
                        result[key][1] = value[1]
                # NOTE(review): a non-window list already in `result` is kept
                # as-is and the source value ignored — confirm intended.
            else:
                result[key] = value
    if main == True:
        log.debug("Merge Result")
        log.debug("\n" + pformat(result))
    return result
92
+
93
def check(condition, exception, string):
    """Raise `exception(string)` unless `condition` holds."""
    if condition:
        return
    raise exception(string)
96
+
97
def argmax_max(iterable):
    """Return the (index, value) pair of the largest element."""
    best = max(enumerate(iterable), key=lambda pair: pair[1])
    return best
99
+
100
def argmin_min(iterable):
    """Return the (index, value) pair of the smallest element."""
    best = min(enumerate(iterable), key=lambda pair: pair[1])
    return best
@@ -0,0 +1,4 @@
1
+ from nnodely.visualizer.visualizer import Visualizer
2
+ from nnodely.visualizer.textvisualizer import TextVisualizer
3
+ from nnodely.visualizer.mplvisualizer import MPLVisualizer
4
+ from nnodely.visualizer.mplnotebookvisualizer import MPLNotebookVisualizer
@@ -0,0 +1,34 @@
1
+ import sys, json
2
+
3
+ import matplotlib.pyplot as plt
4
+
5
+ from mplplots import plots
6
+
7
# Plot data
# Stand-alone helper process: reads one JSON record describing a parametric
# function from stdin and renders it with mplplots (2D curve when only 'x0'
# is present, 3D surface when 'x1' is present too).
line = sys.stdin.readline().strip()
name, x, x0, x1, params, output = None, None, None, None, None, None
data_point = {}
input_names = []
if line:
    try:
        # Convert to float and append to buffer
        data_point = json.loads(line)
        name = data_point['name']
        if 'x1' in data_point.keys():
            x0 = data_point['x0']
            x1 = data_point['x1']
        else:
            x = data_point['x0']
        params = data_point['params']
        input_names = data_point['input_names']
        output = data_point['output']

        if 'x1' in data_point.keys():
            # Two free inputs -> surface plot; one -> curve plot.
            plots.plot_3d_function(plt, name, x0, x1, params, output, input_names)
        else:
            plots.plot_2d_function(plt, name, x, params, output, input_names)
        plt.show()

    except ValueError:
        # Malformed JSON (JSONDecodeError is a ValueError) is silently
        # ignored: this is a best-effort viewer process.
        pass
34
+
@@ -0,0 +1,31 @@
1
+ import sys, json
2
+
3
+ import matplotlib.pyplot as plt
4
+ import matplotlib.colors as mcolors
5
+
6
+ from mplplots import plots
7
+
8
# Plot data
# Stand-alone helper process: reads one JSON record describing a fuzzification
# layer from stdin and renders its membership functions with mplplots.
line = sys.stdin.readline().strip()
name, x, y = None, None, []
chan_centers = []

if line:
    try:
        # Convert to float and append to buffer
        data_point = json.loads(line)
        name = data_point['name']
        x = data_point['x']
        chan_centers = data_point['chan_centers']
        # NOTE(review): the colour table below is computed but never used.
        tableau_colors = mcolors.TABLEAU_COLORS
        num_of_colors = len(list(tableau_colors.keys()))
        for ind, key in enumerate(data_point['y'].keys()):
            y.append(data_point['y'][key])

        fig, ax = plt.subplots()
        ax.cla()
        plots.plot_fuzzy(ax, name, x, y, chan_centers)
        plt.show()

    except ValueError:
        # Malformed JSON is silently ignored (best-effort viewer process).
        pass
@@ -0,0 +1,28 @@
1
+ import sys, json
2
+
3
+ import matplotlib.pyplot as plt
4
+
5
+ from mplplots import plots
6
+
7
# Stand-alone helper process: reads one JSON record with two prediction
# series from stdin and plots them against each other with mplplots.
line = sys.stdin.readline().strip()
key, A, B, sample_time = None, None, None, None
name_data = None
sample_time = None

if line:
    try:
        # Convert to float and append to buffer
        data_point = json.loads(line)
        name_data = data_point['name_data']
        key = data_point['key']
        A = data_point['prediction_A']
        B = data_point['prediction_B']
        sample_time = data_point['sample_time']

        fig, ax = plt.subplots()
        ax.cla()
        plots.plot_results(ax, name_data, key, A, B, sample_time)
        plt.show()

    except ValueError:
        # Malformed JSON is silently ignored (best-effort viewer process).
        pass
@@ -0,0 +1,46 @@
1
+ import sys, json
2
+
3
+ import matplotlib.pyplot as plt
4
+ import matplotlib.animation as animation
5
+ from collections import deque
6
+
7
+ from mplplots import plots
8
+
9
+ # Buffer to hold the data points
10
+ data_train = deque(maxlen=2000)
11
+ data_val = deque(maxlen=2000)
12
+ last = 1
13
+ epoch = 0
14
+ # Set up the plot
15
+ fig, ax = plt.subplots()
16
+
17
+ def update_graph(frame):
18
+ global last, title, epoch
19
+ if last > 0:
20
+ # Read data from stdin
21
+ line = sys.stdin.readline().strip()
22
+ if line:
23
+ try:
24
+ # Convert to float and append to buffer
25
+ data = json.loads(line)
26
+ data_train.append(data['train_losses'])
27
+ if data['val_losses']:
28
+ data_val.append(data['val_losses'])
29
+ title = data['title']
30
+ key = data['key']
31
+ last = data['last']
32
+ epoch = data['epoch']
33
+ # Clear the current plot
34
+ ax.cla()
35
+ # Clear the current plot
36
+ plots.plot_training(ax, title, key, data_train, data_val, last)
37
+ except ValueError:
38
+ pass
39
+ else:
40
+ pass
41
+
42
+ # Use FuncAnimation to update the plot dynamically
43
+ ani = animation.FuncAnimation(fig, update_graph, interval=10, save_count=20)
44
+
45
+ # Show the plot
46
+ plt.show(block=True)