nnodely 0.14.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44) hide show
  1. mplplots/__init__.py +0 -0
  2. mplplots/plots.py +131 -0
  3. nnodely/__init__.py +42 -0
  4. nnodely/activation.py +85 -0
  5. nnodely/arithmetic.py +203 -0
  6. nnodely/earlystopping.py +81 -0
  7. nnodely/exporter/__init__.py +3 -0
  8. nnodely/exporter/export.py +275 -0
  9. nnodely/exporter/exporter.py +45 -0
  10. nnodely/exporter/reporter.py +48 -0
  11. nnodely/exporter/standardexporter.py +108 -0
  12. nnodely/fir.py +150 -0
  13. nnodely/fuzzify.py +221 -0
  14. nnodely/initializer.py +31 -0
  15. nnodely/input.py +131 -0
  16. nnodely/linear.py +130 -0
  17. nnodely/localmodel.py +82 -0
  18. nnodely/logger.py +94 -0
  19. nnodely/loss.py +30 -0
  20. nnodely/model.py +263 -0
  21. nnodely/modeldef.py +205 -0
  22. nnodely/nnodely.py +1295 -0
  23. nnodely/optimizer.py +91 -0
  24. nnodely/output.py +23 -0
  25. nnodely/parameter.py +103 -0
  26. nnodely/parametricfunction.py +329 -0
  27. nnodely/part.py +201 -0
  28. nnodely/relation.py +149 -0
  29. nnodely/trigonometric.py +67 -0
  30. nnodely/utils.py +101 -0
  31. nnodely/visualizer/__init__.py +4 -0
  32. nnodely/visualizer/dynamicmpl/functionplot.py +34 -0
  33. nnodely/visualizer/dynamicmpl/fuzzyplot.py +31 -0
  34. nnodely/visualizer/dynamicmpl/resultsplot.py +28 -0
  35. nnodely/visualizer/dynamicmpl/trainingplot.py +46 -0
  36. nnodely/visualizer/mplnotebookvisualizer.py +66 -0
  37. nnodely/visualizer/mplvisualizer.py +215 -0
  38. nnodely/visualizer/textvisualizer.py +320 -0
  39. nnodely/visualizer/visualizer.py +84 -0
  40. nnodely-0.14.0.dist-info/LICENSE +21 -0
  41. nnodely-0.14.0.dist-info/METADATA +401 -0
  42. nnodely-0.14.0.dist-info/RECORD +44 -0
  43. nnodely-0.14.0.dist-info/WHEEL +5 -0
  44. nnodely-0.14.0.dist-info/top_level.txt +2 -0
nnodely/fuzzify.py ADDED
@@ -0,0 +1,221 @@
1
+ import inspect, copy, textwrap, torch
2
+
3
+ import numpy as np
4
+ import torch.nn as nn
5
+
6
+ from collections.abc import Callable
7
+
8
+ from nnodely.relation import NeuObj, Stream
9
+ from nnodely.model import Model
10
+ from nnodely.utils import check, merge, enforce_types
11
+
12
+ from nnodely.logger import logging, nnLogger
13
+ log = nnLogger(__name__, logging.CRITICAL)
14
+
15
+ fuzzify_relation_name = 'Fuzzify'
16
+
17
+
18
class Fuzzify(NeuObj):
    """Fuzzification relation.

    Maps a scalar input stream onto a vector of membership-function
    activations, one per channel center. Channels are either derived from an
    `output_dimension`/`range` pair (evenly spaced centers, endpoints
    included) or given explicitly through `centers`. `functions` selects the
    membership shape: the built-in 'Triangular'/'Rectangular', a single
    callable, or a list of callables cycled across the channels.
    """

    @enforce_types
    def __init__(self, output_dimension: int | None = None,
                 range: list | None = None,
                 centers: list | None = None,
                 functions: str | list | Callable = 'Triangular'):

        self.relation_name = fuzzify_relation_name
        super().__init__('F' + fuzzify_relation_name + str(NeuObj.count))
        self.json['Functions'][self.name] = {}
        if output_dimension is not None:
            check(range is not None, ValueError, 'if "output_dimension" is set, "range" must also be set')
            check(centers is None, ValueError,
                  'if "output_dimension" and "range" are set, then "centers" must be None')
            self.output_dimension = {'dim': output_dimension}
            # np.linspace guarantees exactly `output_dimension` evenly spaced
            # centers including both endpoints. The previous np.arange-based
            # construction (range[0] .. range[1]+interval, step=interval) could
            # produce an extra center because of floating-point rounding of the
            # step, breaking the len(centers) == dim_out invariant.
            self.json['Functions'][self.name]['centers'] = np.linspace(range[0], range[1], output_dimension).tolist()
        else:
            check(centers is not None, ValueError, 'if "output_dimension" is None, "centers" must be set')
            self.output_dimension = {'dim': len(centers)}
            self.json['Functions'][self.name]['centers'] = np.array(centers).tolist()
        self.json['Functions'][self.name]['dim_out'] = copy.deepcopy(self.output_dimension)

        # Membership functions are serialized to their source code so the model
        # definition json stays exportable and self-contained.
        if type(functions) is str:
            self.json['Functions'][self.name]['functions'] = functions
            self.json['Functions'][self.name]['names'] = functions
        elif type(functions) is list:
            self.json['Functions'][self.name]['functions'] = []
            self.json['Functions'][self.name]['names'] = []
            for func in functions:
                code = textwrap.dedent(inspect.getsource(func)).replace('\"', '\'')
                self.json['Functions'][self.name]['functions'].append(code)
                self.json['Functions'][self.name]['names'].append(func.__name__)
        else:
            code = textwrap.dedent(inspect.getsource(functions)).replace('\"', '\'')
            self.json['Functions'][self.name]['functions'] = code
            self.json['Functions'][self.name]['names'] = functions.__name__

    def __call__(self, obj: Stream) -> Stream:
        """Apply the fuzzification to a scalar Stream, returning the fuzzified Stream."""
        stream_name = fuzzify_relation_name + str(Stream.count)
        check(type(obj) is Stream, TypeError,
              f"The type of {obj} is {type(obj)} and is not supported for Fuzzify operation.")
        check('dim' in obj.dim and obj.dim['dim'] == 1, ValueError, 'Input dimension must be scalar')
        output_dimension = copy.deepcopy(obj.dim)
        output_dimension.update(self.output_dimension)
        stream_json = merge(self.json, obj.json)
        stream_json['Relations'][stream_name] = [fuzzify_relation_name, [obj.name], self.name]
        return Stream(stream_name, stream_json, output_dimension)
66
+
67
+
68
def return_fuzzify(json, xlim=None, num_points=1000):
    """Sample every membership function of a fuzzify json entry on a 1-D grid.

    Returns (x_values, activations): the grid as a list and a dict mapping
    each channel index to its membership values over the grid.
    """
    if xlim is not None:
        grid = torch.from_numpy(np.linspace(xlim[0], xlim[1], num=num_points))
    else:
        # Default plotting window: the centers range padded by 2 on each side.
        grid = torch.from_numpy(np.linspace(json['centers'][0] - 2, json['centers'][-1] + 2, num=num_points))
    chan_centers = np.array(json['centers'])
    n_func = len(json['names']) if isinstance(json['names'], list) else 1
    activ_fun = {}
    for idx in range(len(chan_centers)):
        if json['functions'] == 'Triangular':
            activ_fun[idx] = triangular(grid, idx, chan_centers).tolist()
        elif json['functions'] == 'Rectangular':
            activ_fun[idx] = rectangular(grid, idx, chan_centers).tolist()
        else:
            # Custom function(s): the source stored in the json is exec'd and the
            # resulting callable is looked up by name in globals().
            if isinstance(json['names'], list):
                func_idx = idx % n_func  # cycle through the available functions
                exec(json['functions'][func_idx], globals())
                function_to_call = globals()[json['names'][func_idx]]
            else:
                exec(json['functions'], globals())
                function_to_call = globals()[json['names']]
            activ_fun[idx] = custom_function(function_to_call, grid, idx, chan_centers).tolist()
    return grid.tolist(), activ_fun
97
+
98
+
99
def triangular(x, idx_channel, chan_centers):
    """Triangular membership of channel `idx_channel` evaluated at `x`.

    The first/last channels saturate at 1 beyond their outermost center;
    interior channels ramp up from the previous center and down to the next.
    With a single channel the activation is the constant 1.
    """
    zero = torch.tensor(0.0)
    one = torch.tensor(1.0)
    last = len(chan_centers) - 1

    if idx_channel == 0:
        if last == 0:
            # Single channel: always fully active.
            return 1
        width = chan_centers[1] - chan_centers[0]
        falling = -(x - chan_centers[0]) / width + 1
        return torch.minimum(torch.maximum(falling, zero), one)

    if idx_channel == last:
        width = chan_centers[-1] - chan_centers[-2]
        rising = (x - chan_centers[-2]) / width
        return torch.minimum(torch.maximum(rising, zero), one)

    # Interior channel: intersection of the rising and falling ramps.
    left_width = chan_centers[idx_channel] - chan_centers[idx_channel - 1]
    right_width = chan_centers[idx_channel + 1] - chan_centers[idx_channel]
    rising = torch.maximum((x - chan_centers[idx_channel - 1]) / left_width, zero)
    falling = torch.maximum(-(x - chan_centers[idx_channel]) / right_width + 1, zero)
    return torch.minimum(rising, falling)
122
+
123
+
124
def rectangular(x, idx_channel, chan_centers):
    """Rectangular (crisp) membership of channel `idx_channel` at `x`.

    Channel boundaries sit halfway between adjacent centers; the first and
    last channels extend unbounded outwards. With a single channel the
    activation is the constant 1.0.
    """
    last = len(chan_centers) - 1
    center = chan_centers[idx_channel]

    if idx_channel == 0:
        if last == 0:
            # Only one channel: always active.
            return 1.0
        half = abs(chan_centers[1] - center) / 2
        return torch.where(x < center + half, 1.0, 0.0)

    if idx_channel == last:
        half = abs(center - chan_centers[idx_channel - 1]) / 2
        return torch.where(x >= center - half, 1.0, 0.0)

    # Interior channel: active on [center - half_down, center + half_up).
    half_up = abs(chan_centers[idx_channel + 1] - center) / 2
    half_down = abs(center - chan_centers[idx_channel - 1]) / 2
    inside = (x >= center - half_down) & (x < center + half_up)
    return torch.where(inside, 1.0, 0.0)
147
+
148
+
149
def custom_function(func, x, idx_channel, chan_centers):
    """Evaluate a user-supplied membership `func` on `x` shifted to the channel center."""
    return func(x - chan_centers[idx_channel])
152
+
153
+
154
class Fuzzify_Layer(nn.Module):
    """Torch layer that evaluates the fuzzify membership functions.

    `params` is the json entry built by `Fuzzify`: it carries the channel
    centers, the serialized membership function source(s), the output
    dimension and the function name(s). Custom function sources are exec'd
    into globals() at construction time, wrapped with @torch.fx.wrap so that
    fx tracing treats them as opaque calls.
    """
    def __init__(self, params):
        super().__init__()
        self.centers = params['centers']
        self.function = params['functions']
        self.dimension = params['dim_out']['dim']
        self.name = params['names']

        if type(self.name) is list:
            # Multiple custom functions: register each one in globals().
            self.n_func = len(self.name)
            for func, name in zip(self.function, self.name):
                ## Add the function to the globals
                try:
                    code = 'import torch\n@torch.fx.wrap\n' + func
                    exec(code, globals())
                except Exception as e:
                    check(False, RuntimeError, f"An error occurred when running the function '{name}':\n {e}")
        else:
            self.n_func = 1
            if self.name not in ['Triangular', 'Rectangular']: ## custom function
                ## Add the function to the globals
                try:
                    code = 'import torch\n@torch.fx.wrap\n' + self.function
                    exec(code, globals())
                except Exception as e:
                    check(False, RuntimeError, f"An error occurred when running the function '{self.name}':\n {e}")

    def forward(self, x):
        # res = torch.empty((x.size(0), x.size(1), self.dimension), dtype=torch.float32)
        # One activation column per channel; result shape (batch, window, n_channels).
        # NOTE(review): repeat(1, 1, dim) assumes x has a trailing dimension of
        # 1 (scalar input enforced by Fuzzify.__call__) — confirm with callers.
        res = torch.zeros_like(x).repeat(1, 1, self.dimension)

        if self.function == 'Triangular':
            for i in range(len(self.centers)):
                # res[:, :, i:i+1] = triangular(x, i, self.centers)
                slicing(res, torch.tensor(i), triangular(x, i, self.centers))
        elif self.function == 'Rectangular':
            for i in range(len(self.centers)):
                # res[:, :, i:i+1] = rectangular(x, i, self.centers)
                slicing(res, torch.tensor(i), rectangular(x, i, self.centers))
        else: ## Custom_function
            if self.n_func == 1:
                # Retrieve the function object from the globals dictionary
                function_to_call = globals()[self.name]
                for i in range(len(self.centers)):
                    # res[:, :, i:i+1] = custom_function(function_to_call, x, i, self.centers)
                    slicing(res, torch.tensor(i), custom_function(function_to_call, x, i, self.centers))
            else: ## we have multiple functions
                # Cycle through the registered functions across the channels
                # (func_idx is effectively i modulo n_func).
                for i in range(len(self.centers)):
                    if i >= self.n_func:
                        func_idx = i - round(self.n_func * (i // self.n_func))
                    else:
                        func_idx = i
                    function_to_call = globals()[self.name[func_idx]]
                    # res[:, :, i:i+1] = custom_function(function_to_call, x, i, self.centers)
                    slicing(res, torch.tensor(i), custom_function(function_to_call, x, i, self.centers))
        return res
210
+
211
+
212
@torch.fx.wrap
def slicing(res, i, x):
    """Write activation `x` into channel column `i` of `res` in place.

    Wrapped with torch.fx.wrap so the in-place slice assignment remains a
    single opaque call during symbolic tracing.
    """
    res[:, :, i:i + 1] = x
215
+
216
+
217
def createFuzzify(self, *params):
    """Model factory hook: build a Fuzzify_Layer from the json parameters entry."""
    return Fuzzify_Layer(params[0])


# Register the factory on Model so relations named 'Fuzzify' resolve to it.
setattr(Model, fuzzify_relation_name, createFuzzify)
nnodely/initializer.py ADDED
@@ -0,0 +1,31 @@
1
+
2
def init_constant(indexes, params_size, dict_param=None):
    """Constant initializer: every element takes dict_param['value'] (default 1).

    User-supplied keys are merged over the defaults, so partial dicts are
    accepted; the None default avoids the shared mutable-default-argument
    pitfall of the previous signature.
    """
    params = {'value': 1}
    if dict_param is not None:
        params.update(dict_param)
    return params['value']
4
+
5
def init_negexp(indexes, params_size, dict_param=None):
    """Negative-exponential initializer: first_value * exp(-lambda * (1 - x)).

    x is the normalized position of the index along dimension `size_index`
    (x=1 at the last element, so the value there equals `first_value`).
    Missing dict_param keys fall back to defaults; the None default avoids
    the shared mutable-default-argument pitfall.
    """
    import numpy as np
    params = {'size_index': 0, 'first_value': 1, 'lambda': 3}
    if dict_param is not None:
        params.update(dict_param)
    size_index = params['size_index']
    # A single-element dimension would divide by zero; treat it as x = 1.
    x = 1 if params_size[size_index] - 1 == 0 else indexes[size_index] / (params_size[size_index] - 1)
    return params['first_value'] * np.exp(-params['lambda'] * (1 - x))
11
+
12
def init_exp(indexes, params_size, dict_param=None):
    """Exponential initializer, increasing or decreasing along one dimension.

    dict_param keys (missing keys fall back to defaults):
      size_index   -- which dimension of indexes/params_size to use (default 0)
      max_value    -- peak value of the exponential (default 1)
      lambda       -- decay/growth rate (default 3)
      monotonicity -- 'increasing' or 'decreasing' (default 'decreasing')

    Raises ValueError for any other monotonicity value.
    """
    import numpy as np
    params = {'size_index': 0, 'max_value': 1, 'lambda': 3, 'monotonicity': 'decreasing'}
    if dict_param is not None:
        # Merge user values over the defaults so partial dicts are accepted;
        # also avoids the shared mutable-default-argument pitfall.
        params.update(dict_param)
    size_index = params['size_index']
    monotonicity = params['monotonicity']
    if monotonicity == 'increasing':
        # increasing exponential: 'max_value' is the value at x=1 (end of range)
        x = 1 if params_size[size_index] - 1 == 0 else indexes[size_index] / (params_size[size_index] - 1)
        return params['max_value'] * np.exp(params['lambda'] * (x - 1))
    if monotonicity == 'decreasing':
        # decreasing exponential: 'max_value' is the value at x=0 (start of range)
        x = 0 if params_size[size_index] - 1 == 0 else indexes[size_index] / (params_size[size_index] - 1)
        return params['max_value'] * np.exp(-params['lambda'] * x)
    raise ValueError('The parameter monotonicity must be either increasing or decreasing.')
27
+
28
def init_lin(indexes, params_size, dict_param=None):
    """Linear initializer: interpolate from first_value (x=0) to last_value (x=1).

    Missing dict_param keys fall back to defaults; the None default avoids
    the shared mutable-default-argument pitfall.
    """
    params = {'size_index': 0, 'first_value': 1, 'last_value': 0}
    if dict_param is not None:
        params.update(dict_param)
    size_index = params['size_index']
    # A single-element dimension would divide by zero; treat it as x = 0.
    x = 0 if params_size[size_index] - 1 == 0 else indexes[size_index] / (params_size[size_index] - 1)
    return (params['last_value'] - params['first_value']) * x + params['first_value']
nnodely/input.py ADDED
@@ -0,0 +1,131 @@
1
+ import copy
2
+
3
+ from nnodely.relation import NeuObj, Stream, ToStream
4
+ from nnodely.utils import check, merge
5
+ from nnodely.part import SamplePart, TimePart
6
+
7
class InputState(NeuObj, Stream):
    """Base class for model input/state variables.

    Registers the variable under the `json_name` section ('Inputs' or
    'States') with its dimension and empty time ('tw') / sample ('sw')
    windows, and exposes window-selection helpers (tw/sw/z/last/next) that
    return Stream parts.
    """
    def __init__(self, json_name, name, dimensions: int = 1):
        NeuObj.__init__(self, name)
        check(type(dimensions) == int, TypeError, "The dimensions must be a integer")
        self.json_name = json_name
        # 'tw'/'sw' hold [backward, forward] extents relative to the present.
        self.json[self.json_name][self.name] = {'dim': dimensions, 'tw': [0, 0], 'sw': [0, 0]}
        self.dim = {'dim': dimensions}
        Stream.__init__(self, name, self.json, self.dim)

    def tw(self, tw, offset=None):
        """Select a time window.

        tw: positive scalar (seconds in the past) or a [start, end] pair
        relative to the present instant. offset: optional reference instant
        that must lie inside the window.
        """
        dim = copy.deepcopy(self.dim)
        json = copy.deepcopy(self.json)
        if type(tw) is list:
            json[self.json_name][self.name]['tw'] = tw
            tw = tw[1] - tw[0]
        else:
            json[self.json_name][self.name]['tw'][0] = -tw
        # NOTE(review): check placement reconstructed from a whitespace-mangled
        # source; placed so the resulting span is validated for both forms.
        check(tw > 0, ValueError, "The time window must be positive")
        dim['tw'] = tw
        if offset is not None:
            check(json[self.json_name][self.name]['tw'][0] <= offset < json[self.json_name][self.name]['tw'][1],
                  IndexError,
                  "The offset must be inside the time window")
        return TimePart(Stream(self.name, json, dim), json[self.json_name][self.name]['tw'][0], json[self.json_name][self.name]['tw'][1], offset)

    # Select a sample window
    # Example T = [-3,-2,-1,0,1,2] # time vector 0 represent the last passed instant
    # If sw is an integer #1 represent the number of step in the past
    # T.s(2) = [-1, 0] # represents two time step in the past
    # If sw is a list [#1,#2] the numbers represent the time index in the vector second element excluded
    # T.s([-2,0]) = [-1, 0] # represents two time step in the past zero in the future
    # T.s([0,1]) = [1] # the first time in the future
    # T.s([-4,-2]) = [-3,-2]
    # The total number of samples can be computed #2-#1
    # The offset represent the index of the vector that need to be used to offset the window
    # T.s(2,offset=-2) = [0, 1] # the value of the window is [-1,0]
    # T.s([-2,2],offset=-1) = [-1,0,1,2] # the value of the window is [-1,0,1,2]
    def sw(self, sw, offset=None):
        """Select a sample (step-count) window; see the worked examples above."""
        dim = copy.deepcopy(self.dim)
        json = copy.deepcopy(self.json)
        if type(sw) is list:
            check(type(sw[0]) == int and type(sw[1]) == int, TypeError, "The sample window must be integer")
            json[self.json_name][self.name]['sw'] = sw
            sw = sw[1] - sw[0]
        else:
            check(type(sw) == int, TypeError, "The sample window must be integer")
            json[self.json_name][self.name]['sw'][0] = -sw
        check(sw > 0, ValueError, "The sample window must be positive")
        dim['sw'] = sw
        if offset is not None:
            check(json[self.json_name][self.name]['sw'][0] <= offset < json[self.json_name][self.name]['sw'][1],
                  IndexError,
                  "The offset must be inside the sample window")
        return SamplePart(Stream(self.name, json, dim), json[self.json_name][self.name]['sw'][0], json[self.json_name][self.name]['sw'][1], offset)

    # Select the unitary delay
    # Example T = [-3,-2,-1,0,1,2] # time vector 0 represent the last passed instant
    # T.z(-1) = 1
    # T.z(0) = 0 #the last passed instant
    # T.z(2) = -2
    def z(self, delay):
        """Select a single sample delayed by `delay` steps (z-transform style)."""
        dim = copy.deepcopy(self.dim)
        json = copy.deepcopy(self.json)
        sw = [(-delay) - 1, (-delay)]
        json[self.json_name][self.name]['sw'] = sw
        dim['sw'] = sw[1] - sw[0]
        return SamplePart(Stream(self.name, json, dim), json[self.json_name][self.name]['sw'][0], json[self.json_name][self.name]['sw'][1], None)

    def last(self):
        """The most recent past sample (zero delay)."""
        return self.z(0)

    def next(self):
        """The first future sample (delay -1)."""
        return self.z(-1)

    # def s(self, derivate):
    #     return Stream((self.name, {'s':derivate}), self.json, self.dim)
83
+
84
+
85
class Input(InputState):
    """A model input variable, registered under the 'Inputs' section of the json."""
    def __init__(self, name, dimensions: int = 1):
        InputState.__init__(self, 'Inputs', name, dimensions)
88
+
89
class State(InputState):
    """A model state variable, registered under the 'States' section of the json."""
    def __init__(self, name, dimensions: int = 1):
        InputState.__init__(self, 'States', name, dimensions)
92
+
93
+
94
# Keys used inside a state's json entry to record how it is bound:
# a direct connection or a closed loop (see Connect / ClosedLoop below).
connect_name = 'connect'
closedloop_name = 'closedLoop'
97
+
98
+
99
+ # class Connect(Stream, ToStream):
100
+ # def __init__(self, obj1: Stream, obj2: State) -> Stream:
101
+ # check(type(obj1) is Stream, TypeError,
102
+ # f"The {obj1} must be a Stream or Output and not a {type(obj1)}.")
103
+ # obj1.connect(obj2)
104
+ #
105
+ # class ClosedLoop(Stream, ToStream):
106
+ # def __init__(self, obj1: Stream, obj2: State) -> Stream:
107
+ # check(type(obj1) is Stream, TypeError,
108
+ # f"The {obj1} must be a Stream or Output and not a {type(obj1)}.")
109
+ # obj1.closedloop(obj2)
110
+
111
class Connect(Stream, ToStream):
    """Connect the output of a Stream directly to a State variable.

    Merges the two json graphs and records the binding under the state's
    'connect' key. A state can carry at most one binding (connect OR
    closed loop); rebinding raises KeyError.
    """
    def __init__(self, obj1: Stream, obj2: State) -> Stream:
        check(type(obj1) is Stream, TypeError,
              f"The {obj1} must be a Stream and not a {type(obj1)}.")
        check(type(obj2) is State, TypeError,
              f"The {obj2} must be a State and not a {type(obj2)}.")
        super().__init__(obj1.name, merge(obj1.json, obj2.json), obj1.dim)
        # Bug fix: the previous `or` made this check pass whenever at least one
        # of the two keys was absent, so an already-bound state could be
        # silently rebound. The state is free only if NEITHER key is present.
        check(closedloop_name not in self.json['States'][obj2.name] and connect_name not in self.json['States'][obj2.name],
              KeyError, f"The state variable {obj2.name} is already connected.")
        self.json['States'][obj2.name][connect_name] = obj1.name
121
+
122
class ClosedLoop(Stream, ToStream):
    """Feed a Stream back into a State variable as a closed loop.

    Merges the two json graphs and records the binding under the state's
    'closedLoop' key. A state can carry at most one binding (connect OR
    closed loop); rebinding raises KeyError.
    """
    def __init__(self, obj1: Stream, obj2: State) -> Stream:
        check(type(obj1) is Stream, TypeError,
              f"The {obj1} must be a Stream and not a {type(obj1)}.")
        check(type(obj2) is State, TypeError,
              f"The {obj2} must be a State and not a {type(obj2)}.")
        super().__init__(obj1.name, merge(obj1.json, obj2.json), obj1.dim)
        # Bug fix: the previous `or` made this check pass whenever at least one
        # of the two keys was absent, so an already-bound state could be
        # silently rebound. The state is free only if NEITHER key is present.
        check(closedloop_name not in self.json['States'][obj2.name] and connect_name not in self.json['States'][obj2.name],
              KeyError, f"The state variable {obj2.name} is already connected.")
        self.json['States'][obj2.name][closedloop_name] = obj1.name
nnodely/linear.py ADDED
@@ -0,0 +1,130 @@
1
+ import copy, inspect, textwrap, torch
2
+
3
+ import torch.nn as nn
4
+
5
+ from collections.abc import Callable
6
+
7
+ from nnodely.relation import NeuObj, Stream, AutoToStream
8
+ from nnodely.model import Model
9
+ from nnodely.parameter import Parameter
10
+ from nnodely.utils import check, merge, enforce_types
11
+
12
+ from nnodely.logger import logging, nnLogger
13
+ log = nnLogger(__name__, logging.CRITICAL)
14
+
15
+ linear_relation_name = 'Linear'
16
+ class Linear(NeuObj, AutoToStream):
17
+
18
+ @enforce_types
19
+ def __init__(self, output_dimension:int|None = None,
20
+ W_init:Callable|None = None,
21
+ W_init_params:dict|None = None,
22
+ b_init:Callable|None = None,
23
+ b_init_params:dict|None = None,
24
+ W:Parameter|str|None = None,
25
+ b:bool|str|Parameter|None = None,
26
+ dropout:int|float = 0):
27
+
28
+ self.relation_name = linear_relation_name
29
+ self.W_init = W_init
30
+ self.W_init_params = W_init_params
31
+ self.b_init = b_init
32
+ self.b_init_params = b_init_params
33
+ self.W = W
34
+ self.b = b
35
+ self.bname = None
36
+ self.Wname = None
37
+ self.dropout = dropout
38
+ super().__init__('P' + linear_relation_name + str(NeuObj.count))
39
+
40
+ if W is None:
41
+ self.output_dimension = 1 if output_dimension is None else output_dimension
42
+ self.Wname = self.name + 'W'
43
+ elif type(W) is str:
44
+ self.output_dimension = 1 if output_dimension is None else output_dimension
45
+ self.Wname = W
46
+ else:
47
+ check(type(W) is Parameter or type(W) is str, TypeError, 'The "W" must be of type Parameter or str.')
48
+ window = 'tw' if 'tw' in W.dim else ('sw' if 'sw' in W.dim else None)
49
+ check(window == None or W.dim['sw'] == 1, ValueError, 'The "W" must not have window dimension.')
50
+ check(len(W.dim['dim']) == 2, ValueError,'The "W" dimensions must be a list of 2.')
51
+ self.output_dimension = W.dim['dim'][1]
52
+ if output_dimension is not None:
53
+ check(W.dim['dim'][1] == output_dimension, ValueError, 'output_dimension must be equal to the second dim of "W".')
54
+ self.Wname = W.name
55
+ self.json['Parameters'][W.name] = copy.deepcopy(W.json['Parameters'][W.name])
56
+
57
+ if b is not None:
58
+ check(type(b) is Parameter or type(b) is bool or type(b) is str, TypeError, 'The "b" must be of type Parameter, bool or str.')
59
+ if type(b) is Parameter:
60
+ check(type(b.dim['dim']) is int, ValueError, 'The "b" dimensions must be an integer.')
61
+ if output_dimension is not None:
62
+ check(b.dim['dim'] == output_dimension, ValueError,
63
+ 'output_dimension must be equal to the dim of the "b".')
64
+ self.bname = b.name
65
+ self.json['Parameters'][b.name] = copy.deepcopy(b.json['Parameters'][b.name])
66
+ elif type(b) is str:
67
+ self.bname = b
68
+ self.json['Parameters'][self.bname] = { 'dim': self.output_dimension }
69
+ else:
70
+ self.bname = self.name + 'b'
71
+ self.json['Parameters'][self.bname] = { 'dim': self.output_dimension }
72
+
73
+ def __call__(self, obj:Stream) -> Stream:
74
+ stream_name = linear_relation_name + str(Stream.count)
75
+ check(type(obj) is Stream, TypeError,
76
+ f"The type of {obj} is {type(obj)} and is not supported for Linear operation.")
77
+ window = 'tw' if 'tw' in obj.dim else ('sw' if 'sw' in obj.dim else None)
78
+
79
+ if type(self.W) is Parameter:
80
+ check(self.W.dim['dim'][0] == obj.dim['dim'], ValueError,
81
+ 'the input dimension must be equal to the first dim of the parameter')
82
+ else:
83
+ self.json['Parameters'][self.Wname] = { 'dim': [obj.dim['dim'],self.output_dimension,] }
84
+
85
+ if self.W_init is not None:
86
+ check('values' not in self.json['Parameters'][self.Wname], ValueError, f"The parameter {self.Wname} is already initialized.")
87
+ check(inspect.isfunction(self.W_init), ValueError,
88
+ f"The W_init parameter must be a function.")
89
+ code = textwrap.dedent(inspect.getsource(self.W_init)).replace('\"', '\'')
90
+ self.json['Parameters'][self.Wname]['init_fun'] = { 'code' : code, 'name' : self.W_init.__name__}
91
+ if self.W_init_params is not None:
92
+ self.json['Parameters'][self.Wname]['init_fun']['params'] = self.W_init_params
93
+
94
+ if self.b_init is not None:
95
+ check(self.bname is not None, ValueError,f"The bias is missing.")
96
+ check('values' not in self.json['Parameters'][self.bname], ValueError, f"The parameter {self.bname} is already initialized.")
97
+ check(inspect.isfunction(self.b_init), ValueError,
98
+ f"The b_init parameter must be a function.")
99
+ code = textwrap.dedent(inspect.getsource(self.b_init)).replace('\"', '\'')
100
+ self.json['Parameters'][self.bname]['init_fun'] = { 'code' : code, 'name' : self.b_init.__name__ }
101
+ if self.b_init_params is not None:
102
+ self.json['Parameters'][self.bname]['init_fun']['params'] = self.b_init_params
103
+
104
+ stream_json = merge(self.json,obj.json)
105
+ stream_json['Relations'][stream_name] = [linear_relation_name, [obj.name], self.Wname, self.bname, self.dropout]
106
+ return Stream(stream_name, stream_json,{'dim': self.output_dimension, window:obj.dim[window]})
107
+
108
+
109
class Linear_Layer(nn.Module):
    """Applies y = x @ W (+ b) over the last axis of a (batch, window, in) tensor."""

    def __init__(self, weights, bias=None, dropout=0):
        super().__init__()
        self.weights = weights
        self.bias = bias
        # Dropout is only instantiated when a non-zero probability is given.
        self.dropout = nn.Dropout(p=dropout) if dropout > 0 else None

    def forward(self, x):
        # x: (batch, window, in_features); weights[0]: (in_features, out_features).
        out = torch.einsum('bwi,io->bwo', x, self.weights[0])
        if self.bias is not None:
            out = out + self.bias
        if self.dropout is not None:
            out = self.dropout(out)
        return out
126
+
127
def createLinear(self, *inputs):
    """Model factory hook: build a Linear_Layer from (weights, bias, dropout)."""
    return Linear_Layer(weights=inputs[0], bias=inputs[1], dropout=inputs[2])


# Register the factory on Model so relations named 'Linear' resolve to it.
setattr(Model, linear_relation_name, createLinear)
nnodely/localmodel.py ADDED
@@ -0,0 +1,82 @@
1
+ import inspect
2
+
3
+ from collections.abc import Callable
4
+
5
+ from nnodely.relation import NeuObj
6
+ from nnodely.part import Select
7
+ from nnodely.utils import check, enforce_types
8
+
9
+ localmodel_relation_name = 'LocalModel'
10
+
11
class LocalModel(NeuObj):
    """Local model structure combining inputs with fuzzy activation channels.

    Builds sum_j f_out(f_in(inputs) * act_j), where j runs over the cartesian
    product of the activation channels. `input_function`/`output_function`
    are optional callables (or factories when argument-less / when
    `pass_indexes` is set) applied before/after the channel weighting.
    """
    @enforce_types
    def __init__(self, input_function: Callable | None = None,
                 output_function: Callable | None = None,
                 pass_indexes: bool = False):

        self.relation_name = localmodel_relation_name
        self.pass_indexes = pass_indexes
        super().__init__(localmodel_relation_name + str(NeuObj.count))
        self.json['Functions'][self.name] = {}
        if input_function is not None:
            check(callable(input_function), TypeError, 'The input_function must be callable')
        if output_function is not None:
            check(callable(output_function), TypeError, 'The output_function must be callable')
        # Bug fix: these attributes were previously assigned only inside the
        # `is not None` branches, so a LocalModel built without an input/output
        # function raised AttributeError when called.
        self.input_function = input_function
        self.output_function = output_function

    def __call__(self, inputs, activations):
        """Combine `inputs` with every activation channel and return the summed stream."""
        self.out_sum = []
        if type(activations) is not tuple:
            activations = (activations,)
        self.___activations_matrix(activations, inputs)

        out = self.out_sum[0]
        for ind in range(1, len(self.out_sum)):
            out = out + self.out_sum[ind]
        return out

    # Recursive helper that nests one loop per activation stream, visiting the
    # cartesian product of the channel indexes and appending one weighted term
    # per combination to self.out_sum.
    def ___activations_matrix(self, activations, inputs, idx=0, idx_list=None):
        if idx_list is None:
            # Avoid the shared mutable-default-argument pitfall.
            idx_list = []
        if idx != len(activations):
            for i in range(activations[idx].dim['dim']):
                self.___activations_matrix(activations, inputs, idx + 1, idx_list + [i])
        else:
            if self.input_function is not None:
                # An argument-less input_function is treated as a factory; with
                # pass_indexes it receives the channel index combination.
                if len(inspect.getfullargspec(self.input_function).args) == 0:
                    if type(inputs) is tuple:
                        out_in = self.input_function()(*inputs)
                    else:
                        out_in = self.input_function()(inputs)
                else:
                    if self.pass_indexes:
                        if type(inputs) is tuple:
                            out_in = self.input_function(idx_list)(*inputs)
                        else:
                            out_in = self.input_function(idx_list)(inputs)
                    else:
                        if type(inputs) is tuple:
                            out_in = self.input_function(*inputs)
                        else:
                            out_in = self.input_function(inputs)
            else:
                check(type(inputs) is not tuple, TypeError, 'The input cannot be a tuple without input_function')
                out_in = inputs

            # Product of the selected channel of every activation stream.
            act = Select(activations[0], idx_list[0])
            for ind, i in enumerate(idx_list[1:]):
                act = act * Select(activations[ind + 1], i)

            prod = out_in * act

            if self.output_function is not None:
                # Same factory/pass_indexes conventions as input_function.
                if len(inspect.getfullargspec(self.output_function).args) == 0:
                    self.out_sum.append(self.output_function()(prod))
                else:
                    if self.pass_indexes:
                        self.out_sum.append(self.output_function(idx_list)(prod))
                    else:
                        self.out_sum.append(self.output_function(prod))
            else:
                self.out_sum.append(prod)