nnodely 0.25.0__tar.gz → 1.2.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74)
  1. {nnodely-0.25.0/nnodely.egg-info → nnodely-1.2.3}/PKG-INFO +3 -2
  2. {nnodely-0.25.0 → nnodely-1.2.3}/mplplots/plots.py +19 -6
  3. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely/__init__.py +9 -6
  4. nnodely-1.2.3/nnodely/activation.py +197 -0
  5. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely/arithmetic.py +44 -22
  6. nnodely-1.2.3/nnodely/equationlearner.py +126 -0
  7. nnodely-1.2.3/nnodely/exporter/export.py +422 -0
  8. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely/exporter/exporter.py +3 -0
  9. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely/exporter/standardexporter.py +16 -8
  10. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely/fir.py +39 -25
  11. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely/fuzzify.py +1 -0
  12. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely/input.py +98 -69
  13. nnodely-1.2.3/nnodely/interpolation.py +132 -0
  14. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely/linear.py +33 -18
  15. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely/localmodel.py +16 -16
  16. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely/model.py +85 -121
  17. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely/modeldef.py +19 -6
  18. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely/nnodely.py +406 -284
  19. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely/output.py +4 -13
  20. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely/parameter.py +33 -8
  21. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely/parametricfunction.py +118 -86
  22. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely/part.py +221 -79
  23. nnodely-1.2.3/nnodely/relation.py +300 -0
  24. nnodely-1.2.3/nnodely/timeoperation.py +48 -0
  25. nnodely-1.2.3/nnodely/trigonometric.py +207 -0
  26. nnodely-1.2.3/nnodely/utils.py +166 -0
  27. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely/visualizer/mplnotebookvisualizer.py +1 -1
  28. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely/visualizer/textvisualizer.py +3 -3
  29. {nnodely-0.25.0 → nnodely-1.2.3/nnodely.egg-info}/PKG-INFO +3 -2
  30. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely.egg-info/SOURCES.txt +4 -0
  31. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely.egg-info/requires.txt +1 -0
  32. {nnodely-0.25.0 → nnodely-1.2.3}/pyproject.toml +2 -1
  33. {nnodely-0.25.0 → nnodely-1.2.3}/tests/test_dataset.py +63 -24
  34. {nnodely-0.25.0 → nnodely-1.2.3}/tests/test_export.py +24 -4
  35. nnodely-1.2.3/tests/test_export_recurrent.py +573 -0
  36. {nnodely-0.25.0 → nnodely-1.2.3}/tests/test_input_dimensions.py +62 -7
  37. {nnodely-0.25.0 → nnodely-1.2.3}/tests/test_json.py +201 -72
  38. {nnodely-0.25.0 → nnodely-1.2.3}/tests/test_losses.py +1 -1
  39. {nnodely-0.25.0 → nnodely-1.2.3}/tests/test_model_predict.py +538 -14
  40. {nnodely-0.25.0 → nnodely-1.2.3}/tests/test_model_predict_recurrent.py +69 -65
  41. nnodely-1.2.3/tests/test_network_element.py +438 -0
  42. {nnodely-0.25.0 → nnodely-1.2.3}/tests/test_parameters_of_train.py +10 -10
  43. {nnodely-0.25.0 → nnodely-1.2.3}/tests/test_train.py +51 -3
  44. {nnodely-0.25.0 → nnodely-1.2.3}/tests/test_train_recurrent.py +140 -61
  45. {nnodely-0.25.0 → nnodely-1.2.3}/tests/test_visualizer.py +21 -7
  46. nnodely-0.25.0/nnodely/activation.py +0 -115
  47. nnodely-0.25.0/nnodely/exporter/export.py +0 -275
  48. nnodely-0.25.0/nnodely/relation.py +0 -189
  49. nnodely-0.25.0/nnodely/trigonometric.py +0 -106
  50. nnodely-0.25.0/nnodely/utils.py +0 -101
  51. nnodely-0.25.0/tests/test_network_element.py +0 -203
  52. {nnodely-0.25.0 → nnodely-1.2.3}/LICENSE +0 -0
  53. {nnodely-0.25.0 → nnodely-1.2.3}/README.md +0 -0
  54. {nnodely-0.25.0 → nnodely-1.2.3}/mplplots/__init__.py +0 -0
  55. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely/earlystopping.py +0 -0
  56. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely/exporter/__init__.py +0 -0
  57. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely/exporter/reporter.py +0 -0
  58. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely/initializer.py +0 -0
  59. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely/logger.py +0 -0
  60. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely/loss.py +0 -0
  61. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely/optimizer.py +0 -0
  62. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely/visualizer/__init__.py +0 -0
  63. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely/visualizer/dynamicmpl/functionplot.py +0 -0
  64. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely/visualizer/dynamicmpl/fuzzyplot.py +0 -0
  65. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely/visualizer/dynamicmpl/resultsplot.py +0 -0
  66. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely/visualizer/dynamicmpl/trainingplot.py +0 -0
  67. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely/visualizer/mplvisualizer.py +0 -0
  68. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely/visualizer/visualizer.py +0 -0
  69. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely.egg-info/dependency_links.txt +0 -0
  70. {nnodely-0.25.0 → nnodely-1.2.3}/nnodely.egg-info/top_level.txt +0 -0
  71. {nnodely-0.25.0 → nnodely-1.2.3}/setup.cfg +0 -0
  72. {nnodely-0.25.0 → nnodely-1.2.3}/setup.py +0 -0
  73. {nnodely-0.25.0 → nnodely-1.2.3}/tests/test_results.py +0 -0
  74. {nnodely-0.25.0 → nnodely-1.2.3}/tests/test_utils.py +0 -0
@@ -1,6 +1,6 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.2
 Name: nnodely
-Version: 0.25.0
+Version: 1.2.3
 Summary: Model-structured neural network framework for the modeling and control of physical systems
 Author-email: Gastone Pietro Rosati Papini <tonegas@gmail.com>
 License: MIT License
@@ -40,6 +40,7 @@ Requires-Dist: onnx
 Requires-Dist: pandas
 Requires-Dist: reportlab
 Requires-Dist: matplotlib
+Requires-Dist: onnxruntime
 
 <p align="center">
 <img src="https://raw.githubusercontent.com/tonegas/nnodely/main/imgs/logo_white_info.png" alt="logo" >
@@ -37,12 +37,25 @@ def plot_results(ax, name_data, key, A, B, sample_time):
     B_t = np.transpose(np.array(B))
     for ind_win in range(A_t.shape[0]):
         for ind_dim in range(A_t.shape[1]):
-            ax.plot(np.arange(0, len(A_t[ind_win, ind_dim]) * sample_time, sample_time), A_t[ind_win, ind_dim],
-                    label=f'real')
-            ax.plot(np.arange(0, len(B_t[ind_win, ind_dim]) * sample_time, sample_time), B_t[ind_win, ind_dim], '-.',
-                    label=f'prediction')
-            correlation = np.corrcoef(A_t[ind_win, ind_dim],B_t[ind_win, ind_dim])[0, 1]
-            ax.text(0.05, 0.95, f'Correlation: {correlation:.2f}', transform=ax.transAxes, verticalalignment='top')
+            if len(A_t.shape) == 3:
+                num_samples = len(A_t[ind_win, ind_dim])
+                time_array = np.linspace(0, (num_samples - 1) * sample_time, num_samples)
+                ax.plot(time_array, A_t[ind_win, ind_dim],
+                        label=f'real')
+                ax.plot(time_array, B_t[ind_win, ind_dim], '-.',
+                        label=f'prediction')
+                correlation = np.corrcoef(A_t[ind_win, ind_dim],B_t[ind_win, ind_dim])[0, 1]
+                ax.text(0.05, 0.95, f'Correlation: {correlation:.2f}', transform=ax.transAxes, verticalalignment='top')
+            else:
+                num_samples = A_t.shape[3]
+                for idx in range(A_t.shape[2]):
+                    time_array = np.linspace(idx * sample_time, (idx + num_samples - 1) * sample_time, num_samples)
+                    ax.plot(time_array, A_t[ind_win, ind_dim, idx],
+                            label=f'real')
+                    ax.plot(time_array, B_t[ind_win, ind_dim, idx], '-.',
+                            label=f'prediction')
+                    correlation = np.corrcoef(A_t[ind_win, ind_dim, idx],B_t[ind_win, ind_dim, idx])[0, 1]
+                    ax.text(0.05, 0.95, f'Correlation: {correlation:.2f}', transform=ax.transAxes, verticalalignment='top')
 
     ax.grid(True)
     ax.legend(loc='best')
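Editor's note: beyond the new 4-D branch, the substantive fix in this hunk is the move from np.arange to np.linspace for the time axis. With a float step, np.arange can overshoot the endpoint due to rounding and return one sample too many, so the x and y arrays passed to ax.plot no longer match. An illustrative sketch (ours, not package code) of the failure mode:

    import numpy as np

    sample_time, num_samples = 0.1, 3
    # Old construction: 3 * 0.1 rounds to 0.30000000000000004, so arange emits 4 points.
    old_axis = np.arange(0, num_samples * sample_time, sample_time)
    # New construction: linspace pins both the endpoint and the point count.
    new_axis = np.linspace(0, (num_samples - 1) * sample_time, num_samples)
    print(len(old_axis), len(new_axis))  # 4 3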
@@ -1,5 +1,5 @@
 
-__version__ = '0.25.0'
+__version__ = '1.2.3'
 
 import sys
 major, minor = sys.version_info.major, sys.version_info.minor
@@ -18,19 +18,22 @@ else:
 
 # Network input, outputs and parameters
 from nnodely.input import Input, State, Connect, ClosedLoop
-from nnodely.parameter import Parameter, Constant
+from nnodely.parameter import Parameter, Constant, SampleTime
 from nnodely.output import Output
 
 # Network elements
-from nnodely.activation import Relu, Tanh, ELU
+from nnodely.activation import Relu, ELU, Softmax, Sigmoid, Identity
 from nnodely.fir import Fir
 from nnodely.linear import Linear
 from nnodely.arithmetic import Add, Sum, Sub, Mul, Pow, Neg
-from nnodely.trigonometric import Sin, Cos, Tan
+from nnodely.trigonometric import Sin, Cos, Tan, Cosh, Tanh, Sech
 from nnodely.parametricfunction import ParamFun
 from nnodely.fuzzify import Fuzzify
-from nnodely.part import TimePart, TimeSelect, SamplePart, SampleSelect, Part, Select
+from nnodely.part import Part, Select, Concatenate, SamplePart, SampleSelect, TimePart, TimeConcatenate
 from nnodely.localmodel import LocalModel
+from nnodely.equationlearner import EquationLearner
+from nnodely.timeoperation import Integrate, Derivate
+from nnodely.interpolation import Interpolation
 
 # Main nnodely classes
 from nnodely.nnodely import nnodely, Modely
@@ -39,4 +42,4 @@ from nnodely.optimizer import Optimizer, SGD, Adam
 from nnodely.exporter import Exporter, StandardExporter
 
 # Support functions
-from nnodely.initializer import init_negexp, init_lin, init_constant
+from nnodely.initializer import init_negexp, init_lin, init_constant, init_exp
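Editor's note: these two hunks summarize the new public API surface of 1.2.3: SampleTime, the Softmax/Sigmoid/Identity activations, the Cosh/Tanh/Sech relations (note that Tanh moved from nnodely.activation to nnodely.trigonometric), the Concatenate/TimeConcatenate parts, and the new EquationLearner, Integrate, Derivate, and Interpolation blocks. A quick sketch (ours) exercising the new entry points, following the usage shown in the EquationLearner docstring added later in this diff:

    from nnodely import Input, Output, EquationLearner, Tan, Sin, Cos

    x = Input('x')
    # EquationLearner wires one linear layer into a bank of activation relations.
    equation_learner = EquationLearner(functions=[Tan, Sin, Cos])
    out = Output('out', equation_learner(x.last()))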
@@ -0,0 +1,197 @@
+import torch
+import torch.nn as nn
+
+from nnodely import Parameter, Constant
+from nnodely.relation import Stream, ToStream, toStream
+from nnodely.model import Model
+from nnodely.utils import check, enforce_types
+
+
+relu_relation_name = 'Relu'
+elu_relation_name = 'ELU'
+sigmoid_relation_name = 'Sigmoid'
+
+identity_relation_name = 'Identity'
+
+softmax_relation_name = 'Softmax'
+
+
+class Relu(Stream, ToStream):
+    """
+    Implement the Rectified-Linear Unit (ReLU) relation function.
+
+    See also:
+        Official PyTorch ReLU documentation:
+        `torch.nn.ReLU <https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html>`_
+
+    :param obj: The relation stream.
+    :type obj: Stream
+
+    Example:
+        >>> x = Relu(x)
+    """
+    @enforce_types
+    def __init__(self, obj:Stream|Parameter|Constant|float|int) -> Stream:
+        obj = toStream(obj)
+        check(type(obj) is Stream, TypeError,
+              f"The type of {obj} is {type(obj)} and is not supported for Relu operation.")
+        super().__init__(relu_relation_name + str(Stream.count),obj.json,obj.dim)
+        self.json['Relations'][self.name] = [relu_relation_name,[obj.name]]
+
+class ELU(Stream, ToStream):
+    """
+    Implement the Exponential-Linear Unit (ELU) relation function.
+
+    See also:
+        Official PyTorch ELU documentation:
+        `torch.nn.ELU <https://pytorch.org/docs/stable/generated/torch.nn.ELU.html>`_
+
+    :param obj: The relation stream.
+    :type obj: Stream
+
+    Example:
+        >>> x = ELU(x)
+    """
+    @enforce_types
+    def __init__(self, obj:Stream|Parameter|Constant|float|int) -> Stream:
+        obj = toStream(obj)
+        check(type(obj) is Stream,TypeError,
+              f"The type of {obj} is {type(obj)} and is not supported for ELU operation.")
+        super().__init__(elu_relation_name + str(Stream.count),obj.json,obj.dim)
+        self.json['Relations'][self.name] = [elu_relation_name,[obj.name]]
+
+class Identity(Stream, ToStream):
+    """
+    Implement the Identity relation function that simply returns the input vector x.
+
+    See also:
+        Official PyTorch Identity documentation:
+        `torch.nn.Identity <https://pytorch.org/docs/stable/generated/torch.nn.Identity.html>`_
+
+    :param obj: The relation stream.
+    :type obj: Stream
+
+    Example:
+        >>> x = Identity(x)
+    """
+    @enforce_types
+    def __init__(self, obj: Stream|Parameter|Constant|float|int) -> Stream:
+        obj = toStream(obj)
+        check(type(obj) is Stream, TypeError,
+              f"The type of {obj} is {type(obj)} and is not supported for Identity operation.")
+        super().__init__(identity_relation_name + str(Stream.count), obj.json, obj.dim)
+        self.json['Relations'][self.name] = [identity_relation_name, [obj.name]]
+
+
+class Softmax(Stream, ToStream):
+    """
+    Implement the Softmax relation function.
+
+    See also:
+        Official PyTorch Softmax documentation:
+        `torch.nn.Softmax <https://pytorch.org/docs/stable/generated/torch.nn.Softmax.html>`_
+
+    :param obj: The relation stream.
+    :type obj: Stream
+
+    Example:
+        >>> x = Softmax(x)
+    """
+    @enforce_types
+    def __init__(self, obj:Stream|Parameter|Constant|float|int) -> Stream:
+        obj = toStream(obj)
+        check(type(obj) is Stream, TypeError,
+              f"The type of {obj} is {type(obj)} and is not supported for Softmax operation.")
+        super().__init__(softmax_relation_name + str(Stream.count), obj.json, obj.dim)
+        self.json['Relations'][self.name] = [softmax_relation_name, [obj.name]]
+
+class Sigmoid(Stream, ToStream):
+    r"""
+    Implement the Sigmoid relation function.
+    The Sigmoid function is defined as:
+
+    .. math::
+        \sigma(x) = \frac{1}{1 + e^{-x}}
+
+    :param obj: The relation stream.
+    :type obj: Stream
+
+    Example:
+        >>> x = Sigmoid(x)
+    """
+    @enforce_types
+    def __init__(self, obj:Stream|Parameter|Constant|float|int) -> Stream:
+        obj = toStream(obj)
+        check(type(obj) is Stream, TypeError,
+              f"The type of {obj} is {type(obj)} and is not supported for {sigmoid_relation_name} operation.")
+        super().__init__(sigmoid_relation_name + str(Stream.count), obj.json, obj.dim)
+        self.json['Relations'][self.name] = [sigmoid_relation_name, [obj.name]]
+
+class Relu_Layer(nn.Module):
+    """
+    :noindex:
+    """
+    def __init__(self,):
+        super(Relu_Layer, self).__init__()
+    def forward(self, x):
+        return torch.relu(x)
+
+def createRelu(self, *input):
+    """
+    :noindex:
+    """
+    return Relu_Layer()
+
+
+def createELU(self, *input):
+    """
+    :noindex:
+    """
+    return nn.ELU()
+
+class Identity_Layer(nn.Module):
+    """
+    :noindex:
+    """
+    def __init__(self, *args):
+        super(Identity_Layer, self).__init__()
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        return x
+
+def createIdentity(self, *input):
+    """
+    :noindex:
+    """
+    return Identity_Layer()
+
+
+class Sigmoid_Layer(nn.Module):
+    """
+    :noindex:
+    """
+    def __init__(self,):
+        super(Sigmoid_Layer, self).__init__()
+    def forward(self, x):
+        return 1/(1+torch.exp(-x))
+
+def createSigmoid(self, *input):
+    """
+    :noindex:
+    """
+    return Sigmoid_Layer()
+
+def createSoftmax(self, *input):
+    """
+    :noindex:
+    """
+    return nn.Softmax(dim=-1)
+
+
+setattr(Model, relu_relation_name, createRelu)
+setattr(Model, elu_relation_name, createELU)
+setattr(Model, sigmoid_relation_name, createSigmoid)
+
+setattr(Model, identity_relation_name, createIdentity)
+
+setattr(Model, softmax_relation_name, createSoftmax)
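Editor's note: two details in the new module are easy to miss: Sigmoid_Layer computes the sigmoid by hand rather than calling torch.sigmoid, and createSoftmax fixes dim=-1, so the softmax always normalizes the last (feature) dimension. A quick check (ours, not package code) that both behave as expected:

    import torch

    x = torch.linspace(-3.0, 3.0, 7)
    # Sigmoid_Layer.forward agrees with the library sigmoid.
    assert torch.allclose(1 / (1 + torch.exp(-x)), torch.sigmoid(x))
    # createSoftmax: probabilities over the last axis sum to one.
    probs = torch.nn.Softmax(dim=-1)(x)
    assert torch.isclose(probs.sum(), torch.tensor(1.0))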
@@ -3,8 +3,8 @@ import torch
 
 from nnodely.relation import ToStream, Stream, toStream
 from nnodely.model import Model
-from nnodely.utils import check, merge
-
+from nnodely.utils import check, merge, enforce_types
+from nnodely.parameter import Parameter, Constant
 
 # Binary operators
 add_relation_name = 'Add'
@@ -15,10 +15,11 @@ pow_relation_name = 'Pow'
 
 # Unary operators
 neg_relation_name = 'Neg'
-# square_relation_name = 'Square'
 
 # Merge operator
 sum_relation_name = 'Sum'
+
+
 class Add(Stream, ToStream):
     """
     Implement the addition function between two tensors.
@@ -38,7 +39,8 @@ class Add(Stream, ToStream):
     or
     >>> add = relation1 + relation2
     """
-    def __init__(self, obj1:Stream, obj2:Stream) -> Stream:
+    @enforce_types
+    def __init__(self, obj1:Stream|Parameter|Constant, obj2:Stream|Parameter|Constant|int|float) -> Stream:
         obj1,obj2 = toStream(obj1),toStream(obj2)
         check(type(obj1) is Stream,TypeError,
               f"The type of {obj1} is {type(obj1)} and is not supported for add operation.")
@@ -65,7 +67,8 @@ class Sub(Stream, ToStream):
     or
     >>> sub = relation1 - relation2
     """
-    def __init__(self, obj1:Stream, obj2:Stream) -> Stream:
+    @enforce_types
+    def __init__(self, obj1:Stream|Parameter|Constant, obj2:Stream|Parameter|Constant|int|float) -> Stream:
         obj1, obj2 = toStream(obj1), toStream(obj2)
         check(type(obj1) is Stream,TypeError,
               f"The type of {obj1} is {type(obj1)} and is not supported for sub operation.")
@@ -91,7 +94,8 @@ class Mul(Stream, ToStream):
     or
     >>> mul = relation1 * relation2
     """
-    def __init__(self, obj1:Stream, obj2:Stream) -> Stream:
+    @enforce_types
+    def __init__(self, obj1:Stream|Parameter|Constant, obj2:Stream|Parameter|Constant|int|float) -> Stream:
         obj1, obj2 = toStream(obj1), toStream(obj2)
         check(type(obj1) is Stream, TypeError,
               f"The type of {obj1} is {type(obj1)} and is not supported for mul operation.")
@@ -117,7 +121,8 @@ class Div(Stream, ToStream):
     or
     >>> div = relation1 / relation2
     """
-    def __init__(self, obj1:Stream, obj2:Stream) -> Stream:
+    @enforce_types
+    def __init__(self, obj1:Stream|Parameter|Constant, obj2:Stream|Parameter|Constant|int|float) -> Stream:
         obj1, obj2 = toStream(obj1), toStream(obj2)
         check(type(obj1) is Stream, TypeError,
               f"The type of {obj1} is {type(obj1)} and is not supported for div operation.")
@@ -147,7 +152,8 @@ class Pow(Stream, ToStream):
     or
     >>> pow = relation1 ** relation2
     """
-    def __init__(self, obj1:Stream, obj2:Stream) -> Stream:
+    @enforce_types
+    def __init__(self, obj1:Stream|Parameter|Constant, obj2:Stream|Parameter|Constant|int|float) -> Stream:
         obj1, obj2 = toStream(obj1), toStream(obj2)
         check(type(obj1) is Stream, TypeError,
               f"The type of {obj1} is {type(obj1)} and is not supported for exp operation.")
@@ -168,22 +174,17 @@ class Neg(Stream, ToStream):
     Example:
     >>> x = Neg(x)
     """
-    def __init__(self, obj:Stream) -> Stream:
+    @enforce_types
+    def __init__(self, obj:Stream|Parameter|Constant) -> Stream:
         obj = toStream(obj)
         check(type(obj) is Stream, TypeError,
               f"The type of {obj} is {type(obj)} and is not supported for neg operation.")
         super().__init__(neg_relation_name+str(Stream.count), obj.json, obj.dim)
         self.json['Relations'][self.name] = [neg_relation_name,[obj.name]]
 
-# class Square(Stream, ToStream):
-#     def __init__(self, obj:Stream) -> Stream:
-#         check(type(obj) is Stream, TypeError,
-#               f"The type of {obj.name} is {type(obj)} and is not supported for neg operation.")
-#         super().__init__(square_relation_name+str(Stream.count), obj.json, obj.dim)
-#         self.json['Relations'][self.name] = [square_relation_name,[obj.name]]
-
 class Sum(Stream, ToStream):
-    def __init__(self, obj:Stream) -> Stream:
+    @enforce_types
+    def __init__(self, obj:Stream|Parameter|Constant) -> Stream:
         obj = toStream(obj)
         check(type(obj) is Stream, TypeError,
               f"The type of {obj} is {type(obj)} and is not supported for sum operation.")
@@ -196,7 +197,12 @@ class Add_Layer(nn.Module):
         super(Add_Layer, self).__init__()
 
     def forward(self, *inputs):
-        return torch.add(inputs[0], inputs[1])
+        results = inputs[0]
+        for input in inputs[1:]:
+            results = results + input
+        return results
+        #return torch.add(inputs[0],inputs[1]))
+        #return torch.sum(torch.stack(list(inputs)),dim=0)
 
 def createAdd(name, *inputs):
     #: :noindex:
@@ -209,7 +215,12 @@ class Sub_Layer(nn.Module):
 
     def forward(self, *inputs):
         # Perform element-wise subtraction
-        return torch.add(inputs[0],-inputs[1])
+        results = inputs[0]
+        for input in inputs[1:]:
+            results = results - input
+        return results
+        #return torch.add(inputs[0], -inputs[1])
+        #return torch.add(inputs[0],-torch.sum(torch.stack(list(inputs[1:])),dim=0))
 
 def createSub(self, *inputs):
     #: :noindex:
@@ -222,7 +233,13 @@ class Mul_Layer(nn.Module):
         super(Mul_Layer, self).__init__()
 
     def forward(self, *inputs):
-        return inputs[0] * inputs[1]
+        results = inputs[0]
+        for input in inputs[1:]:
+            results = results * input
+        return results
+        #return inputs[0] * inputs[1]
+        #return torch.prod(torch.stack(list(inputs)),dim=0)
+
 
 def createMul(name, *inputs):
     #: :noindex:
@@ -234,7 +251,12 @@ class Div_Layer(nn.Module):
         super(Div_Layer, self).__init__()
 
     def forward(self, *inputs):
-        return inputs[0] / inputs[1]
+        results = inputs[0]
+        for input in inputs[1:]:
+            results = results / input
+        return results
+        #return inputs[0] / inputs[1]
+        #return inputs[0] / torch.prod(torch.stack(list(inputs[1:])),dim=0)
 
 def createDiv(name, *inputs):
     #: :noindex:
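Editor's note: the common thread in these four hunks is that the *_Layer.forward methods now accept any number of inputs and left-fold over them instead of assuming exactly two. An illustrative sketch (ours, not package code) of the resulting semantics for the non-commutative operators:

    import torch

    def left_fold(op, *inputs):
        # Mirrors the rewritten forward methods: start from the first input,
        # fold the remaining ones in left to right.
        result = inputs[0]
        for tensor in inputs[1:]:
            result = op(result, tensor)
        return result

    a, b, c = torch.tensor(10.0), torch.tensor(4.0), torch.tensor(2.0)
    assert left_fold(torch.sub, a, b, c) == (a - b) - c  # 4.0
    assert left_fold(torch.div, a, b, c) == (a / b) / c  # 1.25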
@@ -276,6 +298,7 @@ def createSum(name, *inputs):
     #: :noindex:
     return Sum_Layer()
 
+
 setattr(Model, add_relation_name, createAdd)
 setattr(Model, sub_relation_name, createSub)
 setattr(Model, mul_relation_name, createMul)
@@ -283,7 +306,6 @@ setattr(Model, div_relation_name, createDiv)
 setattr(Model, pow_relation_name, createPow)
 
 setattr(Model, neg_relation_name, createNeg)
-# setattr(Model, square_relation_name, createSquare)
 
 setattr(Model, sum_relation_name, createSum)
 
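Editor's note: the setattr calls in both modules follow one registration pattern: each relation tag is attached to Model as a factory, so the network builder can resolve a relation name found in the model JSON to a layer constructor by attribute lookup. An illustrative sketch (ours; DemoModel and the factory are stand-ins, not package code):

    class DemoModel:
        pass

    def createAdd(name, *inputs):
        # In the package this returns an Add_Layer instance.
        return f"Add_Layer for {name}"

    setattr(DemoModel, 'Add', createAdd)
    print(getattr(DemoModel, 'Add')('Add3'))  # -> "Add_Layer for Add3"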
@@ -0,0 +1,126 @@
+import inspect
+
+from nnodely.relation import NeuObj, Stream
+from nnodely.utils import check, enforce_types
+
+from nnodely.linear import Linear
+from nnodely.part import Select, Concatenate
+from nnodely.fuzzify import Fuzzify
+from nnodely.parametricfunction import ParamFun
+from nnodely.activation import Relu, ELU, Identity, Sigmoid
+from nnodely.trigonometric import Sin, Cos, Tan, Tanh, Cosh, Sech
+from nnodely.arithmetic import Add, Mul, Sub, Neg, Pow, Sum
+
+equationlearner_relation_name = 'EquationLearner'
+Available_functions = [Sin, Cos, Tan, Cosh, Tanh, Sech, Add, Mul, Sub, Neg, Pow, Sum, Concatenate, Relu, ELU, Identity, Sigmoid]
+Initialized_functions = [ParamFun, Fuzzify]
+
+class EquationLearner(NeuObj):
+    """
+    Represents a nnodely implementation of the Task-Parametrized Equation Learner block.
+
+    See also:
+        Task-Parametrized Equation Learner official paper:
+        `Equation Learner <https://www.sciencedirect.com/science/article/pii/S0921889022001981>`_
+
+    Parameters
+    ----------
+    functions : list
+        A list of callable functions to be used as activation functions.
+    linear_in : Linear, optional
+        A Linear layer to process the input before applying the activation functions. If not provided, a randomly initialized linear layer will be used instead.
+    linear_out : Linear, optional
+        A Linear layer to process the output after applying the activation functions. Can be omitted.
+
+    Attributes
+    ----------
+    relation_name : str
+        The name of the relation.
+    linear_in : Linear or None
+        The Linear layer to process the input.
+    linear_out : Linear or None
+        The Linear layer to process the output.
+    functions : list
+        The list of activation functions.
+    func_parameters : dict
+        A dictionary mapping function indices to the number of parameters they require.
+    n_activations : int
+        The total number of activation functions.
+
+    Examples
+    --------
+
+    Example - basic usage:
+        >>> x = Input('x')
+
+        >>> equation_learner = EquationLearner(functions=[Tan, Sin, Cos])
+        >>> out = Output('out',equation_learner(x.last()))
+
+    Example - passing a linear layer:
+        >>> x = Input('x')
+
+        >>> linear_layer = Linear(output_dimension=3, W_init=init_constant, W_init_params={'value':0})
+        >>> equation_learner = EquationLearner(functions=[Tan, Sin, Cos], linear_in=linear_layer)
+
+        >>> out = Output('out',equation_learner(x.last()))
+
+    Example - passing a custom parametric function and multiple inputs:
+        >>> x = Input('x')
+        >>> F = Input('F')
+
+        >>> def myFun(K1,p1):
+        ...     return K1*p1
+
+        >>> K = Parameter('k', dimensions = 1, sw = 1,values=[[2.0]])
+        >>> parfun = ParamFun(myFun, parameters = [K] )
+
+        >>> equation_learner = EquationLearner([parfun])
+        >>> out = Output('out',equation_learner((x.last(),F.last())))
+    """
+    @enforce_types
+    def __init__(self, functions:list, linear_in:Linear|None = None, linear_out:Linear|None = None) -> Stream:
+        self.relation_name = equationlearner_relation_name
+        self.linear_in = linear_in
+        self.linear_out = linear_out
+
+        # input parameters
+        self.functions = functions
+        super().__init__(equationlearner_relation_name + str(NeuObj.count))
+
+        self.func_parameters = {}
+        for func_idx, func in enumerate(self.functions):
+            check(callable(func), TypeError, 'The activation functions must be callable')
+            if type(func) in Initialized_functions:
+                if type(func) == ParamFun:
+                    funinfo = inspect.getfullargspec(func.param_fun)
+                    num_args = len(funinfo.args) - len(func.parameters) if func.parameters else len(funinfo.args)
+                elif type(func) == Fuzzify:
+                    init_signature = inspect.signature(func.__call__)
+                    parameters = list(init_signature.parameters.values())
+                    num_args = len([param for param in parameters if param.name != "self"])
+            else:
+                check(func in Available_functions, ValueError, f'The function {func} is not available for the EquationLearner operation')
+                init_signature = inspect.signature(func.__init__)
+                parameters = list(init_signature.parameters.values())
+                num_args = len([param for param in parameters if param.name != "self"])
+            self.func_parameters[func_idx] = num_args
+
+        self.n_activations = sum(self.func_parameters.values())
+        check(self.n_activations > 0, ValueError, 'At least one activation function must be provided')
+
+    def __call__(self, inputs):
+        if type(inputs) is not tuple:
+            inputs = (inputs,)
+        check(len(set([x.dim['sw'] if 'sw' in x.dim.keys() else x.dim['tw'] for x in inputs])) == 1, ValueError, 'All inputs must have the same time dimension')
+        concatenated_input = inputs[0]
+        for inp in inputs[1:]:
+            concatenated_input = Concatenate(concatenated_input, inp)
+        linear_layer = self.linear_in(concatenated_input) if self.linear_in else Linear(output_dimension=self.n_activations, b=True)(concatenated_input)
+        idx, out = 0, None
+        for func_idx, func in enumerate(self.functions):
+            arguments = [Select(linear_layer,idx+arg_idx) for arg_idx in range(self.func_parameters[func_idx])]
+            idx += self.func_parameters[func_idx]
+            out = func(*arguments) if func_idx == 0 else Concatenate(out, func(*arguments))
+        if self.linear_out:
+            out = self.linear_out(out)
+        return out
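Editor's note: the key sizing rule in EquationLearner.__init__ is that each function contributes one linear channel per constructor argument, so n_activations is really a channel count, not a count of functions: unary Sin plus binary Mul requires a linear_in with 3 outputs, and __call__ then routes Select(linear_layer, 0) to Sin and Select(linear_layer, 1..2) to Mul. An illustrative sketch (ours) of the counting; the Sin and Mul classes below are stand-ins with the same arities as the nnodely relations:

    import inspect

    def count_args(cls):
        # Mirror of the arity counting for plain relations in __init__.
        params = list(inspect.signature(cls.__init__).parameters.values())
        return len([p for p in params if p.name != 'self'])

    class Sin:
        def __init__(self, obj): pass

    class Mul:
        def __init__(self, obj1, obj2): pass

    func_parameters = {i: count_args(f) for i, f in enumerate([Sin, Mul])}
    n_activations = sum(func_parameters.values())
    print(func_parameters, n_activations)  # {0: 1, 1: 2} 3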