nnodely 0.14.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73) hide show
  1. nnodely-0.14.0/LICENSE +21 -0
  2. nnodely-0.14.0/PKG-INFO +401 -0
  3. nnodely-0.14.0/README.md +360 -0
  4. nnodely-0.14.0/imgs/logo_info.png +0 -0
  5. nnodely-0.14.0/imgs/logo_white_info.png +0 -0
  6. nnodely-0.14.0/imgs/massspringdamper.png +0 -0
  7. nnodely-0.14.0/mplplots/__init__.py +0 -0
  8. nnodely-0.14.0/mplplots/plots.py +131 -0
  9. nnodely-0.14.0/nnodely/__init__.py +42 -0
  10. nnodely-0.14.0/nnodely/activation.py +85 -0
  11. nnodely-0.14.0/nnodely/arithmetic.py +203 -0
  12. nnodely-0.14.0/nnodely/earlystopping.py +81 -0
  13. nnodely-0.14.0/nnodely/exporter/__init__.py +3 -0
  14. nnodely-0.14.0/nnodely/exporter/export.py +275 -0
  15. nnodely-0.14.0/nnodely/exporter/exporter.py +45 -0
  16. nnodely-0.14.0/nnodely/exporter/reporter.py +48 -0
  17. nnodely-0.14.0/nnodely/exporter/standardexporter.py +108 -0
  18. nnodely-0.14.0/nnodely/fir.py +150 -0
  19. nnodely-0.14.0/nnodely/fuzzify.py +221 -0
  20. nnodely-0.14.0/nnodely/initializer.py +31 -0
  21. nnodely-0.14.0/nnodely/input.py +131 -0
  22. nnodely-0.14.0/nnodely/linear.py +130 -0
  23. nnodely-0.14.0/nnodely/localmodel.py +82 -0
  24. nnodely-0.14.0/nnodely/logger.py +94 -0
  25. nnodely-0.14.0/nnodely/loss.py +30 -0
  26. nnodely-0.14.0/nnodely/model.py +263 -0
  27. nnodely-0.14.0/nnodely/modeldef.py +205 -0
  28. nnodely-0.14.0/nnodely/nnodely.py +1295 -0
  29. nnodely-0.14.0/nnodely/optimizer.py +91 -0
  30. nnodely-0.14.0/nnodely/output.py +23 -0
  31. nnodely-0.14.0/nnodely/parameter.py +103 -0
  32. nnodely-0.14.0/nnodely/parametricfunction.py +329 -0
  33. nnodely-0.14.0/nnodely/part.py +201 -0
  34. nnodely-0.14.0/nnodely/relation.py +149 -0
  35. nnodely-0.14.0/nnodely/trigonometric.py +67 -0
  36. nnodely-0.14.0/nnodely/utils.py +101 -0
  37. nnodely-0.14.0/nnodely/visualizer/__init__.py +4 -0
  38. nnodely-0.14.0/nnodely/visualizer/dynamicmpl/functionplot.py +34 -0
  39. nnodely-0.14.0/nnodely/visualizer/dynamicmpl/fuzzyplot.py +31 -0
  40. nnodely-0.14.0/nnodely/visualizer/dynamicmpl/resultsplot.py +28 -0
  41. nnodely-0.14.0/nnodely/visualizer/dynamicmpl/trainingplot.py +46 -0
  42. nnodely-0.14.0/nnodely/visualizer/mplnotebookvisualizer.py +66 -0
  43. nnodely-0.14.0/nnodely/visualizer/mplvisualizer.py +215 -0
  44. nnodely-0.14.0/nnodely/visualizer/textvisualizer.py +320 -0
  45. nnodely-0.14.0/nnodely/visualizer/visualizer.py +84 -0
  46. nnodely-0.14.0/nnodely.egg-info/PKG-INFO +401 -0
  47. nnodely-0.14.0/nnodely.egg-info/SOURCES.txt +71 -0
  48. nnodely-0.14.0/nnodely.egg-info/dependency_links.txt +1 -0
  49. nnodely-0.14.0/nnodely.egg-info/requires.txt +6 -0
  50. nnodely-0.14.0/nnodely.egg-info/top_level.txt +2 -0
  51. nnodely-0.14.0/pyproject.toml +41 -0
  52. nnodely-0.14.0/setup.cfg +4 -0
  53. nnodely-0.14.0/setup.py +27 -0
  54. nnodely-0.14.0/tests/__init__.py +0 -0
  55. nnodely-0.14.0/tests/data/testdata.dta +19 -0
  56. nnodely-0.14.0/tests/test_data/testdata.dta +18 -0
  57. nnodely-0.14.0/tests/test_dataset.py +549 -0
  58. nnodely-0.14.0/tests/test_export.py +321 -0
  59. nnodely-0.14.0/tests/test_input_dimensions.py +387 -0
  60. nnodely-0.14.0/tests/test_json.py +365 -0
  61. nnodely-0.14.0/tests/test_losses.py +171 -0
  62. nnodely-0.14.0/tests/test_model_predict.py +1231 -0
  63. nnodely-0.14.0/tests/test_model_predict_recurrent.py +1227 -0
  64. nnodely-0.14.0/tests/test_network_element.py +203 -0
  65. nnodely-0.14.0/tests/test_parameters_of_train.py +1143 -0
  66. nnodely-0.14.0/tests/test_results.py +316 -0
  67. nnodely-0.14.0/tests/test_train.py +225 -0
  68. nnodely-0.14.0/tests/test_train_recurrent.py +1440 -0
  69. nnodely-0.14.0/tests/test_utils.py +29 -0
  70. nnodely-0.14.0/tests/test_visualizer.py +153 -0
  71. nnodely-0.14.0/tests/val_data/testdata.dta +16 -0
  72. nnodely-0.14.0/tests/vector_data/vector_1.dta +16 -0
  73. nnodely-0.14.0/tests/vector_data/vector_2.dta +17 -0
nnodely-0.14.0/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2024 Gastone Pietro Rosati Papini
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,401 @@
1
+ Metadata-Version: 2.1
2
+ Name: nnodely
3
+ Version: 0.14.0
4
+ Summary: Model-structured neural network framework for the modeling and control of physical systems
5
+ Author-email: Gastone Pietro Rosati Papini <tonegas@gmail.com>
6
+ License: MIT License
7
+
8
+ Copyright (c) 2024 Gastone Pietro Rosati Papini
9
+
10
+ Permission is hereby granted, free of charge, to any person obtaining a copy
11
+ of this software and associated documentation files (the "Software"), to deal
12
+ in the Software without restriction, including without limitation the rights
13
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14
+ copies of the Software, and to permit persons to whom the Software is
15
+ furnished to do so, subject to the following conditions:
16
+
17
+ The above copyright notice and this permission notice shall be included in all
18
+ copies or substantial portions of the Software.
19
+
20
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
23
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
26
+ SOFTWARE.
27
+
28
+ Project-URL: Homepage, https://github.com/tonegas/nnodely
29
+ Classifier: Programming Language :: Python :: 3
30
+ Classifier: License :: OSI Approved :: MIT License
31
+ Classifier: Operating System :: OS Independent
32
+ Requires-Python: >=3.10
33
+ Description-Content-Type: text/markdown
34
+ License-File: LICENSE
35
+ Requires-Dist: numpy==2.0.0
36
+ Requires-Dist: onnx==1.16.2
37
+ Requires-Dist: pandas==2.2.2
38
+ Requires-Dist: torch==2.3.1
39
+ Requires-Dist: reportlab==4.2.5
40
+ Requires-Dist: matplotlib==3.9.0
41
+
42
+ <p align="center">
43
+ <img src="https://raw.githubusercontent.com/tonegas/nnodely/main/imgs/logo_white_info.png" alt="logo" >
44
+ </p>
45
+
46
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
47
+ [![Coverage Status](https://coveralls.io/repos/github/tonegas/nnodely/badge.svg?branch=main)](https://coveralls.io/github/tonegas/nnodely?branch=main)
48
+
49
+
50
+ <a name="readme-top"></a>
51
+ ### Model-structured neural network framework for the modeling and control of physical systems
52
+
53
+ _Model-Structured neural networks_ (MSNNs) are a new neural networks concept.
54
+ These networks base their structure on mechanical and control theory laws.
55
+
56
+ The framework's goal is to allow the users fast modeling and control of a mechanical system such as an autonomous vehicle, an industrial robot, a walking robot, a flying drone.
57
+
58
+ Below is the workflow that the framework follows.
59
+
60
+ Using a conceptual representation of your mechanical system the framework generates the structured neural network of model of mechanical device considered.
61
+ Providing suitable experimental data, the framework will realize an effective training of the neural models by appropriately choosing all the hyper-parameters.
62
+ The framework will allow the user to synthesize and train a structured neural network that will be used as a control system in a few simple steps and without the need to perform new experiments.
63
+ The realized neural controller will be exported using C language or ONNX, and it will be ready to use.
64
+
65
+ <!-- TABLE OF CONTENTS -->
66
+ <details>
67
+ <summary>Table of Contents</summary>
68
+ <ol>
69
+ <li>
70
+ <a href="#settingstarted">Getting Started</a>
71
+ </li>
72
+ <li>
73
+ <a href="#basicfunctionalities">Basic Functionalities</a>
74
+ <ul>
75
+ <li><a href="#structuredneuralmodel">Build the structured neural model</a></li>
76
+ <li><a href="#neuralizemodel">Neuralize the structured neural model</a></li>
77
+ <li><a href="#loaddataset">Load the dataset</a></li>
78
+ <li><a href="#trainmodel">Train the structured neural network</a></li>
79
+ <li><a href="#testmodel">Test the structured neural model</a></li>
80
+ </ul>
81
+ </li>
82
+ <li>
83
+ <a href="#fonlderstructure">Structure of the Folders</a>
84
+ <ul>
85
+ <li><a href="#nnodelyfolder">nnodely folder</a></li>
86
+ <li><a href="#testsfolder">tests folder</a></li>
87
+ <li><a href="#examplesfolder">examples folder</a></li>
88
+ </ul>
89
+ </li>
90
+ <li>
91
+ <a href="#shape">Overview on Signal Shape</a>
92
+ <ul>
93
+ <li><a href="#inoutshape">Input and output shape from the structured neural model</a></li>
94
+ <li><a href="#elementwiseshape">Elementwise Arithmetic, Activation, Trigonometric</a></li>
95
+ <li><a href="#firshape">Fir</a></li>
96
+ <li><a href="#linearshape">Linear</a></li>
97
+ <li><a href="#fuzzyshape">Fuzzy</a></li>
98
+ <li><a href="#partshape">Part and Select</a></li>
99
+ <li><a href="#timepartshape">TimePart, SamplePart, SampleSelect</a></li>
100
+ <li><a href="#localmodelshape">LocalModel</a></li>
101
+ <li><a href="#parametersshape">Parameters</a></li>
102
+ <li><a href="#paramfunshape">Parametric Function</a></li>
103
+ </ul>
104
+ </li>
105
+ <li>
106
+ <a href="#license">License</a>
107
+ </li>
108
+ </ol>
109
+ </details>
110
+
111
+ <!-- GETTING STARTED -->
112
+ <a name="settingstarted"></a>
113
+ ## Getting Started
114
+ ### Installation
115
+ You can install the nnodely framework from PyPI via:
116
+ ```sh
117
+ pip install nnodely
118
+ ```
119
+
120
+ ### Prerequisites
121
+ You can install the dependencies of the nnodely framework from PyPI via:
122
+ ```sh
123
+ pip install -r requirements.txt
124
+ ```
125
+
126
+ <p align="right">(<a href="#readme-top">back to top</a>)</p>
127
+
128
+ <a name="basicfunctionalities"></a>
129
+ ## Basic Functionalities
130
+ <a name="structuredneuralmodel"></a>
131
+ ### Build the structured neural model
132
+
133
+ The structured neural model is defined by a list of inputs by a list of outputs and by a list of relationships that link the inputs to the outputs.
134
+
135
+ Let's assume we want to model one of the best-known linear mechanical systems, the mass-spring-damper system.
136
+
137
+ <p align="center">
138
+ <img src="https://raw.githubusercontent.com/tonegas/nnodely/main/imgs/massspringdamper.png" width="250" alt="linearsys" >
139
+ </p>
140
+
141
+ The system is defined as the following equation:
142
+ ```math
143
+ M \ddot x = - k x - c \dot x + F
144
+ ```
145
+
146
+ Suppose we want to estimate the value of the future position of the mass given the initial position and the external force.
147
+
148
+ In the nnodely framework we can build an estimator in this form:
149
+ ```python
150
+ x = Input('x')
151
+ F = Input('F')
152
+ x_z_est = Output('x_z_est', Fir(x.tw(1))+Fir(F.last()))
153
+ ```
154
+
155
+ The first thing we do is define the input variables of the system.
156
+ Input variables can be created using the `Input` function.
157
+ In our system we have two inputs the position of the mass, `x`, and the external force, `F`, exerted on the mass.
158
+ The `Output` function is used to define an output of our model.
159
+ The `Output` gets two inputs, the first is the name of the output and the second is the structure of the estimator.
160
+
161
+ Let's explain some of the functions used:
162
+ 1. The `tw(...)` function is used to extract a time window from a signal.
163
+ In particular we extract a time window of 1 second.
164
+ 2. The `last()` function that is used to get the last force applied to the mass.
165
+ 3. The `Fir(...)` function to build an FIR filter with the tunable parameters on our input variable.
166
+
167
+ So we are creating an estimator for the variable `x` at the instant following the observation (the future position of the mass) by building an
168
+ observer that has a mathematical structure equal to the one shown below:
169
+ ```math
170
+ x[1] = \sum_{k=0}^{N_x-1} x[-k]\cdot h_x[(N_x-1)-k] + F[0]\cdot h_F
171
+ ```
172
+ Where the variables $N_x$, and $h_f$ also the values of the vectors $h_x$ are still unknowns.
173
+ Regarding $N_x$, we know that the window lasts one second but we do not know how many samples it corresponds to and this depends on the discretization interval.
174
+ The formulation above is equivalent to the formulation of the discrete time response of the system
175
+ if we choose $N_x = 3$ and $h_x$ equal to the characteristic polynomial and $h_f = T^2/M$ (with $T$ sample time).
176
+ Our formulation is more general and can take into account the noise of the measured variable using a bigger time window.
177
+ The estimator can also be seen as the composition of the force contributions due to the position and velocity of the mass plus the contribution of external forces.
178
+
179
+ <a name="neuralizemodel"></a>
180
+ ### Neuralize the structured neural model
181
+ Let's now try to train our observer using the data we have.
182
+ We perform:
183
+ ```python
184
+ mass_spring_damper = Modely()
185
+ mass_spring_damper.addModel('x_z_est', x_z_est)
186
+ mass_spring_damper.addMinimize('next-pos', x.z(-1), x_z_est, 'mse')
187
+ mass_spring_damper.neuralizeModel(0.2)
188
+ ```
189
+ Let's create a **nnodely** object, and add one output to the network using the `addModel` function.
190
+ This function is needed to create an output of the model. In this example it is not mandatory because the same output is also added to the `addMinimize` function.
191
+ In order to train our model/estimator the function `addMinimize` is used to add a loss function to the list of losses.
192
+ This function takes:
193
+ 1. The name of the error, it is presented in the results and during the training.
194
+ 2. The second and third inputs are the variable that will be minimized, the order is not important.
195
+ 3. The minimization function used, in this case 'mse'.
196
+ In the function `addMinimize` is used the `z(-1)` function. This function get from the dataset the future value of a variable
197
+ (in our case the position of the mass), the next instant, using the **Z-transform** notation, `z(-1)` is equivalent to `next()` function.
198
+ The function `z(...)` method can be used on an `Input` variable to get a time shifted value.
199
+
200
+ The objective of the minimization is to reduce the error between
201
+ `x_z` that represent one sample of the next position of the mass get from the dataset and
202
+ `x_z_est` is one sample of the output of our estimator.
203
+ The mathematical formulation is as follows:
204
+ ```math
205
+ \frac{1}{n} \sum_{i=0}^{n} (x_{z_i} - x_{{z\_est}_i})^2
206
+ ```
207
+ where `n` represents the number of samples in the dataset.
208
+
209
+ Finally the function `neuralizeModel` is used to perform the discretization. The parameter of the function is the sampling time and it will be chosen based on the data we have available.
210
+
211
+ <a name="loaddataset"></a>
212
+ ### Load the dataset
213
+
214
+ ```python
215
+ data_struct = ['time','x','dx','F']
216
+ data_folder = './tutorials/datasets/mass-spring-damper/data/'
217
+ mass_spring_damper.loadData(name='mass_spring_dataset', source=data_folder, format=data_struct, delimiter=';')
218
+ ```
219
+ Finally, the dataset is loaded. **nnodely** loads all the files that are in a source folder.
220
+
221
+ <a name="trainmodel"></a>
222
+ ### Train the structured neural network
223
+ Using the dataset created the training is performed on the model.
224
+
225
+ ```python
226
+ mass_spring_damper.trainModel()
227
+ ```
228
+
229
+ <a name="testmodel"></a>
230
+ ### Test the structured neural model
231
+ In order to test the results we need to create an input, which in this case is defined by:
232
+ 1. `x` with 5 samples because the sample time is 0.2 and the window of `x` is 1 second.
233
+ 2. `F` is one sample because only the last sample is needed.
234
+
235
+ ```python
236
+ sample = {'F':[0.5], 'x':[0.25, 0.26, 0.27, 0.28, 0.29]}
237
+ results = mass_spring_damper(sample)
238
+ print(results)
239
+ ```
240
+ The result variable is structured as follow:
241
+ ```shell
242
+ >> {'x_z_est':[0.4]}
243
+ ```
244
+ The value represents the output of our estimator (meaning the next position of the mass) and is as close as possible to `x.next()` obtained from the dataset.
245
+ The network can be tested also using a bigger time window
246
+ ```python
247
+ sample = {'F':[0.5, 0.6], 'x':[0.25, 0.26, 0.27, 0.28, 0.29, 0.30]}
248
+ results = mass_spring_damper(sample)
249
+ print(results)
250
+ ```
251
+ The value of `x` is built using a moving time window.
252
+ The result variable is structured as follow:
253
+ ```shell
254
+ >> {'x_z_est':[0.4, 0.42]}
255
+ ```
256
+ The same output can be generated calling the network using the flag `sampled=True` in this way:
257
+ ```python
258
+ sample = {'F':[[0.5],[0.6]], 'x':[[0.25, 0.26, 0.27, 0.28, 0.29],[0.26, 0.27, 0.28, 0.29, 0.30]]}
259
+ results = mass_spring_damper(sample,sampled=True)
260
+ print(results)
261
+ ```
262
+
263
+ <p align="right">(<a href="#readme-top">back to top</a>)</p>
264
+
265
+ <a name="fonlderstructure"></a>
266
+ ## Structure of the Repository
267
+
268
+ <a name="nnodelyfolder"></a>
269
+ ### nnodely folder
270
+ This folder contains all the nnodely library files, the main files are the following:
271
+ 1. __activation.py__ this file contains all the activation functions.
272
+ 2. __arithmetic.py__ this file contains the arithmetic functions as: +, -, /, *., ^2
273
+ 3. __fir.py__ this file contains the finite impulse response filter function. It is a linear operation without bias on the second dimension.
274
+ 4. __fuzzify.py__ contains the operation for the fuzzification of a variable, commonly used in the local model as activation function.
275
+ 5. __input.py__ contains the Input class used to create an input for the network.
276
+ 6. __linear.py__ this file contains the linear function. Typical Linear operation `W*x+b` operated on the third dimension.
277
+ 7. __localmodel.py__ this file contains the logic to build a local model.
278
+ 8. __output.py__ contains the Output class used to create an output for the network.
279
+ 9. __parameter.py__ contains the logic to create generic parameters
280
+ 10. __parametricfunction.py__ are the user custom function. The function can use the pytorch syntax.
281
+ 11. __part.py__ are used for selecting part of the data.
282
+ 12. __trigonometric.py__ this file contains all the trigonometric functions.
283
+ 13. __nnodely.py__ the main file to create the structured network
284
+ 14. __model.py__ contains the pytorch template model for the structured network
285
+
286
+ <a name="testsfolder"></a>
287
+ ### Tests Folder
288
+ This folder contains the unit tests of the library; in particular, each file tests a specific functionality.
289
+
290
+ <a name="examplesfolder"></a>
291
+ ### Examples of usage Folder
292
+ The files in the examples folder are a collection of the functionality of the library.
293
+ Each file presents in depth a specific functionality or function of the framework.
294
+ This folder is useful to understand the flexibility and capability of the framework.
295
+
296
+ <p align="right">(<a href="#readme-top">back to top</a>)</p>
297
+
298
+ <a name="shape"></a>
299
+ ## Overview on signal shape
300
+ In this section is explained the shape of the input/output of the network.
301
+
302
+ <a name="inoutshape"></a>
303
+ ### Input and output shape from the structured neural model
304
+ The structured network can be called in two ways:
305
+ 1. The shape of the inputs not sampled are [total time window size, dim]
306
+ Sampled inputs are reconstructed as soon as the maximum size of the time window is known.
307
+ 'dim' represents the size of the input; if it is not 1 it means that the input is a vector.
308
+ 2. The shape of the sampled inputs are [number of samples = batch, size of time window for a sample, dim]
309
+ In the example presented before in the first call the shape for `x` are [1,5,1] for `F` are [1,1,1]
310
+ in the second call for `x` are [2,5,1] for `F` are [2,1,1]. In both cases the last dimension is ignored as the inputs are scalar.
311
+ The output of the structured neural model
312
+ The outputs are defined in this way for the different cases:
313
+ 1. if the shape is [batch, 1, 1] the final two dimensions are collapsed result [batch]
314
+ 2. if the shape is [batch, window, 1] the last dimension is collapsed result [batch, window]
315
+ 3. if the shape is [batch, window, dim] the output is equal to [batch, window, dim]
316
+ 4. if the shape is [batch, 1, dim] the output is equal to [batch, 1, dim]
317
+ In the example `x_z_est` has the shape of [1] in the first call and [2] in the second because the window and the dim were equal to 1.
318
+
319
+ <a name="elementwiseshape"></a>
320
+ ### Shape of elementwise Arithmetic, Activation, Trigonometric
321
+ The shape and time windows remain unchanged, for the binary operators shape must be equal.
322
+ ```
323
+ input shape = [batch, window, dim] -> output shape = [batch, window, dim]
324
+ ```
325
+
326
+ <a name="firshape"></a>
327
+ ### Shape of Fir input/output
328
+ The input must be scalar; the Fir compresses the time dimension (window), which goes to 1. A vector input is not allowed.
329
+ The output dimension of the Fir is moved on the last dimension to create a vector output.
330
+ ```
331
+ input shape = [batch, window, 1] -> output shape = [batch, 1, output dimension of Fir = output_dimension]
332
+ ```
333
+
334
+ <a name="linearshape"></a>
335
+ ### Shape of Linear input/output
336
+ The window remains unchanged and the output dimension is user defined.
337
+ ```
338
+ input shape = [batch, window, dimension] -> output shape = [batch, window, output dimension of Linear = output_dimension]
339
+ ```
340
+
341
+ <a name="fuzzyshape"></a>
342
+ ### Shape of Fuzzy input/output
343
+ The function fuzzifies the input and creates a vector as output.
344
+ The window remains unchanged, the input must be scalar. Vector inputs are not allowed.
345
+ ```
346
+ input shape = [batch, window, 1] -> output shape = [batch, window, number of centers of Fuzzy = len(centers)]
347
+ ```
348
+
349
+ <a name="partshape"></a>
350
+ ### Shape of Part and Select input/output
351
+ Part selects a slice of the vector input, the input must be a vector.
352
+ With the Select operation the dimension becomes 1; the input must be a vector.
353
+ For both operations, if there is a time component it remains unchanged.
354
+ ```
355
+ Part input shape = [batch, window, dimension] -> output shape = [batch, window, selected dimension = [j-i]]
356
+ Select input shape = [batch, window, dimension] -> output shape = [batch, window, 1]
357
+ ```
358
+
359
+ <a name="timepartshape"></a>
360
+ ### Shape of TimePart, SamplePart, SampleSelect input/output
361
+ The TimePart selects a time window from the signal (works like timewindow `tw([i,j])` but in this case the i,j are absolute).
362
+ The SamplePart selects a list of samples from the signal (works like samplewindow `sw([i,j])` but in this case the i,j are absolute).
363
+ The SampleSelect selects a specific index from the signal (works like the zeta operation `z(index)` but in this case the index is absolute).
364
+ For all the operations the batch and the remaining dimensions are unchanged.
365
+ ```
366
+ SamplePart input shape = [batch, window, dimension] -> output shape = [batch, selected sample window = [j-i], dimension]
367
+ SampleSelect input shape = [batch, window, dimension] -> output shape = [batch, 1, dimension]
368
+ TimePart input shape = [batch, window, dimension] -> output shape = [batch, selected time window = [j-i]/sample_time, dimension]
369
+ ```
370
+
371
+ <a name="localmodelshape"></a>
372
+ ### Shape of LocalModel input/output
373
+ The local model has two main inputs, activation functions and inputs.
374
+ Activation functions have shape of the fuzzy
375
+ ```
376
+ input shape = [batch, window, 1] -> output shape = [batch, window, number of centers of Fuzzy = len(centers)]
377
+ ```
378
+ Inputs go through input function and output function.
379
+ The input shape of the input function can be anything as long as the output shape of the input function has the following dimensions
380
+ `[batch, window, 1]` so input functions for example cannot be a Fir with output_dimension different from 1.
381
+ The input shape of the output function is `[batch, window, 1]` while the shape of the output of the output functions can be anything.
382
+
383
+ <a name="parametersshape"></a>
384
+ ### Shape of Parameters input/output
385
+ Parameter shapes are defined as follows `[window = sw or tw/sample_time, dim]`; the dimensions can be defined as a tuple and are appended to window.
386
+ When the time dimension is not defined it is configured to 1
387
+
388
+ <a name="paramfunshape"></a>
389
+ ### Shape of Parametric Function input/output
390
+ The Parametric functions take inputs and parameters as inputs
391
+ Parameter dimensions are the same as defined by the parameters if the dimensions are not defined they will be equal to `[window = 1,dim = 1]`
392
+ Dimensions of the inputs inside the parametric function are the same as those managed within the Pytorch framework equal to `[batch, window, dim]`
393
+ Output dimensions must follow the same convention `[batch, window, dim]`
394
+
395
+ <p align="right">(<a href="#readme-top">back to top</a>)</p>
396
+
397
+ <a name="license"></a>
398
+ ## License
399
+ This project is released under the license [License: MIT](https://opensource.org/licenses/MIT).
400
+
401
+ <p align="right">(<a href="#readme-top">back to top</a>)</p>