pyanfis 0.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyanfis-0.1/LICENSE +19 -0
- pyanfis-0.1/PKG-INFO +30 -0
- pyanfis-0.1/README.md +17 -0
- pyanfis-0.1/pyproject.toml +17 -0
- pyanfis-0.1/setup.cfg +4 -0
- pyanfis-0.1/src/pyanfis/__init__.py +3 -0
- pyanfis-0.1/src/pyanfis/algorithms/LSTSQ.py +42 -0
- pyanfis-0.1/src/pyanfis/algorithms/RLSE.py +37 -0
- pyanfis-0.1/src/pyanfis/algorithms/__init__.py +4 -0
- pyanfis-0.1/src/pyanfis/anfis.py +416 -0
- pyanfis-0.1/src/pyanfis/antecedents/__init__.py +3 -0
- pyanfis-0.1/src/pyanfis/antecedents/antecedents.py +50 -0
- pyanfis-0.1/src/pyanfis/consequents/__init__.py +7 -0
- pyanfis-0.1/src/pyanfis/consequents/consequents.py +47 -0
- pyanfis-0.1/src/pyanfis/consequents/types/algorithm.py +31 -0
- pyanfis-0.1/src/pyanfis/consequents/types/lee.py +32 -0
- pyanfis-0.1/src/pyanfis/consequents/types/takagi_sugeno.py +47 -0
- pyanfis-0.1/src/pyanfis/consequents/types/tsukamoto.py +54 -0
- pyanfis-0.1/src/pyanfis/functions/__init__.py +9 -0
- pyanfis-0.1/src/pyanfis/functions/bell.py +37 -0
- pyanfis-0.1/src/pyanfis/functions/gauss.py +48 -0
- pyanfis-0.1/src/pyanfis/functions/heaviside.py +34 -0
- pyanfis-0.1/src/pyanfis/functions/linear_s.py +41 -0
- pyanfis-0.1/src/pyanfis/functions/linear_z.py +42 -0
- pyanfis-0.1/src/pyanfis/functions/sigmoid.py +34 -0
- pyanfis-0.1/src/pyanfis/functions/universe.py +86 -0
- pyanfis-0.1/src/pyanfis/functions/utils.py +9 -0
- pyanfis-0.1/src/pyanfis/optimizers/__init__.py +3 -0
- pyanfis-0.1/src/pyanfis/optimizers/fuzzySGD.py +196 -0
- pyanfis-0.1/src/pyanfis/rules/__init__.py +3 -0
- pyanfis-0.1/src/pyanfis/rules/intersection_algorithms.py +82 -0
- pyanfis-0.1/src/pyanfis/rules/relation_algorithms.py +40 -0
- pyanfis-0.1/src/pyanfis/rules/rules.py +62 -0
- pyanfis-0.1/src/pyanfis.egg-info/PKG-INFO +30 -0
- pyanfis-0.1/src/pyanfis.egg-info/SOURCES.txt +35 -0
- pyanfis-0.1/src/pyanfis.egg-info/dependency_links.txt +1 -0
- pyanfis-0.1/src/pyanfis.egg-info/top_level.txt +1 -0
pyanfis-0.1/LICENSE
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
Copyright (c) 2024 The Python Packaging Authority
|
|
2
|
+
|
|
3
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
4
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
5
|
+
in the Software without restriction, including without limitation the rights
|
|
6
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
7
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
8
|
+
furnished to do so, subject to the following conditions:
|
|
9
|
+
|
|
10
|
+
The above copyright notice and this permission notice shall be included in all
|
|
11
|
+
copies or substantial portions of the Software.
|
|
12
|
+
|
|
13
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
14
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
15
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
16
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
17
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
18
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
19
|
+
SOFTWARE.
|
pyanfis-0.1/PKG-INFO
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
Metadata-Version: 2.1
|
|
2
|
+
Name: pyanfis
|
|
3
|
+
Version: 0.1
|
|
4
|
+
Summary: A pytorch-based package to use ANFIS for AI
|
|
5
|
+
Author-email: Vicente Feced Mas <vicente.feced.mas@gmail.com>
|
|
6
|
+
Project-URL: Homepage, https://github.com/VicenteFecedMas/pyanfis
|
|
7
|
+
Classifier: Programming Language :: Python :: 3
|
|
8
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
9
|
+
Classifier: Operating System :: OS Independent
|
|
10
|
+
Requires-Python: >=3.10
|
|
11
|
+
Description-Content-Type: text/markdown
|
|
12
|
+
License-File: LICENSE
|
|
13
|
+
|
|
14
|
+
# pyANFIS
|
|
15
|
+
|
|
16
|
+
Welcome to pyANFIS! Here you will be able to find a framework that will allow you to use **Fuzzy Logic** with usual pytorch layers.
|
|
17
|
+
This framework is based on [Jang's](https://www.researchgate.net/publication/3113825_ANFIS_Adaptive-Network-based_Fuzzy_Inference_System?enrichId=rgreq-15825cac70a3ae795992310484420cab-XXX&enrichSource=Y292ZXJQYWdlOzMxMTM4MjU7QVM6MTU5MDc1MDY1MTQ3MzkyQDE0MTQ5Mzc4NTk3MzI%3D&el=1_x_2&_esc=publicationCoverPdf) original paper, although it is going to implement several more things (listed below).
|
|
18
|
+
|
|
19
|
+
## 2024 Roadmap
|
|
20
|
+
|
|
21
|
+
- [x] Jang's Original ANFIS.
|
|
22
|
+
- [ ] Create documentation for each class and function.
|
|
23
|
+
- [ ] Create an installation tutorial.
|
|
24
|
+
- [x] Consequent parameters can be estimated using backpropagation.
|
|
25
|
+
- [X] Type 1 (Tsukamoto) consequents can be used.
|
|
26
|
+
- [ ] Type 2 (Lee) consequents can be used.
|
|
27
|
+
- [ ] Functions inside a universe can merge when 2 functions cover a similar area.
|
|
28
|
+
- [ ] Automatically not use rules when they are not relevant.
|
|
29
|
+
- [X] Display what rules have been fired with a certain data.
|
|
30
|
+
- [ ] Create a method to substitute a trained ANFIS with a surface like in [Matlab](https://www.mathworks.com/help/fuzzy/genfis.html#d126e35957).
|
pyanfis-0.1/README.md
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
# pyANFIS
|
|
2
|
+
|
|
3
|
+
Welcome to pyANFIS! Here you will be able to find a framework that will allow you to use **Fuzzy Logic** with usual pytorch layers.
|
|
4
|
+
This framework is based on [Jang's](https://www.researchgate.net/publication/3113825_ANFIS_Adaptive-Network-based_Fuzzy_Inference_System?enrichId=rgreq-15825cac70a3ae795992310484420cab-XXX&enrichSource=Y292ZXJQYWdlOzMxMTM4MjU7QVM6MTU5MDc1MDY1MTQ3MzkyQDE0MTQ5Mzc4NTk3MzI%3D&el=1_x_2&_esc=publicationCoverPdf) original paper, although it is going to implement several more things (listed below).
|
|
5
|
+
|
|
6
|
+
## 2024 Roadmap
|
|
7
|
+
|
|
8
|
+
- [x] Jang's Original ANFIS.
|
|
9
|
+
- [ ] Create documentation for each class and function.
|
|
10
|
+
- [ ] Create an installation tutorial.
|
|
11
|
+
- [x] Consequent parameters can be estimated using backpropagation.
|
|
12
|
+
- [X] Type 1 (Tsukamoto) consequents can be used.
|
|
13
|
+
- [ ] Type 2 (Lee) consequents can be used.
|
|
14
|
+
- [ ] Functions inside a universe can merge when 2 functions cover a similar area.
|
|
15
|
+
- [ ] Automatically not use rules when they are not relevant.
|
|
16
|
+
- [X] Display what rules have been fired with a certain data.
|
|
17
|
+
- [ ] Create a method to substitute a trained ANFIS with a surface like in [Matlab](https://www.mathworks.com/help/fuzzy/genfis.html#d126e35957).
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
[project]
|
|
2
|
+
name = "pyanfis"
|
|
3
|
+
version = "0.1"
|
|
4
|
+
authors = [
|
|
5
|
+
{ name="Vicente Feced Mas", email="vicente.feced.mas@gmail.com" },
|
|
6
|
+
]
|
|
7
|
+
description = "A pytorch-based package to use ANFIS for AI"
|
|
8
|
+
readme = "README.md"
|
|
9
|
+
requires-python = ">=3.10"
|
|
10
|
+
classifiers = [
|
|
11
|
+
"Programming Language :: Python :: 3",
|
|
12
|
+
"License :: OSI Approved :: MIT License",
|
|
13
|
+
"Operating System :: OS Independent",
|
|
14
|
+
]
|
|
15
|
+
|
|
16
|
+
[project.urls]
|
|
17
|
+
Homepage = "https://github.com/VicenteFecedMas/pyanfis"
|
pyanfis-0.1/setup.cfg
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
import torch
|
|
2
|
+
|
|
3
|
+
# TO BE FINISHED
|
|
4
|
+
class LSTSQ(torch.nn.Module):
|
|
5
|
+
"""
|
|
6
|
+
Computes the vector x that approximately solves the equation a @ x = b
|
|
7
|
+
|
|
8
|
+
Attributes
|
|
9
|
+
----------
|
|
10
|
+
n_vars : float
|
|
11
|
+
length of the "x" vector
|
|
12
|
+
shoulder : float
|
|
13
|
+
shoulder of the linear S function
|
|
14
|
+
|
|
15
|
+
Returns
|
|
16
|
+
-------
|
|
17
|
+
torch.tensor
|
|
18
|
+
a tensor of equal size to the input tensor
|
|
19
|
+
"""
|
|
20
|
+
def __init__(self, n_vars, alpha=0.001, driver='gels') -> None:
|
|
21
|
+
super().__init__()
|
|
22
|
+
|
|
23
|
+
self.theta = torch.zeros((n_vars, 1))
|
|
24
|
+
self.alpha = alpha
|
|
25
|
+
self.driver = driver
|
|
26
|
+
|
|
27
|
+
self.step = -1
|
|
28
|
+
self.theta_dict = {}
|
|
29
|
+
|
|
30
|
+
def forward(self, x, y=None):
|
|
31
|
+
if self.training:
|
|
32
|
+
for _ in range(x.size(0)):
|
|
33
|
+
self.step += 1
|
|
34
|
+
theta = torch.linalg.lstsq(x, y, driver=self.driver).solution
|
|
35
|
+
|
|
36
|
+
if theta.dim() > 2:
|
|
37
|
+
theta = theta.mean(dim=0)
|
|
38
|
+
|
|
39
|
+
self.theta = self.theta + theta * self.alpha
|
|
40
|
+
self.theta_dict[self.step] = self.theta
|
|
41
|
+
|
|
42
|
+
#return self.theta_dict[self.step]
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
import torch
|
|
2
|
+
|
|
3
|
+
class RLSE(torch.nn.Module):
|
|
4
|
+
"""
|
|
5
|
+
Computes the vector x that approximately solves the equation a @ x = b
|
|
6
|
+
using a recursive approach
|
|
7
|
+
|
|
8
|
+
Attributes
|
|
9
|
+
----------
|
|
10
|
+
n_vars : float
|
|
11
|
+
length of the "x" vector
|
|
12
|
+
initial_gamma : float
|
|
13
|
+
big number to initialise the "S" matrix
|
|
14
|
+
|
|
15
|
+
Returns
|
|
16
|
+
-------
|
|
17
|
+
torch.tensor
|
|
18
|
+
a tensor of equal size to the input tensor
|
|
19
|
+
"""
|
|
20
|
+
def __init__(self, n_vars, initial_gamma=1000.):
|
|
21
|
+
super().__init__()
|
|
22
|
+
self.S = torch.eye(n_vars) * initial_gamma
|
|
23
|
+
self.theta = torch.zeros((n_vars, 1))
|
|
24
|
+
self.gamma = 1000
|
|
25
|
+
|
|
26
|
+
def forward(self, A, B,):
|
|
27
|
+
batch, row, _ = A.size()
|
|
28
|
+
|
|
29
|
+
for ba in range(batch):
|
|
30
|
+
for i in range(row):
|
|
31
|
+
a = A[ba, i, :].view(1, -1) # Reshape a to match the dimensions for matrix operations
|
|
32
|
+
b = B[ba, i].unsqueeze(0)
|
|
33
|
+
|
|
34
|
+
self.S = self.S - (torch.matmul(torch.matmul(torch.matmul(self.S, a.T), a), self.S)) / (1 + torch.matmul(torch.matmul(a, self.S), a.T))
|
|
35
|
+
self.theta = self.theta + torch.matmul(self.S, torch.matmul(a.T, (b - torch.matmul(a, self.theta))))
|
|
36
|
+
|
|
37
|
+
#return self.theta
|
|
@@ -0,0 +1,416 @@
|
|
|
1
|
+
import torch
|
|
2
|
+
import math
|
|
3
|
+
|
|
4
|
+
from antecedents import Antecedents
|
|
5
|
+
from rules import Rules
|
|
6
|
+
from consequents import Consequents
|
|
7
|
+
|
|
8
|
+
class ANFIS(torch.nn.Module):
|
|
9
|
+
def __init__(self, num_inputs:int, num_outputs:int, system_type:str="Takagi-Sugeno", consequents_parameters_update:str = 'backward',):
|
|
10
|
+
super().__init__()
|
|
11
|
+
self.num_inputs = num_inputs
|
|
12
|
+
self.num_outputs = num_outputs
|
|
13
|
+
self.system_type = system_type
|
|
14
|
+
|
|
15
|
+
self.params_update = consequents_parameters_update
|
|
16
|
+
|
|
17
|
+
self.antecedents = Antecedents(num_inputs)
|
|
18
|
+
self.rules = Rules()
|
|
19
|
+
self.normalisation = torch.nn.functional.normalize
|
|
20
|
+
self.consequents = Consequents(num_inputs=num_inputs, num_outputs=num_outputs, parameters_update=self.params_update, system_type=self.system_type)
|
|
21
|
+
|
|
22
|
+
self.active_rules = None
|
|
23
|
+
self.active_rules_consequents = None
|
|
24
|
+
self.rules_relevancy = None
|
|
25
|
+
self.erase_irrelevant_rules = None
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
# The next to are pointers
|
|
29
|
+
self.inputs = self.antecedents.universes # To make renaming easier
|
|
30
|
+
self.outputs = self.consequents.consequents.universes # To make renaming easier
|
|
31
|
+
|
|
32
|
+
self.firing_strength = None
|
|
33
|
+
|
|
34
|
+
def _create_binari_rule_from_indexes(self, is_pairs, rule_index):
|
|
35
|
+
rule_list = []
|
|
36
|
+
for universe_name, function_name in is_pairs:
|
|
37
|
+
try:
|
|
38
|
+
index = rule_index.index(f"{universe_name} {function_name}")
|
|
39
|
+
rule_list.append(index)
|
|
40
|
+
except ValueError:
|
|
41
|
+
if universe_name in [i.name for i in self.inputs.values()]:
|
|
42
|
+
raise ValueError(f"Function {function_name} not found in {universe_name}")
|
|
43
|
+
else:
|
|
44
|
+
raise ValueError(f"Universe {universe_name} not found in universe list")
|
|
45
|
+
|
|
46
|
+
rule_tensor = torch.zeros(len(rule_index))
|
|
47
|
+
rule_tensor[rule_list] = 1
|
|
48
|
+
return rule_tensor.unsqueeze(0)
|
|
49
|
+
|
|
50
|
+
def _parse_rule(self, rule):
|
|
51
|
+
if self.system_type == "Takagi-Sugeno" and "then" in rule:
|
|
52
|
+
raise ValueError(f"Takagi-Sugeno systems only reference to the antecedent: 'If VAR1 is VALUE1 | If VAR2 is VALUE2 and VAR3 is VALUE3 | ...' the existance of a 'then' in the sencente does not make sense")
|
|
53
|
+
|
|
54
|
+
rule = rule.split()
|
|
55
|
+
antecedents_rule_index = [f"{item.name} {subkey}" for key, item in self.antecedents.universes.items() for subkey, _ in item.functions.items()]
|
|
56
|
+
|
|
57
|
+
is_word_pairs = [(rule[i-1], rule[i+1]) for i, word in enumerate(rule) if word == 'is']
|
|
58
|
+
|
|
59
|
+
if self.system_type != "Takagi-Sugeno":
|
|
60
|
+
consequets_rule_index = [f"{item.name} {subkey}" for key, item in self.consequents.consequents.universes.items() for subkey, _ in item.functions.items()]
|
|
61
|
+
|
|
62
|
+
antecedent_rules = is_word_pairs[:-1]
|
|
63
|
+
consequent_rules = is_word_pairs[-1:]
|
|
64
|
+
then_word_index = [i for i, word in enumerate(rule) if word == 'then']
|
|
65
|
+
|
|
66
|
+
if rule[0] != "If" and "is" not in rule and "then" not in rule and len(then_word_index) != 1 and any(then_word_index[0] > num for num in is_word_pairs):
|
|
67
|
+
raise ValueError(f"Every string containing a rule must be formated as: 'If VAR1 is VALUE1 and ... then VAR2 is VALUE2'")
|
|
68
|
+
|
|
69
|
+
antecedents_rules = self._create_binari_rule_from_indexes(antecedent_rules, antecedents_rule_index)
|
|
70
|
+
consequent_rules = self._create_binari_rule_from_indexes(consequent_rules, consequets_rule_index)
|
|
71
|
+
|
|
72
|
+
return antecedents_rules, consequent_rules
|
|
73
|
+
else:
|
|
74
|
+
antecedents_rules = self._create_binari_rule_from_indexes(is_word_pairs, antecedents_rule_index)
|
|
75
|
+
return antecedents_rules
|
|
76
|
+
|
|
77
|
+
def create_rules_base(self, rules):
|
|
78
|
+
if not isinstance(rules, torch.Tensor) and not isinstance(rules, list):
|
|
79
|
+
raise ValueError(f"The introduced rules must be either a torch.Tensor or a list")
|
|
80
|
+
|
|
81
|
+
if self.system_type == "Takagi-Sugeno":
|
|
82
|
+
for rule in rules:
|
|
83
|
+
antecedent_part = self._parse_rule(rule)
|
|
84
|
+
if self.active_rules is None:
|
|
85
|
+
self.active_rules = antecedent_part
|
|
86
|
+
else:
|
|
87
|
+
self.active_rules = torch.cat((self.active_rules, antecedent_part), dim=0)
|
|
88
|
+
|
|
89
|
+
for algorithm in self.outputs.values():
|
|
90
|
+
algorithm.generate_theta(self.active_rules.size(0) * (self.num_inputs + 1))
|
|
91
|
+
|
|
92
|
+
elif self.system_type == "Tsukamoto":
|
|
93
|
+
for rule in rules:
|
|
94
|
+
antecedent_part, consequent_part = self._parse_rule(rule)
|
|
95
|
+
if self.active_rules is None and self.active_rules_consequents is None:
|
|
96
|
+
self.active_rules = antecedent_part
|
|
97
|
+
self.active_rules_consequents = consequent_part
|
|
98
|
+
elif self.active_rules is not None and self.active_rules_consequents is not None:
|
|
99
|
+
self.active_rules = torch.cat((self.active_rules, antecedent_part), dim=0)
|
|
100
|
+
self.active_rules_consequents = torch.cat((self.active_rules_consequents, consequent_part), dim=0)
|
|
101
|
+
else:
|
|
102
|
+
raise ValueError(f"Got {len(self.active_rules)} antecedent statements and {len(self.active_rules_consequents)} consequent statements")
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
elif self.system_type == "Lee":
|
|
106
|
+
pass
|
|
107
|
+
|
|
108
|
+
self.rules.active_antecedents_rules = self.active_rules
|
|
109
|
+
|
|
110
|
+
def parameters(self):
|
|
111
|
+
|
|
112
|
+
parameters = []
|
|
113
|
+
|
|
114
|
+
# Antecedents parameters
|
|
115
|
+
for universe in self.antecedents.universes.values():
|
|
116
|
+
for function in universe.functions.values():
|
|
117
|
+
for param in function.parameters():
|
|
118
|
+
parameters.append(param)
|
|
119
|
+
|
|
120
|
+
# Consequent parameters
|
|
121
|
+
if self.params_update == "backward":
|
|
122
|
+
for universe in self.consequents.consequents.universes.values():
|
|
123
|
+
if self.system_type == "Takagi-Sugeno":
|
|
124
|
+
parameters.append(universe.theta)
|
|
125
|
+
else:
|
|
126
|
+
for function in universe.functions.values():
|
|
127
|
+
for param in function.parameters():
|
|
128
|
+
parameters.append(param)
|
|
129
|
+
#print([round(param.data.item(), 2) for param in parameters])
|
|
130
|
+
return parameters
|
|
131
|
+
|
|
132
|
+
def smart_concat(self, tensor_list):
|
|
133
|
+
dimensions = tensor_list[0].dim()
|
|
134
|
+
shape = tensor_list[0].shape
|
|
135
|
+
|
|
136
|
+
tensor_list = torch.stack(tensor_list, dim=-1)
|
|
137
|
+
if dimensions == 0:
|
|
138
|
+
return tensor_list.view(1,1,-1)
|
|
139
|
+
elif dimensions == 1:
|
|
140
|
+
return tensor_list.view(1,shape[0],-1)
|
|
141
|
+
elif dimensions == 2:
|
|
142
|
+
return tensor_list.view(shape[0], shape[1],-1)
|
|
143
|
+
elif dimensions == 3:
|
|
144
|
+
return tensor_list.view(shape[0], shape[1] * shape[2] , -1)
|
|
145
|
+
|
|
146
|
+
def _prepare_input_matrices(self, **kwargs):
|
|
147
|
+
if len(kwargs) != len(self.antecedents.universes.items()):
|
|
148
|
+
|
|
149
|
+
antecedents_tensor = []
|
|
150
|
+
for universe in self.antecedents.universes.values():
|
|
151
|
+
if universe.name not in list(kwargs.keys()):
|
|
152
|
+
raise ValueError(f"Universe name {universe.name} not present in input variables {list(kwargs.keys())}")
|
|
153
|
+
antecedents_tensor.append(kwargs[universe.name])
|
|
154
|
+
del kwargs[universe.name]
|
|
155
|
+
|
|
156
|
+
if not kwargs and self.system_type == "Takagi-Sugeno" and self.training is True and self.params_update != "backward":
|
|
157
|
+
raise ValueError(f"If you use a {self.system_type} and do not update the system using backpropagation you need to feed the output values to train the system.")
|
|
158
|
+
elif not kwargs:
|
|
159
|
+
return self.smart_concat(antecedents_tensor), None
|
|
160
|
+
|
|
161
|
+
consequents_tensor = []
|
|
162
|
+
for universe in self.consequents.consequents.universes.values():
|
|
163
|
+
if universe.name not in list(kwargs.keys()):
|
|
164
|
+
raise ValueError(f"Universe name {universe.name} not present in input variables {list(kwargs.keys())}")
|
|
165
|
+
|
|
166
|
+
consequents_tensor.append(kwargs[universe.name])
|
|
167
|
+
del kwargs[universe.name]
|
|
168
|
+
|
|
169
|
+
return self.smart_concat(antecedents_tensor), self.smart_concat(consequents_tensor)
|
|
170
|
+
|
|
171
|
+
def get_fired_rules(self, **kwargs):
|
|
172
|
+
self.training = False
|
|
173
|
+
X, _ = self._prepare_input_matrices(**kwargs)
|
|
174
|
+
if X.size(1) != 1:
|
|
175
|
+
raise ValueError(f"Only one row can be evaluated at a time")
|
|
176
|
+
|
|
177
|
+
f = self.antecedents(X)
|
|
178
|
+
|
|
179
|
+
self.rules.active_rules = self.active_rules
|
|
180
|
+
f, _ = self.rules(f) # col_indexes = rule place on each col
|
|
181
|
+
|
|
182
|
+
f = self.normalisation(f, dim=2, p=1)
|
|
183
|
+
|
|
184
|
+
return {str(key.to(torch.int16).tolist()): float(strength) for key, strength in zip(self.active_rules,f[0, 0, :])}
|
|
185
|
+
|
|
186
|
+
def state_dict(self):
|
|
187
|
+
params = {}
|
|
188
|
+
|
|
189
|
+
# Main
|
|
190
|
+
params["main"] = {}
|
|
191
|
+
params["main"]["num_inputs"] = self.num_inputs
|
|
192
|
+
params["main"]["num_outputs"] = self.num_outputs
|
|
193
|
+
params["main"]["system_type"] = self.system_type
|
|
194
|
+
params["main"]["params_update"] = self.params_update
|
|
195
|
+
params["main"]["active_rules"] = self.active_rules
|
|
196
|
+
params["main"]["rules_relevancy"] = self.rules_relevancy
|
|
197
|
+
params["main"]["active_rules_consequents"] = self.active_rules_consequents
|
|
198
|
+
params["main"]["erase_irrelevant_rules"] = self.erase_irrelevant_rules
|
|
199
|
+
params["main"]["firing_strength"] = self.firing_strength
|
|
200
|
+
|
|
201
|
+
# Antecedents
|
|
202
|
+
params["Antecedents"] = {}
|
|
203
|
+
params["Antecedents"]["universes"] = {}
|
|
204
|
+
|
|
205
|
+
|
|
206
|
+
# Each universe in the antecedents
|
|
207
|
+
for universe_name, universe in self.inputs.items():
|
|
208
|
+
params["Antecedents"]["universes"][universe_name] = {}
|
|
209
|
+
params["Antecedents"]["universes"][universe_name]["name"] = universe.name
|
|
210
|
+
params["Antecedents"]["universes"][universe_name]["min"] = universe.min
|
|
211
|
+
params["Antecedents"]["universes"][universe_name]["max"] = universe.max
|
|
212
|
+
params["Antecedents"]["universes"][universe_name]["merge"] = universe.merge
|
|
213
|
+
params["Antecedents"]["universes"][universe_name]["heaviside"] = universe.heaviside
|
|
214
|
+
params["Antecedents"]["universes"][universe_name]["functions"] = {}
|
|
215
|
+
|
|
216
|
+
for function_name, function in universe.functions.items():
|
|
217
|
+
params["Antecedents"]["universes"][universe_name]["functions"][function_name] = {}
|
|
218
|
+
params["Antecedents"]["universes"][universe_name]["functions"][function_name]["type"] = str(function)[:-2]
|
|
219
|
+
params["Antecedents"]["universes"][universe_name]["functions"][function_name]["parameters"] = {}
|
|
220
|
+
for name, value in vars(function)['_parameters'].items():
|
|
221
|
+
params["Antecedents"]["universes"][universe_name]["functions"][function_name]["parameters"][name] = value
|
|
222
|
+
|
|
223
|
+
# Consequents
|
|
224
|
+
params["Consequents"] = {}
|
|
225
|
+
params["Consequents"]["universes"] = {}
|
|
226
|
+
|
|
227
|
+
for universe_name, universe in self.outputs.items():
|
|
228
|
+
params["Consequents"]["universes"][universe_name] = {}
|
|
229
|
+
params["Consequents"]["universes"][universe_name]["name"] = universe.name
|
|
230
|
+
params["Consequents"]["universes"][universe_name]["min"] = universe.min
|
|
231
|
+
params["Consequents"]["universes"][universe_name]["max"] = universe.max
|
|
232
|
+
params["Consequents"]["universes"][universe_name]["merge"] = universe.merge
|
|
233
|
+
params["Consequents"]["universes"][universe_name]["heaviside"] = universe.heaviside
|
|
234
|
+
|
|
235
|
+
# Takagi Sugeno
|
|
236
|
+
if self.system_type == "Takagi-Sugeno":
|
|
237
|
+
params["Consequents"]["universes"][universe_name][universe.algorithm_name] = {}
|
|
238
|
+
params["Consequents"]["universes"][universe_name][universe.algorithm_name]["name"] = universe.name
|
|
239
|
+
params["Consequents"]["universes"][universe_name][universe.algorithm_name]["theta"] = universe.theta
|
|
240
|
+
params["Consequents"]["universes"][universe_name][universe.algorithm_name]["algorithm"] = universe.algorithm
|
|
241
|
+
|
|
242
|
+
|
|
243
|
+
# Tsukamoto and Lee
|
|
244
|
+
else:
|
|
245
|
+
params["Consequents"]["universes"][universe_name]["functions"] = {}
|
|
246
|
+
for function_name, function in universe.functions.items():
|
|
247
|
+
params["Consequents"]["universes"][universe_name]["functions"][function_name] = {}
|
|
248
|
+
params["Consequents"]["universes"][universe_name]["functions"][function_name]["type"] = str(function)[:-2]
|
|
249
|
+
params["Consequents"]["universes"][universe_name]["functions"][function_name]["parameters"] = {}
|
|
250
|
+
for name, value in vars(function)['_parameters'].items():
|
|
251
|
+
params["Consequents"]["universes"][universe_name]["functions"][function_name]["parameters"][name] = value
|
|
252
|
+
return params
|
|
253
|
+
|
|
254
|
+
def load_state_dict(self, state_dict):
|
|
255
|
+
# Load Main
|
|
256
|
+
if self.num_inputs != state_dict["main"]["num_inputs"]:
|
|
257
|
+
raise ImportError(f"Atempting to import a system with {state_dict['main']['num_inputs']} inputs into a created system of {self.num_inputs} inputs.")
|
|
258
|
+
|
|
259
|
+
if self.num_outputs != state_dict["main"]["num_outputs"]:
|
|
260
|
+
raise ImportError(f"Atempting to import a system with {state_dict['main']['num_outputs']} outputs into a created system of {self.num_outputs} outputs.")
|
|
261
|
+
|
|
262
|
+
self.system_type = state_dict["main"]["system_type"]
|
|
263
|
+
self.params_update = state_dict["main"]["params_update"]
|
|
264
|
+
|
|
265
|
+
self.antecedents = Antecedents(self.num_inputs)
|
|
266
|
+
self.rules = Rules()
|
|
267
|
+
self.normalisation = torch.nn.functional.normalize
|
|
268
|
+
self.consequents = Consequents(num_inputs=self.num_inputs, num_outputs=self.num_outputs, parameters_update=self.params_update, system_type=self.system_type)
|
|
269
|
+
|
|
270
|
+
self.active_rules = state_dict["main"]["active_rules"]
|
|
271
|
+
self.active_rules_consequents = state_dict["main"]["active_rules_consequents"]
|
|
272
|
+
self.rules_relevancy = state_dict["main"]["rules_relevancy"]
|
|
273
|
+
self.erase_irrelevant_rules = state_dict["main"]["erase_irrelevant_rules"]
|
|
274
|
+
|
|
275
|
+
self.firing_strength = state_dict["main"]["firing_strength"]
|
|
276
|
+
|
|
277
|
+
# Load Antecedents
|
|
278
|
+
for universe_name, universe in self.antecedents.universes.items():
|
|
279
|
+
universe.name = state_dict["Antecedents"]["universes"][universe_name]["name"]
|
|
280
|
+
universe.min = state_dict["Antecedents"]["universes"][universe_name]["min"]
|
|
281
|
+
universe.max = state_dict["Antecedents"]["universes"][universe_name]["max"]
|
|
282
|
+
universe.merge = state_dict["Antecedents"]["universes"][universe_name]["merge"]
|
|
283
|
+
universe.heaviside = state_dict["Antecedents"]["universes"][universe_name]["heaviside"]
|
|
284
|
+
|
|
285
|
+
# Loading functions
|
|
286
|
+
for function_name, function_params in state_dict["Antecedents"]["universes"][universe_name]["functions"].items():
|
|
287
|
+
function_type = function_params["type"]
|
|
288
|
+
function_params = function_params["parameters"]
|
|
289
|
+
try:
|
|
290
|
+
module = __import__("functions", fromlist=[function_type])
|
|
291
|
+
universe.functions[function_name] = getattr(module, function_type)()
|
|
292
|
+
except ImportError:
|
|
293
|
+
raise ImportError(f"Error: Class {function_type} not found in the 'functions' folder.")
|
|
294
|
+
|
|
295
|
+
for name, value in function_params.items():
|
|
296
|
+
universe.functions[function_name][name] = value
|
|
297
|
+
|
|
298
|
+
# Load Consequents
|
|
299
|
+
for universe_name, universe in self.consequents.consequents.universes.items():
|
|
300
|
+
universe.name = state_dict["Consequents"]["universes"][universe_name]["name"]
|
|
301
|
+
universe.min = state_dict["Consequents"]["universes"][universe_name]["min"]
|
|
302
|
+
universe.max = state_dict["Consequents"]["universes"][universe_name]["max"]
|
|
303
|
+
universe.merge = state_dict["Consequents"]["universes"][universe_name]["merge"]
|
|
304
|
+
universe.heaviside = state_dict["Consequents"]["universes"][universe_name]["heaviside"]
|
|
305
|
+
|
|
306
|
+
# Takagi Sugeno
|
|
307
|
+
if self.system_type == "Takagi-Sugeno":
|
|
308
|
+
universe.name = state_dict["Consequents"]["universes"][universe_name][universe.algorithm_name]["name"]
|
|
309
|
+
universe.theta = state_dict["Consequents"]["universes"][universe_name][universe.algorithm_name]["theta"]
|
|
310
|
+
universe.algorithm = state_dict["Consequents"]["universes"][universe_name][universe.algorithm_name]["algorithm"]
|
|
311
|
+
|
|
312
|
+
# Tsukamoto and Lee
|
|
313
|
+
else:
|
|
314
|
+
# Loading functions
|
|
315
|
+
for function_name, function_params in state_dict["Consequents"]["universes"][universe_name]["functions"].items():
|
|
316
|
+
function_type = function_params["type"]
|
|
317
|
+
function_params = function_params["parameters"]
|
|
318
|
+
|
|
319
|
+
try:
|
|
320
|
+
module = __import__("functions", fromlist=[function_type])
|
|
321
|
+
universe.functions[function_name] = getattr(module, function_type)()
|
|
322
|
+
except ImportError:
|
|
323
|
+
raise ImportError(f"Error: Class {function_type} not found in the 'functions' folder.")
|
|
324
|
+
|
|
325
|
+
for name, value in function_params.items():
|
|
326
|
+
universe.functions[function_name][name] = value
|
|
327
|
+
|
|
328
|
+
# The next to are pointers
|
|
329
|
+
self.inputs = self.antecedents.universes # To make renaming easier
|
|
330
|
+
self.outputs = self.consequents.consequents.universes # To make renaming easier
|
|
331
|
+
|
|
332
|
+
def _irrelevant_rules_check(self, f):
|
|
333
|
+
relevancy = torch.mean(torch.mean(f, dim=0), dim=0)
|
|
334
|
+
if self.rules_relevancy is None:
|
|
335
|
+
self.rules_relevancy = relevancy
|
|
336
|
+
else:
|
|
337
|
+
self.rules_relevancy += relevancy
|
|
338
|
+
|
|
339
|
+
self.rules_relevancy = torch.nn.functional.normalize(self.rules_relevancy, dim=0, p=1)
|
|
340
|
+
|
|
341
|
+
def get_conversion_number(self, x):
|
|
342
|
+
if x == 0:
|
|
343
|
+
return 1.0
|
|
344
|
+
|
|
345
|
+
x = math.floor(math.log10(abs(x)))
|
|
346
|
+
if x > 0:
|
|
347
|
+
return float("1" + "0" * x)
|
|
348
|
+
else:
|
|
349
|
+
return float("0." + "0" * abs(x) + "1")
|
|
350
|
+
|
|
351
|
+
def forward(self, X, Y):
|
|
352
|
+
f = self.antecedents(X)
|
|
353
|
+
|
|
354
|
+
self.rules.active_rules = self.active_rules
|
|
355
|
+
f = self.rules(f)
|
|
356
|
+
|
|
357
|
+
f = self.normalisation(f, dim=2, p=1)
|
|
358
|
+
|
|
359
|
+
self.consequents.consequents.active_rules = self.active_rules_consequents
|
|
360
|
+
output = self.consequents(f, X, Y)
|
|
361
|
+
|
|
362
|
+
return output
|
|
363
|
+
|
|
364
|
+
|
|
365
|
+
def __call__(self, *args, **kwargs):
|
|
366
|
+
'''
|
|
367
|
+
In the call step I will preprocess the data and discern if
|
|
368
|
+
I am being asked a control/regression or a classification task.
|
|
369
|
+
'''
|
|
370
|
+
|
|
371
|
+
if args and kwargs:
|
|
372
|
+
raise ValueError("All the arguments must be either arguments or keyword arguments, but you cannot mix between both")
|
|
373
|
+
|
|
374
|
+
# I NEED TO DISCOVER HOW THE DATA ENTERS
|
|
375
|
+
if kwargs:
|
|
376
|
+
X, Y = self._prepare_input_matrices(**kwargs)
|
|
377
|
+
|
|
378
|
+
else:
|
|
379
|
+
if len(args) > 2:
|
|
380
|
+
raise ValueError("Please provide as input either a matrix with the X arguments or two matrices, one with X arguments and one with Y arguments")
|
|
381
|
+
X, Y = args
|
|
382
|
+
|
|
383
|
+
print(X, Y)
|
|
384
|
+
return self.forward(X, Y)
|
|
385
|
+
|
|
386
|
+
|
|
387
|
+
from functions import LinearS, LinearZ
|
|
388
|
+
import torch
|
|
389
|
+
|
|
390
|
+
anfis = ANFIS(num_inputs=2, num_outputs=1)
|
|
391
|
+
|
|
392
|
+
anfis.inputs['Input 1'].range = 0, 10
|
|
393
|
+
anfis.inputs['Input 1'].name = 'X'
|
|
394
|
+
anfis.inputs['Input 1'].functions["Token1"] = LinearS(0, 10)
|
|
395
|
+
|
|
396
|
+
anfis.inputs['Input 2'].range = 0, 10
|
|
397
|
+
anfis.inputs['Input 2'].name = 'Y'
|
|
398
|
+
anfis.inputs['Input 2'].functions["Token2"] = LinearZ(0, 10)
|
|
399
|
+
|
|
400
|
+
anfis.create_rules_base([
|
|
401
|
+
"If Token1",
|
|
402
|
+
"If Token2"
|
|
403
|
+
])
|
|
404
|
+
|
|
405
|
+
X = torch.rand(1, 1)
|
|
406
|
+
Y = torch.rand(1, 1)
|
|
407
|
+
print(' ')
|
|
408
|
+
print('Pasando argumentos posicionales')
|
|
409
|
+
anfis(X=X, Y=Y)
|
|
410
|
+
print(' ')
|
|
411
|
+
print('Pasando argumentos no posicionales')
|
|
412
|
+
anfis(X, Y)
|
|
413
|
+
print(' ')
|
|
414
|
+
print('Pasando argumentos mix')
|
|
415
|
+
anfis(X, Y, X)
|
|
416
|
+
|
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
import torch
|
|
2
|
+
|
|
3
|
+
from functions import Universe
|
|
4
|
+
|
|
5
|
+
class Antecedents(torch.nn.Module):
|
|
6
|
+
"""
|
|
7
|
+
This class is used to define the range in which a variable
|
|
8
|
+
is going to be defined in a fuzzy way, it is composed of
|
|
9
|
+
several functions used to describe it.
|
|
10
|
+
|
|
11
|
+
Attributes
|
|
12
|
+
----------
|
|
13
|
+
x : torch.Tensor
|
|
14
|
+
input batched data
|
|
15
|
+
merge : bool
|
|
16
|
+
if True, the functions that cover similar area will merge
|
|
17
|
+
heaviside :
|
|
18
|
+
if True, the functions on the sides will become Heaviside
|
|
19
|
+
universes : dict
|
|
20
|
+
dict where all the universes are going to be stored
|
|
21
|
+
|
|
22
|
+
Methods
|
|
23
|
+
-------
|
|
24
|
+
automf(n_func)
|
|
25
|
+
generate automatically gauss functions inside all universes
|
|
26
|
+
inside the antecedents
|
|
27
|
+
|
|
28
|
+
Returns
|
|
29
|
+
-------
|
|
30
|
+
torch.tensor
|
|
31
|
+
a tensor of size [n_batches, n_lines, total_functions_of_all_universes]
|
|
32
|
+
"""
|
|
33
|
+
def __init__(self, num_inputs: int, heaviside: bool=False) -> None:
|
|
34
|
+
super(Antecedents, self).__init__()
|
|
35
|
+
self.num_inputs = num_inputs
|
|
36
|
+
self.heaviside = heaviside
|
|
37
|
+
self.universes = {f"Input {i+1}": Universe() for i in range(num_inputs)}
|
|
38
|
+
|
|
39
|
+
def automf(self, n_func: int=2) -> None:
|
|
40
|
+
for key in self.universes.keys():
|
|
41
|
+
self.universes[key].automf(n_func=n_func)
|
|
42
|
+
|
|
43
|
+
def forward(self , X: torch.Tensor) -> torch.Tensor:
|
|
44
|
+
width = len([function for key, universe in self.universes.items() for key, function in universe.functions.items()])
|
|
45
|
+
fuzzy = torch.zeros(X.size(0), X.size(1), width)
|
|
46
|
+
for i, (key, universe) in enumerate(self.universes.items()):
|
|
47
|
+
fuzzy[:, :, i*len(universe.functions):(i+1)*len(universe.functions)] = universe(X[:,:,i:i+1])
|
|
48
|
+
|
|
49
|
+
fuzzy[torch.isnan(fuzzy)] = 1
|
|
50
|
+
return fuzzy
|