oikan 0.0.1.1__tar.gz → 0.0.1.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {oikan-0.0.1.1 → oikan-0.0.1.2}/PKG-INFO +1 -1
- oikan-0.0.1.2/oikan/model.py +65 -0
- oikan-0.0.1.2/oikan/regularization.py +30 -0
- oikan-0.0.1.2/oikan/symbolic.py +21 -0
- oikan-0.0.1.2/oikan/trainer.py +37 -0
- oikan-0.0.1.2/oikan/utils.py +43 -0
- oikan-0.0.1.2/oikan/visualize.py +37 -0
- {oikan-0.0.1.1 → oikan-0.0.1.2}/oikan.egg-info/PKG-INFO +1 -1
- {oikan-0.0.1.1 → oikan-0.0.1.2}/oikan.egg-info/SOURCES.txt +2 -0
- {oikan-0.0.1.1 → oikan-0.0.1.2}/pyproject.toml +1 -1
- oikan-0.0.1.1/oikan/model.py +0 -28
- oikan-0.0.1.1/oikan/symbolic.py +0 -36
- oikan-0.0.1.1/oikan/trainer.py +0 -32
- oikan-0.0.1.1/oikan/visualize.py +0 -20
- {oikan-0.0.1.1 → oikan-0.0.1.2}/README.md +0 -0
- {oikan-0.0.1.1 → oikan-0.0.1.2}/oikan/__init__.py +0 -0
- {oikan-0.0.1.1 → oikan-0.0.1.2}/oikan.egg-info/dependency_links.txt +0 -0
- {oikan-0.0.1.1 → oikan-0.0.1.2}/oikan.egg-info/requires.txt +0 -0
- {oikan-0.0.1.1 → oikan-0.0.1.2}/oikan.egg-info/top_level.txt +0 -0
- {oikan-0.0.1.1 → oikan-0.0.1.2}/setup.cfg +0 -0
- {oikan-0.0.1.1 → oikan-0.0.1.2}/setup.py +0 -0
oikan-0.0.1.2/oikan/model.py ADDED
@@ -0,0 +1,65 @@
+import torch
+import torch.nn as nn
+from .utils import BSplineBasis, FourierBasis
+
+class AdaptiveBasisLayer(nn.Module):
+    def __init__(self, input_dim, hidden_dim):
+        super().__init__()
+        self.weights = nn.Parameter(torch.randn(input_dim, hidden_dim))
+        self.bias = nn.Parameter(torch.zeros(hidden_dim))
+
+    def forward(self, x):
+        return torch.matmul(x, self.weights) + self.bias
+
+class EfficientKAN(nn.Module):
+    def __init__(self, input_dim, hidden_units=10, basis_type='bspline'):
+        super().__init__()
+        self.input_dim = input_dim
+        self.hidden_units = hidden_units
+        self.basis_type = basis_type
+
+        if basis_type == 'bspline':
+            self.basis_functions = nn.ModuleList([BSplineBasis(hidden_units) for _ in range(input_dim)])
+            self.basis_output_dim = input_dim * (hidden_units - 4)  # Adjusted for BSpline output
+        elif basis_type == 'fourier':
+            self.basis_functions = nn.ModuleList([FourierBasis(hidden_units//2) for _ in range(input_dim)])
+            self.basis_output_dim = input_dim * hidden_units
+
+        # Grid-based interaction layer
+        self.interaction_weights = nn.Parameter(torch.randn(input_dim, input_dim))
+
+    def forward(self, x):
+        # Transform each feature using basis functions
+        transformed_features = [bf(x[:, i].unsqueeze(1)) for i, bf in enumerate(self.basis_functions)]
+        basis_output = torch.cat(transformed_features, dim=1)
+
+        # Compute feature interactions - fixed matrix multiplication
+        batch_size = x.size(0)
+        x_reshaped = x.view(batch_size, self.input_dim, 1)  # [batch_size, input_dim, 1]
+        interaction_matrix = torch.sigmoid(self.interaction_weights)  # [input_dim, input_dim]
+        interaction_features = torch.bmm(x_reshaped.transpose(1, 2),
+                                         x_reshaped * interaction_matrix.unsqueeze(0))  # [batch_size, 1, input_dim]
+        interaction_features = interaction_features.view(batch_size, -1)  # [batch_size, input_dim]
+
+        return torch.cat([basis_output, interaction_features], dim=1)
+
+    def get_output_dim(self):
+        return self.basis_output_dim + self.input_dim
+
+class OIKAN(nn.Module):
+    def __init__(self, input_dim, output_dim, hidden_units=10):
+        super().__init__()
+        self.efficientkan = EfficientKAN(input_dim, hidden_units)
+
+        # Get actual feature dimension after transformation
+        feature_dim = self.efficientkan.get_output_dim()
+
+        self.interpretable_layers = nn.Sequential(
+            AdaptiveBasisLayer(feature_dim, 32),
+            nn.ReLU(),
+            AdaptiveBasisLayer(32, output_dim)
+        )
+
+    def forward(self, x):
+        transformed_x = self.efficientkan(x)
+        return self.interpretable_layers(transformed_x)
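As a quick smoke test of the new model (a minimal sketch, not part of the diff; it assumes the package is installed and importable as `oikan`): with the default `basis_type='bspline'` and `hidden_units=10`, each feature contributes 6 B-spline basis columns, and the interaction term adds another `input_dim` columns, so `OIKAN(4, 1)` maps a `[8, 4]` batch to `[8, 1]`.

import torch
from oikan.model import OIKAN

# 8 samples, 4 features, scalar regression output
model = OIKAN(input_dim=4, output_dim=1, hidden_units=10)
x = torch.rand(8, 4)
print(model(x).shape)  # expected: torch.Size([8, 1])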
oikan-0.0.1.2/oikan/regularization.py ADDED
@@ -0,0 +1,30 @@
+import torch
+import torch.nn as nn
+
+class RegularizedLoss:
+    def __init__(self, base_criterion, model, l1_lambda=0.01, gradient_lambda=0.01):
+        self.base_criterion = base_criterion
+        self.model = model
+        self.l1_lambda = l1_lambda
+        self.gradient_lambda = gradient_lambda
+
+    def __call__(self, pred, target, inputs):
+        base_loss = self.base_criterion(pred, target)
+
+        # L1 regularization
+        l1_loss = 0
+        for param in self.model.parameters():
+            l1_loss += torch.norm(param, p=1)
+
+        # Gradient penalty
+        grad_penalty = 0
+        inputs.requires_grad_(True)
+        outputs = self.model(inputs)
+        gradients = torch.autograd.grad(
+            outputs=outputs.sum(),
+            inputs=inputs,
+            create_graph=True
+        )[0]
+        grad_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
+
+        return base_loss + self.l1_lambda * l1_loss + self.gradient_lambda * grad_penalty
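A hedged usage sketch (again assuming the `oikan` import path): `RegularizedLoss` is called with predictions, targets, and the raw inputs, because the gradient penalty re-runs the model on `inputs` — and note that it flips `requires_grad` on `inputs` in place.

import torch
import torch.nn as nn
from oikan.model import OIKAN
from oikan.regularization import RegularizedLoss

model = OIKAN(input_dim=4, output_dim=1)
X, y = torch.rand(16, 4), torch.rand(16, 1)

loss_fn = RegularizedLoss(nn.MSELoss(), model, l1_lambda=0.01, gradient_lambda=0.01)
loss = loss_fn(model(X), y, X)  # side effect: sets X.requires_grad = True
loss.backward()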
oikan-0.0.1.2/oikan/symbolic.py ADDED
@@ -0,0 +1,21 @@
+import torch
+import numpy as np
+
+def extract_symbolic_formula_regression(model, X):
+    """Simple coefficient-based formula extraction"""
+    model.eval()
+    with torch.no_grad():
+        # Get weights from the first adaptive layer
+        weights = model.interpretable_layers[0].weights.numpy()
+        # Simplified representation
+        terms = []
+        for i in range(X.shape[1]):
+            coef = np.abs(weights[i]).mean()
+            if coef > 0.1:  # threshold for significance
+                terms.append(f"{coef:.2f}*x{i+1}")
+
+    return " + ".join(terms) if terms else "0"
+
+def extract_symbolic_formula_classification(model, X):
+    """Extract classification boundary formula"""
+    return extract_symbolic_formula_regression(model, X) + " = 0"
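One caveat when exercising this code: `model.interpretable_layers[0].weights` is a live `nn.Parameter`, and `torch.no_grad()` does not clear its `requires_grad` flag, so `.numpy()` will normally raise a RuntimeError unless the tensor is detached first. A standalone sketch of the same coefficient-threshold idea with an explicit `detach()` (the function name here is illustrative, not part of the package):

import numpy as np

def extract_formula_sketch(model, X, threshold=0.1):
    # Same extraction idea, with detach() so .numpy() is legal on a Parameter
    model.eval()
    weights = model.interpretable_layers[0].weights.detach().cpu().numpy()
    terms = [f"{np.abs(weights[i]).mean():.2f}*x{i+1}"
             for i in range(X.shape[1])
             if np.abs(weights[i]).mean() > threshold]
    return " + ".join(terms) if terms else "0"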
oikan-0.0.1.2/oikan/trainer.py ADDED
@@ -0,0 +1,37 @@
+import torch
+import torch.nn as nn
+from .regularization import RegularizedLoss
+
+def train(model, train_data, epochs=100, lr=0.01):
+    X_train, y_train = train_data
+    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
+    criterion = nn.MSELoss()
+    reg_loss = RegularizedLoss(criterion, model)
+
+    model.train()
+    for epoch in range(epochs):
+        optimizer.zero_grad()
+        outputs = model(X_train)
+        loss = reg_loss(outputs, y_train, X_train)
+        loss.backward()
+        optimizer.step()
+
+        if (epoch + 1) % 10 == 0:
+            print(f'Epoch [{epoch+1}/{epochs}], Loss: {loss.item():.4f}')
+
+def train_classification(model, train_data, epochs=100, lr=0.01):
+    X_train, y_train = train_data
+    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
+    criterion = nn.CrossEntropyLoss()
+    reg_loss = RegularizedLoss(criterion, model)
+
+    model.train()
+    for epoch in range(epochs):
+        optimizer.zero_grad()
+        outputs = model(X_train)
+        loss = reg_loss(outputs, y_train, X_train)
+        loss.backward()
+        optimizer.step()
+
+        if (epoch + 1) % 10 == 0:
+            print(f'Epoch [{epoch+1}/{epochs}], Loss: {loss.item():.4f}')
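Both trainers expect `train_data` to be a plain `(X, y)` tuple of full-batch tensors, not a `DataLoader`. A minimal sketch, assuming the `oikan` import path:

import torch
from oikan.model import OIKAN
from oikan.trainer import train, train_classification

# Regression: targets are float tensors with shape [n, output_dim]
X, y = torch.rand(64, 4), torch.rand(64, 1)
model = OIKAN(input_dim=4, output_dim=1)
train(model, (X, y), epochs=100, lr=0.01)  # prints loss every 10 epochs

# Classification: targets are class indices, output_dim = number of classes
Xc, yc = torch.rand(64, 4), torch.randint(0, 2, (64,))
clf = OIKAN(input_dim=4, output_dim=2)
train_classification(clf, (Xc, yc), epochs=100, lr=0.01)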
oikan-0.0.1.2/oikan/utils.py ADDED
@@ -0,0 +1,43 @@
+import torch
+import torch.nn as nn
+import numpy as np
+from scipy.interpolate import BSpline
+
+class BSplineBasis(nn.Module):
+    def __init__(self, num_knots=10, degree=3):
+        super().__init__()
+        self.num_knots = max(num_knots, degree + 5)  # Ensure minimum number of knots
+        self.degree = degree
+
+        # Create knot vector with proper padding
+        inner_knots = np.linspace(0, 1, self.num_knots - 2 * degree)
+        left_pad = np.zeros(degree)
+        right_pad = np.ones(degree)
+        knots = np.concatenate([left_pad, inner_knots, right_pad])
+
+        self.register_buffer('knots', torch.FloatTensor(knots))
+
+    def forward(self, x):
+        x_np = x.detach().cpu().numpy()
+        basis_values = np.zeros((x_np.shape[0], self.num_knots - self.degree - 1))
+
+        # Normalize input to [0,1] range
+        x_normalized = (x_np - x_np.min()) / (x_np.max() - x_np.min() + 1e-8)
+
+        for i in range(self.num_knots - self.degree - 1):
+            spl = BSpline.basis_element(self.knots[i:i+self.degree+2])
+            basis_values[:, i] = spl(x_normalized.squeeze())
+
+        # Replace NaN values with 0
+        basis_values = np.nan_to_num(basis_values, 0)
+        return torch.FloatTensor(basis_values).to(x.device)
+
+class FourierBasis(nn.Module):
+    def __init__(self, num_frequencies=5):
+        super().__init__()
+        self.num_frequencies = num_frequencies
+
+    def forward(self, x):
+        frequencies = torch.arange(1, self.num_frequencies + 1, device=x.device).float()
+        x_expanded = x * frequencies.view(1, -1) * 2 * np.pi
+        return torch.cat([torch.sin(x_expanded), torch.cos(x_expanded)], dim=1)
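A shape-check sketch for the two basis modules: `FourierBasis(k)` maps a `[n, 1]` column to `[n, 2k]` (k sines plus k cosines), which is why `EfficientKAN` constructs it with `hidden_units // 2`, while `BSplineBasis` with `num_knots=10, degree=3` produces `num_knots - degree - 1 = 6` basis columns.

import torch
from oikan.utils import BSplineBasis, FourierBasis

x = torch.rand(8, 1)  # one scalar feature per sample
print(FourierBasis(num_frequencies=5)(x).shape)      # torch.Size([8, 10])
print(BSplineBasis(num_knots=10, degree=3)(x).shape)  # torch.Size([8, 6])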
oikan-0.0.1.2/oikan/visualize.py ADDED
@@ -0,0 +1,37 @@
+import numpy as np
+import matplotlib.pyplot as plt
+import torch
+
+def visualize_regression(model, X, y):
+    model.eval()
+    with torch.no_grad():
+        X_tensor = torch.FloatTensor(X)
+        y_pred = model(X_tensor).numpy()
+
+    plt.figure(figsize=(10, 6))
+    plt.scatter(X[:, 0], y, color='blue', label='True')
+    plt.scatter(X[:, 0], y_pred, color='red', label='Predicted')
+    plt.legend()
+    plt.show()
+
+def visualize_classification(model, X, y):
+    model.eval()
+
+    # Create a mesh grid
+    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
+    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
+    xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),
+                         np.linspace(y_min, y_max, 100))
+
+    # Make predictions
+    with torch.no_grad():
+        X_grid = torch.FloatTensor(np.c_[xx.ravel(), yy.ravel()])
+        Z = model(X_grid)
+        Z = torch.argmax(Z, dim=1).numpy()
+        Z = Z.reshape(xx.shape)
+
+    # Plot
+    plt.figure(figsize=(10, 8))
+    plt.contourf(xx, yy, Z, alpha=0.4)
+    plt.scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
+    plt.show()
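Both helpers take NumPy arrays and call `plt.show()` directly, so they assume an interactive matplotlib backend. A minimal sketch for the classification view (synthetic data, illustrative only; in practice the model would be trained first):

import numpy as np
import torch
from oikan.model import OIKAN
from oikan.visualize import visualize_classification

X = np.random.randn(200, 2).astype(np.float32)
y = (X[:, 0] + X[:, 1] > 0).astype(np.int64)

model = OIKAN(input_dim=2, output_dim=2)
visualize_classification(model, X, y)  # decision regions + labeled scatter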
oikan-0.0.1.1/oikan/model.py DELETED
@@ -1,28 +0,0 @@
-import torch
-import torch.nn as nn
-
-# EfficientKAN Layer
-class EfficientKAN(nn.Module):
-    def __init__(self, input_dim, hidden_units=10):
-        super(EfficientKAN, self).__init__()
-        self.basis_functions = nn.ModuleList([nn.Linear(1, hidden_units) for _ in range(input_dim)])
-        self.activations = nn.ReLU()
-
-    def forward(self, x):
-        transformed_features = [self.activations(bf(x[:, i].unsqueeze(1))) for i, bf in enumerate(self.basis_functions)]
-        return torch.cat(transformed_features, dim=1)
-
-# OIKAN Model
-class OIKAN(nn.Module):
-    def __init__(self, input_dim, output_dim, hidden_units=10):
-        super(OIKAN, self).__init__()
-        self.efficientkan = EfficientKAN(input_dim, hidden_units)
-        self.mlp = nn.Sequential(
-            nn.Linear(input_dim * hidden_units, 32),
-            nn.ReLU(),
-            nn.Linear(32, output_dim)
-        )
-
-    def forward(self, x):
-        transformed_x = self.efficientkan(x)
-        return self.mlp(transformed_x)
oikan-0.0.1.1/oikan/symbolic.py DELETED
@@ -1,36 +0,0 @@
-import torch
-from sympy import symbols, simplify, Add
-
-# Regression symbolic extraction
-def extract_symbolic_formula_regression(model, input_data):
-    symbolic_vars = symbols([f'x{i}' for i in range(input_data.shape[1])])
-
-    with torch.no_grad():
-        weights = model.mlp[0].weight.cpu().numpy()
-        if weights.size == 0:
-            print("Warning: Extracted weights are empty.")
-            return "NaN"
-
-    formula = sum(weights[0, i] * symbolic_vars[i] for i in range(len(symbolic_vars)))
-    return simplify(formula)
-
-# Classification symbolic extraction
-def extract_symbolic_formula_classification(model, input_data):
-    """
-    Extracts a symbolic decision boundary for a two-class classifier.
-    Approximates:
-        decision = (w[0] - w[1]) · x + (b[0] - b[1])
-    where w and b are from the model's final linear layer.
-    """
-    symbolic_vars = symbols([f'x{i}' for i in range(input_data.shape[1])])
-    with torch.no_grad():
-        final_layer = model.mlp[-1]
-        w = final_layer.weight.cpu().numpy()
-        b = final_layer.bias.cpu().numpy()
-        if w.shape[0] < 2:
-            print("Classification symbolic extraction requires at least 2 classes.")
-            return "NaN"
-        w_diff = w[0] - w[1]
-        b_diff = b[0] - b[1]
-        formula = sum(w_diff[i] * symbolic_vars[i] for i in range(len(symbolic_vars))) + b_diff
-        return simplify(formula)
oikan-0.0.1.1/oikan/trainer.py DELETED
@@ -1,32 +0,0 @@
-import torch.optim as optim
-import torch.nn as nn
-
-# Regression training
-def train(model, train_loader, epochs=100, lr=0.01):
-    criterion = nn.MSELoss()
-    optimizer = optim.LBFGS(model.parameters(), lr=lr)
-
-    def closure():
-        optimizer.zero_grad()
-        outputs = model(train_loader[0])
-        loss = criterion(outputs, train_loader[1])
-        loss.backward()
-        print(f"Loss: {loss.item()}")
-        return loss
-
-    for epoch in range(epochs):
-        optimizer.step(closure)
-        print(f"Epoch {epoch+1}/{epochs}")
-
-# Classification training
-def train_classification(model, train_loader, epochs=100, lr=0.01):
-    criterion = nn.CrossEntropyLoss()
-    optimizer = optim.Adam(model.parameters(), lr=lr)
-
-    for epoch in range(epochs):
-        optimizer.zero_grad()
-        outputs = model(train_loader[0])
-        loss = criterion(outputs, train_loader[1])
-        loss.backward()
-        optimizer.step()
-        print(f"Epoch {epoch+1}/{epochs}, Loss: {loss.item()}")
oikan-0.0.1.1/oikan/visualize.py DELETED
@@ -1,20 +0,0 @@
-import matplotlib.pyplot as plt
-import torch
-
-# Regression Visualization Function
-def visualize_regression(model, X, y):
-    with torch.no_grad():
-        y_pred = model(torch.tensor(X, dtype=torch.float32)).numpy()
-        plt.scatter(X[:, 0], y, label='True Data')
-        plt.scatter(X[:, 0], y_pred, label='OIKAN Predictions', color='r')
-        plt.legend()
-        plt.show()
-
-# Classification visualization
-def visualize_classification(model, X, y):
-    with torch.no_grad():
-        outputs = model(torch.tensor(X, dtype=torch.float32))
-        preds = torch.argmax(outputs, dim=1).numpy()
-        plt.scatter(X[:, 0], X[:, 1], c=preds, cmap='viridis', edgecolor='k')
-        plt.title("Classification Results")
-        plt.show()