oikan-0.0.1.6-py3-none-any.whl → oikan-0.0.1.8-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
oikan/metrics.py ADDED
@@ -0,0 +1,23 @@
+import numpy as np
+import torch
+
+def evaluate_regression(model, X, y):
+    '''Evaluate regression performance by computing MSE, MAE, and RMSE.'''
+    with torch.no_grad():
+        y_pred = model(torch.FloatTensor(X)).numpy().ravel()
+    mse = np.mean((y - y_pred)**2)
+    mae = np.mean(np.abs(y - y_pred))
+    rmse = np.sqrt(mse)
+    print("Mean Squared Error:", mse)
+    print("Mean Absolute Error:", mae)
+    print("Root Mean Squared Error:", rmse)
+    return mse, mae, rmse
+
+def evaluate_classification(model, X, y):
+    '''Evaluate classification accuracy by comparing model predictions and true labels.'''
+    with torch.no_grad():
+        logits = model(torch.FloatTensor(X))
+        y_pred = torch.argmax(logits, dim=1).numpy()
+    accuracy = np.mean(y_pred == y)
+    print("Classification Accuracy:", accuracy)
+    return accuracy
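For context, a minimal usage sketch of the new metrics module; the linear stand-in model and synthetic data below are illustrative, not from the package:

```python
import numpy as np
import torch.nn as nn
from oikan.metrics import evaluate_regression

# Synthetic regression data (illustrative only)
X = np.random.rand(100, 2).astype(np.float32)
y = X[:, 0] + np.sin(X[:, 1])

# Any torch module mapping (N, 2) -> (N, 1) works here;
# a trained OIKAN model would normally take this place.
model = nn.Linear(2, 1)
mse, mae, rmse = evaluate_regression(model, X, y)
```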
oikan/model.py CHANGED
@@ -3,15 +3,18 @@ import torch.nn as nn
 from .utils import BSplineBasis, FourierBasis
 
 class AdaptiveBasisLayer(nn.Module):
+    '''Layer that applies a linear transformation as part of interpretable modeling.'''
     def __init__(self, input_dim, hidden_dim):
         super().__init__()
         self.weights = nn.Parameter(torch.randn(input_dim, hidden_dim))
         self.bias = nn.Parameter(torch.zeros(hidden_dim))
 
     def forward(self, x):
+        # Linear transformation for adaptive basis processing
         return torch.matmul(x, self.weights) + self.bias
 
 class EfficientKAN(nn.Module):
+    '''Module computing feature transformations using nonlinear basis functions and interaction terms.'''
     def __init__(self, input_dim, hidden_units=10, basis_type='bspline'):
         super().__init__()
         self.input_dim = input_dim
@@ -19,41 +22,48 @@ class EfficientKAN(nn.Module):
         self.basis_type = basis_type
 
         if basis_type == 'bspline':
+            # One B-spline basis per feature, with adjusted output dimensions
             self.basis_functions = nn.ModuleList([BSplineBasis(hidden_units) for _ in range(input_dim)])
             self.basis_output_dim = input_dim * (hidden_units - 4)  # Adjusted for BSpline output
         elif basis_type == 'fourier':
+            # Use a Fourier basis transformation for each feature
            self.basis_functions = nn.ModuleList([FourierBasis(hidden_units//2) for _ in range(input_dim)])
            self.basis_output_dim = input_dim * hidden_units
 
-        # Grid-based interaction layer
+        # Interaction layer: captures pairwise feature interactions
         self.interaction_weights = nn.Parameter(torch.randn(input_dim, input_dim))
 
     def forward(self, x):
-        # Transform each feature using basis functions
+        # Transform each feature using its corresponding basis function
         transformed_features = [bf(x[:, i].unsqueeze(1)) for i, bf in enumerate(self.basis_functions)]
         basis_output = torch.cat(transformed_features, dim=1)
 
-        # Compute feature interactions - fixed matrix multiplication
+        # Compute interaction features via batched matrix multiplication
         batch_size = x.size(0)
-        x_reshaped = x.view(batch_size, self.input_dim, 1) # [batch_size, input_dim, 1]
-        interaction_matrix = torch.sigmoid(self.interaction_weights) # [input_dim, input_dim]
+        x_reshaped = x.view(batch_size, self.input_dim, 1)  # Reshape to [batch_size, input_dim, 1]
+        interaction_matrix = torch.sigmoid(self.interaction_weights)  # Normalize interaction weights to (0, 1)
         interaction_features = torch.bmm(x_reshaped.transpose(1, 2),
-                                         x_reshaped * interaction_matrix.unsqueeze(0)) # [batch_size, 1, 1]
-        interaction_features = interaction_features.view(batch_size, -1) # [batch_size, 1]
+                                         x_reshaped * interaction_matrix.unsqueeze(0))  # Result: [batch_size, 1, input_dim] after broadcasting
+        interaction_features = interaction_features.view(batch_size, -1)  # Flatten to [batch_size, input_dim]
 
         return torch.cat([basis_output, interaction_features], dim=1)
 
     def get_output_dim(self):
+        # Output dimension includes both basis and interaction features
         return self.basis_output_dim + self.input_dim
 
 class OIKAN(nn.Module):
-    def __init__(self, input_dim, output_dim, hidden_units=10):
+    '''Main OIKAN model combining nonlinear transformations, SVD-projection, and interpretable layers.'''
+    def __init__(self, input_dim, output_dim, hidden_units=10, reduced_dim=32):
         super().__init__()
         self.efficientkan = EfficientKAN(input_dim, hidden_units)
-
-        # Get actual feature dimension after transformation
         feature_dim = self.efficientkan.get_output_dim()
 
+        # Apply SVD projection to compress high-dimensional features
+        self.svd_projection = nn.Linear(feature_dim, reduced_dim, bias=False)
+        feature_dim = reduced_dim  # Update feature dimension after projection
+
+        # Interpretable layers for the final mapping
         self.interpretable_layers = nn.Sequential(
             AdaptiveBasisLayer(feature_dim, 32),
             nn.ReLU(),
@@ -62,4 +72,6 @@ class OIKAN(nn.Module):
 
     def forward(self, x):
         transformed_x = self.efficientkan(x)
+        # Compress features prior to the final prediction layer
+        transformed_x = self.svd_projection(transformed_x)
         return self.interpretable_layers(transformed_x)
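A shape-check sketch of the new projection path, assuming the 0.0.1.8 modules above are importable; the expected sizes follow from the code shown, not from package docs:

```python
import torch
from oikan.model import OIKAN

model = OIKAN(input_dim=2, output_dim=1, hidden_units=10, reduced_dim=32)
x = torch.rand(8, 2)

feats = model.efficientkan(x)
print(feats.shape)                        # (8, 14): 2*(10-4) basis + 2 interaction features
print(model.svd_projection(feats).shape)  # (8, 32): compressed to reduced_dim
print(model(x).shape)                     # expected (8, 1) given output_dim=1
```

Note that despite the name, `svd_projection` is a learned `nn.Linear` map; no explicit SVD is computed at this point.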
oikan/regularization.py CHANGED
@@ -3,21 +3,21 @@ import torch.nn as nn
 
 class RegularizedLoss:
     def __init__(self, base_criterion, model, l1_lambda=0.01, gradient_lambda=0.01):
-        self.base_criterion = base_criterion
+        self.base_criterion = base_criterion  # Primary loss (e.g. MSE, CrossEntropy)
         self.model = model
         self.l1_lambda = l1_lambda
         self.gradient_lambda = gradient_lambda
 
     def __call__(self, pred, target, inputs):
+        # Compute the standard loss
         base_loss = self.base_criterion(pred, target)
 
-        # L1 regularization
+        # Calculate L1 regularization to promote sparsity
         l1_loss = 0
         for param in self.model.parameters():
             l1_loss += torch.norm(param, p=1)
 
-        # Gradient penalty
-        grad_penalty = 0
+        # Compute gradient penalty to enforce smoothness
         inputs.requires_grad_(True)
         outputs = self.model(inputs)
         gradients = torch.autograd.grad(
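The hunk cuts off inside the gradient computation, so the exact combination is not visible here. A self-contained sketch of the two penalties under the usual formulation (the toy model and all names below are illustrative):

```python
import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(2, 8), nn.Tanh(), nn.Linear(8, 1))
inputs = torch.rand(16, 2, requires_grad=True)
outputs = model(inputs)

# L1 penalty over all parameters, promoting sparsity
l1_loss = sum(torch.norm(param, p=1) for param in model.parameters())

# Gradient penalty: mean squared norm of d(output)/d(input), enforcing smoothness
grads = torch.autograd.grad(outputs, inputs,
                            grad_outputs=torch.ones_like(outputs),
                            create_graph=True)[0]
grad_penalty = grads.pow(2).sum(dim=1).mean()

# Assumed combination: total = base_loss + l1_lambda * l1_loss + gradient_lambda * grad_penalty
```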
oikan/symbolic.py CHANGED
@@ -17,9 +17,9 @@ ADVANCED_LIB = {
     'abs': lambda x: np.abs(x)
 }
 
-# STEP-1: Helper functions
+# Helper functions
 def get_model_predictions(model, X, mode):
-    """Compute model predictions and return target values (and raw preds for classification)."""
+    """Obtain model predictions; returns flattened predictions for regression, raw outputs for classification."""
     X_tensor = torch.FloatTensor(X)
     with torch.no_grad():
         preds = model(X_tensor)
@@ -27,16 +27,17 @@ def get_model_predictions(model, X, mode):
         return preds.detach().cpu().numpy().flatten(), None
     elif mode == 'classification':
         out = preds.detach().cpu().numpy()
+        # In classification, compute a target difference or fall back to flattening
         target = (out[:, 0] - out[:, 1]).flatten() if (out.ndim > 1 and out.shape[1] > 1) else out.flatten()
         return target, out
     else:
         raise ValueError("Unknown mode")
 
 def build_design_matrix(X, return_names=False):
-    """Build the design matrix using the advanced nonlinear bases."""
+    """Construct the design matrix from advanced nonlinear bases, with optional feature names."""
     X_np = np.array(X)
     n_samples, d = X_np.shape
-    F_parts = [np.ones((n_samples, 1))]
+    F_parts = [np.ones((n_samples, 1))]  # Bias term
     names = ['1'] if return_names else None
     for j in range(d):
         xj = X_np[:, j:j+1]
@@ -46,23 +47,20 @@
             names.append(f"{key}(x{j+1})")
     return (np.hstack(F_parts), names) if return_names else np.hstack(F_parts)
 
-# STEP-2: Main functions using helpers
+# Main functions using helpers
 def extract_symbolic_formula(model, X, mode='regression'):
     """
-    Approximate a symbolic formula from the model using advanced nonlinear bases.
+    Approximate a symbolic formula that represents model behavior using nonlinear bases.
     """
-    n_samples = np.array(X).shape[0]
     y_target, _ = get_model_predictions(model, X, mode)
     F, func_names = build_design_matrix(X, return_names=True)
     beta, _, _, _ = np.linalg.lstsq(F, y_target, rcond=None)
+    # Only include terms with significant coefficients
     terms = [f"({c:.2f}*{name})" for c, name in zip(beta, func_names) if abs(c) > 1e-4]
     return " + ".join(terms)
 
 def test_symbolic_formula(model, X, mode='regression'):
-    """
-    Evaluate the extracted symbolic formula against model outputs.
-    """
-    n_samples = np.array(X).shape[0]
+    """Evaluate the symbolic approximation against the model by computing error metrics."""
     y_target, out = get_model_predictions(model, X, mode)
     F = build_design_matrix(X, return_names=False)
     beta, _, _, _ = np.linalg.lstsq(F, y_target, rcond=None)
@@ -83,19 +81,19 @@ def test_symbolic_formula(model, X, mode='regression'):
     return accuracy
 
 def plot_symbolic_formula(model, X, mode='regression'):
-    """
-    Plot a graph representation of the extracted symbolic formula.
-    """
+    """Plot a graph representation of the extracted symbolic formula."""
     formula = extract_symbolic_formula(model, X, mode)
     G = nx.DiGraph()
     G.add_node("Output")
     terms = formula.split(" + ")
+    # Add nodes for each term with coefficient information
     for term in terms:
         expr = term.strip("()")
         coeff_str, basis = expr.split("*", 1) if "*" in expr else (expr, "unknown")
         node_label = f"{basis}\n({float(coeff_str):.2f})"
         G.add_node(node_label)
         G.add_edge(node_label, "Output", weight=float(coeff_str))
+    # Position nodes for visualization
     left_nodes = [n for n in G.nodes() if n != "Output"]
     pos = {}
     n_left = len(left_nodes)
@@ -112,9 +110,7 @@ def plot_symbolic_formula(model, X, mode='regression'):
     plt.show()
 
 def extract_latex_formula(model, X, mode='regression'):
-    """
-    Return the extracted symbolic formula as LaTeX code.
-    """
+    """Return the extracted symbolic formula formatted as LaTeX code."""
     formula = extract_symbolic_formula(model, X, mode)
     terms = formula.split(" + ")
     latex_terms = []
@@ -122,6 +118,7 @@ def extract_latex_formula(model, X, mode='regression'):
         expr = term.strip("()")
         coeff_str, basis = expr.split("*", 1) if "*" in expr else (expr, "")
         coeff = float(coeff_str)
+        # Balance parentheses if required
        missing = basis.count("(") - basis.count(")")
        if missing > 0:
            basis = basis + ")" * missing
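The extraction pipeline is a least-squares fit of model outputs onto a nonlinear basis library. A self-contained numpy sketch of that step (toy basis and target, not the package's ADVANCED_LIB):

```python
import numpy as np

# Pretend these are model predictions over a single-feature input
X = np.random.rand(200, 1)
y_target = 3.0 * np.sin(X[:, 0]) + 0.5

# Design matrix: bias term plus a small basis library
F = np.column_stack([np.ones(len(X)), X[:, 0], np.sin(X[:, 0])])
names = ['1', 'x1', 'sin(x1)']

beta, _, _, _ = np.linalg.lstsq(F, y_target, rcond=None)
terms = [f"({c:.2f}*{n})" for c, n in zip(beta, names) if abs(c) > 1e-4]
print(" + ".join(terms))   # approximately (0.50*1) + (3.00*sin(x1))
```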
oikan/trainer.py CHANGED
@@ -3,6 +3,7 @@ import torch.nn as nn
 from .regularization import RegularizedLoss
 
 def train(model, train_data, epochs=100, lr=0.01):
+    '''Train a regression model using MSE loss with regularization.'''
     X_train, y_train = train_data
     optimizer = torch.optim.Adam(model.parameters(), lr=lr)
     criterion = nn.MSELoss()
@@ -10,16 +11,18 @@ def train(model, train_data, epochs=100, lr=0.01):
 
     model.train()
     for epoch in range(epochs):
-        optimizer.zero_grad()
+        optimizer.zero_grad()  # Reset gradients
         outputs = model(X_train)
+        # Compute loss including regularization penalties
         loss = reg_loss(outputs, y_train, X_train)
-        loss.backward()
-        optimizer.step()
+        loss.backward()  # Backpropagate errors
+        optimizer.step()  # Update parameters
 
         if (epoch + 1) % 10 == 0:
             print(f'Epoch [{epoch+1}/{epochs}], Loss: {loss.item():.4f}')
 
 def train_classification(model, train_data, epochs=100, lr=0.01):
+    '''Train a classification model using CrossEntropy loss with regularization.'''
     X_train, y_train = train_data
     optimizer = torch.optim.Adam(model.parameters(), lr=lr)
     criterion = nn.CrossEntropyLoss()
@@ -27,11 +30,12 @@ def train_classification(model, train_data, epochs=100, lr=0.01):
 
     model.train()
     for epoch in range(epochs):
-        optimizer.zero_grad()
+        optimizer.zero_grad()  # Reset gradients each epoch
         outputs = model(X_train)
+        # Loss includes both cross-entropy and regularization terms
        loss = reg_loss(outputs, y_train, X_train)
-        loss.backward()
-        optimizer.step()
+        loss.backward()  # Backpropagation
+        optimizer.step()  # Parameter update
 
        if (epoch + 1) % 10 == 0:
            print(f'Epoch [{epoch+1}/{epochs}], Loss: {loss.item():.4f}')
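An end-to-end regression sketch using the trainer, assuming the modules above behave as shown; the data and hyperparameters are synthetic and illustrative:

```python
import torch
from oikan.model import OIKAN
from oikan.trainer import train

X_train = torch.rand(64, 2)
y_train = (X_train[:, 0] + X_train[:, 1]).unsqueeze(1)  # toy target of shape (64, 1)

model = OIKAN(input_dim=2, output_dim=1)
train(model, (X_train, y_train), epochs=20, lr=0.01)    # prints loss every 10 epochs
```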
oikan/utils.py CHANGED
@@ -4,40 +4,41 @@ import numpy as np
 from scipy.interpolate import BSpline
 
 class BSplineBasis(nn.Module):
+    '''Module to compute B-Spline basis values for input features.'''
     def __init__(self, num_knots=10, degree=3):
         super().__init__()
-        self.num_knots = max(num_knots, degree + 5) # Ensure minimum number of knots
+        # Ensure ample knots relative to degree
+        self.num_knots = max(num_knots, degree + 5)
         self.degree = degree
-
-        # Create knot vector with proper padding
+        # Create inner knots uniformly in [0,1]
         inner_knots = np.linspace(0, 1, self.num_knots - 2 * degree)
         left_pad = np.zeros(degree)
         right_pad = np.ones(degree)
         knots = np.concatenate([left_pad, inner_knots, right_pad])
-
         self.register_buffer('knots', torch.FloatTensor(knots))
 
     def forward(self, x):
+        # Convert tensor to numpy for BSpline evaluation
         x_np = x.detach().cpu().numpy()
         basis_values = np.zeros((x_np.shape[0], self.num_knots - self.degree - 1))
-
-        # Normalize input to [0,1] range
-        x_normalized = (x_np - x_np.min()) / (x_np.max() - x_np.min() + 1e-8)
-
+        # Normalize input for stable spline evaluation
+        x_min, x_max = x_np.min(), x_np.max()
+        x_normalized = (x_np - x_min) / (x_max - x_min + 1e-8)
         for i in range(self.num_knots - self.degree - 1):
+            # Create BSpline basis function for a subset of knots
            spl = BSpline.basis_element(self.knots[i:i+self.degree+2])
            basis_values[:, i] = spl(x_normalized.squeeze())
-
-        # Replace NaN values with 0
        basis_values = np.nan_to_num(basis_values, 0)
        return torch.FloatTensor(basis_values).to(x.device)
 
 class FourierBasis(nn.Module):
+    '''Module to compute Fourier basis representations for input features.'''
     def __init__(self, num_frequencies=5):
         super().__init__()
         self.num_frequencies = num_frequencies
 
     def forward(self, x):
-        frequencies = torch.arange(1, self.num_frequencies + 1, device=x.device).float()
+        # Create a range of frequencies and compute sine and cosine transforms
+        frequencies = torch.arange(1, self.num_frequencies + 1, device=x.device, dtype=torch.float)
         x_expanded = x * frequencies.view(1, -1) * 2 * np.pi
         return torch.cat([torch.sin(x_expanded), torch.cos(x_expanded)], dim=1)
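A quick shape check for the basis modules, assuming the file above: FourierBasis maps one input column to num_frequencies sine/cosine pairs, while BSplineBasis yields num_knots - degree - 1 values per column:

```python
import torch
from oikan.utils import BSplineBasis, FourierBasis

x = torch.rand(8, 1)              # one feature column, as EfficientKAN feeds it

fb = FourierBasis(num_frequencies=5)
print(fb(x).shape)                # (8, 10): 5 sines + 5 cosines

bs = BSplineBasis(num_knots=10, degree=3)
print(bs(x).shape)                # (8, 6): num_knots - degree - 1 basis values
```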
oikan/visualize.py CHANGED
@@ -3,67 +3,37 @@ import matplotlib.pyplot as plt
 import torch
 
 def visualize_regression(model, X, y):
+    '''Visualize regression results via a scatter plot, comparing true vs predicted values.'''
     model.eval()
     with torch.no_grad():
-        X_tensor = torch.FloatTensor(X)
-        y_pred = model(X_tensor).numpy()
-
+        y_pred = model(torch.FloatTensor(X)).numpy()
     plt.figure(figsize=(10, 6))
+    # Plot true values vs predictions
     plt.scatter(X[:, 0], y, color='blue', label='True')
     plt.scatter(X[:, 0], y_pred, color='red', label='Predicted')
     plt.legend()
+    plt.title("Regression: True vs Predicted")
+    plt.xlabel("Feature 1")
+    plt.ylabel("Output")
     plt.show()
 
 def visualize_classification(model, X, y):
+    '''Visualize classification decision boundaries for 2D input data.'''
     model.eval()
-
-    if X.shape[1] > 2:
-        # SVD projection for high-dimensional inputs.
-        X_mean = np.mean(X, axis=0)
-        X_centered = X - X_mean
-        _, _, Vt = np.linalg.svd(X_centered, full_matrices=False)
-        principal = Vt[:2] # shape: (2, D)
-        X_proj = (X - X_mean) @ principal.T
-
-        x1, x2 = X_proj[:, 0], X_proj[:, 1]
-        x_min, x_max = x1.min() - 1, x1.max() + 1
-        y_min, y_max = x2.min() - 1, x2.max() + 1
-        xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),
-                             np.linspace(y_min, y_max, 100))
-        grid_2d = np.c_[xx.ravel(), yy.ravel()]
-        # Inverse transform grid points to original space.
-        X_grid = X_mean + grid_2d @ principal
-
-        with torch.no_grad():
-            X_grid_tensor = torch.FloatTensor(X_grid)
-            Z = model(X_grid_tensor)
-            Z = torch.argmax(Z, dim=1).numpy()
-        Z = Z.reshape(xx.shape)
-
-        plt.figure(figsize=(10, 8))
-        plt.contourf(xx, yy, Z, alpha=0.4)
-        plt.scatter(X_proj[:, 0], X_proj[:, 1], c=y, alpha=0.8)
-        plt.title("Classification Visualization (SVD Projection)")
-        plt.show()
-
-    else:
-        x1 = X[:, 0]
-        x2 = X[:, 1]
-        x_min, x_max = x1.min() - 1, x1.max() + 1
-        y_min, y_max = x2.min() - 1, x2.max() + 1
-        xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),
-                             np.linspace(y_min, y_max, 100))
-        grid_2d = np.c_[xx.ravel(), yy.ravel()]
-        X_grid = grid_2d
-
-        with torch.no_grad():
-            X_grid_tensor = torch.FloatTensor(X_grid)
-            Z = model(X_grid_tensor)
-            Z = torch.argmax(Z, dim=1).numpy()
-        Z = Z.reshape(xx.shape)
-
-        plt.figure(figsize=(10, 8))
-        plt.contourf(xx, yy, Z, alpha=0.4)
-        plt.scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
-        plt.title("Classification Visualization")
-        plt.show()
+    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
+    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
+    xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),
+                         np.linspace(y_min, y_max, 100))
+    grid_2d = np.c_[xx.ravel(), yy.ravel()]
+    with torch.no_grad():
+        # Compute prediction for each point in the grid
+        Z = model(torch.FloatTensor(grid_2d))
+        Z = torch.argmax(Z, dim=1).numpy().reshape(xx.shape)
+    plt.figure(figsize=(10, 8))
+    # Draw decision boundaries and scatter the data
+    plt.contourf(xx, yy, Z, alpha=0.4)
+    plt.scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
+    plt.title("Classification Visualization")
+    plt.xlabel("Feature 1")
+    plt.ylabel("Feature 2")
+    plt.show()
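A usage sketch for the simplified classifier plot, assuming the modules above. Note that 0.0.1.8 drops the SVD-projection branch, so X must already be 2D; the untrained model here only illustrates the call:

```python
import numpy as np
from oikan.model import OIKAN
from oikan.visualize import visualize_classification

X = np.random.rand(200, 2).astype(np.float32)
y = (X[:, 0] > X[:, 1]).astype(np.int64)   # two synthetic classes

model = OIKAN(input_dim=2, output_dim=2)   # untrained; boundaries are illustrative
visualize_classification(model, X, y)
```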
{oikan-0.0.1.6.dist-info → oikan-0.0.1.8.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: oikan
-Version: 0.0.1.6
+Version: 0.0.1.8
 Summary: OIKAN: Optimized Interpretable Kolmogorov-Arnold Networks
 Author: Arman Zhalgasbayev
 License: MIT
@@ -23,17 +23,26 @@ A deep learning framework for interpretable neural networks using advanced basis
 
 [![PyPI version](https://badge.fury.io/py/oikan.svg)](https://badge.fury.io/py/oikan)
 [![PyPI downloads](https://img.shields.io/pypi/dm/oikan.svg)](https://pypistats.org/packages/oikan)
+[![License](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
+[![GitHub issues](https://img.shields.io/github/issues/silvermete0r/OIKAN.svg)](https://github.com/silvermete0r/oikan/issues)
+[![Docs](https://img.shields.io/badge/docs-passing-brightgreen)](https://silvermete0r.github.io/oikan/)
 
 ## Key Features
-- EfficientKAN layer implementation
-- Built-in visualization tools
-- Support for both regression and classification tasks
-- Symbolic formula extraction
-- Easy-to-use training interface
-- LaTeX-formatted formula extraction
+- 🚀 Efficient Implementation ~ Optimized KAN architecture with SVD projection
+- 📊 Advanced Basis Functions ~ B-spline and Fourier basis transformations
+- 🎯 Multi-Task Support ~ Both regression and classification capabilities
+- 🔍 Interpretability Tools ~ Extract and visualize symbolic formulas
+- 📈 Interactive Visualizations ~ Built-in plotting and analysis tools
+- 🧮 Symbolic Mathematics ~ LaTeX formula extraction and symbolic approximations
 
 ## Installation
 
+### Method 1: Via PyPI (Recommended)
+```bash
+pip install oikan
+```
+
+### Method 2: Local Development
 ```bash
 git clone https://github.com/silvermete0r/OIKAN.git
 cd OIKAN
@@ -46,7 +55,8 @@ pip install -e . # Install in development mode
 ```python
 from oikan.model import OIKAN
 from oikan.trainer import train
-from oikan.symbolic import extract_symbolic_formula
+from oikan.visualize import visualize_regression
+from oikan.symbolic import extract_symbolic_formula, plot_symbolic_formula, extract_latex_formula
 
 model = OIKAN(input_dim=2, output_dim=1)
 train(model, (X_train, y_train))
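The README hunk ends just after training; a sketch of how the newly imported helpers would plug in, assuming synthetic data for X_train/y_train and the module behavior shown earlier in this diff:

```python
import torch
from oikan.model import OIKAN
from oikan.trainer import train
from oikan.visualize import visualize_regression
from oikan.symbolic import extract_symbolic_formula, extract_latex_formula

X_train = torch.rand(64, 2)
y_train = (X_train[:, 0] - X_train[:, 1]).unsqueeze(1)   # toy target

model = OIKAN(input_dim=2, output_dim=1)
train(model, (X_train, y_train))

visualize_regression(model, X_train.numpy(), y_train.numpy().ravel())
print(extract_symbolic_formula(model, X_train.numpy(), mode='regression'))
print(extract_latex_formula(model, X_train.numpy(), mode='regression'))
```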
oikan-0.0.1.8.dist-info/RECORD ADDED
@@ -0,0 +1,13 @@
+oikan/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+oikan/metrics.py,sha256=65txPbjhTz7lCXCLtTAJTS4E5Hx42wzZ3jKar3lH_bY,860
+oikan/model.py,sha256=zlw_4HbSK3IiQhE8M4NitvrXa7vffBWsOR-HLRSJADA,3944
+oikan/regularization.py,sha256=xt8JNnPdHRAQgzF_vnyme005hWLunz9Vo2qw6m08NMM,1145
+oikan/symbolic.py,sha256=RRYHOCOCJr5KXRhdcCPvT_OqyNcCnWCWt7fOtos8rRI,5765
+oikan/trainer.py,sha256=S-23uwmQ3Kx1FnE-dKd76zTZjvaV0VUZoChUsNzjcwk,1672
+oikan/utils.py,sha256=xbVgrbhXYj57RdD3uNPchjyfmP6Kur7tngoZPa3qWOw,2094
+oikan/visualize.py,sha256=VpIzWpwoZihQ0gPSRjsEuKSxqHf1SiKxLynOzZ4P6HE,1539
+oikan-0.0.1.8.dist-info/LICENSE,sha256=75ASVmU-XIpN-M4LbVmJ_ibgbzbvRLVti8FhnR0BTf8,1096
+oikan-0.0.1.8.dist-info/METADATA,sha256=aDbshPny4TxIE_tZt0I8I_T_9tRwkomC6zC1BHT4knw,3738
+oikan-0.0.1.8.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+oikan-0.0.1.8.dist-info/top_level.txt,sha256=XwnwKwTJddZwIvtrUsAz-l-58BJRj6HjAGWrfYi_3QY,6
+oikan-0.0.1.8.dist-info/RECORD,,
oikan-0.0.1.6.dist-info/RECORD DELETED
@@ -1,12 +0,0 @@
-oikan/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-oikan/model.py,sha256=9_U3jh1YwASbLOgHpFm4F80J3QGEhzIgQHNkqbZCPJs,2920
-oikan/regularization.py,sha256=D0Xc2lr5X5ORdA5ltvWDbNDuN8z0hkyoGzFo7pum2XE,1033
-oikan/symbolic.py,sha256=K1aI5JEPgKFu8dyjXxWDA-UZm8Gvfp0lU1M7c2NAPLY,5517
-oikan/trainer.py,sha256=itFCHSR_T6KHqa0D5RLRCmqFHa4lUIamsFGWKHmUZuI,1258
-oikan/utils.py,sha256=XwY6pgAgfYlUI9SOjdop91wh0_t6LfPLCiHretlw2Wg,1754
-oikan/visualize.py,sha256=7GTvfeFxrezHKMM9QmYaax75rMnTxMH0KYi_fbAw4-M,2501
-oikan-0.0.1.6.dist-info/LICENSE,sha256=75ASVmU-XIpN-M4LbVmJ_ibgbzbvRLVti8FhnR0BTf8,1096
-oikan-0.0.1.6.dist-info/METADATA,sha256=GOWOqXAAezQYOHMgxFn8sI2wlJotgesFA_8Yaiu2XS8,2962
-oikan-0.0.1.6.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
-oikan-0.0.1.6.dist-info/top_level.txt,sha256=XwnwKwTJddZwIvtrUsAz-l-58BJRj6HjAGWrfYi_3QY,6
-oikan-0.0.1.6.dist-info/RECORD,,