oikan 0.0.2.2__py3-none-any.whl → 0.0.2.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- oikan/model.py +49 -27
- oikan/utils.py +3 -9
- {oikan-0.0.2.2.dist-info → oikan-0.0.2.4.dist-info}/METADATA +77 -86
- oikan-0.0.2.4.dist-info/RECORD +10 -0
- oikan-0.0.2.2.dist-info/RECORD +0 -10
- {oikan-0.0.2.2.dist-info → oikan-0.0.2.4.dist-info}/WHEEL +0 -0
- {oikan-0.0.2.2.dist-info → oikan-0.0.2.4.dist-info}/licenses/LICENSE +0 -0
- {oikan-0.0.2.2.dist-info → oikan-0.0.2.4.dist-info}/top_level.txt +0 -0
oikan/model.py
CHANGED
@@ -30,7 +30,10 @@ class KANLayer(nn.Module):
             for _ in range(input_dim)
         ])
 
-
+        # Updated initialization using Xavier uniform initialization
+        self.combination_weights = nn.Parameter(
+            nn.init.xavier_uniform_(torch.empty(input_dim, output_dim))
+        )
 
     def forward(self, x):
         x_split = x.split(1, dim=1)  # list of (batch, 1) tensors for each input feature
@@ -49,7 +52,8 @@ class KANLayer(nn.Module):
             for i in range(self.input_dim):
                 weight = self.combination_weights[i, j].item()
                 if abs(weight) > 1e-4:
-
+                    # Pass lower threshold for improved precision
+                    edge_formula = self.edges[i][j].get_symbolic_repr(threshold=1e-6)
                     if edge_formula != "0":
                         terms.append(f"({weight:.4f} * ({edge_formula}))")
             formulas.append(" + ".join(terms) if terms else "0")
@@ -57,15 +61,13 @@ class KANLayer(nn.Module):
 
 class BaseOIKAN(BaseEstimator):
     """Base OIKAN model implementing common functionality"""
-    def __init__(self, hidden_dims=[
+    def __init__(self, hidden_dims=[32, 16], dropout=0.1):
         self.hidden_dims = hidden_dims
-        self.num_basis = num_basis
-        self.degree = degree
         self.dropout = dropout  # Dropout probability for uncertainty quantification
         self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # Auto device chooser
         self.model = None
         self._is_fitted = False
-        self.__name = "OIKAN v0.0.2"
+        self.__name = "OIKAN v0.0.2"  # Manual configured version
         self.loss_history = []  # <-- new attribute to store loss values
 
     def _build_network(self, input_dim, output_dim):
@@ -73,7 +75,9 @@ class BaseOIKAN(BaseEstimator):
         prev_dim = input_dim
         for hidden_dim in self.hidden_dims:
             layers.append(KANLayer(prev_dim, hidden_dim))
-            layers.append(nn.
+            layers.append(nn.BatchNorm1d(hidden_dim))  # Added batch normalization
+            layers.append(nn.ReLU())  # Added activation function
+            layers.append(nn.Dropout(self.dropout))  # Apply dropout for uncertainty quantification
             prev_dim = hidden_dim
         layers.append(KANLayer(prev_dim, output_dim))
         return nn.Sequential(*layers).to(self.device)
@@ -85,6 +89,25 @@ class BaseOIKAN(BaseEstimator):
         y = torch.FloatTensor(y)
         return X.to(self.device), (y.to(self.device) if y is not None else None)
 
+    def _process_edge_formula(self, edge_formula, weight):
+        """Helper to scale symbolic formula terms by a given weight"""
+        terms = []
+        for term in edge_formula.split(" + "):
+            if term and term != "0":
+                if "*" in term:
+                    coef_str, rest = term.split("*", 1)
+                    try:
+                        coef = float(coef_str)
+                        terms.append(f"{(coef * weight):.4f}*{rest}")
+                    except Exception:
+                        terms.append(term)  # fallback
+                else:
+                    try:
+                        terms.append(f"{(float(term) * weight):.4f}")
+                    except Exception:
+                        terms.append(term)
+        return " + ".join(terms) if terms else "0"
+
     def get_symbolic_formula(self):
         """Generate and cache symbolic formulas for production-ready inference."""
         if not self._is_fitted:
@@ -100,17 +123,9 @@ class BaseOIKAN(BaseEstimator):
             for j in range(n_classes):
                 weight = first_layer.combination_weights[i, j].item()
                 if abs(weight) > 1e-4:
-
-
-
-                    if term and term != "0":
-                        if "*" in term:
-                            coef, rest = term.split("*", 1)
-                            coef = float(coef) * weight
-                            terms.append(f"{coef:.4f}*{rest}")
-                        else:
-                            terms.append(f"{float(term)*weight:.4f}")
-                    formulas[i][j] = " + ".join(terms) if terms else "0"
+                    # Use improved threshold for formula extraction
+                    edge_formula = first_layer.edges[i][j].get_symbolic_repr(threshold=1e-6)
+                    formulas[i][j] = self._process_edge_formula(edge_formula, weight)
                 else:
                     formulas[i][j] = "0"
         self.symbolic_formula = formulas
@@ -119,8 +134,9 @@ class BaseOIKAN(BaseEstimator):
             formulas = []
             first_layer = self.model[0]
             for i in range(first_layer.input_dim):
-                formula
-
+                # Use improved threshold for formula extraction in regressor branch
+                edge_formula = first_layer.edges[i][0].get_symbolic_repr(threshold=1e-6)
+                formulas.append(self._process_edge_formula(edge_formula, 1.0))
             self.symbolic_formula = formulas
             return formulas
 
@@ -131,7 +147,7 @@ class BaseOIKAN(BaseEstimator):
         - A header with the version and timestamp
         - The symbolic formulas for each feature (and class for classification)
         - A general formula, including softmax for classification
-        - Recommendations
+        - Recommendations and performance results.
         """
         header = f"Generated by {self.__name} | Timestamp: {dt.now()}\n\n"
         header += "Symbolic Formulas:\n"
@@ -157,8 +173,14 @@ class BaseOIKAN(BaseEstimator):
         recs = ("\nRecommendations:\n"
                 "• Consider the symbolic formula for lightweight and interpretable inference.\n"
                 "• Validate approximation accuracy against the neural model.\n")
+
+        # Disclaimer regarding experimental usage
+        disclaimer = ("\nDisclaimer:\n"
+                      "This experimental model is intended for research purposes only and is not production-ready. "
+                      "Feel free to fork and build your own project based on this research: "
+                      "https://github.com/silvermete0r/oikan\n")
 
-        output = header + formulas_text + general + recs
+        output = header + formulas_text + general + recs + disclaimer
         with open(filename, "w") as f:
             f.write(output)
         print(f"Symbolic formulas saved to {filename}")
@@ -263,7 +285,7 @@ class BaseOIKAN(BaseEstimator):
 
 class OIKANRegressor(BaseOIKAN, RegressorMixin):
     """OIKAN implementation for regression tasks"""
-    def fit(self, X, y, epochs=100, lr=0.01,
+    def fit(self, X, y, epochs=100, lr=0.01, verbose=True):
         X, y = self._validate_data(X, y)
         if len(y.shape) == 1:
             y = y.reshape(-1, 1)
@@ -284,7 +306,7 @@ class OIKANRegressor(BaseOIKAN, RegressorMixin):
             if torch.isnan(loss):
                 print("Warning: NaN loss detected, reinitializing model...")
                 self.model = None
-                return self.fit(X, y, epochs, lr/10,
+                return self.fit(X, y, epochs, lr/10, verbose)
 
             loss.backward()
 
@@ -312,7 +334,7 @@ class OIKANRegressor(BaseOIKAN, RegressorMixin):
 
 class OIKANClassifier(BaseOIKAN, ClassifierMixin):
     """OIKAN implementation for classification tasks"""
-    def fit(self, X, y, epochs=100, lr=0.01,
+    def fit(self, X, y, epochs=100, lr=0.01, verbose=True):
         X, y = self._validate_data(X, y)
         self.classes_ = torch.unique(y)
         n_classes = len(self.classes_)
@@ -414,8 +436,8 @@ class OIKANClassifier(BaseOIKAN, ClassifierMixin):
                 weight = first_layer.combination_weights[i, j].item()
 
                 if abs(weight) > 1e-4:
-                    #
-                    edge_formula = edge.get_symbolic_repr()
+                    # Improved precision by using a lower threshold
+                    edge_formula = edge.get_symbolic_repr(threshold=1e-6)
                     terms = []
                     for term in edge_formula.split(" + "):
                         if term and term != "0":
oikan/utils.py
CHANGED
@@ -3,17 +3,11 @@ import torch
 import torch.nn as nn
 import numpy as np
 
-# Core basis functions with explicit variable notation
 ADVANCED_LIB = {
     'x': ('x', lambda x: x),
-    'x^2': ('x^2', lambda x:
-    '
-    '
-    'log': ('log(x)', lambda x: np.log(np.abs(x) + 1)),
-    'sqrt': ('sqrt(x)', lambda x: np.sqrt(np.abs(x))),
-    'tanh': ('tanh(x)', lambda x: np.tanh(x)),
-    'sin': ('sin(x)', lambda x: np.sin(np.clip(x, -10*np.pi, 10*np.pi))),
-    'abs': ('abs(x)', lambda x: np.abs(x))
+    'x^2': ('x^2', lambda x: x**2),
+    'sin': ('sin(x)', lambda x: np.sin(x)),
+    'tanh': ('tanh(x)', lambda x: np.tanh(x))
 }
 
 class EdgeActivation(nn.Module):
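The trimmed basis library keeps only four transformations. As a quick illustration, here is a hedged, self-contained sketch that mirrors the new ADVANCED_LIB shape (name → (notation, callable)) and evaluates each basis function on a small array; it is an example, not the package module itself.

```python
import numpy as np

# Stand-in for the reduced basis library shipped in oikan/utils.py 0.0.2.4
ADVANCED_LIB = {
    'x': ('x', lambda x: x),
    'x^2': ('x^2', lambda x: x**2),
    'sin': ('sin(x)', lambda x: np.sin(x)),
    'tanh': ('tanh(x)', lambda x: np.tanh(x)),
}

x = np.array([-1.0, 0.0, 2.0])
for key, (notation, fn) in ADVANCED_LIB.items():
    print(f"{notation}: {fn(x)}")
```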
{oikan-0.0.2.2.dist-info → oikan-0.0.2.4.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: oikan
-Version: 0.0.2.2
+Version: 0.0.2.4
 Summary: OIKAN: Optimized Interpretable Kolmogorov-Arnold Networks
 Author: Arman Zhalgasbayev
 License: MIT
@@ -17,7 +17,7 @@ Dynamic: license-file
 
 <!-- logo in the center -->
 <div align="center">
-  <img src="docs/media/oikan_logo.png" alt="OIKAN Logo" width="200"/>
+  <img src="https://raw.githubusercontent.com/silvermete0r/oikan/main/docs/media/oikan_logo.png" alt="OIKAN Logo" width="200"/>
 
   <h1>OIKAN: Optimized Interpretable Kolmogorov-Arnold Networks</h1>
 </div>
@@ -32,20 +32,39 @@ OIKAN (Optimized Interpretable Kolmogorov-Arnold Networks) is a neuro-symbolic M
 [](https://github.com/silvermete0r/oikan/issues)
 [](https://silvermete0r.github.io/oikan/)
 
+> **Important Disclaimer**: OIKAN is an experimental research project. It is not intended for production use or real-world applications. This framework is designed for research purposes, experimentation, and academic exploration of neuro-symbolic machine learning concepts.
+
 ## Key Features
 - 🧠 **Neuro-Symbolic ML**: Combines neural network learning with symbolic mathematics
 - 📊 **Automatic Formula Extraction**: Generates human-readable mathematical expressions
 - 🎯 **Scikit-learn Compatible**: Familiar `.fit()` and `.predict()` interface
--
+- 🔬 **Research-Focused**: Designed for academic exploration and experimentation
 - 📈 **Multi-Task**: Supports both regression and classification problems
 
 ## Scientific Foundation
 
-OIKAN
+OIKAN implements the Kolmogorov-Arnold Representation Theorem through a novel neural architecture:
+
+1. **Theorem Background**: Any continuous multivariate function f(x1,...,xn) can be represented as:
+   ```
+   f(x1,...,xn) = ∑(j=0 to 2n){ φj( ∑(i=1 to n) ψij(xi) ) }
+   ```
+   where φj and ψij are continuous single-variable functions.
 
-
-
-
+2. **Neural Implementation**:
+   ```python
+   # Pseudo-implementation of KAN architecture
+   class KANLayer:
+       def __init__(self, input_dim, output_dim):
+           self.edges = [SymbolicEdge() for _ in range(input_dim * output_dim)]
+           self.weights = initialize_weights(input_dim, output_dim)
+
+       def forward(self, x):
+           # Transform each input through basis functions
+           edge_outputs = [edge(x_i) for x_i, edge in zip(x, self.edges)]
+           # Combine using learned weights
+           return combine_weighted_outputs(edge_outputs, self.weights)
+   ```
 
 ## Quick Start
 
@@ -68,16 +87,11 @@ pip install -e . # Install in development mode
 from oikan.model import OIKANRegressor
 from sklearn.model_selection import train_test_split
 
-# Initialize model
-model = OIKANRegressor(
-    hidden_dims=[16, 8],   # Network architecture
-    num_basis=10,          # Number of basis functions
-    degree=3,              # Polynomial degree
-    dropout=0.1            # Regularization
-)
+# Initialize model
+model = OIKANRegressor()
 
 # Fit model (sklearn-style)
-model.fit(X_train, y_train, epochs=
+model.fit(X_train, y_train, epochs=100, lr=0.01)
 
 # Get predictions
 y_pred = model.predict(X_test)
@@ -86,7 +100,7 @@ y_pred = model.predict(X_test)
 # The output file will contain:
 # - Detailed symbolic formulas for each feature
 # - Instructions for practical implementation
-# - Recommendations for
+# - Recommendations for testing and validation
 model.save_symbolic_formula("regression_formula.txt")
 ```
 
@@ -98,101 +112,78 @@ model.save_symbolic_formula("regression_formula.txt")
 from oikan.model import OIKANClassifier
 
 # Similar sklearn-style interface for classification
-model = OIKANClassifier(
-model.fit(X_train, y_train)
+model = OIKANClassifier()
+model.fit(X_train, y_train, epochs=100, lr=0.01)
 probas = model.predict_proba(X_test)
 
 # Save classification formulas with implementation guidelines
 # The output file will contain:
 # - Decision boundary formulas for each class
 # - Softmax application instructions
-# -
+# - Recommendations for testing and validation
 model.save_symbolic_formula("classification_formula.txt")
 ```
 
 *Example of the saved symbolic formula instructions: [outputs/classification_symbolic_formula.txt](outputs/classification_symbolic_formula.txt)*
 
-## Architecture Details
-
-OIKAN implements a novel neuro-symbolic architecture based on Kolmogorov-Arnold representation theory through three specialized components:
-
-1. **Edge Symbolic Layer**: Learns interpretable single-variable transformations
-   - Adaptive basis function composition using 9 core functions:
-   ```python
-   ADVANCED_LIB = {
-       'x': ('x', lambda x: x),
-       'x^2': ('x^2', lambda x: x**2),
-       'x^3': ('x^3', lambda x: x**3),
-       'exp': ('exp(x)', lambda x: np.exp(x)),
-       'log': ('log(x)', lambda x: np.log(abs(x) + 1)),
-       'sqrt': ('sqrt(x)', lambda x: np.sqrt(abs(x))),
-       'tanh': ('tanh(x)', lambda x: np.tanh(x)),
-       'sin': ('sin(x)', lambda x: np.sin(x)),
-       'abs': ('abs(x)', lambda x: np.abs(x))
-   }
-   ```
-   - Each input feature is transformed through these basis functions
-   - Learnable weights determine the optimal combination
-
-2. **Neural Composition Layer**: Multi-layer feature aggregation
-   - Direct feature-to-feature connections through KAN layers
-   - Dropout regularization (p=0.1 default) for robust learning
-   - Gradient clipping (max_norm=1.0) for stable training
-   - User-configurable hidden layer dimensions
-
-3. **Symbolic Extraction Layer**: Generates production-ready formulas
-   - Weight-based term pruning (threshold=1e-4)
-   - Automatic coefficient optimization
-   - Human-readable mathematical expressions
-   - Exportable to lightweight production code
-
-### Architecture Diagram
-
-![]()
 
 ### Key Design Principles
 
-1. **Interpretability
-2. **Scikit-learn Compatibility**: Familiar `.fit()` and `.predict()` interface
-3. **Production Ready**: Export formulas as lightweight mathematical expressions
-4. **Automatic Simplification**: Remove insignificant terms (|w| < 1e-4)
-
-## Model Components
-
-1. **Symbolic Edge Functions**
+1. **Interpretability by Design**
    ```python
-
-
-
-
+   # Edge activation contains interpretable basis functions
+   ADVANCED_LIB = {
+       'x': (lambda x: x),       # Linear
+       'x^2': (lambda x: x**2),  # Quadratic
+       'sin(x)': np.sin,         # Periodic
+       'tanh(x)': np.tanh        # Bounded
+   }
   ```
 
-2. **
+2. **Automatic Simplification**
   ```python
-
-
-   def forward(self, x):
-       edge_outputs = [self.edges[i](x[:,i]) for i in range(self.input_dim)]
-       return self.combine(edge_outputs)
+   def simplify_formula(terms, threshold=1e-4):
+       return [term for term in terms if abs(term.coefficient) > threshold]
  ```
 
-3. **
+3. **Research-Oriented Architecture**
  ```python
-
-
-
-
-
-
-       return " + ".join(terms)
+   class SymbolicEdge:
+       def forward(self, x):
+           return sum(w * f(x) for w, f in zip(self.weights, self.basis_functions))
+
+       def get_formula(self):
+           return format_symbolic_terms(self.weights, self.basis_functions)
  ```
 
+### Architecture Diagram
+
+![]()
+
 ### Key Design Principles
 
-
-
-
-
+1. **Interpretability First**: All transformations maintain clear mathematical meaning
+2. **Scikit-learn Compatibility**: Familiar `.fit()` and `.predict()` interface
+3. **Symbolic Formula Exporting**: Export formulas as lightweight mathematical expressions
+4. **Automatic Simplification**: Remove insignificant terms (|w| < 1e-4)
+
+
+### Key Model Components
+
+1. **EdgeActivation Layer**:
+   - Implements interpretable basis function transformations
+   - Automatically prunes insignificant terms
+   - Maintains mathematical transparency
+
+2. **Formula Extraction**:
+   - Combines edge transformations with learned weights
+   - Applies symbolic simplification
+   - Generates human-readable expressions
+
+3. **Training Process**:
+   - Gradient-based optimization of edge weights
+   - Automatic feature importance detection
+   - Complexity control through regularization
 
 ## Contributing
 
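To make the "Formula Extraction" component described above concrete, here is a small standalone sketch (hypothetical helper name `format_symbolic_terms`, illustrative weights) of how weighted basis functions can be rendered as a human-readable formula while pruning terms below the |w| < 1e-4 threshold mentioned in the README; it is not the package's actual implementation.

```python
import numpy as np

# Basis names paired with callables, echoing the README's interpretable library
BASIS = {
    'x': lambda x: x,
    'x^2': lambda x: x**2,
    'sin(x)': np.sin,
    'tanh(x)': np.tanh,
}

def format_symbolic_terms(weights, names, threshold=1e-4):
    """Drop insignificant terms and render the rest as 'w*f(x)' pieces."""
    terms = [f"{w:.4f}*{name}" for w, name in zip(weights, names) if abs(w) > threshold]
    return " + ".join(terms) if terms else "0"

weights = [0.52, -0.31, 0.00002, 0.10]  # illustrative learned edge weights
print(format_symbolic_terms(weights, list(BASIS.keys())))
# -> 0.5200*x + -0.3100*x^2 + 0.1000*tanh(x)
```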
oikan-0.0.2.4.dist-info/RECORD
ADDED
@@ -0,0 +1,10 @@
+oikan/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+oikan/exceptions.py,sha256=UqT3uTtfiB8QA_3AMvKdHOme9WL9HZD_d7GHIk00LJw,394
+oikan/model.py,sha256=nPQcP5TYeuL29pjc9nIKd1tak-Bmh0d0LdRZz6LwcTo,20779
+oikan/symbolic.py,sha256=TtalmSpBecf33_g7yE3q-RPuCVRWQNaXWE4LsCNZmfg,1040
+oikan/utils.py,sha256=GpwAHjPpq3lHvUIS0sKSxJzaLBIkyDxe0aiYRrOqL90,1581
+oikan-0.0.2.4.dist-info/licenses/LICENSE,sha256=75ASVmU-XIpN-M4LbVmJ_ibgbzbvRLVti8FhnR0BTf8,1096
+oikan-0.0.2.4.dist-info/METADATA,sha256=DXQFc4HCNY7hVk_UGXLN43qwmEf0OZFIredbEE6Uq5I,7850
+oikan-0.0.2.4.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+oikan-0.0.2.4.dist-info/top_level.txt,sha256=XwnwKwTJddZwIvtrUsAz-l-58BJRj6HjAGWrfYi_3QY,6
+oikan-0.0.2.4.dist-info/RECORD,,
oikan-0.0.2.2.dist-info/RECORD
DELETED
@@ -1,10 +0,0 @@
-oikan/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-oikan/exceptions.py,sha256=UqT3uTtfiB8QA_3AMvKdHOme9WL9HZD_d7GHIk00LJw,394
-oikan/model.py,sha256=iHWKjk_n0Kkw47UO2XFTc0faqGYBrQBJhmmRn1Po4qw,19604
-oikan/symbolic.py,sha256=TtalmSpBecf33_g7yE3q-RPuCVRWQNaXWE4LsCNZmfg,1040
-oikan/utils.py,sha256=sivt_8jzATH-eUZ3-P-tsdmyIgKsayibSZeP_MtLTfU,1969
-oikan-0.0.2.2.dist-info/licenses/LICENSE,sha256=75ASVmU-XIpN-M4LbVmJ_ibgbzbvRLVti8FhnR0BTf8,1096
-oikan-0.0.2.2.dist-info/METADATA,sha256=VvxfL5IWijk6RJObJL5fORZQFAY55X_oZf00Qk5ATTU,8519
-oikan-0.0.2.2.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
-oikan-0.0.2.2.dist-info/top_level.txt,sha256=XwnwKwTJddZwIvtrUsAz-l-58BJRj6HjAGWrfYi_3QY,6
-oikan-0.0.2.2.dist-info/RECORD,,
File without changes
|
File without changes
|
File without changes
|