oikan-0.0.3.3-py3-none-any.whl → oikan-0.0.3.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
oikan/model.py CHANGED
@@ -7,7 +7,7 @@ from sklearn.linear_model import ElasticNet
  from abc import ABC, abstractmethod
  import json
  from .neural import TabularNet
- from .utils import evaluate_basis_functions, get_features_involved
+ from .utils import evaluate_basis_functions, get_features_involved, sympify_formula, get_latex_formula
  from sklearn.model_selection import train_test_split
  from sklearn.metrics import r2_score, accuracy_score
  from .exceptions import *
@@ -25,8 +25,6 @@ class OIKAN(ABC):
          Activation function for the neural network ('relu', 'tanh', 'leaky_relu', 'elu', 'swish', 'gelu').
      augmentation_factor : int, optional (default=10)
          Number of augmented samples per original sample.
-     polynomial_degree : int, optional (default=2)
-         Maximum degree of polynomial features for symbolic regression.
      alpha : float, optional (default=0.1)
          L1 regularization strength for Lasso in symbolic regression.
      sigma : float, optional (default=0.1)
@@ -45,7 +43,7 @@ class OIKAN(ABC):
          Whether to evaluate neural network performance before full training.
      """
      def __init__(self, hidden_sizes=[64, 64], activation='relu', augmentation_factor=10,
-                  polynomial_degree=2, alpha=0.1, sigma=0.1, epochs=100, lr=0.001, batch_size=32,
+                  alpha=0.1, sigma=0.1, epochs=100, lr=0.001, batch_size=32,
                   verbose=False, evaluate_nn=False, top_k=5):
          if not isinstance(hidden_sizes, list) or not all(isinstance(x, int) and x > 0 for x in hidden_sizes):
              raise InvalidParameterError("hidden_sizes must be a list of positive integers")
@@ -53,8 +51,6 @@ class OIKAN(ABC):
              raise InvalidParameterError(f"Unsupported activation function: {activation}")
          if not isinstance(augmentation_factor, int) or augmentation_factor < 1:
              raise InvalidParameterError("augmentation_factor must be a positive integer")
-         if not isinstance(polynomial_degree, int) or polynomial_degree < 1:
-             raise InvalidParameterError("polynomial_degree must be a positive integer")
          if not isinstance(top_k, int) or top_k < 1:
              raise InvalidParameterError("top_k must be a positive integer")
          if not 0 < lr < 1:
@@ -71,7 +67,6 @@ class OIKAN(ABC):
          self.hidden_sizes = hidden_sizes
          self.activation = activation
          self.augmentation_factor = augmentation_factor
-         self.polynomial_degree = polynomial_degree
          self.alpha = alpha
          self.sigma = sigma
          self.epochs = epochs
@@ -92,23 +87,53 @@ class OIKAN(ABC):
      def predict(self, X):
          pass

-     def get_formula(self):
-         """Returns the symbolic formula(s) as a string (regression) or list of strings (classification)."""
+     def get_formula(self, type='original'):
+         """
+         Returns the symbolic formula(s) as a string (regression) or list of strings (classification).
+
+         Parameter:
+         --------
+         type : str, optional (default='original') other options: 'sympy', 'latex'
+             'original' returns the original formula with coefficients, 'sympy' returns sympy simplified formula.
+         """
+         if type.lower() not in ['original', 'sympy', 'latex']:
+             raise InvalidParameterError("Invalid type. Choose 'original', 'sympy', 'latex'.")
          if self.symbolic_model is None:
              raise ValueError("Model not fitted yet.")
          basis_functions = self.symbolic_model['basis_functions']
-         if 'coefficients' in self.symbolic_model:
-             coefficients = self.symbolic_model['coefficients']
-             formula = " + ".join([f"{coefficients[i]:.5f}*{basis_functions[i]}"
-                                   for i in range(len(coefficients)) if coefficients[i] != 0])
-             return formula if formula else "0"
+         if type.lower() == 'original':
+             if 'coefficients' in self.symbolic_model:
+                 coefficients = self.symbolic_model['coefficients']
+                 formula = " + ".join([f"{coefficients[i]:.6f}*{basis_functions[i]}"
+                                       for i in range(len(coefficients)) if coefficients[i] != 0])
+                 return formula if formula else "0"
+             else:
+                 formulas = []
+                 for c, coef in enumerate(self.symbolic_model['coefficients_list']):
+                     formula = " + ".join([f"{coef[i]:.6f}*{basis_functions[i]}"
+                                           for i in range(len(coef)) if coef[i] != 0])
+                     formulas.append(f"Class {self.classes_[c]}: {formula if formula else '0'}")
+                 return formulas
+         elif type.lower() == 'sympy':
+             if 'coefficients' in self.symbolic_model:
+                 formula = sympify_formula(self.symbolic_model['basis_functions'], self.symbolic_model['coefficients'], self.symbolic_model['n_features'])
+                 return formula
+             else:
+                 formulas = []
+                 for c, coef in enumerate(self.symbolic_model['coefficients_list']):
+                     formula = sympify_formula(self.symbolic_model['basis_functions'], coef, self.symbolic_model['n_features'])
+                     formulas.append(f"Class {self.classes_[c]}: {formula}")
+                 return formulas
          else:
-             formulas = []
-             for c, coef in enumerate(self.symbolic_model['coefficients_list']):
-                 formula = " + ".join([f"{coef[i]:.5f}*{basis_functions[i]}"
-                                       for i in range(len(coef)) if coef[i] != 0])
-                 formulas.append(f"Class {self.classes_[c]}: {formula if formula else '0'}")
-             return formulas
+             if 'coefficients' in self.symbolic_model:
+                 formula = get_latex_formula(self.symbolic_model['basis_functions'], self.symbolic_model['coefficients'], self.symbolic_model['n_features'])
+                 return formula
+             else:
+                 formulas = []
+                 for c, coef in enumerate(self.symbolic_model['coefficients_list']):
+                     formula = get_latex_formula(self.symbolic_model['basis_functions'], coef, self.symbolic_model['n_features'])
+                     formulas.append(f"Class {self.classes_[c]}: {formula}")
+                 return formulas

      def feature_importances(self):
          """
@@ -163,7 +188,6 @@ class OIKAN(ABC):
          # Convert numpy arrays and other non-serializable types to lists
          model_data = {
              'n_features': self.symbolic_model['n_features'],
-             'degree': self.symbolic_model['degree'],
              'basis_functions': self.symbolic_model['basis_functions']
          }

@@ -200,7 +224,6 @@ class OIKAN(ABC):

          self.symbolic_model = {
              'n_features': model_data['n_features'],
-             'degree': model_data['degree'],
              'basis_functions': model_data['basis_functions']
          }

@@ -222,7 +245,6 @@ class OIKAN(ABC):

          input_size = X.shape[1]
          self.neural_net = TabularNet(input_size, self.hidden_sizes, output_size, self.activation)
-         optimizer = optim.Adam(self.neural_net.parameters(), lr=self.lr)

          # Train on the training set
          self._train_neural_net(X_train, y_train, output_size, loss_fn)
@@ -378,7 +400,6 @@ class OIKAN(ABC):
          selected_indices = np.where(np.abs(coef_refined) > 1e-6)[0]
          self.symbolic_model = {
              'n_features': X.shape[1],
-             'degree': self.polynomial_degree,
              'basis_functions': [basis_functions_refined[i] for i in selected_indices],
              'coefficients': coef_refined[selected_indices].tolist()
          }
@@ -398,7 +419,6 @@ class OIKAN(ABC):
              coefficients_list.append(coef_selected)
          self.symbolic_model = {
              'n_features': X.shape[1],
-             'degree': self.polynomial_degree,
              'basis_functions': basis_functions,
              'coefficients_list': coefficients_list
          }
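
For orientation, the user-visible effect of the model.py changes above is the new `type` argument of `get_formula` (the `polynomial_degree` parameter is gone). A minimal sketch of calling the three modes, assuming the usual `OIKANRegressor` workflow from the README further below and an `oikan.model` import path (toy data; the printed strings depend on the fitted coefficients):

```python
import numpy as np
from oikan.model import OIKANRegressor  # import path assumed; adjust to the package's public exports

# Toy data purely for illustration.
rng = np.random.default_rng(0)
X_train = rng.random((200, 3))
y_train = 2.0 * X_train[:, 0] + X_train[:, 1] ** 2 + rng.normal(scale=0.01, size=200)

model = OIKANRegressor(hidden_sizes=[32, 32], augmentation_factor=5, alpha=0.1, top_k=5, epochs=50)
model.fit(X_train, y_train)

print(model.get_formula())                # 'original': plain "coefficient*basis_function" terms
print(model.get_formula(type='sympy'))    # simplified via oikan.utils.sympify_formula
print(model.get_formula(type='latex'))    # LaTeX string via oikan.utils.get_latex_formula
```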
oikan/utils.py CHANGED
@@ -1,4 +1,7 @@
  import numpy as np
+ import sympy as sp
+ import json
+ from functools import lru_cache

  def evaluate_basis_functions(X, basis_functions, n_features):
      """
@@ -79,4 +82,175 @@ def get_features_involved(basis_function):
      elif basis_function.startswith('x'):
          idx = int(basis_function[1:])
          features.add(idx)
-     return features
+     return features
+
+ @lru_cache(maxsize=1000)
+ def _cached_sympify_formula(basis_functions_tuple, coefficients_tuple, n_features, threshold):
+     """
+     Internal function to perform SymPy formula simplification with caching.
+
+     Parameters:
+     -----------
+     basis_functions_tuple : tuple
+         Tuple of basis function strings.
+     coefficients_tuple : tuple
+         Tuple of coefficients.
+     n_features : int
+         Number of input features.
+     threshold : float
+         Coefficients with absolute value below this are excluded.
+
+     Returns:
+     --------
+     str
+         Simplified formula as a string, or '0' if empty.
+     """
+     # Convert tuples back to lists
+     basis_functions = list(basis_functions_tuple)
+     coefficients = list(coefficients_tuple)
+
+     # Define symbolic variables
+     x = sp.symbols(f'x0:{n_features}')
+     expr = 0
+
+     # Build the expression
+     for coef, func in zip(coefficients, basis_functions):
+         if abs(coef) < threshold:
+             continue # Skip negligible coefficients
+         if func == '1':
+             term = coef
+         elif func.startswith('log1p_x'):
+             idx = int(func.split('_')[1][1:])
+             term = coef * sp.log(1 + sp.Abs(x[idx]))
+         elif func.startswith('exp_x'):
+             idx = int(func.split('_')[1][1:])
+             term = coef * sp.exp(x[idx])
+         elif func.startswith('sin_x'):
+             idx = int(func.split('_')[1][1:])
+             term = coef * sp.sin(x[idx])
+         elif '^' in func:
+             var, power = func.split('^')
+             idx = int(var[1:])
+             term = coef * x[idx]**int(power)
+         elif ' ' in func:
+             vars = func.split(' ')
+             term = coef
+             for var in vars:
+                 idx = int(var[1:])
+                 term *= x[idx]
+         else:
+             idx = int(func[1:])
+             term = coef * x[idx]
+         expr += term
+
+     # Simplify the expression
+     simplified_expr = sp.simplify(expr)
+
+     # Convert to string with rounded coefficients
+     def format_term(term):
+         if term.is_Mul:
+             coeff = 1
+             factors = []
+             for factor in term.args:
+                 if factor.is_Number:
+                     coeff *= float(factor)
+                 else:
+                     factors.append(str(factor))
+             if abs(coeff) < threshold:
+                 return None
+             return f"{coeff:.5f}*{'*'.join(factors)}" if factors else f"{coeff:.5f}"
+         elif term.is_Add:
+             return None # Handle in recursion
+         elif term.is_Number:
+             return f"{float(term):.5f}" if abs(float(term)) >= threshold else None
+         else:
+             return f"{1.0:.5f}*{term}" if abs(1.0) >= threshold else None
+
+     terms = []
+     if simplified_expr.is_Add:
+         for term in simplified_expr.args:
+             formatted = format_term(term)
+             if formatted:
+                 terms.append(formatted)
+     else:
+         formatted = format_term(simplified_expr)
+         if formatted:
+             terms.append(formatted)
+
+     formula = " + ".join(terms).replace("+ -", "- ")
+     return formula if formula else "0"
+
+ def sympify_formula(basis_functions, coefficients, n_features, threshold=0.00005):
+     """
+     Simplifies a symbolic formula using SymPy with caching.
+
+     Parameters:
+     -----------
+     basis_functions : list
+         List of basis function strings (e.g., 'x0', 'x0^2', 'x0 x1', 'exp_x0').
+     coefficients : list
+         List of coefficients corresponding to each basis function.
+     n_features : int
+         Number of input features.
+     threshold : float, optional (default=0.00005)
+         Coefficients with absolute value below this are excluded.
+
+     Returns:
+     --------
+     str
+         Simplified formula as a string, or '0' if empty.
+     """
+     # Convert inputs to hashable types
+     basis_functions_tuple = tuple(basis_functions)
+     coefficients_tuple = tuple(coefficients)
+
+     # Call cached function
+     return _cached_sympify_formula(basis_functions_tuple, coefficients_tuple, n_features, threshold)
+
+ @lru_cache(maxsize=1000)
+ def _cached_get_latex_formula(formula):
+     """
+     Internal function to convert a simplified formula to LaTeX with caching.
+
+     Parameters:
+     -----------
+     formula : str
+         Simplified formula string.
+
+     Returns:
+     --------
+     str
+         LaTeX formula as a string.
+     """
+     return sp.latex(sp.sympify(formula))
+
+ def get_latex_formula(basis_functions, coefficients, n_features, threshold=0.00005):
+     """
+     Generates a LaTeX formula from the basis functions and coefficients with caching.
+
+     Parameters:
+     -----------
+     basis_functions : list
+         List of basis function strings (e.g., 'x0', 'x0^2', 'x0 x1', 'exp_x0').
+     coefficients : list
+         List of coefficients corresponding to each basis function.
+     n_features : int
+         Number of input features.
+     threshold : float, optional (default=0.00005)
+         Coefficients with absolute value below this are excluded.
+
+     Returns:
+     --------
+     str
+         LaTeX formula as a string, or '0' if empty.
+     """
+     # Get simplified formula (cached)
+     formula = sympify_formula(basis_functions, coefficients, n_features, threshold)
+     # Convert to LaTeX (cached)
+     return _cached_get_latex_formula(formula)
+
+ if __name__ == "__main__":
+     with open('outputs/california_housing_model.json', 'r') as f:
+         model = json.load(f)
+     print('Sympified formula:', sympify_formula(model['basis_functions'], model['coefficients'], model['n_features']))
+     print('LaTeX formula:', get_latex_formula(model['basis_functions'], model['coefficients'], model['n_features']))
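
The new helpers can also be called directly on a basis-function/coefficient pair, which is handy for inspecting a saved model without going through `OIKAN.get_formula`. A small sketch with made-up values (the exact simplified string depends on SymPy's simplification; the ~1e-6 coefficient falls below the default 5e-5 threshold and is dropped):

```python
from oikan.utils import sympify_formula, get_latex_formula

# Basis-function strings follow the formats parsed above:
# '1', 'x0', 'x0^2', 'x0 x1', 'log1p_x0', 'exp_x0', 'sin_x0'.
basis_functions = ['1', 'x0', 'x0^2', 'x0 x1', 'sin_x1']
coefficients = [0.5, 1.25, -0.75, 0.000001, 2.0]   # illustrative values only

print(sympify_formula(basis_functions, coefficients, n_features=2))
print(get_latex_formula(basis_functions, coefficients, n_features=2))
```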
oikan-0.0.3.3.dist-info/METADATA → oikan-0.0.3.5.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: oikan
- Version: 0.0.3.3
+ Version: 0.0.3.5
  Summary: OIKAN: Neuro-Symbolic ML for Scientific Discovery
  Author: Arman Zhalgasbayev
  License: MIT
@@ -14,6 +14,7 @@ Requires-Dist: torch
  Requires-Dist: numpy
  Requires-Dist: scikit-learn
  Requires-Dist: tqdm
+ Requires-Dist: sympy
  Dynamic: license-file

  <!-- logo in the center -->
@@ -33,6 +34,7 @@ OIKAN is a neuro-symbolic machine learning framework inspired by Kolmogorov-Arno
  [![License](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
  [![GitHub issues](https://img.shields.io/github/issues/silvermete0r/OIKAN.svg)](https://github.com/silvermete0r/oikan/issues)
  [![Docs](https://img.shields.io/badge/docs-passing-brightgreen)](https://silvermete0r.github.io/oikan/)
+ [![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/silvermete0r/oikan)

  > **Important Disclaimer**: OIKAN is an experimental research project. It is not intended for production use or real-world applications. This framework is designed for research purposes, experimentation, and academic exploration of neuro-symbolic machine learning concepts.

@@ -61,14 +63,15 @@ OIKAN implements a modern interpretation of the Kolmogorov-Arnold Representation
  - Automatic pruning of insignificant terms

  ```python
- class OIKANRegressor:
-     def __init__(self, hidden_sizes=[64, 64], activation='relu',
+ class OIKAN:
+     def __init__(self, hidden_sizes=[64, 64], activation='relu',
                   polynomial_degree=2, alpha=0.1):
-         # Neural network for learning complex patterns
-         self.neural_net = TabularNet(input_size, hidden_sizes, activation)
-         # Symbolic regression for interpretable formulas
-         self.symbolic_model = None
-
+         # Neural network for learning complex patterns
+         self.neural_net = TabularNet(input_size, hidden_sizes, activation)
+         # Data augmentation for better coverage
+         self.augmented_data = self.augment_data(X, y, augmentation_factor=5)
+         # Symbolic regression for interpretable formulas
+         self.symbolic_regression = SymbolicRegression(alpha=alpha)
  ```

  3. **Basis Functions**: Core set of interpretable transformations:
@@ -118,7 +121,6 @@ model = OIKANRegressor(
      hidden_sizes=[32, 32], # Hidden layer sizes
      activation='relu', # Activation function (other options: 'tanh', 'leaky_relu', 'elu', 'swish', 'gelu')
      augmentation_factor=5, # Augmentation factor for data generation
-     polynomial_degree=2, # Degree of polynomial basis functions
      alpha=0.1, # L1 regularization strength (Symbolic regression)
      sigma=0.1, # Standard deviation of Gaussian noise for data augmentation
      top_k=5, # Number of top features to select (Symbolic regression)
@@ -140,7 +142,7 @@ mse = mean_squared_error(y_test, y_pred)
  print("Mean Squared Error:", mse)

  # Get symbolic formula
- formula = model.get_formula()
+ formula = model.get_formula() # default: type='original' -> returns all formula without pruning | other options: 'sympy' -> simplified formula using sympy; 'latex' -> LaTeX format
  print("Symbolic Formula:", formula)

  # Get feature importances
@@ -168,7 +170,6 @@ model = OIKANClassifier(
      hidden_sizes=[32, 32], # Hidden layer sizes
      activation='relu', # Activation function (other options: 'tanh', 'leaky_relu', 'elu', 'swish', 'gelu')
      augmentation_factor=10, # Augmentation factor for data generation
-     polynomial_degree=2, # Degree of polynomial basis functions
      alpha=0.1, # L1 regularization strength (Symbolic regression)
      sigma=0.1, # Standard deviation of Gaussian noise for data augmentation
      top_k=5, # Number of top features to select (Symbolic regression)
@@ -190,7 +191,7 @@ accuracy = model.score(X_test, y_test)
  print("Accuracy:", accuracy)

  # Get symbolic formulas for each class
- formulas = model.get_formula()
+ formulas = model.get_formula() # default: type='original' -> returns all formula without pruning | other options: 'sympy' -> simplified formula using sympy; 'latex' -> LaTeX format
  for i, formula in enumerate(formulas):
      print(f"Class {i} Formula:", formula)

@@ -210,7 +211,61 @@ loaded_model.load("outputs/model.json")

  ### Architecture Diagram

- ![OIKAN v0.0.3(1) Architecture](https://raw.githubusercontent.com/silvermete0r/oikan/main/docs/media/oikan-v0.0.3(1)-architecture-oop.png)
+ ![OIKAN v0.0.3(2) Architecture](https://raw.githubusercontent.com/silvermete0r/oikan/main/docs/media/oikan-v0.0.3(2)-architecture-oop.png)
+
+ ## OIKAN Symbolic Model Compilers
+
+ OIKAN provides a set of symbolic model compilers to convert the symbolic formulas generated by the OIKAN model into different programming languages.
+
+ *Currently, we support: `Python`, `C++`, `C`, `JavaScript`, `Rust`, and `Go`. This allows users to easily integrate the generated formulas into their applications or systems.*
+
+ All compilers: [model_compilers/](model_compilers)
+
+ ### Example of Python Compiler
+
+ 1. Regression Model:
+ ```python
+ import numpy as np
+ import json
+
+ def predict(X, symbolic_model):
+     X = np.asarray(X)
+     X_transformed = evaluate_basis_functions(X, symbolic_model['basis_functions'],
+                                              symbolic_model['n_features'])
+     return np.dot(X_transformed, symbolic_model['coefficients'])
+
+ if __name__ == "__main__":
+     with open('outputs/california_housing_model.json', 'r') as f:
+         symbolic_model = json.load(f)
+     X = np.random.rand(10, symbolic_model['n_features'])
+     y_pred = predict(X, symbolic_model)
+     print(y_pred)
+ ```
+
+ 2. Classification Model:
+ ```python
+ import numpy as np
+ import json
+
+ def predict(X, symbolic_model):
+     X = np.asarray(X)
+     X_transformed = evaluate_basis_functions(X, symbolic_model['basis_functions'],
+                                              symbolic_model['n_features'])
+     logits = np.dot(X_transformed, np.array(symbolic_model['coefficients_list']).T)
+     probabilities = np.exp(logits) / np.sum(np.exp(logits), axis=1, keepdims=True)
+     return np.argmax(probabilities, axis=1)
+
+ if __name__ == "__main__":
+     with open('outputs/iris_model.json', 'r') as f:
+         symbolic_model = json.load(f)
+     X = np.array([[5.1, 3.5, 1.4, 0.2],
+                   [7.0, 3.2, 4.7, 1.4],
+                   [6.3, 3.3, 6.0, 2.5]])
+     y_pred = predict(X, symbolic_model)
+     print(y_pred)
+ ```
+
+

  ## Contributing

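Note that the standalone compiler snippets above call `evaluate_basis_functions` without importing it; in the package it lives in `oikan/utils.py` (its signature appears in the utils.py diff). For readers copying a snippet out of context, a rough stand-in covering the basis-function formats documented in `utils.py` could look like the following; it mirrors the string parsing in `_cached_sympify_formula` above and is not the package's exact implementation:

```python
import numpy as np

def evaluate_basis_functions(X, basis_functions, n_features):
    """Rough stand-in: one column per basis-function string ('1', 'x0', 'x0^2', 'x0 x1', 'log1p_x0', 'exp_x0', 'sin_x0')."""
    X = np.asarray(X, dtype=float)
    if X.shape[1] != n_features:
        raise ValueError(f"Expected {n_features} features, got {X.shape[1]}")
    columns = []
    for func in basis_functions:
        if func == '1':
            columns.append(np.ones(X.shape[0]))
        elif func.startswith('log1p_x'):
            columns.append(np.log1p(np.abs(X[:, int(func.split('_')[1][1:])])))  # abs mirrors sp.Abs above
        elif func.startswith('exp_x'):
            columns.append(np.exp(X[:, int(func.split('_')[1][1:])]))
        elif func.startswith('sin_x'):
            columns.append(np.sin(X[:, int(func.split('_')[1][1:])]))
        elif '^' in func:
            var, power = func.split('^')
            columns.append(X[:, int(var[1:])] ** int(power))
        elif ' ' in func:                      # interaction terms like 'x0 x1'
            term = np.ones(X.shape[0])
            for var in func.split(' '):
                term = term * X[:, int(var[1:])]
            columns.append(term)
        else:                                  # plain feature terms like 'x3'
            columns.append(X[:, int(func[1:])])
    return np.column_stack(columns)
```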
oikan-0.0.3.5.dist-info/RECORD ADDED
@@ -0,0 +1,10 @@
+ oikan/__init__.py,sha256=zEzhm1GYLT4vNaIQ4CgZcNpUk3uo8SWnoaHYtHW_XSQ,628
+ oikan/exceptions.py,sha256=GhHWqy2Q5LVBcteTy4ngnqxr7FOoLNyD8dNt1kfRXyw,901
+ oikan/model.py,sha256=Ke5FdHOr1YwnUCJieXN2VjTXhxAGxHWzCfNgw0_WYbA,23157
+ oikan/neural.py,sha256=wxmGgzmtpwJ3lvH6u6D4i4BiAzg018czrIdw49phSCY,1558
+ oikan/utils.py,sha256=7UCm9obO-8Q2zhetdAkukMDOZvGSBWUL_dSF04XqM7k,8808
+ oikan-0.0.3.5.dist-info/licenses/LICENSE,sha256=75ASVmU-XIpN-M4LbVmJ_ibgbzbvRLVti8FhnR0BTf8,1096
+ oikan-0.0.3.5.dist-info/METADATA,sha256=sUzI5w2hfUd70B4s1K5vULOvQpQbkvb239NTfS2gAPU,11388
+ oikan-0.0.3.5.dist-info/WHEEL,sha256=DnLRTWE75wApRYVsjgc6wsVswC54sMSJhAEd4xhDpBk,91
+ oikan-0.0.3.5.dist-info/top_level.txt,sha256=XwnwKwTJddZwIvtrUsAz-l-58BJRj6HjAGWrfYi_3QY,6
+ oikan-0.0.3.5.dist-info/RECORD,,
oikan-0.0.3.3.dist-info/RECORD REMOVED
@@ -1,10 +0,0 @@
- oikan/__init__.py,sha256=zEzhm1GYLT4vNaIQ4CgZcNpUk3uo8SWnoaHYtHW_XSQ,628
- oikan/exceptions.py,sha256=GhHWqy2Q5LVBcteTy4ngnqxr7FOoLNyD8dNt1kfRXyw,901
- oikan/model.py,sha256=wvF_g1RcpYcQin_wOUiWEUeKJcQ8HyPtEm_5YrCeXFs,21946
- oikan/neural.py,sha256=wxmGgzmtpwJ3lvH6u6D4i4BiAzg018czrIdw49phSCY,1558
- oikan/utils.py,sha256=_FNhB_sIQfY-KsKRqvuKKVXNVZaAdpI5w8zPY_j_xJU,2898
- oikan-0.0.3.3.dist-info/licenses/LICENSE,sha256=75ASVmU-XIpN-M4LbVmJ_ibgbzbvRLVti8FhnR0BTf8,1096
- oikan-0.0.3.3.dist-info/METADATA,sha256=moaO5H0kXU-Gf_sV7tpt4VUgmTEys6dINlzr0yfDSUc,9055
- oikan-0.0.3.3.dist-info/WHEEL,sha256=DnLRTWE75wApRYVsjgc6wsVswC54sMSJhAEd4xhDpBk,91
- oikan-0.0.3.3.dist-info/top_level.txt,sha256=XwnwKwTJddZwIvtrUsAz-l-58BJRj6HjAGWrfYi_3QY,6
- oikan-0.0.3.3.dist-info/RECORD,,