kernel-experience-tools 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kernel_experience_tools-0.1.0/LICENSE +21 -0
- kernel_experience_tools-0.1.0/MANIFEST.in +5 -0
- kernel_experience_tools-0.1.0/PKG-INFO +262 -0
- kernel_experience_tools-0.1.0/README.md +242 -0
- kernel_experience_tools-0.1.0/pyproject.toml +23 -0
- kernel_experience_tools-0.1.0/requirements.txt +3 -0
- kernel_experience_tools-0.1.0/setup.cfg +4 -0
- kernel_experience_tools-0.1.0/setup.py +50 -0
- kernel_experience_tools-0.1.0/src/kernel_experience/__init__.py +27 -0
- kernel_experience_tools-0.1.0/src/kernel_experience/kernel.py +86 -0
- kernel_experience_tools-0.1.0/src/kernel_experience/projection.py +107 -0
- kernel_experience_tools-0.1.0/src/kernel_experience/solvers.py +76 -0
- kernel_experience_tools-0.1.0/src/kernel_experience_tools.egg-info/PKG-INFO +262 -0
- kernel_experience_tools-0.1.0/src/kernel_experience_tools.egg-info/SOURCES.txt +16 -0
- kernel_experience_tools-0.1.0/src/kernel_experience_tools.egg-info/dependency_links.txt +1 -0
- kernel_experience_tools-0.1.0/src/kernel_experience_tools.egg-info/not-zip-safe +1 -0
- kernel_experience_tools-0.1.0/src/kernel_experience_tools.egg-info/requires.txt +3 -0
- kernel_experience_tools-0.1.0/src/kernel_experience_tools.egg-info/top_level.txt +1 -0

kernel_experience_tools-0.1.0/LICENSE
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2026 BRUTALLOLOL

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

kernel_experience_tools-0.1.0/PKG-INFO
@@ -0,0 +1,262 @@
Metadata-Version: 2.4
Name: kernel-experience-tools
Version: 0.1.0
Summary: Library for projecting memory kernels to experience functions
Home-page: https://github.com/BRUTALLOLOL/kernel-experience-tools
Author: Artem Vozmishchev
Author-email: Artem Vozmishchev <your.email@example.com>
License: MIT
Project-URL: Homepage, https://github.com/BRUTALLOLOL/kernel-experience-tools
Requires-Python: >=3.7
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: numpy
Requires-Dist: scipy>=1.6.0
Requires-Dist: matplotlib>=3.3.0
Dynamic: author
Dynamic: home-page
Dynamic: license-file
Dynamic: requires-python

# Kernel-Experience Tools 🧠 → ⏳

**A Python library that turns memory kernels into experience functions.**

---

## 📌 What is it?

Every memory kernel K(t) hides a story.

This library finds it.

Given the Volterra relaxation equation

x(t) = x₀ - ∫₀ᵗ K(t-τ) x(τ) dτ

we compute the unique experience function n(t) such that

x(t) = x₀ · λⁿ⁽ᵗ⁾

One kernel. One curve. One number.
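
Concretely, once x(t) is known, the experience function follows by inverting the defining relation: n(t) = log_λ(x(t)/x₀) = ln(x(t)/x₀) / ln λ, which is exactly what the projection step computes. A minimal illustration of the inversion (the decay curve below is synthetic, made up purely to show the identity):

```python
import numpy as np

x0, lam = 1.0, 0.8
k = np.linspace(0.0, 3.0, 5)        # made-up "experience" values
x = x0 * lam ** k                   # synthetic decay curve x = x0 * lam**n
n = np.log(x / x0) / np.log(lam)    # recovers 0.0, 0.75, 1.5, 2.25, 3.0
```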

---

## 🚀 Quick start

```python
from kernel_experience import Kernel, project_kernel_to_n

# Pick a kernel
K = Kernel.tempered_power_law(alpha=0.6, beta=0.3)

# Get its experience function
t, x, n = project_kernel_to_n(K, t_max=10)

print(f"Memory score: {n[-1]:.2f}")
# Memory score: 3.44
```

---

## 📦 Installation

```bash
pip install kernel-experience-tools
```

---

## 📘 API Reference

### Kernel

Container for your memory kernel.

**Parameters**

- `func`: callable — Kernel function K(t)
- `name`: str, optional — Kernel name (default: "CustomKernel")
- `params`: dict, optional — Kernel parameters

**Factory methods**

```python
# Exponential: γ·e^{-γt}
K = Kernel.exponential(gamma=1.0)

# Power law: γ·t^{α-1}/Γ(α)
K = Kernel.power_law(alpha=0.7, gamma=1.0)

# Mittag-Leffler: t^{α-1}E_{α,α}(-t^α)
K = Kernel.mittag_leffler(alpha=0.7)

# Tempered power law: γ·t^{α-1}e^{-βt}/Γ(α)
K = Kernel.tempered_power_law(alpha=0.6, beta=0.3, gamma=1.0)
```
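
A `Kernel` is callable, so it can be evaluated directly on a NumPy array, which is handy for inspecting or plotting K(t) before projecting it. For example:

```python
import numpy as np
from kernel_experience import Kernel

K = Kernel.exponential(gamma=1.0)
t = np.linspace(0.0, 5.0, 6)
print(K(t))   # gamma * exp(-gamma * t) evaluated on the grid
print(K)      # Kernel(name='Exponential', params={'gamma': 1.0})
```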

**Custom kernel**

```python
def my_kernel(t):
    return np.exp(-t) * np.cos(t)

K = Kernel(my_kernel, name="Oscillatory", params={"freq": 1.0})
```

---

### project_kernel_to_n

Main projection: K(t) → n(t).

**Parameters**

| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `kernel` | `Kernel` | — | Memory kernel |
| `lambda_param` | `float` | 0.8 | Base λ in (0,1) |
| `t_max` | `float` | 10.0 | Maximum time |
| `n_points` | `int` | 1000 | Number of time points |
| `x0` | `float` | 1.0 | Initial condition |
| `return_complex` | `bool` | False | Return complex n(t) for oscillatory kernels |

**Returns**

| Return | Type | Description |
|--------|------|-------------|
| `t` | `ndarray` | Time grid |
| `x` | `ndarray` | Solution x(t) |
| `n` | `ndarray` | Experience function n(t) |

**Examples**

```python
# Basic usage
t, x, n = project_kernel_to_n(K, t_max=20, n_points=2000)

# Custom lambda
t, x, n = project_kernel_to_n(K, lambda_param=0.5)

# Oscillatory kernel — get complex n(t)
K_osc = Kernel(lambda t: np.exp(-0.1*t)*np.sin(t), name="Oscillatory")
t, x, n_complex = project_kernel_to_n(K_osc, return_complex=True)

# Extract real and imaginary parts
n_real = n_complex.real
n_imag = n_complex.imag
```

---

### solve_volterra

Numerical solver for Volterra integral equation.

**Parameters**

| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `kernel` | `Kernel` | — | Memory kernel |
| `t_max` | `float` | 10.0 | Maximum time |
| `n_points` | `int` | 1000 | Number of time points |
| `x0` | `float` | 1.0 | Initial condition |

**Returns**

| Return | Type | Description |
|--------|------|-------------|
| `t` | `ndarray` | Time grid |
| `x` | `ndarray` | Solution x(t) |

**Example**

```python
t, x = solve_volterra(K, t_max=5, n_points=500)
```
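
For the exponential kernel K(t) = γ·e^{-γt} the equation has the closed form x(t) = x₀·(1 + e^{-2γt})/2 (obtained via Laplace transform), which makes a convenient sanity check for the solver. The sketch below is such a check; the grid size and the printed error metric are choices made here, not part of the library:

```python
import numpy as np
from kernel_experience import Kernel, solve_volterra

# Exact solution of x(t) = x0 - ∫₀ᵗ γ e^{-γ(t-τ)} x(τ) dτ with x0 = 1:
#   x(t) = (1 + exp(-2*γ*t)) / 2
gamma = 1.0
K = Kernel.exponential(gamma=gamma)
t, x = solve_volterra(K, t_max=5, n_points=2000)

x_exact = 0.5 * (1.0 + np.exp(-2.0 * gamma * t))
print("max abs error:", np.max(np.abs(x - x_exact)))
```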

---

### compute_accuracy

Compare original and reconstructed solutions.

**Parameters**

| Parameter | Type | Description |
|-----------|------|-------------|
| `original_x` | `ndarray` | Original solution x(t) |
| `reconstructed_x` | `ndarray` | Reconstructed solution x₀·λⁿ⁽ᵗ⁾ |

**Returns**

| Return | Type | Description |
|--------|------|-------------|
| `dict` | `dict` | Accuracy metrics |

**Metrics**

- `mean_error`: float — Mean relative error
- `max_error`: float — Maximum relative error
- `accuracy`: float — 1 - mean_error
- `rmse`: float — Root mean square error

**Example**

```python
# Get solution and n(t)
t, x, n = project_kernel_to_n(K)

# Reconstruct from n(t)
x_rec = 1.0 * (0.8 ** n)

# Check accuracy
metrics = compute_accuracy(x, x_rec)
print(f"Accuracy: {metrics['accuracy']:.2%}")
print(f"Mean error: {metrics['mean_error']:.2e}")
# Accuracy: 100.00%
# Mean error: 1.23e-12
```

---

## 🧠 What problem does it solve?

Traditional relaxation models assume exponential decay.

Real systems — glasses, polymers, biological tissues — show memory effects. Power laws. Stretched exponentials. Oscillations.

This library gives you one language for all of them:

K(t) → n(t)

Once you have n(t), the relaxation curve is simply x₀ · λⁿ⁽ᵗ⁾.

No fractional calculus. No special functions. No black boxes.

Just your kernel. One function call. One curve.

---

## 📄 Citation

```bibtex
@software{vozmishchev2026kernel,
  author = {Vozmishchev, Artem},
  title = {Kernel-Experience Tools: Projecting Memory Kernels to Experience Functions},
  year = {2026},
  doi = {10.5281/zenodo.18239294},
  url = {https://zenodo.org/records/18239294}
}
```

---

## 📜 License

MIT License

---

**Now go find what your kernel remembers.**

kernel_experience_tools-0.1.0/pyproject.toml
@@ -0,0 +1,23 @@
[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "kernel-experience-tools"
version = "0.1.0"
authors = [
    {name = "Artem Vozmishchev", email = "your.email@example.com"}
]
description = "Library for projecting memory kernels to experience functions"
readme = "README.md"
requires-python = ">=3.7"
license = {text = "MIT"}

dependencies = [
    "numpy",
    "scipy>=1.6.0",
    "matplotlib>=3.3.0",
]

[project.urls]
Homepage = "https://github.com/BRUTALLOLOL/kernel-experience-tools"

kernel_experience_tools-0.1.0/setup.py
@@ -0,0 +1,50 @@
from setuptools import setup, find_packages

with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setup(
    name="kernel-experience-tools",
    version="0.1.0",
    author="Artem Vozmishchev",
    author_email="xbrutallololx@gmail.com",
    description="Library for projecting memory kernels to experience functions",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/BRUTALLOLOL/kernel-experience-tools",

    # Package structure
    package_dir={"": "src"},
    packages=find_packages(where="src"),

    # Python compatibility
    python_requires=">=3.7",

    # Dependencies - pip will handle version compatibility
    install_requires=[
        "numpy",
        "scipy>=1.6.0",
        "matplotlib>=3.3.0",
    ],

    # PyPI metadata
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "Programming Language :: Python :: 3.12",
        "Programming Language :: Python :: 3.13",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Intended Audience :: Science/Research",
        "Topic :: Scientific/Engineering :: Mathematics",
        "Topic :: Scientific/Engineering :: Physics",
    ],

    # Build options
    include_package_data=True,
    zip_safe=False,
)

kernel_experience_tools-0.1.0/src/kernel_experience/__init__.py
@@ -0,0 +1,27 @@
"""
Kernel-Experience Tools: A library for projecting memory kernels to experience functions.
"""

# Use try-except for compatibility
try:
    # Absolute imports (preferred)
    from kernel_experience.kernel import Kernel
    from kernel_experience.projection import project_kernel_to_n, project_to_envelope_n, compute_accuracy
    from kernel_experience.solvers import solve_volterra
except ImportError:
    # Relative imports as fallback
    from .kernel import Kernel
    from .projection import project_kernel_to_n, project_to_envelope_n, compute_accuracy
    from .solvers import solve_volterra

__version__ = "0.1.0"
__author__ = "Artem Vozmishchev"
__email__ = "xbrutallololx@gmail.com"

__all__ = [
    "Kernel",
    "project_kernel_to_n",
    "project_to_envelope_n",
    "compute_accuracy",
    "solve_volterra"
]

kernel_experience_tools-0.1.0/src/kernel_experience/kernel.py
@@ -0,0 +1,86 @@
"""
Kernel class for memory kernels K(t).
"""

import numpy as np
from typing import Callable, Union, List
from dataclasses import dataclass


@dataclass
class Kernel:
    """
    Memory kernel representation.

    Parameters
    ----------
    func : callable
        Function K(t) that returns kernel value at time t.
    name : str, optional
        Human-readable name of the kernel.
    params : dict, optional
        Dictionary of kernel parameters.
    """
    func: Callable[[float], float]
    name: str = "CustomKernel"
    params: dict = None

    def __post_init__(self):
        if self.params is None:
            self.params = {}

    def __call__(self, t: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
        """Evaluate kernel at time t."""
        return self.func(t)

    def __repr__(self):
        return f"Kernel(name='{self.name}', params={self.params})"

    # Factory methods for common kernels
    @classmethod
    def exponential(cls, gamma: float = 1.0):
        """Exponential kernel: K(t) = γ * e^{-γt}"""

        def func(t):
            return gamma * np.exp(-gamma * np.maximum(t, 0))

        return cls(func=func, name="Exponential", params={"gamma": gamma})

    @classmethod
    def power_law(cls, alpha: float = 0.5, gamma: float = 1.0):
        """Power-law kernel: K(t) = γ * t^(α-1) / Γ(α)"""
        from scipy.special import gamma as gamma_func
        prefactor = gamma / gamma_func(alpha)

        def func(t):
            t_safe = np.where(t > 0, t, 1e-12)  # Avoid division by zero at t = 0
            return prefactor * np.power(t_safe, alpha - 1)

        return cls(func=func, name="PowerLaw",
                   params={"alpha": alpha, "gamma": gamma})

    @classmethod
    def mittag_leffler(cls, alpha: float = 0.7, beta: float = 1.0):
        """Mittag-Leffler kernel: K(t) = t^(α-1) * E_{α,α}(-t^α)"""
        from scipy.special import gamma

        def func(t):
            # Simplified stand-in for E_{α,α}: SciPy has no Mittag-Leffler function
            # (see the series sketch after this file for the exact form)
            t_alpha = np.power(t, alpha)
            return np.power(t, alpha - 1) * np.exp(-t_alpha) / gamma(alpha)

        return cls(func=func, name="MittagLeffler",
                   params={"alpha": alpha, "beta": beta})

    @classmethod
    def tempered_power_law(cls, alpha: float = 0.6, beta: float = 0.3, gamma: float = 1.0):
        """Tempered power-law: K(t) = γ * t^(α-1) * e^{-βt} / Γ(α)"""
        from scipy.special import gamma as gamma_func
        prefactor = gamma / gamma_func(alpha)

        def func(t):
            t_safe = np.where(t > 0, t, 1e-12)
            return prefactor * np.power(t_safe, alpha - 1) * np.exp(-beta * t_safe)

        return cls(func=func, name="TemperedPowerLaw",
                   params={"alpha": alpha, "beta": beta, "gamma": gamma})
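
The `mittag_leffler` factory above substitutes a simplified expression for E_{α,α}, since SciPy ships no Mittag-Leffler function. If the exact kernel named in the docstring is needed, one option is a truncated power series. The sketch below is not part of the package; the helper names and the truncation length are assumptions:

```python
import numpy as np
from scipy.special import gamma as gamma_func

def mittag_leffler_series(z, alpha, beta, n_terms=100):
    """Two-parameter Mittag-Leffler function E_{alpha,beta}(z) from its power series.

    E_{alpha,beta}(z) = sum_{k>=0} z**k / Gamma(alpha*k + beta).
    The truncated series is only trustworthy for moderate |z|; large arguments
    need asymptotic expansions or a dedicated library.
    """
    z = np.asarray(z, dtype=float)
    result = np.zeros_like(z)
    for k in range(n_terms):
        result += z ** k / gamma_func(alpha * k + beta)
    return result

def mittag_leffler_kernel(t, alpha=0.7):
    """K(t) = t^(alpha-1) * E_{alpha,alpha}(-t^alpha), the form named in the docstring."""
    t = np.maximum(np.asarray(t, dtype=float), 1e-12)  # avoid the singularity at t = 0
    return t ** (alpha - 1) * mittag_leffler_series(-t ** alpha, alpha, alpha)
```

Wrapping the exact form as `Kernel(mittag_leffler_kernel, name="MittagLeffler")` drops it into the same workflow as the built-in factories.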

kernel_experience_tools-0.1.0/src/kernel_experience/projection.py
@@ -0,0 +1,107 @@
"""
Projection algorithms: K(t) → n(t).
"""

import numpy as np
from typing import Tuple, Union
from .kernel import Kernel
from .solvers import solve_volterra


def project_kernel_to_n(kernel: Kernel,
                        lambda_param: float = 0.8,
                        t_max: float = 10.0,
                        n_points: int = 1000,
                        x0: float = 1.0,
                        return_complex: bool = False) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """
    Main projection: K(t) → n(t).

    Parameters
    ----------
    kernel : Kernel
        Memory kernel.
    lambda_param : float
        Parameter λ in (0, 1).
    t_max : float
        Maximum time.
    n_points : int
        Number of time points.
    x0 : float
        Initial condition.
    return_complex : bool
        If True, return complex n(t) for oscillatory kernels.

    Returns
    -------
    t : np.ndarray
        Time grid.
    x : np.ndarray
        Solution x(t).
    n : np.ndarray
        Experience function n(t) (real or complex).
    """
    # 1. Solve Volterra equation
    t, x = solve_volterra(kernel, t_max, n_points, x0)

    # 2. Compute n(t) = log_λ(x(t)/x0)
    ratio = x / x0

    if return_complex:
        # Complex logarithm for oscillatory solutions
        n_real = np.log(np.abs(ratio)) / np.log(lambda_param)
        n_imag = np.angle(ratio) / np.log(lambda_param)
        n = n_real + 1j * n_imag
    else:
        # Real logarithm (enforce positivity)
        ratio = np.maximum(ratio, 1e-12)  # Avoid log(0)
        n = np.log(ratio) / np.log(lambda_param)

    return t, x, n


def project_to_envelope_n(kernel: Kernel,
                          lambda_param: float = 0.8,
                          t_max: float = 10.0,
                          n_points: int = 1000,
                          x0: float = 1.0) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """
    Project to monotonic envelope of n(t).

    Uses Hilbert transform to extract envelope of oscillatory solutions.
    """
    from scipy.signal import hilbert

    t, x, n_complex = project_kernel_to_n(
        kernel, lambda_param, t_max, n_points, x0, return_complex=True
    )

    # Extract envelope via Hilbert transform
    analytic_signal = hilbert(x.real)
    envelope = np.abs(analytic_signal)

    # Compute envelope n(t)
    ratio_env = envelope / x0
    ratio_env = np.maximum(ratio_env, 1e-12)
    n_env = np.log(ratio_env) / np.log(lambda_param)

    # Ensure monotonic decrease (accumulated minimum)
    n_env_mono = np.minimum.accumulate(n_env)

    return t, envelope, n_env_mono


def compute_accuracy(original_x: np.ndarray,
                     reconstructed_x: np.ndarray) -> dict:
    """
    Compute accuracy metrics between original and reconstructed solutions.
    """
    mask = original_x != 0
    rel_error = np.abs(original_x[mask] - reconstructed_x[mask]) / np.abs(original_x[mask])

    return {
        'mean_error': float(np.mean(rel_error)),
        'max_error': float(np.max(rel_error)),
        'accuracy': float(1 - np.mean(rel_error)),
        'rmse': float(np.sqrt(np.mean((original_x - reconstructed_x) ** 2)))
    }
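
`project_to_envelope_n` is exported from the package alongside `project_kernel_to_n` but is not documented in the README above: for an oscillatory kernel it returns the Hilbert-transform envelope of x(t) together with a monotone experience function. A minimal usage sketch (the kernel, λ and grid here are arbitrary choices):

```python
import numpy as np
from kernel_experience import Kernel, project_to_envelope_n

# A damped oscillatory kernel, for which x(t) itself oscillates
K_osc = Kernel(lambda t: np.exp(-0.1 * t) * np.sin(t), name="Oscillatory")

# envelope: |analytic signal| of x(t); n_env: monotone experience function of the envelope
t, envelope, n_env = project_to_envelope_n(K_osc, lambda_param=0.8, t_max=30, n_points=3000)
print(f"Envelope memory score: {n_env[-1]:.2f}")
```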

kernel_experience_tools-0.1.0/src/kernel_experience/solvers.py
@@ -0,0 +1,76 @@
"""
Numerical solvers for Volterra integral equations.
"""

import numpy as np
from typing import Callable, Tuple
from .kernel import Kernel


def solve_volterra(kernel: Kernel,
                   t_max: float = 10.0,
                   n_points: int = 1000,
                   x0: float = 1.0,
                   method: str = 'trapezoidal') -> Tuple[np.ndarray, np.ndarray]:
    """
    Solve x(t) = x0 - ∫₀ᵗ K(t-τ) x(τ) dτ.

    Parameters
    ----------
    kernel : Kernel
        Memory kernel K(t).
    t_max : float
        Maximum time.
    n_points : int
        Number of time points.
    x0 : float
        Initial condition.
    method : str
        Integration method ('trapezoidal' or 'simpson').

    Returns
    -------
    t : np.ndarray
        Time grid.
    x : np.ndarray
        Solution x(t).
    """
    t = np.linspace(0, t_max, n_points)
    dt = t[1] - t[0]
    x = np.zeros(n_points)
    x[0] = x0

    # Precompute kernel values for efficiency
    K_vals = np.zeros((n_points, n_points))
    for i in range(n_points):
        for j in range(i + 1):
            K_vals[i, j] = kernel(t[i] - t[j])

    # Solve using specified method
    if method == 'trapezoidal':
        for i in range(1, n_points):
            integral = 0
            for j in range(i):
                weight = 0.5 if (j == 0 or j == i - 1) else 1.0
                integral += weight * K_vals[i, j] * x[j] * dt

            x[i] = x0 - integral

    elif method == 'simpson':
        for i in range(1, n_points):
            if i % 2 == 0:  # Simpson requires even number of intervals
                integral = K_vals[i, 0] * x[0] + K_vals[i, i] * x[i - 1]
                for j in range(1, i):
                    weight = 4.0 if j % 2 == 1 else 2.0
                    integral += weight * K_vals[i, j] * x[j]
                integral *= dt / 3.0
                x[i] = x0 - integral
            else:
                # Fall back to trapezoidal for odd intervals
                integral = 0
                for j in range(i):
                    weight = 0.5 if (j == 0 or j == i - 1) else 1.0
                    integral += weight * K_vals[i, j] * x[j] * dt
                x[i] = x0 - integral

    return t, x
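
`solve_volterra` precomputes an n_points × n_points kernel matrix and accumulates the convolution in a Python loop, so it is O(n_points²) in both time and memory. On the uniform grid used here, K(t_i - t_j) depends only on i - j, so a single row of kernel evaluations suffices and the inner sum becomes a dot product. The variant below is not part of the library; it is a sketch that reproduces the same trapezoidal weights as the loop above:

```python
import numpy as np

def solve_volterra_trapezoidal_fast(kernel, t_max=10.0, n_points=1000, x0=1.0):
    """Same explicit trapezoidal scheme as solve_volterra, with the inner sum vectorized."""
    t = np.linspace(0, t_max, n_points)
    dt = t[1] - t[0]
    x = np.zeros(n_points)
    x[0] = x0
    K_row = kernel(t)  # on a uniform grid K(t_i - t_j) = K_row[i - j]
    for i in range(1, n_points):
        w = np.ones(i)
        w[0] = 0.5
        w[-1] = 0.5  # end-point weights, matching the library's loop
        integral = dt * np.dot(w * K_row[i - np.arange(i)], x[:i])
        x[i] = x0 - integral
    return t, x
```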

kernel_experience_tools-0.1.0/src/kernel_experience_tools.egg-info/SOURCES.txt
@@ -0,0 +1,16 @@
LICENSE
MANIFEST.in
README.md
pyproject.toml
requirements.txt
setup.py
src/kernel_experience/__init__.py
src/kernel_experience/kernel.py
src/kernel_experience/projection.py
src/kernel_experience/solvers.py
src/kernel_experience_tools.egg-info/PKG-INFO
src/kernel_experience_tools.egg-info/SOURCES.txt
src/kernel_experience_tools.egg-info/dependency_links.txt
src/kernel_experience_tools.egg-info/not-zip-safe
src/kernel_experience_tools.egg-info/requires.txt
src/kernel_experience_tools.egg-info/top_level.txt

kernel_experience_tools-0.1.0/src/kernel_experience_tools.egg-info/dependency_links.txt
@@ -0,0 +1 @@


kernel_experience_tools-0.1.0/src/kernel_experience_tools.egg-info/not-zip-safe
@@ -0,0 +1 @@


kernel_experience_tools-0.1.0/src/kernel_experience_tools.egg-info/top_level.txt
@@ -0,0 +1 @@
kernel_experience