pyCLINE 0.1.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyCLINE/__init__.py +22 -0
- pyCLINE/example.py +164 -0
- pyCLINE/generate_data.py +105 -0
- pyCLINE/model.py +1045 -0
- pyCLINE/recovery_methods/__init__.py +15 -0
- pyCLINE/recovery_methods/data_preparation.py +394 -0
- pyCLINE/recovery_methods/nn_training.py +417 -0
- pycline-0.1.7.dist-info/LICENSE +23 -0
- pycline-0.1.7.dist-info/METADATA +40 -0
- pycline-0.1.7.dist-info/RECORD +12 -0
- pycline-0.1.7.dist-info/WHEEL +5 -0
- pycline-0.1.7.dist-info/top_level.txt +1 -0
pyCLINE/__init__.py
ADDED
@@ -0,0 +1,22 @@
"""
pyCLINE: Python implementation of the CLINE method introduced by Prokop, Billen, Frolov and Gelens (2025).

Modules:
    recovery_methods: Methods for recovering nullclines.
    model: Model definitions and utilities.
    generate_data: Functions for generating synthetic data.
    example: Example usage and demonstrations.
"""

from . import recovery_methods
from . import model
from . import generate_data
from . import example
from importlib.metadata import version, PackageNotFoundError

try:
    __version__ = version("pyCLINE")
except PackageNotFoundError:
    __version__ = "0.0.0"  # Default when package is not installed

__all__ = ["recovery_methods", "model", "generate_data", "example"]
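
For orientation, a minimal usage sketch based on the initializer above; it is an illustration, not part of the package, and assumes the wheel has been installed so that the pyCLINE module and its distribution metadata are importable.

import pyCLINE

# __init__.py resolves the installed version via importlib.metadata and
# falls back to "0.0.0" when the distribution is not installed
print(pyCLINE.__version__)

# submodules re-exported at package level
print(pyCLINE.__all__)  # ['recovery_methods', 'model', 'generate_data', 'example']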
pyCLINE/example.py
ADDED
@@ -0,0 +1,164 @@
import sys
import os
from . import generate_data
import pandas as pd
import matplotlib.pyplot as plt
from . import recovery_methods
from . import model
import torch.nn as nn
import numpy
import torch

class ExampleCallable:
    def __call__(self, example_model, plot=True):
        example(example_model, plot)

sys.modules[__name__] = ExampleCallable()

def example(example_model, plot):
    """
    This function runs multiple examples, depending on the choice of the example model.
    It should be used as a guideline for how to run pyCLINE on synthetic data.
    For using pyCLINE on real data, the user should adapt the data source accordingly.

    Args:
        example_model (str): Selection of model to run the example on. Choose from 'FHN', 'Bicubic', 'GeneExpression', 'DelayOscillator'.
        plot (bool): If True, the function will plot the data and the predictions.

    Raises:
        ValueError: In case no example model string is provided.
    """

    # provide name of model to run example
    if example_model is None:
        raise ValueError("Error: example_model is None: please provide a model name (FHN, Bicubic, GeneExpression, DelayOscillator).")

    # check if data already exists
    path='data/synthetic_data/'
    if example_model=='FHN':
        fname='FHN_eps=0.3_a=0.0.csv'
    else:
        fname=example_model+'.csv'

    print('Running example for model: '+example_model)
    print('Step 1: Load or generate data')
    # generate data if it does not exist
    if os.path.exists(path+fname):
        print('Data already exists: '+path+fname)
        df = pd.read_csv(path+fname)

    else:
        print('No data saved: generating data for model: '+example_model)
        getattr(generate_data, example_model)()
        print('Data generated and saved: '+path+fname)
        df = pd.read_csv(path+fname)

    # extracting 1 time series and plotting
    if example_model!='DelayOscillator':
        df_sim = df[(df['sim']==1)].copy()
    else:
        df_sim=df.copy()
    df_sim.reset_index(drop=True, inplace=True)

    if example_model=='DelayOscillator':
        tau=10  # 12,20,35
        v_data=df_sim['u'][df_sim['time']>=tau].values
        df_sim=df_sim[df_sim['time']<=df_sim['time'].max() - tau]
        df_sim['v']=v_data

    if plot:
        fig, ax = plt.subplots(1, 1, figsize=(5, 3))
        ax.plot(df_sim['time'], df_sim['u'], label='u')
        ax.plot(df_sim['time'], df_sim['v'], label='v')
        ax.set_xlabel('Time')
        ax.set_ylabel('Amount in arb. units')
        plt.show()

    # prepare data for training
    # tmin: minimum time point to consider to avoid transient behavior
    # val_min, val_max: min and max values of the min-max normalization
    print('Step 2: Prepare data for training')
    if example_model=='FHN':
        tmin, val_min, val_max = 3, -1.0, 1.0
    if example_model=='Bicubic':
        tmin, val_min, val_max = 10, 0.0, 1.0
    if example_model=='GeneExpression':
        tmin, val_min, val_max = 10, 0.0, 1.0
    if example_model=='DelayOscillator':
        tmin, val_min, val_max = 100, 0.0, 1.0


    # normalize data and create additional derivative or delayed variables
    df_sim, df_coef = recovery_methods.data_preparation.prepare_data(df_sim, vars=['u', 'v'], time='time', tmin=tmin, scheme='derivative', value_min=val_min, value_max=val_max)

    # define input and target variables (can be changed as needed)
    input_vars, target_vars = ['norm u', 'd normu/dt'], ['norm v']

    # shuffle and split training, test and validation data
    # optimal_thresholding: if True, the amount of samples within the phase space is evenly distributed
    input_train, target_train, input_test, target_test, input_val, target_val = recovery_methods.data_preparation.shuffle_and_split(df_sim, input_vars=input_vars,
                                                                                                                                     target_var=target_vars,
                                                                                                                                     optimal_thresholding=False)

    # set up the model
    # Nin: number of input variables
    # Nout: number of output variables
    # Nlayers: number of layers
    # Nnodes: number of nodes per layer
    # summary: if True, the model summary is printed
    # lr: learning rate
    # activation: activation function can be chosen as needed
    print('Step 3: Set up the model')
    nn_model, optimizer, loss_fn = recovery_methods.nn_training.configure_FFNN_model(Nin=len(input_vars), Nout=len(target_vars),
                                                                                     Nlayers=3, Nnodes=64, summary=True, lr=1e-4,
                                                                                     activation=nn.SiLU)

    # train the model
    print('Step 4: Train the model')
    training_loss, val_loss, test_loss, predictions_evolution, lc_predictions = recovery_methods.nn_training.train_FFNN_model(model=nn_model,
                                                                                                                               optimizer=optimizer, loss_fn=loss_fn,
                                                                                                                               input_train=input_train,
                                                                                                                               target_train=target_train, input_test=input_test,
                                                                                                                               target_test=target_test,
                                                                                                                               validation_data=(input_val, target_val),
                                                                                                                               epochs=3000, batch_size=64, device='cpu', save_evolution=True,
                                                                                                                               method='derivative', minimal_value=val_min, maximal_value=val_max)

    # save model and predictions
    print('Step 5: Save the generated predictions and model')
    if not os.path.exists(f'results/{example_model}'):
        os.makedirs(f'results/{example_model}')

    torch.save(nn_model.state_dict(), f'results/{example_model}/model.pth')
    numpy.save(f'results/{example_model}/predictions.npy', predictions_evolution)
    numpy.save(f'results/{example_model}/lc_predictions.npy', lc_predictions)
    numpy.save(f'results/{example_model}/training_loss.npy', training_loss)
    numpy.save(f'results/{example_model}/val_loss.npy', val_loss)
    numpy.save(f'results/{example_model}/test_loss.npy', test_loss)

    print('Example completed: model and predictions saved in results/'+example_model)

    # plot the predictions
    if plot:
        print('Step 6: Plot the predictions')
        fig, ax = plt.subplots(1, 1, figsize=(5, 3))
        ax.scatter(df_sim['norm u'], df_sim['norm v'], label='GT LC', c='silver')
        ax.scatter(input_train['norm u'], lc_predictions[-1, :, 0],
                   c='C2', label='Pred. LC', s=2)

        # compute nullcline
        u = numpy.linspace(df_coef['u'].min(), df_coef['u'].max(), predictions_evolution.shape[1])
        sim_model = getattr(model, example_model)()
        gt_nullcline = sim_model.vnull(u)

        norm_u = recovery_methods.data_preparation.normalize_adjusted(u, df_coef, 'u',
                                                                      min=val_min, max=val_max)
        norm_gt_nullcline = recovery_methods.data_preparation.normalize_adjusted(gt_nullcline, df_coef, 'v',
                                                                                 min=val_min, max=val_max)

        ax.plot(norm_u, norm_gt_nullcline, label='GT NC', c='k')
        ax.plot(norm_u, predictions_evolution[-1, :], label='Pred. NC', c='C1')
        ax.set_xlabel('u')
        ax.set_ylabel('v')
        ax.legend()
        plt.show()
pyCLINE/generate_data.py
ADDED
@@ -0,0 +1,105 @@
import numpy as np
from . import model

def FHN(dt=0.1, N=1000000, epsilons=[0.3], n_intiaL_conditions=1):
    """
    Generate data using the FitzHugh-Nagumo model (see src/pyCLINE/model.py)
    with different time scale separation, as used in Prokop, Billen, Frolov, Gelens (2025).

    Args:
        dt (float): Time step. Defaults to 0.1.
        N (int): Number of time steps. Defaults to 1000000.
        epsilons (list): List of time scale separations. Defaults to [0.3].

    Returns:
        None
    """
    if dt <= 0:
        raise ValueError("Time step (dt) must be positive.")
    if N <= 0:
        raise ValueError("Number of time steps (N) must be positive.")
    if n_intiaL_conditions <= 0:
        raise ValueError("Number of initial conditions must be positive.")

    u = np.zeros([2, N])
    for i_eps, eps in enumerate(epsilons):
        u[:, 0] = [0.1, 0.1]
        p = [1, 1, eps, 0.5, 0.0]
        fhn = model.FHN(p)
        u0, v0 = np.meshgrid(np.linspace(-1.25, 1.75, n_intiaL_conditions), np.linspace(-0.75, 1.75, n_intiaL_conditions))
        x0 = np.array([u0, v0])
        fhn.generate_data(x0, dt, N)
    pass

def Bicubic(dt=0.1, N=1000000, n_intiaL_conditions=1):
    """
    Generate data using the Bicubic model (see src/pyCLINE/model.py),
    as used in Prokop, Billen, Frolov, Gelens (2025).

    Args:
        dt (float, optional): Time step. Defaults to 0.1.
        N (int, optional): Number of time steps. Defaults to 1000000.
    """
    if dt <= 0:
        raise ValueError("Time step (dt) must be positive.")
    if N <= 0:
        raise ValueError("Number of time steps (N) must be positive.")
    if n_intiaL_conditions <= 0:
        raise ValueError("Number of initial conditions must be positive.")

    u = np.zeros([2, N])

    u[:, 0] = [0.1, 0.1]
    p = [-0.5, 0.5, -1/3]

    bicubic = model.Bicubic(p)
    u0, v0 = np.meshgrid(np.linspace(-1.25, 1.75, n_intiaL_conditions), np.linspace(-0.75, 1.75, n_intiaL_conditions))
    x0 = np.array([u0, v0])
    bicubic.generate_data(x0, dt, 10000)
    pass

def GeneExpression(dt=0.1, N=1000000, n_intiaL_conditions=1):
    """
    Generate data using the Gene Expression model (see src/pyCLINE/model.py),
    as used in Prokop, Billen, Frolov, Gelens (2025).

    Args:
        dt (float, optional): Time step. Defaults to 0.1.
        N (int, optional): Number of time steps. Defaults to 1000000.
    """
    if dt <= 0:
        raise ValueError("Time step (dt) must be positive.")
    if N <= 0:
        raise ValueError("Number of time steps (N) must be positive.")
    if n_intiaL_conditions <= 0:
        raise ValueError("Number of initial conditions must be positive.")

    u = np.zeros([2, N])

    u[:, 0] = [0.1, 0.1]
    p = [1, 0.05, 1, 0.05, 1, 0.05, 1, 1, 0.1, 2]

    gene_expression = model.GeneExpression(p)
    u0, v0 = np.meshgrid(np.linspace(0, 1.75, n_intiaL_conditions), np.linspace(0, 1.75, n_intiaL_conditions))
    x0 = np.array([u0, v0])
    gene_expression.generate_data(x0, dt, 10000)
    pass

def DelayOscillator(N=20000):
    """
    Generate data using the Delay Oscillator model (see src/pyCLINE/model.py),
    as used in Prokop, Billen, Frolov, Gelens (2025).

    Args:
        N (int, optional): Number of time steps. Defaults to 20000.
    """
    if N <= 0:
        raise ValueError("Number of time steps (N) must be positive.")

    time = np.linspace(0, 400, N-1)
    dt = time[1] - time[0]
    p = [4, 10, 2]

    delay_osci = model.DelayOscillator(p)
    delay_osci.generate_data(y_0=0, dt=dt, t_max=time[-1])
    pass