pyCLINE 0.1.10__tar.gz → 0.1.12__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pyCLINE
-Version: 0.1.10
+Version: 0.1.12
 Summary: This package is the Python implementation of the CLINE method
 Author-email: Bartosz Prokop <bartosz.prokop@kuleuven.be>, Nikita Frolov <nikita.frolov@kuleuven.be>, Lendert Gelens <lendert.gelens@kuleuven.be>
 Project-URL: Homepage, https://pycline-ec8369.pages.gitlab.kuleuven.be/
@@ -9,7 +9,7 @@ Keywords: model,model identification,nullcline,data-driven,machine learning,deep
 Classifier: Development Status :: 3 - Alpha
 Classifier: Programming Language :: Python :: 3
 Classifier: Operating System :: OS Independent
-Requires-Python: >=3.10
+Requires-Python: <3.12,>=3.10
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: matplotlib>=3.6.2
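The most consequential metadata change is the tightened interpreter requirement, `Requires-Python: <3.12,>=3.10`. As a quick illustrative check (the specifier string is copied from the diff above; using the `packaging` library to evaluate it is just one possible approach, not something pyCLINE itself does), you can test which interpreter versions the new constraint accepts:

```python
# Sketch: evaluate the new Requires-Python specifier against interpreter versions.
# Requires the third-party "packaging" library (pip install packaging).
from packaging.specifiers import SpecifierSet

spec = SpecifierSet("<3.12,>=3.10")  # taken verbatim from the 0.1.12 metadata

for version in ("3.9.18", "3.10.13", "3.11.9", "3.12.2"):
    print(version, "allowed" if version in spec else "rejected")
# Only the 3.10.x and 3.11.x interpreters satisfy the new constraint.
```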
@@ -100,7 +100,7 @@ With the prepared data, we can set up the model and train it:
 nn_model, optimizer, loss_fn = recovery_methods.nn_training.configure_FFNN_model(Nin=len(input_vars), Nout=len(target_vars),Nlayers=3, Nnodes=64, summary=True, lr=1e-4, activation=nn.SiLU)
 
 #training
-training_loss, val_loss, test_loss, predictions_evolution, lc_predictions = recovery_methods.nn_training.train_FFNN_model(model=nn_model, optimizer=optimizer, loss_fn=loss_fn, input_train=input_train,target_train=target_train,input_test=input_test, target_test=target_test, validation_data=(input_val, target_val), epochs=3000, batch_size=64, device='cpu',save_evolution=True,method='derivative', minimal_value=val_min,maximal_value=val_max)
+training_loss, val_loss, test_loss, predictions_evolution, lc_predictions, _ = recovery_methods.nn_training.train_FFNN_model(model=nn_model, optimizer=optimizer, loss_fn=loss_fn, input_train=input_train,target_train=target_train,input_test=input_test, target_test=target_test, validation_data=(input_val, target_val), epochs=3000, batch_size=64, device='cpu',save_evolution=True,method='derivative', minimal_value=val_min,maximal_value=val_max)
 ```
 
 The result of the training are the losses and the predictions of the limit cycle (`lc_predictions`) and nullcline predictions (`predictions_evolution`) over the set amount of epochs, which can be used to visualize the outcome of the nullcline predictions.
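Because `train_FFNN_model` now returns six values instead of five (the trailing one is discarded as `_` in the README snippet above), any downstream code that unpacks the result needs the extra placeholder. A minimal sketch of what one might do with the returned losses, continuing from the variables in the snippet above and assuming they are per-epoch sequences of scalar values (their exact structure is not documented in this diff):

```python
# Sketch: plot the per-epoch losses returned by train_FFNN_model.
# Assumes training_loss, val_loss and test_loss are sequences of scalar loss values;
# that layout is an assumption, not something stated in this diff.
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot(training_loss, label="training loss")
ax.plot(val_loss, label="validation loss")
ax.plot(test_loss, label="test loss")
ax.set_xlabel("epoch")
ax.set_ylabel("loss")
ax.set_yscale("log")  # losses typically span several orders of magnitude
ax.legend()
plt.show()
```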
@@ -77,7 +77,7 @@ With the prepared data, we can set up the model and train it:
 nn_model, optimizer, loss_fn = recovery_methods.nn_training.configure_FFNN_model(Nin=len(input_vars), Nout=len(target_vars),Nlayers=3, Nnodes=64, summary=True, lr=1e-4, activation=nn.SiLU)
 
 #training
-training_loss, val_loss, test_loss, predictions_evolution, lc_predictions = recovery_methods.nn_training.train_FFNN_model(model=nn_model, optimizer=optimizer, loss_fn=loss_fn, input_train=input_train,target_train=target_train,input_test=input_test, target_test=target_test, validation_data=(input_val, target_val), epochs=3000, batch_size=64, device='cpu',save_evolution=True,method='derivative', minimal_value=val_min,maximal_value=val_max)
+training_loss, val_loss, test_loss, predictions_evolution, lc_predictions, _ = recovery_methods.nn_training.train_FFNN_model(model=nn_model, optimizer=optimizer, loss_fn=loss_fn, input_train=input_train,target_train=target_train,input_test=input_test, target_test=target_test, validation_data=(input_val, target_val), epochs=3000, batch_size=64, device='cpu',save_evolution=True,method='derivative', minimal_value=val_min,maximal_value=val_max)
 ```
 
 The result of the training are the losses and the predictions of the limit cycle (`lc_predictions`) and nullcline predictions (`predictions_evolution`) over the set amount of epochs, which can be used to visualize the outcome of the nullcline predictions.
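This second copy of the snippet likely reflects the README text being embedded both in the standalone README.md and in the package metadata's long description (the hunk offsets differ by roughly the length of the metadata header), and both copies pick up the extra `_` return value. Beyond loss curves, the prediction arrays returned with `save_evolution=True` can be inspected directly; a rough sketch, with the strong caveat that the layout assumed below (first axis running over saved epochs) is an assumption rather than documented behavior:

```python
# Sketch: inspect the final saved predictions from train_FFNN_model.
# Assumes the first axis of both arrays indexes saved epochs; this may differ
# from the actual pyCLINE output layout.
import numpy as np

predictions_evolution = np.asarray(predictions_evolution)
lc_predictions = np.asarray(lc_predictions)

final_nullcline = predictions_evolution[-1]   # last saved nullcline snapshot
final_limit_cycle = lc_predictions[-1]        # last saved limit-cycle snapshot

print("nullcline snapshot shape:", final_nullcline.shape)
print("limit-cycle snapshot shape:", final_limit_cycle.shape)
```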
@@ -4,10 +4,10 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "pyCLINE"
-version = "0.1.10"
+version = "0.1.12"
 description = "This package is the Python implementation of the CLINE method"
 readme = "README.md"
-requires-python = ">=3.10"
+requires-python = ">=3.10, <3.12"
 classifiers = [ "Development Status :: 3 - Alpha", "Programming Language :: Python :: 3", "Operating System :: OS Independent",]
 keywords = [ "model", "model identification", "nullcline", "data-driven", "machine learning", "deep learning", "torch", "dynamics", "oscillator", "nonlinear dynamics", "complex systems",]
 dependencies = [ "matplotlib>=3.6.2", "numpy>=1.24.1,<1.25.0", "pandas~=1.5.2", "torch>=2.4.1", "tqdm>=4.66.1", "jitcdde>=1.8.1", "scipy>=1.9.3",]
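The dependency pins in `pyproject.toml` are fairly tight (for example `numpy>=1.24.1,<1.25.0` and `pandas~=1.5.2`). A small, optional sanity check after installation, using only the standard library, can report which versions were actually resolved in a given environment; the package names below are taken from the dependency list above:

```python
# Sketch: print the installed versions of pyCLINE's pinned dependencies.
# Uses only the standard library; package names come from pyproject.toml above.
from importlib import metadata

for pkg in ("matplotlib", "numpy", "pandas", "torch", "tqdm", "jitcdde", "scipy"):
    try:
        print(f"{pkg}: {metadata.version(pkg)}")
    except metadata.PackageNotFoundError:
        print(f"{pkg}: not installed")
```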
@@ -15,7 +15,7 @@ class ExampleCallable:
 
 sys.modules[__name__]= ExampleCallable()
 
-def example(example_model, plot):
+def example(example_model, plot, epochs=3000, batch_size=64, lr=1e-4, Nlayers=3, Nnodes=64):
 """
 This function runs multiple examples, depending on the choice of the example model.
 It should be used as a guideline how to run pyCLINE for synthetic data.
@@ -24,6 +24,11 @@ def example(example_model, plot):
 Args:
 example_model (str): Selection of model to run the example on. Chose from 'FHN', 'Bicubic', 'GeneExpression', 'DelayOscillator'.
 plot (bool): If True, the function will plot the data and the predictions.
+epochs (int, optional): Number of epochs for training the model. Defaults to 3000.
+batch_size (int, optional): Batch size for training the model. Defaults to 64.
+lr (float, optional): Learning rate for training the model. Defaults to 1e-4.
+Nlayers (int, optional): Number of layers in the neural network. Defaults to 3.
+Nnodes (int, optional): Number of nodes in each layer of the neural network. Defaults to 64.
 
 Raises:
 ValueError: In case no example model string is provided.
@@ -115,7 +120,7 @@ def example(example_model, plot):
 
 # train the model
 print('Step 4: Train the model')
-training_loss, val_loss, test_loss, predictions_evolution, lc_predictions = recovery_methods.nn_training.train_FFNN_model(model=nn_model,
+training_loss, val_loss, test_loss, predictions_evolution, lc_predictions, _ = recovery_methods.nn_training.train_FFNN_model(model=nn_model,
 optimizer=optimizer, loss_fn=loss_fn,
 input_train=input_train,
 target_train=target_train,input_test=input_test,
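With the new keyword arguments, the bundled example can be tuned without editing the source. A hypothetical invocation is sketched below; the import path follows the callable-module trick visible in the diff (`sys.modules[__name__] = ExampleCallable()`), and the argument values are illustrative rather than recommended settings:

```python
# Sketch: run the packaged FHN example with custom training settings.
# The import path and call style are inferred from the diff; treat them as assumptions.
import pyCLINE.example

# The module replaces itself with a callable object, so it can be called directly.
pyCLINE.example(
    example_model="FHN",   # one of 'FHN', 'Bicubic', 'GeneExpression', 'DelayOscillator'
    plot=True,
    epochs=1000,           # shorter run than the 3000-epoch default
    batch_size=64,
    lr=1e-4,
    Nlayers=3,
    Nnodes=64,
)
```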
@@ -11,8 +11,6 @@ def FHN(dt=0.1, N=1000000, epsilons=[0.3], n_intiaL_conditions=1):
 N (int): Number of time steps. Defaults to 1000000.
 epsilons (list): List of time scale separations. Defaults to [0.3].
 
-Returns:
-None
 """
 if dt <= 0:
 raise ValueError("Time step (dt) must be positive.")
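The `FHN` data-generation routine validates its arguments (rejecting a non-positive `dt`), and its docstring no longer advertises a `Returns: None` block. A hypothetical call is sketched below; only the signature is taken from the diff, the import path is an assumption, and the parameter name `n_intiaL_conditions` is spelled exactly as it appears in the signature above:

```python
# Sketch: generate FitzHugh-Nagumo example data with the signature shown in the diff.
# The module path is an assumption; only the function signature comes from the diff.
from pyCLINE.generate_data import FHN  # hypothetical import path

# Fewer time steps than the default N=1000000 for a quicker run.
FHN(dt=0.1, N=100_000, epsilons=[0.3], n_intiaL_conditions=1)

# A non-positive time step raises ValueError("Time step (dt) must be positive.")
```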
4 files without changes