AdeptML 1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- adeptml-1.0/LICENSE +21 -0
- adeptml-1.0/PKG-INFO +49 -0
- adeptml-1.0/README.md +27 -0
- adeptml-1.0/adeptml/__init__.py +2 -0
- adeptml-1.0/adeptml/configs.py +87 -0
- adeptml-1.0/adeptml/ensemble.py +79 -0
- adeptml-1.0/adeptml/models.py +108 -0
- adeptml-1.0/adeptml/train_utils.py +129 -0
- adeptml-1.0/pyproject.toml +24 -0
adeptml-1.0/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2023 ADAMS Lab
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
adeptml-1.0/PKG-INFO
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
Metadata-Version: 2.1
|
|
2
|
+
Name: AdeptML
|
|
3
|
+
Version: 1.0
|
|
4
|
+
Summary: A High-Level PyTorch based Library for Hybrid Physics-Informed Machine Learning Models
|
|
5
|
+
Author: Manaswin Oddiraju
|
|
6
|
+
Requires-Python: >=3.9.0,<3.13
|
|
7
|
+
Classifier: Programming Language :: Python :: 3
|
|
8
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
9
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
10
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
11
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
12
|
+
Provides-Extra: docs
|
|
13
|
+
Requires-Dist: Sphinx (==7.3.7) ; extra == "docs"
|
|
14
|
+
Requires-Dist: jax (>=0.4.18,<0.5.0)
|
|
15
|
+
Requires-Dist: joblib (>=1.3.2,<2.0.0)
|
|
16
|
+
Requires-Dist: sphinx-rtd-theme (==2.0.0) ; extra == "docs"
|
|
17
|
+
Requires-Dist: sphinxcontrib-napoleon (==0.7) ; extra == "docs"
|
|
18
|
+
Requires-Dist: tensorboard (>=2.16.2,<3.0.0)
|
|
19
|
+
Requires-Dist: torch (>=2.1.0,<3.0.0)
|
|
20
|
+
Description-Content-Type: text/markdown
|
|
21
|
+
|
|
22
|
+
# Auto-differentiable embedding of Physics and Torch Machine Learning (AdePT-ML):
|
|
23
|
+
|
|
24
|
+
This is a convenience library built on top of PyTorch to enable easy integration and training of hybrid models involving physics and deep learning modules.
|
|
25
|
+
|
|
26
|
+
### Features
|
|
27
|
+
1. Pre-defined Modules and configs for physics and MLP architectures.
|
|
28
|
+
2. Physics Module accepts physics functions which return numpy arrays.
|
|
29
|
+
3. Ability to input custom PyTorch nn classes.
|
|
30
|
+
4. Ensemble Module allows easy integration of constituent physics and ML modules for simple inference and training.
|
|
31
|
+
5. Auto-log training data with tensorboard.
|
|
32
|
+
|
|
33
|
+
## Installation
|
|
34
|
+
Installing with pip
|
|
35
|
+
```
|
|
36
|
+
pip install adeptml
|
|
37
|
+
```
|
|
38
|
+
## Requirements (Automatically installed with pip):
|
|
39
|
+
1. PyTorch (https://pytorch.org/)
|
|
40
|
+
2. Joblib (https://joblib.readthedocs.io/en/latest/) (For loading and saving model parameters)
|
|
41
|
+
3. Tensorboard
|
|
42
|
+
|
|
43
|
+
## Documentation:
|
|
44
|
+
Visit our [Read the docs page](https://adept-ml.readthedocs.io/en/latest/)
|
|
45
|
+
|
|
46
|
+
## Examples:
|
|
47
|
+
Refer to the tests file. Additional examples will be added soon.
|
|
48
|
+
|
|
49
|
+
|
adeptml-1.0/README.md
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
# Auto-differentiable embedding of Physics and Torch Machine Learning (AdePT-ML):
|
|
2
|
+
|
|
3
|
+
This is a convenience library built on top of PyTorch to enable easy integration and training of hybrid models involving physics and deep learning modules.
|
|
4
|
+
|
|
5
|
+
### Features
|
|
6
|
+
1. Pre-defined Modules and configs for physics and MLP architectures.
|
|
7
|
+
2. Physics Module accepts physics functions which return numpy arrays.
|
|
8
|
+
3. Ability to input custom PyTorch nn classes.
|
|
9
|
+
4. Ensemble Module allows easy integration of constituent physics and ML modules for simple inference and training.
|
|
10
|
+
5. Auto-log training data with tensorboard.
|
|
11
|
+
|
|
12
|
+
## Installation
|
|
13
|
+
Installing with pip
|
|
14
|
+
```
|
|
15
|
+
pip install adeptml
|
|
16
|
+
```
|
|
17
|
+
## Requirements (Automatically installed with pip):
|
|
18
|
+
1. PyTorch (https://pytorch.org/)
|
|
19
|
+
2. Joblib (https://joblib.readthedocs.io/en/latest/) (For loading and saving model parameters)
|
|
20
|
+
3. Tensorboard
|
|
21
|
+
|
|
22
|
+
## Documentation:
|
|
23
|
+
Visit our [Read the docs page](https://adept-ml.readthedocs.io/en/latest/)
|
|
24
|
+
|
|
25
|
+
## Examples:
|
|
26
|
+
Refer to the tests file. Additional examples will be added soon.
|
|
27
|
+
|
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
import dataclasses
|
|
2
|
+
from typing import Callable, Optional, Union
|
|
3
|
+
import torch
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
# Prefer the GPU when one is visible to torch; otherwise fall back to the CPU.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
|
10
|
+
|
|
11
|
+
|
|
12
|
+
@dataclasses.dataclass
class MLPConfig:
    """
    Configuration class for the Multilayer Perceptron (MLP) model.

    Attributes
    ----------
    num_input_dim : int
        Number of input dimensions to the MLP.

    num_hidden_dim : int
        Number of hidden dimensions in each hidden layer.

    num_output_dim : int
        Number of output dimensions from the MLP.

    num_hidden_layers : int
        Number of hidden layers in the MLP.

    activation_functions : str
        Key identifying the activation function used between hidden layers
        (e.g. ``"sigmoid"``).
    """

    # BUG FIX (docs): the docstring previously described a ``layers : int``
    # attribute that does not exist on this dataclass.
    num_input_dim: int
    num_hidden_dim: int
    num_output_dim: int
    num_hidden_layers: int
    activation_functions: str
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
@dataclasses.dataclass
class PhysicsConfig:
    """
    Configuration for a physics-based model component.

    Attributes
    ----------
    forward_func : Callable[[torch.Tensor], torch.Tensor]
        Callable evaluating the physics model's forward pass.

    jacobian_func : Callable[[torch.Tensor], torch.Tensor]
        Callable returning the Jacobian of ``forward_func`` with respect to
        its input; consumed during backpropagation.
    """

    forward_func: Callable[[torch.Tensor], torch.Tensor]
    jacobian_func: Callable[[torch.Tensor], torch.Tensor]
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
# Any single-model specification accepted by the ensemble config.
ModelConfig = Union[MLPConfig, PhysicsConfig]
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
@dataclasses.dataclass
class HybridConfig:
    """
    Config for Ensemble Models.

    Attributes
    ----------
    models: dict
        Contains Modelname as keys and an instance of ModelConfigs as values.

    model_inputs: dict
        By default, the Ensemble model operates sequentially, using the output
        of the preceding model as input for the next. Setting this dict to a
        non-empty value overrides that behavior. Keys are model names; values
        are dicts specifying input customization. Each inner dict holds model
        names as keys and specifies how to stack inputs:
        - ``None`` stacks the entire tensor.
        - A list of ints stacks only specified dimensions.
        Use "Input" if the input to this model matches the hybrid model's
        original input.
    """

    # BUG FIX: the annotations previously used the PEP 604 ``X | Y`` operator
    # between typing objects, which raises TypeError at class-creation time on
    # Python 3.9 even though the package declares support for >=3.9.0.
    models: dict[str, Union[ModelConfig, torch.nn.Module]]
    model_inputs: Optional[dict[str, dict[str, Optional[list[int]]]]] = None
|
|
@@ -0,0 +1,79 @@
|
|
|
1
|
+
import torch
|
|
2
|
+
import torch.nn
|
|
3
|
+
from adeptml import configs, models
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class HybridModel(torch.nn.Module):
    """
    Torch Module for Serial Hybrid Physics Models.

    The ensemble runs its constituent models in the order they appear in
    ``config.models``, feeding each model's output to the next unless
    ``config.model_inputs`` rewires the connections.

    Parameters
    ----------
    config : configs.HybridConfig
        ``config.models`` maps a model name to either an already-constructed
        ``torch.nn.Module``, a ``configs.MLPConfig``, or a
        ``configs.PhysicsConfig``. ``config.model_inputs`` optionally
        customizes each model's inputs (see ``configs.HybridConfig``).
    """

    def __init__(self, config: configs.HybridConfig):
        super(HybridModel, self).__init__()
        self.models_nn = torch.nn.ModuleDict()
        self.models_physics = {}
        self.config = config
        for model_name, spec in config.models.items():
            if isinstance(spec, torch.nn.Module):
                # BUG FIX: the instance was previously *called* with no
                # arguments (``config.models[model_name]()``), which invokes
                # forward() and crashes; register the instance itself.
                self.models_nn[model_name] = spec.to(configs.DEVICE)
            elif isinstance(spec, configs.PhysicsConfig):
                self.models_physics[model_name] = models.Physics.apply
            elif isinstance(spec, configs.MLPConfig):
                self.models_nn[model_name] = models.MLP(spec).to(configs.DEVICE)
        self.model_inputs = {}
        self.interim_data = {}
        # Names whose outputs must be cached for later re-use as inputs.
        # BUG FIX: previously undefined when ``model_inputs`` was falsy.
        self.to_save = []
        if config.model_inputs:
            to_save = []
            for vals in config.model_inputs.values():
                to_save += list(vals.keys())
            self.to_save = list(set(to_save))

    def forward(self, x, phy_args=None):
        """Run inference on the hybrid model.

        :param torch.Tensor x: Input tensor (batch-first).
        :param phy_args: Optional extra tensors forwarded to physics models.
        :return: Output of the last model in the chain.
        :rtype: torch.Tensor
        """
        self.interim_data["Input"] = x
        current_input = x
        out = x
        for model_name in self.config.models:
            if self.config.model_inputs and model_name in self.config.model_inputs:
                # Assemble this model's input from cached intermediate outputs.
                input_tensors = []
                for input_model, dims in self.config.model_inputs[model_name].items():
                    if dims:
                        # Stack only the requested feature columns.
                        input_tensors.append(self.interim_data[input_model][:, dims])
                    else:
                        input_tensors.append(self.interim_data[input_model])
                current_input = torch.hstack(input_tensors)
            if model_name in self.models_nn.keys():
                out = self.models_nn[model_name](current_input)
            elif model_name in self.models_physics.keys():
                out = self.models_physics[model_name](
                    current_input,
                    self.config.models[model_name].forward_func,
                    self.config.models[model_name].jacobian_func,
                    phy_args,
                )
            if self.config.model_inputs and model_name in self.to_save:
                self.interim_data[model_name] = out
            current_input = out
        return out
|
|
@@ -0,0 +1,108 @@
|
|
|
1
|
+
import torch
|
|
2
|
+
import torch.nn
|
|
3
|
+
from adeptml import configs
|
|
4
|
+
|
|
5
|
+
# Registry mapping MLPConfig.activation_functions strings to activation modules.
# BUG FIX (consistency): keys were inconsistently cased ("Tanh" vs lowercase);
# a lowercase "tanh" alias is added while the legacy "Tanh" key is kept for
# backward compatibility.
ACTIVATIONS = {
    "leakyrelu": torch.nn.LeakyReLU(),
    "sigmoid": torch.nn.Sigmoid(),
    "Tanh": torch.nn.Tanh(),
    "tanh": torch.nn.Tanh(),
}
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class MLP(torch.nn.Module):
    """
    Multilayer Perceptron (MLP) neural network model.

    Attributes
    ----------
    config : Instance of MLPConfig dataclass.

    Note
    ----
    Architecture: an input linear layer, then ``num_hidden_layers`` hidden
    linear layers each followed by the configured activation, then an output
    linear layer (no activation on the output).
    """

    def __init__(self, config: configs.MLPConfig):
        super(MLP, self).__init__()
        self.layers = torch.nn.ModuleList()
        self.linear_in = torch.nn.Linear(config.num_input_dim, config.num_hidden_dim)
        for _ in range(config.num_hidden_layers):
            self.layers.append(
                torch.nn.Linear(config.num_hidden_dim, config.num_hidden_dim)
            )
        self.linear_out = torch.nn.Linear(config.num_hidden_dim, config.num_output_dim)
        self.nl1 = ACTIVATIONS[config.activation_functions]

    def forward(self, x):
        """
        Forward pass of the MLP model.

        :param torch.Tensor x: Input tensor.

        :return: Output tensor.
        :rtype: torch.Tensor
        """
        out = self.linear_in(x)
        # BUG FIX: the loop previously ran ``range(len(self.layers) - 1)``,
        # silently skipping the last hidden layer and leaving its parameters
        # unused (and untrained).
        for layer in self.layers:
            out = self.nl1(layer(out))
        out = self.linear_out(out)
        return out
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
class Physics(torch.autograd.Function):
    """Custom autograd Function to enable backpropagation on custom physics models.

    ``forward_fun`` and ``jacobian_fun`` are plain callables operating on numpy
    arrays: inputs are detached and moved to the CPU before the call, and the
    results are converted back to tensors on ``configs.DEVICE``.
    """

    @staticmethod
    def forward(ctx, x, forward_fun, jacobian_fun, args=None):
        """Evaluate the physics model.

        :param torch.Tensor x: Input tensor, assumed (batch, n_in) — see backward.
        :param forward_fun: Callable mapping a numpy input (+ optional args) to output.
        :param jacobian_fun: Callable returning the Jacobian; stored for backward.
        :param args: Optional sequence of extra tensors passed through to the callables.
        :return: Output tensor on ``configs.DEVICE``.
        """
        # Consistency fix: use ``is not None`` everywhere (the original mixed
        # truthiness and ``!= None`` checks for ``args``).
        if args is not None:
            ctx.save_for_backward(x, *args)
        else:
            ctx.save_for_backward(x)
        ctx.jacobian_fun = jacobian_fun
        x_np = x.detach().cpu().numpy()
        if args is not None:
            np_args = [tmp.detach().cpu().numpy() for tmp in args]
            out = forward_fun(x_np, *np_args)
        else:
            out = forward_fun(x_np)
        return torch.Tensor(out).to(configs.DEVICE)

    @staticmethod
    def backward(ctx, grad_output):
        """Chain-rule the incoming gradient through the physics Jacobian.

        Computes a per-sample vector-Jacobian product; the Jacobian returned by
        ``jacobian_fun`` is reshaped to (batch, n_out, n_in).
        """
        input = ctx.saved_tensors[0]
        args = ctx.saved_tensors[1:]
        jacobian_fun = ctx.jacobian_fun
        if not ctx.needs_input_grad[0]:
            # BUG FIX: ``args`` is a tuple from saved_tensors, so the original
            # ``args != None`` branch was always taken and the alternative path
            # (with a wrong 3-element return arity) was dead code; this early
            # return also covers the previously unhandled no-grad case.
            return None, None, None, None
        input_np = input.detach().cpu().numpy()
        np_args = [tmp.detach().cpu().numpy() for tmp in args]
        jac = torch.Tensor(jacobian_fun(input_np, *np_args)).to(configs.DEVICE)
        jac = jac.reshape(input_np.shape[0], -1, input_np.shape[1])
        # BUG FIX: grad_output must be a (batch, 1, n_out) row vector for the
        # batched matmul; the original (batch, n_out, 1) reshape made the
        # dimensions mismatch whenever n_out > 1 and produced a wrongly shaped
        # gradient otherwise.
        vjp = torch.matmul(grad_output.reshape(input_np.shape[0], 1, -1), jac)
        grad_final = vjp.reshape(input_np.shape[0], -1)
        # One gradient slot per forward() input: x, forward_fun, jacobian_fun, args.
        return grad_final, None, None, None
|
|
@@ -0,0 +1,129 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import torch
|
|
3
|
+
from torch.utils.tensorboard import SummaryWriter
|
|
4
|
+
import numpy as np
|
|
5
|
+
from adeptml import configs
|
|
6
|
+
from adeptml import HybridModel
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def train_step(
    model: HybridModel, optimizer: torch.optim.Optimizer, loss_fn, scheduler=None
):
    """Build a closure that performs one training (or evaluation) step.

    Parameters
    ----------
    model : HybridModel
        Model to be optimized.
    optimizer : torch.optim.Optimizer
        Optimizer bound to ``model``'s parameters.
    loss_fn : callable
        Loss function ``loss_fn(prediction, target)``.
    scheduler : torch.optim.lr_scheduler, optional
        If given, stepped once per training batch.

    Returns
    -------
    callable
        ``step(x, y, args, test=False)`` returning the batch loss as a float.
    """

    def _step(x, y, args, test=False):
        if not test:
            # BUG FIX: explicitly (re-)enter training mode. Previously the
            # model was switched to eval() for validation and never switched
            # back, so all training after the first validation pass ran with
            # dropout/batch-norm frozen in eval mode.
            model.train()
            yhat = model.forward(x, args)
            loss = loss_fn(yhat, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if scheduler:
                scheduler.step()
        else:
            model.eval()
            with torch.no_grad():
                yhat = model(x, args)
                loss = loss_fn(yhat, y)
        return loss.item()

    # Returns the function that will be called inside the train loop.
    return _step
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def train(
    model,
    train_loader,
    test_loader,
    optimizer,
    loss_fn,
    scheduler,
    filename,
    epochs,
    print_training_loss=True,
    save_frequency=50,
):
    """Training Function.

    Parameters
    ----------
    model : HybridModel
        Model to be trained.

    train_loader : torch.utils.data.DataLoader
        Torch Dataloader with training samples.

    test_loader : torch.utils.data.DataLoader
        Torch Dataloader with validation samples.

    optimizer : torch.optim.Optimizer
        Initialized Torch Optimizer.

    loss_fn : callable
        Loss function for training.

    scheduler : torch.optim.lr_scheduler
        Learning rate scheduler.

    filename : str
        Identifier used to name the training-history directory.

    epochs : int
        Number of training epochs.

    print_training_loss : bool
        Option to toggle printing epoch loss.

    save_frequency : int
        Number of epochs per which to save the model parameters to disk.

    Returns
    -------
    Trained Hybrid Model.
    """
    # BUG FIX: ``filename`` was documented but never used — the directory name
    # contained a literal placeholder. ``os.makedirs`` replaces the non-portable
    # ``os.system("mkdir ...")`` shell call.
    data_dir = os.path.join(os.getcwd(), f"Training_History_{filename}")
    os.makedirs(data_dir, exist_ok=True)
    try:
        # Resume run numbering from the highest existing run_<N> directory.
        runs = max(
            int(f.name.split("_")[-1]) for f in os.scandir(data_dir) if f.is_dir()
        )
    except ValueError:
        # No prior run directories (or non-numeric suffixes): start at run_1.
        runs = 0
    current_data_dir = f"{data_dir}/run_{runs + 1}"
    with SummaryWriter(log_dir=current_data_dir) as writer:
        step_fn = train_step(model, optimizer, loss_fn, scheduler)
        for epoch in range(epochs):
            train_batch_losses = []
            for data in train_loader:
                x_batch, y_batch = data[0], data[1]
                # Any extra loader entries are forwarded to physics models.
                args = data[2:] if len(data) > 2 else None
                train_batch_losses.append(step_fn(x_batch, y_batch, args))
            writer.add_scalar("Loss/train", np.mean(train_batch_losses), epoch)
            test_batch_losses = []
            for data in test_loader:
                x_batch, y_batch = data[0], data[1]
                args = data[2:] if len(data) > 2 else None
                test_batch_losses.append(step_fn(x_batch, y_batch, args, test=True))
            writer.add_scalar("Loss/test", np.mean(test_batch_losses), epoch)
            if print_training_loss:
                print(
                    f"Train Loss {np.mean(train_batch_losses)} Test Loss {np.mean(test_batch_losses)}"
                )
            if epoch % save_frequency == 0:
                torch.save(
                    model.state_dict(), "%s/Model_%d.pt" % (current_data_dir, epoch)
                )
        torch.save(model.state_dict(), current_data_dir + "/Model_final.pt")

    return model
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
[tool.poetry]
|
|
2
|
+
name = "AdeptML"
|
|
3
|
+
version = "1.0"
|
|
4
|
+
description = "A High-Level PyTorch based Library for Hybrid Physics-Informed Machine Learning Models"
|
|
5
|
+
authors = ["Manaswin Oddiraju"]
|
|
6
|
+
readme = "README.md"
|
|
7
|
+
packages = [{include="adeptml"}]
|
|
8
|
+
|
|
9
|
+
[tool.poetry.dependencies]
|
|
10
|
+
python =">=3.9.0,<3.13"
|
|
11
|
+
torch = "^2.1.0"
|
|
12
|
+
jax = "^0.4.18"
|
|
13
|
+
joblib = "^1.3.2"
|
|
14
|
+
tensorboard = "^2.16.2"
|
|
15
|
+
Sphinx = { version = "7.3.7", optional = true }
|
|
16
|
+
sphinx-rtd-theme = { version = "2.0.0", optional = true }
|
|
17
|
+
sphinxcontrib-napoleon = { version = "0.7", optional = true }
|
|
18
|
+
|
|
19
|
+
[tool.poetry.extras]
|
|
20
|
+
docs = ["Sphinx", "sphinx-rtd-theme", "sphinxcontrib-napoleon"]
|
|
21
|
+
|
|
22
|
+
[build-system]
|
|
23
|
+
requires = ["poetry-core"]
|
|
24
|
+
build-backend = "poetry.core.masonry.api"
|