AdeptML 1.2.9__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- adeptml-1.2.9/LICENSE +21 -0
- adeptml-1.2.9/PKG-INFO +55 -0
- adeptml-1.2.9/README.md +33 -0
- adeptml-1.2.9/adeptml/__init__.py +2 -0
- adeptml-1.2.9/adeptml/configs.py +86 -0
- adeptml-1.2.9/adeptml/ensemble.py +79 -0
- adeptml-1.2.9/adeptml/models.py +187 -0
- adeptml-1.2.9/adeptml/train_utils.py +131 -0
- adeptml-1.2.9/pyproject.toml +23 -0
adeptml-1.2.9/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2023 ADAMS Lab
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
adeptml-1.2.9/PKG-INFO
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
Metadata-Version: 2.1
|
|
2
|
+
Name: AdeptML
|
|
3
|
+
Version: 1.2.9
|
|
4
|
+
Summary: A High-Level PyTorch based Library for Hybrid Physics-Informed Machine Learning Models
|
|
5
|
+
Author: Manaswin Oddiraju
|
|
6
|
+
Requires-Python: >=3.9.0
|
|
7
|
+
Classifier: Programming Language :: Python :: 3
|
|
8
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
9
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
10
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
11
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
12
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
13
|
+
Provides-Extra: docs
|
|
14
|
+
Requires-Dist: Sphinx (==7.3.7) ; extra == "docs"
|
|
15
|
+
Requires-Dist: joblib (>=1.3.2,<2.0.0)
|
|
16
|
+
Requires-Dist: sphinx-rtd-theme (==2.0.0) ; extra == "docs"
|
|
17
|
+
Requires-Dist: sphinxcontrib-napoleon (==0.7) ; extra == "docs"
|
|
18
|
+
Requires-Dist: tensorboard (>=2.16.2,<3.0.0)
|
|
19
|
+
Requires-Dist: torch (>=2.1.0,<3.0.0)
|
|
20
|
+
Description-Content-Type: text/markdown
|
|
21
|
+
|
|
22
|
+
# Auto-differentiable embedding of Physics and Torch Machine Learning (AdePT-ML):
|
|
23
|
+
|
|
24
|
+
This is a convenience library built on top of PyTorch to enable easy integration and training of hybrid models involving physics and deep learning modules.
|
|
25
|
+
|
|
26
|
+
### Features
|
|
27
|
+
1. Allows integration of torch.nn.Module with NumPy functions and enables training with torch optimizers.
|
|
28
|
+
2. Pre-defined Modules and configs for physics and MLP architectures.
|
|
29
|
+
3. Integrated training function with tensorboard support.
|
|
30
|
+
|
|
31
|
+
## Installation
|
|
32
|
+
Installing with pip
|
|
33
|
+
```
|
|
34
|
+
pip install adeptml
|
|
35
|
+
```
|
|
36
|
+
### Requirements (Automatically installed with pip):
|
|
37
|
+
1. PyTorch (https://pytorch.org/)
|
|
38
|
+
2. Joblib (https://joblib.readthedocs.io/en/latest/) (For loading and saving model parameters)
|
|
39
|
+
3. Tensorboard
|
|
40
|
+
|
|
41
|
+
## Usage:
|
|
42
|
+
|
|
43
|
+
The primary building block of this package is the [Hybrid Model]() class. It neatly packages all the member models into one main Torch model and enables running forward inference as well as backpropagation.
|
|
44
|
+
The class accepts as input an instance of the [Hybrid Config]() class. This config is useful in defining all the constituent modules and their inputs.
|
|
45
|
+
|
|
46
|
+
As component modules, the [Models]() module provides a straightforward [MLP]() implementation as well as a [Physics Module]().
|
|
47
|
+
This module is a torch Autograd wrapper which enables the integration of non-Torch numpy functions into a fully torch model and allows for training with torch optimizers.
|
|
48
|
+
|
|
49
|
+
## API Documentation:
|
|
50
|
+
Visit our [Read the docs page](https://adept-ml.readthedocs.io/en/latest/)
|
|
51
|
+
|
|
52
|
+
## Examples:
|
|
53
|
+
Refer to the tests file. Additional examples will be added soon.
|
|
54
|
+
|
|
55
|
+
|
adeptml-1.2.9/README.md
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
# Auto-differentiable embedding of Physics and Torch Machine Learning (AdePT-ML):
|
|
2
|
+
|
|
3
|
+
This is a convenience library built on top of PyTorch to enable easy integration and training of hybrid models involving physics and deep learning modules.
|
|
4
|
+
|
|
5
|
+
### Features
|
|
6
|
+
1. Allows integration of torch.nn.Module with NumPy functions and enables training with torch optimizers.
|
|
7
|
+
2. Pre-defined Modules and configs for physics and MLP architectures.
|
|
8
|
+
3. Integrated training function with tensorboard support.
|
|
9
|
+
|
|
10
|
+
## Installation
|
|
11
|
+
Installing with pip
|
|
12
|
+
```
|
|
13
|
+
pip install adeptml
|
|
14
|
+
```
|
|
15
|
+
### Requirements (Automatically installed with pip):
|
|
16
|
+
1. PyTorch (https://pytorch.org/)
|
|
17
|
+
2. Joblib (https://joblib.readthedocs.io/en/latest/) (For loading and saving model parameters)
|
|
18
|
+
3. Tensorboard
|
|
19
|
+
|
|
20
|
+
## Usage:
|
|
21
|
+
|
|
22
|
+
The primary building block of this package is the [Hybrid Model]() class. It neatly packages all the member models into one main Torch model and enables running forward inference as well as backpropagation.
|
|
23
|
+
The class accepts as input an instance of the [Hybrid Config]() class. This config is useful in defining all the constituent modules and their inputs.
|
|
24
|
+
|
|
25
|
+
As component modules, the [Models]() module provides a straightforward [MLP]() implementation as well as a [Physics Module]().
|
|
26
|
+
This module is a torch Autograd wrapper which enables the integration of non-Torch numpy functions into a fully torch model and allows for training with torch optimizers.
|
|
27
|
+
|
|
28
|
+
## API Documentation:
|
|
29
|
+
Visit our [Read the docs page](https://adept-ml.readthedocs.io/en/latest/)
|
|
30
|
+
|
|
31
|
+
## Examples:
|
|
32
|
+
Refer to the tests file. Additional examples will be added soon.
|
|
33
|
+
|
|
@@ -0,0 +1,86 @@
|
|
|
1
|
+
import dataclasses
|
|
2
|
+
from typing import Callable, Optional, Union
|
|
3
|
+
import torch
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
# Default device for every tensor/module this package creates: the first
# CUDA GPU when one is available, otherwise the CPU.
DEVICE = "cuda:0" if torch.cuda.is_available() else "cpu"
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
@dataclasses.dataclass
class MLPConfig:
    """
    Configuration class for the Multilayer Perceptron (MLP) model.

    Attributes
    ----------
    num_input_dim : int
        Number of input dimensions to the MLP.

    num_hidden_dim : int
        Number of hidden dimensions in each hidden layer.

    num_output_dim : int
        Number of output dimensions from the MLP.

    num_hidden_layers : int
        Number of hidden layers in the MLP.

    hidden_activation : str
        Name of the activation applied after each hidden layer.
        Choices are "leakyrelu", "sigmoid", "tanh" and "sin"
        (see ``models.ACTIVATIONS``).

    output_activation : str
        Name of the activation applied to the output layer.
        Same choices as ``hidden_activation``.
    """

    num_input_dim: int
    num_hidden_dim: int
    num_output_dim: int
    num_hidden_layers: int
    hidden_activation: str
    output_activation: str
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
@dataclasses.dataclass
class PhysicsConfig:
    """
    Configuration class for physics-related functions.

    Attributes
    ----------
    forward_func : Callable[[torch.Tensor], torch.Tensor]
        Forward function of the physics model.  ``models.Physics`` calls it
        with numpy arrays (inputs are detached and converted), so it must
        accept array input.

    jacobian_func : Callable[[torch.Tensor], torch.Tensor]
        Function to compute the Jacobian of the physics model, also called
        with numpy arrays.
    """

    forward_func: Callable[[torch.Tensor], torch.Tensor]
    jacobian_func: Callable[[torch.Tensor], torch.Tensor]
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
# Union of the config types accepted as values of HybridConfig.models.
ModelConfig = Union[MLPConfig, PhysicsConfig]
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
@dataclasses.dataclass
class HybridConfig:
    """
    Config for Ensemble Models.

    Attributes
    ----------
    models : dict
        Maps a model name to either an instance of a ModelConfig or an
        already-built ``torch.nn.Module``.  Insertion order defines the
        execution order of the hybrid model.

    model_inputs : dict, optional
        By default, the Ensemble model operates sequentially, using the
        output of the preceding model as input for the next.  Setting this
        dict to a non-empty value overrides that behavior.
        Keys are model names; values are dicts specifying input
        customization.  Each inner dict holds model names as keys and
        specifies how to stack their outputs as this model's input:
        - ``None`` stacks the entire output tensor.
        - A list of ints stacks only the specified dimensions (columns).
        Use the key ``"Input"`` to feed this model (part of) the hybrid
        model's original input.
    """

    models: dict[str, ModelConfig | torch.nn.Module]
    model_inputs: Optional[dict[str, dict[str, list[int] | None]]] = None
|
|
@@ -0,0 +1,79 @@
|
|
|
1
|
+
import torch
|
|
2
|
+
import torch.nn
|
|
3
|
+
from adeptml import configs, models
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class HybridModel(torch.nn.Module):
    """
    Torch Module for Serial Hybrid Physics Models.

    Packages the neural-network and physics sub-models declared in a
    :class:`configs.HybridConfig` into a single ``torch.nn.Module`` so that
    the whole chain supports forward inference and backpropagation.  By
    default the sub-models run sequentially, each consuming the previous
    model's output; ``config.model_inputs`` can override that wiring.

    Parameters
    ----------
    config : configs.HybridConfig
        ``config.models`` maps a model name to an already-built
        ``torch.nn.Module``, a ``configs.MLPConfig`` or a
        ``configs.PhysicsConfig``.  ``config.model_inputs`` optionally
        customises which earlier outputs feed each model.
    """

    def __init__(self, config: configs.HybridConfig):
        super(HybridModel, self).__init__()
        # NN sub-models live in a ModuleDict so their parameters register
        # with this module and are visible to optimizers.
        self.models_nn = torch.nn.ModuleDict()
        # Physics models are plain autograd functions, not nn.Modules.
        self.models_physics = {}
        self.config = config
        for model_name in config.models:
            if isinstance(config.models[model_name], torch.nn.Module):
                self.models_nn[model_name] = config.models[model_name].to(
                    configs.DEVICE
                )
            if isinstance(config.models[model_name], configs.PhysicsConfig):
                self.models_physics[model_name] = models.Physics.apply
            elif isinstance(config.models[model_name], configs.MLPConfig):
                self.models_nn[model_name] = models.MLP(config.models[model_name]).to(
                    configs.DEVICE
                )
        self.model_inputs = {}
        self.interim_data = {}
        if config.model_inputs:
            # Collect the names of every model whose output some later model
            # consumes; forward() caches only those outputs.
            to_save = []
            for _, vals in config.model_inputs.items():
                to_save += list(vals.keys())
            self.to_save = list(set(to_save))

    def forward(self, x, phy_args=None):
        """
        Function to run inference on the hybrid model.

        :param x: Input tensor fed to the first model; also cached under the
            key ``"Input"`` for models that request the original input.
        :param phy_args: Optional extra positional arguments forwarded to
            every physics sub-model.

        :return: Output tensor of the last model in ``config.models``.
        """
        self.interim_data["Input"] = x
        current_input = x
        for model_name in self.config.models:
            if self.config.model_inputs:
                if model_name in self.config.model_inputs:
                    # Assemble this model's input by horizontally stacking
                    # the requested (slices of) earlier outputs.
                    input_tensors = []
                    for input_model, dims in self.config.model_inputs[
                        model_name
                    ].items():
                        if dims:
                            input_tensors.append(
                                self.interim_data[input_model][:, dims]
                            )
                        else:
                            input_tensors.append(self.interim_data[input_model])
                    current_input = torch.hstack(input_tensors)
            if model_name in self.models_nn.keys():
                cur_model = self.models_nn[model_name]
                out = cur_model(current_input)
            elif model_name in self.models_physics.keys():
                cur_model = self.models_physics[model_name]
                out = cur_model(
                    current_input,
                    self.config.models[model_name].forward_func,
                    self.config.models[model_name].jacobian_func,
                    phy_args,
                )
            if self.config.model_inputs:
                if model_name in self.to_save:
                    self.interim_data[model_name] = out
            current_input = out

        return out
|
|
@@ -0,0 +1,187 @@
|
|
|
1
|
+
import torch
|
|
2
|
+
import torch.nn
|
|
3
|
+
from typing import Callable, Optional, List
|
|
4
|
+
from adeptml import configs
|
|
5
|
+
|
|
6
|
+
# Registry of supported activations, keyed by the names accepted in
# MLPConfig.hidden_activation / MLPConfig.output_activation.
ACTIVATIONS = dict(
    leakyrelu=torch.nn.LeakyReLU(),
    sigmoid=torch.nn.Sigmoid(),
    tanh=torch.nn.Tanh(),
    sin=torch.sin,
)
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class MLP(torch.nn.Module):
    """
    Multilayer Perceptron (MLP) neural network model.

    Attributes
    ----------
    config : Instance of MLPConfig dataclass.

    Note
    ----
    The network is ``linear_in`` -> ``num_hidden_layers`` hidden linear
    layers (each followed by the hidden activation) -> ``linear_out``
    followed by the output activation.
    """

    def __init__(self, config: configs.MLPConfig):
        super(MLP, self).__init__()
        self.layers = torch.nn.ModuleList()
        self.linear_in = torch.nn.Linear(config.num_input_dim, config.num_hidden_dim)
        for _ in range(config.num_hidden_layers):
            self.layers.append(
                torch.nn.Linear(config.num_hidden_dim, config.num_hidden_dim)
            )
        self.linear_out = torch.nn.Linear(config.num_hidden_dim, config.num_output_dim)
        # Hidden and output activations looked up from the module-level
        # ACTIVATIONS registry.
        self.nl1 = ACTIVATIONS[config.hidden_activation]
        self.nl2 = ACTIVATIONS[config.output_activation]

    def forward(self, x):
        """
        Forward pass of the MLP model.

        :param torch.Tensor x: Input tensor.

        :return: Output tensor.
        :rtype: torch.Tensor
        """
        out = self.linear_in(x)
        # Apply every hidden layer.  The original loop ranged over
        # ``len(self.layers) - 1``, which silently skipped the last hidden
        # layer: its parameters existed (and were handed to the optimizer)
        # but never participated in the forward pass.
        for net in self.layers:
            out = self.nl1(net(out))
        out = self.linear_out(out)
        return self.nl2(out)
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
class Physics(torch.autograd.Function):
    """Custom Autograd function to enable backpropagation on Custom Physics Models.

    The physics callables operate on numpy arrays: inputs are detached,
    moved to the CPU and converted before the call, and results are moved
    back onto ``configs.DEVICE`` so the surrounding torch graph stays
    intact.

    Attributes:
        config: Instance of PhysicsConfig.
    """

    @staticmethod
    def forward(
        ctx,
        x: torch.Tensor,
        forward_fun: Callable,
        jacobian_fun: Callable,
        args: Optional[List[torch.Tensor]] = None,
    ):
        """
        Function defining forward pass for the physics model.

        :param ctx: Torch Autograd context object (https://pytorch.org/docs/stable/autograd.html#context-method-mixins)
        :param x: Input tensor
        :param forward_fun: Function which computes outputs of the physics function. Accepts numpy arrays as input.
        :param jacobian_fun: Function which computes Jacobian / gradient of the physics function. Accepts numpy arrays as input.
        :param args: List containing additional positional arguments (as tensors) to forward_fun. Gradients are not computed w.r.t these args.

        :return: The output of forward_fun as a tensor.
        """
        if args:
            ctx.save_for_backward(x, *args)
        else:
            ctx.save_for_backward(x)
        # The jacobian callable is not a tensor, so it is stashed directly on
        # the context for backward() rather than via save_for_backward.
        ctx.jacobian_fun = jacobian_fun
        x = x.detach().cpu().numpy()
        if args is not None:  # idiomatic None check (was ``args != None``)
            args = [tmp_args.detach().cpu().numpy() for tmp_args in args]
            out = forward_fun(x, *args)
            out = torch.Tensor(out).to(configs.DEVICE)
            return out
        else:
            out = forward_fun(x)
            out = torch.Tensor(out).to(configs.DEVICE)
            return out

    @staticmethod
    def backward(ctx, grad_output):
        """
        Function to compute gradient across the forward_fun during backpropagation.

        Computes ``grad_output @ J`` where ``J`` is the Jacobian returned by
        ``jacobian_fun``, reshaped to (batch, n_out, n_in).  Returns ``None``
        for the three non-tensor inputs of :meth:`forward`.
        """
        input = ctx.saved_tensors[0]
        args = ctx.saved_tensors[1:]
        jacobian_fun = ctx.jacobian_fun
        # Default to None so a defined gradient is always returned, even when
        # no gradient w.r.t. the input is required (the original left
        # grad_final unbound in that case).
        grad_final = None
        if ctx.needs_input_grad[0]:
            input = input.detach().cpu().numpy()
            if args is not None:
                args = [tmp_args.detach().cpu().numpy() for tmp_args in args]
                jac_final = jacobian_fun(input, *args)
            else:
                jac_final = jacobian_fun(input)
            jac_final = torch.Tensor(jac_final).to(configs.DEVICE)
            # Reshape the flat Jacobian to (batch, n_out, n_in) for the
            # batched vector-Jacobian product below.
            jac_final = jac_final.reshape(input.shape[0], -1, input.shape[1])
            grad_output = grad_output.unsqueeze(1)
            grad_final = torch.matmul(grad_output, jac_final).squeeze()
        return grad_final, None, None, None
|
|
121
|
+
|
|
122
|
+
|
|
123
|
+
class Physics_VJP(torch.autograd.Function):
    """Custom Autograd function to enable backpropagation on Custom Physics Models.

    Unlike :class:`Physics`, the ``jacobian_fun`` used here also receives the
    upstream gradient and is expected to return the vector-Jacobian product
    directly, instead of a full Jacobian matrix.

    Attributes:
        config: Instance of PhysicsConfig.
    """

    @staticmethod
    def forward(
        ctx,
        x: torch.Tensor,
        forward_fun: Callable,
        jacobian_fun: Callable,
        args: Optional[List[torch.Tensor]] = None,
    ):
        """
        Function defining forward pass for the physics model.

        :param ctx: Torch Autograd context object (https://pytorch.org/docs/stable/autograd.html#context-method-mixins)
        :param x: Input tensor
        :param forward_fun: Function which computes outputs of the physics function. Accepts numpy arrays as input.
        :param jacobian_fun: Function which computes Jacobian / gradient of the physics function. Accepts numpy arrays as input.
        :param args: List containing additional positional arguments (as tensors) to forward_fun. Gradients are not computed w.r.t these args.

        :return: The output of forward_fun as a tensor.
        """
        if args:
            ctx.save_for_backward(x, *args)
        else:
            ctx.save_for_backward(x)
        # Stashed on the context (not a tensor, so save_for_backward cannot
        # hold it).
        ctx.jacobian_fun = jacobian_fun
        x = x.detach().cpu().numpy()
        if args is not None:  # idiomatic None check (was ``args != None``)
            args = [tmp_args.detach().cpu().numpy() for tmp_args in args]
            out = forward_fun(x, *args)
            out = torch.Tensor(out).to(configs.DEVICE)
            return out
        else:
            out = forward_fun(x)
            out = torch.Tensor(out).to(configs.DEVICE)
            return out

    @staticmethod
    def backward(ctx, grad_output):
        """
        Function to compute gradient across the forward_fun during backpropagation.

        Delegates the vector-Jacobian product to ``jacobian_fun``, passing it
        the (numpy) input and upstream gradient.  Returns ``None`` for the
        three non-tensor inputs of :meth:`forward`.
        """
        input = ctx.saved_tensors[0]
        args = ctx.saved_tensors[1:]
        jacobian_fun = ctx.jacobian_fun
        # Default to None so a defined gradient is always returned, even when
        # no gradient w.r.t. the input is required (the original left
        # grad_final unbound in that case).
        grad_final = None
        if ctx.needs_input_grad[0]:
            input = input.detach().cpu().numpy()
            if args is not None:
                args = [tmp_args.detach().cpu().numpy() for tmp_args in args]
                grad_final = jacobian_fun(
                    input, grad_output.detach().cpu().numpy(), *args
                )
            else:
                grad_final = jacobian_fun(input, grad_output.detach().cpu().numpy())
        # NOTE(review): jacobian_fun appears to return a numpy array here,
        # while autograd normally expects a tensor gradient — confirm, or
        # wrap the result with torch.as_tensor(...).to(configs.DEVICE).
        return grad_final, None, None, None
|
|
@@ -0,0 +1,131 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import torch
|
|
3
|
+
from torch.utils.tensorboard import SummaryWriter
|
|
4
|
+
import numpy as np
|
|
5
|
+
import time
|
|
6
|
+
from adeptml import configs
|
|
7
|
+
from adeptml import HybridModel
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def train_step(
    model: HybridModel, optimizer: torch.optim.Optimizer, loss_fn, scheduler=None
):
    """Build the per-batch step closure used inside the train loop.

    The returned callable runs one optimisation step — or, with
    ``test=True``, one gradient-free evaluation pass — and reports the batch
    loss as a plain float.
    """

    def _step(x, y, args, test=False):
        if test:
            # Evaluation pass: no gradient tracking, no parameter update.
            with torch.no_grad():
                prediction = model.forward(x, args)
                batch_loss = loss_fn(prediction, y)
            return batch_loss.item()
        # Training pass: forward, backward, optimizer (and optional
        # scheduler) step.
        prediction = model.forward(x, args)
        batch_loss = loss_fn(prediction, y)
        optimizer.zero_grad()
        batch_loss.backward()
        optimizer.step()
        if scheduler:
            scheduler.step()
        return batch_loss.item()

    # Hand the closure back to the caller's train loop.
    return _step
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def train(
    model,
    train_loader,
    test_loader,
    optimizer,
    loss_fn,
    scheduler,
    filename,
    epochs,
    print_training_loss=True,
    save_frequency=50,
):
    """Training Function.

    Parameters
    ----------
    model : HybridModel
        Model to train.

    train_loader : torch.utils.data.DataLoader
        Torch Dataloader with training samples.

    test_loader : torch.utils.data.DataLoader
        Torch Dataloader with validation samples.

    optimizer : torch.optim.Optimizer
        Initialized Torch Optimizer.

    loss_fn : callable
        Loss function for training.

    scheduler : torch.optim.lr_scheduler
        Learning rate scheduler (may be None to disable).

    filename : str
        File name for saving the trained model.

    epochs : int
        Number of training epochs.

    print_training_loss : bool
        Option to toggle printing epoch loss.

    save_frequency : int
        Number of epochs per which to save the model parameters to disk.


    Returns
    -------
    Trained Hybrid Model.
    """

    # NOTE(review): "(unknown)" looks like a placeholder for the run
    # directory name — confirm the intended value (e.g. derived from
    # ``filename``).
    data_dir = os.path.join(os.getcwd(), f"(unknown)")
    # exist_ok=True already makes this safe when the directory exists.
    os.makedirs(data_dir, exist_ok=True)
    try:
        # Continue numbering after the highest existing "run_<N>" directory.
        runs = max(
            [int(f.name.split("_")[-1]) for f in os.scandir(data_dir) if f.is_dir()]
        )
    except ValueError:
        # Raised by max() on an empty sequence or int() on a non-numeric
        # suffix; in either case start from run_1.
        runs = 0
    current_data_dir = f"{data_dir}/run_{runs + 1}"
    with SummaryWriter(log_dir=current_data_dir) as writer:
        train_step_obj = train_step(model, optimizer, loss_fn, scheduler)
        for epoch in range(epochs):
            t1 = time.time()
            train_batch_losses = []
            for data in train_loader:
                x_batch = data[0]
                y_batch = data[1]
                # Any extra tensors in the batch are forwarded to the
                # physics modules as positional arguments.
                args = data[2:] if len(data) > 2 else None
                loss = train_step_obj(x_batch, y_batch, args)
                train_batch_losses.append(loss)
            writer.add_scalar("Loss/train", np.mean(train_batch_losses), epoch)
            test_batch_losses = []
            for data in test_loader:
                x_batch = data[0]
                y_batch = data[1]
                args = data[2:] if len(data) > 2 else None
                loss = train_step_obj(x_batch, y_batch, args, test=True)
                test_batch_losses.append(loss)
            t2 = time.time()
            writer.add_scalar("Loss/test", np.mean(test_batch_losses), epoch)
            if print_training_loss:
                print(
                    f"Epoch Time: {t2-t1}s Train Loss {np.mean(train_batch_losses)} Test Loss {np.mean(test_batch_losses)}"
                )
            # Periodic checkpoint (skip epoch 0).
            if epoch % save_frequency == 0 and epoch != 0:
                torch.save(
                    model.state_dict(), "%s/Model_%d.pt" % (current_data_dir, epoch)
                )
        torch.save(model.state_dict(), current_data_dir + "/model_final.pt")

    return model
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
[tool.poetry]
|
|
2
|
+
name = "AdeptML"
|
|
3
|
+
version = "1.2.9"
|
|
4
|
+
description = "A High-Level PyTorch based Library for Hybrid Physics-Informed Machine Learning Models"
|
|
5
|
+
authors = ["Manaswin Oddiraju"]
|
|
6
|
+
readme = "README.md"
|
|
7
|
+
packages = [{include="adeptml"}]
|
|
8
|
+
|
|
9
|
+
[tool.poetry.dependencies]
|
|
10
|
+
python =">=3.9.0"
|
|
11
|
+
torch = "^2.1.0"
|
|
12
|
+
joblib = "^1.3.2"
|
|
13
|
+
tensorboard = "^2.16.2"
|
|
14
|
+
Sphinx = { version = "7.3.7", optional = true }
|
|
15
|
+
sphinx-rtd-theme = { version = "2.0.0", optional = true }
|
|
16
|
+
sphinxcontrib-napoleon = { version = "0.7", optional = true }
|
|
17
|
+
|
|
18
|
+
[tool.poetry.extras]
|
|
19
|
+
docs = ["Sphinx", "sphinx-rtd-theme", "sphinxcontrib-napoleon"]
|
|
20
|
+
|
|
21
|
+
[build-system]
|
|
22
|
+
requires = ["poetry-core"]
|
|
23
|
+
build-backend = "poetry.core.masonry.api"
|