congrads-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
congrads/descriptor.py ADDED
@@ -0,0 +1,65 @@
+ class Descriptor:
+     """
+     A class to manage the mapping of neurons to layers and their properties
+     (e.g., output, constant, or variable) in a neural network.
+ 
+     This class enables the organization and description of network elements,
+     such as associating neurons with specific layers and categorizing layers
+     as outputs, constants, or variables.
+ 
+     This allows users to easily place constraints on parts of the network by
+     referencing the name that is configured in this class.
+     """
+ 
+     def __init__(
+         self,
+     ):
+         """
+         Initialize the Descriptor class with empty mappings for neurons and layers.
+ 
+         This includes:
+         - `neuron_to_layer`: A dictionary mapping neuron names to their corresponding layer names.
+         - `neuron_to_index`: A dictionary mapping neuron names to their corresponding index within a layer.
+         - `output_layers`: A set that holds the names of layers marked as output layers.
+         - `constant_layers`: A set that holds the names of layers marked as constant layers.
+         - `variable_layers`: A set that holds the names of layers marked as variable layers.
+         """
+ 
+         # Define dictionaries that will translate neuron names to layer and index
+         self.neuron_to_layer: dict[str, str] = {}
+         self.neuron_to_index: dict[str, int] = {}
+ 
+         # Define sets that will hold the layers based on which type
+         self.output_layers: set[str] = set()
+         self.constant_layers: set[str] = set()
+         self.variable_layers: set[str] = set()
+ 
+     def add(
+         self,
+         layer_name: str,
+         neuron_names: list[str],
+         output: bool = False,
+         constant: bool = False,
+     ):
+         """
+         Add a layer to the descriptor, associating it with neurons and marking it
+         as an output or constant layer.
+ 
+         Args:
+             layer_name (str): The name of the layer to be added.
+             neuron_names (list[str]): A list of neuron names that belong to the layer.
+             output (bool, optional): If True, mark this layer as an output layer. Defaults to False.
+             constant (bool, optional): If True, mark this layer as a constant layer. Defaults to False.
+         """
+ 
+         if output:
+             self.output_layers.add(layer_name)
+ 
+         if constant:
+             self.constant_layers.add(layer_name)
+         else:
+             self.variable_layers.add(layer_name)
+ 
+         for index, neuron_name in enumerate(neuron_names):
+             self.neuron_to_layer[neuron_name] = layer_name
+             self.neuron_to_index[neuron_name] = index
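Taken together, `add()` registers each neuron's layer and index and sorts the layer into the output, constant, or variable set. A small usage sketch (editorial, not part of the package) with hypothetical layer and neuron names:

```python
from congrads.descriptor import Descriptor

# Describe a network with a constant input layer and an output layer.
descriptor = Descriptor()
descriptor.add("input", ["I1", "I2", "I3"], constant=True)
descriptor.add("output", ["O1", "O2"], output=True)

# Neuron names now resolve to their layer and position within it.
print(descriptor.neuron_to_layer["O2"])   # "output"
print(descriptor.neuron_to_index["O2"])   # 1
print(descriptor.output_layers)           # {"output"}
print(descriptor.constant_layers)         # {"input"}
print(descriptor.variable_layers)         # {"output"} (output=True, constant=False)
```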
congrads/learners.py ADDED
@@ -0,0 +1,233 @@
+ import logging
+ from typing import Union
+ from torch import Tensor
+ from torch.nn import Module
+ from torch.nn.modules.loss import _Loss
+ from torch.optim import Optimizer
+ 
+ from .core import CGGDModule
+ from .constraints import Constraint
+ from .descriptor import Descriptor
+ 
+ 
+ class Learner(CGGDModule):
+     def __init__(
+         self,
+         network: Module,
+         descriptor: Descriptor,
+         constraints: list[Constraint],
+         loss_function: Union[_Loss, dict[str, _Loss]],
+         optimizer: Optimizer,
+     ):
+         """
+         A class that integrates a neural network with a training and validation loop,
+         supporting single or multi-output loss functions. The class manages the forward pass,
+         training step, and validation step while also configuring the optimizer.
+ 
+         Args:
+             network (Module): The neural network model to be trained.
+             descriptor (Descriptor): An object that defines the structure of the network,
+                 including the output layers.
+             constraints (list[Constraint]): A list of constraints that can be applied during training.
+             loss_function (Union[_Loss, dict[str, _Loss]]): A loss function or a dictionary of loss functions
+                 for each output layer.
+             optimizer (Optimizer): The optimizer used for training the model.
+ 
+         Raises:
+             ValueError: If the descriptor does not contain any output layers or if the number of loss functions
+                 does not match the number of output layers when using a dictionary of loss functions.
+         """
+ 
+         # Init parent class
+         super().__init__(descriptor, constraints)
+ 
+         # Init object variables
+         self.network = network
+         self.descriptor = descriptor
+         self.loss_function = loss_function
+         self.optimizer = optimizer
+ 
+         # Perform checks
+         if len(self.descriptor.output_layers) == 0:
+             raise ValueError(
+                 'The descriptor class must contain one or more output layers. Mark a layer as output by setting descriptor.add("layer", ..., output=True).'
+             )
+ 
+         if isinstance(loss_function, _Loss):
+             if len(self.descriptor.output_layers) > 1:
+                 logging.warning(
+                     f"Multiple layers were marked as output, but only one loss function is defined. Only the loss of layer {list(self.descriptor.output_layers)[0]} will be calculated and used. To use the same loss function for all output layers, please specify them explicitly."
+                 )
+ 
+         if isinstance(loss_function, dict):
+             if len(self.descriptor.output_layers) != len(loss_function):
+                 raise ValueError(
+                     "The number of marked output layers does not match the number of provided loss functions."
+                 )
+ 
+         # Assign proper step function based on whether one or multiple loss functions are assigned
+         if isinstance(loss_function, _Loss):
+             self.training_step = self.training_step_single
+             self.validation_step = self.validation_step_single
+ 
+         if isinstance(loss_function, dict):
+             self.training_step = self.training_step_multi
+             self.validation_step = self.validation_step_multi
+ 
+     def forward(self, x):
+         """
+         Perform a forward pass through the network.
+ 
+         Args:
+             x (Tensor): The input tensor to pass through the network.
+ 
+         Returns:
+             Tensor: The model's output for the given input.
+         """
+ 
+         return self.network(x)
+ 
+     def training_step_single(self, batch, batch_idx):
+         """
+         Perform a single training step using a single loss function.
+ 
+         Args:
+             batch (tuple): A tuple containing the input and target output tensors.
+             batch_idx (int): The index of the batch in the current epoch.
+ 
+         Returns:
+             Tensor: The loss value for the batch.
+         """
+ 
+         self.train()
+ 
+         inputs, outputs = batch
+         prediction: dict[str, Tensor] = self(inputs)
+ 
+         layer = list(self.descriptor.output_layers)[0]
+         loss = self.loss_function(prediction[layer], outputs)
+ 
+         self.log(
+             "train_loss",
+             loss,
+             on_step=False,
+             on_epoch=True,
+         )
+ 
+         return super().training_step(prediction, loss)
+ 
+     def training_step_multi(self, batch, batch_idx):
+         """
+         Perform a training step using multiple loss functions, one for each output layer.
+ 
+         Args:
+             batch (tuple): A tuple containing the input and target output tensors.
+             batch_idx (int): The index of the batch in the current epoch.
+ 
+         Returns:
+             Tensor: The total loss value for the batch, combining the losses from all output layers.
+         """
+ 
+         self.train()
+ 
+         inputs, outputs = batch
+         prediction: dict[str, Tensor] = self(inputs)
+ 
+         # TODO add hyperparameter to scale loss per function
+         loss = 0
+         for layer in self.descriptor.output_layers:
+             layer_loss = self.loss_function[layer](prediction[layer], outputs)
+             loss += layer_loss
+ 
+             self.log(
+                 f"train_loss_{layer}",
+                 layer_loss,
+                 on_step=False,
+                 on_epoch=True,
+             )
+ 
+         self.log(
+             "train_loss",
+             loss,
+             on_step=False,
+             on_epoch=True,
+         )
+ 
+         return super().training_step(prediction, loss)
+ 
+     def validation_step_single(self, batch, batch_idx):
+         """
+         Perform a single validation step using a single loss function.
+ 
+         Args:
+             batch (tuple): A tuple containing the input and target output tensors.
+             batch_idx (int): The index of the batch in the current epoch.
+ 
+         Returns:
+             Tensor: The validation loss for the batch.
+         """
+ 
+         self.eval()
+ 
+         inputs, outputs = batch
+         prediction: dict[str, Tensor] = self(inputs)
+ 
+         layer = list(self.descriptor.output_layers)[0]
+         loss = self.loss_function(prediction[layer], outputs)
+ 
+         self.log(
+             "valid_loss",
+             loss,
+             on_step=False,
+             on_epoch=True,
+         )
+ 
+         return super().validation_step(prediction, loss)
+ 
+     def validation_step_multi(self, batch, batch_idx):
+         """
+         Perform a validation step using multiple loss functions, one for each output layer.
+ 
+         Args:
+             batch (tuple): A tuple containing the input and target output tensors.
+             batch_idx (int): The index of the batch in the current epoch.
+ 
+         Returns:
+             Tensor: The total validation loss for the batch, combining the losses from all output layers.
+         """
+ 
+         self.eval()
+ 
+         inputs, outputs = batch
+         prediction: dict[str, Tensor] = self(inputs)
+ 
+         loss = 0
+         for layer in self.descriptor.output_layers:
+             layer_loss = self.loss_function[layer](prediction[layer], outputs)
+             loss += layer_loss
+ 
+             self.log(
+                 f"valid_loss_{layer}",
+                 layer_loss,
+                 on_step=False,
+                 on_epoch=True,
+             )
+ 
+         self.log(
+             "valid_loss",
+             loss,
+             on_step=False,
+             on_epoch=True,
+         )
+ 
+         return super().validation_step(prediction, loss)
+ 
+     def configure_optimizers(self):
+         """
+         Configure the optimizer for training.
+ 
+         Returns:
+             Optimizer: The optimizer used to update the model's parameters during training.
+         """
+ 
+         return self.optimizer
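A minimal construction sketch (editorial, not part of the package), mirroring the README walkthrough later in this release. Two assumptions are made here: the comparison callables `gt`/`le` are taken to be Python's `operator.gt`/`operator.le` (the README only shows the names), and the dictionary form of `loss_function` is used so the `*_multi` step functions are selected:

```python
from operator import gt, le  # assumption: plain Python comparison callables

from torch.nn import MSELoss
from torch.optim import Adam

from congrads.constraints import BinaryConstraint, Constraint, ScalarConstraint
from congrads.descriptor import Descriptor
from congrads.learners import Learner
from congrads.networks import MLPNetwork

# Describe a 4-feature constant input layer and a 2-neuron output layer.
descriptor = Descriptor()
descriptor.add("input", ["I1", "I2", "I3", "I4"], constant=True)
descriptor.add("output", ["O1", "O2"], output=True)

# Constraints reference neurons by the names configured above.
Constraint.descriptor = descriptor
constraints = [
    ScalarConstraint("O1", gt, 0),     # O1 > 0
    BinaryConstraint("O1", le, "O2"),  # O1 <= O2
]

# One loss per output layer: passing a dict routes training through
# training_step_multi / validation_step_multi.
network = MLPNetwork(n_inputs=4, n_outputs=2, n_hidden_layers=2, hidden_dim=16)
loss_function = {"output": MSELoss()}
optimizer = Adam(network.parameters(), lr=1e-3)

learner = Learner(network, descriptor, constraints, loss_function, optimizer)
```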
congrads/metrics.py ADDED
@@ -0,0 +1,64 @@
+ from torch import Tensor, tensor, sum, numel
+ from torchmetrics import Metric
+ 
+ # NOTE
+ 
+ 
+ class ConstraintSatisfactionRatio(Metric):
+     """
+     A custom metric to calculate the ratio of satisfied constraints in a neural network model.
+     It computes the proportion of constraints that have been satisfied,
+     where satisfaction is determined based on the provided constraint results.
+ 
+     This metric tracks the number of unsatisfied constraints and the total number of constraints
+     during the training process, and computes the ratio of satisfied constraints once all updates
+     have been made.
+ 
+     Attributes:
+         unsatisfied (Tensor): Tracks the number of unsatisfied constraints.
+         total (Tensor): Tracks the total number of constraints processed.
+ 
+     Note:
+         For more information about custom metrics, we refer to the TorchMetrics documentation
+         at https://lightning.ai/docs/torchmetrics/stable/pages/implement.html
+     """
+ 
+     def __init__(self, **kwargs):
+         """
+         Initializes the ConstraintSatisfactionRatio metric by setting up the
+         state variables to track the number of unsatisfied and total constraints.
+ 
+         Args:
+             **kwargs: Additional arguments to pass to the base Metric class constructor.
+         """
+ 
+         # Init parent class
+         super().__init__(**kwargs)
+ 
+         # Init scalar tensors that will hold metric values
+         self.add_state("unsatisfied", default=tensor(0), dist_reduce_fx="sum")
+         self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
+ 
+     def update(self, constraint_result: Tensor) -> None:
+         """
+         Updates the state of the metric with the latest constraint results.
+ 
+         Args:
+             constraint_result (Tensor): A tensor representing the result of
+                 the constraint checks, where each element indicates whether a
+                 constraint is satisfied (e.g., 0 for satisfied, 1 for unsatisfied).
+         """
+         self.unsatisfied += sum(constraint_result)
+         self.total += numel(constraint_result)
+ 
+     def compute(self) -> Tensor:
+         """
+         Computes the constraint satisfaction ratio, defined as:
+         1 - (number of unsatisfied constraints / total constraints).
+ 
+         Returns:
+             Tensor: The satisfaction ratio as a scalar tensor.
+         """
+         return 1 - (self.unsatisfied.float() / self.total)
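A short usage sketch (editorial, not part of the package), assuming the 0/1 encoding described in the docstring, where 1 marks an unsatisfied constraint:

```python
import torch

from congrads.metrics import ConstraintSatisfactionRatio

metric = ConstraintSatisfactionRatio()
metric.update(torch.tensor([0, 1, 0, 0]))  # 3 of 4 constraints satisfied
metric.update(torch.tensor([0, 0]))        # 2 of 2 constraints satisfied

print(metric.compute())  # tensor(0.8333): 5 of 6 constraints satisfied
```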
congrads/networks.py ADDED
@@ -0,0 +1,91 @@
+ from torch.nn import Linear, Sequential, ReLU, Module
+ 
+ 
+ class MLPNetwork(Module):
+     """
+     A multi-layer perceptron (MLP) neural network model consisting of
+     an input layer, multiple hidden layers, and an output layer.
+ 
+     This class constructs an MLP with configurable hyperparameters such as the
+     number of input features, output features, number of hidden layers, and
+     the dimensionality of hidden layers. It provides methods for both
+     building the model and performing a forward pass through the network.
+ 
+     Attributes:
+         n_inputs (int): The number of input features.
+         n_outputs (int): The number of output features.
+         n_hidden_layers (int): The number of hidden layers in the network.
+         hidden_dim (int): The dimensionality of the hidden layers.
+         input (nn.Module): The input layer (linear transformation followed by ReLU).
+         hidden (nn.Module): The sequential hidden layers (each consisting of
+             a linear transformation followed by ReLU).
+         out (nn.Module): The output layer (linear transformation).
+     """
+ 
+     def __init__(
+         self,
+         n_inputs=25,
+         n_outputs=2,
+         n_hidden_layers=2,
+         hidden_dim=35,
+     ):
+         """
+         Initializes the MLP network with the given hyperparameters.
+ 
+         Args:
+             n_inputs (int, optional): The number of input features. Defaults to 25.
+             n_outputs (int, optional): The number of output features. Defaults to 2.
+             n_hidden_layers (int, optional): The number of hidden layers. Defaults to 2.
+             hidden_dim (int, optional): The dimensionality of the hidden layers. Defaults to 35.
+         """
+         super().__init__()
+ 
+         # Init object variables
+         self.n_inputs = n_inputs
+         self.n_outputs = n_outputs
+         self.n_hidden_layers = n_hidden_layers
+         self.hidden_dim = hidden_dim
+ 
+         # Set up the components of our model
+         self.input = self.linear(self.n_inputs, self.hidden_dim)
+         self.hidden = Sequential(
+             *(
+                 self.linear(self.hidden_dim, self.hidden_dim)
+                 for _ in range(n_hidden_layers)
+             )
+         )
+         self.out = Linear(self.hidden_dim, self.n_outputs)
+ 
+     def forward(self, X):
+         """
+         Performs a forward pass through the network.
+ 
+         Args:
+             X (Tensor): The input tensor to be passed through the network.
+ 
+         Returns:
+             dict: A dictionary containing the 'input' (original input) and
+                 'output' (predicted output) of the network.
+         """
+         input = X
+         output = self.out(self.hidden(self.input(X)))
+ 
+         return {"input": input, "output": output}
+ 
+     @staticmethod
+     def linear(in_features, out_features):
+         """
+         Creates a basic linear block with a linear transformation followed
+         by a ReLU activation function.
+ 
+         Args:
+             in_features (int): The number of input features.
+             out_features (int): The number of output features.
+ 
+         Returns:
+             nn.Module: A sequential module consisting of a Linear layer and ReLU activation.
+         """
+         return Sequential(
+             Linear(in_features, out_features),
+             ReLU(),
+         )
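A quick sketch (editorial, not part of the package) showing the dictionary returned by the forward pass, whose keys line up with the layer names a `Descriptor` would use for this network:

```python
import torch

from congrads.networks import MLPNetwork

# A batch of 8 samples with 25 features (the default n_inputs).
network = MLPNetwork(n_inputs=25, n_outputs=2, n_hidden_layers=2, hidden_dim=35)
prediction = network(torch.randn(8, 25))

# The forward pass returns a dictionary keyed by layer name ("input"/"output").
print(prediction["input"].shape)   # torch.Size([8, 25])
print(prediction["output"].shape)  # torch.Size([8, 2])
```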
@@ -0,0 +1,34 @@
+ MIT License
+ 
+ Copyright (c) 2024 DTAI - KU Leuven
+ 
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+ 
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+ 
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+ 
+ 
+ "Commons Clause" License Condition v1.0
+ 
+ The Software is provided to you by the Licensor under the License, as defined below, subject to the following condition.
+ 
+ Without limiting other conditions in the License, the grant of rights under the License will not include, and the License does not grant to you, the right to Sell the Software.
+ 
+ For purposes of the foregoing, "Sell" means practicing any or all of the rights granted to you under the License to provide to third parties, for a fee or other consideration (including without limitation fees for hosting or consulting/ support services related to the Software), a product or service whose value derives, entirely or substantially, from the functionality of the Software. Any license notice or attribution required by the License must also include this Commons Clause License Condition notice.
+ 
+ Software: All CGGD-Toolbox associated files.
+ License: MIT
+ Licensor: DTAI - KU Leuven
@@ -0,0 +1,196 @@
+ Metadata-Version: 2.1
+ Name: congrads
+ Version: 0.1.0
+ Summary: A toolbox for using Constraint Guided Gradient Descent when training neural networks.
+ Author-email: Wout Rombouts <wout.rombouts@kuleuven.be>, Quinten Van Baelen <quinten.vanbaelen@kuleuven.be>
+ License: MIT License
+ 
+ Copyright (c) 2024 DTAI - KU Leuven
+ 
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+ 
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+ 
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+ 
+ 
+ "Commons Clause" License Condition v1.0
+ 
+ The Software is provided to you by the Licensor under the License, as defined below, subject to the following condition.
+ 
+ Without limiting other conditions in the License, the grant of rights under the License will not include, and the License does not grant to you, the right to Sell the Software.
+ 
+ For purposes of the foregoing, "Sell" means practicing any or all of the rights granted to you under the License to provide to third parties, for a fee or other consideration (including without limitation fees for hosting or consulting/ support services related to the Software), a product or service whose value derives, entirely or substantially, from the functionality of the Software. Any license notice or attribution required by the License must also include this Commons Clause License Condition notice.
+ 
+ Software: All CGGD-Toolbox associated files.
+ License: MIT
+ Licensor: DTAI - KU Leuven
+ Requires-Python: >=3.9
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: torch>=1.12.0
+ Requires-Dist: pytorch-lightning>=2.0.0
+ Requires-Dist: pandas>=2.2.2
+ Requires-Dist: numpy>=1.26.4
+ 
+ # Congrads
+ 
+ **Congrads** is a Python toolbox that brings **constraint-guided gradient descent** capabilities to your machine learning projects. Built with seamless integration into PyTorch and PyTorch Lightning, Congrads empowers you to enhance the training and optimization process by incorporating constraints into your training pipeline.
+ 
+ Whether you're working with simple inequality constraints, combinations of input-output relations, or custom constraint formulations, Congrads provides the tools and flexibility needed to build more robust and generalized models.
+ 
+ > <strong>Note:</strong> The Congrads toolbox is currently in alpha phase. Expect significant changes, potential bugs, and incomplete features as we continue to develop and improve the functionality. Feedback is highly appreciated during this phase to help us refine the toolbox and ensure its reliability in later stages.
+ 
+ ## Key Features
+ 
+ - **Constraint-Guided Training**: Add constraints to guide the optimization process, helping your model generalize better by trying to satisfy the constraints.
+ - **Flexible Constraint Definition**: Define constraints on inputs, outputs, or combinations thereof, using an intuitive and extensible interface. Make use of pre-programmed constraint classes or write your own.
+ - **Seamless PyTorch Integration**: Use Congrads within your existing PyTorch workflows with minimal setup.
+ - **PyTorch Lightning Support**: Easily plug into PyTorch Lightning projects for scalable and structured model training.
+ - **Flexible and extensible**: Write your own custom networks, constraints and dataset classes to easily extend the functionality of the toolbox.
+ 
+ ## Installation
+ 
+ Currently, the **Congrads** toolbox can only be installed using pip. We will later expand to other package managers such as conda.
+ 
+ ```bash
+ pip install congrads
+ ```
+ 
+ ## Getting Started
+ 
+ ### 1. **Prerequisites**
+ 
+ Before you can use **Congrads**, make sure you have the following installed:
+ 
+ - Python 3.9+
+ - **PyTorch** (install with CUDA support for GPU training, refer to the [getting started guide](https://pytorch.org/get-started/locally/))
+ - **PyTorch Lightning** (preferred version 2.4, [installation guide](https://lightning.ai/docs/pytorch/stable/starter/installation.html))
+ 
+ ### 2. **Installation**
+ 
+ Please install **Congrads** via pip:
+ 
+ ```bash
+ pip install congrads
+ ```
+ 
+ ### 3. **Basic Usage**
+ 
+ #### 1. Import the toolbox
+ 
+ ```python
+ from congrads.descriptor import Descriptor
+ from congrads.constraints import Constraint, ScalarConstraint, BinaryConstraint
+ from congrads.learners import Learner
+ ```
+ 
+ #### 2. Instantiate and configure descriptor
+ 
+ The descriptor describes your specific use-case. It assigns names to specific neurons so you can easily reference them when defining constraints. By setting flags, you can specify whether a layer is fixed or whether it is an output layer.
+ 
+ ```python
+ # Descriptor setup
+ descriptor = Descriptor()
+ descriptor.add("input", ["I1", "I2", "I3", "I4"], constant=True)
+ descriptor.add("output", ["O1", "O2"], output=True)
+ ```
+ 
+ #### 3. Define constraints on your network
+ 
+ You can define constraints on your network using the names previously configured in the descriptor. A set of predefined constraint classes can be used to define inequalities on input or output data.
+ 
+ ```python
+ # Constraints definition (gt/le are comparison callables, e.g. operator.gt and operator.le)
+ Constraint.descriptor = descriptor
+ constraints = [
+     ScalarConstraint("O1", gt, 0),  # O1 > 0
+     BinaryConstraint("O1", le, "O2"),  # O1 <= O2
+ ]
+ ```
+ 
+ #### 4. Adjust network
+ 
+ Your regular PyTorch network can be used with this toolbox. We only require that the output of your model's forward pass is a dictionary of layer outputs whose keys match the names configured in the descriptor.
+ 
+ ```python
+ def forward(self, X):
+     input = X
+     output = self.out(self.hidden(self.input(X)))
+ 
+     return {"input": input, "output": output}
+ ```
+ 
+ You can then use your own network and assign it directly to the learner.
+ 
+ #### 5. Set up network and data
+ 
+ Next, instantiate the adjusted network and the data. At the moment, we require the data to be implemented as a `LightningDataModule` class.
+ 
+ ```python
+ # Data and network setup
+ network = YourOwnNetwork(n_inputs=4, n_outputs=2, n_hidden_layers=3, hidden_dim=10)
+ data = YourOwnData(batch_size=100)
+ ```
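The walkthrough does not show what `YourOwnData` might look like. Below is a minimal sketch (editorial, not part of the package README), assuming random placeholder tensors and the standard `LightningDataModule` hooks; any data module whose dataloaders yield `(inputs, targets)` batches should fit, since the learner unpacks each batch as `inputs, outputs = batch`.

```python
import torch
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader, TensorDataset


class YourOwnData(LightningDataModule):
    """Hypothetical data module serving (inputs, targets) batches."""

    def __init__(self, batch_size=100):
        super().__init__()
        self.batch_size = batch_size

    def setup(self, stage=None):
        # Random placeholder data: 4 input features, 2 targets per sample.
        X = torch.randn(1000, 4)
        y = torch.randn(1000, 2)
        self.train_set = TensorDataset(X[:800], y[:800])
        self.valid_set = TensorDataset(X[800:], y[800:])

    def train_dataloader(self):
        return DataLoader(self.train_set, batch_size=self.batch_size, shuffle=True)

    def val_dataloader(self):
        return DataLoader(self.valid_set, batch_size=self.batch_size)
```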
+ 
+ #### 6. Set up learner
+ 
+ You can specify your own loss function and optimizer, with their own settings, to be used for training the model.
+ 
+ ```python
+ from torch.nn import MSELoss
+ from torch.optim import Adam
+ 
+ # Learner setup
+ loss_function = MSELoss()
+ optimizer = Adam(network.parameters(), lr=0.001)
+ 
+ learner = Learner(network, descriptor, constraints, loss_function, optimizer)
+ ```
+ 
+ #### 7. Set up trainer
+ 
+ Finally, set up a trainer to start the actual training of the model.
+ 
+ ```python
+ from pytorch_lightning import Trainer
+ 
+ # Trainer setup
+ trainer = Trainer(max_epochs=100)
+ 
+ # Train model
+ trainer.fit(learner, data)
+ ```
+ 
+ ## Example Use Cases
+ 
+ - **Optimization with Domain Knowledge**: Ensure outputs meet real-world restrictions or safety standards.
+ - **Physics-Informed Neural Networks (PINNs)**: Enforce physical laws as constraints in your models.
+ - **Improve Training Process**: Inject domain knowledge in the training stage, increasing learning efficiency.
+ 
+ ## Roadmap
+ 
+ - [ ] Documentation and Notebook examples
+ - [ ] Add support for a constraint parser that can interpret equations
+ - [ ] Add better handling of metric logging and visualization
+ - [ ] Revise whether PyTorch Lightning is preferable over plain PyTorch
+ - [ ] Determine if it is feasible to add unit and/or functional tests
+ 
+ ## Contributing
+ 
+ We welcome contributions to Congrads! Whether you want to report issues, suggest features, or contribute code, feel free to open an issue or pull request.
+ 
+ ## License
+ 
+ Congrads is licensed under the [MIT License with a Commons Clause](LICENSE). This means you are free to use, modify, and distribute the software, but you may not sell or offer it as part of a paid service without permission. We encourage companies interested in a collaboration on a specific topic to contact the authors for more information.
+ 
+ ---
+ 
+ Elevate your neural networks with Congrads! 🚀