congrads 0.1.0-py3-none-any.whl → 1.0.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- congrads/__init__.py +21 -13
- congrads/checkpoints.py +232 -0
- congrads/constraints.py +728 -316
- congrads/core.py +525 -139
- congrads/datasets.py +273 -516
- congrads/descriptor.py +95 -30
- congrads/metrics.py +185 -38
- congrads/networks.py +51 -28
- congrads/requirements.txt +6 -0
- congrads/transformations.py +139 -0
- congrads/utils.py +710 -0
- congrads-1.0.1.dist-info/LICENSE +26 -0
- congrads-1.0.1.dist-info/METADATA +208 -0
- congrads-1.0.1.dist-info/RECORD +16 -0
- {congrads-0.1.0.dist-info → congrads-1.0.1.dist-info}/WHEEL +1 -1
- congrads/learners.py +0 -233
- congrads-0.1.0.dist-info/LICENSE +0 -34
- congrads-0.1.0.dist-info/METADATA +0 -196
- congrads-0.1.0.dist-info/RECORD +0 -13
- {congrads-0.1.0.dist-info → congrads-1.0.1.dist-info}/top_level.txt +0 -0
congrads/descriptor.py
CHANGED
@@ -1,65 +1,130 @@
+"""
+This module defines the `Descriptor` class, which is designed to manage
+the mapping between neuron names, their corresponding layers, and additional
+properties such as constant or variable status. It provides a way to easily
+place constraints on parts of your network, by referencing the neuron names
+instead of indices.
+
+The `Descriptor` class allows for easy constraint definitions on parts of
+your neural network. It supports registering neurons with associated layers,
+indices, and optional attributes, such as whether the layer is constant
+or variable.
+
+Key Methods:
+
+- `__init__`: Initializes the `Descriptor` object with empty mappings
+  and sets for managing neurons and layers.
+- `add`: Registers a neuron with its associated layer, index, and
+  optional constant status.
+
+"""
+
+from .utils import validate_type
+
+
 class Descriptor:
     """
-    A class to manage the mapping
-
+    A class to manage the mapping between neuron names, their corresponding
+    layers, and additional properties (such as min/max values, output,
+    and constant variables).
 
-    This class
-
-
+    This class is designed to track the relationship between neurons and
+    layers in a neural network. It allows for the assignment of properties
+    (like minimum and maximum values, and whether a layer is an output,
+    constant, or variable) to each neuron. The data is stored in
+    dictionaries and sets for efficient lookups.
 
-
-
+    Attributes:
+        neuron_to_layer (dict): A dictionary mapping neuron names to
+            their corresponding layer names.
+        neuron_to_index (dict): A dictionary mapping neuron names to
+            their corresponding indices in the layers.
+        constant_layers (set): A set of layer names that represent
+            constant layers.
+        variable_layers (set): A set of layer names that represent
+            variable layers.
     """
 
     def __init__(
         self,
    ):
         """
-
-
-        This includes:
-        - `neuron_to_layer`: A dictionary mapping neuron names to their corresponding layer names.
-        - `neuron_to_index`: A dictionary mapping neuron names to their corresponding index within a layer.
-        - `output_layers`: A set that holds the names of layers marked as output layers.
-        - `constant_layers`: A set that holds the names of layers marked as constant layers.
-        - `variable_layers`: A set that holds the names of layers marked as variable layers.
+        Initializes the Descriptor object.
         """
 
-        # Define dictionaries that will translate neuron
+        # Define dictionaries that will translate neuron
+        # names to layer and index
         self.neuron_to_layer: dict[str, str] = {}
         self.neuron_to_index: dict[str, int] = {}
 
         # Define sets that will hold the layers based on which type
-        self.output_layers: set[str] = set()
         self.constant_layers: set[str] = set()
         self.variable_layers: set[str] = set()
 
     def add(
         self,
         layer_name: str,
-
-
+        index: int,
+        neuron_name: str,
         constant: bool = False,
     ):
         """
-
-
+        Adds a neuron to the descriptor with its associated layer,
+        index, and properties.
+
+        This method registers a neuron name and associates it with a
+        layer, its index, and optional properties such as whether
+        the layer is an output or constant layer.
 
         Args:
-            layer_name (str): The name of the layer
-
-
-            constant (bool, optional):
+            layer_name (str): The name of the layer where the neuron is located.
+            index (int): The index of the neuron within the layer.
+            neuron_name (str): The name of the neuron.
+            constant (bool, optional): Whether the layer is a constant layer.
+                Defaults to False.
+
+        Raises:
+            TypeError: If a provided attribute has an incompatible type.
+            ValueError: If a layer or index is already assigned for a neuron
+                or a duplicate index is used within a layer.
+
         """
 
-
-
+        # Type checking
+        validate_type("layer_name", layer_name, str)
+        validate_type("index", index, int)
+        validate_type("neuron_name", neuron_name, str)
+        validate_type("constant", constant, bool)
+
+        # Other validations
+        if neuron_name in self.neuron_to_layer:
+            raise ValueError(
+                "There already is a layer registered for the neuron with name "
+                f"'{neuron_name}'. Please use a unique name for each neuron."
+            )
+
+        if neuron_name in self.neuron_to_index:
+            raise ValueError(
+                "There already is an index registered for the neuron with name "
+                f"'{neuron_name}'. Please use a unique name for each neuron."
+            )
+
+        for existing_neuron, assigned_index in self.neuron_to_index.items():
+            if (
+                assigned_index == index
+                and self.neuron_to_layer[existing_neuron] == layer_name
+            ):
+                raise ValueError(
+                    f"The index {index} in layer {layer_name} is already "
+                    "assigned. Every neuron must be assigned a different "
+                    "index that matches the network's output."
+                )
 
+        # Add to dictionaries and sets
         if constant:
             self.constant_layers.add(layer_name)
         else:
             self.variable_layers.add(layer_name)
 
-
-
-        self.neuron_to_index[neuron_name] = index
+        self.neuron_to_layer[neuron_name] = layer_name
+        self.neuron_to_index[neuron_name] = index
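For orientation, a minimal usage sketch of the new `Descriptor.add` signature (layer name, index, neuron name, optional `constant` flag). The layer and neuron names below are hypothetical, and the import path is assumed from the package layout:

    from congrads.descriptor import Descriptor

    descriptor = Descriptor()

    # Map two hypothetical output neurons to indices 0 and 1 of a layer
    # named "output"; the layer is tracked as variable by default.
    descriptor.add("output", 0, "flow_rate")
    descriptor.add("output", 1, "pressure")

    # constant=True registers the layer in constant_layers instead.
    descriptor.add("input", 0, "valve_setting", constant=True)

    # Reusing an index within the same layer raises a ValueError:
    # descriptor.add("output", 0, "duplicate_neuron")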
congrads/metrics.py
CHANGED
@@ -1,64 +1,211 @@
-
-
+"""
+This module defines the `Metric` and `MetricManager` classes, which are
+used to track and aggregate performance metrics during model training or
+evaluation in machine learning. These classes support the accumulation of
+metric values, aggregation using customizable functions (such as mean),
+and resetting of the metrics.
 
-
+Classes:
 
+- Metric: A class that tracks and aggregates a specific metric over
+  multiple samples, allowing for accumulation, aggregation, and
+  resetting of values.
+- MetricManager: A class that manages and tracks multiple metrics
+  during model training or evaluation, supporting registration,
+  accumulation, aggregation, and resetting of metrics.
 
-
+Key Methods:
+
+- `Metric.__init__`: Initializes a metric with a specified name and
+  optional accumulator function (defaults to `nanmean`).
+- `Metric.accumulate`: Accumulates a new value for the metric,
+  typically a tensor of model output or performance.
+- `Metric.aggregate`: Aggregates the accumulated values using the
+  specified accumulator function.
+- `Metric.reset`: Resets the accumulated values and sample count for
+  the metric.
+- `MetricManager.__init__`: Initializes a manager for multiple metrics.
+- `MetricManager.register`: Registers a new metric with a name, group,
+  and optional accumulator function.
+- `MetricManager.accumulate`: Accumulates a new value for the specified
+  metric.
+- `MetricManager.aggregate`: Aggregates all metrics in a specified group.
+- `MetricManager.reset`: Resets all registered metrics in a specified
+  group.
+
+Each class provides functionality to efficiently track, aggregate, and reset
+metrics during the training and evaluation phases of machine learning tasks,
+supporting flexible aggregation strategies and group-based management of
+metrics.
+"""
+
+from typing import Callable
+
+from torch import Tensor, cat, nanmean
+
+from .utils import validate_callable, validate_type
+
+
+class Metric:
     """
-    A
-    It computes the proportion of constraints that have been satisfied,
-    where satisfaction is determined based on the provided constraint results.
+    A class that tracks and aggregates a specific metric over multiple samples.
 
-    This
-
-
+    This class allows the accumulation of values, their aggregation using a
+    specified function (e.g., mean), and the ability to reset the metrics.
+    It is typically used to track performance metrics during training or
+    evaluation processes in machine learning.
+
+    Args:
+        name (str): The name of the metric.
+        accumulator (Callable[..., Tensor], optional): A function used to
+            aggregate values (defaults to `nanmean`).
 
     Attributes:
-
-
+        name (str): The name of the metric.
+        accumulator (Callable[..., Tensor]): The function used to aggregate
+            values.
+        values (list): A list to store accumulated values.
+        sample_count (int): The count of accumulated samples.
 
-    Note:
-        For more information about custom metrics, we refer to the Pytorch Lightning documentation
-        at https://lightning.ai/docs/torchmetrics/stable/pages/implement.html
     """
 
-    def __init__(
+    def __init__(
+        self,
+        name: str,
+        accumulator: Callable[..., Tensor] = nanmean,
+    ) -> None:
+        """
+        Constructor method
+        """
+
+        # Type checking
+        validate_type("name", name, str)
+        validate_callable("accumulator", accumulator)
+
+        self.name = name
+        self.accumulator = accumulator
+
+        self.values = []
+        self.sample_count = 0
+
+    def accumulate(self, value: Tensor) -> None:
         """
-
-        state variables to track the number of unsatisfied and total constraints.
+        Accumulates a new value for the metric.
 
         Args:
-
+            value (Tensor): The new value to accumulate, typically a
+                tensor of model output or performance.
         """
 
-
-
+        self.values.append(value)
+        self.sample_count += value.size(0)
 
-
-
-
+    def aggregate(self) -> Tensor:
+        """
+        Aggregates the accumulated values using the specified
+        accumulator function.
 
-
+        Returns:
+            Tensor: The aggregated result of the accumulated values.
+        """
+
+        combined = cat(self.values)
+        return self.accumulator(combined)
+
+    def reset(self) -> None:
+        """
+        Resets the accumulated values and sample count for the metric.
         """
-
+
+        self.values = []
+        self.sample_count = 0
+
+
+class MetricManager:
+    """
+    A class to manage and track multiple metrics during model
+    training or evaluation.
+
+    This class allows registering metrics, accumulating values for each metric,
+    and recording the aggregated values. It also supports the reset of metrics
+    after each epoch or training step.
+
+    Attributes:
+        metrics (dict[str, Metric]): A dictionary of registered metrics.
+        groups (dict[str, str]): A dictionary mapping metric names to groups.
+    """
+
+    def __init__(self) -> None:
+        """
+        Constructor method
+        """
+
+        self.metrics: dict[str, Metric] = {}
+        self.groups: dict[str, str] = {}
+
+    def register(
+        self,
+        name: str,
+        group: str,
+        accumulator: Callable[..., Tensor] = nanmean,
+    ) -> None:
+        """
+        Registers a new metric with the specified name and accumulator function.
 
         Args:
-
-
-
-
-
+            name (str): The name of the metric to register.
+            group (str): The name of the group to assign the metric to.
+            accumulator (Callable[..., Tensor], optional): The function used
+                to aggregate values for the metric (defaults to `nanmean`).
+        """
+
+        # Type checking
+        validate_type("name", name, str)
+        validate_type("group", group, str)
+        validate_callable("accumulator", accumulator)
+
+        self.metrics[name] = Metric(name, accumulator)
+        self.groups[name] = group
+
+    def accumulate(self, name: str, value: Tensor) -> None:
         """
-
-        self.total += numel(constraint_result)
+        Accumulates a new value for the specified metric.
 
-
+        Args:
+            name (str): The name of the metric.
+            value (Tensor): The new value to accumulate.
         """
-
-
+
+        self.metrics[name].accumulate(value)
+
+    def aggregate(self, group: str) -> dict[str, Tensor]:
+        """
+        Aggregates all metrics in a group using the accumulators
+        specified during registration.
+
+        Args:
+            group (str): The name of the group.
 
         Returns:
-            Tensor:
+            dict[str, Tensor]: A dictionary with the metric names and the
+                corresponding aggregated values of the selected group.
+        """
+
+        return {
+            name: metric.aggregate()
+            for name, metric in self.metrics.items()
+            if self.groups[name] == group
+        }
+
+    def reset(self, group: str) -> None:
+        """
+        Resets all registered metrics in a group.
+
+        Args:
+            group (str): The name of the group.
         """
-
+
+        for name, metric in self.metrics.items():
+            if self.groups[name] == group:
+                metric.reset()
+                metric.reset()
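For orientation, a minimal sketch of the `Metric`/`MetricManager` flow added here. The metric and group names are hypothetical, and the import path is assumed from the package layout:

    import torch
    from congrads.metrics import MetricManager

    manager = MetricManager()
    manager.register("train/loss", "train")      # aggregated with nanmean by default
    manager.register("train/accuracy", "train")

    # Accumulate per-batch tensors during an epoch.
    for _ in range(3):
        manager.accumulate("train/loss", torch.rand(8))
        manager.accumulate("train/accuracy", torch.rand(8))

    # Aggregate and reset the whole group at the end of the epoch.
    results = manager.aggregate("train")  # {"train/loss": tensor(...), "train/accuracy": tensor(...)}
    manager.reset("train")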
congrads/networks.py
CHANGED
@@ -1,4 +1,32 @@
-
+"""
+This module defines the `MLPNetwork` class, which constructs and
+operates a multi-layer perceptron (MLP) neural network model. The MLP
+network consists of an input layer, multiple hidden layers, and an
+output layer. It allows for configurable hyperparameters such as the
+number of input features, output features, number of hidden layers,
+and the dimensionality of the hidden layers.
+
+Classes:
+
+- MLPNetwork: A neural network model that implements a multi-layer
+  perceptron with customizable layers and dimensionalities.
+
+Key Methods:
+
+- `__init__`: Initializes the MLP network with specified input size,
+  output size, number of hidden layers, and hidden layer dimensionality.
+- `forward`: Performs a forward pass through the network, returning
+  both the input and output of the model.
+- `linear`: Creates a basic linear block consisting of a Linear layer
+  followed by a ReLU activation function.
+
+The `MLPNetwork` class constructs a fully connected neural network with
+multiple hidden layers, providing flexibility in designing the network
+architecture. It can be used for regression, classification, or other
+machine learning tasks that require a feedforward neural network structure.
+"""
+
+from torch.nn import Linear, Module, ReLU, Sequential
 
 
 class MLPNetwork(Module):
@@ -11,33 +39,26 @@ class MLPNetwork(Module):
     the dimensionality of hidden layers. It provides methods for both
     building the model and performing a forward pass through the network.
 
-
-        n_inputs (int): The number of input features.
-        n_outputs (int): The number of output features.
-        n_hidden_layers (int): The number of hidden layers
-
-
-
-            a linear transformation followed by ReLU).
-        out (nn.Module): The output layer (linear transformation).
+    Args:
+        n_inputs (int, optional): The number of input features. Defaults to 25.
+        n_outputs (int, optional): The number of output features. Defaults to 2.
+        n_hidden_layers (int, optional): The number of hidden layers.
+            Defaults to 2.
+        hidden_dim (int, optional): The dimensionality of the hidden layers.
+            Defaults to 35.
     """
 
     def __init__(
         self,
-        n_inputs
-        n_outputs
-        n_hidden_layers=
+        n_inputs,
+        n_outputs,
+        n_hidden_layers=3,
         hidden_dim=35,
     ):
         """
-        Initializes the
-
-        Args:
-            n_inputs (int, optional): The number of input features. Defaults to 25.
-            n_outputs (int, optional): The number of output features. Defaults to 2.
-            n_hidden_layers (int, optional): The number of hidden layers. Defaults to 2.
-            hidden_dim (int, optional): The dimensionality of the hidden layers. Defaults to 35.
+        Initializes the MLPNetwork.
         """
+
         super().__init__()
 
         # Init object variables
@@ -47,7 +68,7 @@ class MLPNetwork(Module):
         self.hidden_dim = hidden_dim
 
         # Set up the components of our model
-        self.input =
+        self.input = Linear(self.n_inputs, self.hidden_dim)
         self.hidden = Sequential(
             *(
                 self.linear(self.hidden_dim, self.hidden_dim)
@@ -56,21 +77,21 @@ class MLPNetwork(Module):
         )
         self.out = Linear(self.hidden_dim, self.n_outputs)
 
-    def forward(self,
+    def forward(self, data):
         """
         Performs a forward pass through the network.
 
         Args:
-
+            data (Tensor): The input tensor to be passed through the network.
 
         Returns:
             dict: A dictionary containing the 'input' (original input) and
-
+                'output' (predicted output) of the network.
         """
-        input = X
-        output = self.out(self.hidden(self.input(X)))
 
-
+        output = self.out(self.hidden(self.input(data)))
+
+        return {"input": data, "output": output}
 
     @staticmethod
     def linear(in_features, out_features):
@@ -83,8 +104,10 @@ class MLPNetwork(Module):
            out_features (int): The number of output features.
 
        Returns:
-            nn.Module: A sequential module consisting of a Linear layer
+            nn.Module: A sequential module consisting of a Linear layer
+                and ReLU activation.
        """
+
        return Sequential(
            Linear(in_features, out_features),
            ReLU(),