congrads 1.0.7__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
congrads/descriptor.py CHANGED
@@ -1,130 +1,166 @@
1
- """
2
- This module defines the `Descriptor` class, which is designed to manage
3
- the mapping between neuron names, their corresponding layers, and additional
4
- properties such as constant or variable status. It provides a way to easily
5
- place constraints on parts of your network, by referencing the neuron names
1
+ """This module defines the `Descriptor` class, which allows assigning tags to parts in the network.
2
+
3
+ It is designed to manage the mapping between tags, their corresponding data dictionary keys and indices,
4
+ and additional properties such as constant or variable status. It provides a way to easily
5
+ place constraints on parts of your network, by referencing the tags
6
6
  instead of indices.
7
7
 
8
8
  The `Descriptor` class allows for easy constraint definitions on parts of
9
- your neural network. It supports registering neurons with associated layers,
10
- indices, and optional attributes, such as whether the layer is constant
11
- or variable.
12
-
13
- Key Methods:
14
-
15
- - `__init__`: Initializes the `Descriptor` object with empty mappings
16
- and sets for managing neurons and layers.
17
- - `add`: Registers a neuron with its associated layer, index, and
18
- optional constant status.
19
-
9
+ your neural network. It supports registering tags with associated data dictionary keys,
10
+ indices, and optional attributes, such as whether the data is constant or variable.
20
11
  """
21
12
 
13
+ from torch import Tensor
14
+
22
15
  from .utils import validate_type
23
16
 
24
17
 
25
18
  class Descriptor:
26
- """
27
- A class to manage the mapping between neuron names, their corresponding
28
- layers, and additional properties (such as min/max values, output,
29
- and constant variables).
19
+ """A class to manage the mapping between tags.
30
20
 
31
- This class is designed to track the relationship between neurons and
32
- layers in a neural network. It allows for the assignment of properties
33
- (like minimum and maximum values, and whether a layer is an output,
34
- constant, or variable) to each neuron. The data is stored in
35
- dictionaries and sets for efficient lookups.
21
+ It represents data locations in the data dictionary and holds the dictionary keys, indices,
22
+ and additional properties (such as min/max values, output, and constant variables).
23
+
24
+ This class is designed to manage the relationships between the assigned tags and the
25
+ data dictionary keys in a neural network model. It allows for the assignment of properties
26
+ (like minimum and maximum values, and whether data is an output, constant, or variable) to
27
+ each tag. The data is stored in dictionaries and sets for efficient lookups.
36
28
 
37
29
  Attributes:
38
- neuron_to_layer (dict): A dictionary mapping neuron names to
39
- their corresponding layer names.
40
- neuron_to_index (dict): A dictionary mapping neuron names to
41
- their corresponding indices in the layers.
42
- constant_layers (set): A set of layer names that represent
43
- constant layers.
44
- variable_layers (set): A set of layer names that represent
45
- variable layers.
30
+ constant_keys (set): A set of keys that represent constant data in the data dictionary.
31
+ variable_keys (set): A set of keys that represent variable data in the data dictionary.
32
+ affects_loss_keys (set): A set of keys that represent data affecting the loss computation.
46
33
  """
47
34
 
48
35
  def __init__(
49
36
  self,
50
37
  ):
51
- """
52
- Initializes the Descriptor object.
53
- """
54
-
55
- # Define dictionaries that will translate neuron
56
- # names to layer and index
57
- self.neuron_to_layer: dict[str, str] = {}
58
- self.neuron_to_index: dict[str, int] = {}
38
+ """Initializes the Descriptor object."""
39
+ # Define dictionaries that will translate tags to keys and indices
40
+ self._tag_to_key: dict[str, str] = {}
41
+ self._tag_to_index: dict[str, int] = {}
59
42
 
60
- # Define sets that will hold the layers based on which type
61
- self.constant_layers: set[str] = set()
62
- self.variable_layers: set[str] = set()
43
+ # Define sets that will hold the keys based on which type
44
+ self.constant_keys: set[str] = set()
45
+ self.variable_keys: set[str] = set()
46
+ self.affects_loss_keys: set[str] = set()
63
47
 
64
48
  def add(
65
49
  self,
66
- layer_name: str,
67
- index: int,
68
- neuron_name: str,
50
+ key: str,
51
+ tag: str,
52
+ index: int = None,
69
53
  constant: bool = False,
54
+ affects_loss: bool = True,
70
55
  ):
71
- """
72
- Adds a neuron to the descriptor with its associated layer,
73
- index, and properties.
56
+ """Adds a tag to the descriptor with its associated key, index, and properties.
74
57
 
75
- This method registers a neuron name and associates it with a
76
- layer, its index, and optional properties such as whether
77
- the layer is an output or constant layer.
58
+ This method registers a tag name and associates it with a
59
+ data dictionary key, its index, and optional properties such as whether
60
+ the key holds output or constant data.
78
61
 
79
62
  Args:
80
- layer_name (str): The name of the layer where the neuron is located.
81
- index (int): The index of the neuron within the layer.
82
- neuron_name (str): The name of the neuron.
83
- constant (bool, optional): Whether the layer is a constant layer.
84
- Defaults to False.
63
+ key (str): The key on which the tagged data is located in the data dictionary.
64
+ tag (str): The identifier of the tag.
65
+ index (int, optional): The index where the data is present. Defaults to None.
66
+ constant (bool, optional): Whether the data is constant and is not learned. Defaults to False.
67
+ affects_loss (bool, optional): Whether the data affects the loss computation. Defaults to True.
85
68
 
86
69
  Raises:
87
70
  TypeError: If a provided attribute has an incompatible type.
88
- ValueError: If a layer or index is already assigned for a neuron
89
- or a duplicate index is used within a layer.
90
-
71
+ ValueError: If a key or index is already assigned for a tag or a duplicate index is used within a key.
91
72
  """
92
-
93
73
  # Type checking
94
- validate_type("layer_name", layer_name, str)
95
- validate_type("index", index, int)
96
- validate_type("neuron_name", neuron_name, str)
74
+ validate_type("key", key, str)
75
+ validate_type("tag", tag, str)
76
+ validate_type("index", index, int, allow_none=True)
97
77
  validate_type("constant", constant, bool)
78
+ validate_type("affects_loss", affects_loss, bool)
98
79
 
99
80
  # Other validations
100
- if neuron_name in self.neuron_to_layer:
81
+ if tag in self._tag_to_key:
101
82
  raise ValueError(
102
- "There already is a layer registered for the neuron with name "
103
- f"'{neuron_name}'. Please use a unique name for each neuron."
83
+ f"There already is a key registered for the tag '{tag}'. "
84
+ "Please use a unique key name for each tag."
104
85
  )
105
86
 
106
- if neuron_name in self.neuron_to_index:
87
+ if tag in self._tag_to_index:
107
88
  raise ValueError(
108
- "There already is an index registered for the neuron with name "
109
- f"'{neuron_name}'. Please use a unique name for each neuron."
89
+ f"There already is an index registered for the tag '{tag}'. "
90
+ "Please use a unique name for each tag."
110
91
  )
111
92
 
112
- for existing_neuron, assigned_index in self.neuron_to_index.items():
113
- if (
114
- assigned_index == index
115
- and self.neuron_to_layer[existing_neuron] == layer_name
116
- ):
93
+ for existing_tag, assigned_index in self._tag_to_index.items():
94
+ if assigned_index == index and self._tag_to_key[existing_tag] == key:
117
95
  raise ValueError(
118
- f"The index {index} in layer {layer_name} is already "
119
- "assigned. Every neuron must be assigned a different "
96
+ f"The index {index} on key {key} is already "
97
+ "assigned. Every tag must be assigned a different "
120
98
  "index that matches the network's output."
121
99
  )
122
100
 
123
101
  # Add to dictionaries and sets
102
+ # TODO this now happens on key level, can this also be done on tag level?
124
103
  if constant:
125
- self.constant_layers.add(layer_name)
104
+ self.constant_keys.add(key)
126
105
  else:
127
- self.variable_layers.add(layer_name)
106
+ self.variable_keys.add(key)
107
+
108
+ if affects_loss:
109
+ self.affects_loss_keys.add(key)
110
+
111
+ self._tag_to_key[tag] = key
112
+ self._tag_to_index[tag] = index
113
+
114
+ def exists(self, tag: str) -> bool:
115
+ """Check if a tag is registered in the descriptor.
116
+
117
+ Args:
118
+ tag (str): The tag identifier to check.
119
+
120
+ Returns:
121
+ bool: True if the tag is registered, False otherwise.
122
+ """
123
+ return tag in self._tag_to_key and tag in self._tag_to_index
124
+
125
+ def location(self, tag: str) -> tuple[str, int]:
126
+ """Get the key and index for a given tag.
127
+
128
+ Looks up the mapping for a registered tag and returns the associated
129
+ dictionary key and the index.
130
+
131
+ Args:
132
+ tag (str): The tag identifier. Must be registered.
133
+
134
+ Returns:
135
+ tuple ((str, int)): A tuple containing:
136
+ - The key in the data dictionary which holds the data (str).
137
+ - The tensor index where the data is present (int).
138
+
139
+ Raises:
140
+ ValueError: If the tag is not registered in the descriptor.
141
+ """
142
+ key = self._tag_to_key.get(tag)
143
+ index = self._tag_to_index.get(tag)
144
+ if key is None or index is None:
145
+ raise ValueError(f"Tag '{tag}' is not registered in descriptor.")
146
+ return key, index
147
+
148
+ def select(self, tag: str, data: dict[str, Tensor]) -> Tensor:
149
+ """Extract prediction values for a specific tag.
128
150
 
129
- self.neuron_to_layer[neuron_name] = layer_name
130
- self.neuron_to_index[neuron_name] = index
151
+ Retrieves the key and index associated with a tag and selects
152
+ the corresponding slice from the given prediction tensor.
153
+
154
+ Args:
155
+ tag (str): The tag identifier. Must be registered.
156
+ data (dict[str, Tensor]): Dictionary that holds batch data, model predictions and context.
157
+
158
+ Returns:
159
+ Tensor: A tensor slice of shape ``(batch_size, 1)`` containing
160
+ the predictions for the specified tag.
161
+
162
+ Raises:
163
+ ValueError: If the tag is not registered in the descriptor.
164
+ """
165
+ key, index = self.location(tag)
166
+ return data[key][:, index : index + 1]
congrads/metrics.py CHANGED
@@ -1,164 +1,92 @@
1
- """
2
- This module defines the `Metric` and `MetricManager` classes, which are
3
- used to track and aggregate performance metrics during model training or
4
- evaluation in machine learning. These classes support the accumulation of
5
- metric values, aggregation using customizable functions (such as mean),
6
- and resetting of the metrics.
7
-
8
- Classes:
9
-
10
- - Metric: A class that tracks and aggregates a specific metric over
11
- multiple samples, allowing for accumulation, aggregation, and
12
- resetting of values.
13
- - MetricManager: A class that manages and tracks multiple metrics
14
- during model training or evaluation, supporting registration,
15
- accumulation, aggregation, and resetting of metrics.
16
-
17
- Key Methods:
18
-
19
- - `Metric.__init__`: Initializes a metric with a specified name and
20
- optional accumulator function (defaults to `nanmean`).
21
- - `Metric.accumulate`: Accumulates a new value for the metric,
22
- typically a tensor of model output or performance.
23
- - `Metric.aggregate`: Aggregates the accumulated values using the
24
- specified accumulator function.
25
- - `Metric.reset`: Resets the accumulated values and sample count for
26
- the metric.
27
- - `MetricManager.__init__`: Initializes a manager for multiple metrics.
28
- - `MetricManager.register`: Registers a new metric with a name, group,
29
- and optional accumulator function.
30
- - `MetricManager.accumulate`: Accumulates a new value for the specified
31
- metric.
32
- - `MetricManager.aggregate`: Aggregates all metrics in a specified group.
33
- - `MetricManager.reset`: Resets all registered metrics in a specified
34
- group.
35
-
36
- Each class provides functionality to efficiently track, aggregate, and reset
37
- metrics during the training and evaluation phases of machine learning tasks,
38
- supporting flexible aggregation strategies and group-based management of
39
- metrics.
1
+ """Module for managing metrics during training.
2
+
3
+ Provides the `Metric` and `MetricManager` classes for accumulating,
4
+ aggregating, and resetting metrics over training batches. Supports
5
+ grouping metrics and using custom accumulation functions.
40
6
  """
41
7
 
42
- from typing import Callable
8
+ from collections.abc import Callable
43
9
 
44
- from torch import Tensor, cat, nanmean
10
+ from torch import Tensor, cat, nanmean, tensor
45
11
 
46
12
  from .utils import validate_callable, validate_type
47
13
 
48
14
 
49
15
  class Metric:
50
- """
51
- A class that tracks and aggregates a specific metric over multiple samples.
52
-
53
- This class allows the accumulation of values, their aggregation using a
54
- specified function (e.g., mean), and the ability to reset the metrics.
55
- It is typically used to track performance metrics during training or
56
- evaluation processes in machine learning.
57
-
58
- Args:
59
- name (str): The name of the metric.
60
- accumulator (Callable[..., Tensor], optional): A function used to
61
- aggregate values (defaults to `nanmean`).
62
-
63
- Attributes:
64
- name (str): The name of the metric.
65
- accumulator (Callable[..., Tensor]): The function used to aggregate
66
- values.
67
- values (list): A list to store accumulated values.
68
- sample_count (int): The count of accumulated samples.
16
+ """Represents a single metric to be accumulated and aggregated.
69
17
 
18
+ Stores metric values over multiple batches and computes an aggregated
19
+ result using a specified accumulation function.
70
20
  """
71
21
 
72
- def __init__(
73
- self,
74
- name: str,
75
- accumulator: Callable[..., Tensor] = nanmean,
76
- ) -> None:
77
- """
78
- Constructor method
79
- """
22
+ def __init__(self, name: str, accumulator: Callable[..., Tensor] = nanmean) -> None:
23
+ """Initialize a Metric instance.
80
24
 
25
+ Args:
26
+ name (str): Name of the metric.
27
+ accumulator (Callable[..., Tensor], optional): Function to aggregate
28
+ accumulated values. Defaults to `torch.nanmean`.
29
+ """
81
30
  # Type checking
82
31
  validate_type("name", name, str)
83
32
  validate_callable("accumulator", accumulator)
84
33
 
85
34
  self.name = name
86
35
  self.accumulator = accumulator
87
-
88
- self.values = []
36
+ self.values: list[Tensor] = []
89
37
  self.sample_count = 0
90
38
 
91
39
  def accumulate(self, value: Tensor) -> None:
92
- """
93
- Accumulates a new value for the metric.
40
+ """Accumulate a new value for the metric.
94
41
 
95
42
  Args:
96
- value (Tensor): The new value to accumulate, typically a
97
- tensor of model output or performance.
43
+ value (Tensor): Metric values for the current batch.
98
44
  """
99
-
100
- self.values.append(value)
45
+ self.values.append(value.detach().clone())
101
46
  self.sample_count += value.size(0)
102
47
 
103
48
  def aggregate(self) -> Tensor:
104
- """
105
- Aggregates the accumulated values using the specified
106
- accumulator function.
49
+ """Compute the aggregated value of the metric.
107
50
 
108
51
  Returns:
109
- Tensor: The aggregated result of the accumulated values.
52
+ Tensor: The aggregated metric value. Returns NaN if no values
53
+ have been accumulated.
110
54
  """
55
+ if not self.values:
56
+ return tensor(float("nan"))
111
57
 
112
58
  combined = cat(self.values)
113
59
  return self.accumulator(combined)
114
60
 
115
61
  def reset(self) -> None:
116
- """
117
- Resets the accumulated values and sample count for the metric.
118
- """
119
-
62
+ """Reset the accumulated values and sample count for the metric."""
120
63
  self.values = []
121
64
  self.sample_count = 0
122
65
 
123
66
 
124
67
  class MetricManager:
125
- """
126
- A class to manage and track multiple metrics during model
127
- training or evaluation.
128
-
129
- This class allows registering metrics, accumulating values for each metric,
130
- and recording the aggregated values. It also supports the reset of metrics
131
- after each epoch or training step.
68
+ """Manages multiple metrics and groups for training or evaluation.
132
69
 
133
- Attributes:
134
- metrics (dict[str, Metric]): A dictionary of registered metrics.
135
- groups (dict[str, str]): A dictionary mapping metric names to groups.
70
+ Supports registering metrics, accumulating values by name, aggregating
71
+ metrics by group, and resetting metrics by group.
136
72
  """
137
73
 
138
74
  def __init__(self) -> None:
139
- """
140
- Constructor method
141
- """
142
-
75
+ """Initialize a MetricManager instance."""
143
76
  self.metrics: dict[str, Metric] = {}
144
77
  self.groups: dict[str, str] = {}
145
78
 
146
79
  def register(
147
- self,
148
- name: str,
149
- group: str,
150
- accumulator: Callable[..., Tensor] = nanmean,
80
+ self, name: str, group: str = "default", accumulator: Callable[..., Tensor] = nanmean
151
81
  ) -> None:
152
- """
153
- Registers a new metric with the specified name and accumulator function.
82
+ """Register a new metric under a specified group.
154
83
 
155
84
  Args:
156
- name (str): The name of the metric to register.
157
- group (str): The name of the group to assign the metric to.
158
- accumulator (Callable[..., Tensor], optional): The function used
159
- to aggregate values for the metric (defaults to `nanmean`).
85
+ name (str): Name of the metric.
86
+ group (str, optional): Group name for the metric. Defaults to "default".
87
+ accumulator (Callable[..., Tensor], optional): Function to aggregate
88
+ accumulated values. Defaults to `torch.nanmean`.
160
89
  """
161
-
162
90
  # Type checking
163
91
  validate_type("name", name, str)
164
92
  validate_type("group", group, str)
@@ -168,44 +96,44 @@ class MetricManager:
168
96
  self.groups[name] = group
169
97
 
170
98
  def accumulate(self, name: str, value: Tensor) -> None:
171
- """
172
- Accumulates a new value for the specified metric.
99
+ """Accumulate a value for a specific metric by name.
173
100
 
174
101
  Args:
175
- name (str): The name of the metric.
176
- value (Tensor): The new value to accumulate.
102
+ name (str): Name of the metric.
103
+ value (Tensor): Metric values for the current batch.
177
104
  """
105
+ if name not in self.metrics:
106
+ raise KeyError(f"Metric '{name}' is not registered.")
178
107
 
179
108
  self.metrics[name].accumulate(value)
180
109
 
181
- def aggregate(self, group: str) -> dict[str, Tensor]:
182
- """
183
- Aggregates all metrics in a group using the accumulators
184
- specified during registration.
110
+ def aggregate(self, group: str = "default") -> dict[str, Tensor]:
111
+ """Aggregate all metrics in a specified group.
185
112
 
186
113
  Args:
187
- group (str): The name of the group.
114
+ group (str, optional): The group of metrics to aggregate. Defaults to "default".
188
115
 
189
116
  Returns:
190
- dict[str, Tensor]: A dictionary with the metric names and the
191
- corresponding aggregated values of the selected group.
117
+ dict[str, Tensor]: Dictionary mapping metric names to their
118
+ aggregated values.
192
119
  """
193
-
194
120
  return {
195
121
  name: metric.aggregate()
196
122
  for name, metric in self.metrics.items()
197
123
  if self.groups[name] == group
198
124
  }
199
125
 
200
- def reset(self, group: str) -> None:
201
- """
202
- Resets all registered metrics in a group.
126
+ def reset(self, group: str = "default") -> None:
127
+ """Reset all metrics in a specified group.
203
128
 
204
129
  Args:
205
- group (str): The name of the group.
130
+ group (str, optional): The group of metrics to reset. Defaults to "default".
206
131
  """
207
-
208
132
  for name, metric in self.metrics.items():
209
133
  if self.groups[name] == group:
210
134
  metric.reset()
211
- metric.reset()
135
+
136
+ def reset_all(self) -> None:
137
+ """Reset all metrics across all groups."""
138
+ for metric in self.metrics.values():
139
+ metric.reset()
congrads/networks.py CHANGED
@@ -1,52 +1,11 @@
1
- """
2
- This module defines the `MLPNetwork` class, which constructs and
3
- operates a multi-layer perceptron (MLP) neural network model. The MLP
4
- network consists of an input layer, multiple hidden layers, and an
5
- output layer. It allows for configurable hyperparameters such as the
6
- number of input features, output features, number of hidden layers,
7
- and the dimensionality of the hidden layers.
8
-
9
- Classes:
10
-
11
- - MLPNetwork: A neural network model that implements a multi-layer
12
- perceptron with customizable layers and dimensionalities.
13
-
14
- Key Methods:
15
-
16
- - `__init__`: Initializes the MLP network with specified input size,
17
- output size, number of hidden layers, and hidden layer dimensionality.
18
- - `forward`: Performs a forward pass through the network, returning
19
- both the input and output of the model.
20
- - `linear`: Creates a basic linear block consisting of a Linear layer
21
- followed by a ReLU activation function.
22
-
23
- The `MLPNetwork` class constructs a fully connected neural network with
24
- multiple hidden layers, providing flexibility in designing the network
25
- architecture. It can be used for regression, classification, or other
26
- machine learning tasks that require a feedforward neural network structure.
27
- """
1
+ """Module defining the network architectures and components."""
28
2
 
3
+ from torch import Tensor
29
4
  from torch.nn import Linear, Module, ReLU, Sequential
30
5
 
31
6
 
32
7
  class MLPNetwork(Module):
33
- """
34
- A multi-layer perceptron (MLP) neural network model consisting of
35
- an input layer, multiple hidden layers, and an output layer.
36
-
37
- This class constructs an MLP with configurable hyperparameters such as the
38
- number of input features, output features, number of hidden layers, and
39
- the dimensionality of hidden layers. It provides methods for both
40
- building the model and performing a forward pass through the network.
41
-
42
- Args:
43
- n_inputs (int, optional): The number of input features. Defaults to 25.
44
- n_outputs (int, optional): The number of output features. Defaults to 2.
45
- n_hidden_layers (int, optional): The number of hidden layers.
46
- Defaults to 2.
47
- hidden_dim (int, optional): The dimensionality of the hidden layers.
48
- Defaults to 35.
49
- """
8
+ """A multi-layer perceptron (MLP) neural network with configurable hidden layers."""
50
9
 
51
10
  def __init__(
52
11
  self,
@@ -54,11 +13,18 @@ class MLPNetwork(Module):
54
13
  n_outputs,
55
14
  n_hidden_layers=3,
56
15
  hidden_dim=35,
16
+ activation=None,
57
17
  ):
58
- """
59
- Initializes the MLPNetwork.
60
- """
18
+ """Initialize the MLPNetwork.
61
19
 
20
+ Args:
21
+ n_inputs (int): Number of input features.
22
+ n_outputs (int): Number of output features.
23
+ n_hidden_layers (int, optional): Number of hidden layers. Defaults to 3.
24
+ hidden_dim (int, optional): Dimensionality of hidden layers. Defaults to 35.
25
+ activation (nn.Module, optional): Activation function module (e.g.,
26
+ `ReLU()`, `Tanh()`, `LeakyReLU(0.1)`). Defaults to `ReLU()`.
27
+ """
62
28
  super().__init__()
63
29
 
64
30
  # Init object variables
@@ -67,48 +33,36 @@ class MLPNetwork(Module):
67
33
  self.n_hidden_layers = n_hidden_layers
68
34
  self.hidden_dim = hidden_dim
69
35
 
70
- # Set up the components of our model
71
- self.input = Linear(self.n_inputs, self.hidden_dim)
72
- self.hidden = Sequential(
73
- *(
74
- self.linear(self.hidden_dim, self.hidden_dim)
75
- for _ in range(n_hidden_layers)
76
- )
77
- )
78
- self.out = Linear(self.hidden_dim, self.n_outputs)
36
+ # Default activation function
37
+ if activation is None:
38
+ activation = ReLU()
39
+ self.activation = activation
79
40
 
80
- def forward(self, data):
81
- """
82
- Performs a forward pass through the network.
41
+ # Build network layers
42
+ layers = []
83
43
 
84
- Args:
85
- data (Tensor): The input tensor to be passed through the network.
44
+ # Input layer with activation
45
+ layers.append(Linear(n_inputs, hidden_dim))
46
+ layers.append(self.activation)
86
47
 
87
- Returns:
88
- dict: A dictionary containing the 'input' (original input) and
89
- 'output' (predicted output) of the network.
90
- """
48
+ # Hidden layers (with activation after each)
49
+ for _ in range(n_hidden_layers - 1):
50
+ layers.append(Linear(hidden_dim, hidden_dim))
51
+ layers.append(self.activation)
91
52
 
92
- output = self.out(self.hidden(self.input(data)))
53
+ # Output layer (no activation by default)
54
+ layers.append(Linear(hidden_dim, n_outputs))
93
55
 
94
- return {"input": data, "output": output}
56
+ self.network = Sequential(*layers)
95
57
 
96
- @staticmethod
97
- def linear(in_features, out_features):
98
- """
99
- Creates a basic linear block with a linear transformation followed
100
- by a ReLU activation function.
58
+ def forward(self, data: dict[str, Tensor]):
59
+ """Run a forward pass through the network.
101
60
 
102
61
  Args:
103
- in_features (int): The number of input features.
104
- out_features (int): The number of output features.
62
+ data (dict[str, Tensor]): Input data to be processed by the network.
105
63
 
106
64
  Returns:
107
- nn.Module: A sequential module consisting of a Linear layer
108
- and ReLU activation.
65
+ dict: The original data dictionary augmented with the network's output (under key "output").
109
66
  """
110
-
111
- return Sequential(
112
- Linear(in_features, out_features),
113
- ReLU(),
114
- )
67
+ data["output"] = self.network(data["input"])
68
+ return data
congrads/py.typed ADDED
File without changes