congrads-0.1.0-py3-none-any.whl → congrads-1.0.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
congrads/descriptor.py CHANGED
@@ -1,65 +1,130 @@
+"""
+This module defines the `Descriptor` class, which is designed to manage
+the mapping between neuron names, their corresponding layers, and additional
+properties such as constant or variable status. It provides a way to easily
+place constraints on parts of your network, by referencing the neuron names
+instead of indices.
+
+The `Descriptor` class allows for easy constraint definitions on parts of
+your neural network. It supports registering neurons with associated layers,
+indices, and optional attributes, such as whether the layer is constant
+or variable.
+
+Key Methods:
+
+- `__init__`: Initializes the `Descriptor` object with empty mappings
+  and sets for managing neurons and layers.
+- `add`: Registers a neuron with its associated layer, index, and
+  optional constant status.
+
+"""
+
+from .utils import validate_type
+
+
 class Descriptor:
     """
-    A class to manage the mapping of neurons to layers and their properties
-    (e.g., output, constant, or variable) in a neural network.
+    A class to manage the mapping between neuron names, their corresponding
+    layers, and additional properties (such as min/max values, output,
+    and constant variables).
 
-    This class enables the organization and description of network elements,
-    such as associating neurons with specific layers and categorizing layers
-    as outputs, constants, or variables.
+    This class is designed to track the relationship between neurons and
+    layers in a neural network. It allows for the assignment of properties
+    (like minimum and maximum values, and whether a layer is an output,
+    constant, or variable) to each neuron. The data is stored in
+    dictionaries and sets for efficient lookups.
 
-    This allows users to easily place constraints on parts of the network by
-    referencing the name that is configured in this class.
+    Attributes:
+        neuron_to_layer (dict): A dictionary mapping neuron names to
+            their corresponding layer names.
+        neuron_to_index (dict): A dictionary mapping neuron names to
+            their corresponding indices in the layers.
+        constant_layers (set): A set of layer names that represent
+            constant layers.
+        variable_layers (set): A set of layer names that represent
+            variable layers.
     """
 
     def __init__(
         self,
     ):
         """
-        Initialize the Descriptor class with empty mappings for neurons and layers.
-
-        This includes:
-        - `neuron_to_layer`: A dictionary mapping neuron names to their corresponding layer names.
-        - `neuron_to_index`: A dictionary mapping neuron names to their corresponding index within a layer.
-        - `output_layers`: A set that holds the names of layers marked as output layers.
-        - `constant_layers`: A set that holds the names of layers marked as constant layers.
-        - `variable_layers`: A set that holds the names of layers marked as variable layers.
+        Initializes the Descriptor object.
         """
 
-        # Define dictionaries that will translate neuron names to layer and index
+        # Define dictionaries that will translate neuron
+        # names to layer and index
         self.neuron_to_layer: dict[str, str] = {}
         self.neuron_to_index: dict[str, int] = {}
 
         # Define sets that will hold the layers based on which type
-        self.output_layers: set[str] = set()
         self.constant_layers: set[str] = set()
         self.variable_layers: set[str] = set()
 
     def add(
         self,
         layer_name: str,
-        neuron_names: list[str],
-        output: bool = False,
+        index: int,
+        neuron_name: str,
         constant: bool = False,
     ):
         """
-        Add a layer to the descriptor, associating it with neurons and marking it
-        as an output or constant layer.
+        Adds a neuron to the descriptor with its associated layer,
+        index, and properties.
+
+        This method registers a neuron name and associates it with a
+        layer, its index, and optional properties such as whether
+        the layer is an output or constant layer.
 
         Args:
-            layer_name (str): The name of the layer to be added.
-            neuron_names (list[str]): A list of neuron names that belong to the layer.
-            output (bool, optional): If True, mark this layer as an output layer. Defaults to False.
-            constant (bool, optional): If True, mark this layer as a constant layer. Defaults to False.
+            layer_name (str): The name of the layer where the neuron is located.
+            index (int): The index of the neuron within the layer.
+            neuron_name (str): The name of the neuron.
+            constant (bool, optional): Whether the layer is a constant layer.
+                Defaults to False.
+
+        Raises:
+            TypeError: If a provided attribute has an incompatible type.
+            ValueError: If a layer or index is already assigned for a neuron
+                or a duplicate index is used within a layer.
+
         """
 
-        if output:
-            self.output_layers.add(layer_name)
+        # Type checking
+        validate_type("layer_name", layer_name, str)
+        validate_type("index", index, int)
+        validate_type("neuron_name", neuron_name, str)
+        validate_type("constant", constant, bool)
+
+        # Other validations
+        if neuron_name in self.neuron_to_layer:
+            raise ValueError(
+                "There already is a layer registered for the neuron with name "
+                f"'{neuron_name}'. Please use a unique name for each neuron."
+            )
+
+        if neuron_name in self.neuron_to_index:
+            raise ValueError(
+                "There already is an index registered for the neuron with name "
+                f"'{neuron_name}'. Please use a unique name for each neuron."
+            )
+
+        for existing_neuron, assigned_index in self.neuron_to_index.items():
+            if (
+                assigned_index == index
+                and self.neuron_to_layer[existing_neuron] == layer_name
+            ):
+                raise ValueError(
+                    f"The index {index} in layer {layer_name} is already "
+                    "assigned. Every neuron must be assigned a different "
+                    "index that matches the network's output."
+                )
 
+        # Add to dictionaries and sets
         if constant:
             self.constant_layers.add(layer_name)
         else:
             self.variable_layers.add(layer_name)
 
-        for index, neuron_name in enumerate(neuron_names):
-            self.neuron_to_layer[neuron_name] = layer_name
-            self.neuron_to_index[neuron_name] = index
+        self.neuron_to_layer[neuron_name] = layer_name
+        self.neuron_to_index[neuron_name] = index
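
The breaking change here is the `add` signature: 0.1.0 registered a whole layer at once (a list of neuron names, with indices implied by list order, plus an `output` flag), while 1.0.1 registers one neuron per call with an explicit index and validates duplicates. A minimal migration sketch; the layer and neuron names are hypothetical:

```python
from congrads.descriptor import Descriptor

descriptor = Descriptor()

# 0.1.0 style (no longer available):
# descriptor.add("output", ["power", "efficiency"], output=True)

# 1.0.1 style: one call per neuron, with the index matching the
# neuron's position in that layer's output tensor.
descriptor.add("output", 0, "power")
descriptor.add("output", 1, "efficiency")

# Constant layers are still flagged per call; reusing a neuron name,
# or an index within the same layer, now raises ValueError.
descriptor.add("input", 0, "temperature", constant=True)
```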
congrads/metrics.py CHANGED
@@ -1,64 +1,211 @@
-from torch import Tensor, tensor, sum, numel
-from torchmetrics import Metric
+"""
+This module defines the `Metric` and `MetricManager` classes, which are
+used to track and aggregate performance metrics during model training or
+evaluation in machine learning. These classes support the accumulation of
+metric values, aggregation using customizable functions (such as mean),
+and resetting of the metrics.
 
-# NOTE
+Classes:
 
+- Metric: A class that tracks and aggregates a specific metric over
+  multiple samples, allowing for accumulation, aggregation, and
+  resetting of values.
+- MetricManager: A class that manages and tracks multiple metrics
+  during model training or evaluation, supporting registration,
+  accumulation, aggregation, and resetting of metrics.
 
-class ConstraintSatisfactionRatio(Metric):
+Key Methods:
+
+- `Metric.__init__`: Initializes a metric with a specified name and
+  optional accumulator function (defaults to `nanmean`).
+- `Metric.accumulate`: Accumulates a new value for the metric,
+  typically a tensor of model output or performance.
+- `Metric.aggregate`: Aggregates the accumulated values using the
+  specified accumulator function.
+- `Metric.reset`: Resets the accumulated values and sample count for
+  the metric.
+- `MetricManager.__init__`: Initializes a manager for multiple metrics.
+- `MetricManager.register`: Registers a new metric with a name, group,
+  and optional accumulator function.
+- `MetricManager.accumulate`: Accumulates a new value for the specified
+  metric.
+- `MetricManager.aggregate`: Aggregates all metrics in a specified group.
+- `MetricManager.reset`: Resets all registered metrics in a specified
+  group.
+
+Each class provides functionality to efficiently track, aggregate, and reset
+metrics during the training and evaluation phases of machine learning tasks,
+supporting flexible aggregation strategies and group-based management of
+metrics.
+"""
+
+from typing import Callable
+
+from torch import Tensor, cat, nanmean
+
+from .utils import validate_callable, validate_type
+
+
+class Metric:
     """
-    A custom metric to calculate the ratio of satisfied constraints in a neural network model.
-    It computes the proportion of constraints that have been satisfied,
-    where satisfaction is determined based on the provided constraint results.
+    A class that tracks and aggregates a specific metric over multiple samples.
 
-    This metric tracks the number of unsatisfied constraints and the total number of constraints
-    during the training process, and computes the ratio of satisfied constraints once all updates
-    have been made.
+    This class allows the accumulation of values, their aggregation using a
+    specified function (e.g., mean), and the ability to reset the metrics.
+    It is typically used to track performance metrics during training or
+    evaluation processes in machine learning.
+
+    Args:
+        name (str): The name of the metric.
+        accumulator (Callable[..., Tensor], optional): A function used to
+            aggregate values (defaults to `nanmean`).
 
     Attributes:
-        unsatisfied (Tensor): Tracks the number of unsatisfied constraints.
-        total (Tensor): Tracks the total number of constraints processed.
+        name (str): The name of the metric.
+        accumulator (Callable[..., Tensor]): The function used to aggregate
+            values.
+        values (list): A list to store accumulated values.
+        sample_count (int): The count of accumulated samples.
 
-    Note:
-        For more information about custom metrics, we refer to the Pytorch Lightning documentation
-        at https://lightning.ai/docs/torchmetrics/stable/pages/implement.html
     """
 
-    def __init__(self, **kwargs):
+    def __init__(
+        self,
+        name: str,
+        accumulator: Callable[..., Tensor] = nanmean,
+    ) -> None:
+        """
+        Constructor method
+        """
+
+        # Type checking
+        validate_type("name", name, str)
+        validate_callable("accumulator", accumulator)
+
+        self.name = name
+        self.accumulator = accumulator
+
+        self.values = []
+        self.sample_count = 0
+
+    def accumulate(self, value: Tensor) -> None:
         """
-        Initializes the ConstraintSatisfactionRatio metric by setting up the
-        state variables to track the number of unsatisfied and total constraints.
+        Accumulates a new value for the metric.
 
         Args:
-            **kwargs: Additional arguments to pass to the base Metric class constructor.
+            value (Tensor): The new value to accumulate, typically a
+                tensor of model output or performance.
         """
 
-        # Init parent class
-        super().__init__(**kwargs)
+        self.values.append(value)
+        self.sample_count += value.size(0)
 
-        # Init scalar tensors that will hold metric values
-        self.add_state("unsatisfied", default=tensor(0), dist_reduce_fx="sum")
-        self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
+    def aggregate(self) -> Tensor:
+        """
+        Aggregates the accumulated values using the specified
+        accumulator function.
 
-    def update(self, constraint_result: Tensor) -> None:
+        Returns:
+            Tensor: The aggregated result of the accumulated values.
+        """
+
+        combined = cat(self.values)
+        return self.accumulator(combined)
+
+    def reset(self) -> None:
+        """
+        Resets the accumulated values and sample count for the metric.
         """
-        Updates the state of the metric with the latest constraint results.
+
+        self.values = []
+        self.sample_count = 0
+
+
+class MetricManager:
+    """
+    A class to manage and track multiple metrics during model
+    training or evaluation.
+
+    This class allows registering metrics, accumulating values for each metric,
+    and recording the aggregated values. It also supports the reset of metrics
+    after each epoch or training step.
+
+    Attributes:
+        metrics (dict[str, Metric]): A dictionary of registered metrics.
+        groups (dict[str, str]): A dictionary mapping metric names to groups.
+    """
+
+    def __init__(self) -> None:
+        """
+        Constructor method
+        """
+
+        self.metrics: dict[str, Metric] = {}
+        self.groups: dict[str, str] = {}
+
+    def register(
+        self,
+        name: str,
+        group: str,
+        accumulator: Callable[..., Tensor] = nanmean,
+    ) -> None:
+        """
+        Registers a new metric with the specified name and accumulator function.
 
         Args:
-            constraint_result (Tensor): A tensor representing the result of
-                                        the constraint checks, where each
-                                        element indicates whether a constraint
-                                        is satisfied (e.g., 0 for satisfied,
-                                        1 for unsatisfied).
+            name (str): The name of the metric to register.
+            group (str): The name of the group to assign the metric to.
+            accumulator (Callable[..., Tensor], optional): The function used
+                to aggregate values for the metric (defaults to `nanmean`).
+        """
+
+        # Type checking
+        validate_type("name", name, str)
+        validate_type("group", group, str)
+        validate_callable("accumulator", accumulator)
+
+        self.metrics[name] = Metric(name, accumulator)
+        self.groups[name] = group
+
+    def accumulate(self, name: str, value: Tensor) -> None:
         """
-        self.unsatisfied += sum(constraint_result)
-        self.total += numel(constraint_result)
+        Accumulates a new value for the specified metric.
 
-    def compute(self) -> Tensor:
+        Args:
+            name (str): The name of the metric.
+            value (Tensor): The new value to accumulate.
         """
-        Computes the constraint satisfaction ratio, defined as:
-        1 - (number of unsatisfied constraints / total constraints).
+
+        self.metrics[name].accumulate(value)
+
+    def aggregate(self, group: str) -> dict[str, Tensor]:
+        """
+        Aggregates all metrics in a group using the accumulators
+        specified during registration.
+
+        Args:
+            group (str): The name of the group.
 
         Returns:
-            Tensor: The satisfaction ratio as a scalar tensor.
+            dict[str, Tensor]: A dictionary with the metric names and the
+                corresponding aggregated values of the selected group.
+        """
+
+        return {
+            name: metric.aggregate()
+            for name, metric in self.metrics.items()
+            if self.groups[name] == group
+        }
+
+    def reset(self, group: str) -> None:
+        """
+        Resets all registered metrics in a group.
+
+        Args:
+            group (str): The name of the group.
         """
-        return 1 - (self.unsatisfied.float() / self.total)
+
+        for name, metric in self.metrics.items():
+            if self.groups[name] == group:
+                metric.reset()
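
In 0.1.0 the only metric was the torchmetrics-based `ConstraintSatisfactionRatio`; 1.0.1 replaces it with the self-contained `Metric`/`MetricManager` pair above. A minimal sketch of the new workflow based on the signatures in this diff; the metric and group names are hypothetical:

```python
import torch

from congrads.metrics import MetricManager

manager = MetricManager()

# Each metric belongs to a group, so a whole phase can be handled at once.
manager.register("loss", group="train")  # aggregated with nanmean by default
manager.register("satisfaction", group="train")

# Accumulate one tensor per batch during the epoch.
manager.accumulate("loss", torch.tensor([0.9, 0.7]))
manager.accumulate("loss", torch.tensor([0.5, 0.4]))
manager.accumulate("satisfaction", torch.tensor([1.0, 0.5]))

# Aggregate and reset the whole group at the end of the epoch.
print(manager.aggregate("train"))
# {'loss': tensor(0.6250), 'satisfaction': tensor(0.7500)}
manager.reset("train")
```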
congrads/networks.py CHANGED
@@ -1,4 +1,32 @@
-from torch.nn import Linear, Sequential, ReLU, Module
+"""
+This module defines the `MLPNetwork` class, which constructs and
+operates a multi-layer perceptron (MLP) neural network model. The MLP
+network consists of an input layer, multiple hidden layers, and an
+output layer. It allows for configurable hyperparameters such as the
+number of input features, output features, number of hidden layers,
+and the dimensionality of the hidden layers.
+
+Classes:
+
+- MLPNetwork: A neural network model that implements a multi-layer
+  perceptron with customizable layers and dimensionalities.
+
+Key Methods:
+
+- `__init__`: Initializes the MLP network with specified input size,
+  output size, number of hidden layers, and hidden layer dimensionality.
+- `forward`: Performs a forward pass through the network, returning
+  both the input and output of the model.
+- `linear`: Creates a basic linear block consisting of a Linear layer
+  followed by a ReLU activation function.
+
+The `MLPNetwork` class constructs a fully connected neural network with
+multiple hidden layers, providing flexibility in designing the network
+architecture. It can be used for regression, classification, or other
+machine learning tasks that require a feedforward neural network structure.
+"""
+
+from torch.nn import Linear, Module, ReLU, Sequential
 
 
 class MLPNetwork(Module):
@@ -11,33 +39,26 @@ class MLPNetwork(Module):
     the dimensionality of hidden layers. It provides methods for both
     building the model and performing a forward pass through the network.
 
-    Attributes:
-        n_inputs (int): The number of input features.
-        n_outputs (int): The number of output features.
-        n_hidden_layers (int): The number of hidden layers in the network.
-        hidden_dim (int): The dimensionality of the hidden layers.
-        input (nn.Module): The input layer (linear transformation followed by ReLU).
-        hidden (nn.Module): The sequential hidden layers (each consisting of
-            a linear transformation followed by ReLU).
-        out (nn.Module): The output layer (linear transformation).
+    Args:
+        n_inputs (int, optional): The number of input features. Defaults to 25.
+        n_outputs (int, optional): The number of output features. Defaults to 2.
+        n_hidden_layers (int, optional): The number of hidden layers.
+            Defaults to 2.
+        hidden_dim (int, optional): The dimensionality of the hidden layers.
+            Defaults to 35.
     """
 
     def __init__(
         self,
-        n_inputs=25,
-        n_outputs=2,
-        n_hidden_layers=2,
+        n_inputs,
+        n_outputs,
+        n_hidden_layers=3,
         hidden_dim=35,
     ):
         """
-        Initializes the MLP network with the given hyperparameters.
-
-        Args:
-            n_inputs (int, optional): The number of input features. Defaults to 25.
-            n_outputs (int, optional): The number of output features. Defaults to 2.
-            n_hidden_layers (int, optional): The number of hidden layers. Defaults to 2.
-            hidden_dim (int, optional): The dimensionality of the hidden layers. Defaults to 35.
+        Initializes the MLPNetwork.
         """
+
         super().__init__()
 
         # Init object variables
@@ -47,7 +68,7 @@ class MLPNetwork(Module):
         self.hidden_dim = hidden_dim
 
         # Set up the components of our model
-        self.input = self.linear(self.n_inputs, self.hidden_dim)
+        self.input = Linear(self.n_inputs, self.hidden_dim)
         self.hidden = Sequential(
             *(
                 self.linear(self.hidden_dim, self.hidden_dim)
@@ -56,21 +77,21 @@ class MLPNetwork(Module):
         )
         self.out = Linear(self.hidden_dim, self.n_outputs)
 
-    def forward(self, X):
+    def forward(self, data):
         """
         Performs a forward pass through the network.
 
         Args:
-            X (Tensor): The input tensor to be passed through the network.
+            data (Tensor): The input tensor to be passed through the network.
 
         Returns:
             dict: A dictionary containing the 'input' (original input) and
-                  'output' (predicted output) of the network.
+                'output' (predicted output) of the network.
         """
-        input = X
-        output = self.out(self.hidden(self.input(X)))
 
-        return {"input": input, "output": output}
+        output = self.out(self.hidden(self.input(data)))
+
+        return {"input": data, "output": output}
 
     @staticmethod
     def linear(in_features, out_features):
@@ -83,8 +104,10 @@ class MLPNetwork(Module):
             out_features (int): The number of output features.
 
         Returns:
-            nn.Module: A sequential module consisting of a Linear layer and ReLU activation.
+            nn.Module: A sequential module consisting of a Linear layer
+                and ReLU activation.
         """
+
         return Sequential(
             Linear(in_features, out_features),
             ReLU(),
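
A few call-site changes are easy to miss in these hunks: `n_inputs` and `n_outputs` no longer have defaults (the new docstring's "Defaults to 25"/"Defaults to 2" wording is stale), `n_hidden_layers` now defaults to 3 rather than 2, and the input block is a bare `Linear` without the ReLU it previously had. A minimal construction sketch under those assumptions; the sizes are hypothetical:

```python
import torch

from congrads.networks import MLPNetwork

# n_inputs and n_outputs are now required arguments.
network = MLPNetwork(n_inputs=25, n_outputs=2)

batch = torch.randn(8, 25)
result = network(batch)

# forward still returns a dict with the original input and the prediction.
print(result["output"].shape)  # torch.Size([8, 2])
```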
requirements.txt ADDED
@@ -0,0 +1,6 @@
+numpy==2.2.2
+pandas==2.2.3
+setuptools==75.6.0
+torch==2.5.0
+torchvision==0.20.0
+tqdm==4.66.5