congrads-0.2.0-py3-none-any.whl → congrads-1.0.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
congrads/metrics.py CHANGED
@@ -1,49 +1,211 @@
+"""
+This module defines the `Metric` and `MetricManager` classes, which are
+used to track and aggregate performance metrics during model training or
+evaluation in machine learning. These classes support the accumulation of
+metric values, aggregation using customizable functions (such as mean),
+and resetting of the metrics.
+
+Classes:
+
+- Metric: A class that tracks and aggregates a specific metric over
+  multiple samples, allowing for accumulation, aggregation, and
+  resetting of values.
+- MetricManager: A class that manages and tracks multiple metrics
+  during model training or evaluation, supporting registration,
+  accumulation, aggregation, and resetting of metrics.
+
+Key Methods:
+
+- `Metric.__init__`: Initializes a metric with a specified name and
+  optional accumulator function (defaults to `nanmean`).
+- `Metric.accumulate`: Accumulates a new value for the metric,
+  typically a tensor of model output or performance.
+- `Metric.aggregate`: Aggregates the accumulated values using the
+  specified accumulator function.
+- `Metric.reset`: Resets the accumulated values and sample count for
+  the metric.
+- `MetricManager.__init__`: Initializes a manager for multiple metrics.
+- `MetricManager.register`: Registers a new metric with a name, group,
+  and optional accumulator function.
+- `MetricManager.accumulate`: Accumulates a new value for the specified
+  metric.
+- `MetricManager.aggregate`: Aggregates all metrics in a specified group.
+- `MetricManager.reset`: Resets all registered metrics in a specified
+  group.
+
+Each class provides functionality to efficiently track, aggregate, and reset
+metrics during the training and evaluation phases of machine learning tasks,
+supporting flexible aggregation strategies and group-based management of
+metrics.
+"""
+
 from typing import Callable
-from torch import Tensor, mean, cat
-from torch.utils.tensorboard import SummaryWriter
+
+from torch import Tensor, cat, nanmean
+
+from .utils import validate_callable, validate_type
 
 
 class Metric:
+    """
+    A class that tracks and aggregates a specific metric over multiple samples.
+
+    This class allows the accumulation of values, their aggregation using a
+    specified function (e.g., mean), and the ability to reset the metrics.
+    It is typically used to track performance metrics during training or
+    evaluation processes in machine learning.
+
+    Args:
+        name (str): The name of the metric.
+        accumulator (Callable[..., Tensor], optional): A function used to
+            aggregate values (defaults to `nanmean`).
+
+    Attributes:
+        name (str): The name of the metric.
+        accumulator (Callable[..., Tensor]): The function used to aggregate
+            values.
+        values (list): A list to store accumulated values.
+        sample_count (int): The count of accumulated samples.
+
+    """
+
     def __init__(
-        self, name: str, accumulator: Callable[..., Tensor] = mean, device=None
+        self,
+        name: str,
+        accumulator: Callable[..., Tensor] = nanmean,
     ) -> None:
+        """
+        Constructor method
+        """
+
+        # Type checking
+        validate_type("name", name, str)
+        validate_callable("accumulator", accumulator)
+
         self.name = name
         self.accumulator = accumulator
-        self.device = device
 
         self.values = []
         self.sample_count = 0
 
     def accumulate(self, value: Tensor) -> None:
+        """
+        Accumulates a new value for the metric.
+
+        Args:
+            value (Tensor): The new value to accumulate, typically a
+                tensor of model output or performance.
+        """
+
         self.values.append(value)
         self.sample_count += value.size(0)
 
     def aggregate(self) -> Tensor:
+        """
+        Aggregates the accumulated values using the specified
+        accumulator function.
+
+        Returns:
+            Tensor: The aggregated result of the accumulated values.
+        """
+
         combined = cat(self.values)
         return self.accumulator(combined)
 
     def reset(self) -> None:
+        """
+        Resets the accumulated values and sample count for the metric.
+        """
+
         self.values = []
         self.sample_count = 0
 
 
 class MetricManager:
-    def __init__(self, writer: SummaryWriter, device: str) -> None:
-        self.writer = writer
-        self.device = device
+    """
+    A class to manage and track multiple metrics during model
+    training or evaluation.
+
+    This class allows registering metrics, accumulating values for each metric,
+    and recording the aggregated values. It also supports the reset of metrics
+    after each epoch or training step.
+
+    Attributes:
+        metrics (dict[str, Metric]): A dictionary of registered metrics.
+        groups (dict[str, str]): A dictionary mapping metric names to groups.
+    """
+
+    def __init__(self) -> None:
+        """
+        Constructor method
+        """
+
         self.metrics: dict[str, Metric] = {}
+        self.groups: dict[str, str] = {}
 
-    def register(self, name: str, accumulator: Callable[..., Tensor] = mean) -> None:
-        self.metrics[name] = Metric(name, accumulator, self.device)
+    def register(
+        self,
+        name: str,
+        group: str,
+        accumulator: Callable[..., Tensor] = nanmean,
+    ) -> None:
+        """
+        Registers a new metric with the specified name and accumulator function.
+
+        Args:
+            name (str): The name of the metric to register.
+            group (str): The name of the group to assign the metric to.
+            accumulator (Callable[..., Tensor], optional): The function used
+                to aggregate values for the metric (defaults to `nanmean`).
+        """
+
+        # Type checking
+        validate_type("name", name, str)
+        validate_type("group", group, str)
+        validate_callable("accumulator", accumulator)
+
+        self.metrics[name] = Metric(name, accumulator)
+        self.groups[name] = group
 
     def accumulate(self, name: str, value: Tensor) -> None:
+        """
+        Accumulates a new value for the specified metric.
+
+        Args:
+            name (str): The name of the metric.
+            value (Tensor): The new value to accumulate.
+        """
+
         self.metrics[name].accumulate(value)
 
-    def record(self, epoch: int) -> None:
-        for name, metric in self.metrics.items():
-            result = metric.aggregate()
-            self.writer.add_scalar(name, result.item(), epoch)
+    def aggregate(self, group: str) -> dict[str, Tensor]:
+        """
+        Aggregates all metrics in a group using the accumulators
+        specified during registration.
 
-    def reset(self) -> None:
-        for metric in self.metrics.values():
-            metric.reset()
+        Args:
+            group (str): The name of the group.
+
+        Returns:
+            dict[str, Tensor]: A dictionary with the metric names and the
+                corresponding aggregated values of the selected group.
+        """
+
+        return {
+            name: metric.aggregate()
+            for name, metric in self.metrics.items()
+            if self.groups[name] == group
+        }
+
+    def reset(self, group: str) -> None:
+        """
+        Resets all registered metrics in a group.
+
+        Args:
+            group (str): The name of the group.
+        """
+
+        for name, metric in self.metrics.items():
+            if self.groups[name] == group:
+                metric.reset()
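
In 0.2.0, MetricManager wrote aggregated scalars straight to a TensorBoard SummaryWriter via `record`; in 1.0.2, `aggregate` instead returns the aggregated tensors for a group and leaves logging to the caller. A minimal usage sketch of the new API follows; the metric names, the "train" group, and the `torch.max` accumulator are illustrative choices, not values from the package:

    import torch

    from congrads.metrics import MetricManager

    manager = MetricManager()
    manager.register("loss", group="train")  # aggregated with nanmean by default
    manager.register("loss_max", group="train", accumulator=torch.max)

    for batch_loss in (torch.rand(8), torch.rand(8)):  # stand-in per-sample losses
        manager.accumulate("loss", batch_loss)
        manager.accumulate("loss_max", batch_loss)

    results = manager.aggregate("train")  # {"loss": tensor(...), "loss_max": tensor(...)}
    manager.reset("train")                # clear accumulated values before the next epoch
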
congrads/networks.py CHANGED
@@ -1,4 +1,32 @@
-from torch.nn import Linear, Sequential, ReLU, Module
+"""
+This module defines the `MLPNetwork` class, which constructs and
+operates a multi-layer perceptron (MLP) neural network model. The MLP
+network consists of an input layer, multiple hidden layers, and an
+output layer. It allows for configurable hyperparameters such as the
+number of input features, output features, number of hidden layers,
+and the dimensionality of the hidden layers.
+
+Classes:
+
+- MLPNetwork: A neural network model that implements a multi-layer
+  perceptron with customizable layers and dimensionalities.
+
+Key Methods:
+
+- `__init__`: Initializes the MLP network with specified input size,
+  output size, number of hidden layers, and hidden layer dimensionality.
+- `forward`: Performs a forward pass through the network, returning
+  both the input and output of the model.
+- `linear`: Creates a basic linear block consisting of a Linear layer
+  followed by a ReLU activation function.
+
+The `MLPNetwork` class constructs a fully connected neural network with
+multiple hidden layers, providing flexibility in designing the network
+architecture. It can be used for regression, classification, or other
+machine learning tasks that require a feedforward neural network structure.
+"""
+
+from torch.nn import Linear, Module, ReLU, Sequential
 
 
 class MLPNetwork(Module):
@@ -11,15 +39,13 @@ class MLPNetwork(Module):
     the dimensionality of hidden layers. It provides methods for both
     building the model and performing a forward pass through the network.
 
-    Attributes:
-        n_inputs (int): The number of input features.
-        n_outputs (int): The number of output features.
-        n_hidden_layers (int): The number of hidden layers in the network.
-        hidden_dim (int): The dimensionality of the hidden layers.
-        input (nn.Module): The input layer (linear transformation followed by ReLU).
-        hidden (nn.Module): The sequential hidden layers (each consisting of
-            a linear transformation followed by ReLU).
-        out (nn.Module): The output layer (linear transformation).
+    Args:
+        n_inputs (int, optional): The number of input features. Defaults to 25.
+        n_outputs (int, optional): The number of output features. Defaults to 2.
+        n_hidden_layers (int, optional): The number of hidden layers.
+            Defaults to 2.
+        hidden_dim (int, optional): The dimensionality of the hidden layers.
+            Defaults to 35.
     """
 
     def __init__(
@@ -30,14 +56,9 @@ class MLPNetwork(Module):
         hidden_dim=35,
     ):
         """
-        Initializes the MLP network with the given hyperparameters.
-
-        Args:
-            n_inputs (int, optional): The number of input features. Defaults to 25.
-            n_outputs (int, optional): The number of output features. Defaults to 2.
-            n_hidden_layers (int, optional): The number of hidden layers. Defaults to 2.
-            hidden_dim (int, optional): The dimensionality of the hidden layers. Defaults to 35.
+        Initializes the MLPNetwork.
         """
+
         super().__init__()
 
         # Init object variables
@@ -56,20 +77,21 @@ class MLPNetwork(Module):
         )
         self.out = Linear(self.hidden_dim, self.n_outputs)
 
-    def forward(self, X):
+    def forward(self, data):
         """
         Performs a forward pass through the network.
 
         Args:
-            X (Tensor): The input tensor to be passed through the network.
+            data (Tensor): The input tensor to be passed through the network.
 
         Returns:
             dict: A dictionary containing the 'input' (original input) and
-                'output' (predicted output) of the network.
+            'output' (predicted output) of the network.
         """
-        output = self.out(self.hidden(self.input(X)))
 
-        return {"input": X, "output": output}
+        output = self.out(self.hidden(self.input(data)))
+
+        return {"input": data, "output": output}
 
     @staticmethod
     def linear(in_features, out_features):
@@ -82,8 +104,10 @@ class MLPNetwork(Module):
             out_features (int): The number of output features.
 
         Returns:
-            nn.Module: A sequential module consisting of a Linear layer and ReLU activation.
+            nn.Module: A sequential module consisting of a Linear layer
+                and ReLU activation.
         """
+
         return Sequential(
             Linear(in_features, out_features),
             ReLU(),
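
The forward pass now takes `data` instead of `X` and still returns both the input and the output in a dictionary. A short sketch under the documented defaults (25 inputs, 2 outputs, 2 hidden layers of width 35); the batch size of 4 is arbitrary:

    import torch

    from congrads.networks import MLPNetwork

    network = MLPNetwork()               # n_inputs=25, n_outputs=2 by default
    result = network(torch.rand(4, 25))  # calls forward(data)

    print(result["output"].shape)  # torch.Size([4, 2])
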
congrads/transformations.py ADDED
@@ -0,0 +1,139 @@
+"""
+This module defines the abstract base class `Transformation` and two
+specific transformations: `IdentityTransformation` and `DenormalizeMinMax`.
+These transformations are used to apply operations to neuron data.
+
+Classes:
+
+- Transformation: An abstract base class for transformations that
+  can be applied to neuron data. Subclasses must implement the
+  `__call__` method to apply the transformation.
+- IdentityTransformation: A subclass of `Transformation` that
+  returns the input data unchanged.
+- DenormalizeMinMax: A subclass of `Transformation` that denormalizes
+  input data based on specified minimum and maximum values.
+
+Key Methods:
+
+- `__call__(data: Tensor) -> Tensor`: Abstract method in the
+  `Transformation` class that must be implemented by subclasses to apply
+  a transformation to the input data.
+- `__init__(neuron_name: str)`: Initializes the transformation with the
+  associated neuron name.
+- `IdentityTransformation.__call__(data: Tensor) -> Tensor`: Returns
+  the input data without applying any transformation.
+- `DenormalizeMinMax.__call__(data: Tensor) -> Tensor`: Denormalizes
+  the input data by scaling it based on the specified min and max values.
+
+The `Transformation` class is intended as a base class for creating
+custom transformations for neuron data, while the `IdentityTransformation`
+is used when no transformation is desired, and `DenormalizeMinMax` is used
+for reversing the normalization process by using a min-max scaling approach.
+"""
+
+from abc import ABC, abstractmethod
+from numbers import Number
+
+from torch import Tensor
+
+from .utils import validate_type
+
+
+class Transformation(ABC):
+    """
+    Abstract base class for transformations applied to neuron data.
+
+    Args:
+        neuron_name (str): The name of the neuron associated with
+            the transformation.
+
+    Methods:
+        __call__(data: Tensor) -> Tensor:
+            Applies the transformation to the provided data.
+            Must be implemented by subclasses.
+    """
+
+    def __init__(self, neuron_name: str):
+        validate_type("neuron_name", neuron_name, str)
+
+        super().__init__()
+        self.neuron_name = neuron_name
+
+    @abstractmethod
+    def __call__(self, data: Tensor) -> Tensor:
+        """
+        Abstract method to apply the transformation to the given data.
+
+        Args:
+            data (Tensor): The input data to be transformed.
+
+        Returns:
+            Tensor: The transformed data.
+
+        Must be implemented by subclasses.
+        """
+        raise NotImplementedError
+
+
+class IdentityTransformation(Transformation):
+    """
+    A transformation that returns the data unchanged (identity transformation).
+
+    Inherits from the Transformation class and implements the
+    __call__ method to return the input data as is.
+    """
+
+    def __call__(self, data: Tensor) -> Tensor:
+        """
+        Returns the input data without any transformation.
+
+        Args:
+            data (Tensor): The input data to be returned as is.
+
+        Returns:
+            Tensor: The unchanged input data.
+        """
+        return data
+
+
+class DenormalizeMinMax(Transformation):
+    """
+    A transformation that denormalizes data based on a
+    specified min and max value.
+
+    This transformation scales the data by the range of the min and max values,
+    then adds the min value to denormalize it back to the original scale.
+
+    Args:
+        neuron_name (str): The name of the neuron associated with
+            the transformation.
+        min (Number): The minimum value to scale the data.
+        max (Number): The maximum value to scale the data.
+
+    Methods:
+        __call__(data: Tensor) -> Tensor:
+            Applies the denormalization to the given data by scaling it
+            with the min and max values.
+    """
+
+    # pylint: disable-next=redefined-builtin
+    def __init__(self, neuron_name: str, min: Number, max: Number):
+        validate_type("min", min, Number)
+        validate_type("max", max, Number)
+
+        super().__init__(neuron_name)
+
+        self.min = min
+        self.max = max
+
+    def __call__(self, data: Tensor) -> Tensor:
+        """
+        Denormalizes the input data based on the min and max values.
+
+        Args:
+            data (Tensor): The normalized input data to be denormalized.
+
+        Returns:
+            Tensor: The denormalized data.
+        """
+        return data * (self.max - self.min) + self.min
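
A short sketch of the two concrete transformations; it assumes the new module is importable as `congrads.transformations`, and the neuron name "temperature" and the 0 to 100 range are illustrative, not from the package:

    import torch

    from congrads.transformations import DenormalizeMinMax, IdentityTransformation

    denormalize = DenormalizeMinMax("temperature", min=0.0, max=100.0)
    identity = IdentityTransformation("temperature")

    normalized = torch.tensor([0.0, 0.25, 1.0])
    print(denormalize(normalized))  # tensor([  0.,  25., 100.])
    print(identity(normalized))     # returned unchanged
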