mxlpy 0.17.0__py3-none-any.whl → 0.19.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
mxlpy/npe/_torch.py CHANGED
@@ -12,18 +12,15 @@ Functions:
  from __future__ import annotations

  from dataclasses import dataclass
- from pathlib import Path
  from typing import TYPE_CHECKING, Self, cast

  import numpy as np
  import pandas as pd
  import torch
- import tqdm
  from torch import nn
  from torch.optim.adam import Adam

- from mxlpy.nn._torch import LSTM, MLP, DefaultDevice
- from mxlpy.parallel import Cache
+ from mxlpy.nn._torch import LSTM, MLP, DefaultDevice, train
  from mxlpy.types import AbstractEstimator

  if TYPE_CHECKING:
@@ -31,19 +28,17 @@ if TYPE_CHECKING:

      from torch.optim.optimizer import ParamsT

- DefaultCache = Cache(Path(".cache"))

  type LossFn = Callable[[torch.Tensor, torch.Tensor], torch.Tensor]

  __all__ = [
-     "DefaultCache",
      "LossFn",
-     "TorchSteadyState",
-     "TorchSteadyStateTrainer",
-     "TorchTimeCourse",
-     "TorchTimeCourseTrainer",
-     "train_torch_steady_state",
-     "train_torch_time_course",
+     "SteadyState",
+     "SteadyStateTrainer",
+     "TimeCourse",
+     "TimeCourseTrainer",
+     "train_steady_state",
+     "train_time_course",
  ]


@@ -62,7 +57,7 @@ def _mean_abs(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:


  @dataclass(kw_only=True)
- class TorchSteadyState(AbstractEstimator):
+ class SteadyState(AbstractEstimator):
      """Estimator for steady state data using PyTorch models."""

      model: torch.nn.Module
@@ -75,7 +70,7 @@ class TorchSteadyState(AbstractEstimator):


  @dataclass(kw_only=True)
- class TorchTimeCourse(AbstractEstimator):
+ class TimeCourse(AbstractEstimator):
      """Estimator for time course data using PyTorch models."""

      model: torch.nn.Module
@@ -102,13 +97,13 @@ class TorchTimeCourse(AbstractEstimator):


  @dataclass
- class TorchSteadyStateTrainer:
+ class SteadyStateTrainer:
      """Trainer for steady state data using PyTorch models."""

      features: pd.DataFrame
      targets: pd.DataFrame
-     approximator: nn.Module
-     optimimzer: Adam
+     model: nn.Module
+     optimizer: Adam
      device: torch.device
      losses: list[pd.Series]
      loss_fn: LossFn
@@ -117,8 +112,8 @@ class TorchSteadyStateTrainer:
          self,
          features: pd.DataFrame,
          targets: pd.DataFrame,
-         approximator: nn.Module | None = None,
-         optimimzer_cls: Callable[[ParamsT], Adam] = Adam,
+         model: nn.Module | None = None,
+         optimizer_cls: Callable[[ParamsT], Adam] = Adam,
          device: torch.device = DefaultDevice,
          loss_fn: LossFn = _mean_abs,
      ) -> None:
@@ -127,8 +122,8 @@ class TorchSteadyStateTrainer:
          Args:
              features: DataFrame containing the input features for training
              targets: DataFrame containing the target values for training
-             approximator: Predefined neural network model (None to use default MLP)
-             optimimzer_cls: Optimizer class to use for training (default: Adam)
+             model: Predefined neural network model (None to use default MLP)
+             optimizer_cls: Optimizer class to use for training (default: Adam)
              device: Device to run the training on (default: DefaultDevice)
              loss_fn: Loss function

@@ -136,15 +131,15 @@ class TorchSteadyStateTrainer:
          self.features = features
          self.targets = targets

-         if approximator is None:
+         if model is None:
              n_hidden = max(2 * len(features.columns) * len(targets.columns), 10)
              n_outputs = len(targets.columns)
-             approximator = MLP(
+             model = MLP(
                  n_inputs=len(features.columns),
                  neurons_per_layer=[n_hidden, n_hidden, n_outputs],
              )
-         self.approximator = approximator.to(device)
-         self.optimizer = optimimzer_cls(approximator.parameters())
+         self.model = model.to(device)
+         self.optimizer = optimizer_cls(model.parameters())
          self.device = device
          self.loss_fn = loss_fn
          self.losses = []
@@ -161,28 +156,16 @@ class TorchSteadyStateTrainer:
              batch_size: Size of mini-batches for training (None for full-batch)

          """
-         features = torch.Tensor(self.features.to_numpy(), device=self.device)
-         targets = torch.Tensor(self.targets.to_numpy(), device=self.device)
-
-         if batch_size is None:
-             losses = _train_full(
-                 approximator=self.approximator,
-                 features=features,
-                 targets=targets,
-                 epochs=epochs,
-                 optimizer=self.optimizer,
-                 loss_fn=self.loss_fn,
-             )
-         else:
-             losses = _train_batched(
-                 approximator=self.approximator,
-                 features=features,
-                 targets=targets,
-                 epochs=epochs,
-                 optimizer=self.optimizer,
-                 batch_size=batch_size,
-                 loss_fn=self.loss_fn,
-             )
+         losses = train(
+             model=self.model,
+             features=self.features.to_numpy(),
+             targets=self.targets.to_numpy(),
+             epochs=epochs,
+             optimizer=self.optimizer,
+             batch_size=batch_size,
+             loss_fn=self.loss_fn,
+             device=self.device,
+         )

          if len(self.losses) > 0:
              losses.index += self.losses[-1].index[-1]
@@ -193,22 +176,22 @@ class TorchSteadyStateTrainer:
          """Get the loss history of the training process."""
          return pd.concat(self.losses)

-     def get_estimator(self) -> TorchSteadyState:
+     def get_estimator(self) -> SteadyState:
          """Get the trained estimator."""
-         return TorchSteadyState(
-             model=self.approximator,
+         return SteadyState(
+             model=self.model,
              parameter_names=list(self.targets.columns),
          )


  @dataclass
- class TorchTimeCourseTrainer:
+ class TimeCourseTrainer:
      """Trainer for time course data using PyTorch models."""

      features: pd.DataFrame
      targets: pd.DataFrame
-     approximator: nn.Module
-     optimimzer: Adam
+     model: nn.Module
+     optimizer: Adam
      device: torch.device
      losses: list[pd.Series]
      loss_fn: LossFn
@@ -217,8 +200,8 @@ class TorchTimeCourseTrainer:
          self,
          features: pd.DataFrame,
          targets: pd.DataFrame,
-         approximator: nn.Module | None = None,
-         optimimzer_cls: Callable[[ParamsT], Adam] = Adam,
+         model: nn.Module | None = None,
+         optimizer_cls: Callable[[ParamsT], Adam] = Adam,
          device: torch.device = DefaultDevice,
          loss_fn: LossFn = _mean_abs,
      ) -> None:
@@ -227,8 +210,8 @@ class TorchTimeCourseTrainer:
          Args:
              features: DataFrame containing the input features for training
              targets: DataFrame containing the target values for training
-             approximator: Predefined neural network model (None to use default LSTM)
-             optimimzer_cls: Optimizer class to use for training (default: Adam)
+             model: Predefined neural network model (None to use default LSTM)
+             optimizer_cls: Optimizer class to use for training (default: Adam)
              device: Device to run the training on (default: DefaultDevice)
              loss_fn: Loss function

@@ -236,14 +219,14 @@ class TorchTimeCourseTrainer:
          self.features = features
          self.targets = targets

-         if approximator is None:
-             approximator = LSTM(
+         if model is None:
+             model = LSTM(
                  n_inputs=len(features.columns),
                  n_outputs=len(targets.columns),
                  n_hidden=1,
              ).to(device)
-         self.approximator = approximator.to(device)
-         self.optimizer = optimimzer_cls(approximator.parameters())
+         self.model = model.to(device)
+         self.optimizer = optimizer_cls(model.parameters())
          self.device = device
          self.loss_fn = loss_fn
          self.losses = []
@@ -260,37 +243,22 @@ class TorchTimeCourseTrainer:
              batch_size: Size of mini-batches for training (None for full-batch)

          """
-         features = torch.Tensor(
-             np.swapaxes(
+         losses = train(
+             model=self.model,
+             features=np.swapaxes(
                  self.features.to_numpy().reshape(
                      (len(self.targets), -1, len(self.features.columns))
                  ),
                  axis1=0,
                  axis2=1,
              ),
+             targets=self.targets.to_numpy(),
+             epochs=epochs,
+             optimizer=self.optimizer,
+             batch_size=batch_size,
+             loss_fn=self.loss_fn,
              device=self.device,
          )
-         targets = torch.Tensor(self.targets.to_numpy(), device=self.device)
-
-         if batch_size is None:
-             losses = _train_full(
-                 approximator=self.approximator,
-                 features=features,
-                 targets=targets,
-                 epochs=epochs,
-                 optimizer=self.optimizer,
-                 loss_fn=self.loss_fn,
-             )
-         else:
-             losses = _train_batched(
-                 approximator=self.approximator,
-                 features=features,
-                 targets=targets,
-                 epochs=epochs,
-                 optimizer=self.optimizer,
-                 batch_size=batch_size,
-                 loss_fn=self.loss_fn,
-             )

          if len(self.losses) > 0:
              losses.index += self.losses[-1].index[-1]
@@ -301,66 +269,23 @@ class TorchTimeCourseTrainer:
          """Get the loss history of the training process."""
          return pd.concat(self.losses)

-     def get_estimator(self) -> TorchTimeCourse:
+     def get_estimator(self) -> TimeCourse:
          """Get the trained estimator."""
-         return TorchTimeCourse(
-             model=self.approximator,
+         return TimeCourse(
+             model=self.model,
              parameter_names=list(self.targets.columns),
          )


- def _train_batched(
-     approximator: nn.Module,
-     features: torch.Tensor,
-     targets: torch.Tensor,
-     epochs: int,
-     optimizer: Adam,
-     batch_size: int,
-     loss_fn: LossFn,
- ) -> pd.Series:
-     losses = {}
-     for epoch in tqdm.trange(epochs):
-         permutation = torch.randperm(features.size()[0])
-         epoch_loss = 0
-         for i in range(0, features.size()[0], batch_size):
-             optimizer.zero_grad()
-             indices = permutation[i : i + batch_size]
-             loss = loss_fn(approximator(features[indices]), targets[indices])
-             loss.backward()
-             optimizer.step()
-             epoch_loss += loss.detach().numpy()
-
-         losses[epoch] = epoch_loss / (features.size()[0] / batch_size)
-     return pd.Series(losses, dtype=float)
-
-
- def _train_full(
-     approximator: nn.Module,
-     features: torch.Tensor,
-     targets: torch.Tensor,
-     epochs: int,
-     optimizer: Adam,
-     loss_fn: LossFn,
- ) -> pd.Series:
-     losses = {}
-     for i in tqdm.trange(epochs):
-         optimizer.zero_grad()
-         loss = loss_fn(approximator(features), targets)
-         loss.backward()
-         optimizer.step()
-         losses[i] = loss.detach().numpy()
-     return pd.Series(losses, dtype=float)
-
-
- def train_torch_steady_state(
+ def train_steady_state(
      features: pd.DataFrame,
      targets: pd.DataFrame,
      epochs: int,
      batch_size: int | None = None,
-     approximator: nn.Module | None = None,
-     optimimzer_cls: Callable[[ParamsT], Adam] = Adam,
+     model: nn.Module | None = None,
+     optimizer_cls: Callable[[ParamsT], Adam] = Adam,
      device: torch.device = DefaultDevice,
- ) -> tuple[TorchSteadyState, pd.Series]:
+ ) -> tuple[SteadyState, pd.Series]:
      """Train a PyTorch steady state estimator.

      This function trains a neural network model to estimate steady state data
@@ -375,34 +300,34 @@ def train_torch_steady_state(
          targets: DataFrame containing the target values for training
          epochs: Number of training epochs
          batch_size: Size of mini-batches for training (None for full-batch)
-         approximator: Predefined neural network model (None to use default MLP)
-         optimimzer_cls: Optimizer class to use for training (default: Adam)
+         model: Predefined neural network model (None to use default MLP)
+         optimizer_cls: Optimizer class to use for training (default: Adam)
          device: Device to run the training on (default: DefaultDevice)

      Returns:
          tuple[TorchTimeSeriesEstimator, pd.Series]: Trained estimator and loss history

      """
-     trainer = TorchSteadyStateTrainer(
+     trainer = SteadyStateTrainer(
          features=features,
          targets=targets,
-         approximator=approximator,
-         optimimzer_cls=optimimzer_cls,
+         model=model,
+         optimizer_cls=optimizer_cls,
          device=device,
      ).train(epochs=epochs, batch_size=batch_size)

      return trainer.get_estimator(), trainer.get_loss()


- def train_torch_time_course(
+ def train_time_course(
      features: pd.DataFrame,
      targets: pd.DataFrame,
      epochs: int,
      batch_size: int | None = None,
-     approximator: nn.Module | None = None,
-     optimimzer_cls: Callable[[ParamsT], Adam] = Adam,
+     model: nn.Module | None = None,
+     optimizer_cls: Callable[[ParamsT], Adam] = Adam,
      device: torch.device = DefaultDevice,
- ) -> tuple[TorchTimeCourse, pd.Series]:
+ ) -> tuple[TimeCourse, pd.Series]:
      """Train a PyTorch time course estimator.

      This function trains a neural network model to estimate time course data
@@ -417,19 +342,19 @@ def train_torch_time_course(
          targets: DataFrame containing the target values for training
          epochs: Number of training epochs
          batch_size: Size of mini-batches for training (None for full-batch)
-         approximator: Predefined neural network model (None to use default LSTM)
-         optimimzer_cls: Optimizer class to use for training (default: Adam)
+         model: Predefined neural network model (None to use default LSTM)
+         optimizer_cls: Optimizer class to use for training (default: Adam)
          device: Device to run the training on (default: DefaultDevice)

      Returns:
          tuple[TorchTimeSeriesEstimator, pd.Series]: Trained estimator and loss history

      """
-     trainer = TorchTimeCourseTrainer(
+     trainer = TimeCourseTrainer(
          features=features,
          targets=targets,
-         approximator=approximator,
-         optimimzer_cls=optimimzer_cls,
+         model=model,
+         optimizer_cls=optimizer_cls,
          device=device,
      ).train(epochs=epochs, batch_size=batch_size)

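
The hunks above rename the public NPE API (the Torch prefix is dropped, approximator becomes model, and the misspelled optimimzer_cls becomes optimizer_cls) and fold the module-private _train_full/_train_batched loops into the shared train helper from mxlpy.nn._torch. A minimal migration sketch against the new 0.19.0 signatures shown in the diff; the import path and keyword names are taken from the diff itself, while the DataFrames, column names, and hyperparameters below are illustrative only:

import numpy as np
import pandas as pd

from mxlpy.npe._torch import train_steady_state

# Illustrative training set: 64 steady-state samples, 3 input features,
# 2 target parameters (all names and sizes are made up for the example).
rng = np.random.default_rng(seed=0)
features = pd.DataFrame(rng.normal(size=(64, 3)), columns=["f1", "f2", "f3"])
targets = pd.DataFrame(rng.normal(size=(64, 2)), columns=["k1", "k2"])

# 0.17.0: train_torch_steady_state(..., approximator=..., optimimzer_cls=...)
# 0.19.0: train_steady_state(..., model=..., optimizer_cls=...)
# model=None builds the default MLP; optimizer and device keep their defaults.
estimator, losses = train_steady_state(
    features=features,
    targets=targets,
    epochs=10,
    batch_size=16,  # None falls back to full-batch training
)
print(losses.tail())  # loss history is returned as a pd.Series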