qadence 1.8.0__py3-none-any.whl → 1.9.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. qadence/__init__.py +1 -1
  2. qadence/analog/parse_analog.py +1 -2
  3. qadence/backends/gpsr.py +8 -2
  4. qadence/backends/pulser/backend.py +7 -23
  5. qadence/backends/pyqtorch/backend.py +80 -5
  6. qadence/backends/pyqtorch/config.py +10 -3
  7. qadence/backends/pyqtorch/convert_ops.py +63 -2
  8. qadence/blocks/primitive.py +1 -0
  9. qadence/execution.py +0 -2
  10. qadence/log_config.yaml +10 -0
  11. qadence/measurements/shadow.py +97 -128
  12. qadence/measurements/utils.py +2 -2
  13. qadence/mitigations/readout.py +12 -6
  14. qadence/ml_tools/__init__.py +4 -8
  15. qadence/ml_tools/callbacks/__init__.py +30 -0
  16. qadence/ml_tools/callbacks/callback.py +451 -0
  17. qadence/ml_tools/callbacks/callbackmanager.py +214 -0
  18. qadence/ml_tools/{saveload.py → callbacks/saveload.py} +11 -11
  19. qadence/ml_tools/callbacks/writer_registry.py +441 -0
  20. qadence/ml_tools/config.py +132 -258
  21. qadence/ml_tools/data.py +7 -3
  22. qadence/ml_tools/loss/__init__.py +10 -0
  23. qadence/ml_tools/loss/loss.py +87 -0
  24. qadence/ml_tools/optimize_step.py +45 -10
  25. qadence/ml_tools/stages.py +46 -0
  26. qadence/ml_tools/train_utils/__init__.py +7 -0
  27. qadence/ml_tools/train_utils/base_trainer.py +555 -0
  28. qadence/ml_tools/train_utils/config_manager.py +184 -0
  29. qadence/ml_tools/trainer.py +708 -0
  30. qadence/model.py +1 -1
  31. qadence/noise/__init__.py +2 -2
  32. qadence/noise/protocols.py +18 -53
  33. qadence/operations/ham_evo.py +87 -26
  34. qadence/transpile/noise.py +12 -5
  35. qadence/types.py +15 -3
  36. {qadence-1.8.0.dist-info → qadence-1.9.1.dist-info}/METADATA +3 -4
  37. {qadence-1.8.0.dist-info → qadence-1.9.1.dist-info}/RECORD +39 -32
  38. {qadence-1.8.0.dist-info → qadence-1.9.1.dist-info}/WHEEL +1 -1
  39. qadence/ml_tools/printing.py +0 -154
  40. qadence/ml_tools/train_grad.py +0 -395
  41. qadence/ml_tools/train_no_grad.py +0 -199
  42. qadence/noise/readout.py +0 -218
  43. {qadence-1.8.0.dist-info → qadence-1.9.1.dist-info}/licenses/LICENSE +0 -0
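Most of the churn is in ml_tools: the legacy train_grad.py, train_no_grad.py and printing.py entry points are removed in favour of a Trainer class (trainer.py, built on train_utils/base_trainer.py), a callbacks package into which saveload.py moves, a writer registry for experiment tracking, and new loss and stages modules. A rough sketch of the resulting import surface follows; the module paths are taken from the list above, but which names each __init__.py re-exports is an assumption rather than something verified against the 1.9.1 wheel:

    # Module paths follow the file list above; the re-exported names are assumptions.
    from qadence.ml_tools import TrainConfig, Trainer                   # config.py, trainer.py
    from qadence.ml_tools.callbacks import Callback                     # callbacks/callback.py
    from qadence.ml_tools.callbacks.saveload import load_model, write_checkpoint  # moved module
    from qadence.ml_tools.callbacks.writer_registry import get_writer   # new writer registry
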
qadence/ml_tools/{saveload.py → callbacks/saveload.py}

@@ -11,7 +11,7 @@ from nevergrad.optimization.base import Optimizer as NGOptimizer
  from torch.nn import Module
  from torch.optim import Optimizer

- logger = getLogger(__name__)
+ logger = getLogger("ml_tools")


  def get_latest_checkpoint_name(folder: Path, type: str, device: str | torch.device = "cpu") -> Path:
@@ -19,6 +19,7 @@ def get_latest_checkpoint_name(folder: Path, type: str, device: str | torch.devi
      files = [f for f in os.listdir(folder) if f.endswith(".pt") and type in f]
      if len(files) == 0:
          logger.error(f"Directory {folder} does not contain any {type} checkpoints.")
+         pass
      if len(files) == 1:
          file = Path(files[0])
      else:
@@ -66,8 +67,7 @@ def write_checkpoint(
      iteration: int | str,
  ) -> None:
      from qadence import QuantumModel
-
-     from .models import QNN
+     from qadence.ml_tools.models import QNN

      device = None
      try:
@@ -79,10 +79,8 @@ def write_checkpoint(
          )
          device = str(device).split(":")[0]  # in case of using several CUDA devices
      except Exception as e:
-         msg = (
-             f"Unable to identify in which device the QuantumModel is stored due to {e}."
-             "Setting device to None"
-         )
+         msg = f"""Unable to identify in which device the QuantumModel is stored due to {e}.
+         Setting device to None"""
          logger.warning(msg)

      iteration_substring = f"{iteration:03n}" if isinstance(iteration, int) else iteration
@@ -135,7 +133,9 @@ def load_model(
      model_ckpt_name = get_latest_checkpoint_name(folder, "model", device)

      try:
-         iteration, model_dict = torch.load(folder / model_ckpt_name, *args, **kwargs)
+         iteration, model_dict = torch.load(
+             folder / model_ckpt_name, weights_only=False, *args, **kwargs
+         )
          if isinstance(model, (QuantumModel, QNN)):
              model.load_params_from_dict(model_dict)
          elif isinstance(model, Module):
@@ -146,8 +146,8 @@ def load_model(
          model.to(device)

      except Exception as e:
-         msg = f"Unable to load state dict due to {e}.\
-             No corresponding pre-trained model found. Returning the un-trained model."
+         msg = f"""Unable to load state dict due to {e}.
+         No corresponding pre-trained model found."""
          logger.warning(msg)
      return model, iteration

@@ -162,7 +162,7 @@ def load_optimizer(
      opt_ckpt_name = get_latest_checkpoint_name(folder, "opt", device)
      if os.path.isfile(folder / opt_ckpt_name):
          if isinstance(optimizer, Optimizer):
-             (_, OptType, optimizer_state) = torch.load(folder / opt_ckpt_name)
+             (_, OptType, optimizer_state) = torch.load(folder / opt_ckpt_name, weights_only=False)
              if isinstance(optimizer, OptType):
                  optimizer.load_state_dict(optimizer_state)

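Both torch.load calls above now pass weights_only=False explicitly. The checkpoints written by this module are pickled Python tuples, (iteration, model_dict) for models and (_, OptType, optimizer_state) for optimizers, which recent PyTorch releases refuse to unpickle under their newer weights_only=True default. A minimal sketch of reading such a model checkpoint directly, assuming a hypothetical file name:

    from pathlib import Path

    import torch

    ckpt = Path("checkpoints") / "model_QNN_ckpt_001_device_cpu.pt"  # hypothetical file name
    if ckpt.is_file():
        # Tuple layout as stored by write_checkpoint for models.
        iteration, model_dict = torch.load(ckpt, weights_only=False, map_location="cpu")
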
qadence/ml_tools/callbacks/writer_registry.py (new file)

@@ -0,0 +1,441 @@
+ from __future__ import annotations
+
+ import os
+ from abc import ABC, abstractmethod
+ from logging import getLogger
+ from types import ModuleType
+ from typing import Any, Callable, Union
+ from uuid import uuid4
+
+ from matplotlib.figure import Figure
+ from torch import Tensor
+ from torch.nn import Module
+ from torch.utils.data import DataLoader
+ from torch.utils.tensorboard import SummaryWriter
+
+ from qadence.ml_tools.config import TrainConfig
+ from qadence.ml_tools.data import DictDataLoader, OptimizeResult
+ from qadence.types import ExperimentTrackingTool
+
+ logger = getLogger("ml_tools")
+
+ # Type aliases
+ PlottingFunction = Callable[[Module, int], tuple[str, Figure]]
+ InputData = Union[Tensor, dict[str, Tensor]]
+
+
+ class BaseWriter(ABC):
+     """
+     Abstract base class for experiment tracking writers.
+
+     Methods:
+         open(config, iteration=None): Opens the writer and sets up the logging
+             environment.
+         close(): Closes the writer and finalizes any ongoing logging processes.
+         print_metrics(result): Prints metrics and loss in a formatted manner.
+         write(result): Writes the optimization results to the tracking tool.
+         log_hyperparams(hyperparams): Logs the hyperparameters to the tracking tool.
+         plot(model, iteration, plotting_functions): Logs model plots using provided
+             plotting functions.
+         log_model(model, dataloader): Logs the model and any relevant information.
+     """
+
+     run: Any  # [attr-defined]
+
+     @abstractmethod
+     def open(self, config: TrainConfig, iteration: int | None = None) -> Any:
+         """
+         Opens the writer and prepares it for logging.
+
+         Args:
+             config: Configuration object containing settings for logging.
+             iteration (int, optional): The iteration step to start logging from.
+                 Defaults to None.
+         """
+         raise NotImplementedError("Writers must implement an open method.")
+
+     @abstractmethod
+     def close(self) -> None:
+         """Closes the writer and finalizes logging."""
+         raise NotImplementedError("Writers must implement a close method.")
+
+     @abstractmethod
+     def write(self, result: OptimizeResult) -> None:
+         """
+         Logs the results of the current iteration.
+
+         Args:
+             result (OptimizeResult): The optimization results to log.
+         """
+         raise NotImplementedError("Writers must implement a write method.")
+
+     @abstractmethod
+     def log_hyperparams(self, hyperparams: dict) -> None:
+         """
+         Logs hyperparameters.
+
+         Args:
+             hyperparams (dict): A dictionary of hyperparameters to log.
+         """
+         raise NotImplementedError("Writers must implement a log_hyperparams method.")
+
+     @abstractmethod
+     def plot(
+         self,
+         model: Module,
+         iteration: int,
+         plotting_functions: tuple[PlottingFunction, ...],
+     ) -> None:
+         """
+         Logs plots of the model using provided plotting functions.
+
+         Args:
+             model (Module): The model to plot.
+             iteration (int): The current iteration number.
+             plotting_functions (tuple[PlottingFunction, ...]): Functions used to
+                 generate plots.
+         """
+         raise NotImplementedError("Writers must implement a plot method.")
+
+     @abstractmethod
+     def log_model(
+         self,
+         model: Module,
+         train_dataloader: DataLoader | DictDataLoader | None = None,
+         val_dataloader: DataLoader | DictDataLoader | None = None,
+         test_dataloader: DataLoader | DictDataLoader | None = None,
+     ) -> None:
+         """
+         Logs the model and associated data.
+
+         Args:
+             model (Module): The model to log.
+             train_dataloader (DataLoader | DictDataLoader | None): DataLoader for training data.
+             val_dataloader (DataLoader | DictDataLoader | None): DataLoader for validation data.
+             test_dataloader (DataLoader | DictDataLoader | None): DataLoader for testing data.
+         """
+         raise NotImplementedError("Writers must implement a log_model method.")
+
+     def print_metrics(self, result: OptimizeResult) -> None:
+         """Prints the metrics and loss in a readable format.
+
+         Args:
+             result (OptimizeResult): The optimization results to display.
+         """
+
+         # Find the key in result.metrics that contains "loss" (case-insensitive)
+         loss_key = next((k for k in result.metrics if "loss" in k.lower()), None)
+         if loss_key:
+             loss_value = result.metrics[loss_key]
+             msg = f"Iteration {result.iteration: >7} | {loss_key.title()}: {loss_value:.7f} -"
+         else:
+             msg = f"Iteration {result.iteration: >7} | Loss: None -"
+         msg += " ".join([f"{k}: {v:.7f}" for k, v in result.metrics.items() if k != loss_key])
+         print(msg)
+
+
+ class TensorBoardWriter(BaseWriter):
+     """Writer for logging to TensorBoard.
+
+     Attributes:
+         writer (SummaryWriter): The TensorBoard SummaryWriter instance.
+     """
+
+     def __init__(self) -> None:
+         self.writer = None
+
+     def open(self, config: TrainConfig, iteration: int | None = None) -> SummaryWriter:
+         """
+         Opens the TensorBoard writer.
+
+         Args:
+             config: Configuration object containing settings for logging.
+             iteration (int, optional): The iteration step to start logging from.
+                 Defaults to None.
+
+         Returns:
+             SummaryWriter: The initialized TensorBoard writer.
+         """
+         log_dir = str(config.log_folder)
+         purge_step = iteration if isinstance(iteration, int) else None
+         self.writer = SummaryWriter(log_dir=log_dir, purge_step=purge_step)
+         return self.writer
+
+     def close(self) -> None:
+         """Closes the TensorBoard writer."""
+         if self.writer:
+             self.writer.close()
+
+     def write(self, result: OptimizeResult) -> None:
+         """
+         Logs the results of the current iteration to TensorBoard.
+
+         Args:
+             result (OptimizeResult): The optimization results to log.
+         """
+         # Not writing loss as loss is available in the metrics
+         # if result.loss is not None:
+         #     self.writer.add_scalar("loss", float(result.loss), result.iteration)
+         if self.writer:
+             for key, value in result.metrics.items():
+                 self.writer.add_scalar(key, value, result.iteration)
+         else:
+             raise RuntimeError(
+                 "The writer is not initialized."
+                 "Please call the 'writer.open()' method before writing"
+             )
+
+     def log_hyperparams(self, hyperparams: dict) -> None:
+         """
+         Logs hyperparameters to TensorBoard.
+
+         Args:
+             hyperparams (dict): A dictionary of hyperparameters to log.
+         """
+         if self.writer:
+             self.writer.add_hparams(hyperparams, {})
+         else:
+             raise RuntimeError(
+                 "The writer is not initialized."
+                 "Please call the 'writer.open()' method before writing"
+             )
+
+     def plot(
+         self,
+         model: Module,
+         iteration: int,
+         plotting_functions: tuple[PlottingFunction, ...],
+     ) -> None:
+         """
+         Logs plots of the model using provided plotting functions.
+
+         Args:
+             model (Module): The model to plot.
+             iteration (int): The current iteration number.
+             plotting_functions (tuple[PlottingFunction, ...]): Functions used
+                 to generate plots.
+         """
+         if self.writer:
+             for pf in plotting_functions:
+                 descr, fig = pf(model, iteration)
+                 self.writer.add_figure(descr, fig, global_step=iteration)
+         else:
+             raise RuntimeError(
+                 "The writer is not initialized."
+                 "Please call the 'writer.open()' method before writing"
+             )
+
+     def log_model(
+         self,
+         model: Module,
+         train_dataloader: DataLoader | DictDataLoader | None = None,
+         val_dataloader: DataLoader | DictDataLoader | None = None,
+         test_dataloader: DataLoader | DictDataLoader | None = None,
+     ) -> None:
+         """
+         Logs the model.
+
+         Currently not supported by TensorBoard.
+
+         Args:
+             model (Module): The model to log.
+             train_dataloader (DataLoader | DictDataLoader | None): DataLoader for training data.
+             val_dataloader (DataLoader | DictDataLoader | None): DataLoader for validation data.
+             test_dataloader (DataLoader | DictDataLoader | None): DataLoader for testing data.
+         """
+         logger.warning("Model logging is not supported by tensorboard. No model will be logged.")
+
+
+ class MLFlowWriter(BaseWriter):
+     """
+     Writer for logging to MLflow.
+
+     Attributes:
+         run: The active MLflow run.
+         mlflow: The MLflow module.
+     """
+
+     def __init__(self) -> None:
+         try:
+             from mlflow.entities import Run
+         except ImportError:
+             raise ImportError(
+                 "mlflow is not installed. Please install qadence with the mlflow feature: "
+                 "`pip install qadence[mlflow]`."
+             )
+
+         self.run: Run
+         self.mlflow: ModuleType
+
+     def open(self, config: TrainConfig, iteration: int | None = None) -> ModuleType | None:
+         """
+         Opens the MLflow writer and initializes an MLflow run.
+
+         Args:
+             config: Configuration object containing settings for logging.
+             iteration (int, optional): The iteration step to start logging from.
+                 Defaults to None.
+
+         Returns:
+             mlflow: The MLflow module instance.
+         """
+         import mlflow
+
+         self.mlflow = mlflow
+         tracking_uri = os.getenv("MLFLOW_TRACKING_URI", "")
+         experiment_name = os.getenv("MLFLOW_EXPERIMENT_NAME", str(uuid4()))
+         run_name = os.getenv("MLFLOW_RUN_NAME", str(uuid4()))
+
+         if self.mlflow:
+             self.mlflow.set_tracking_uri(tracking_uri)
+
+             # Create or get the experiment
+             exp_filter_string = f"name = '{experiment_name}'"
+             experiments = self.mlflow.search_experiments(filter_string=exp_filter_string)
+             if not experiments:
+                 self.mlflow.create_experiment(name=experiment_name)
+
+             self.mlflow.set_experiment(experiment_name)
+             self.run = self.mlflow.start_run(run_name=run_name, nested=False)
+
+         return self.mlflow
+
+     def close(self) -> None:
+         """Closes the MLflow run."""
+         if self.run:
+             self.mlflow.end_run()
+
+     def write(self, result: OptimizeResult) -> None:
+         """
+         Logs the results of the current iteration to MLflow.
+
+         Args:
+             result (OptimizeResult): The optimization results to log.
+         """
+         # Not writing loss as loss is available in the metrics
+         # if result.loss is not None:
+         #     self.mlflow.log_metric("loss", float(result.loss), step=result.iteration)
+         if self.mlflow:
+             self.mlflow.log_metrics(result.metrics, step=result.iteration)
+         else:
+             raise RuntimeError(
+                 "The writer is not initialized."
+                 "Please call the 'writer.open()' method before writing"
+             )
+
+     def log_hyperparams(self, hyperparams: dict) -> None:
+         """
+         Logs hyperparameters to MLflow.
+
+         Args:
+             hyperparams (dict): A dictionary of hyperparameters to log.
+         """
+         if self.mlflow:
+             self.mlflow.log_params(hyperparams)
+         else:
+             raise RuntimeError(
+                 "The writer is not initialized."
+                 "Please call the 'writer.open()' method before writing"
+             )
+
+     def plot(
+         self,
+         model: Module,
+         iteration: int,
+         plotting_functions: tuple[PlottingFunction, ...],
+     ) -> None:
+         """
+         Logs plots of the model using provided plotting functions.
+
+         Args:
+             model (Module): The model to plot.
+             iteration (int): The current iteration number.
+             plotting_functions (tuple[PlottingFunction, ...]): Functions used
+                 to generate plots.
+         """
+         if self.mlflow:
+             for pf in plotting_functions:
+                 descr, fig = pf(model, iteration)
+                 self.mlflow.log_figure(fig, descr)
+         else:
+             raise RuntimeError(
+                 "The writer is not initialized."
+                 "Please call the 'writer.open()' method before writing"
+             )
+
+     def get_signature_from_dataloader(
+         self, model: Module, dataloader: DataLoader | DictDataLoader | None
+     ) -> Any:
+         """
+         Infers the signature of the model based on the input data from the dataloader.
+
+         Args:
+             model (Module): The model to use for inference.
+             dataloader (DataLoader | DictDataLoader | None): DataLoader for model inputs.
+
+         Returns:
+             Optional[Any]: The inferred signature, if available.
+         """
+         from mlflow.models import infer_signature
+
+         if dataloader is None:
+             return None
+
+         xs: InputData
+         xs, *_ = next(iter(dataloader))
+         preds = model(xs)
+
+         if isinstance(xs, Tensor):
+             xs = xs.detach().cpu().numpy()
+             preds = preds.detach().cpu().numpy()
+             return infer_signature(xs, preds)
+
+         return None
+
+     def log_model(
+         self,
+         model: Module,
+         train_dataloader: DataLoader | DictDataLoader | None = None,
+         val_dataloader: DataLoader | DictDataLoader | None = None,
+         test_dataloader: DataLoader | DictDataLoader | None = None,
+     ) -> None:
+         """
+         Logs the model and its signature to MLflow using the provided data loaders.
+
+         Args:
+             model (Module): The model to log.
+             train_dataloader (DataLoader | DictDataLoader | None): DataLoader for training data.
+             val_dataloader (DataLoader | DictDataLoader | None): DataLoader for validation data.
+             test_dataloader (DataLoader | DictDataLoader | None): DataLoader for testing data.
+         """
+         if not self.mlflow:
+             raise RuntimeError(
+                 "The writer is not initialized."
+                 "Please call the 'writer.open()' method before writing"
+             )
+
+         signatures = self.get_signature_from_dataloader(model, train_dataloader)
+         self.mlflow.pytorch.log_model(model, artifact_path="model", signature=signatures)
+
+
+ # Writer registry
+ WRITER_REGISTRY = {
+     ExperimentTrackingTool.TENSORBOARD: TensorBoardWriter,
+     ExperimentTrackingTool.MLFLOW: MLFlowWriter,
+ }
+
+
+ def get_writer(tracking_tool: ExperimentTrackingTool) -> BaseWriter:
+     """Factory method to get the appropriate writer based on the tracking tool.
+
+     Args:
+         tracking_tool (ExperimentTrackingTool): The experiment tracking tool to use.
+
+     Returns:
+         BaseWriter: An instance of the appropriate writer.
+     """
+     writer_class = WRITER_REGISTRY.get(tracking_tool)
+     if writer_class:
+         return writer_class()
+     else:
+         raise ValueError(f"Unsupported tracking tool: {tracking_tool}")
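
The registry gives the new Trainer and callback stack a single entry point for experiment tracking. A rough usage sketch follows; constructing TrainConfig with all-default fields is an assumption, and since the exact OptimizeResult signature is not shown in this diff, the write call is only indicated in comments:

    from qadence.ml_tools.callbacks.writer_registry import get_writer
    from qadence.ml_tools.config import TrainConfig
    from qadence.types import ExperimentTrackingTool

    config = TrainConfig()  # assumes every TrainConfig field has a default value
    writer = get_writer(ExperimentTrackingTool.TENSORBOARD)  # -> TensorBoardWriter

    writer.open(config)  # SummaryWriter rooted at config.log_folder
    writer.log_hyperparams({"lr": 1e-3, "depth": 4})
    # During training the Trainer would pass each OptimizeResult along:
    #     writer.write(result)          # one add_scalar per entry in result.metrics
    #     writer.print_metrics(result)  # console summary of the loss and metrics
    writer.close()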