qadence 1.8.0__py3-none-any.whl → 1.9.1__py3-none-any.whl

This diff compares the contents of two publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (43)
  1. qadence/__init__.py +1 -1
  2. qadence/analog/parse_analog.py +1 -2
  3. qadence/backends/gpsr.py +8 -2
  4. qadence/backends/pulser/backend.py +7 -23
  5. qadence/backends/pyqtorch/backend.py +80 -5
  6. qadence/backends/pyqtorch/config.py +10 -3
  7. qadence/backends/pyqtorch/convert_ops.py +63 -2
  8. qadence/blocks/primitive.py +1 -0
  9. qadence/execution.py +0 -2
  10. qadence/log_config.yaml +10 -0
  11. qadence/measurements/shadow.py +97 -128
  12. qadence/measurements/utils.py +2 -2
  13. qadence/mitigations/readout.py +12 -6
  14. qadence/ml_tools/__init__.py +4 -8
  15. qadence/ml_tools/callbacks/__init__.py +30 -0
  16. qadence/ml_tools/callbacks/callback.py +451 -0
  17. qadence/ml_tools/callbacks/callbackmanager.py +214 -0
  18. qadence/ml_tools/{saveload.py → callbacks/saveload.py} +11 -11
  19. qadence/ml_tools/callbacks/writer_registry.py +441 -0
  20. qadence/ml_tools/config.py +132 -258
  21. qadence/ml_tools/data.py +7 -3
  22. qadence/ml_tools/loss/__init__.py +10 -0
  23. qadence/ml_tools/loss/loss.py +87 -0
  24. qadence/ml_tools/optimize_step.py +45 -10
  25. qadence/ml_tools/stages.py +46 -0
  26. qadence/ml_tools/train_utils/__init__.py +7 -0
  27. qadence/ml_tools/train_utils/base_trainer.py +555 -0
  28. qadence/ml_tools/train_utils/config_manager.py +184 -0
  29. qadence/ml_tools/trainer.py +708 -0
  30. qadence/model.py +1 -1
  31. qadence/noise/__init__.py +2 -2
  32. qadence/noise/protocols.py +18 -53
  33. qadence/operations/ham_evo.py +87 -26
  34. qadence/transpile/noise.py +12 -5
  35. qadence/types.py +15 -3
  36. {qadence-1.8.0.dist-info → qadence-1.9.1.dist-info}/METADATA +3 -4
  37. {qadence-1.8.0.dist-info → qadence-1.9.1.dist-info}/RECORD +39 -32
  38. {qadence-1.8.0.dist-info → qadence-1.9.1.dist-info}/WHEEL +1 -1
  39. qadence/ml_tools/printing.py +0 -154
  40. qadence/ml_tools/train_grad.py +0 -395
  41. qadence/ml_tools/train_no_grad.py +0 -199
  42. qadence/noise/readout.py +0 -218
  43. {qadence-1.8.0.dist-info → qadence-1.9.1.dist-info}/licenses/LICENSE +0 -0
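The headline change is a rework of `qadence.ml_tools`: the function-based training loops (`train_grad.py`, `train_no_grad.py`) and the `printing.py` tracker helpers are removed in favor of a `Trainer` class (`trainer.py`), a callback stack (`callbacks/`), a writer registry, and dedicated loss/stage modules. As a rough migration sketch — the `Trainer` signature below is an assumption inferred from the new file names in this list, not something this diff shows — 1.8.0 training code moves from a function call to a class-based entry point:

```python
import torch

from qadence.ml_tools import TrainConfig, Trainer, to_dataloader

# Illustrative stand-ins: any torch Module and a loss_fn returning (loss, metrics).
model = torch.nn.Linear(1, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)

def loss_fn(model: torch.nn.Module, data: tuple) -> tuple[torch.Tensor, dict]:
    x, y = data
    loss = torch.nn.functional.mse_loss(model(x), y)
    return loss, {"mse": loss.item()}

x = torch.rand(100, 1)
data = to_dataloader(x, torch.sin(x), batch_size=25)

# 1.8.0 (removed):
#   model, optimizer = train_with_grad(model, data, optimizer, config, loss_fn=loss_fn)
# 1.9.1 (assumed API; see qadence/ml_tools/trainer.py above):
config = TrainConfig(max_iter=100)
trainer = Trainer(model=model, optimizer=optimizer, config=config, loss_fn=loss_fn)
trainer.fit(train_dataloader=data)
```

Two of the deleted modules, the gradient-free loop (`qadence/ml_tools/train_no_grad.py`) and the readout-noise model (`qadence/noise/readout.py`), are reproduced in full below.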
qadence/ml_tools/train_no_grad.py DELETED
@@ -1,199 +0,0 @@
-from __future__ import annotations
-
-import importlib
-from logging import getLogger
-from typing import Callable
-
-import nevergrad as ng
-from nevergrad.optimization.base import Optimizer as NGOptimizer
-from rich.progress import BarColumn, Progress, TaskProgressColumn, TextColumn, TimeRemainingColumn
-from torch import Tensor
-from torch.nn import Module
-from torch.utils.data import DataLoader
-from torch.utils.tensorboard import SummaryWriter
-
-from qadence.ml_tools.config import Callback, TrainConfig, run_callbacks
-from qadence.ml_tools.data import DictDataLoader, OptimizeResult
-from qadence.ml_tools.parameters import get_parameters, set_parameters
-from qadence.ml_tools.printing import (
-    log_model_tracker,
-    log_tracker,
-    plot_tracker,
-    print_metrics,
-    write_tracker,
-)
-from qadence.ml_tools.saveload import load_checkpoint, write_checkpoint
-from qadence.ml_tools.tensors import promote_to_tensor
-from qadence.types import ExperimentTrackingTool
-
-logger = getLogger(__name__)
-
-
-def train(
-    model: Module,
-    dataloader: DictDataLoader | DataLoader | None,
-    optimizer: NGOptimizer,
-    config: TrainConfig,
-    loss_fn: Callable,
-) -> tuple[Module, NGOptimizer]:
-    """Runs the training loop with a gradient-free optimizer.
-
-    Assumes that `loss_fn` returns a tuple of (loss, metrics: dict), where
-    `metrics` is a dict of scalars. Loss and metrics are written to
-    tensorboard. Checkpoints are written every `config.checkpoint_every` steps
-    (and after the last training step). If a checkpoint is found at `config.folder`
-    we resume training from there. The tensorboard logs can be viewed via
-    `tensorboard --logdir /path/to/folder`.
-
-    Args:
-        model: The model to train
-        dataloader: Dataloader constructed via `dictdataloader`
-        optimizer: The optimizer to use taken from the Nevergrad library. If this is not
-            the case the function will raise an AssertionError
-        config: `TrainConfig` with additional training options.
-        loss_fn: Loss function returning (loss: float, metrics: dict[str, float])
-    """
-    init_iter = 0
-    if config.folder:
-        model, optimizer, init_iter = load_checkpoint(config.folder, model, optimizer)
-        logger.debug(f"Loaded model and optimizer from {config.folder}")
-
-    def _update_parameters(
-        data: Tensor | None, ng_params: ng.p.Array
-    ) -> tuple[float, dict, ng.p.Array]:
-        loss, metrics = loss_fn(model, data)  # type: ignore[misc]
-        optimizer.tell(ng_params, float(loss))
-        ng_params = optimizer.ask()  # type: ignore [assignment]
-        params = promote_to_tensor(ng_params.value, requires_grad=False)
-        set_parameters(model, params)
-        return loss, metrics, ng_params
-
-    assert loss_fn is not None, "Provide a valid loss function"
-    # TODO: support also Scipy optimizers
-    assert isinstance(optimizer, NGOptimizer), "Use only optimizers from the Nevergrad library"
-
-    # initialize tracking tool
-    if config.tracking_tool == ExperimentTrackingTool.TENSORBOARD:
-        writer = SummaryWriter(config.folder, purge_step=init_iter)
-    else:
-        writer = importlib.import_module("mlflow")
-
-    # set optimizer configuration and initial parameters
-    optimizer.budget = config.max_iter
-    optimizer.enable_pickling()
-
-    # TODO: Make it GPU compatible if possible
-    params = get_parameters(model).detach().numpy()
-    ng_params = ng.p.Array(init=params)
-
-    if not ((dataloader is None) or isinstance(dataloader, (DictDataLoader, DataLoader))):
-        raise NotImplementedError(
-            f"Unsupported dataloader type: {type(dataloader)}. "
-            "You can use e.g. `qadence.ml_tools.to_dataloader` to build a dataloader."
-        )
-
-    # serial training
-    # TODO: Add a parallelization using the num_workers argument in Nevergrad
-    progress = Progress(
-        TextColumn("[progress.description]{task.description}"),
-        BarColumn(),
-        TaskProgressColumn(),
-        TimeRemainingColumn(elapsed_when_finished=True),
-    )
-
-    # populate callbacks with already available internal functions
-    # printing, writing and plotting
-    callbacks = config.callbacks
-
-    # printing
-    if config.verbose and config.print_every > 0:
-        callbacks += [
-            Callback(
-                lambda opt_res: print_metrics(opt_res.loss, opt_res.metrics, opt_res.iteration),
-                called_every=config.print_every,
-            )
-        ]
-
-    # writing metrics
-    if config.write_every > 0:
-        callbacks += [
-            Callback(
-                lambda opt_res: write_tracker(
-                    writer,
-                    opt_res.loss,
-                    opt_res.metrics,
-                    opt_res.iteration,
-                    tracking_tool=config.tracking_tool,
-                ),
-                called_every=config.write_every,
-                call_after_opt=True,
-            )
-        ]
-
-    # plot tracker
-    if config.plot_every > 0:
-        callbacks += [
-            Callback(
-                lambda opt_res: plot_tracker(
-                    writer,
-                    opt_res.model,
-                    opt_res.iteration,
-                    config.plotting_functions,
-                    tracking_tool=config.tracking_tool,
-                ),
-                called_every=config.plot_every,
-            )
-        ]

-    # checkpointing
-    if config.folder and config.checkpoint_every > 0:
-        callbacks += [
-            Callback(
-                lambda opt_res: write_checkpoint(
-                    config.folder,  # type: ignore[arg-type]
-                    opt_res.model,
-                    opt_res.optimizer,
-                    opt_res.iteration,
-                ),
-                called_every=config.checkpoint_every,
-                call_after_opt=True,
-            )
-        ]
-
-    callbacks_end_opt = [
-        callback
-        for callback in callbacks
-        if callback.call_end_epoch and not callback.call_during_eval
-    ]
-
-    with progress:
-        dl_iter = iter(dataloader) if dataloader is not None else None
-
-        for iteration in progress.track(range(init_iter, init_iter + config.max_iter)):
-            loss, metrics, ng_params = _update_parameters(
-                None if dataloader is None else next(dl_iter), ng_params  # type: ignore[arg-type]
-            )
-            opt_result = OptimizeResult(iteration, model, optimizer, loss, metrics)
-            run_callbacks(callbacks_end_opt, opt_result)
-
-            if iteration >= init_iter + config.max_iter:
-                break
-
-    # writing hyperparameters
-    if config.hyperparams:
-        log_tracker(writer, config.hyperparams, metrics, tracking_tool=config.tracking_tool)
-
-    if config.log_model:
-        log_model_tracker(writer, model, dataloader, tracking_tool=config.tracking_tool)
-
-    # Final callbacks
-    callbacks_after_opt = [callback for callback in callbacks if callback.call_after_opt]
-    run_callbacks(callbacks_after_opt, opt_result, is_last_iteration=True)
-
-    # close tracker
-    if config.tracking_tool == ExperimentTrackingTool.TENSORBOARD:
-        writer.close()
-    elif config.tracking_tool == ExperimentTrackingTool.MLFLOW:
-        writer.end_run()
-
-    return model, optimizer
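For reference, a minimal sketch of how this removed gradient-free entry point was driven in 1.8.0, reconstructed from the signature and docstring above. The linear model and sine-fitting data are illustrative stand-ins, and `to_dataloader` and `num_parameters` are assumed to be the 1.8.0 helpers from `qadence.ml_tools`:

```python
import nevergrad as ng
import torch

from qadence.ml_tools import TrainConfig, num_parameters, to_dataloader
from qadence.ml_tools.train_no_grad import train  # removed in 1.9.x

model = torch.nn.Linear(1, 1)  # stand-in for a QuantumModel

def loss_fn(model: torch.nn.Module, data: tuple) -> tuple[torch.Tensor, dict]:
    x, y = data
    loss = torch.nn.functional.mse_loss(model(x), y)
    return loss, {"mse": loss.item()}

x = torch.rand(100, 1)
# infinite=True: the loop above calls next(dl_iter) once per iteration.
data = to_dataloader(x, torch.sin(x), batch_size=25, infinite=True)

config = TrainConfig(max_iter=100, print_every=10)
# Nevergrad optimizes over the model's flattened parameter vector.
optimizer = ng.optimizers.NGOpt(
    parametrization=num_parameters(model), budget=config.max_iter
)
model, optimizer = train(model, data, optimizer, config, loss_fn)
```

Note that the loop `tell`s the previous candidate before `ask`ing for the next one, so the model is evaluated exactly once per iteration regardless of batch size.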
qadence/noise/readout.py DELETED
@@ -1,218 +0,0 @@
-from __future__ import annotations
-
-from collections import Counter
-from enum import Enum
-from logging import getLogger
-
-import torch
-from torch import Tensor
-from torch.distributions import normal, poisson, uniform
-
-logger = getLogger(__name__)
-
-
-class WhiteNoise(Enum):
-    """White noise distributions."""
-
-    UNIFORM = staticmethod(uniform.Uniform(low=0.0, high=1.0))
-    """Uniform white noise."""
-
-    GAUSSIAN = staticmethod(normal.Normal(loc=0.0, scale=1.0))
-    """Gaussian white noise."""
-
-    POISSON = staticmethod(poisson.Poisson(rate=0.1))
-    """Poisson white noise."""
-
-
-def bitstring_to_tensor(bitstring: str) -> Tensor:
-    """
-    A helper function to convert bit strings to torch.Tensor.
-
-    Args:
-        bitstring: A str format of a bit string.
-
-    Returns:
-        A torch.Tensor out of the input bit string.
-    """
-    return torch.as_tensor(list(map(int, bitstring)))
-
-
-def tensor_to_bitstring(bitstring: Tensor) -> str:
-    """
-    A helper function to convert torch.Tensor to bit strings.
-
-    Args:
-        bitstring: A torch.Tensor format of a bit string.
-
-    Returns:
-        A str out of the input bit string.
-    """
-    return "".join(list(map(str, bitstring.detach().tolist())))
-
-
-def bit_flip(bit: Tensor, cond: Tensor) -> Tensor:
-    """
-    A helper function that reverses the states 0 and 1 in the bit string.
-
-    Args:
-        bit: A integer-value bit in a bitstring to be inverted.
-        cond: A Bool value of whether or not a bit should be flipped.
-
-    Returns:
-        The inverse value of the input bit
-    """
-    return torch.where(
-        cond,
-        torch.where(
-            bit == torch.zeros(1, dtype=torch.int64),
-            torch.ones(1, dtype=torch.int64),
-            torch.zeros(1, dtype=torch.int64),
-        ),
-        bit,
-    )
-
-
-def sample_to_matrix(sample: dict) -> Tensor:
-    """
-    A helper function that maps a sample dict to a bit string array.
-
-    Args:
-        sample: A dictionary with bit stings as keys and values
-            as their counts.
-
-    Returns: A torch.Tensor of bit strings n_shots x n_qubits.
-    """
-
-    return torch.concatenate(
-        list(
-            map(
-                lambda bitstring: torch.broadcast_to(
-                    bitstring_to_tensor(bitstring), [sample[bitstring], len(bitstring)]
-                ),
-                sample.keys(),
-            )
-        )
-    )
-
-
-def create_noise_matrix(
-    noise_distribution: torch.distributions, n_shots: int, n_qubits: int
-) -> Tensor:
-    """
-    A helper function that creates a noise matrix for bit string corruption.
-
-    NB: The noise matrix is not square, as all bits are considered independent.
-
-    Args:
-        noise_distribution: Torch statistical distribution one of Gaussian,
-            Uniform, or Poisson.
-        n_shots: Number of shots/samples.
-        n_qubits: Number of qubits
-
-    Returns:
-        A sample out of the requested distribution given the number of shots/samples.
-    """
-    # the noise_matrix should be available to the user if they want to do error correction
-    return noise_distribution.sample([n_shots, n_qubits])
-
-
-def bs_corruption(
-    err_idx: Tensor,
-    sample: Tensor,
-) -> Counter:
-    """
-    A function that incorporates the expected readout error in a sample of bit strings.
-
-    given a noise matrix.
-
-    Args:
-        err_idx: A Boolean array of bit string indices that need to be corrupted.
-        sample: A torch.Tensor of bit strings n_shots x n_qubits.
-
-    Returns:
-        A counter of bit strings after readout corruption.
-    """
-
-    func = torch.func.vmap(bit_flip)
-
-    return Counter([tensor_to_bitstring(k) for k in func(sample, err_idx)])
-
-
-def create_confusion_matrices(noise_matrix: Tensor, error_probability: float) -> Tensor:
-    confusion_matrices = []
-    for i in range(noise_matrix.size()[1]):
-        column_tensor = noise_matrix[:, i]
-        flip_proba = column_tensor[column_tensor < error_probability].mean().item()
-        confusion_matrix = torch.tensor(
-            [[1.0 - flip_proba, flip_proba], [flip_proba, 1.0 - flip_proba]], dtype=torch.float64
-        )
-        confusion_matrices.append(confusion_matrix)
-    return torch.stack(confusion_matrices)
-
-
-def add_noise(
-    counters: list[Counter],
-    n_qubits: int,
-    n_shots: int = 1000,
-    options: dict = dict(),
-) -> list[Counter]:
-    """
-    Implements a simple uniform readout error model for position-independent bit string.
-
-    corruption.
-
-    Args:
-        counters (list): Samples of bit string as Counters.
-        n_qubits: Number of qubits in the bit string.
-        n_shots: Number of shots to sample.
-        options: A dict of options:
-            seed: Random seed value if any.
-            error_probability: Uniform error probability of wrong readout at any position
-                in the bit strings.
-            noise_distribution: Noise distribution.
-            noise_matrix: An input noise matrix if known.
-
-    Returns:
-        Samples of corrupted bit strings as list[Counter].
-    """
-
-    seed = options.get("seed", None)
-    error_probability = options.get("error_probability")
-    noise_distribution = options.get("noise_distribution", WhiteNoise.UNIFORM)
-    noise_matrix = options.get("noise_matrix")
-
-    # option for reproducibility
-    if seed is not None:
-        torch.manual_seed(seed)
-
-    if error_probability is None:
-        error_probability = 0.1
-        # Return the default error probability for mitigation purposes.
-        options["default_error_probability"] = error_probability
-    if noise_matrix is None:
-        # assumes that all bits can be flipped independently of each other
-        noise_matrix = create_noise_matrix(noise_distribution, n_shots, n_qubits)
-        confusion_matrices = create_confusion_matrices(
-            noise_matrix=noise_matrix, error_probability=error_probability
-        )
-        # Return the generated noise matrix for mitigation purposes.
-        options["confusion_matrices"] = confusion_matrices
-    else:
-        # check noise_matrix shape and values
-        assert (
-            noise_matrix.shape[0] == noise_matrix.shape[1]
-        ), "The error probabilities matrix needs to be square."
-        assert noise_matrix.shape == (
-            n_qubits,
-            n_qubits,
-        ), "The error probabilities matrix needs to be n_qubits x n_qubits."
-
-    # the simplest approach - an event occurs if its probability is higher than expected
-    # by random chance
-    err_idx = torch.as_tensor(noise_matrix < error_probability)
-
-    corrupted_bitstrings = []
-    for counter in counters:
-        sample = sample_to_matrix(counter)
-        corrupted_bitstrings.append(bs_corruption(err_idx=err_idx, sample=sample))
-    return corrupted_bitstrings
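The module's public entry point was `add_noise`, which corrupted already-sampled bitstring `Counter`s rather than acting at the backend level. A minimal usage sketch against the removed 1.8.0 module, with illustrative counts; note that `n_shots` should equal the total counts per `Counter` so the rows of the sampled noise matrix line up with the rows of the bit-string matrix fed to `vmap(bit_flip)`:

```python
from collections import Counter

from qadence.noise.readout import WhiteNoise, add_noise  # removed in 1.9.x

# Two illustrative 2-qubit measurements of 100 shots each.
counters = [Counter({"00": 55, "11": 45}), Counter({"01": 60, "10": 40})]

options = {
    "seed": 0,  # reproducible corruption
    "error_probability": 0.1,  # per-bit flip threshold
    "noise_distribution": WhiteNoise.UNIFORM,  # the default when omitted
}
noisy = add_noise(counters, n_qubits=2, n_shots=100, options=options)

print(noisy)  # e.g. [Counter({'00': 51, '11': 41, ...}), Counter({...})]
# add_noise also writes one 2x2 confusion matrix per qubit back into
# options, for downstream mitigation:
print(options["confusion_matrices"].shape)  # torch.Size([2, 2, 2])
```

A bit flips wherever its entry in the sampled noise matrix falls below `error_probability`, so with `WhiteNoise.UNIFORM` each bit flips independently with probability of roughly 0.1.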