qadence 1.6.3__py3-none-any.whl → 1.7.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. qadence/__init__.py +2 -2
  2. qadence/backends/api.py +47 -60
  3. qadence/backends/gpsr.py +1 -0
  4. qadence/backends/pyqtorch/backend.py +1 -2
  5. qadence/backends/pyqtorch/config.py +5 -0
  6. qadence/backends/pyqtorch/convert_ops.py +83 -10
  7. qadence/backends/utils.py +62 -7
  8. qadence/blocks/abstract.py +7 -0
  9. qadence/blocks/embedding.py +17 -12
  10. qadence/blocks/matrix.py +1 -1
  11. qadence/blocks/primitive.py +1 -1
  12. qadence/constructors/__init__.py +2 -0
  13. qadence/constructors/hamiltonians.py +38 -1
  14. qadence/draw/utils.py +1 -1
  15. qadence/execution.py +11 -3
  16. qadence/extensions.py +62 -36
  17. qadence/ml_tools/__init__.py +11 -3
  18. qadence/ml_tools/config.py +283 -2
  19. qadence/ml_tools/constructors.py +796 -0
  20. qadence/ml_tools/models.py +373 -251
  21. qadence/ml_tools/printing.py +5 -2
  22. qadence/ml_tools/saveload.py +42 -18
  23. qadence/ml_tools/train_grad.py +48 -14
  24. qadence/ml_tools/utils.py +2 -8
  25. qadence/{models/quantum_model.py → model.py} +178 -10
  26. qadence/operations/ham_evo.py +10 -0
  27. qadence/overlap.py +1 -1
  28. qadence/parameters.py +10 -1
  29. qadence/register.py +98 -22
  30. qadence/serialization.py +6 -6
  31. qadence/types.py +44 -0
  32. qadence/utils.py +2 -8
  33. {qadence-1.6.3.dist-info → qadence-1.7.1.dist-info}/METADATA +7 -6
  34. {qadence-1.6.3.dist-info → qadence-1.7.1.dist-info}/RECORD +36 -38
  35. {qadence-1.6.3.dist-info → qadence-1.7.1.dist-info}/WHEEL +1 -1
  36. qadence/finitediff.py +0 -47
  37. qadence/models/__init__.py +0 -7
  38. qadence/models/qnn.py +0 -265
  39. {qadence-1.6.3.dist-info → qadence-1.7.1.dist-info}/licenses/LICENSE +0 -0
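Orientation note (not part of the diff): the renamed and removed entries above (`qadence/models/*` dropped, `qadence/model.py` added) move the model classes to the top-level namespace, as used throughout the hunks below. A minimal sketch of the resulting import change:

```python
# qadence <= 1.6.x
# from qadence.models import QNN, QuantumModel

# qadence >= 1.7.x
from qadence import QNN, QuantumModel
```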
qadence/ml_tools/printing.py CHANGED
@@ -11,8 +11,11 @@ def print_metrics(loss: float | None, metrics: dict, iteration: int) -> None:
     print(msg)
 
 
-def write_tensorboard(writer: SummaryWriter, loss: float, metrics: dict, iteration: int) -> None:
-    writer.add_scalar("loss", loss, iteration)
+def write_tensorboard(
+    writer: SummaryWriter, loss: float = None, metrics: dict = {}, iteration: int = 0
+) -> None:
+    if loss is not None:
+        writer.add_scalar("loss", loss, iteration)
     for key, arg in metrics.items():
         writer.add_scalar(key, arg, iteration)
 
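Note (not part of the diff): with the relaxed signature above, `write_tensorboard` can be called without a loss value, which the training loop below uses for validation-only logging. A minimal sketch, assuming the function lives in `qadence.ml_tools.printing` and a standard `SummaryWriter`:

```python
from torch.utils.tensorboard import SummaryWriter

from qadence.ml_tools.printing import write_tensorboard

writer = SummaryWriter(log_dir="./runs")  # illustrative log directory
# loss=None skips the "loss" scalar and logs only the metrics dict
write_tensorboard(writer, None, {"val_loss": 0.1}, 0)
```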
qadence/ml_tools/saveload.py CHANGED
@@ -14,7 +14,7 @@ from torch.optim import Optimizer
 logger = getLogger(__name__)
 
 
-def get_latest_checkpoint_name(folder: Path, type: str) -> Path:
+def get_latest_checkpoint_name(folder: Path, type: str, device: str | torch.device = "cpu") -> Path:
     file = Path("")
     files = [f for f in os.listdir(folder) if f.endswith(".pt") and type in f]
     if len(files) == 0:
@@ -22,12 +22,18 @@ def get_latest_checkpoint_name(folder: Path, type: str) -> Path:
     if len(files) == 1:
         file = Path(files[0])
     else:
-        pattern = re.compile(".*_(\d+).pt$")
+        device = str(device).split(":")[0]
+        pattern = re.compile(f".*_(\d+)_device_{device}.pt$")
+        legacy_pattern = re.compile(".*_(\d+).pt$")
         max_index = -1
         for f in files:
+            legacy_match = legacy_pattern.search(f)
             match = pattern.search(f)
-            if match:
-                index_str = match.group(1).replace("_", "")
+            if match or legacy_match:
+                if legacy_match:
+                    logger.warn(f"Found checkpoint(s) in legacy format: {f}.")
+                    match = legacy_match
+                index_str = match.group(1).replace("_", "")  # type: ignore [union-attr]
                 index = int(index_str)
                 if index > max_index:
                     max_index = index
@@ -41,29 +47,38 @@ def load_checkpoint(
     optimizer: Optimizer | NGOptimizer,
     model_ckpt_name: str | Path = "",
     opt_ckpt_name: str | Path = "",
+    device: str | torch.device = "cpu",
 ) -> tuple[Module, Optimizer | NGOptimizer, int]:
     if isinstance(folder, str):
         folder = Path(folder)
     if not folder.exists():
         folder.mkdir(parents=True)
         return model, optimizer, 0
-    model, iter = load_model(folder, model, model_ckpt_name)
-    optimizer = load_optimizer(folder, optimizer, opt_ckpt_name)
+    model, iter = load_model(folder, model, model_ckpt_name, device)
+    optimizer = load_optimizer(folder, optimizer, opt_ckpt_name, device)
     return model, optimizer, iter
 
 
 def write_checkpoint(
-    folder: Path, model: Module, optimizer: Optimizer | NGOptimizer, iteration: int | str
+    folder: Path,
+    model: Module,
+    optimizer: Optimizer | NGOptimizer,
+    iteration: int | str,
 ) -> None:
-    from qadence.ml_tools.models import TransformedModule
-    from qadence.models import QNN, QuantumModel
+    from qadence import QuantumModel
+
+    from .models import QNN
 
     device = None
     try:
         # We extract the device from the pyqtorch native circuit
         device = str(model.device).split(":")[0]  # in case of using several CUDA devices
-    except Exception:
-        pass
+    except Exception as e:
+        msg = (
+            f"Unable to identify in which device the QuantumModel is stored due to {e}."
+            "Setting device to None"
+        )
+        logger.warning(msg)
 
     iteration_substring = f"{iteration:03n}" if isinstance(iteration, int) else iteration
     model_checkpoint_name: str = (
@@ -81,7 +96,7 @@ def write_checkpoint(
     try:
         d = (
             model._to_dict(save_params=True)
-            if isinstance(model, (QNN, QuantumModel)) or isinstance(model, TransformedModule)
+            if isinstance(model, (QNN, QuantumModel))
             else model.state_dict()
         )
         torch.save((iteration, d), folder / model_checkpoint_name)
@@ -101,21 +116,29 @@ def write_checkpoint(
 
 
 def load_model(
-    folder: Path, model: Module, model_ckpt_name: str | Path = "", *args: Any, **kwargs: Any
+    folder: Path,
+    model: Module,
+    model_ckpt_name: str | Path = "",
+    device: str | torch.device = "cpu",
+    *args: Any,
+    **kwargs: Any,
 ) -> tuple[Module, int]:
-    from qadence.ml_tools.models import TransformedModule
-    from qadence.models import QNN, QuantumModel
+    from qadence import QNN, QuantumModel
 
     iteration = 0
     if model_ckpt_name == "":
-        model_ckpt_name = get_latest_checkpoint_name(folder, "model")
+        model_ckpt_name = get_latest_checkpoint_name(folder, "model", device)
 
     try:
         iteration, model_dict = torch.load(folder / model_ckpt_name, *args, **kwargs)
-        if isinstance(model, (QuantumModel, QNN, TransformedModule)):
+        if isinstance(model, (QuantumModel, QNN)):
             model._from_dict(model_dict, as_torch=True)
         elif isinstance(model, Module):
             model.load_state_dict(model_dict, strict=True)
+        # Load model to a specific gpu device if specified
+        pattern = re.compile("cuda:\d+$")
+        if pattern.search(str(device)):
+            model.to(device)
 
     except Exception as e:
         msg = f"Unable to load state dict due to {e}.\
@@ -128,9 +151,10 @@ def load_optimizer(
     folder: Path,
     optimizer: Optimizer | NGOptimizer,
     opt_ckpt_name: str | Path = "",
+    device: str | torch.device = "cpu",
 ) -> Optimizer | NGOptimizer:
     if opt_ckpt_name == "":
-        opt_ckpt_name = get_latest_checkpoint_name(folder, "opt")
+        opt_ckpt_name = get_latest_checkpoint_name(folder, "opt", device)
     if os.path.isfile(folder / opt_ckpt_name):
         if isinstance(optimizer, Optimizer):
             (_, OptType, optimizer_state) = torch.load(folder / opt_ckpt_name)
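Illustration (not part of the diff): the device-aware lookup above matches a `_device_<device>` suffix and falls back to the legacy pattern. A minimal sketch with hypothetical checkpoint names:

```python
import re

device = "cuda:1"
device_root = str(device).split(":")[0]  # "cuda", as in get_latest_checkpoint_name
pattern = re.compile(rf".*_(\d+)_device_{device_root}.pt$")
legacy_pattern = re.compile(r".*_(\d+).pt$")

# Hypothetical file names: one in the new format, one legacy
for name in ["model_QNN_ckpt_002_device_cuda.pt", "model_QNN_ckpt_001.pt"]:
    match = pattern.search(name) or legacy_pattern.search(name)
    if match:
        print(name, "-> iteration index", int(match.group(1)))
```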
qadence/ml_tools/train_grad.py CHANGED
@@ -52,16 +52,12 @@ def train(
        device: String defining device to train on, pass 'cuda' for GPU.
        optimize_step: Customizable optimization callback which is called at every iteration.=
            The function must have the signature `optimize_step(model,
-            optimizer, loss_fn, xs, device="cpu")` (see the example below).
-            Apart from the default we already supply three other optimization
-            functions `optimize_step_evo`, `optimize_step_grad_norm`, and
-            `optimize_step_inv_dirichlet`. Learn more about how to use this in
-            the [Advancded features](../../tutorials/advanced) tutorial of the
-            documentation.
+            optimizer, loss_fn, xs, device="cpu")`.
        write_tensorboard: Customizable tensorboard logging callback which is
            called every `config.write_every` iterations. The function must have
            the signature `write_tensorboard(writer, loss, metrics, iteration)`
            (see the example below).
+        dtype: The dtype to use for the data.
 
     Example:
     ```python exec="on" source="material-block"
@@ -70,7 +66,7 @@ def train(
     from itertools import count
     from qadence import Parameter, QuantumCircuit, Z
     from qadence import hamiltonian_factory, hea, feature_map, chain
-    from qadence.models import QNN
+    from qadence import QNN
     from qadence.ml_tools import TrainConfig, train_with_grad, to_dataloader
 
     n_qubits = 2
@@ -114,8 +110,11 @@ def train(
     """
     # load available checkpoint
     init_iter = 0
+    log_device = "cpu" if device is None else device
     if config.folder:
-        model, optimizer, init_iter = load_checkpoint(config.folder, model, optimizer)
+        model, optimizer, init_iter = load_checkpoint(
+            config.folder, model, optimizer, device=log_device
+        )
         logger.debug(f"Loaded model and optimizer from {config.folder}")
 
     # Move model to device before optimizer is loaded
@@ -154,12 +153,32 @@ def train(
     data_dtype = float64 if dtype == complex128 else float32
 
     best_val_loss = math.inf
+
     with progress:
         dl_iter = iter(dataloader) if dataloader is not None else None
-        if perform_val:
-            dl_iter_val = iter(val_dataloader) if val_dataloader is not None else None
+
+        # Initial validation evaluation
+        try:
+            if perform_val:
+                dl_iter_val = iter(val_dataloader) if val_dataloader is not None else None
+                xs = next(dl_iter_val)
+                xs_to_device = data_to_device(xs, device=device, dtype=data_dtype)
+                best_val_loss, metrics = loss_fn(model, xs_to_device)
+
+                metrics["val_loss"] = best_val_loss
+                write_tensorboard(writer, None, metrics, init_iter)
+
+                if config.folder:
+                    if config.checkpoint_best_only:
+                        write_checkpoint(config.folder, model, optimizer, iteration="best")
+                    else:
+                        write_checkpoint(config.folder, model, optimizer, init_iter)
+
+        except KeyboardInterrupt:
+            logger.info("Terminating training gracefully after the current iteration.")
 
         # outer epoch loop
+        init_iter += 1
         for iteration in progress.track(range(init_iter, init_iter + config.max_iter)):
             try:
                 # in case there is not data needed by the model
@@ -193,10 +212,13 @@ def train(
                )
 
                if iteration % config.print_every == 0 and config.verbose:
-                    print_metrics(loss, metrics, iteration)
+                    # Note that the loss returned by optimize_step
+                    # is the value before doing the training step
+                    # which is printed accordingly by the previous iteration number
+                    print_metrics(loss, metrics, iteration - 1)
 
                if iteration % config.write_every == 0:
-                    write_tensorboard(writer, loss, metrics, iteration)
+                    write_tensorboard(writer, loss, metrics, iteration - 1)
 
                if perform_val:
                    if iteration % config.val_every == 0:
@@ -208,7 +230,7 @@ def train(
                        if config.folder and config.checkpoint_best_only:
                            write_checkpoint(config.folder, model, optimizer, iteration="best")
                        metrics["val_loss"] = val_loss
-                        write_tensorboard(writer, math.nan, metrics, iteration)
+                        write_tensorboard(writer, None, metrics, iteration)
 
                if config.folder:
                    if iteration % config.checkpoint_every == 0 and not config.checkpoint_best_only:
@@ -218,7 +240,19 @@ def train(
                logger.info("Terminating training gracefully after the current iteration.")
                break
 
-    # Final writing and checkpointing
+    # Handling printing the last training loss
+    # as optimize_step does not give the loss value at the last iteration
+    try:
+        xs = next(dl_iter) if dataloader is not None else None  # type: ignore[arg-type]
+        xs_to_device = data_to_device(xs, device=device, dtype=data_dtype)
+        loss, metrics = loss_fn(model, xs_to_device)
+        if iteration % config.print_every == 0 and config.verbose:
+            print_metrics(loss, metrics, iteration)
+
+    except KeyboardInterrupt:
+        logger.info("Terminating training gracefully after the current iteration.")
+
+    # Final printing, writing and checkpointing
     if config.folder and not config.checkpoint_best_only:
         write_checkpoint(config.folder, model, optimizer, iteration)
     write_tensorboard(writer, loss, metrics, iteration)
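Aside (not part of the diff): both the initial-validation block and the last-iteration block above rely on the same callback contract, `loss_fn(model, xs) -> (loss, metrics)`. A minimal sketch of such a callback, with illustrative names:

```python
import torch

def loss_fn(model: torch.nn.Module, xs: tuple) -> tuple[torch.Tensor, dict]:
    # xs is one batch from the dataloader, here assumed to be (inputs, targets)
    inputs, targets = xs
    outputs = model(inputs)
    loss = torch.nn.functional.mse_loss(outputs, targets)
    # the metrics dict is what write_tensorboard(writer, loss, metrics, iteration) logs
    return loss, {"mse": loss.detach()}
```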
qadence/ml_tools/utils.py CHANGED
@@ -5,16 +5,15 @@ from typing import Any
 
 from torch import Tensor, rand
 
+from qadence import QNN, QuantumModel
 from qadence.blocks import AbstractBlock, parameters
 from qadence.circuit import QuantumCircuit
-from qadence.ml_tools.models import TransformedModule
-from qadence.models import QNN, QuantumModel
 from qadence.parameters import Parameter, stringify
 
 
 @singledispatch
 def rand_featureparameters(
-    x: QuantumCircuit | AbstractBlock | QuantumModel | QNN | TransformedModule, *args: Any
+    x: QuantumCircuit | AbstractBlock | QuantumModel | QNN, *args: Any
 ) -> dict[str, Tensor]:
     raise NotImplementedError(f"Unable to generate random featureparameters for object {type(x)}.")
 
@@ -39,8 +38,3 @@ def _(qm: QuantumModel, batch_size: int = 1) -> dict[str, Tensor]:
 @rand_featureparameters.register
 def _(qnn: QNN, batch_size: int = 1) -> dict[str, Tensor]:
     return rand_featureparameters(qnn._circuit.abstract, batch_size)
-
-
-@rand_featureparameters.register
-def _(tm: TransformedModule, batch_size: int = 1) -> dict[str, Tensor]:
-    return rand_featureparameters(tm.model, batch_size)
qadence/{models/quantum_model.py → model.py} CHANGED
@@ -36,7 +36,37 @@ class QuantumModel(nn.Module):
 
     This class should be used as base class for any new quantum model supported in the qadence
     framework for information on the implementation of custom models see
-    [here](/advanced_tutorials/custom-models.md).
+    [here](../tutorials/advanced_tutorials/custom-models.md).
+
+    Example:
+    ```python exec="on" source="material-block" result="json"
+    import torch
+    from qadence import QuantumModel, QuantumCircuit, RX, RY, Z, PI, chain, kron
+    from qadence import FeatureParameter, VariationalParameter
+
+    theta = VariationalParameter("theta")
+    phi = FeatureParameter("phi")
+
+    block = chain(
+        kron(RX(0, theta), RY(1, theta)),
+        kron(RX(0, phi), RY(1, phi)),
+    )
+
+    circuit = QuantumCircuit(2, block)
+
+    observable = Z(0) + Z(1)
+
+    model = QuantumModel(circuit, observable)
+    values = {"phi": torch.tensor([PI, PI/2]), "theta": torch.tensor([PI, PI/2])}
+
+    wf = model.run(values)
+    xs = model.sample(values, n_shots=100)
+    ex = model.expectation(values)
+    print(wf)
+    print(xs)
+    print(ex)
+    ```
+    ```
     """
 
     backend: Backend | DifferentiableBackend
@@ -120,6 +150,7 @@ class QuantumModel(nn.Module):
 
     @property
     def vparams(self) -> OrderedDict:
+        """Variational parameters."""
         return OrderedDict({k: v.data for k, v in self._params.items() if v.requires_grad})
 
     @property
@@ -145,9 +176,26 @@ class QuantumModel(nn.Module):
         return len(self.vals_vparams)
 
     def circuit(self, circuit: QuantumCircuit) -> ConvertedCircuit:
+        """Get backend-converted circuit.
+
+        Args:
+            circuit: QuantumCircuit instance.
+
+        Returns:
+            Backend circuit.
+        """
         return self.backend.circuit(circuit)
 
     def observable(self, observable: AbstractBlock, n_qubits: int) -> Any:
+        """Get backend observable.
+
+        Args:
+            observable: Observable block.
+            n_qubits: Number of qubits
+
+        Returns:
+            Backend observable.
+        """
         return self.backend.observable(observable, n_qubits)
 
     def reset_vparams(self, values: Sequence) -> None:
@@ -161,6 +209,11 @@ class QuantumModel(nn.Module):
             current_vparams[k].data = torch.tensor([values[i]])
 
     def forward(self, *args: Any, **kwargs: Any) -> Tensor:
+        """Calls run method with arguments.
+
+        Returns:
+            Tensor: A torch.Tensor representing output.
+        """
         return self.run(*args, **kwargs)
 
     def run(
@@ -169,9 +222,26 @@ class QuantumModel(nn.Module):
         state: Tensor | None = None,
         endianness: Endianness = Endianness.BIG,
     ) -> Tensor:
+        r"""Run model.
+
+        Given an input state $| \psi_0 \rangle$,
+        a set of variational parameters $\vec{\theta}$
+        and the unitary representation of the model $U(\vec{\theta})$
+        we return $U(\vec{\theta}) | \psi_0 \rangle$.
+
+        Arguments:
+            values: Values dict which contains values for the parameters.
+            state: Optional input state to apply model on.
+            endianness: Storage convention for binary information.
+
+        Returns:
+            A torch.Tensor representing output.
+        """
         if values is None:
             values = {}
+
         params = self.embedding_fn(self._params, values)
+
         return self.backend.run(self._circuit, params, state=state, endianness=endianness)
 
     def sample(
@@ -183,6 +253,19 @@ class QuantumModel(nn.Module):
         mitigation: Mitigations | None = None,
         endianness: Endianness = Endianness.BIG,
     ) -> list[Counter]:
+        """Obtain samples from model.
+
+        Arguments:
+            values: Values dict which contains values for the parameters.
+            n_shots: Observable part of the expectation.
+            state: Optional input state to apply model on.
+            noise: A noise model to use.
+            mitigation: A mitigation protocol to use.
+            endianness: Storage convention for binary information.
+
+        Returns:
+            A list of Counter instances with the sample results.
+        """
         params = self.embedding_fn(self._params, values)
         if noise is None:
             noise = self._noise
@@ -208,7 +291,27 @@ class QuantumModel(nn.Module):
         mitigation: Mitigations | None = None,
         endianness: Endianness = Endianness.BIG,
     ) -> Tensor:
-        """Compute expectation using the given backend.
+        r"""Compute expectation using the given backend.
+
+
+
+        Given an input state $|\psi_0 \rangle$,
+        a set of variational parameters $\vec{\theta}$
+        and the unitary representation of the model $U(\vec{\theta})$
+        we return $\langle \psi_0 | U(\vec{\theta}) | \psi_0 \rangle$.
+
+        Arguments:
+            values: Values dict which contains values for the parameters.
+            observable: Observable part of the expectation.
+            state: Optional input state.
+            measurement: Optional measurement protocol. If None, use
+                exact expectation value with a statevector simulator.
+            noise: A noise model to use.
+            mitigation: A mitigation protocol to use.
+            endianness: Storage convention for binary information.
+
+        Raises:
+            ValueError: when no observable is set.
 
         Returns:
             A torch.Tensor of shape n_batches x n_obs
@@ -243,9 +346,22 @@ class QuantumModel(nn.Module):
         )
 
     def overlap(self) -> Tensor:
+        """Overlap of model.
+
+        Raises:
+            NotImplementedError: The overlap method is not implemented for this model.
+        """
         raise NotImplementedError("The overlap method is not implemented for this model.")
 
     def _to_dict(self, save_params: bool = False) -> dict[str, Any]:
+        """Convert QuantumModel to a dictionary for serialization.
+
+        Arguments:
+            save_params: Optionally save parameters. Defaults to False.
+
+        Returns:
+            The dictionary
+        """
         d = dict()
         try:
             if isinstance(self._observable, list):
@@ -258,10 +374,10 @@ class QuantumModel(nn.Module):
                 "observable": abs_obs,
                 "backend": self._backend_name,
                 "diff_mode": self._diff_mode,
-                "measurement": self._measurement._to_dict()
-                if self._measurement is not None
-                else {},
-                "noise": self._noise._to_dict() if self._noise is not None else {},
+                "measurement": (
+                    self._measurement._to_dict() if self._measurement is not None else dict()
+                ),
+                "noise": self._noise._to_dict() if self._noise is not None else dict(),
                 "backend_configuration": asdict(self.backend.backend.config),  # type: ignore
             }
             param_dict_conv = {}
@@ -275,6 +391,15 @@ class QuantumModel(nn.Module):
 
     @classmethod
     def _from_dict(cls, d: dict, as_torch: bool = False) -> QuantumModel:
+        """Initialize instance of QuantumModel from dictionary.
+
+        Args:
+            d: Dictionary.
+            as_torch: Load parameters as torch tensors. Defaults to False.
+
+        Returns:
+            QuantumModel instance
+        """
         from qadence.serialization import deserialize
 
         qm: QuantumModel
@@ -310,6 +435,16 @@ class QuantumModel(nn.Module):
     def save(
         self, folder: str | Path, file_name: str = "quantum_model.pt", save_params: bool = True
     ) -> None:
+        """Save model.
+
+        Arguments:
+            folder: Folder where model is saved.
+            file_name: File name for saving model. Defaults to "quantum_model.pt".
+            save_params: Save parameters if True. Defaults to True.
+
+        Raises:
+            FileNotFoundError: If folder is not a directory.
+        """
         if not os.path.isdir(folder):
             raise FileNotFoundError
         try:
@@ -321,6 +456,16 @@ class QuantumModel(nn.Module):
     def load(
         cls, file_path: str | Path, as_torch: bool = False, map_location: str | torch.device = "cpu"
     ) -> QuantumModel:
+        """Load QuantumModel.
+
+        Arguments:
+            file_path: File path to load model from.
+            as_torch: Load parameters as torch tensor. Defaults to False.
+            map_location (str | torch.device, optional): Location for loading. Defaults to "cpu".
+
+        Returns:
+            QuantumModel from file_path.
+        """
         qm_pt = {}
         if isinstance(file_path, str):
             file_path = Path(file_path)
@@ -336,11 +481,23 @@ class QuantumModel(nn.Module):
         return cls._from_dict(qm_pt, as_torch)
 
     def assign_parameters(self, values: dict[str, Tensor]) -> Any:
-        """Return the final, assigned circuit that is used in e.g. `backend.run`."""
+        """Return the final, assigned circuit that is used in e.g. `backend.run`.
+
+        Arguments:
+            values: Values dict which contains values for the parameters.
+
+        Returns:
+            Final, assigned circuit that is used in e.g. `backend.run`
+        """
         params = self.embedding_fn(self._params, values)
         return self.backend.assign_parameters(self._circuit, params)
 
     def to(self, *args: Any, **kwargs: Any) -> QuantumModel:
+        """Conversion method for device or types.
+
+        Returns:
+            QuantumModel with conversions.
+        """
         from pyqtorch import QuantumCircuit as PyQCircuit
 
         try:
@@ -354,9 +511,11 @@ class QuantumModel(nn.Module):
             obs.native = obs.native.to(*args, **kwargs)
             self._params = self._params.to(
                 device=self._circuit.native.device,
-                dtype=torch.float64
-                if self._circuit.native.dtype == torch.cdouble
-                else torch.float32,
+                dtype=(
+                    torch.float64
+                    if self._circuit.native.dtype == torch.cdouble
+                    else torch.float32
+                ),
             )
             logger.debug(f"Moved {self} to {args}, {kwargs}.")
         else:
@@ -367,8 +526,17 @@ class QuantumModel(nn.Module):
 
     @property
     def device(self) -> torch.device:
+        """Get device.
+
+        Returns:
+            torch.device
+        """
         return (
             self._circuit.native.device
             if self.backend.backend.name == "pyqtorch"  # type: ignore[union-attr]
             else torch.device("cpu")
         )
+
+
+# Modules to be automatically added to the qadence namespace
+__all__ = ["QuantumModel"]  # type: ignore
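Illustration (not part of the diff): a save/load round trip using the `save` and `load` signatures documented in the hunks above; the one-qubit circuit and temporary folder below are arbitrary examples.

```python
import tempfile
from pathlib import Path

import torch
from qadence import FeatureParameter, QuantumCircuit, QuantumModel, RX, Z

phi = FeatureParameter("phi")
model = QuantumModel(QuantumCircuit(1, RX(0, phi)), observable=Z(0))

with tempfile.TemporaryDirectory() as folder:
    # save(folder, file_name=..., save_params=...) as documented above
    model.save(folder, file_name="quantum_model.pt", save_params=True)
    # load(file_path, as_torch=..., map_location=...) as documented above
    loaded = QuantumModel.load(Path(folder) / "quantum_model.pt", as_torch=True)
    print(loaded.expectation({"phi": torch.tensor([0.5])}))
```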
qadence/operations/ham_evo.py CHANGED
@@ -43,6 +43,7 @@ class HamEvo(TimeEvolutionBlock):
         generator: Either a AbstractBlock, torch.Tensor or numpy.ndarray.
         parameter: A scalar or vector of numeric or torch.Tensor type.
         qubit_support: The qubits on which the evolution will be performed on.
+        duration: duration of evolution in case of time-dependent generator
 
     Examples:
 
@@ -66,6 +67,7 @@ class HamEvo(TimeEvolutionBlock):
         generator: Union[TGenerator, AbstractBlock],
         parameter: TParameter,
         qubit_support: tuple[int, ...] = None,
+        duration: float | None = None,
     ):
         gen_exprs = {}
         if qubit_support is None and not isinstance(generator, AbstractBlock):
@@ -75,6 +77,10 @@ class HamEvo(TimeEvolutionBlock):
             qubit_support = generator.qubit_support
             if generator.is_parametric:
                 gen_exprs = {str(e): e for e in expressions(generator)}
+
+            if generator.is_time_dependent and duration is None:
+                raise ValueError("For time-dependent generators, a duration must be specified.")
+
         elif isinstance(generator, torch.Tensor):
             msg = "Please provide a square generator."
             if len(generator.shape) == 2:
@@ -99,6 +105,7 @@ class HamEvo(TimeEvolutionBlock):
         ps = {"parameter": Parameter(parameter), **gen_exprs}
         self.parameters = ParamMap(**ps)
         self.generator = generator
+        self.duration = duration
 
     @classmethod
     def num_parameters(cls) -> int:
@@ -197,3 +204,6 @@ class HamEvo(TimeEvolutionBlock):
             raise NotImplementedError(
                 "The current digital decomposition can be applied only to Pauli Hamiltonians."
             )
+
+    def __matmul__(self, other: AbstractBlock) -> AbstractBlock:
+        return super().__matmul__(other)
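Illustration (not part of the diff): the constructor above keeps `duration` optional for ordinary generators and only requires it for time-dependent ones. A minimal sketch with a plain Pauli generator:

```python
import torch
from qadence import HamEvo, X

# Time-independent generator: duration can stay at its default of None
evolution = HamEvo(generator=X(0), parameter=torch.tensor([0.5]))

# For a time-dependent generator, HamEvo(..., duration=...) must now be passed,
# otherwise the constructor raises ValueError (see the hunk above).
print(evolution)
```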
qadence/overlap.py CHANGED
@@ -13,7 +13,7 @@ from qadence.blocks.utils import chain, kron, tag
 from qadence.circuit import QuantumCircuit
 from qadence.divergences import js_divergence
 from qadence.measurements import Measurements
-from qadence.models.quantum_model import QuantumModel
+from qadence.model import QuantumModel
 from qadence.operations import SWAP, H, I, S
 from qadence.transpile import reassign
 from qadence.types import BackendName, DiffMode, OverlapMethod