qadence 1.5.2__py3-none-any.whl → 1.6.1__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Files changed (59)
  1. qadence/__init__.py +33 -5
  2. qadence/backend.py +2 -2
  3. qadence/backends/adjoint.py +8 -4
  4. qadence/backends/braket/backend.py +3 -2
  5. qadence/backends/braket/config.py +2 -2
  6. qadence/backends/gpsr.py +1 -1
  7. qadence/backends/horqrux/backend.py +23 -31
  8. qadence/backends/horqrux/config.py +2 -2
  9. qadence/backends/pulser/backend.py +82 -45
  10. qadence/backends/pulser/config.py +0 -28
  11. qadence/backends/pulser/convert_ops.py +20 -7
  12. qadence/backends/pulser/pulses.py +2 -2
  13. qadence/backends/pyqtorch/backend.py +3 -2
  14. qadence/backends/pyqtorch/config.py +2 -2
  15. qadence/backends/pyqtorch/convert_ops.py +40 -16
  16. qadence/blocks/block_to_tensor.py +7 -6
  17. qadence/blocks/matrix.py +2 -2
  18. qadence/blocks/primitive.py +2 -1
  19. qadence/blocks/utils.py +2 -2
  20. qadence/circuit.py +5 -2
  21. qadence/constructors/__init__.py +1 -10
  22. qadence/constructors/ansatze.py +1 -65
  23. qadence/constructors/daqc/daqc.py +3 -2
  24. qadence/constructors/daqc/gen_parser.py +3 -2
  25. qadence/constructors/daqc/utils.py +3 -3
  26. qadence/constructors/feature_maps.py +2 -90
  27. qadence/constructors/hamiltonians.py +2 -6
  28. qadence/constructors/rydberg_feature_maps.py +2 -2
  29. qadence/decompose.py +2 -2
  30. qadence/engines/torch/differentiable_expectation.py +7 -0
  31. qadence/extensions.py +4 -15
  32. qadence/log_config.yaml +24 -0
  33. qadence/logger.py +9 -27
  34. qadence/measurements/shadow.py +3 -16
  35. qadence/ml_tools/config.py +11 -1
  36. qadence/ml_tools/models.py +10 -2
  37. qadence/ml_tools/printing.py +1 -3
  38. qadence/ml_tools/saveload.py +23 -6
  39. qadence/ml_tools/train_grad.py +39 -6
  40. qadence/ml_tools/train_no_grad.py +2 -2
  41. qadence/models/quantum_model.py +13 -6
  42. qadence/noise/readout.py +2 -3
  43. qadence/operations/__init__.py +0 -2
  44. qadence/operations/analog.py +2 -12
  45. qadence/operations/control_ops.py +3 -2
  46. qadence/operations/ham_evo.py +5 -7
  47. qadence/operations/parametric.py +3 -2
  48. qadence/operations/primitive.py +2 -2
  49. qadence/overlap.py +7 -12
  50. qadence/parameters.py +2 -2
  51. qadence/serialization.py +2 -2
  52. qadence/states.py +20 -5
  53. qadence/transpile/block.py +2 -2
  54. qadence/types.py +2 -2
  55. qadence/utils.py +42 -3
  56. {qadence-1.5.2.dist-info → qadence-1.6.1.dist-info}/METADATA +15 -9
  57. {qadence-1.5.2.dist-info → qadence-1.6.1.dist-info}/RECORD +59 -58
  58. {qadence-1.5.2.dist-info → qadence-1.6.1.dist-info}/WHEEL +0 -0
  59. {qadence-1.5.2.dist-info → qadence-1.6.1.dist-info}/licenses/LICENSE +0 -0
qadence/extensions.py CHANGED
@@ -1,16 +1,16 @@
  from __future__ import annotations

  import importlib
+ from logging import getLogger
  from string import Template

  from qadence.backend import Backend
  from qadence.blocks.abstract import TAbstractBlock
- from qadence.logger import get_logger
  from qadence.types import BackendName, DiffMode, Engine

  backends_namespace = Template("qadence.backends.$name")

- logger = get_logger(__name__)
+ logger = getLogger(__name__)


  def _available_engines() -> dict:
@@ -24,7 +24,7 @@ def _available_engines() -> dict:
              res[engine] = DifferentiableBackendCls
          except (ImportError, ModuleNotFoundError):
              pass
-     logger.info(f"Found engines: {res.keys()}")
+     logger.debug(f"Found engines: {res.keys()}")
      return res


@@ -39,7 +39,7 @@ def _available_backends() -> dict:
              res[backend] = BackendCls
          except (ImportError, ModuleNotFoundError):
              pass
-     logger.info(f"Found backends: {res.keys()}")
+     logger.debug(f"Found backends: {res.keys()}")
      return res


@@ -77,16 +77,6 @@ def _validate_diff_mode(backend: Backend, diff_mode: DiffMode) -> None:
          raise TypeError(f"Backend {backend.name} does not support diff_mode {DiffMode.ADJOINT}.")


- def _validate_backend_config(backend: Backend) -> None:
-     if backend.config.use_gradient_checkpointing:
-         # FIXME: Remove in v1.5.0
-         msg = "use_gradient_checkpointing is deprecated."
-         import warnings
-
-         warnings.warn(msg, UserWarning)
-         logger.warn(msg)
-
-
  def _set_backend_config(backend: Backend, diff_mode: DiffMode) -> None:
      """Fallback function for native Qadence backends if extensions is not present.

@@ -96,7 +86,6 @@ def _set_backend_config(backend: Backend, diff_mode: DiffMode) -> None:
      """

      _validate_diff_mode(backend, diff_mode)
-     _validate_backend_config(backend)

      # (1) When using PSR with any backend or (2) we use the backends Pulser or Braket,
      # we have to use gate-level parameters
qadence/log_config.yaml ADDED
@@ -0,0 +1,24 @@
+ version: 1
+ disable_existing_loggers: false
+ formatters:
+   base:
+     format: "%(levelname) -5s %(asctime)s - %(name)s: %(message)s"
+     datefmt: "%Y-%m-%d %H:%M:%S"
+ handlers:
+   console:
+     class: logging.StreamHandler
+     formatter: base
+     stream: ext://sys.stderr
+ loggers:
+   qadence:
+     level: INFO
+     handlers: [console]
+     propagate: yes
+   pyqtorch:
+     level: INFO
+     handlers: [console]
+     propagate: yes
+   script:
+     level: INFO
+     handlers: [console]
+     propagate: yes
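The new YAML file declares the `qadence`, `pyqtorch`, and `script` loggers with a shared stderr handler. For reference, a config like this can be loaded with the standard library; a minimal sketch, assuming PyYAML is installed and using a hypothetical path to the installed file:

import logging.config
from pathlib import Path

import yaml  # assumption: PyYAML is available

# Hypothetical location; adjust to wherever the installed package puts log_config.yaml.
config_path = Path("qadence/log_config.yaml")
with config_path.open() as f:
    logging.config.dictConfig(yaml.safe_load(f))

logging.getLogger("qadence").info("logging configured from YAML")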
qadence/logger.py CHANGED
@@ -1,35 +1,17 @@
  from __future__ import annotations

  import logging
- import os
- import sys
-
- logging_levels = {
-     "DEBUG": logging.DEBUG,
-     "INFO": logging.INFO,
-     "WARNING": logging.WARNING,
-     "ERROR": logging.ERROR,
-     "CRITICAL": logging.CRITICAL,
- }
-
- LOG_STREAM_HANDLER = sys.stdout
-
- DEFAULT_LOGGING_LEVEL = logging.INFO
-
- # FIXME: introduce a better handling of the configuration
- LOGGING_LEVEL = os.environ.get("LOGGING_LEVEL", "warning").upper()
+ from warnings import warn


  def get_logger(name: str) -> logging.Logger:
-     logger: logging.Logger = logging.getLogger(name)
-
-     level = logging_levels.get(LOGGING_LEVEL, DEFAULT_LOGGING_LEVEL)
-     logger.setLevel(level)
+     warn(
+         '"get_logger" will be deprected soon.\
+         Please use "get_script_logger" instead.',
+         DeprecationWarning,
+     )
+     return logging.getLogger(name)

-     formatter = logging.Formatter("%(levelname) -5s %(asctime)s: %(message)s", "%Y-%m-%d %H:%M:%S")
-     # formatter = logging.Formatter(LOG_FORMAT)
-     sh = logging.StreamHandler(LOG_STREAM_HANDLER)
-     sh.setFormatter(formatter)
-     logger.addHandler(sh)
-
-     return logger
+
+ def get_script_logger(name: str = "") -> logging.Logger:
+     return logging.getLogger(f"script.{name}")
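`get_logger` survives only as a deprecated shim; user scripts are expected to move to `get_script_logger`, which hangs loggers under the `script` logger configured in the YAML above. A minimal usage sketch:

from qadence.logger import get_script_logger

# Returns the logger "script.my_experiment", a child of the "script" logger
# declared in log_config.yaml, so it inherits that level and handler.
logger = get_script_logger("my_experiment")
logger.info("training started")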
qadence/measurements/shadow.py CHANGED
@@ -10,13 +10,7 @@ from torch import Tensor
  from qadence.backend import Backend
  from qadence.backends.pyqtorch import Backend as PyQBackend
  from qadence.blocks import AbstractBlock, chain, kron
- from qadence.blocks.block_to_tensor import (
-     HMAT,
-     IMAT,
-     SDAGMAT,
-     ZMAT,
-     block_to_tensor,
- )
+ from qadence.blocks.block_to_tensor import HMAT, IMAT, SDAGMAT, ZMAT, block_to_tensor
  from qadence.blocks.composite import CompositeBlock
  from qadence.blocks.primitive import PrimitiveBlock
  from qadence.blocks.utils import get_pauli_blocks, unroll_block_with_scaling
@@ -24,8 +18,8 @@ from qadence.circuit import QuantumCircuit
  from qadence.engines.differentiable_backend import DifferentiableBackend
  from qadence.noise import Noise
  from qadence.operations import X, Y, Z
- from qadence.states import one_state, zero_state
  from qadence.types import Endianness
+ from qadence.utils import P0_MATRIX, P1_MATRIX

  pauli_gates = [X, Y, Z]

@@ -37,13 +31,6 @@ UNITARY_TENSOR = [
  ]


- # Projector matrices in Big-Endian convention.
- PROJECTOR_MATRICES = {
-     "0": zero_state(n_qubits=1).t() @ zero_state(n_qubits=1),
-     "1": one_state(n_qubits=1).t() @ one_state(n_qubits=1),
- }
-
-
  def identity(n_qubits: int) -> Tensor:
      return torch.eye(2**n_qubits, dtype=torch.complex128)

@@ -113,7 +100,7 @@ def local_shadow(sample: Counter, unitary_ids: list) -> Tensor:
      bitstring = list(sample.keys())[0]
      local_density_matrices = []
      for bit, unitary_id in zip(bitstring, unitary_ids):
-         proj_mat = PROJECTOR_MATRICES[bit]
+         proj_mat = P0_MATRIX if bit == "0" else P1_MATRIX
      unitary_tensor = UNITARY_TENSOR[unitary_id].squeeze(dim=0)
          local_density_matrices.append(
              3 * (unitary_tensor.adjoint() @ proj_mat @ unitary_tensor) - identity(1)
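The per-file projector dictionary is replaced by the shared `P0_MATRIX`/`P1_MATRIX` constants from `qadence.utils`, i.e. the computational-basis projectors |0⟩⟨0| and |1⟩⟨1|. A quick sanity check with plain torch (the explicit tensors below are illustrative, not the library's definitions):

import torch

p0 = torch.tensor([[1, 0], [0, 0]], dtype=torch.complex128)  # |0><0|
p1 = torch.tensor([[0, 0], [0, 1]], dtype=torch.complex128)  # |1><1|

# Completeness and idempotence of the two projectors.
assert torch.allclose(p0 + p1, torch.eye(2, dtype=torch.complex128))
assert torch.allclose(p0 @ p0, p0) and torch.allclose(p1 @ p1, p1)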
qadence/ml_tools/config.py CHANGED
@@ -38,6 +38,16 @@ class TrainConfig:
      """
      checkpoint_best_only: bool = False
      """Write model/optimizer checkpoint only if a metric has improved."""
+     val_every: int | None = None
+     """Calculate validation metric.
+
+     If None, validation check is not performed.
+     """
+     val_epsilon: float = 1e-5
+     """Safety margin to check if validation loss is smaller than the lowest.
+
+     validation loss across previous iterations.
+     """
      validation_criterion: Optional[Callable] = None
      """A boolean function which evaluates a given validation metric is satisfied."""
      trainstop_criterion: Optional[Callable] = None
@@ -59,4 +69,4 @@ class TrainConfig:
          if self.trainstop_criterion is None:
              self.trainstop_criterion = lambda x: x <= self.max_iter
          if self.validation_criterion is None:
-             self.validation_criterion = lambda x: False
+             self.validation_criterion = lambda *x: False
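The default criterion now accepts any arity (`lambda *x: False`) because the training loop below calls it with three arguments: the candidate validation loss, the best loss so far, and `val_epsilon`. A plausible user-supplied criterion, sketched under that calling convention:

def improved(val_loss: float, best_val_loss: float, epsilon: float) -> bool:
    # Accept a new best only if it beats the previous best by more than the margin.
    return val_loss < best_val_loss - epsilon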
qadence/ml_tools/models.py CHANGED
@@ -1,5 +1,6 @@
  from __future__ import annotations

+ from logging import getLogger
  from typing import Any, Counter, List

  import numpy as np
@@ -8,14 +9,13 @@ from torch import Tensor
  from torch.nn import Parameter as TorchParam

  from qadence.backend import ConvertedObservable
- from qadence.logger import get_logger
  from qadence.measurements import Measurements
  from qadence.ml_tools import promote_to_tensor
  from qadence.models import QNN, QuantumModel
  from qadence.noise import Noise
  from qadence.utils import Endianness

- logger = get_logger(__name__)
+ logger = getLogger(__name__)


  def _set_fixed_operation(
@@ -310,3 +310,11 @@ class TransformedModule(torch.nn.Module):
          except Exception as e:
              logger.warning(f"Unable to move {self} to {args}, {kwargs} due to {e}.")
          return self
+
+     @property
+     def device(self) -> torch.device:
+         return (
+             self.model.device
+             if isinstance(self.model, QuantumModel)
+             else self._input_scaling.device
+         )
qadence/ml_tools/printing.py CHANGED
@@ -11,9 +11,7 @@ def print_metrics(loss: float | None, metrics: dict, iteration: int) -> None:
      print(msg)


- def write_tensorboard(
-     writer: SummaryWriter, loss: float | None, metrics: dict, iteration: int
- ) -> None:
+ def write_tensorboard(writer: SummaryWriter, loss: float, metrics: dict, iteration: int) -> None:
      writer.add_scalar("loss", loss, iteration)
      for key, arg in metrics.items():
          writer.add_scalar(key, arg, iteration)
qadence/ml_tools/saveload.py CHANGED
@@ -2,6 +2,7 @@ from __future__ import annotations

  import os
  import re
+ from logging import getLogger
  from pathlib import Path
  from typing import Any

@@ -10,9 +11,7 @@ from nevergrad.optimization.base import Optimizer as NGOptimizer
  from torch.nn import Module
  from torch.optim import Optimizer

- from qadence.logger import get_logger
-
- logger = get_logger(__name__)
+ logger = getLogger(__name__)


  def get_latest_checkpoint_name(folder: Path, type: str) -> Path:
@@ -54,13 +53,31 @@ def load_checkpoint(


  def write_checkpoint(
-     folder: Path, model: Module, optimizer: Optimizer | NGOptimizer, iteration: int
+     folder: Path, model: Module, optimizer: Optimizer | NGOptimizer, iteration: int | str
  ) -> None:
      from qadence.ml_tools.models import TransformedModule
      from qadence.models import QNN, QuantumModel

-     model_checkpoint_name: str = f"model_{type(model).__name__}_ckpt_" + f"{iteration:03n}" + ".pt"
-     opt_checkpoint_name: str = f"opt_{type(optimizer).__name__}_ckpt_" + f"{iteration:03n}" + ".pt"
+     device = None
+     try:
+         # We extract the device from the pyqtorch native circuit
+         device = str(model.device).split(":")[0]  # in case of using several CUDA devices
+     except Exception:
+         pass
+
+     iteration_substring = f"{iteration:03n}" if isinstance(iteration, int) else iteration
+     model_checkpoint_name: str = (
+         f"model_{type(model).__name__}_ckpt_"
+         + f"{iteration_substring}"
+         + f"_device_{device}"
+         + ".pt"
+     )
+     opt_checkpoint_name: str = (
+         f"opt_{type(optimizer).__name__}_ckpt_"
+         + f"{iteration_substring}"
+         + f"_device_{device}"
+         + ".pt"
+     )
      try:
          d = (
              model._to_dict(save_params=True)
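Checkpoint filenames now embed the (coarse) device string and accept a literal tag such as `best` in place of the iteration number. A sketch of what the scheme produces (class and device names are illustrative):

# iteration=5 for a model on CPU might yield:
#   model_QNN_ckpt_005_device_cpu.pt
#   opt_Adam_ckpt_005_device_cpu.pt
# and checkpoint_best_only runs write iteration="best":
#   model_QNN_ckpt_best_device_cpu.pt
assert f"model_QNN_ckpt_{5:03n}_device_cpu.pt" == "model_QNN_ckpt_005_device_cpu.pt"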
qadence/ml_tools/train_grad.py CHANGED
@@ -1,5 +1,7 @@
  from __future__ import annotations

+ import math
+ from logging import getLogger
  from typing import Callable, Union

  from rich.progress import BarColumn, Progress, TaskProgressColumn, TextColumn, TimeRemainingColumn
@@ -11,14 +13,13 @@ from torch.optim import Optimizer
  from torch.utils.data import DataLoader
  from torch.utils.tensorboard import SummaryWriter

- from qadence.logger import get_logger
  from qadence.ml_tools.config import TrainConfig
- from qadence.ml_tools.data import DictDataLoader
+ from qadence.ml_tools.data import DictDataLoader, data_to_device
  from qadence.ml_tools.optimize_step import optimize_step
  from qadence.ml_tools.printing import print_metrics, write_tensorboard
  from qadence.ml_tools.saveload import load_checkpoint, write_checkpoint

- logger = get_logger(__name__)
+ logger = getLogger(__name__)


  def train(
@@ -125,6 +126,22 @@ def train(
      # initialize tensorboard
      writer = SummaryWriter(config.folder, purge_step=init_iter)

+     perform_val = isinstance(config.val_every, int)
+     if perform_val:
+         if not isinstance(dataloader, DictDataLoader):
+             raise ValueError(
+                 "If `config.val_every` is provided as an integer, dataloader must"
+                 "be an instance of `DictDataLoader`."
+             )
+         iter_keys = dataloader.dataloaders.keys()
+         if "train" not in iter_keys or "val" not in iter_keys:
+             raise ValueError(
+                 "If `config.val_every` is provided as an integer, the dictdataloader"
+                 "must have `train` and `val` keys to access the respective dataloaders."
+             )
+         val_dataloader = dataloader.dataloaders["val"]
+         dataloader = dataloader.dataloaders["train"]
+
      ## Training
      progress = Progress(
          TextColumn("[progress.description]{task.description}"),
@@ -135,8 +152,12 @@ def train(
      data_dtype = None
      if dtype:
          data_dtype = float64 if dtype == complex128 else float32
+
+     best_val_loss = math.inf
      with progress:
          dl_iter = iter(dataloader) if dataloader is not None else None
+         if perform_val:
+             dl_iter_val = iter(val_dataloader) if val_dataloader is not None else None

          # outer epoch loop
          for iteration in progress.track(range(init_iter, init_iter + config.max_iter)):
@@ -177,16 +198,28 @@ def train(
              if iteration % config.write_every == 0:
                  write_tensorboard(writer, loss, metrics, iteration)

+             if perform_val:
+                 if iteration % config.val_every == 0:
+                     xs = next(dl_iter_val)
+                     xs_to_device = data_to_device(xs, device=device, dtype=data_dtype)
+                     val_loss, _ = loss_fn(model, xs_to_device)
+                     if config.validation_criterion(val_loss, best_val_loss, config.val_epsilon):  # type: ignore[misc]
+                         best_val_loss = val_loss
+                         if config.folder and config.checkpoint_best_only:
+                             write_checkpoint(config.folder, model, optimizer, iteration="best")
+                     metrics["val_loss"] = val_loss
+                     write_tensorboard(writer, math.nan, metrics, iteration)
+
              if config.folder:
-                 if iteration % config.checkpoint_every == 0:
+                 if iteration % config.checkpoint_every == 0 and not config.checkpoint_best_only:
                      write_checkpoint(config.folder, model, optimizer, iteration)

          except KeyboardInterrupt:
-             print("Terminating training gracefully after the current iteration.")
+             logger.info("Terminating training gracefully after the current iteration.")
              break

      # Final writing and checkpointing
-     if config.folder:
+     if config.folder and not config.checkpoint_best_only:
          write_checkpoint(config.folder, model, optimizer, iteration)
      write_tensorboard(writer, loss, metrics, iteration)
      writer.close()
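Putting the pieces together: validation requires a `DictDataLoader` with `train` and `val` entries. A minimal configuration sketch (the toy data and criterion are placeholders, and the imports assume the usual `qadence.ml_tools` re-exports):

import torch
from torch.utils.data import DataLoader, TensorDataset

from qadence.ml_tools import DictDataLoader, TrainConfig

def loader(n: int) -> DataLoader:
    x = torch.rand(n, 1)
    return DataLoader(TensorDataset(x, torch.sin(x)), batch_size=16)

# `val_every` requires "train" and "val" keys.
data = DictDataLoader({"train": loader(128), "val": loader(32)})

config = TrainConfig(
    max_iter=500,
    val_every=50,
    checkpoint_best_only=True,  # checkpoints written only on validation improvement
    validation_criterion=lambda val_loss, best, eps: val_loss < best - eps,
)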
qadence/ml_tools/train_no_grad.py CHANGED
@@ -1,5 +1,6 @@
  from __future__ import annotations

+ from logging import getLogger
  from typing import Callable

  import nevergrad as ng
@@ -10,7 +11,6 @@ from torch.nn import Module
  from torch.utils.data import DataLoader
  from torch.utils.tensorboard import SummaryWriter

- from qadence.logger import get_logger
  from qadence.ml_tools.config import TrainConfig
  from qadence.ml_tools.data import DictDataLoader
  from qadence.ml_tools.parameters import get_parameters, set_parameters
@@ -18,7 +18,7 @@ from qadence.ml_tools.printing import print_metrics, write_tensorboard
  from qadence.ml_tools.saveload import load_checkpoint, write_checkpoint
  from qadence.ml_tools.tensors import promote_to_tensor

- logger = get_logger(__name__)
+ logger = getLogger(__name__)


  def train(
qadence/models/quantum_model.py CHANGED
@@ -3,6 +3,7 @@ from __future__ import annotations
  import os
  from collections import Counter, OrderedDict
  from dataclasses import asdict
+ from logging import getLogger
  from pathlib import Path
  from typing import Any, Callable, Optional, Sequence

@@ -21,14 +22,13 @@ from qadence.blocks.abstract import AbstractBlock
  from qadence.blocks.utils import chain, unique_parameters
  from qadence.circuit import QuantumCircuit
  from qadence.engines.differentiable_backend import DifferentiableBackend
- from qadence.logger import get_logger
  from qadence.measurements import Measurements
  from qadence.mitigations import Mitigations
  from qadence.noise import Noise
  from qadence.parameters import Parameter
  from qadence.types import DiffMode, Endianness

- logger = get_logger(__name__)
+ logger = getLogger(__name__)


  class QuantumModel(nn.Module):
@@ -44,6 +44,7 @@ class QuantumModel(nn.Module):
      _params: nn.ParameterDict
      _circuit: ConvertedCircuit
      _observable: list[ConvertedObservable] | None
+     logger.debug("Initialised")

      def __init__(
          self,
@@ -185,8 +186,6 @@ class QuantumModel(nn.Module):
          params = self.embedding_fn(self._params, values)
          if noise is None:
              noise = self._noise
-         else:
-             self._noise = noise
          if mitigation is None:
              mitigation = self._mitigation
          return self.backend.sample(
@@ -316,7 +315,7 @@ class QuantumModel(nn.Module):
          try:
              torch.save(self._to_dict(save_params), folder / Path(file_name))
          except Exception as e:
-             print(f"Unable to write QuantumModel to disk due to {e}")
+             logger.error(f"Unable to write QuantumModel to disk due to {e}")

      @classmethod
      def load(
@@ -333,7 +332,7 @@ class QuantumModel(nn.Module):
          try:
              qm_pt = torch.load(file_path, map_location=map_location)
          except Exception as e:
-             print(f"Unable to load QuantumModel due to {e}")
+             logger.error(f"Unable to load QuantumModel due to {e}")
          return cls._from_dict(qm_pt, as_torch)

      def assign_parameters(self, values: dict[str, Tensor]) -> Any:
@@ -365,3 +364,11 @@ class QuantumModel(nn.Module):
          except Exception as e:
              logger.warning(f"Unable to move {self} to {args}, {kwargs} due to {e}.")
          return self
+
+     @property
+     def device(self) -> torch.device:
+         return (
+             self._circuit.native.device
+             if self.backend.backend.name == "pyqtorch"  # type: ignore[union-attr]
+             else torch.device("cpu")
+         )
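`QuantumModel` (and `TransformedModule` above) now expose a read-only `device` property. A minimal sketch of inspecting it (the circuit construction is illustrative):

import torch
from qadence import Parameter, QuantumCircuit, QuantumModel, RX

circuit = QuantumCircuit(1, RX(0, Parameter("theta")))
model = QuantumModel(circuit)  # defaults to the pyqtorch backend

# pyqtorch models report the native circuit's device; other backends
# fall back to torch.device("cpu").
print(model.device)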
qadence/noise/readout.py CHANGED
@@ -2,14 +2,13 @@ from __future__ import annotations

  from collections import Counter
  from enum import Enum
+ from logging import getLogger

  import torch
  from torch import Tensor
  from torch.distributions import normal, poisson, uniform

- from qadence.logger import get_logger
-
- logger = get_logger(__name__)
+ logger = getLogger(__name__)


  class WhiteNoise(Enum):
qadence/operations/__init__.py CHANGED
@@ -10,7 +10,6 @@ from .analog import (
      AnalogSWAP,
      ConstantAnalogRotation,
      entangle,
-     wait,
  )
  from .control_ops import (
      CNOT,
@@ -89,7 +88,6 @@ __all__ = [
      "CSWAP",
      "MCPHASE",
      "Toffoli",
-     "wait",
      "entangle",
      "AnalogEntanglement",
      "AnalogInteraction",
qadence/operations/analog.py CHANGED
@@ -1,6 +1,7 @@
  from __future__ import annotations

  from dataclasses import dataclass
+ from logging import getLogger
  from typing import Any, Tuple

  import numpy as np
@@ -19,7 +20,6 @@ from qadence.blocks.utils import (
      add,  # noqa
      kron,
  )
- from qadence.logger import get_logger
  from qadence.parameters import (
      Parameter,
      ParamMap,
@@ -29,7 +29,7 @@ from qadence.types import PI, OpName, TNumber, TParameter
  from .ham_evo import HamEvo
  from .primitive import I, X, Z

- logger = get_logger(__name__)
+ logger = getLogger(__name__)


  class AnalogSWAP(HamEvo):
@@ -84,16 +84,6 @@ def AnalogInteraction(
      return InteractionBlock(parameters=ps, qubit_support=q, add_pattern=add_pattern)


- # FIXME: Remove in v1.5.0
- def wait(
-     duration: TNumber | sympy.Basic,
-     qubit_support: str | QubitSupport | tuple = "global",
-     add_pattern: bool = True,
- ) -> InteractionBlock:
-     logger.warning("The alias `wait` is deprecated, please use `AnalogInteraction`")
-     return AnalogInteraction(duration, qubit_support, add_pattern)
-
-
  # FIXME: clarify the usage of this gate, rename more formally, and implement in PyQ
  @dataclass(eq=False, repr=False)
  class AnalogEntanglement(AnalogBlock):
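The deprecated `wait` alias is removed in 1.6.x; callers migrate by a direct rename, keeping the same arguments (the top-level re-export is assumed here):

from qadence import AnalogInteraction

# Previously: wait(duration=100, qubit_support="global")
block = AnalogInteraction(duration=100, qubit_support="global")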
qadence/operations/control_ops.py CHANGED
@@ -1,5 +1,7 @@
  from __future__ import annotations

+ from logging import getLogger
+
  import sympy
  import torch
  from rich.console import Console, RenderableType
@@ -16,7 +18,6 @@ from qadence.blocks.utils import (
      chain,
      kron,
  )
- from qadence.logger import get_logger
  from qadence.parameters import (
      Parameter,
      evaluate,
@@ -26,7 +27,7 @@ from qadence.types import OpName, TNumber, TParameter
  from .parametric import PHASE, RX, RY, RZ
  from .primitive import SWAP, I, N, X, Y, Z

- logger = get_logger(__name__)
+ logger = getLogger(__name__)


  class CNOT(ControlBlock):
qadence/operations/ham_evo.py CHANGED
@@ -2,6 +2,7 @@ from __future__ import annotations

  from copy import deepcopy
  from functools import cached_property
+ from logging import getLogger
  from typing import Any, Union

  import numpy as np
@@ -9,10 +10,7 @@ import sympy
  import torch
  from torch import Tensor

- from qadence.blocks import (
-     AbstractBlock,
-     TimeEvolutionBlock,
- )
+ from qadence.blocks import AbstractBlock, TimeEvolutionBlock
  from qadence.blocks.block_to_tensor import block_to_tensor
  from qadence.blocks.utils import (
      add,  # noqa
@@ -22,7 +20,6 @@ from qadence.blocks.utils import (
      expressions,
  )
  from qadence.decompose import lie_trotter_suzuki
- from qadence.logger import get_logger
  from qadence.parameters import (
      Parameter,
      ParamMap,
@@ -30,9 +27,8 @@ from qadence.parameters import (
      extract_original_param_entry,
  )
  from qadence.types import LTSOrder, OpName, TGenerator, TParameter
- from qadence.utils import eigenvalues

- logger = get_logger(__name__)
+ logger = getLogger(__name__)


  class HamEvo(TimeEvolutionBlock):
@@ -112,6 +108,8 @@ class HamEvo(TimeEvolutionBlock):
      def eigenvalues_generator(
          self, max_num_evals: int | None = None, max_num_gaps: int | None = None
      ) -> Tensor:
+         from qadence.utils import eigenvalues
+
          if isinstance(self.generator, AbstractBlock):
              generator_tensor = block_to_tensor(self.generator)
          elif isinstance(self.generator, Tensor):
qadence/operations/parametric.py CHANGED
@@ -1,5 +1,7 @@
  from __future__ import annotations

+ from logging import getLogger
+
  import numpy as np
  import sympy
  import torch
@@ -13,7 +15,6 @@ from qadence.blocks.utils import (
      add,  # noqa
      chain,
  )
- from qadence.logger import get_logger
  from qadence.parameters import (
      Parameter,
      ParamMap,
@@ -23,7 +24,7 @@ from qadence.types import OpName, TNumber, TParameter

  from .primitive import I, X, Y, Z

- logger = get_logger(__name__)
+ logger = getLogger(__name__)


  class PHASE(ParametricBlock):
qadence/operations/primitive.py CHANGED
@@ -1,5 +1,6 @@
  from __future__ import annotations

+ from logging import getLogger
  from typing import Union

  import numpy as np
@@ -17,13 +18,12 @@ from qadence.blocks.utils import (
      chain,
      kron,
  )
- from qadence.logger import get_logger
  from qadence.parameters import (
      Parameter,
  )
  from qadence.types import OpName, TNumber

- logger = get_logger(__name__)
+ logger = getLogger(__name__)


  class X(PrimitiveBlock):