qadence 1.7.4__py3-none-any.whl → 1.7.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -59,9 +59,13 @@ class AddressingPattern:
     ) -> dict:
         # augment weight dict if needed
         weights = {
-            i: Parameter(0.0)
-            if i not in weights
-            else (Parameter(weights[i]) if not isinstance(weights[i], Parameter) else weights[i])
+            i: (
+                Parameter(0.0)
+                if i not in weights
+                else (
+                    Parameter(weights[i]) if not isinstance(weights[i], Parameter) else weights[i]
+                )
+            )
             for i in range(self.n_qubits)
         }
 
qadence/backends/api.py CHANGED
@@ -3,6 +3,9 @@ from __future__ import annotations
 from qadence.backend import Backend, BackendConfiguration
 from qadence.engines.differentiable_backend import DifferentiableBackend
 from qadence.extensions import (
+    BackendNotFoundError,
+    ConfigNotFoundError,
+    EngineNotFoundError,
     import_backend,
     import_config,
     import_engine,
@@ -49,12 +52,9 @@ def backend_factory(
         diff_backend_cls = import_engine(backend_inst.engine)
         backend_inst = diff_backend_cls(backend=backend_inst, diff_mode=DiffMode(diff_mode))  # type: ignore[operator]
         return backend_inst
-    except Exception as e:
-        msg = f"The requested backend '{backend}' is either not installed\
-            or could not be imported due to {e}."
-        logger.error(msg)
-        raise Exception(msg)
-    # Set backend configurations which depend on the differentiation mode
+    except (BackendNotFoundError, EngineNotFoundError, ConfigNotFoundError) as e:
+        logger.error(e.msg)
+        raise e


 def config_factory(backend_name: BackendName | str, config: dict) -> BackendConfiguration:
@@ -62,6 +62,7 @@ def config_factory(backend_name: BackendName | str, config: dict) -> BackendConf
     try:
         BackendConfigCls = import_config(backend_name)
         cfg = BackendConfigCls(**config)  # type: ignore[operator]
-    except Exception as e:
-        logger.debug(f"Unable to import config for backend {backend_name} due to {e}.")
+    except ConfigNotFoundError as e:
+        logger.error(e.msg)
+        raise e
     return cfg
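
Note: with this change, backend_factory surfaces the typed errors defined in qadence.extensions instead of a generic Exception. A minimal sketch of how calling code might catch them (the backend and diff_mode values below are illustrative only):

from qadence.backends.api import backend_factory
from qadence.extensions import (
    BackendNotFoundError,
    ConfigNotFoundError,
    EngineNotFoundError,
)

try:
    backend = backend_factory("pyqtorch", diff_mode="ad")
except (BackendNotFoundError, EngineNotFoundError, ConfigNotFoundError) as error:
    # each error carries a descriptive message in its msg attribute
    print(error.msg)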
qadence/backends/gpsr.py CHANGED
@@ -10,7 +10,7 @@ from qadence.types import PI
 from qadence.utils import _round_complex
 
 
-def general_psr(spectrum: Tensor, n_eqs: int | None = None, shift_prefac: float = 0.5) -> Callable:
+def general_psr(spectrum: Tensor, shift_prefac: float = 0.5) -> Callable:
     diffs = _round_complex(spectrum - spectrum.reshape(-1, 1))
     sorted_unique_spectral_gaps = torch.unique(torch.abs(torch.tril(diffs)))
 
@@ -7,11 +7,11 @@ from operator import add
 from typing import Any, Callable, Dict
 
 import jax.numpy as jnp
-from horqrux.abstract import Primitive as Gate
 from horqrux.analog import _HamiltonianEvolution as NativeHorqHEvo
 from horqrux.apply import apply_gate
 from horqrux.parametric import RX, RY, RZ
 from horqrux.primitive import NOT, SWAP, H, I, X, Y, Z
+from horqrux.primitive import Primitive as Gate
 from horqrux.utils import inner
 from jax import Array
 from jax.scipy.linalg import expm
@@ -71,19 +71,16 @@ class Backend(BackendInterface):
     def observable(self, observable: AbstractBlock, n_qubits: int) -> ConvertedObservable:
         # make sure only leaves, i.e. primitive blocks are scaled
         transpilations = [
-            lambda block: chain_single_qubit_ops(block)
-            if self.config.use_single_qubit_composition
-            else flatten(block),
+            lambda block: (
+                chain_single_qubit_ops(block)
+                if self.config.use_single_qubit_composition
+                else flatten(block)
+            ),
             scale_primitive_blocks_only,
         ]
         block = transpile(*transpilations)(observable)  # type: ignore[call-overload]
         operations = convert_block(block, n_qubits, self.config)
-        obs_cls = (
-            pyq.DiagonalObservable
-            if block._is_diag_pauli and not block.is_parametric
-            else pyq.Observable
-        )
-        native = obs_cls(n_qubits=n_qubits, operations=operations)
+        native = pyq.Observable(operations=operations)
         return ConvertedObservable(native=native, abstract=block, original=observable)
 
     def run(
@@ -140,7 +137,7 @@ class Backend(BackendInterface):
         )
         observable = observable if isinstance(observable, list) else [observable]
         _expectation = torch.hstack(
-            [obs.native(state, param_values).reshape(-1, 1) for obs in observable]
+            [obs.native.expectation(state, param_values).reshape(-1, 1) for obs in observable]
         )
         return _expectation
 
@@ -169,7 +166,7 @@ class Backend(BackendInterface):
         observables = observable if isinstance(observable, list) else [observable]
         for vals in to_list_of_dicts(param_values):
             wf = self.run(circuit, vals, state, endianness, pyqify_state=True, unpyqify_state=False)
-            exs = torch.cat([obs.native(wf, vals) for obs in observables], 0)
+            exs = torch.cat([obs.native.expectation(wf, vals) for obs in observables], 0)
             list_expvals.append(exs)
 
         batch_expvals = torch.vstack(list_expvals)
@@ -118,7 +118,7 @@ def convert_block(
         # ]
         return [PyQHamiltonianEvolution(qubit_support, n_qubits, block, config)]
     elif isinstance(block, MatrixBlock):
-        return [pyq.primitive.Primitive(block.matrix, block.qubit_support)]
+        return [pyq.primitives.Primitive(block.matrix, block.qubit_support)]
     elif isinstance(block, CompositeBlock):
         ops = list(flatten(*(convert_block(b, n_qubits, config) for b in block.blocks)))
         if isinstance(block, AddBlock):
@@ -374,8 +374,6 @@ class PyQHamiltonianEvolution(Module):
             state,
             self.unitary(values),
             self.qubit_support,
-            self.n_qubits,
-            self.batch_size,
         )
 
         return result
qadence/backends/utils.py CHANGED
@@ -9,7 +9,7 @@ import pyqtorch as pyq
 import torch
 from numpy.typing import ArrayLike
 from pyqtorch.apply import apply_operator
-from pyqtorch.parametric import Parametric as PyQParametric
+from pyqtorch.primitives import Parametric as PyQParametric
 from torch import (
     Tensor,
     cat,
@@ -129,9 +129,11 @@ class CompositeBlock(AbstractBlock):
         from qadence.blocks.utils import _construct, tag
 
         blocks = [
-            getattr(operations, b["type"])._from_dict(b)
-            if hasattr(operations, b["type"])
-            else getattr(qadenceblocks, b["type"])._from_dict(b)
+            (
+                getattr(operations, b["type"])._from_dict(b)
+                if hasattr(operations, b["type"])
+                else getattr(qadenceblocks, b["type"])._from_dict(b)
+            )
             for b in d["blocks"]
         ]
         block = _construct(cls, blocks)  # type: ignore[arg-type]
qadence/blocks/utils.py CHANGED
@@ -263,11 +263,29 @@ def expression_to_uuids(block: AbstractBlock) -> dict[Expr, list[str]]:
     return expr_to_uuid
 
 
-def uuid_to_eigen(block: AbstractBlock) -> dict[str, Tensor]:
+def uuid_to_eigen(
+    block: AbstractBlock, rescale_eigenvals_timeevo: bool = False
+) -> dict[str, Tensor]:
     """Creates a mapping between a parametric block's param_id and its eigenvalues.
 
     This method is needed for constructing the PSR rules for a given block.
 
+    A PSR shift factor is also added to the mapping to handle the time
+    evolution case, which requires rescaling.
+
+    Args:
+        block (AbstractBlock): Block input.
+        rescale_eigenvals_timeevo (bool, optional): If True, rescale the
+            eigenvalues (by a factor of two) and the PSR shift factor
+            for the TimeEvolutionBlock case, so that HamEvo blocks can
+            be differentiated with GPSR. Defaults to False.
+
+    Returns:
+        dict[str, Tensor]: Mapping between the block's param_id, its
+            eigenvalues and the PSR shift factor.
+
     !!! warn
         Will ignore eigenvalues of AnalogBlocks that are not yet computed.
     """
@@ -276,7 +294,23 @@ def uuid_to_eigen(block: AbstractBlock) -> dict[str, Tensor]:
     for uuid, b in uuid_to_block(block).items():
         if b.eigenvalues_generator is not None:
             if b.eigenvalues_generator.numel() > 0:
-                result[uuid] = b.eigenvalues_generator
+                # GPSR assumes a factor 0.5 for differentiation
+                # so need rescaling
+                if isinstance(b, TimeEvolutionBlock) and rescale_eigenvals_timeevo:
+                    if b.eigenvalues_generator.numel() > 1:
+                        result[uuid] = (
+                            b.eigenvalues_generator * 2.0,
+                            0.5,
+                        )
+                    else:
+                        result[uuid] = (
+                            b.eigenvalues_generator * 2.0,
+                            1.0 / (b.eigenvalues_generator.item() * 2.0)
+                            if len(b.eigenvalues_generator) == 1
+                            else 1.0,
+                        )
+                else:
+                    result[uuid] = (b.eigenvalues_generator, 1.0)
 
     # leave only angle parameter uuid with eigenvals for ConstantAnalogRotation block
     if isinstance(block, ConstantAnalogRotation):
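
For illustration: uuid_to_eigen now maps each parameter uuid to an (eigenvalues, shift_factor) pair rather than a bare eigenvalue tensor, and the shift factor only deviates from 1.0 for TimeEvolutionBlock when rescale_eigenvals_timeevo=True. A small sketch with an arbitrary parametric rotation:

from qadence import RX
from qadence.blocks.utils import uuid_to_eigen

block = RX(0, "theta")
for uuid, (eigenvalues, shift_factor) in uuid_to_eigen(block).items():
    # a plain parametric rotation keeps the default shift factor of 1.0
    print(uuid, eigenvalues, shift_factor)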
@@ -7,61 +7,59 @@ import sympy
 from qadence.blocks import KronBlock, kron
 from qadence.operations import RY
 from qadence.parameters import FeatureParameter, Parameter
-from qadence.types import PI
+from qadence.types import PI, BasisSet, MultivariateStrategy, ReuploadScaling
 
 
-def generator_prefactor(spectrum: str, qubit_index: int) -> float | int:
+def generator_prefactor(reupload_scaling: ReuploadScaling, qubit_index: int) -> float | int:
     """Converts a spectrum string, e.g. tower or exponential.
 
     The result is the correct generator prefactor.
     """
-    spectrum = spectrum.lower()
     conversion_dict: dict[str, float | int] = {
-        "simple": 1,
-        "tower": qubit_index + 1,
-        "exponential": 2 * PI / (2 ** (qubit_index + 1)),
+        ReuploadScaling.CONSTANT: 1,
+        ReuploadScaling.TOWER: qubit_index + 1,
+        ReuploadScaling.EXP: 2 * PI / (2 ** (qubit_index + 1)),
     }
-    return conversion_dict[spectrum]
+    return conversion_dict[reupload_scaling]
 
 
-def basis_func(basis: str, x: Parameter) -> Parameter | sympy.Expr:
-    basis = basis.lower()
+def basis_func(basis: BasisSet, x: Parameter) -> Parameter | sympy.Expr:
     conversion_dict: dict[str, Parameter | sympy.Expr] = {
-        "fourier": x,
-        "chebyshev": 2 * sympy.acos(x),
+        BasisSet.FOURIER: x,
+        BasisSet.CHEBYSHEV: 2 * sympy.acos(x),
     }
     return conversion_dict[basis]
 
 
 def build_idx_fms(
-    basis: str,
+    basis: BasisSet,
     fm_pauli: Type[RY],
-    fm_strategy: str,
+    multivariate_strategy: MultivariateStrategy,
     n_features: int,
     n_qubits: int,
-    spectrum: str,
+    reupload_scaling: ReuploadScaling,
 ) -> list[KronBlock]:
     """Builds the index feature maps based on the given parameters.
 
     Args:
-        basis (str): Type of basis chosen for the feature map.
+        basis (BasisSet): Type of basis chosen for the feature map.
         fm_pauli (PrimitiveBlock type): The chosen Pauli rotation type.
-        fm_strategy (str): The feature map strategy to be used. Possible values are
-            'parallel' or 'serial'.
+        multivariate_strategy (MultivariateStrategy): The strategy used for encoding
+            the multivariate feature map.
         n_features (int): The number of features.
         n_qubits (int): The number of qubits.
-        spectrum (str): The chosen spectrum.
+        reupload_scaling (ReuploadScaling): The chosen scaling for the reupload.
 
     Returns:
         List[KronBlock]: The list of index feature maps.
     """
     idx_fms = []
     for i in range(n_features):
-        target_qubits = get_fm_qubits(fm_strategy, i, n_qubits, n_features)
+        target_qubits = get_fm_qubits(multivariate_strategy, i, n_qubits, n_features)
         param = FeatureParameter(f"x{i}")
         block = kron(
             *[
-                fm_pauli(qubit, generator_prefactor(spectrum, j) * basis_func(basis, param))
+                fm_pauli(qubit, generator_prefactor(reupload_scaling, j) * basis_func(basis, param))
                 for j, qubit in enumerate(target_qubits)
             ]
         )
@@ -70,12 +68,14 @@ def build_idx_fms(
     return idx_fms
 
 
-def get_fm_qubits(fm_strategy: str, i: int, n_qubits: int, n_features: int) -> Iterable:
+def get_fm_qubits(
+    multivariate_strategy: MultivariateStrategy, i: int, n_qubits: int, n_features: int
+) -> Iterable:
     """Returns the list of target qubits for the given feature map strategy and feature index.
 
     Args:
-        fm_strategy (str): The feature map strategy to be used. Possible values
-            are 'parallel' or 'serial'.
+        multivariate_strategy (MultivariateStrategy): The strategy used for encoding
+            the multivariate feature map.
         i (int): The feature index.
         n_qubits (int): The number of qubits.
         n_features (int): The number of features.
@@ -86,11 +86,11 @@ def get_fm_qubits(fm_strategy: str, i: int, n_qubits: int, n_features: int) -> I
     Raises:
         ValueError: If the feature map strategy is not implemented.
     """
-    if fm_strategy == "parallel":
+    if multivariate_strategy == MultivariateStrategy.PARALLEL:
         n_qubits_per_feature = int(n_qubits / n_features)
         target_qubits = range(i * n_qubits_per_feature, (i + 1) * n_qubits_per_feature)
-    elif fm_strategy == "serial":
+    elif multivariate_strategy == MultivariateStrategy.SERIES:
         target_qubits = range(0, n_qubits)
     else:
-        raise ValueError(f"Feature map strategy {fm_strategy} not implemented.")
+        raise ValueError(f"Multivariate strategy {multivariate_strategy} not implemented.")
     return target_qubits
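
For illustration: the feature-map helpers now accept the BasisSet, ReuploadScaling and MultivariateStrategy enums rather than free-form strings. A sketch of the updated call, with argument values chosen arbitrarily and build_idx_fms assumed to be imported from the constructors module shown above:

from qadence.operations import RY
from qadence.types import BasisSet, MultivariateStrategy, ReuploadScaling

blocks = build_idx_fms(
    basis=BasisSet.FOURIER,
    fm_pauli=RY,
    multivariate_strategy=MultivariateStrategy.PARALLEL,
    n_features=2,
    n_qubits=4,
    reupload_scaling=ReuploadScaling.TOWER,
)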
@@ -52,7 +52,7 @@ class DifferentiableExpectation:
             return expectation_fn(state, values, psr_params)
 
         uuid_to_eigs = {
-            k: tensor_to_jnp(v) for k, v in uuid_to_eigen(self.circuit.abstract.block).items()
+            k: tensor_to_jnp(v[0]) for k, v in uuid_to_eigen(self.circuit.abstract.block).items()
         }
         self.psr_params = {
             k: self.param_values[k] for k in uuid_to_eigs.keys()
@@ -6,7 +6,7 @@ from functools import partial
 from typing import Any, Callable, Sequence
 
 import torch
-from pyqtorch.adjoint import AdjointExpectation
+from pyqtorch.differentiation import AdjointExpectation
 from torch import Tensor
 from torch.autograd import Function
 
@@ -211,24 +211,28 @@ class DifferentiableExpectation:
         circuit: QuantumCircuit,
         observable: list[AbstractBlock],
         psr_fn: Callable,
-        **psr_args: int | float | None,
+        **psr_args: float | None,
     ) -> dict[str, Callable]:
         """Create a mapping between parameters and PSR functions."""
 
-        uuid_to_eigs = uuid_to_eigen(circuit.block)
+        uuid_to_eigs = uuid_to_eigen(circuit.block, rescale_eigenvals_timeevo=True)
         # We currently rely on implicit ordering to match the PSR to the parameter,
         # because we want to cache PSRs.
 
         param_to_psr = OrderedDict()
-        for param_id, eigenvalues in uuid_to_eigs.items():
+        for param_id, eigenvalues_shift_factor in uuid_to_eigs.items():
+            eigenvalues, shift_factor = eigenvalues_shift_factor
             if eigenvalues is None:
                 raise ValueError(
                     f"Eigenvalues are not defined for param_id {param_id}\n"
                     # f"of type {type(block)}.\n"
                     "PSR cannot be defined in that case."
                 )
-
-            param_to_psr[param_id] = psr_fn(eigenvalues, **psr_args)
+            if shift_factor == 1:
+                param_to_psr[param_id] = psr_fn(eigenvalues, **psr_args)
+            else:
+                psr_args_factor = {k: v * shift_factor for k, v in psr_args.items()}
+                param_to_psr[param_id] = psr_fn(eigenvalues, **psr_args_factor)
         for obs in observable:
            for param_id, _ in uuid_to_eigen(obs).items():
                 # We need the embedded fixed params of the observable in the param_values dict
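
Together with the uuid_to_eigen change above, the PSR construction can now rescale the shift prefactor for time-evolution blocks. A hedged end-to-end sketch of differentiating a HamEvo block with GPSR; the circuit, observable and parameter value are toy choices, not taken from this diff:

import torch
from qadence import FeatureParameter, HamEvo, QuantumCircuit, QuantumModel, Z

t = FeatureParameter("t")
circuit = QuantumCircuit(1, HamEvo(Z(0), t))
model = QuantumModel(circuit, observable=Z(0), diff_mode="gpsr")

values = {"t": torch.tensor([0.5], requires_grad=True)}
expectation = model.expectation(values)
grad = torch.autograd.grad(expectation.sum(), values["t"])[0]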
qadence/extensions.py CHANGED
@@ -15,6 +15,22 @@ EngineClsType = TypeVar("EngineClsType", bound=DifferentiableBackend)
 logger = getLogger(__name__)
 
 
+class ConfigNotFoundError(ModuleNotFoundError):
+    ...
+
+
+class BackendNotFoundError(ModuleNotFoundError):
+    ...
+
+
+class EngineNotFoundError(ModuleNotFoundError):
+    ...
+
+
+class SupportedGatesNotFoundError(ModuleNotFoundError):
+    ...
+
+
 def import_config(backend_name: str | BackendName) -> BackendConfiguration:
     module_path = f"qadence.backends.{backend_name}.config"
     cfg: BackendConfiguration
@@ -22,7 +38,8 @@ def import_config(backend_name: str | BackendName) -> BackendConfiguration:
         module = importlib.import_module(module_path)
         cfg = getattr(module, "Configuration")
     except (ModuleNotFoundError, ImportError) as e:
-        raise type(e)(f"Failed to import backend config of {backend_name} due to {e}.") from e
+        msg = f"Failed to import backend config for '{backend_name}' due to: '{e.msg}'."
+        raise ConfigNotFoundError(msg)
     return cfg
 
 
@@ -37,7 +54,8 @@ def import_backend(backend_name: str | BackendName) -> Backend:
     try:
         module = importlib.import_module(module_path)
     except (ModuleNotFoundError, ImportError) as e:
-        raise type(e)
+        msg = f"Failed to import backend '{backend_name}' due to: '{e.msg}'."
+        raise BackendNotFoundError(msg)
     backend = getattr(module, "Backend")
     return backend
 
@@ -48,8 +66,8 @@ def _available_backends() -> dict[BackendName, Backend]:
     for backend in BackendName.list():
         try:
             res[backend] = import_backend(backend)
-        except (ModuleNotFoundError, ImportError) as e:
-            raise type(e)(f"Failed to import backend {backend} due to {e}.") from e
+        except BackendNotFoundError as e:
+            raise e
     logger.debug(f"Found backends: {res.keys()}")
     return res
 
@@ -61,7 +79,8 @@ def import_engine(engine_name: str | Engine) -> DifferentiableBackend:
         module = importlib.import_module(module_path)
         engine = getattr(module, "DifferentiableBackend")
     except (ModuleNotFoundError, ImportError) as e:
-        raise type(e)
+        msg = f"Failed to import engine '{engine_name}' due to: '{e.msg}'."
+        raise EngineNotFoundError(msg)
     return engine
 
 
@@ -71,8 +90,8 @@ def _available_engines() -> dict[Engine, DifferentiableBackend]:
     for engine in Engine.list():
         try:
             res[engine] = import_engine(engine)
-        except (ModuleNotFoundError, ImportError) as e:
-            raise type(e)(f"Failed to import engine {engine} due to {e}.") from e
+        except EngineNotFoundError as e:
+            raise e
     logger.debug(f"Found engines: {res.keys()}")
     return res
 
@@ -87,7 +106,8 @@ def _supported_gates(backend_name: str) -> list[TAbstractBlock]:
     try:
         module = importlib.import_module(module_path)
     except (ModuleNotFoundError, ImportError) as e:
-        raise type(e)(f"Failed to import backend module for {backend_name} due to {e}.") from e
+        msg = f"Failed to import supported gates for '{backend_name}' due to: '{e.msg}'."
+        raise SupportedGatesNotFoundError(msg)
     _supported_gates = getattr(module, "supported_gates")
     return [getattr(operations, gate) for gate in _supported_gates]
 
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from .config import AnsatzConfig, FeatureMapConfig, TrainConfig
+from .config import AnsatzConfig, Callback, FeatureMapConfig, TrainConfig
 from .constructors import create_ansatz, create_fm_blocks, observable_from_config
 from .data import DictDataLoader, InfiniteTensorDataset, to_dataloader
 from .models import QNN
@@ -23,6 +23,7 @@ __all__ = [
     "observable_from_config",
     "QNN",
     "TrainConfig",
+    "Callback",
     "train_with_grad",
     "train_gradient_free",
     "write_checkpoint",