qadence 1.6.3__py3-none-any.whl → 1.7.1__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (39)
  1. qadence/__init__.py +2 -2
  2. qadence/backends/api.py +47 -60
  3. qadence/backends/gpsr.py +1 -0
  4. qadence/backends/pyqtorch/backend.py +1 -2
  5. qadence/backends/pyqtorch/config.py +5 -0
  6. qadence/backends/pyqtorch/convert_ops.py +83 -10
  7. qadence/backends/utils.py +62 -7
  8. qadence/blocks/abstract.py +7 -0
  9. qadence/blocks/embedding.py +17 -12
  10. qadence/blocks/matrix.py +1 -1
  11. qadence/blocks/primitive.py +1 -1
  12. qadence/constructors/__init__.py +2 -0
  13. qadence/constructors/hamiltonians.py +38 -1
  14. qadence/draw/utils.py +1 -1
  15. qadence/execution.py +11 -3
  16. qadence/extensions.py +62 -36
  17. qadence/ml_tools/__init__.py +11 -3
  18. qadence/ml_tools/config.py +283 -2
  19. qadence/ml_tools/constructors.py +796 -0
  20. qadence/ml_tools/models.py +373 -251
  21. qadence/ml_tools/printing.py +5 -2
  22. qadence/ml_tools/saveload.py +42 -18
  23. qadence/ml_tools/train_grad.py +48 -14
  24. qadence/ml_tools/utils.py +2 -8
  25. qadence/{models/quantum_model.py → model.py} +178 -10
  26. qadence/operations/ham_evo.py +10 -0
  27. qadence/overlap.py +1 -1
  28. qadence/parameters.py +10 -1
  29. qadence/register.py +98 -22
  30. qadence/serialization.py +6 -6
  31. qadence/types.py +44 -0
  32. qadence/utils.py +2 -8
  33. {qadence-1.6.3.dist-info → qadence-1.7.1.dist-info}/METADATA +7 -6
  34. {qadence-1.6.3.dist-info → qadence-1.7.1.dist-info}/RECORD +36 -38
  35. {qadence-1.6.3.dist-info → qadence-1.7.1.dist-info}/WHEEL +1 -1
  36. qadence/finitediff.py +0 -47
  37. qadence/models/__init__.py +0 -7
  38. qadence/models/qnn.py +0 -265
  39. {qadence-1.6.3.dist-info → qadence-1.7.1.dist-info}/licenses/LICENSE +0 -0
qadence/__init__.py CHANGED
@@ -49,7 +49,7 @@ from .exceptions import *
  from .execution import *
  from .measurements import *
  from .ml_tools import *
- from .models import *
+ from .model import *
  from .noise import *
  from .operations import *
  from .overlap import *
@@ -82,7 +82,7 @@ list_of_submodules = [
      ".execution",
      ".measurements",
      ".ml_tools",
-     ".models",
+     ".model",
      ".operations",
      ".overlap",
      ".parameters",
qadence/backends/api.py CHANGED
@@ -2,10 +2,17 @@ from __future__ import annotations

  from qadence.backend import Backend, BackendConfiguration
  from qadence.engines.differentiable_backend import DifferentiableBackend
- from qadence.extensions import available_backends, available_engines, set_backend_config
- from qadence.types import BackendName, DiffMode, Engine
+ from qadence.extensions import (
+     import_backend,
+     import_config,
+     import_engine,
+     set_backend_config,
+ )
+ from qadence.logger import get_logger
+ from qadence.types import BackendName, DiffMode

  __all__ = ["backend_factory", "config_factory"]
+ logger = get_logger(__name__)


  def backend_factory(
@@ -14,67 +21,47 @@ def backend_factory(
      configuration: BackendConfiguration | dict | None = None,
  ) -> Backend | DifferentiableBackend:
      backend_inst: Backend | DifferentiableBackend
-     backends = available_backends()
      try:
-         backend_name = BackendName(backend)
-     except ValueError:
-         raise NotImplementedError(f"The requested backend '{backend}' is not implemented.")
-     try:
-         BackendCls = backends[backend_name]
-     except Exception as e:
-         raise ImportError(
-             f"The requested backend '{backend_name}' is either not installed\
-             or could not be imported due to {e}."
-         )
-
-     default_config = BackendCls.default_configuration()
-     if configuration is None:
-         configuration = default_config
-     elif isinstance(configuration, dict):
-         configuration = config_factory(backend_name, configuration)
-     else:
-         # NOTE: types have to match exactly, hence we use `type`
-         if not isinstance(configuration, type(BackendCls.default_configuration())):
-             raise ValueError(
-                 f"Given config class '{type(configuration)}' does not match the backend",
-                 f" class: '{BackendCls}'. Expected: '{type(BackendCls.default_configuration())}.'",
-             )
-
-     # Create the backend
-     backend_inst = BackendCls(
-         config=configuration
-         if configuration is not None
-         else BackendCls.default_configuration()  # type: ignore[attr-defined]
-     )
+         BackendCls = import_backend(backend)
+         default_config = BackendCls.default_configuration()
+         if configuration is None:
+             configuration = default_config
+         elif isinstance(configuration, dict):
+             configuration = config_factory(backend, configuration)
+         else:
+             # NOTE: types have to match exactly, hence we use `type`
+             if not isinstance(configuration, type(BackendCls.default_configuration())):
+                 expected_cfg = BackendCls.default_configuration()
+                 raise ValueError(
+                     f"Given config class '{type(configuration)}' does not match the backend",
+                     f" class: '{BackendCls}'. Expected: '{type(expected_cfg)}.'",
+                 )

+         # Instantiate the backend
+         backend_inst = BackendCls(  # type: ignore[operator]
+             config=configuration
+             if configuration is not None
+             else BackendCls.default_configuration()
+         )
+         set_backend_config(backend_inst, diff_mode)
+         # Wrap the quantum Backend in a DifferentiableBackend if a diff_mode is passed.
+         if diff_mode is not None:
+             diff_backend_cls = import_engine(backend_inst.engine)
+             backend_inst = diff_backend_cls(backend=backend_inst, diff_mode=DiffMode(diff_mode))  # type: ignore[operator]
+         return backend_inst
+     except Exception as e:
+         msg = f"The requested backend '{backend}' is either not installed\
+             or could not be imported due to {e}."
+         logger.error(msg)
+         raise Exception(msg)

      # Set backend configurations which depend on the differentiation mode
-     set_backend_config(backend_inst, diff_mode)
-     # Wrap the quantum Backend in a DifferentiableBackend if a diff_mode is passed.
-     if diff_mode is not None:
-         try:
-             engine_name = Engine(backend_inst.engine)
-         except ValueError:
-             raise NotImplementedError(
-                 f"The requested engine '{backend_inst.engine}' is not implemented."
-             )
-         try:
-             diff_backend_cls = available_engines()[engine_name]
-             backend_inst = diff_backend_cls(backend=backend_inst, diff_mode=DiffMode(diff_mode))  # type: ignore[arg-type]
-         except Exception as e:
-             raise ImportError(
-                 f"The requested engine '{engine_name}' is either not installed\
-                 or could not be imported due to {e}."
-             )
-     return backend_inst
-

- def config_factory(name: BackendName | str, config: dict) -> BackendConfiguration:
-     backends = available_backends()

+ def config_factory(backend_name: BackendName | str, config: dict) -> BackendConfiguration:
+     cfg: BackendConfiguration
      try:
-         BackendCls = backends[BackendName(name)]
-     except KeyError:
-         raise NotImplementedError(f"The requested backend '{name}' is not implemented!")
-
-     BackendConfigCls = type(BackendCls.default_configuration())
-     return BackendConfigCls(**config)  # type: ignore[no-any-return]
+         BackendConfigCls = import_config(backend_name)
+         cfg = BackendConfigCls(**config)  # type: ignore[operator]
+     except Exception as e:
+         logger.debug(f"Unable to import config for backend {backend_name} due to {e}.")
+     return cfg
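
Note: a minimal usage sketch of the refactored factory API above, based only on the signatures shown in this diff; the chosen backend, diff mode and configuration value are illustrative.

    from qadence.backends.api import backend_factory, config_factory
    from qadence.types import BackendName, DiffMode

    # Build a pyqtorch configuration from a plain dict, then let the factory
    # wrap the quantum backend in a DifferentiableBackend via diff_mode.
    cfg = config_factory(BackendName.PYQTORCH, {"n_steps_hevo": 500})
    backend = backend_factory(BackendName.PYQTORCH, diff_mode=DiffMode.AD, configuration=cfg)
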
qadence/backends/gpsr.py CHANGED
@@ -126,6 +126,7 @@ def multi_gap_psr(

      # get number of observables from expectation value tensor
      if f_plus.numel() > 1:
+         batch_size = F[0].shape[0]
          n_obs = F[0].shape[1]

      # reshape F vector
qadence/backends/pyqtorch/backend.py CHANGED
@@ -12,6 +12,7 @@ from torch import Tensor
  from qadence.backend import Backend as BackendInterface
  from qadence.backend import ConvertedCircuit, ConvertedObservable
  from qadence.backends.utils import (
+     infer_batchsize,
      pyqify,
      to_list_of_dicts,
      unpyqify,
@@ -31,7 +32,6 @@ from qadence.transpile import (
      transpile,
  )
  from qadence.types import BackendName, Endianness, Engine
- from qadence.utils import infer_batchsize

  from .config import Configuration, default_passes
  from .convert_ops import convert_block
@@ -165,7 +165,6 @@ class Backend(BackendInterface):
                  "Looping expectation does not make sense with batched initial state. "
                  "Define your initial state with `batch_size=1`"
              )
-
          list_expvals = []
          observables = observable if isinstance(observable, list) else [observable]
          for vals in to_list_of_dicts(param_values):
qadence/backends/pyqtorch/config.py CHANGED
@@ -4,6 +4,8 @@ from dataclasses import dataclass
  from logging import getLogger
  from typing import Callable

+ from pyqtorch.utils import SolverType
+
  from qadence.analog import add_background_hamiltonian
  from qadence.backend import BackendConfiguration
  from qadence.transpile import (
@@ -41,6 +43,9 @@ class Configuration(BackendConfiguration):
      algo_hevo: AlgoHEvo = AlgoHEvo.EXP
      """Determine which kind of Hamiltonian evolution algorithm to use."""

+     ode_solver: SolverType = SolverType.DP5_SE
+     """Determine which ODE solver to use for time-dependent blocks."""
+
      n_steps_hevo: int = 100
      """Default number of steps for the Hamiltonian evolution."""

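Note: a short sketch of how the new ode_solver option could be set on the pyqtorch configuration; the solver choice and step count below are illustrative, not recommendations.

    from pyqtorch.utils import SolverType

    from qadence.backends.api import backend_factory
    from qadence.backends.pyqtorch.config import Configuration
    from qadence.types import BackendName, DiffMode

    config = Configuration(ode_solver=SolverType.DP5_SE, n_steps_hevo=500)
    backend = backend_factory(BackendName.PYQTORCH, diff_mode=DiffMode.AD, configuration=config)
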
qadence/backends/pyqtorch/convert_ops.py CHANGED
@@ -6,8 +6,10 @@ from typing import Any, Sequence, Tuple

  import pyqtorch as pyq
  import sympy
+ import torch
  from pyqtorch.apply import apply_operator
  from pyqtorch.matrices import _dagger
+ from pyqtorch.time_dependent.sesolve import sesolve
  from pyqtorch.utils import is_diag
  from torch import (
      Tensor,
@@ -26,6 +28,8 @@ from torch.nn import Module

  from qadence.backends.utils import (
      finitediff,
+     pyqify,
+     unpyqify,
  )
  from qadence.blocks import (
      AbstractBlock,
@@ -38,8 +42,12 @@ from qadence.blocks import (
      ScaleBlock,
      TimeEvolutionBlock,
  )
- from qadence.blocks.block_to_tensor import _block_to_tensor_embedded, block_to_tensor
+ from qadence.blocks.block_to_tensor import (
+     _block_to_tensor_embedded,
+     block_to_tensor,
+ )
  from qadence.blocks.primitive import ProjectorBlock
+ from qadence.blocks.utils import parameters
  from qadence.operations import (
      U,
      multi_qubit_gateset,
@@ -177,6 +185,7 @@ class PyQHamiltonianEvolution(Module):
          self.param_names = config.get_param_name(block)
          self.block = block
          self.hmat: Tensor
+         self.config = config

          if isinstance(block.generator, AbstractBlock) and not block.generator.is_parametric:
              hmat = block_to_tensor(
@@ -253,7 +262,8 @@ class PyQHamiltonianEvolution(Module):
          """Approximate jacobian of the evolved operator with respect to time evolution."""
          return finitediff(
              lambda t: self._unitary(time_evolution=t, hamiltonian=self._hamiltonian(self, values)),
-             values[self.param_names[0]],
+             values[self.param_names[0]].reshape(-1, 1),
+             (0,),
          )

      def jacobian_generator(self, values: dict[str, Tensor]) -> Tensor:
@@ -280,25 +290,88 @@ class PyQHamiltonianEvolution(Module):
              lambda v: self._unitary(
                  time_evolution=self._time_evolution(values), hamiltonian=_generator(v)
              ),
-             values[self.param_names[1]],
+             values[self.param_names[1]].reshape(-1, 1),
+             (0,),
          )

      def dagger(self, values: dict[str, Tensor]) -> Tensor:
          """Dagger of the evolved operator given the current parameter values."""
          return _dagger(self.unitary(values))

+     def _get_time_parameter(self) -> str:
+         # get unique time parameters
+         unique_time_params = set()
+         for p in parameters(self.block.generator):  # type: ignore [arg-type]
+             if getattr(p, "is_time", False):
+                 unique_time_params.add(str(p))
+
+         if len(unique_time_params) > 1:
+             raise Exception("Only a single time parameter is supported.")
+
+         return unique_time_params.pop()
+
      def forward(
          self,
          state: Tensor,
          values: dict[str, Tensor],
      ) -> Tensor:
-         return apply_operator(
-             state,
-             self.unitary(values),
-             self.qubit_support,
-             self.n_qubits,
-             self.batch_size,
-         )
+         if getattr(self.block.generator, "is_time_dependent", False):  # type: ignore [union-attr]
+
+             def Ht(t: Tensor | float) -> Tensor:
+                 # values dict has to change with new value of t
+                 # initial value of a feature parameter inside generator block
+                 # has to be inferred here
+                 new_vals = dict()
+                 for str_expr, val in values.items():
+                     expr = sympy.sympify(str_expr)
+                     t_symb = sympy.Symbol(self._get_time_parameter())
+                     free_symbols = expr.free_symbols
+                     if t_symb in free_symbols:
+                         # create substitution list for time and feature params
+                         subs_list = [(t_symb, t)]
+
+                         if len(free_symbols) > 1:
+                             # get feature param symbols
+                             feat_symbols = free_symbols.difference(set([t_symb]))
+
+                             # get feature param values
+                             feat_vals = values["orig_param_values"]
+
+                             # update substitution list with feature param values
+                             for fs in feat_symbols:
+                                 subs_list.append((fs, feat_vals[str(fs)]))
+
+                         # evaluate expression with new time param value
+                         new_vals[str_expr] = torch.tensor(float(expr.subs(subs_list)))
+                     else:
+                         # expression doesn't contain time parameter - copy it as is
+                         new_vals[str_expr] = val
+
+                 # get matrix form of generator
+                 hmat = _block_to_tensor_embedded(
+                     self.block.generator,  # type: ignore[arg-type]
+                     values=new_vals,
+                     qubit_support=self.qubit_support,
+                     use_full_support=False,
+                     device=self.device,
+                 ).squeeze(0)
+
+                 return hmat
+
+             tsave = torch.linspace(0, self.block.duration, self.config.n_steps_hevo)  # type: ignore [attr-defined]
+             result = pyqify(
+                 sesolve(Ht, unpyqify(state).T[:, 0:1], tsave, self.config.ode_solver).states[-1].T
+             )
+         else:
+             result = apply_operator(
+                 state,
+                 self.unitary(values),
+                 self.qubit_support,
+                 self.n_qubits,
+                 self.batch_size,
+             )
+
+         return result

      @property
      def device(self) -> torch_device:
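
Note: the time-dependent branch of forward() above substitutes the time symbol and any feature parameters into each stringified expression before rebuilding the generator matrix. A self-contained sketch of just that substitution step follows; the expression, symbol names and values are made up for illustration.

    import sympy
    import torch

    expr = sympy.sympify("omega*t")           # stand-in for a stringified parameter expression
    t_symb = sympy.Symbol("t")                # the single supported time parameter
    feat_vals = {"omega": torch.tensor(2.0)}  # stand-in for values["orig_param_values"]

    def evaluate_at(t: float) -> torch.Tensor:
        # substitute the time symbol first, then every remaining feature symbol
        subs_list = [(t_symb, t)]
        for fs in expr.free_symbols.difference({t_symb}):
            subs_list.append((fs, float(feat_vals[str(fs)])))
        return torch.tensor(float(expr.subs(subs_list)))

    print(evaluate_at(0.5))  # tensor(1.)
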
qadence/backends/utils.py CHANGED
@@ -20,8 +20,8 @@ from torch import (
      rand,
  )

- from qadence.types import ParamDictType
- from qadence.utils import Endianness, int_to_basis, is_qadence_shape
+ from qadence.types import Endianness, ParamDictType
+ from qadence.utils import int_to_basis, is_qadence_shape

  FINITE_DIFF_EPS = 1e-06
  # Dict of NumPy dtype -> torch dtype (when the correspondence exists)
@@ -98,10 +98,11 @@ def to_list_of_dicts(param_values: ParamDictType) -> list[ParamDictType]:
      if not param_values:
          return [param_values]

-     max_batch_size = max(p.size()[0] for p in param_values.values())
+     max_batch_size = max(p.size()[0] for p in param_values.values() if isinstance(p, Tensor))
      batched_values = {
          k: (v if v.size()[0] == max_batch_size else v.repeat(max_batch_size, 1))
          for k, v in param_values.items()
+         if isinstance(v, Tensor)
      }

      return [{k: v[i] for k, v in batched_values.items()} for i in range(max_batch_size)]
@@ -143,17 +144,71 @@ def validate_state(state: Tensor, n_qubits: int) -> None:
      )


- def infer_batchsize(param_values: ParamDictType = None) -> int:
+ def infer_batchsize(param_values: dict[str, Tensor] = None) -> int:
      """Infer the batch_size through the length of the parameter tensors."""
-     return max([len(tensor) for tensor in param_values.values()]) if param_values else 1
+     try:
+         return (
+             max(
+                 [
+                     len(tensor_or_dict)
+                     for tensor_or_dict in param_values.values()
+                     if isinstance(tensor_or_dict, Tensor)
+                 ]
+             )
+             if param_values
+             else 1
+         )
+     except Exception:
+         return 1


  # The following functions can be used to compute potentially higher order gradients using pyqtorch's
  # native 'jacobian' methods.


- def finitediff(f: Callable, x: Tensor, eps: float = FINITE_DIFF_EPS) -> Tensor:
-     return (f(x + eps) - f(x - eps)) / (2 * eps)  # type: ignore
+ def finitediff(
+     f: Callable,
+     x: Tensor,
+     derivative_indices: tuple[int, ...],
+     eps: float = None,
+ ) -> Tensor:
+     """
+     Compute the finite difference of a function at a point.
+
+     Args:
+         f: The function to differentiate.
+         x: Input of size `(batch_size, input_size)`.
+         derivative_indices: Which *input* to differentiate (i.e. which variable x[:,i])
+         eps: finite difference spacing (uses `torch.finfo(x.dtype).eps ** (1 / (2 + order))`
+             as default)
+
+     Returns:
+         (Tensor): The finite difference of the function at the point `x`.
+     """
+
+     if eps is None:
+         order = len(derivative_indices)
+         eps = torch.finfo(x.dtype).eps ** (1 / (2 + order))
+
+     # compute derivative direction vector(s)
+     eps = torch.as_tensor(eps, dtype=x.dtype)
+     _eps = 1 / eps  # type: ignore[operator]
+     ev = torch.zeros_like(x)
+     i = derivative_indices[0]
+     ev[:, i] += eps
+
+     # recursive finite differencing for higher order than 3 / mixed derivatives
+     if len(derivative_indices) > 3 or len(set(derivative_indices)) > 1:
+         di = derivative_indices[1:]
+         return (finitediff(f, x + ev, di) - finitediff(f, x - ev, di)) * _eps / 2
+     elif len(derivative_indices) == 3:
+         return (f(x + 2 * ev) - 2 * f(x + ev) + 2 * f(x - ev) - f(x - 2 * ev)) * _eps**3 / 2
+     elif len(derivative_indices) == 2:
+         return (f(x + ev) + f(x - ev) - 2 * f(x)) * _eps**2
+     elif len(derivative_indices) == 1:
+         return (f(x + ev) - f(x - ev)) * _eps / 2
+     else:
+         raise ValueError("If you see this error there is a bug in the `finitediff` function.")


  def finitediff_sampling(
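
Note: a quick sketch of calling the reworked finitediff with the new derivative_indices argument; the function and input below are illustrative. The first- and second-order central differences of sin at 0.3 should be close to cos(0.3) and -sin(0.3).

    import torch

    from qadence.backends.utils import finitediff

    def f(x: torch.Tensor) -> torch.Tensor:
        return torch.sin(x)

    x = torch.tensor([[0.3]], dtype=torch.float64)  # shape (batch_size, input_size)

    d1 = finitediff(f, x, (0,))    # ~ cos(0.3)
    d2 = finitediff(f, x, (0, 0))  # ~ -sin(0.3)
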
qadence/blocks/abstract.py CHANGED
@@ -300,6 +300,13 @@ class AbstractBlock(ABC):
          params: list[sympy.Basic] = parameters(self)
          return any(isinstance(p, Parameter) for p in params)

+     @property
+     def is_time_dependent(self) -> bool:
+         from qadence.blocks.utils import parameters
+
+         params: list[sympy.Basic] = parameters(self)
+         return any(getattr(p, "is_time", False) for p in params)
+
      def tensor(self, values: dict[str, TNumber | torch.Tensor] = {}) -> torch.Tensor:
          from .block_to_tensor import block_to_tensor

qadence/blocks/embedding.py CHANGED
@@ -111,18 +111,21 @@ def embedding(
              angle: ArrayLike
              values = {}
              for symbol in expr.free_symbols:
-                 if symbol.name in inputs:
-                     value = inputs[symbol.name]
-                 elif symbol.name in params:
-                     value = params[symbol.name]
+                 if not symbol.is_time:
+                     if symbol.name in inputs:
+                         value = inputs[symbol.name]
+                     elif symbol.name in params:
+                         value = params[symbol.name]
+                     else:
+                         msg_trainable = "Trainable" if symbol.trainable else "Non-trainable"
+                         raise KeyError(
+                             f"{msg_trainable} parameter '{symbol.name}' not found in the "
+                             f"inputs list: {list(inputs.keys())} nor the "
+                             f"params list: {list(params.keys())}."
+                         )
+                     values[symbol.name] = value
                  else:
-                     msg_trainable = "Trainable" if symbol.trainable else "Non-trainable"
-                     raise KeyError(
-                         f"{msg_trainable} parameter '{symbol.name}' not found in the "
-                         f"inputs list: {list(inputs.keys())} nor the "
-                         f"params list: {list(params.keys())}."
-                     )
-                 values[symbol.name] = value
+                     values[symbol.name] = tensor(1.0)
              angle = fn(**values)
              # do not reshape parameters which are multi-dimensional
              # tensors, such as for example generator matrices
@@ -139,7 +142,9 @@ def embedding(
                  gate_lvl_params[uuid] = embedded_params[e]
              return gate_lvl_params
          else:
-             return {stringify(k): v for k, v in embedded_params.items()}
+             out = {stringify(k): v for k, v in embedded_params.items()}
+             out.update({"orig_param_values": inputs})
+             return out

      params: ParamDictType
      params = {
qadence/blocks/matrix.py CHANGED
@@ -27,7 +27,7 @@ class MatrixBlock(PrimitiveBlock):
      from qadence.circuit import QuantumCircuit
      from qadence.types import BackendName, DiffMode
      from qadence.blocks.matrix import MatrixBlock
-     from qadence.models import QuantumModel
+     from qadence.model import QuantumModel
      from qadence.operations import X, Z
      from qadence.states import random_state

qadence/blocks/primitive.py CHANGED
@@ -27,7 +27,7 @@ class PrimitiveBlock(AbstractBlock):
      Primitive blocks represent elementary unitary operations.

      Examples are single/multi-qubit gates or Hamiltonian evolution.
-     See [`qadence.operations`](/qadence/operations.md) for a full list of
+     See [`qadence.operations`](operations.md) for a full list of
      primitive blocks.
      """

qadence/constructors/__init__.py CHANGED
@@ -14,6 +14,7 @@ from .daqc import daqc_transform
  from .hamiltonians import (
      hamiltonian_factory,
      ising_hamiltonian,
+     ObservableConfig,
      total_magnetization,
      zz_hamiltonian,
  )
@@ -31,6 +32,7 @@ __all__ = [
      "identity_initialized_ansatz",
      "hamiltonian_factory",
      "ising_hamiltonian",
+     "ObservableConfig",
      "total_magnetization",
      "zz_hamiltonian",
      "qft",
qadence/constructors/hamiltonians.py CHANGED
@@ -1,15 +1,17 @@
  from __future__ import annotations

+ from dataclasses import dataclass
  from logging import getLogger
  from typing import Callable, List, Type, Union

  import numpy as np
  from torch import Tensor, double, ones, rand
+ from typing_extensions import Any

  from qadence.blocks import AbstractBlock, add, block_is_qubit_hamiltonian
  from qadence.operations import N, X, Y, Z
  from qadence.register import Register
- from qadence.types import Interaction, TArray
+ from qadence.types import Interaction, ObservableTransform, TArray, TParameter

  logger = getLogger(__name__)

@@ -229,3 +231,38 @@ def ising_hamiltonian(
      zz_ham = zz_hamiltonian(n_qubits, z_terms=z_terms, zz_terms=zz_terms)
      x_ham = hamiltonian_factory(n_qubits, detuning=X, detuning_strength=x_terms)
      return zz_ham + x_ham
+
+
+ def is_numeric(x: Any) -> bool:
+     return type(x) in (int, float, complex, np.int64, np.float64)
+
+
+ @dataclass
+ class ObservableConfig:
+     detuning: TDetuning
+     """
+     Single qubit detuning of the observable Hamiltonian.
+
+     Accepts single-qubit operator N, X, Y, or Z.
+     """
+     scale: TParameter = 1.0
+     """The scale by which to multiply the output of the observable."""
+     shift: TParameter = 0.0
+     """The shift to add to the output of the observable."""
+     transformation_type: ObservableTransform = ObservableTransform.NONE  # type: ignore[assignment]
+     """The type of transformation."""
+     trainable_transform: bool | None = None
+     """
+     Whether to have a trainable transformation on the output of the observable.
+
+     If None, the scale and shift are numbers.
+     If True, the scale and shift are VariationalParameter.
+     If False, the scale and shift are FeatureParameter.
+     """
+
+     def __post_init__(self) -> None:
+         if is_numeric(self.scale) and is_numeric(self.shift):
+             assert (
+                 self.trainable_transform is None
+             ), f"If scale and shift are numbers, trainable_transform must be None. \
+                 But got: {self.trainable_transform}"
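
Note: a minimal sketch of constructing the new ObservableConfig exported from qadence.constructors; the field values are illustrative. With numeric scale and shift, trainable_transform must stay None per the __post_init__ check above.

    from qadence.constructors import ObservableConfig
    from qadence.operations import Z

    # Numeric scale/shift: the measured value is multiplied by 2.0 and shifted by -1.0.
    obs_config = ObservableConfig(detuning=Z, scale=2.0, shift=-1.0)
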
qadence/draw/utils.py CHANGED
@@ -23,7 +23,7 @@ from qadence.blocks import (
  )
  from qadence.blocks.analog import ConstantAnalogRotation, InteractionBlock
  from qadence.circuit import QuantumCircuit
- from qadence.models import QuantumModel
+ from qadence.model import QuantumModel
  from qadence.operations import RX, RY, RZ, SWAP, HamEvo, I
  from qadence.transpile.block import fill_identities
  from qadence.utils import format_parameter
qadence/execution.py CHANGED
@@ -71,7 +71,10 @@ def _(
      endianness: Endianness = Endianness.BIG,
      configuration: Union[BackendConfiguration, dict, None] = None,
  ) -> Tensor:
-     bknd = backend_factory(backend, configuration=configuration)
+     diff_mode = None
+     if backend == BackendName.PYQTORCH:
+         diff_mode = DiffMode.AD
+     bknd = backend_factory(backend, diff_mode=diff_mode, configuration=configuration)
      conv = bknd.convert(circuit)
      with no_grad():
          return bknd.run(
@@ -147,7 +150,10 @@ def _(
      endianness: Endianness = Endianness.BIG,
      configuration: Union[BackendConfiguration, dict, None] = None,
  ) -> list[Counter]:
-     bknd = backend_factory(backend, configuration=configuration)
+     diff_mode = None
+     if backend == BackendName.PYQTORCH:
+         diff_mode = DiffMode.AD
+     bknd = backend_factory(backend, diff_mode=diff_mode, configuration=configuration)
      conv = bknd.convert(circuit)
      return bknd.sample(
          circuit=conv.circuit,
@@ -242,7 +248,9 @@ def _(
      configuration: Union[BackendConfiguration, dict, None] = None,
  ) -> Tensor:
      observable = observable if isinstance(observable, list) else [observable]
-     bknd = backend_factory(backend, configuration=configuration, diff_mode=diff_mode)
+     if backend == BackendName.PYQTORCH:
+         diff_mode = DiffMode.AD
+     bknd = backend_factory(backend, diff_mode=diff_mode, configuration=configuration)
      conv = bknd.convert(circuit, observable)

      def _expectation() -> Tensor: