qadence 1.6.2__py3-none-any.whl → 1.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
qadence/__init__.py CHANGED
@@ -49,7 +49,7 @@ from .exceptions import *
  from .execution import *
  from .measurements import *
  from .ml_tools import *
- from .models import *
+ from .model import *
  from .noise import *
  from .operations import *
  from .overlap import *
@@ -82,7 +82,7 @@ list_of_submodules = [
      ".execution",
      ".measurements",
      ".ml_tools",
-     ".models",
+     ".model",
      ".operations",
      ".overlap",
      ".parameters",
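The `models` subpackage is renamed to `model` in 1.7.0. A minimal migration sketch for downstream code (assuming `QuantumModel` keeps its public name, consistent with the `qadence/blocks/matrix.py` docstring change below):

```python
# qadence <= 1.6.2 (old path, removed in this release):
# from qadence.models import QuantumModel

# qadence >= 1.7.0 (new path, per this diff):
from qadence.model import QuantumModel
```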
qadence/backends/gpsr.py CHANGED
@@ -48,7 +48,13 @@ def single_gap_psr(
      Returns:
          Tensor: tensor containing derivative values
      """
-
+     device = torch.device("cpu")
+     try:
+         device = [v.device for v in param_dict.values()][0]
+     except Exception:
+         pass
+     spectral_gap = spectral_gap.to(device=device)
+     shift = shift.to(device=device)
      # + pi/2 shift
      shifted_params = param_dict.copy()
      shifted_params[param_name] = shifted_params[param_name] + shift
@@ -89,11 +95,17 @@ def multi_gap_psr(
 
      # get shift values
      shifts = shift_prefac * torch.linspace(PI / 2 - PI / 5, PI / 2 + PI / 5, n_eqs)
-
+     device = torch.device("cpu")
+     try:
+         device = [v.device for v in param_dict.values()][0]
+     except Exception:
+         pass
+     spectral_gaps = spectral_gaps.to(device=device)
+     shifts = shifts.to(device=device)
      # calculate F vector and M matrix
      # (see: https://arxiv.org/pdf/2108.01218.pdf on p. 4 for definitions)
      F = []
-     M = torch.empty((n_eqs, n_eqs))
+     M = torch.empty((n_eqs, n_eqs)).to(device=device)
      n_obs = 1
      for i in range(n_eqs):
          # + shift
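Both shift rules now move their constant tensors (`spectral_gap`/`shift` and `spectral_gaps`/`shifts`/`M`) onto the device of the incoming parameter values before use, so GPSR works on GPU inputs. A hedged sketch of the same idiom in isolation (the helper name is ours, not qadence's):

```python
import torch

def infer_param_device(param_dict: dict[str, torch.Tensor]) -> torch.device:
    # Mirror the try/except above: fall back to CPU for an empty param_dict.
    try:
        return next(iter(param_dict.values())).device
    except StopIteration:
        return torch.device("cpu")

device = infer_param_device({"theta": torch.zeros(3)})
shift = torch.tensor(torch.pi / 2).to(device=device)  # same .to(device=...) pattern
```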
qadence/backends/pyqtorch/backend.py CHANGED
@@ -31,10 +31,10 @@ from qadence.transpile import (
      transpile,
  )
  from qadence.types import BackendName, Endianness, Engine
- from qadence.utils import infer_batchsize, int_to_basis
+ from qadence.utils import infer_batchsize
 
  from .config import Configuration, default_passes
- from .convert_ops import convert_block, convert_observable
+ from .convert_ops import convert_block
 
  logger = getLogger(__name__)
 
@@ -77,8 +77,13 @@ class Backend(BackendInterface):
              scale_primitive_blocks_only,
          ]
          block = transpile(*transpilations)(observable)  # type: ignore[call-overload]
-
-         (native,) = convert_observable(block, n_qubits=n_qubits, config=self.config)
+         operations = convert_block(block, n_qubits, self.config)
+         obs_cls = (
+             pyq.DiagonalObservable
+             if block._is_diag_pauli and not block.is_parametric
+             else pyq.Observable
+         )
+         native = obs_cls(n_qubits=n_qubits, operations=operations)
          return ConvertedObservable(native=native, abstract=block, original=observable)
 
      def run(
@@ -99,7 +104,7 @@ class Backend(BackendInterface):
          validate_state(state, n_qubits)
          # pyqtorch expects input shape [2] * n_qubits + [batch_size]
          state = pyqify(state, n_qubits) if pyqify_state else state
-         state = circuit.native.run(state, param_values)
+         state = circuit.native.run(state=state, values=param_values)
          state = unpyqify(state) if unpyqify_state else state
          state = invert_endianness(state) if endianness != self.native_endianness else state
          return state
@@ -208,46 +213,26 @@ class Backend(BackendInterface):
          noise: Noise | None = None,
          mitigation: Mitigations | None = None,
          endianness: Endianness = Endianness.BIG,
+         pyqify_state: bool = True,
      ) -> list[Counter]:
-         if n_shots < 1:
-             raise ValueError("You can only call sample with n_shots>0.")
-
-         def _sample(_probs: Tensor, n_shots: int, endianness: Endianness, n_qubits: int) -> Counter:
-             return Counter(
-                 {
-                     int_to_basis(k=k, n_qubits=n_qubits, endianness=endianness): count.item()
-                     for k, count in enumerate(
-                         torch.bincount(
-                             torch.multinomial(input=_probs, num_samples=n_shots, replacement=True)
-                         )
-                     )
-                     if count > 0
-                 }
-             )
-
-         with torch.no_grad():
-             wf = self.run(circuit=circuit, param_values=param_values, state=state)
-             probs = torch.abs(torch.pow(wf, 2))
-             samples = list(
-                 map(
-                     lambda _probs: _sample(
-                         _probs=_probs,
-                         n_shots=n_shots,
-                         endianness=endianness,
-                         n_qubits=circuit.abstract.n_qubits,
-                     ),
-                     probs,
-                 )
+         if state is None:
+             state = circuit.native.init_state(batch_size=infer_batchsize(param_values))
+         elif state is not None and pyqify_state:
+             n_qubits = circuit.abstract.n_qubits
+             state = pyqify(state, n_qubits) if pyqify_state else state
+         samples: list[Counter] = circuit.native.sample(
+             state=state, values=param_values, n_shots=n_shots
+         )
+         samples = invert_endianness(samples) if endianness != Endianness.BIG else samples
+         if noise is not None:
+             samples = apply_noise(noise=noise, samples=samples)
+         if mitigation is not None:
+             logger.warning(
+                 "Mitigation protocol is deprecated. Use qadence-protocols instead.",
              )
-             if noise is not None:
-                 samples = apply_noise(noise=noise, samples=samples)
-             if mitigation is not None:
-                 logger.warning(
-                     "Mitigation protocol is deprecated. Use qadence-protocols instead.",
-                 )
-                 assert noise
-                 samples = apply_mitigation(noise=noise, mitigation=mitigation, samples=samples)
-             return samples
+             assert noise
+             samples = apply_mitigation(noise=noise, mitigation=mitigation, samples=samples)
+         return samples
 
      def assign_parameters(self, circuit: ConvertedCircuit, param_values: dict[str, Tensor]) -> Any:
          raise NotImplementedError
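`sample()` no longer builds probabilities from the wavefunction and draws with `torch.multinomial`; it delegates to the native pyqtorch circuit and only post-processes endianness, noise, and the deprecated mitigation path. A hedged usage sketch (circuit and value names are illustrative):

```python
from collections import Counter
import torch

# assumed to exist: backend = Backend(); conv_circ = backend.circuit(circ)
values: dict[str, torch.Tensor] = {"theta": torch.rand(1)}
samples: list[Counter] = backend.sample(
    circuit=conv_circ,
    param_values=values,
    n_shots=1000,  # forwarded to circuit.native.sample(...)
)
print(samples[0].most_common(3))
```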
qadence/backends/pyqtorch/convert_ops.py CHANGED
@@ -1,10 +1,8 @@
  from __future__ import annotations
 
- from functools import reduce
  from itertools import chain as flatten
  from math import prod
- from operator import add
- from typing import Any, Iterable, Sequence, Tuple
+ from typing import Any, Sequence, Tuple
 
  import pyqtorch as pyq
  import sympy
@@ -13,15 +11,12 @@ from pyqtorch.matrices import _dagger
  from pyqtorch.utils import is_diag
  from torch import (
      Tensor,
-     argsort,
-     bmm,
      cdouble,
      diag_embed,
      diagonal,
      exp,
+     float64,
      linalg,
-     ones_like,
-     permute,
      tensor,
      transpose,
  )
@@ -31,8 +26,6 @@ from torch.nn import Module
 
  from qadence.backends.utils import (
      finitediff,
-     pyqify,
-     unpyqify,
  )
  from qadence.blocks import (
      AbstractBlock,
@@ -45,11 +38,7 @@ from qadence.blocks import (
      ScaleBlock,
      TimeEvolutionBlock,
  )
- from qadence.blocks.block_to_tensor import (
-     _block_to_tensor_embedded,
-     block_to_diagonal,
-     block_to_tensor,
- )
+ from qadence.blocks.block_to_tensor import _block_to_tensor_embedded, block_to_tensor
  from qadence.blocks.primitive import ProjectorBlock
  from qadence.operations import (
      U,
@@ -60,7 +49,6 @@ from qadence.operations import (
      two_qubit_gateset,
  )
  from qadence.types import OpName
- from qadence.utils import infer_batchsize
 
  from .config import Configuration
 
@@ -81,15 +69,13 @@ def is_single_qubit_chain(block: AbstractBlock) -> bool:
      )
 
 
- def convert_observable(
-     block: AbstractBlock, n_qubits: int, config: Configuration = None
- ) -> Sequence[Module]:
-     return [PyQObservable(block, n_qubits, config)]
-
-
  def convert_block(
      block: AbstractBlock, n_qubits: int = None, config: Configuration = None
- ) -> Sequence[Module]:
+ ) -> Sequence[Module | Tensor | str | sympy.Expr]:
+     if isinstance(block, (Tensor, str, sympy.Expr)):  # case for hamevo generators
+         if isinstance(block, Tensor):
+             block = block.permute(1, 2, 0)  # put batch size in the back
+         return [block]
      qubit_support = block.qubit_support
      if n_qubits is None:
          n_qubits = max(qubit_support) + 1
@@ -98,37 +84,40 @@ def convert_block(
          config = Configuration()
 
      if isinstance(block, ScaleBlock):
-         return [ScalePyQOperation(n_qubits, block, config)]
-
-     elif isinstance(block, AddBlock):
-         ops = list(flatten(*(convert_block(b, n_qubits, config) for b in block.blocks)))
-         return [AddPyQOperation(n_qubits, ops)]
+         scaled_ops = convert_block(block.block, n_qubits, config)
+         scale = (
+             tensor([block.parameters.parameter], dtype=float64)
+             if not block.is_parametric
+             else config.get_param_name(block)[0]
+         )
+         return [pyq.Scale(pyq.Sequence(scaled_ops), scale)]
 
      elif isinstance(block, TimeEvolutionBlock):
-         return [
-             PyQHamiltonianEvolution(
-                 qubit_support=qubit_support,
-                 n_qubits=n_qubits,
-                 block=block,
-                 config=config,
-             )
-         ]
+         # TODO add native pyq hamevo
+         # generator = convert_block(block.generator, n_qubits, config)[0]  # type: ignore[arg-type]
+         # time_param = config.get_param_name(block)[0]
+         # is_parametric = (
+         #     block.generator.is_parametric if isinstance(block.generator, AbstractBlock) else False
+         # )
+         # return [
+         #     pyq.HamiltonianEvolution(
+         #         qubit_support=qubit_support,
+         #         generator=generator,
+         #         time=time_param,
+         #         generator_parametric=is_parametric,  # type: ignore[union-attr]
+         #     )
+         # ]
+         return [PyQHamiltonianEvolution(qubit_support, n_qubits, block, config)]
      elif isinstance(block, MatrixBlock):
-         return [PyQMatrixBlock(block, n_qubits, config)]
+         return [pyq.primitive.Primitive(block.matrix, block.qubit_support)]
      elif isinstance(block, CompositeBlock):
          ops = list(flatten(*(convert_block(b, n_qubits, config) for b in block.blocks)))
-         if is_single_qubit_chain(block) and config.use_single_qubit_composition:
-             return [PyQComposedBlock(ops, qubit_support, n_qubits, config)]
+         if isinstance(block, AddBlock):
+             return [pyq.Add(ops)]  # add
+         elif is_single_qubit_chain(block) and config.use_single_qubit_composition:
+             return [pyq.Merge(ops)]  # for chains of single qubit ops on the same qubit
          else:
-             # NOTE: without wrapping in a pyq.QuantumCircuit here the kron/chain
-             # blocks won't be properly nested which leads to incorrect results from
-             # the `AddBlock`s. For example:
-             # add(chain(Z(0), Z(1))) has to result in the following (pseudo-code)
-             # AddPyQOperation(pyq.QuantumCircuit(Z, Z))
-             # as opposed to
-             # AddPyQOperation(Z, Z)
-             # which would be wrong.
-             return [pyq.QuantumCircuit(n_qubits, ops)]
+             return [pyq.Sequence(ops)]  # for kron and chain
      elif isinstance(block, tuple(non_unitary_gateset)):
          if isinstance(block, ProjectorBlock):
              projector = getattr(pyq, block.name)
@@ -161,7 +150,10 @@ def convert_block(
          if isinstance(block, ParametricBlock):
              op = pyq_cls(qubit_support[:-1], qubit_support[-1], config.get_param_name(block)[0])
          else:
-             op = pyq_cls(qubit_support[:-1], qubit_support[-1])
+             if "CSWAP" in block_name:
+                 op = pyq_cls(qubit_support[:-2], qubit_support[-2:])
+             else:
+                 op = pyq_cls(qubit_support[:-1], qubit_support[-1])
          return [op]
      else:
          raise NotImplementedError(
@@ -171,147 +163,6 @@ def convert_block(
          )
 
 
- class PyQMatrixBlock(Module):
-     def __init__(self, block: MatrixBlock, n_qubits: int, config: Configuration = None):
-         super().__init__()
-         self.n_qubits = n_qubits
-         self.qubits = block.qubit_support
-         self.register_buffer("mat", block.matrix.unsqueeze(2))
-         self.mat: Tensor
-         self._device: torch_device = self.mat.device
-         self._dtype: torch_dtype = self.mat.dtype
-
-     def forward(self, state: Tensor, _: dict[str, Tensor] = None) -> Tensor:
-         return apply_operator(state, self.mat, self.qubits, self.n_qubits)
-
-     @property
-     def device(self) -> torch_device:
-         return self._device
-
-     def to(self, *args: Any, **kwargs: Any) -> PyQMatrixBlock:
-         self.mat = self.mat.to(*args, **kwargs)
-         self._device = self.mat.device
-         self._dtype = self.mat.dtype
-         return self
-
-
- class PyQComposedBlock(pyq.QuantumCircuit):
-     def __init__(
-         self,
-         ops: list[Module],
-         qubits: Tuple[int, ...],
-         n_qubits: int,
-         config: Configuration = None,
-     ):
-         """
-         Merge operations that are adjacent and have identical qubit_support.
-
-         It results in fewer call of apply_operator
-         """
-         super().__init__(n_qubits, ops)
-         self.qubits = qubits
-         self.merged_qubits_support = [
-             grouped_op[-1].qubit_support for grouped_op in self.grouped_operations()
-         ]
-
-     def grouped_operations(self) -> list[list[Module]]:
-         # takes a list of operations and group adjacent operations into sublist
-         # if those operations have the same control qubits
-         def _sublist_grouper(x: Iterable[list[Module]], y: Module) -> list[list[Module]]:
-             # Appends the element y with the last sublist in the list x
-             # if they have the same qubit_support.
-             # Appends the element y as a new sublist to x if it has different qubit_domain
-             x = list(x)
-             if y.qubit_support == x[-1][-1].qubit_support:
-                 x[-1].append(y)
-                 return x
-             else:
-                 x.append([y])
-                 return x
-
-         return list(reduce(_sublist_grouper, iter(self.operations[1:]), [[self.operations[0]]]))
-
-     def merged_unitary(self, values: dict[str, Tensor] | None, batch_size: int) -> list[Tensor]:
-         # compute the tensor multiplication of each group of operations
-         batch_first_perm = (2, 0, 1)
-         undo_perm = tuple(argsort(tensor(batch_first_perm)))
-
-         def _expand(m: Tensor) -> Tensor:
-             if len(m.size()) == 2:
-                 m = m.unsqueeze(2).repeat(
-                     1, 1, batch_size
-                 )  # Primitive gates are 2D, so we expand them.
-             elif m.shape != (2, 2, batch_size) and m.shape != (4, 4, batch_size):
-                 m = m.repeat(1, 1, batch_size)  # In case a tensor is 3D doesnt have batch_size.
-             return m
-
-         def _batch_first(m: Tensor) -> Tensor:
-             return permute(m, batch_first_perm)  # This returns shape (batch_size, 2, 2)
-
-         def _batch_last(m: Tensor) -> Tensor:
-             return permute(
-                 m, undo_perm
-             )  # We need to undo the permute since PyQ expects (2, 2, batch_size).
-
-         def _list_wise_bmm(ops: list[Module]) -> Tensor:
-             # Takes a list of operations and apply torch.bmm to all the unitaries of the list
-             return _batch_last(
-                 reduce(bmm, [_batch_first(_expand(op.unitary(values))) for op in reversed(ops)])
-             )  # We reverse the list of tensors here since matmul is not commutative.
-
-         return list(map(_list_wise_bmm, reversed(self.grouped_operations())))[::-1]
-
-     def forward(self, state: Tensor, values: dict[str, Tensor] | None = None) -> Tensor:
-         # compute evolution of the state by the list of operations
-         batch_size = infer_batchsize(values)
-         return reduce(
-             lambda y, x: apply_operator(state=y, operator=x[0], qubits=x[1]),
-             zip(self.merged_unitary(values, batch_size), self.merged_qubits_support),
-             state,
-         )
-
-
- class PyQObservable(Module):
-     def __init__(self, block: AbstractBlock, n_qubits: int, config: Configuration = None):
-         super().__init__()
-         if config is None:
-             config = Configuration()
-         self.n_qubits = n_qubits
-         if block._is_diag_pauli and not block.is_parametric:
-             self.register_buffer("operation", block_to_diagonal(block, tuple(range(n_qubits))))
-             self._forward = lambda self, state, values: pyqify(
-                 self.operation * unpyqify(state), n_qubits=self.n_qubits
-             )
-         else:
-             self.operation = pyq.QuantumCircuit(
-                 n_qubits,
-                 convert_block(block, n_qubits, config),
-             )
-             self._forward = lambda self, state, values: self.operation(state, values)
-         self._device = self.operation.device
-         self._dtype = self.operation.dtype
-
-     def run(self, state: Tensor, values: dict[str, Tensor]) -> Tensor:
-         return self._forward(self, state, values)
-
-     def forward(self, state: Tensor, values: dict[str, Tensor]) -> Tensor:
-         return pyq.inner_prod(state, self.run(state, values)).real
-
-     @property
-     def device(self) -> torch_device:
-         return self._device
-
-     @property
-     def dtype(self) -> torch_dtype:
-         return self._dtype
-
-     def to(self, *args: Any, **kwargs: Any) -> PyQObservable:
-         self.operation = self.operation.to(*args, **kwargs)
-         self._device = self.operation.device
-         self._dtype = self.operation.dtype
-         return self
-
-
  class PyQHamiltonianEvolution(Module):
      def __init__(
          self,
@@ -402,7 +253,8 @@ class PyQHamiltonianEvolution(Module):
          """Approximate jacobian of the evolved operator with respect to time evolution."""
          return finitediff(
              lambda t: self._unitary(time_evolution=t, hamiltonian=self._hamiltonian(self, values)),
-             values[self.param_names[0]],
+             values[self.param_names[0]].reshape(-1, 1),
+             (0,),
          )
 
      def jacobian_generator(self, values: dict[str, Tensor]) -> Tensor:
@@ -429,7 +281,8 @@ class PyQHamiltonianEvolution(Module):
              lambda v: self._unitary(
                  time_evolution=self._time_evolution(values), hamiltonian=_generator(v)
              ),
-             values[self.param_names[1]],
+             values[self.param_names[1]].reshape(-1, 1),
+             (0,),
          )
 
      def dagger(self, values: dict[str, Tensor]) -> Tensor:
@@ -463,39 +316,3 @@ class PyQHamiltonianEvolution(Module):
          self._device = self.hmat.device
          self._dtype = self.hmat.dtype
          return self
-
-
- class AddPyQOperation(pyq.QuantumCircuit):
-     def __init__(self, n_qubits: int, operations: list[Module]):
-         super().__init__(n_qubits=n_qubits, operations=operations)
-
-     def forward(self, state: Tensor, values: dict[str, Tensor]) -> Tensor:
-         return reduce(add, (op(state, values) for op in self.operations))
-
-
- class ScalePyQOperation(pyq.QuantumCircuit):
-     def __init__(self, n_qubits: int, block: ScaleBlock, config: Configuration):
-         if not isinstance(block.block, PrimitiveBlock):
-             raise NotImplementedError(
-                 "The pyqtorch backend can currently only scale `PrimitiveBlock` types.\
-                 Please use the following transpile function on your circuit first:\
-                 from qadence.transpile import scale_primitive_blocks_only"
-             )
-         ops = convert_block(block.block, n_qubits, config)
-         assert len(ops) == 1
-         super().__init__(n_qubits, ops)
-         (self.param_name,) = config.get_param_name(block)
-         self.qubit_support = self.operations[0].qubit_support
-
-     def forward(self, state: Tensor, values: dict[str, Tensor]) -> Tensor:
-         return apply_operator(state, self.unitary(values), self.qubit_support, self.n_qubits)
-
-     def unitary(self, values: dict[str, Tensor]) -> Tensor:
-         thetas = values[self.param_name]
-         return thetas * self.operations[0].unitary(values)
-
-     def dagger(self, values: dict[str, Tensor]) -> Tensor:
-         return _dagger(self.unitary(values))
-
-     def jacobian(self, values: dict[str, Tensor]) -> Tensor:
-         return values[self.param_name] * ones_like(self.unitary(values))
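With this rewrite the backend's custom wrappers (`PyQMatrixBlock`, `PyQComposedBlock`, `PyQObservable`, `AddPyQOperation`, `ScalePyQOperation`) are replaced by pyqtorch natives: `AddBlock` maps to `pyq.Add`, single-qubit chains to `pyq.Merge` (when `config.use_single_qubit_composition` is set), other composites to `pyq.Sequence`, and `ScaleBlock` to `pyq.Scale`. A hedged sketch of the new mapping on a toy block:

```python
from qadence.backends.pyqtorch.convert_ops import convert_block
from qadence.blocks import add, chain
from qadence.operations import Z

# add(chain(Z(0), Z(1))) should now convert to a pyq.Add wrapping a pyq.Sequence
(op,) = convert_block(add(chain(Z(0), Z(1))), n_qubits=2)
print(type(op))  # expected: pyqtorch's Add module
```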
qadence/backends/utils.py CHANGED
@@ -20,8 +20,8 @@ from torch import (
      rand,
  )
 
- from qadence.types import ParamDictType
- from qadence.utils import Endianness, int_to_basis, is_qadence_shape
+ from qadence.types import Endianness, ParamDictType
+ from qadence.utils import int_to_basis, is_qadence_shape
 
  FINITE_DIFF_EPS = 1e-06
  # Dict of NumPy dtype -> torch dtype (when the correspondence exists)
@@ -152,8 +152,49 @@ def infer_batchsize(param_values: ParamDictType = None) -> int:
  # native 'jacobian' methods.
 
 
- def finitediff(f: Callable, x: Tensor, eps: float = FINITE_DIFF_EPS) -> Tensor:
-     return (f(x + eps) - f(x - eps)) / (2 * eps)  # type: ignore
+ def finitediff(
+     f: Callable,
+     x: Tensor,
+     derivative_indices: tuple[int, ...],
+     eps: float = None,
+ ) -> Tensor:
+     """
+     Compute the finite difference of a function at a point.
+
+     Args:
+         f: The function to differentiate.
+         x: Input of size `(batch_size, input_size)`.
+         derivative_indices: Which *input* to differentiate (i.e. which variable x[:,i])
+         eps: finite difference spacing (uses `torch.finfo(x.dtype).eps ** (1 / (2 + order))`
+             as default)
+
+     Returns:
+         (Tensor): The finite difference of the function at the point `x`.
+     """
+
+     if eps is None:
+         order = len(derivative_indices)
+         eps = torch.finfo(x.dtype).eps ** (1 / (2 + order))
+
+     # compute derivative direction vector(s)
+     eps = torch.as_tensor(eps, dtype=x.dtype)
+     _eps = 1 / eps  # type: ignore[operator]
+     ev = torch.zeros_like(x)
+     i = derivative_indices[0]
+     ev[:, i] += eps
+
+     # recursive finite differencing for higher order than 3 / mixed derivatives
+     if len(derivative_indices) > 3 or len(set(derivative_indices)) > 1:
+         di = derivative_indices[1:]
+         return (finitediff(f, x + ev, di) - finitediff(f, x - ev, di)) * _eps / 2
+     elif len(derivative_indices) == 3:
+         return (f(x + 2 * ev) - 2 * f(x + ev) + 2 * f(x - ev) - f(x - 2 * ev)) * _eps**3 / 2
+     elif len(derivative_indices) == 2:
+         return (f(x + ev) + f(x - ev) - 2 * f(x)) * _eps**2
+     elif len(derivative_indices) == 1:
+         return (f(x + ev) - f(x - ev)) * _eps / 2
+     else:
+         raise ValueError("If you see this error there is a bug in the `finitediff` function.")
 
 
  def finitediff_sampling(
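The one-line central difference is generalized to arbitrary-order and mixed partial derivatives, with a spacing default that scales with the derivative order. A quick sanity check of the first two rules against known derivatives of sine (assuming the signature above):

```python
import torch
from qadence.backends.utils import finitediff

f = torch.sin                                   # f' = cos, f'' = -sin
x = torch.tensor([[0.3]], dtype=torch.float64)  # shape (batch_size, input_size)

d1 = finitediff(f, x, (0,))     # (f(x+e) - f(x-e)) / 2e
d2 = finitediff(f, x, (0, 0))   # (f(x+e) + f(x-e) - 2 f(x)) / e**2
assert torch.allclose(d1, torch.cos(x), atol=1e-6)
assert torch.allclose(d2, -torch.sin(x), atol=1e-4)
```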
qadence/blocks/matrix.py CHANGED
@@ -22,12 +22,12 @@ class MatrixBlock(PrimitiveBlock):
 
      Examples:
      ```python exec="on" source="material-block" result="json"
-    import torch
+     import torch
 
      from qadence.circuit import QuantumCircuit
      from qadence.types import BackendName, DiffMode
      from qadence.blocks.matrix import MatrixBlock
-     from qadence.models import QuantumModel
+     from qadence.model import QuantumModel
      from qadence.operations import X, Z
      from qadence.states import random_state
 
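A condensed sketch of the docstring example with the corrected import path (the matrix and wiring are illustrative):

```python
import torch
from qadence.blocks.matrix import MatrixBlock

# wrap a Pauli-X matrix in a MatrixBlock acting on qubit 0
xmat = torch.tensor([[0, 1], [1, 0]], dtype=torch.cdouble)
block = MatrixBlock(xmat, qubit_support=(0,))
```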
qadence/blocks/primitive.py CHANGED
@@ -27,7 +27,7 @@ class PrimitiveBlock(AbstractBlock):
      Primitive blocks represent elementary unitary operations.
 
      Examples are single/multi-qubit gates or Hamiltonian evolution.
-     See [`qadence.operations`](/qadence/operations.md) for a full list of
+     See [`qadence.operations`](operations.md) for a full list of
      primitive blocks.
      """
 
qadence/constructors/__init__.py CHANGED
@@ -14,6 +14,7 @@ from .daqc import daqc_transform
  from .hamiltonians import (
      hamiltonian_factory,
      ising_hamiltonian,
+     ObservableConfig,
      total_magnetization,
      zz_hamiltonian,
  )
@@ -31,6 +32,7 @@ __all__ = [
      "identity_initialized_ansatz",
      "hamiltonian_factory",
      "ising_hamiltonian",
+     "ObservableConfig",
      "total_magnetization",
      "zz_hamiltonian",
      "qft",
qadence/constructors/hamiltonians.py CHANGED
@@ -1,15 +1,17 @@
  from __future__ import annotations
 
+ from dataclasses import dataclass
  from logging import getLogger
  from typing import Callable, List, Type, Union
 
  import numpy as np
  from torch import Tensor, double, ones, rand
+ from typing_extensions import Any
 
  from qadence.blocks import AbstractBlock, add, block_is_qubit_hamiltonian
  from qadence.operations import N, X, Y, Z
  from qadence.register import Register
- from qadence.types import Interaction, TArray
+ from qadence.types import Interaction, ObservableTransform, TArray, TParameter
 
  logger = getLogger(__name__)
 
@@ -229,3 +231,38 @@ def ising_hamiltonian(
      zz_ham = zz_hamiltonian(n_qubits, z_terms=z_terms, zz_terms=zz_terms)
      x_ham = hamiltonian_factory(n_qubits, detuning=X, detuning_strength=x_terms)
      return zz_ham + x_ham
+
+
+ def is_numeric(x: Any) -> bool:
+     return type(x) in (int, float, complex, np.int64, np.float64)
+
+
+ @dataclass
+ class ObservableConfig:
+     detuning: TDetuning
+     """
+     Single qubit detuning of the observable Hamiltonian.
+
+     Accepts single-qubit operator N, X, Y, or Z.
+     """
+     scale: TParameter = 1.0
+     """The scale by which to multiply the output of the observable."""
+     shift: TParameter = 0.0
+     """The shift to add to the output of the observable."""
+     transformation_type: ObservableTransform = ObservableTransform.NONE  # type: ignore[assignment]
+     """The type of transformation."""
+     trainable_transform: bool | None = None
+     """
+     Whether to have a trainable transformation on the output of the observable.
+
+     If None, the scale and shift are numbers.
+     If True, the scale and shift are VariationalParameter.
+     If False, the scale and shift are FeatureParameter.
+     """
+
+     def __post_init__(self) -> None:
+         if is_numeric(self.scale) and is_numeric(self.shift):
+             assert (
+                 self.trainable_transform is None
+             ), f"If scale and shift are numbers, trainable_transform must be None. \
+                 But got: {self.trainable_transform}"
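`ObservableConfig` is new in 1.7.0 and re-exported from `qadence.constructors` (see the `__init__.py` hunk above). A hedged construction sketch; with numeric `scale`/`shift`, `trainable_transform` must stay `None`, as `__post_init__` enforces:

```python
from qadence.constructors import ObservableConfig
from qadence.operations import Z

# a Z-detuning observable whose expectation is mapped to 2*<O> - 1
obs_config = ObservableConfig(detuning=Z, scale=2.0, shift=-1.0)
```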