qadence 1.1.1__py3-none-any.whl → 1.2.1__py3-none-any.whl
This diff compares publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- qadence/__init__.py +1 -0
- qadence/analog/__init__.py +4 -2
- qadence/analog/addressing.py +167 -0
- qadence/analog/constants.py +59 -0
- qadence/analog/device.py +82 -0
- qadence/analog/hamiltonian_terms.py +101 -0
- qadence/analog/parse_analog.py +120 -0
- qadence/backend.py +42 -12
- qadence/backends/__init__.py +1 -2
- qadence/backends/api.py +27 -9
- qadence/backends/braket/backend.py +3 -2
- qadence/backends/horqrux/__init__.py +5 -0
- qadence/backends/horqrux/backend.py +216 -0
- qadence/backends/horqrux/config.py +26 -0
- qadence/backends/horqrux/convert_ops.py +273 -0
- qadence/backends/jax_utils.py +45 -0
- qadence/backends/pulser/__init__.py +0 -1
- qadence/backends/pulser/backend.py +31 -15
- qadence/backends/pulser/config.py +19 -10
- qadence/backends/pulser/devices.py +57 -63
- qadence/backends/pulser/pulses.py +70 -12
- qadence/backends/pyqtorch/backend.py +4 -4
- qadence/backends/pyqtorch/config.py +18 -12
- qadence/backends/pyqtorch/convert_ops.py +15 -7
- qadence/backends/utils.py +5 -9
- qadence/blocks/abstract.py +5 -1
- qadence/blocks/analog.py +18 -9
- qadence/blocks/block_to_tensor.py +11 -0
- qadence/blocks/embedding.py +46 -24
- qadence/blocks/primitive.py +81 -9
- qadence/blocks/utils.py +20 -1
- qadence/circuit.py +3 -9
- qadence/constructors/__init__.py +4 -0
- qadence/constructors/feature_maps.py +84 -60
- qadence/constructors/hamiltonians.py +27 -98
- qadence/constructors/rydberg_feature_maps.py +113 -0
- qadence/divergences.py +12 -0
- qadence/engines/__init__.py +0 -0
- qadence/engines/differentiable_backend.py +152 -0
- qadence/engines/jax/__init__.py +8 -0
- qadence/engines/jax/differentiable_backend.py +73 -0
- qadence/engines/jax/differentiable_expectation.py +94 -0
- qadence/engines/torch/__init__.py +4 -0
- qadence/engines/torch/differentiable_backend.py +85 -0
- qadence/extensions.py +21 -9
- qadence/finitediff.py +47 -0
- qadence/mitigations/readout.py +92 -25
- qadence/ml_tools/models.py +10 -3
- qadence/models/qnn.py +88 -23
- qadence/models/quantum_model.py +13 -2
- qadence/operations.py +55 -70
- qadence/parameters.py +24 -13
- qadence/register.py +91 -43
- qadence/transpile/__init__.py +1 -0
- qadence/transpile/apply_fn.py +40 -0
- qadence/types.py +32 -2
- qadence/utils.py +35 -0
- {qadence-1.1.1.dist-info → qadence-1.2.1.dist-info}/METADATA +22 -3
- {qadence-1.1.1.dist-info → qadence-1.2.1.dist-info}/RECORD +62 -44
- {qadence-1.1.1.dist-info → qadence-1.2.1.dist-info}/WHEEL +1 -1
- qadence/analog/interaction.py +0 -198
- qadence/analog/utils.py +0 -132
- /qadence/{backends/pytorch_wrapper.py → engines/torch/differentiable_expectation.py} +0 -0
- {qadence-1.1.1.dist-info → qadence-1.2.1.dist-info}/licenses/LICENSE +0 -0
qadence/backends/utils.py
CHANGED
@@ -17,9 +17,9 @@ from torch import (
     no_grad,
     rand,
 )
-from torch import flatten as torchflatten
 
-from qadence.utils import Endianness, int_to_basis
+from qadence.types import ParamDictType
+from qadence.utils import Endianness, int_to_basis, is_qadence_shape
 
 FINITE_DIFF_EPS = 1e-06
 # Dict of NumPy dtype -> torch dtype (when the correspondence exists)
@@ -92,7 +92,7 @@ def count_bitstrings(sample: Tensor, endianness: Endianness = Endianness.BIG) ->
 )
 
 
-def to_list_of_dicts(param_values: dict[str, Tensor]) -> list[dict[str, Tensor]]:
+def to_list_of_dicts(param_values: ParamDictType) -> list[ParamDictType]:
     if not param_values:
         return [param_values]
 
@@ -119,17 +119,13 @@ def pyqify(state: Tensor, n_qubits: int = None) -> Tensor:
 
 def unpyqify(state: Tensor) -> Tensor:
     """Convert a state of shape [2] * n_qubits + [batch_size] to (batch_size, 2**n_qubits)."""
-    return torchflatten(state, start_dim=0, end_dim=-2).t()
+    return torch.flatten(state, start_dim=0, end_dim=-2).t()
 
 
 def is_pyq_shape(state: Tensor, n_qubits: int) -> bool:
     return state.size()[:-1] == [2] * n_qubits  # type: ignore[no-any-return]
 
 
-def is_qadence_shape(state: Tensor, n_qubits: int) -> bool:
-    return state.shape[1] == 2**n_qubits  # type: ignore[no-any-return]
-
-
 def validate_state(state: Tensor, n_qubits: int) -> None:
     """Check if a custom initial state conforms to the qadence or the pyqtorch format."""
     if state.dtype != complex128:
@@ -145,7 +141,7 @@ def validate_state(state: Tensor, n_qubits: int) -> None:
 )
 
 
-def infer_batchsize(param_values: dict[str, Tensor] = None) -> int:
+def infer_batchsize(param_values: ParamDictType = None) -> int:
     """Infer the batch_size through the length of the parameter tensors."""
     return max([len(tensor) for tensor in param_values.values()]) if param_values else 1
 
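The new `ParamDictType` alias replaces the previous raw `dict[str, Tensor]` annotations, and `is_qadence_shape` moves up to `qadence.utils` so it can be shared across backends and engines. A minimal sketch of the two reworked helpers, assuming the 1.2.1 module paths shown above:

```python
import torch
from qadence.backends.utils import infer_batchsize, unpyqify

# pyqtorch layout: one axis per qubit plus a trailing batch axis,
# i.e. [2] * n_qubits + [batch_size].
state = torch.rand(2, 2, 3, dtype=torch.complex128)

# unpyqify flattens the qubit axes and transposes to (batch_size, 2**n_qubits).
assert unpyqify(state).shape == (3, 4)

# infer_batchsize reads the batch size off the longest parameter tensor,
# defaulting to 1 when no parameters are passed.
assert infer_batchsize({"theta": torch.rand(3)}) == 3
assert infer_batchsize({}) == 1
```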
qadence/blocks/abstract.py
CHANGED
@@ -5,7 +5,7 @@ from abc import ABC, abstractmethod, abstractproperty
 from dataclasses import dataclass
 from functools import cached_property
 from pathlib import Path
-from typing import ClassVar, Iterable, Tuple, Union, get_args
+from typing import ClassVar, Iterable, Tuple, TypeVar, Union, get_args
 
 import sympy
 import torch
@@ -287,6 +287,7 @@ class AbstractBlock(ABC):
     def __hash__(self) -> int:
         return hash(self._to_json())
 
+    @abstractmethod
     def dagger(self) -> AbstractBlock:
         raise NotImplementedError(
             f"Hermitian adjoint of the Block '{type(self)}' is not implemented yet!"
@@ -333,3 +334,6 @@ class AbstractBlock(ABC):
         elif isinstance(self, PrimitiveBlock):
             return self.name == "I"
         return False
+
+
+TAbstractBlock = TypeVar("TAbstractBlock", bound=AbstractBlock)
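Making `dagger` an `@abstractmethod` forces every concrete block type to define its adjoint, and the new `TAbstractBlock` TypeVar lets helpers declare that they return the same concrete subtype they receive. A sketch of the intended pattern (the `double_dagger` helper is hypothetical, not part of qadence):

```python
from qadence.blocks.abstract import AbstractBlock, TAbstractBlock


def double_dagger(block: TAbstractBlock) -> TAbstractBlock:
    # Hypothetical helper: with TAbstractBlock bound to AbstractBlock,
    # a caller passing an RX block is promised an RX-typed result rather
    # than a plain AbstractBlock.
    return block.dagger().dagger()  # type: ignore[return-value]
```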
qadence/blocks/analog.py
CHANGED
@@ -59,8 +59,8 @@ class AnalogBlock(AbstractBlock):
     @property
     def eigenvalues_generator(self) -> torch.Tensor:
         msg = (
-            "Eigenvalues of analog blocks can be computed via "
-            "`
+            "Eigenvalues of for generator of analog blocks can be computed via "
+            "`add_background_hamiltonian(block, register).eigenvalues_generator`. "
         )
         raise NotImplementedError(msg)
 
@@ -68,7 +68,7 @@ class AnalogBlock(AbstractBlock):
     def eigenvalues(self) -> torch.Tensor:
         msg = (
             "Eigenvalues of analog blocks can be computed via "
-            "`
+            "`add_background_hamiltonian(block, register).eigenvalues`. "
         )
         raise NotImplementedError(msg)
 
@@ -83,11 +83,19 @@ class AnalogBlock(AbstractBlock):
         return s
 
     def compute_eigenvalues_generator(
-        self,
+        self,
+        block: AbstractBlock,
+        register: Register,
     ) -> torch.Tensor:
-
+        # FIXME: Revisit analog blocks eigenvalues
+        from qadence.analog import add_background_hamiltonian
+
+        return add_background_hamiltonian(block, register).eigenvalues_generator  # type: ignore [union-attr]
 
-
+    def dagger(self) -> AbstractBlock:
+        raise NotImplementedError(
+            f"Hermitian adjoint of block type {type(self)} is not implemented yet."
+        )
 
 
 @dataclass(eq=False, repr=False)
@@ -108,8 +116,6 @@ class WaitBlock(AnalogBlock):
     with `nᵢ = (1-Zᵢ)/2`.
 
     To construct this block, use the [`wait`][qadence.operations.wait] function.
-
-    Can be used with `add_interaction`.
     """
 
     _eigenvalues_generator: torch.Tensor | None = None
@@ -117,6 +123,8 @@ class WaitBlock(AnalogBlock):
     parameters: ParamMap = ParamMap(duration=1000.0)  # ns
     qubit_support: QubitSupport = QubitSupport("global")
 
+    add_pattern: bool = True
+
     @property
     def eigenvalues_generator(self) -> torch.Tensor | None:
         return self._eigenvalues_generator
@@ -145,7 +153,6 @@ class ConstantAnalogRotation(AnalogBlock):
     [`AnalogRY`][qadence.operations.AnalogRY],
     [`AnalogRZ`][qadence.operations.AnalogRZ]
 
-    Can be used with `add_interaction`.
     WARNING: do not use `ConstantAnalogRotation` with `alpha` as differentiable parameter - use
     the convenience wrappers mentioned above.
     """
@@ -161,6 +168,8 @@ class ConstantAnalogRotation(AnalogBlock):
     )
     qubit_support: QubitSupport = QubitSupport("global")
 
+    add_pattern: bool = True
+
     @property
     def _block_title(self) -> str:
         a = self.parameters.alpha
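The dropped `add_interaction` mentions track the removal of `qadence/analog/interaction.py`; its role is taken over by `add_background_hamiltonian` from the new `qadence/analog` modules. Analog blocks also gain an `add_pattern` flag and explicitly refuse inversion. A short sketch, assuming the `wait` and `AnalogRX` constructors behave as documented here:

```python
from qadence.operations import AnalogRX, wait

# WaitBlock and ConstantAnalogRotation now carry `add_pattern` (default True),
# which controls whether the addressing pattern is applied to the block.
w = wait(duration=500)
print(w.add_pattern)  # True

# Analog blocks are explicitly non-invertible in 1.2.1: dagger() raises.
try:
    AnalogRX(0.5).dagger()
except NotImplementedError as exc:
    print(exc)
```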
@@ -15,8 +15,11 @@ from qadence.blocks import (
|
|
15
15
|
PrimitiveBlock,
|
16
16
|
ScaleBlock,
|
17
17
|
)
|
18
|
+
from qadence.blocks.primitive import ProjectorBlock
|
18
19
|
from qadence.blocks.utils import chain, kron, uuid_to_expression
|
19
20
|
from qadence.parameters import evaluate, stringify
|
21
|
+
|
22
|
+
# from qadence.states import product_state
|
20
23
|
from qadence.types import Endianness, TensorType, TNumber
|
21
24
|
|
22
25
|
J = torch.tensor(1j)
|
@@ -463,6 +466,14 @@ def _block_to_tensor_embedded(
|
|
463
466
|
# add missing identities on unused qubits
|
464
467
|
mat = _fill_identities(block_mat, block.qubit_support, qubit_support, endianness=endianness)
|
465
468
|
|
469
|
+
elif isinstance(block, ProjectorBlock):
|
470
|
+
from qadence.states import product_state
|
471
|
+
|
472
|
+
bra = product_state(block.bra)
|
473
|
+
ket = product_state(block.ket)
|
474
|
+
|
475
|
+
mat = torch.kron(ket, bra.T)
|
476
|
+
|
466
477
|
else:
|
467
478
|
raise TypeError(f"Conversion for block type {type(block)} not supported.")
|
468
479
|
|
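The new branch gives projectors a dense representation: the ket and bra bitstrings become product states, combined into |ket⟩⟨bra| with a Kronecker product. Mirroring the converter's logic with the same calls it uses internally (shapes assume `product_state` returns a `(1, 2**n)` row vector):

```python
import torch
from qadence.states import product_state

ket = product_state("01")  # assumed shape (1, 4)
bra = product_state("10")  # assumed shape (1, 4)

# |01><10| as a dense 4x4 matrix, exactly as in _block_to_tensor_embedded.
mat = torch.kron(ket, bra.T)
print(mat.shape)  # torch.Size([4, 4])
```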
qadence/blocks/embedding.py
CHANGED
@@ -2,11 +2,10 @@ from __future__ import annotations
 
 from typing import Callable, Iterable, List
 
-import numpy as np
 import sympy
-import
-import
-from torch import
+from numpy import array as nparray
+from numpy import cdouble as npcdouble
+from torch import tensor
 
 from qadence.blocks import (
     AbstractBlock,
@@ -16,9 +15,24 @@ from qadence.blocks.utils import (
     parameters,
     uuid_to_expression,
 )
-from qadence.parameters import evaluate, stringify
+from qadence.parameters import evaluate, make_differentiable, stringify
+from qadence.types import ArrayLike, DifferentiableExpression, Engine, ParamDictType, TNumber
 
-
+
+def _concretize_parameter(engine: Engine) -> Callable:
+    if engine == Engine.JAX:
+        from jax.numpy import array as jaxarray
+        from jax.numpy import float64 as jaxfloat64
+
+        def concretize_parameter(value: TNumber, trainable: bool = False) -> ArrayLike:
+            return jaxarray([value], dtype=jaxfloat64)
+
+    else:
+
+        def concretize_parameter(value: TNumber, trainable: bool = False) -> ArrayLike:
+            return tensor([value], requires_grad=trainable)
+
+    return concretize_parameter
 
 
 def unique(x: Iterable) -> List:
@@ -26,14 +40,13 @@ def unique(x: Iterable) -> List:
 
 
 def embedding(
-    block: AbstractBlock, to_gate_params: bool = False
-) -> tuple[
-    """Construct embedding function
+    block: AbstractBlock, to_gate_params: bool = False, engine: Engine = Engine.TORCH
+) -> tuple[ParamDictType, Callable[[ParamDictType, ParamDictType], ParamDictType],]:
+    """Construct embedding function which maps user-facing parameters to either *expression-level*.
 
-
-    parameters or *gate-level* parameters. The construced embedding function has the signature:
+    parameters or *gate-level* parameters. The constructed embedding function has the signature:
 
-        embedding_fn(params:
+        embedding_fn(params: ParamDictType, inputs: ParamDictType) -> ParamDictType:
 
     which means that it maps the *variational* parameter dict `params` and the *feature* parameter
     dict `inputs` to one new parameter dict `embedded_dict` which holds all parameters that are
@@ -56,6 +69,13 @@ def embedding(
     Returns:
         A tuple with variational parameter dict and the embedding function.
     """
+    concretize_parameter = _concretize_parameter(engine)
+    if engine == Engine.TORCH:
+        cast_dtype = tensor
+    else:
+        from jax.numpy import array
+
+        cast_dtype = array
 
     unique_expressions = unique(expressions(block))
     unique_symbols = [p for p in unique(parameters(block)) if not isinstance(p, sympy.Array)]
@@ -77,16 +97,18 @@ def embedding(
     # we dont need to care about constant symbols if they are contained in an symbolic expression
     # we only care about gate params which are ONLY a constant
 
-    embeddings: dict[sympy.Expr,
-        expr:
+    embeddings: dict[sympy.Expr, DifferentiableExpression] = {
+        expr: make_differentiable(expr=expr, engine=engine)
+        for expr in unique_expressions
+        if not expr.is_number
     }
 
     uuid_to_expr = uuid_to_expression(block)
 
-    def embedding_fn(params:
-        embedded_params: dict[sympy.Expr,
+    def embedding_fn(params: ParamDictType, inputs: ParamDictType) -> ParamDictType:
+        embedded_params: dict[sympy.Expr, ArrayLike] = {}
         for expr, fn in embeddings.items():
-            angle:
+            angle: ArrayLike
             values = {}
             for symbol in expr.free_symbols:
                 if symbol.name in inputs:
@@ -112,26 +134,26 @@ def embedding(
                 embedded_params[e] = params[stringify(e)]
 
         if to_gate_params:
-            gate_lvl_params:
+            gate_lvl_params: ParamDictType = {}
             for uuid, e in uuid_to_expr.items():
                 gate_lvl_params[uuid] = embedded_params[e]
             return gate_lvl_params
         else:
             return {stringify(k): v for k, v in embedded_params.items()}
 
-    params:
-    params = {
+    params: ParamDictType
+    params = {
+        p.name: concretize_parameter(value=p.value, trainable=True) for p in trainable_symbols
+    }
     params.update(
         {
-            stringify(expr):
+            stringify(expr): concretize_parameter(value=evaluate(expr), trainable=False)
             for expr in constant_expressions
         }
     )
     params.update(
         {
-            stringify(expr):
-                np.array(expr.tolist(), dtype=np.cdouble), requires_grad=False
-            )
+            stringify(expr): cast_dtype(nparray(expr.tolist(), dtype=npcdouble))
             for expr in unique_const_matrices
         }
    )
qadence/blocks/primitive.py
CHANGED
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 from abc import abstractmethod
+from copy import deepcopy
 from typing import Any, Iterable, Tuple
 
 import sympy
@@ -13,6 +14,7 @@ from qadence.blocks.abstract import AbstractBlock
 from qadence.parameters import (
     Parameter,
     ParamMap,
+    dagger_expression,
     evaluate,
     extract_original_param_entry,
     stringify,
@@ -101,6 +103,9 @@ class PrimitiveBlock(AbstractBlock):
     def n_supports(self) -> int:
         return len(self.qubit_support)
 
+    def dagger(self) -> PrimitiveBlock:
+        return self
+
 
 class ParametricBlock(PrimitiveBlock):
     """Parameterized primitive blocks."""
@@ -200,11 +205,10 @@ class ParametricBlock(PrimitiveBlock):
         target = d["qubit_support"][0]
         return cls(target, params)  # type: ignore[call-arg]
 
-    def dagger(self) -> ParametricBlock:
+    def dagger(self) -> ParametricBlock:
         exprs = self.parameters.expressions()
-
-
-        return self.__class__(*args)  # type: ignore[arg-type]
+        params = tuple(-extract_original_param_entry(param) for param in exprs)
+        return type(self)(*self.qubit_support, *params)  # type: ignore[arg-type]
 
 
 class ScaleBlock(ParametricBlock):
@@ -304,9 +308,8 @@ class ScaleBlock(ParametricBlock):
         )
 
     def dagger(self) -> ScaleBlock:
-
-
-        )
+        p = list(self.parameters.expressions())[0]
+        return self.__class__(self.block.dagger(), dagger_expression(p))
 
     def _to_dict(self) -> dict:
         return {
@@ -350,13 +353,25 @@ class ControlBlock(PrimitiveBlock):
     """The abstract ControlBlock."""
 
     name = "Control"
+    control: tuple[int, ...]
+    target: tuple[int, ...]
 
     def __init__(self, control: tuple[int, ...], target_block: PrimitiveBlock) -> None:
+        self.control = control
         self.blocks = (target_block,)
+        self.target = target_block.qubit_support
 
         # using tuple expansion because some control operations could
         # have multiple targets, e.g. CSWAP
-        super().__init__((*control, *
+        super().__init__((*control, *self.target))  # target_block.qubit_support[0]))
+
+    @property
+    def n_controls(self) -> int:
+        return len(self.control)
+
+    @property
+    def n_targets(self) -> int:
+        return len(self.target)
 
     @property
     def _block_title(self) -> str:
@@ -391,16 +406,28 @@ class ControlBlock(PrimitiveBlock):
         target = d["qubit_support"][1]
         return cls(control, target)
 
+    def dagger(self) -> ControlBlock:
+        blk = deepcopy(self)
+        blk.blocks = (self.blocks[0].dagger(),)
+        return blk
+
 
 class ParametricControlBlock(ParametricBlock):
     """The abstract parametrized ControlBlock."""
 
     name = "ParameterizedControl"
+    control: tuple[int, ...] = ()
+    blocks: tuple[ParametricBlock, ...]
 
     def __init__(self, control: tuple[int, ...], target_block: ParametricBlock) -> None:
         self.blocks = (target_block,)
+        self.control = control
         self.parameters = target_block.parameters
-        super().__init__((*control, target_block.qubit_support
+        super().__init__((*control, *target_block.qubit_support))
+
+    @property
+    def n_controls(self) -> int:
+        return len(self.control)
 
     @property
     def eigenvalues_generator(self) -> torch.Tensor:
@@ -454,3 +481,48 @@ class ParametricControlBlock(ParametricBlock):
 
         s += rf" \[params: {params_str}]"
         return s if self.tag is None else (s + rf" \[tag: {self.tag}]")
+
+    def dagger(self) -> ParametricControlBlock:
+        blk = deepcopy(self)
+        blocks = tuple(b.dagger() for b in blk.blocks)
+        blk.blocks = blocks
+        blk.parameters = blocks[0].parameters
+        return blk
+
+
+class ProjectorBlock(PrimitiveBlock):
+    """The abstract ProjectorBlock."""
+
+    name = "ProjectorBlock"
+
+    def __init__(
+        self,
+        ket: str,
+        bra: str,
+        qubit_support: int | tuple[int, ...],
+    ) -> None:
+        """
+        Arguments:
+
+            ket (str): The ket given as a bitstring.
+            bra (str): The bra given as a bitstring.
+            qubit_support (int | tuple[int]): The qubit_support of the block.
+        """
+        if isinstance(qubit_support, int):
+            qubit_support = (qubit_support,)
+        if len(bra) != len(ket):
+            raise ValueError(
+                "Bra and ket must be bitstrings of same length in the 'Projector' definition."
+            )
+        elif len(bra) != len(qubit_support):
+            raise ValueError("Bra or ket must be of same length as the 'qubit_support'")
+        for wf in [bra, ket]:
+            if not all(int(item) == 0 or int(item) == 1 for item in wf):
+                raise ValueError(
+                    "All qubits must be either in the '0' or '1' state"
+                    " in the 'ProjectorBlock' definition."
+                )
+
+        self.ket = ket
+        self.bra = bra
+        super().__init__(qubit_support)
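These additions complete the `dagger` hierarchy: primitives return themselves, parametric blocks negate their parameters, control blocks deep-copy themselves with a daggered target, and `ScaleBlock` combines `dagger_expression` on the scale with the sub-block adjoint. A quick sketch against the public operations, assuming `X`, `RX` and `CRX` route through these base classes:

```python
from qadence.operations import CRX, RX, X

# PrimitiveBlock.dagger(): self-adjoint gates return themselves.
assert X(0).dagger() is X(0)

# ParametricBlock.dagger(): the rotation angle is negated.
print(RX(0, 0.5).dagger())

# ParametricControlBlock.dagger(): a deepcopy with the target block daggered.
print(CRX(0, 1, 0.5).dagger())
```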
qadence/blocks/utils.py
CHANGED
@@ -5,7 +5,7 @@ from enum import Enum
 from itertools import chain as _flatten
 from typing import Generator, List, Type, TypeVar, Union, get_args
 
-from sympy import Basic, Expr
+from sympy import Array, Basic, Expr
 from torch import Tensor
 
 from qadence.blocks import (
@@ -503,3 +503,22 @@ def assert_same_block(b1: AbstractBlock, b2: AbstractBlock) -> None:
     ), f"Blocks {b1} and {b2} have differing numbers of parameters."
     for p1, p2 in zip(b1.parameters.expressions(), b2.parameters.expressions()):
         assert p1 == p2
+
+
+def unique_parameters(block: AbstractBlock) -> list[Parameter]:
+    """Return the unique parameters in the block.
+
+    These parameters are the actual user-facing parameters which
+    can be assigned by the user. Multiple gates can contain the
+    same unique parameter
+
+    Returns:
+        list[Parameter]: List of unique parameters in the circuit
+    """
+    symbols = []
+    for p in parameters(block):
+        if isinstance(p, Array):
+            continue
+        elif not p.is_number and p not in symbols:
+            symbols.append(p)
+    return symbols
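`unique_parameters` is hoisted out of `QuantumCircuit` so it can be reused on bare blocks: it deduplicates the user-facing symbols and skips `sympy.Array` entries. For example:

```python
from qadence import RX, RY, chain
from qadence.blocks.utils import unique_parameters
from qadence.parameters import VariationalParameter

theta = VariationalParameter("theta")
block = chain(RX(0, theta), RY(1, theta * 2))

# theta drives two gates but is reported exactly once.
print(unique_parameters(block))  # [theta]
```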
qadence/circuit.py
CHANGED
@@ -5,10 +5,10 @@ from itertools import chain as flatten
 from pathlib import Path
 from typing import Iterable
 
-from sympy import Array, Basic
+from sympy import Basic
 
 from qadence.blocks import AbstractBlock, AnalogBlock, CompositeBlock, chain
-from qadence.blocks.utils import parameters, primitive_blocks
+from qadence.blocks.utils import parameters, primitive_blocks, unique_parameters
 from qadence.parameters import Parameter
 from qadence.register import Register
 
@@ -88,13 +88,7 @@ class QuantumCircuit:
         Returns:
             list[Parameter]: List of unique parameters in the circuit
         """
-        symbols = []
-        for p in parameters(self.block):
-            if isinstance(p, Array):
-                continue
-            elif not p.is_number and p not in symbols:
-                symbols.append(p)
-        return symbols
+        return unique_parameters(self.block)
 
     @property
     def num_unique_parameters(self) -> int:
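On the circuit side the behavior is unchanged; `QuantumCircuit.unique_parameters` simply delegates to the helper above (assuming it remains a property, as in 1.1.1):

```python
from qadence import QuantumCircuit, RX, RY, chain
from qadence.parameters import VariationalParameter

theta = VariationalParameter("theta")
circuit = QuantumCircuit(2, chain(RX(0, theta), RY(1, theta)))
print(circuit.unique_parameters)  # [theta]
```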
qadence/constructors/__init__.py
CHANGED
@@ -23,6 +23,7 @@ from .hamiltonians import (
 )
 
 from .rydberg_hea import rydberg_hea, rydberg_hea_layer
+from .rydberg_feature_maps import rydberg_feature_map, analog_feature_map, rydberg_tower_feature_map
 
 from .qft import qft
 
@@ -45,4 +46,7 @@ __all__ = [
     "daqc_transform",
     "rydberg_hea",
     "rydberg_hea_layer",
+    "rydberg_feature_map",
+    "analog_feature_map",
+    "rydberg_tower_feature_map",
 ]
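The new Rydberg feature-map constructors from `rydberg_feature_maps.py` are re-exported at the package level. A hypothetical invocation (argument names assumed, not confirmed by this diff; check the module for the exact signatures):

```python
from qadence.constructors import rydberg_feature_map

# Assumed signature: a detuning-encoded feature map over n_qubits with a
# feature parameter named "phi".
fm = rydberg_feature_map(n_qubits=4, param="phi")
print(fm)
```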