qadence 1.1.1__py3-none-any.whl → 1.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. qadence/__init__.py +1 -0
  2. qadence/analog/__init__.py +4 -2
  3. qadence/analog/addressing.py +167 -0
  4. qadence/analog/constants.py +59 -0
  5. qadence/analog/device.py +82 -0
  6. qadence/analog/hamiltonian_terms.py +101 -0
  7. qadence/analog/parse_analog.py +120 -0
  8. qadence/backend.py +42 -12
  9. qadence/backends/__init__.py +1 -2
  10. qadence/backends/api.py +27 -9
  11. qadence/backends/braket/backend.py +3 -2
  12. qadence/backends/horqrux/__init__.py +5 -0
  13. qadence/backends/horqrux/backend.py +216 -0
  14. qadence/backends/horqrux/config.py +26 -0
  15. qadence/backends/horqrux/convert_ops.py +273 -0
  16. qadence/backends/jax_utils.py +45 -0
  17. qadence/backends/pulser/__init__.py +0 -1
  18. qadence/backends/pulser/backend.py +31 -15
  19. qadence/backends/pulser/config.py +19 -10
  20. qadence/backends/pulser/devices.py +57 -63
  21. qadence/backends/pulser/pulses.py +70 -12
  22. qadence/backends/pyqtorch/backend.py +4 -4
  23. qadence/backends/pyqtorch/config.py +18 -12
  24. qadence/backends/pyqtorch/convert_ops.py +15 -7
  25. qadence/backends/utils.py +5 -9
  26. qadence/blocks/abstract.py +5 -1
  27. qadence/blocks/analog.py +18 -9
  28. qadence/blocks/block_to_tensor.py +11 -0
  29. qadence/blocks/embedding.py +46 -24
  30. qadence/blocks/primitive.py +81 -9
  31. qadence/blocks/utils.py +20 -1
  32. qadence/circuit.py +3 -9
  33. qadence/constructors/__init__.py +4 -0
  34. qadence/constructors/feature_maps.py +84 -60
  35. qadence/constructors/hamiltonians.py +27 -98
  36. qadence/constructors/rydberg_feature_maps.py +113 -0
  37. qadence/divergences.py +12 -0
  38. qadence/engines/__init__.py +0 -0
  39. qadence/engines/differentiable_backend.py +152 -0
  40. qadence/engines/jax/__init__.py +8 -0
  41. qadence/engines/jax/differentiable_backend.py +73 -0
  42. qadence/engines/jax/differentiable_expectation.py +94 -0
  43. qadence/engines/torch/__init__.py +4 -0
  44. qadence/engines/torch/differentiable_backend.py +85 -0
  45. qadence/extensions.py +21 -9
  46. qadence/finitediff.py +47 -0
  47. qadence/mitigations/readout.py +92 -25
  48. qadence/ml_tools/models.py +10 -3
  49. qadence/models/qnn.py +88 -23
  50. qadence/models/quantum_model.py +13 -2
  51. qadence/operations.py +55 -70
  52. qadence/parameters.py +24 -13
  53. qadence/register.py +91 -43
  54. qadence/transpile/__init__.py +1 -0
  55. qadence/transpile/apply_fn.py +40 -0
  56. qadence/types.py +32 -2
  57. qadence/utils.py +35 -0
  58. {qadence-1.1.1.dist-info → qadence-1.2.1.dist-info}/METADATA +22 -3
  59. {qadence-1.1.1.dist-info → qadence-1.2.1.dist-info}/RECORD +62 -44
  60. {qadence-1.1.1.dist-info → qadence-1.2.1.dist-info}/WHEEL +1 -1
  61. qadence/analog/interaction.py +0 -198
  62. qadence/analog/utils.py +0 -132
  63. /qadence/{backends/pytorch_wrapper.py → engines/torch/differentiable_expectation.py} +0 -0
  64. {qadence-1.1.1.dist-info → qadence-1.2.1.dist-info}/licenses/LICENSE +0 -0
qadence/backends/utils.py CHANGED
@@ -17,9 +17,9 @@ from torch import (
     no_grad,
     rand,
 )
-from torch import flatten as torchflatten
 
-from qadence.utils import Endianness, int_to_basis
+from qadence.types import ParamDictType
+from qadence.utils import Endianness, int_to_basis, is_qadence_shape
 
 FINITE_DIFF_EPS = 1e-06
 # Dict of NumPy dtype -> torch dtype (when the correspondence exists)
@@ -92,7 +92,7 @@ def count_bitstrings(sample: Tensor, endianness: Endianness = Endianness.BIG) ->
     )
 
 
-def to_list_of_dicts(param_values: dict[str, Tensor]) -> list[dict[str, float]]:
+def to_list_of_dicts(param_values: ParamDictType) -> list[ParamDictType]:
     if not param_values:
         return [param_values]
 
@@ -119,17 +119,13 @@ def pyqify(state: Tensor, n_qubits: int = None) -> Tensor:
 
 def unpyqify(state: Tensor) -> Tensor:
     """Convert a state of shape [2] * n_qubits + [batch_size] to (batch_size, 2**n_qubits)."""
-    return torchflatten(state, start_dim=0, end_dim=-2).t()
+    return torch.flatten(state, start_dim=0, end_dim=-2).t()
 
 
 def is_pyq_shape(state: Tensor, n_qubits: int) -> bool:
     return state.size()[:-1] == [2] * n_qubits  # type: ignore[no-any-return]
 
 
-def is_qadence_shape(state: Tensor, n_qubits: int) -> bool:
-    return state.shape[1] == 2**n_qubits  # type: ignore[no-any-return]
-
-
 def validate_state(state: Tensor, n_qubits: int) -> None:
     """Check if a custom initial state conforms to the qadence or the pyqtorch format."""
     if state.dtype != complex128:
@@ -145,7 +141,7 @@ def validate_state(state: Tensor, n_qubits: int) -> None:
         )
 
 
-def infer_batchsize(param_values: dict[str, Tensor] = None) -> int:
+def infer_batchsize(param_values: ParamDictType = None) -> int:
    """Infer the batch_size through the length of the parameter tensors."""
    return max([len(tensor) for tensor in param_values.values()]) if param_values else 1
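Not part of the diff: a minimal usage sketch of the helpers touched above, assuming qadence 1.2.1 and torch are installed. It round-trips between the qadence state layout (batch_size, 2**n_qubits) and the pyqtorch layout [2] * n_qubits + [batch_size], and reads the batch size off a ParamDictType-style dict.

    # Sketch only (assumes qadence 1.2.1 + torch): exercising the helpers shown above.
    import torch
    from qadence.backends.utils import infer_batchsize, pyqify, unpyqify, validate_state

    n_qubits, batch_size = 2, 3
    state = torch.zeros((batch_size, 2**n_qubits), dtype=torch.complex128)
    state[:, 0] = 1.0                    # |00> in every batch entry
    validate_state(state, n_qubits)      # accepts the qadence layout

    pyq_state = pyqify(state, n_qubits)  # expected shape: [2, 2, 3]
    back = unpyqify(pyq_state)           # expected shape: (3, 4)
    print(torch.allclose(state, back))

    # infer_batchsize infers the batch size from the parameter dict values.
    print(infer_batchsize({"theta": torch.rand(batch_size)}))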
 
qadence/blocks/abstract.py CHANGED
@@ -5,7 +5,7 @@ from abc import ABC, abstractmethod, abstractproperty
 from dataclasses import dataclass
 from functools import cached_property
 from pathlib import Path
-from typing import ClassVar, Iterable, Tuple, Union, get_args
+from typing import ClassVar, Iterable, Tuple, TypeVar, Union, get_args
 
 import sympy
 import torch
@@ -287,6 +287,7 @@ class AbstractBlock(ABC):
     def __hash__(self) -> int:
         return hash(self._to_json())
 
+    @abstractmethod
     def dagger(self) -> AbstractBlock:
         raise NotImplementedError(
             f"Hermitian adjoint of the Block '{type(self)}' is not implemented yet!"
@@ -333,3 +334,6 @@ class AbstractBlock(ABC):
         elif isinstance(self, PrimitiveBlock):
             return self.name == "I"
         return False
+
+
+TAbstractBlock = TypeVar("TAbstractBlock", bound=AbstractBlock)
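Not part of the diff: the new module-level TAbstractBlock TypeVar (bound to AbstractBlock) lets helpers declare that they return the same block subtype they receive. An illustration with stand-in classes rather than qadence's own:

    # Illustration only (stand-in classes, not qadence API): how a bound TypeVar is used.
    from typing import TypeVar

    class Block:                 # stand-in for AbstractBlock
        tag = None

    class RotationBlock(Block):  # stand-in for a concrete subclass
        pass

    TBlock = TypeVar("TBlock", bound=Block)

    def tag_block(block: TBlock, tag: str) -> TBlock:
        # Type checkers see tag_block(RotationBlock(), "fm") as RotationBlock, not Block.
        block.tag = tag
        return block

    print(type(tag_block(RotationBlock(), "fm")).__name__)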
qadence/blocks/analog.py CHANGED
@@ -59,8 +59,8 @@ class AnalogBlock(AbstractBlock):
     @property
     def eigenvalues_generator(self) -> torch.Tensor:
         msg = (
-            "Eigenvalues of analog blocks can be computed via "
-            "`add_interaction(register, block).eigenvalues`"
+            "Eigenvalues of for generator of analog blocks can be computed via "
+            "`add_background_hamiltonian(block, register).eigenvalues_generator`. "
         )
         raise NotImplementedError(msg)
 
@@ -68,7 +68,7 @@
     def eigenvalues(self) -> torch.Tensor:
         msg = (
             "Eigenvalues of analog blocks can be computed via "
-            "`add_interaction(register, block).eigenvalues`"
+            "`add_background_hamiltonian(block, register).eigenvalues`. "
         )
         raise NotImplementedError(msg)
 
@@ -83,11 +83,19 @@
         return s
 
     def compute_eigenvalues_generator(
-        self, register: Register, block: AbstractBlock
+        self,
+        block: AbstractBlock,
+        register: Register,
     ) -> torch.Tensor:
-        from qadence import add_interaction
+        # FIXME: Revisit analog blocks eigenvalues
+        from qadence.analog import add_background_hamiltonian
+
+        return add_background_hamiltonian(block, register).eigenvalues_generator  # type: ignore [union-attr]
 
-        return add_interaction(register, block).eigenvalues_generator
+    def dagger(self) -> AbstractBlock:
+        raise NotImplementedError(
+            f"Hermitian adjoint of block type {type(self)} is not implemented yet."
+        )
 
 
 @dataclass(eq=False, repr=False)
@@ -108,8 +116,6 @@ class WaitBlock(AnalogBlock):
     with `nᵢ = (1-Zᵢ)/2`.
 
     To construct this block, use the [`wait`][qadence.operations.wait] function.
-
-    Can be used with `add_interaction`.
     """
 
     _eigenvalues_generator: torch.Tensor | None = None
@@ -117,6 +123,8 @@
     parameters: ParamMap = ParamMap(duration=1000.0)  # ns
     qubit_support: QubitSupport = QubitSupport("global")
 
+    add_pattern: bool = True
+
     @property
     def eigenvalues_generator(self) -> torch.Tensor | None:
         return self._eigenvalues_generator
@@ -145,7 +153,6 @@ class ConstantAnalogRotation(AnalogBlock):
     [`AnalogRY`][qadence.operations.AnalogRY],
     [`AnalogRZ`][qadence.operations.AnalogRZ]
 
-    Can be used with `add_interaction`.
     WARNING: do not use `ConstantAnalogRotation` with `alpha` as differentiable parameter - use
     the convenience wrappers mentioned above.
     """
@@ -161,6 +168,8 @@
     )
     qubit_support: QubitSupport = QubitSupport("global")
 
+    add_pattern: bool = True
+
     @property
     def _block_title(self) -> str:
         a = self.parameters.alpha
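Not part of the diff: following the updated error messages, eigenvalues of analog blocks now go through the background (interaction) Hamiltonian rather than the removed add_interaction. A rough, untested sketch of that call pattern, assuming the qadence 1.2.1 names shown in this diff (AnalogRX and Register are existing qadence exports):

    # Rough sketch (assumes qadence 1.2.1): add_background_hamiltonian replaces
    # add_interaction(register, block); note the flipped argument order.
    from qadence import Register
    from qadence.analog import add_background_hamiltonian
    from qadence.operations import AnalogRX

    register = Register.line(2)
    block = AnalogRX(1.0)

    evolved = add_background_hamiltonian(block, register)
    print(evolved.eigenvalues_generator)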
qadence/blocks/block_to_tensor.py CHANGED
@@ -15,8 +15,11 @@ from qadence.blocks import (
     PrimitiveBlock,
     ScaleBlock,
 )
+from qadence.blocks.primitive import ProjectorBlock
 from qadence.blocks.utils import chain, kron, uuid_to_expression
 from qadence.parameters import evaluate, stringify
+
+# from qadence.states import product_state
 from qadence.types import Endianness, TensorType, TNumber
 
 J = torch.tensor(1j)
@@ -463,6 +466,14 @@
         # add missing identities on unused qubits
         mat = _fill_identities(block_mat, block.qubit_support, qubit_support, endianness=endianness)
 
+    elif isinstance(block, ProjectorBlock):
+        from qadence.states import product_state
+
+        bra = product_state(block.bra)
+        ket = product_state(block.ket)
+
+        mat = torch.kron(ket, bra.T)
+
     else:
         raise TypeError(f"Conversion for block type {type(block)} not supported.")
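Not in the diff: a standalone torch sketch of the construction used in the new ProjectorBlock branch, where the matrix is the Kronecker product of one bitstring state with the transpose of the other. product_state is mimicked here by a hypothetical one-hot helper.

    # Standalone sketch (plain torch; product_state_stub is a hypothetical stand-in for
    # qadence.states.product_state, which returns a (1, 2**n) one-hot state).
    import torch

    def product_state_stub(bitstring: str) -> torch.Tensor:
        state = torch.zeros((1, 2 ** len(bitstring)), dtype=torch.complex128)
        state[0, int(bitstring, 2)] = 1.0
        return state

    bra = product_state_stub("0")
    ket = product_state_stub("1")

    # Same shape algebra as the new branch: kron of a (1, 2**n) row with a (2**n, 1)
    # column gives the rank-one 2**n x 2**n matrix built from the two bitstrings.
    mat = torch.kron(ket, bra.T)
    print(mat.real)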
 
qadence/blocks/embedding.py CHANGED
@@ -2,11 +2,10 @@ from __future__ import annotations
 
 from typing import Callable, Iterable, List
 
-import numpy as np
 import sympy
-import sympytorch  # type: ignore [import]
-import torch
-from torch import Tensor
+from numpy import array as nparray
+from numpy import cdouble as npcdouble
+from torch import tensor
 
 from qadence.blocks import (
     AbstractBlock,
@@ -16,9 +15,24 @@ from qadence.blocks.utils import (
     parameters,
     uuid_to_expression,
 )
-from qadence.parameters import evaluate, stringify, torchify
+from qadence.parameters import evaluate, make_differentiable, stringify
+from qadence.types import ArrayLike, DifferentiableExpression, Engine, ParamDictType, TNumber
 
-StrTensorDict = dict[str, Tensor]
+
+def _concretize_parameter(engine: Engine) -> Callable:
+    if engine == Engine.JAX:
+        from jax.numpy import array as jaxarray
+        from jax.numpy import float64 as jaxfloat64
+
+        def concretize_parameter(value: TNumber, trainable: bool = False) -> ArrayLike:
+            return jaxarray([value], dtype=jaxfloat64)
+
+    else:
+
+        def concretize_parameter(value: TNumber, trainable: bool = False) -> ArrayLike:
+            return tensor([value], requires_grad=trainable)
+
+    return concretize_parameter
 
 
 def unique(x: Iterable) -> List:
@@ -26,14 +40,13 @@ def unique(x: Iterable) -> List:
 
 
 def embedding(
-    block: AbstractBlock, to_gate_params: bool = False
-) -> tuple[StrTensorDict, Callable[[StrTensorDict, StrTensorDict], StrTensorDict],]:
-    """Construct embedding function.
+    block: AbstractBlock, to_gate_params: bool = False, engine: Engine = Engine.TORCH
+) -> tuple[ParamDictType, Callable[[ParamDictType, ParamDictType], ParamDictType],]:
+    """Construct embedding function which maps user-facing parameters to either *expression-level*.
 
-    It maps user-facing parameters to either *expression-level*
-    parameters or *gate-level* parameters. The construced embedding function has the signature:
+    parameters or *gate-level* parameters. The constructed embedding function has the signature:
 
-        embedding_fn(params: StrTensorDict, inputs: StrTensorDict) -> StrTensorDict:
+        embedding_fn(params: ParamDictType, inputs: ParamDictType) -> ParamDictType:
 
     which means that it maps the *variational* parameter dict `params` and the *feature* parameter
     dict `inputs` to one new parameter dict `embedded_dict` which holds all parameters that are
@@ -56,6 +69,13 @@ def embedding(
     Returns:
         A tuple with variational parameter dict and the embedding function.
     """
+    concretize_parameter = _concretize_parameter(engine)
+    if engine == Engine.TORCH:
+        cast_dtype = tensor
+    else:
+        from jax.numpy import array
+
+        cast_dtype = array
 
     unique_expressions = unique(expressions(block))
     unique_symbols = [p for p in unique(parameters(block)) if not isinstance(p, sympy.Array)]
@@ -77,16 +97,18 @@ def embedding(
     # we dont need to care about constant symbols if they are contained in an symbolic expression
     # we only care about gate params which are ONLY a constant
 
-    embeddings: dict[sympy.Expr, sympytorch.SymPyModule] = {
-        expr: torchify(expr) for expr in unique_expressions if not expr.is_number
+    embeddings: dict[sympy.Expr, DifferentiableExpression] = {
+        expr: make_differentiable(expr=expr, engine=engine)
+        for expr in unique_expressions
+        if not expr.is_number
     }
 
     uuid_to_expr = uuid_to_expression(block)
 
-    def embedding_fn(params: StrTensorDict, inputs: StrTensorDict) -> StrTensorDict:
-        embedded_params: dict[sympy.Expr, Tensor] = {}
+    def embedding_fn(params: ParamDictType, inputs: ParamDictType) -> ParamDictType:
+        embedded_params: dict[sympy.Expr, ArrayLike] = {}
         for expr, fn in embeddings.items():
-            angle: Tensor
+            angle: ArrayLike
            values = {}
            for symbol in expr.free_symbols:
                if symbol.name in inputs:
@@ -112,26 +134,26 @@ def embedding(
             embedded_params[e] = params[stringify(e)]
 
         if to_gate_params:
-            gate_lvl_params: StrTensorDict = {}
+            gate_lvl_params: ParamDictType = {}
             for uuid, e in uuid_to_expr.items():
                 gate_lvl_params[uuid] = embedded_params[e]
             return gate_lvl_params
         else:
             return {stringify(k): v for k, v in embedded_params.items()}
 
-    params: StrTensorDict
-    params = {p.name: torch.tensor([p.value], requires_grad=True) for p in trainable_symbols}
+    params: ParamDictType
+    params = {
+        p.name: concretize_parameter(value=p.value, trainable=True) for p in trainable_symbols
+    }
     params.update(
         {
-            stringify(expr): torch.tensor([evaluate(expr)], requires_grad=False)
+            stringify(expr): concretize_parameter(value=evaluate(expr), trainable=False)
             for expr in constant_expressions
         }
    )
     params.update(
         {
-            stringify(expr): torch.tensor(
-                np.array(expr.tolist(), dtype=np.cdouble), requires_grad=False
-            )
+            stringify(expr): cast_dtype(nparray(expr.tolist(), dtype=npcdouble))
             for expr in unique_const_matrices
         }
     )
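Not part of the diff: a short sketch of the new engine-aware embedding, assuming qadence 1.2.1 with the default torch engine (FeatureParameter, VariationalParameter and RX are existing qadence exports).

    # Sketch only (assumes qadence 1.2.1, torch engine): embedding() now takes an
    # `engine` argument and concretizes parameters accordingly (torch tensors here,
    # jax arrays for Engine.JAX).
    import torch
    from qadence.blocks.embedding import embedding
    from qadence.operations import RX
    from qadence.parameters import FeatureParameter, VariationalParameter
    from qadence.types import Engine

    block = RX(0, VariationalParameter("theta") * FeatureParameter("x"))
    params, embedding_fn = embedding(block, engine=Engine.TORCH)

    embedded = embedding_fn(params, {"x": torch.tensor([0.5])})
    print(params)    # variational parameters, concretized as torch tensors
    print(embedded)  # embedded expression values, e.g. theta * x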
qadence/blocks/primitive.py CHANGED
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 from abc import abstractmethod
+from copy import deepcopy
 from typing import Any, Iterable, Tuple
 
 import sympy
@@ -13,6 +14,7 @@ from qadence.blocks.abstract import AbstractBlock
 from qadence.parameters import (
     Parameter,
     ParamMap,
+    dagger_expression,
     evaluate,
     extract_original_param_entry,
     stringify,
@@ -101,6 +103,9 @@ class PrimitiveBlock(AbstractBlock):
     def n_supports(self) -> int:
         return len(self.qubit_support)
 
+    def dagger(self) -> PrimitiveBlock:
+        return self
+
 
 class ParametricBlock(PrimitiveBlock):
     """Parameterized primitive blocks."""
@@ -200,11 +205,10 @@ class ParametricBlock(PrimitiveBlock):
         target = d["qubit_support"][0]
         return cls(target, params)  # type: ignore[call-arg]
 
-    def dagger(self) -> ParametricBlock:  # type: ignore[override]
+    def dagger(self) -> ParametricBlock:
         exprs = self.parameters.expressions()
-        args = tuple(-extract_original_param_entry(param) for param in exprs)
-        args = args if -1 in self.qubit_support else (*self.qubit_support, *args)
-        return self.__class__(*args)  # type: ignore[arg-type]
+        params = tuple(-extract_original_param_entry(param) for param in exprs)
+        return type(self)(*self.qubit_support, *params)  # type: ignore[arg-type]
 
 
 class ScaleBlock(ParametricBlock):
@@ -304,9 +308,8 @@ class ScaleBlock(ParametricBlock):
         )
 
     def dagger(self) -> ScaleBlock:
-        return self.__class__(
-            self.block, Parameter(-extract_original_param_entry(self.parameters.parameter))
-        )
+        p = list(self.parameters.expressions())[0]
+        return self.__class__(self.block.dagger(), dagger_expression(p))
 
     def _to_dict(self) -> dict:
         return {
@@ -350,13 +353,25 @@ class ControlBlock(PrimitiveBlock):
     """The abstract ControlBlock."""
 
     name = "Control"
+    control: tuple[int, ...]
+    target: tuple[int, ...]
 
     def __init__(self, control: tuple[int, ...], target_block: PrimitiveBlock) -> None:
+        self.control = control
         self.blocks = (target_block,)
+        self.target = target_block.qubit_support
 
         # using tuple expansion because some control operations could
         # have multiple targets, e.g. CSWAP
-        super().__init__((*control, *target_block.qubit_support))  # target_block.qubit_support[0]))
+        super().__init__((*control, *self.target))  # target_block.qubit_support[0]))
+
+    @property
+    def n_controls(self) -> int:
+        return len(self.control)
+
+    @property
+    def n_targets(self) -> int:
+        return len(self.target)
 
     @property
     def _block_title(self) -> str:
@@ -391,16 +406,28 @@ class ControlBlock(PrimitiveBlock):
         target = d["qubit_support"][1]
         return cls(control, target)
 
+    def dagger(self) -> ControlBlock:
+        blk = deepcopy(self)
+        blk.blocks = (self.blocks[0].dagger(),)
+        return blk
+
 
 class ParametricControlBlock(ParametricBlock):
     """The abstract parametrized ControlBlock."""
 
     name = "ParameterizedControl"
+    control: tuple[int, ...] = ()
+    blocks: tuple[ParametricBlock, ...]
 
     def __init__(self, control: tuple[int, ...], target_block: ParametricBlock) -> None:
         self.blocks = (target_block,)
+        self.control = control
         self.parameters = target_block.parameters
-        super().__init__((*control, target_block.qubit_support[0]))
+        super().__init__((*control, *target_block.qubit_support))
+
+    @property
+    def n_controls(self) -> int:
+        return len(self.control)
 
     @property
     def eigenvalues_generator(self) -> torch.Tensor:
@@ -454,3 +481,48 @@
 
         s += rf" \[params: {params_str}]"
         return s if self.tag is None else (s + rf" \[tag: {self.tag}]")
+
+    def dagger(self) -> ParametricControlBlock:
+        blk = deepcopy(self)
+        blocks = tuple(b.dagger() for b in blk.blocks)
+        blk.blocks = blocks
+        blk.parameters = blocks[0].parameters
+        return blk
+
+
+class ProjectorBlock(PrimitiveBlock):
+    """The abstract ProjectorBlock."""
+
+    name = "ProjectorBlock"
+
+    def __init__(
+        self,
+        ket: str,
+        bra: str,
+        qubit_support: int | tuple[int, ...],
+    ) -> None:
+        """
+        Arguments:
+
+            ket (str): The ket given as a bitstring.
+            bra (str): The bra given as a bitstring.
+            qubit_support (int | tuple[int]): The qubit_support of the block.
+        """
+        if isinstance(qubit_support, int):
+            qubit_support = (qubit_support,)
+        if len(bra) != len(ket):
+            raise ValueError(
+                "Bra and ket must be bitstrings of same length in the 'Projector' definition."
+            )
+        elif len(bra) != len(qubit_support):
+            raise ValueError("Bra or ket must be of same length as the 'qubit_support'")
+        for wf in [bra, ket]:
+            if not all(int(item) == 0 or int(item) == 1 for item in wf):
+                raise ValueError(
+                    "All qubits must be either in the '0' or '1' state"
+                    " in the 'ProjectorBlock' definition."
+                )
+
+        self.ket = ket
+        self.bra = bra
+        super().__init__(qubit_support)
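Not part of the diff: a small sketch of the per-type dagger() behaviour introduced above, assuming standard qadence operations (X, RX, CRX).

    # Sketch only (assumes qadence 1.2.1): dagger() is now defined per block type.
    from qadence.operations import CRX, RX, X

    print(X(0).dagger())            # non-parametric primitive: returns the block itself
    print(RX(0, 0.5).dagger())      # parametric block: parameter negated, i.e. RX(0, -0.5)
    print(CRX(0, 1, 0.5).dagger())  # control block: dagger applied to the wrapped target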
qadence/blocks/utils.py CHANGED
@@ -5,7 +5,7 @@ from enum import Enum
 from itertools import chain as _flatten
 from typing import Generator, List, Type, TypeVar, Union, get_args
 
-from sympy import Basic, Expr
+from sympy import Array, Basic, Expr
 from torch import Tensor
 
 from qadence.blocks import (
@@ -503,3 +503,22 @@ def assert_same_block(b1: AbstractBlock, b2: AbstractBlock) -> None:
     ), f"Blocks {b1} and {b2} have differing numbers of parameters."
     for p1, p2 in zip(b1.parameters.expressions(), b2.parameters.expressions()):
         assert p1 == p2
+
+
+def unique_parameters(block: AbstractBlock) -> list[Parameter]:
+    """Return the unique parameters in the block.
+
+    These parameters are the actual user-facing parameters which
+    can be assigned by the user. Multiple gates can contain the
+    same unique parameter
+
+    Returns:
+        list[Parameter]: List of unique parameters in the circuit
+    """
+    symbols = []
+    for p in parameters(block):
+        if isinstance(p, Array):
+            continue
+        elif not p.is_number and p not in symbols:
+            symbols.append(p)
+    return symbols
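Not part of the diff: since the deduplication logic now lives in qadence.blocks.utils.unique_parameters, it can be applied to a bare block without building a QuantumCircuit first. A small sketch assuming standard qadence constructors:

    # Sketch only (assumes qadence 1.2.1): unique parameters straight from a block.
    from qadence.blocks.utils import chain, unique_parameters
    from qadence.operations import RX, RY
    from qadence.parameters import VariationalParameter

    theta = VariationalParameter("theta")
    block = chain(RX(0, theta), RY(1, theta), RX(1, "phi"))
    print(unique_parameters(block))  # each user-facing symbol reported once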
qadence/circuit.py CHANGED
@@ -5,10 +5,10 @@ from itertools import chain as flatten
 from pathlib import Path
 from typing import Iterable
 
-from sympy import Array, Basic
+from sympy import Basic
 
 from qadence.blocks import AbstractBlock, AnalogBlock, CompositeBlock, chain
-from qadence.blocks.utils import parameters, primitive_blocks
+from qadence.blocks.utils import parameters, primitive_blocks, unique_parameters
 from qadence.parameters import Parameter
 from qadence.register import Register
 
@@ -88,13 +88,7 @@ class QuantumCircuit:
         Returns:
             list[Parameter]: List of unique parameters in the circuit
         """
-        symbols = []
-        for p in parameters(self.block):
-            if isinstance(p, Array):
-                continue
-            elif not p.is_number and p not in symbols:
-                symbols.append(p)
-        return symbols
+        return unique_parameters(self.block)
 
     @property
     def num_unique_parameters(self) -> int:
qadence/constructors/__init__.py CHANGED
@@ -23,6 +23,7 @@ from .hamiltonians import (
 )
 
 from .rydberg_hea import rydberg_hea, rydberg_hea_layer
+from .rydberg_feature_maps import rydberg_feature_map, analog_feature_map, rydberg_tower_feature_map
 
 from .qft import qft
 
@@ -45,4 +46,7 @@
     "daqc_transform",
     "rydberg_hea",
     "rydberg_hea_layer",
+    "rydberg_feature_map",
+    "analog_feature_map",
+    "rydberg_tower_feature_map",
 ]