qadence 1.7.0__py3-none-any.whl → 1.7.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
qadence/backends/api.py CHANGED
@@ -2,10 +2,17 @@ from __future__ import annotations
 
 from qadence.backend import Backend, BackendConfiguration
 from qadence.engines.differentiable_backend import DifferentiableBackend
-from qadence.extensions import available_backends, available_engines, set_backend_config
-from qadence.types import BackendName, DiffMode, Engine
+from qadence.extensions import (
+    import_backend,
+    import_config,
+    import_engine,
+    set_backend_config,
+)
+from qadence.logger import get_logger
+from qadence.types import BackendName, DiffMode
 
 __all__ = ["backend_factory", "config_factory"]
+logger = get_logger(__name__)
 
 
 def backend_factory(
@@ -14,67 +21,47 @@ def backend_factory(
     configuration: BackendConfiguration | dict | None = None,
 ) -> Backend | DifferentiableBackend:
     backend_inst: Backend | DifferentiableBackend
-    backends = available_backends()
     try:
-        backend_name = BackendName(backend)
-    except ValueError:
-        raise NotImplementedError(f"The requested backend '{backend}' is not implemented.")
-    try:
-        BackendCls = backends[backend_name]
-    except Exception as e:
-        raise ImportError(
-            f"The requested backend '{backend_name}' is either not installed\
-            or could not be imported due to {e}."
-        )
-
-    default_config = BackendCls.default_configuration()
-    if configuration is None:
-        configuration = default_config
-    elif isinstance(configuration, dict):
-        configuration = config_factory(backend_name, configuration)
-    else:
-        # NOTE: types have to match exactly, hence we use `type`
-        if not isinstance(configuration, type(BackendCls.default_configuration())):
-            raise ValueError(
-                f"Given config class '{type(configuration)}' does not match the backend",
-                f" class: '{BackendCls}'. Expected: '{type(BackendCls.default_configuration())}.'",
-            )
-
-    # Create the backend
-    backend_inst = BackendCls(
-        config=configuration
-        if configuration is not None
-        else BackendCls.default_configuration()  # type: ignore[attr-defined]
-    )
+        BackendCls = import_backend(backend)
+        default_config = BackendCls.default_configuration()
+        if configuration is None:
+            configuration = default_config
+        elif isinstance(configuration, dict):
+            configuration = config_factory(backend, configuration)
+        else:
+            # NOTE: types have to match exactly, hence we use `type`
+            if not isinstance(configuration, type(BackendCls.default_configuration())):
+                expected_cfg = BackendCls.default_configuration()
+                raise ValueError(
+                    f"Given config class '{type(configuration)}' does not match the backend",
+                    f" class: '{BackendCls}'. Expected: '{type(expected_cfg)}.'",
+                )
 
+        # Instantiate the backend
+        backend_inst = BackendCls(  # type: ignore[operator]
+            config=configuration
+            if configuration is not None
+            else BackendCls.default_configuration()
+        )
+        set_backend_config(backend_inst, diff_mode)
+        # Wrap the quantum Backend in a DifferentiableBackend if a diff_mode is passed.
+        if diff_mode is not None:
+            diff_backend_cls = import_engine(backend_inst.engine)
+            backend_inst = diff_backend_cls(backend=backend_inst, diff_mode=DiffMode(diff_mode))  # type: ignore[operator]
+        return backend_inst
+    except Exception as e:
+        msg = f"The requested backend '{backend}' is either not installed\
+            or could not be imported due to {e}."
+        logger.error(msg)
+        raise Exception(msg)
     # Set backend configurations which depend on the differentiation mode
-    set_backend_config(backend_inst, diff_mode)
-    # Wrap the quantum Backend in a DifferentiableBackend if a diff_mode is passed.
-    if diff_mode is not None:
-        try:
-            engine_name = Engine(backend_inst.engine)
-        except ValueError:
-            raise NotImplementedError(
-                f"The requested engine '{backend_inst.engine}' is not implemented."
-            )
-        try:
-            diff_backend_cls = available_engines()[engine_name]
-            backend_inst = diff_backend_cls(backend=backend_inst, diff_mode=DiffMode(diff_mode))  # type: ignore[arg-type]
-        except Exception as e:
-            raise ImportError(
-                f"The requested engine '{engine_name}' is either not installed\
-                or could not be imported due to {e}."
-            )
-    return backend_inst
-
 
-def config_factory(name: BackendName | str, config: dict) -> BackendConfiguration:
-    backends = available_backends()
 
+def config_factory(backend_name: BackendName | str, config: dict) -> BackendConfiguration:
+    cfg: BackendConfiguration
     try:
-        BackendCls = backends[BackendName(name)]
-    except KeyError:
-        raise NotImplementedError(f"The requested backend '{name}' is not implemented!")
-
-    BackendConfigCls = type(BackendCls.default_configuration())
-    return BackendConfigCls(**config)  # type: ignore[no-any-return]
+        BackendConfigCls = import_config(backend_name)
+        cfg = BackendConfigCls(**config)  # type: ignore[operator]
+    except Exception as e:
+        logger.debug(f"Unable to import config for backend {backend_name} due to {e}.")
+    return cfg
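
For context, a minimal usage sketch of the two factories after this refactor, assuming the pyqtorch backend is installed (`n_steps_hevo` is a real Configuration field, see the pyqtorch config diff below):

    from qadence.backends.api import backend_factory, config_factory
    from qadence.types import BackendName, DiffMode

    # Build a typed Configuration from a plain dict, then a differentiable backend.
    cfg = config_factory(BackendName.PYQTORCH, {"n_steps_hevo": 500})
    bknd = backend_factory(BackendName.PYQTORCH, diff_mode=DiffMode.AD, configuration=cfg)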
qadence/backends/gpsr.py CHANGED
@@ -126,6 +126,7 @@ def multi_gap_psr(
 
     # get number of observables from expectation value tensor
     if f_plus.numel() > 1:
+        batch_size = F[0].shape[0]
         n_obs = F[0].shape[1]
 
     # reshape F vector
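
The added line records the batch dimension of the expectation tensor alongside the number of observables before the reshape. A standalone illustration with made-up shapes (the tensors here are hypothetical, not qadence API):

    import torch

    F = [torch.zeros(8, 3)]  # hypothetical expectation values: batch_size=8, n_obs=3
    batch_size = F[0].shape[0]
    n_obs = F[0].shape[1]
    assert (batch_size, n_obs) == (8, 3)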
qadence/backends/pyqtorch/backend.py CHANGED
@@ -12,6 +12,7 @@ from torch import Tensor
 from qadence.backend import Backend as BackendInterface
 from qadence.backend import ConvertedCircuit, ConvertedObservable
 from qadence.backends.utils import (
+    infer_batchsize,
     pyqify,
     to_list_of_dicts,
     unpyqify,
@@ -31,7 +32,6 @@ from qadence.transpile import (
     transpile,
 )
 from qadence.types import BackendName, Endianness, Engine
-from qadence.utils import infer_batchsize
 
 from .config import Configuration, default_passes
 from .convert_ops import convert_block
@@ -165,7 +165,6 @@ class Backend(BackendInterface):
                 "Looping expectation does not make sense with batched initial state. "
                 "Define your initial state with `batch_size=1`"
             )
-
         list_expvals = []
         observables = observable if isinstance(observable, list) else [observable]
         for vals in to_list_of_dicts(param_values):
qadence/backends/pyqtorch/config.py CHANGED
@@ -4,6 +4,8 @@ from dataclasses import dataclass
 from logging import getLogger
 from typing import Callable
 
+from pyqtorch.utils import SolverType
+
 from qadence.analog import add_background_hamiltonian
 from qadence.backend import BackendConfiguration
 from qadence.transpile import (
@@ -41,6 +43,9 @@ class Configuration(BackendConfiguration):
     algo_hevo: AlgoHEvo = AlgoHEvo.EXP
     """Determine which kind of Hamiltonian evolution algorithm to use."""
 
+    ode_solver: SolverType = SolverType.DP5_SE
+    """Determine which ODE solver to use for time-dependent blocks."""
+
     n_steps_hevo: int = 100
     """Default number of steps for the Hamiltonian evolution."""
qadence/backends/pyqtorch/convert_ops.py CHANGED
@@ -6,8 +6,10 @@ from typing import Any, Sequence, Tuple
 
 import pyqtorch as pyq
 import sympy
+import torch
 from pyqtorch.apply import apply_operator
 from pyqtorch.matrices import _dagger
+from pyqtorch.time_dependent.sesolve import sesolve
 from pyqtorch.utils import is_diag
 from torch import (
     Tensor,
@@ -26,6 +28,8 @@ from torch.nn import Module
 
 from qadence.backends.utils import (
     finitediff,
+    pyqify,
+    unpyqify,
 )
 from qadence.blocks import (
     AbstractBlock,
@@ -38,8 +42,12 @@ from qadence.blocks import (
     ScaleBlock,
     TimeEvolutionBlock,
 )
-from qadence.blocks.block_to_tensor import _block_to_tensor_embedded, block_to_tensor
+from qadence.blocks.block_to_tensor import (
+    _block_to_tensor_embedded,
+    block_to_tensor,
+)
 from qadence.blocks.primitive import ProjectorBlock
+from qadence.blocks.utils import parameters
 from qadence.operations import (
     U,
     multi_qubit_gateset,
@@ -177,6 +185,7 @@ class PyQHamiltonianEvolution(Module):
         self.param_names = config.get_param_name(block)
         self.block = block
         self.hmat: Tensor
+        self.config = config
 
         if isinstance(block.generator, AbstractBlock) and not block.generator.is_parametric:
             hmat = block_to_tensor(
@@ -289,18 +298,80 @@ class PyQHamiltonianEvolution(Module):
         """Dagger of the evolved operator given the current parameter values."""
         return _dagger(self.unitary(values))
 
+    def _get_time_parameter(self) -> str:
+        # get unique time parameters
+        unique_time_params = set()
+        for p in parameters(self.block.generator):  # type: ignore [arg-type]
+            if getattr(p, "is_time", False):
+                unique_time_params.add(str(p))
+
+        if len(unique_time_params) > 1:
+            raise Exception("Only a single time parameter is supported.")
+
+        return unique_time_params.pop()
+
     def forward(
         self,
         state: Tensor,
         values: dict[str, Tensor],
     ) -> Tensor:
-        return apply_operator(
-            state,
-            self.unitary(values),
-            self.qubit_support,
-            self.n_qubits,
-            self.batch_size,
-        )
+        if getattr(self.block.generator, "is_time_dependent", False):  # type: ignore [union-attr]
+
+            def Ht(t: Tensor | float) -> Tensor:
+                # values dict has to change with new value of t
+                # initial value of a feature parameter inside generator block
+                # has to be inferred here
+                new_vals = dict()
+                for str_expr, val in values.items():
+                    expr = sympy.sympify(str_expr)
+                    t_symb = sympy.Symbol(self._get_time_parameter())
+                    free_symbols = expr.free_symbols
+                    if t_symb in free_symbols:
+                        # create substitution list for time and feature params
+                        subs_list = [(t_symb, t)]
+
+                        if len(free_symbols) > 1:
+                            # get feature param symbols
+                            feat_symbols = free_symbols.difference(set([t_symb]))
+
+                            # get feature param values
+                            feat_vals = values["orig_param_values"]
+
+                            # update substitution list with feature param values
+                            for fs in feat_symbols:
+                                subs_list.append((fs, feat_vals[str(fs)]))
+
+                        # evaluate expression with new time param value
+                        new_vals[str_expr] = torch.tensor(float(expr.subs(subs_list)))
+                    else:
+                        # expression doesn't contain time parameter - copy it as is
+                        new_vals[str_expr] = val
+
+                # get matrix form of generator
+                hmat = _block_to_tensor_embedded(
+                    self.block.generator,  # type: ignore[arg-type]
+                    values=new_vals,
+                    qubit_support=self.qubit_support,
+                    use_full_support=False,
+                    device=self.device,
+                ).squeeze(0)
+
+                return hmat
+
+            tsave = torch.linspace(0, self.block.duration, self.config.n_steps_hevo)  # type: ignore [attr-defined]
+            result = pyqify(
+                sesolve(Ht, unpyqify(state).T[:, 0:1], tsave, self.config.ode_solver).states[-1].T
+            )
+        else:
+            result = apply_operator(
+                state,
+                self.unitary(values),
+                self.qubit_support,
+                self.n_qubits,
+                self.batch_size,
+            )
+
+        return result
 
     @property
     def device(self) -> torch_device:
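
The core of the new time-dependent path is re-evaluating each embedded expression at every solver time step, pulling feature-parameter values from the "orig_param_values" entry (added in the embedding diff below). A standalone sketch of that substitution step, with illustrative names only:

    import sympy
    import torch

    values = {"orig_param_values": {"omega": torch.tensor(2.0)}}
    expr = sympy.sympify("omega*t")
    t_symb = sympy.Symbol("t")

    # substitute the solver time t, then the stored feature-parameter values
    subs_list = [(t_symb, 0.5)]
    for fs in expr.free_symbols - {t_symb}:
        subs_list.append((fs, float(values["orig_param_values"][str(fs)])))
    value_at_t = torch.tensor(float(expr.subs(subs_list)))  # omega * t = 2.0 * 0.5
    assert torch.isclose(value_at_t, torch.tensor(1.0))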
qadence/backends/utils.py CHANGED
@@ -98,10 +98,11 @@ def to_list_of_dicts(param_values: ParamDictType) -> list[ParamDictType]:
     if not param_values:
         return [param_values]
 
-    max_batch_size = max(p.size()[0] for p in param_values.values())
+    max_batch_size = max(p.size()[0] for p in param_values.values() if isinstance(p, Tensor))
     batched_values = {
         k: (v if v.size()[0] == max_batch_size else v.repeat(max_batch_size, 1))
         for k, v in param_values.items()
+        if isinstance(v, Tensor)
     }
 
     return [{k: v[i] for k, v in batched_values.items()} for i in range(max_batch_size)]
@@ -143,9 +144,22 @@ def validate_state(state: Tensor, n_qubits: int) -> None:
         )
 
 
-def infer_batchsize(param_values: ParamDictType = None) -> int:
+def infer_batchsize(param_values: dict[str, Tensor] = None) -> int:
     """Infer the batch_size through the length of the parameter tensors."""
-    return max([len(tensor) for tensor in param_values.values()]) if param_values else 1
+    try:
+        return (
+            max(
+                [
+                    len(tensor_or_dict)
+                    for tensor_or_dict in param_values.values()
+                    if isinstance(tensor_or_dict, Tensor)
+                ]
+            )
+            if param_values
+            else 1
+        )
+    except Exception:
+        return 1
 
 
 # The following functions can be used to compute potentially higher order gradients using pyqtorch's
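
Both helpers now skip non-tensor entries, which lets the "orig_param_values" sub-dict introduced in the embedding diff below travel inside parameter dicts safely. A behavior sketch, assuming qadence 1.7.2:

    import torch
    from qadence.backends.utils import infer_batchsize

    params = {"theta": torch.rand(4, 1), "orig_param_values": {"x": torch.rand(1)}}
    assert infer_batchsize(params) == 4  # the nested dict is ignored
    assert infer_batchsize(None) == 1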
qadence/blocks/abstract.py CHANGED
@@ -300,6 +300,13 @@ class AbstractBlock(ABC):
         params: list[sympy.Basic] = parameters(self)
         return any(isinstance(p, Parameter) for p in params)
 
+    @property
+    def is_time_dependent(self) -> bool:
+        from qadence.blocks.utils import parameters
+
+        params: list[sympy.Basic] = parameters(self)
+        return any(getattr(p, "is_time", False) for p in params)
+
     def tensor(self, values: dict[str, TNumber | torch.Tensor] = {}) -> torch.Tensor:
         from .block_to_tensor import block_to_tensor
 
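
A sketch of the new property; constructing a time parameter via an `is_time=True` flag is an assumption here, since the user-facing constructor is not part of this diff:

    from qadence import RX, Parameter

    t = Parameter("t", is_time=True)  # hypothetical construction of a time parameter
    theta = Parameter("theta")

    assert RX(0, t).is_time_dependent
    assert not RX(0, theta).is_time_dependent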
qadence/blocks/embedding.py CHANGED
@@ -111,18 +111,21 @@ def embedding(
             angle: ArrayLike
             values = {}
             for symbol in expr.free_symbols:
-                if symbol.name in inputs:
-                    value = inputs[symbol.name]
-                elif symbol.name in params:
-                    value = params[symbol.name]
+                if not symbol.is_time:
+                    if symbol.name in inputs:
+                        value = inputs[symbol.name]
+                    elif symbol.name in params:
+                        value = params[symbol.name]
+                    else:
+                        msg_trainable = "Trainable" if symbol.trainable else "Non-trainable"
+                        raise KeyError(
+                            f"{msg_trainable} parameter '{symbol.name}' not found in the "
+                            f"inputs list: {list(inputs.keys())} nor the "
+                            f"params list: {list(params.keys())}."
+                        )
+                    values[symbol.name] = value
                 else:
-                    msg_trainable = "Trainable" if symbol.trainable else "Non-trainable"
-                    raise KeyError(
-                        f"{msg_trainable} parameter '{symbol.name}' not found in the "
-                        f"inputs list: {list(inputs.keys())} nor the "
-                        f"params list: {list(params.keys())}."
-                    )
-                values[symbol.name] = value
+                    values[symbol.name] = tensor(1.0)
             angle = fn(**values)
             # do not reshape parameters which are multi-dimensional
             # tensors, such as for example generator matrices
@@ -139,7 +142,9 @@ def embedding(
                 gate_lvl_params[uuid] = embedded_params[e]
             return gate_lvl_params
         else:
-            return {stringify(k): v for k, v in embedded_params.items()}
+            out = {stringify(k): v for k, v in embedded_params.items()}
+            out.update({"orig_param_values": inputs})
+            return out
 
     params: ParamDictType
     params = {
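
With this change, the non-gate-level embedding output carries the raw inputs under the reserved "orig_param_values" key, which the time-dependent evolution in convert_ops reads back. A minimal sketch, assuming the default `to_gate_params=False` path of `embedding`:

    import torch
    from qadence import RX, FeatureParameter
    from qadence.blocks.embedding import embedding

    block = RX(0, FeatureParameter("x"))
    params, embedding_fn = embedding(block)
    out = embedding_fn(params, {"x": torch.tensor([0.5])})
    assert "orig_param_values" in out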
qadence/execution.py CHANGED
@@ -71,7 +71,10 @@ def _(
     endianness: Endianness = Endianness.BIG,
     configuration: Union[BackendConfiguration, dict, None] = None,
 ) -> Tensor:
-    bknd = backend_factory(backend, configuration=configuration)
+    diff_mode = None
+    if backend == BackendName.PYQTORCH:
+        diff_mode = DiffMode.AD
+    bknd = backend_factory(backend, diff_mode=diff_mode, configuration=configuration)
     conv = bknd.convert(circuit)
     with no_grad():
         return bknd.run(
@@ -147,7 +150,10 @@ def _(
     endianness: Endianness = Endianness.BIG,
     configuration: Union[BackendConfiguration, dict, None] = None,
 ) -> list[Counter]:
-    bknd = backend_factory(backend, configuration=configuration)
+    diff_mode = None
+    if backend == BackendName.PYQTORCH:
+        diff_mode = DiffMode.AD
+    bknd = backend_factory(backend, diff_mode=diff_mode, configuration=configuration)
     conv = bknd.convert(circuit)
     return bknd.sample(
         circuit=conv.circuit,
@@ -242,7 +248,9 @@ def _(
     configuration: Union[BackendConfiguration, dict, None] = None,
 ) -> Tensor:
     observable = observable if isinstance(observable, list) else [observable]
-    bknd = backend_factory(backend, configuration=configuration, diff_mode=diff_mode)
+    if backend == BackendName.PYQTORCH:
+        diff_mode = DiffMode.AD
+    bknd = backend_factory(backend, diff_mode=diff_mode, configuration=configuration)
     conv = bknd.convert(circuit, observable)
 
     def _expectation() -> Tensor:
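
With these changes, `run` and `sample` build a differentiable pyqtorch backend (DiffMode.AD) instead of a bare one, and `expectation` overrides the requested `diff_mode` with AD whenever the pyqtorch backend is selected. A minimal sketch using the public convenience API:

    from qadence import QuantumCircuit, RX, run, sample

    circuit = QuantumCircuit(1, RX(0, 0.5))
    wf = run(circuit)                  # pyqtorch is the default backend; AD is set internally
    counts = sample(circuit, n_shots=100)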
qadence/extensions.py CHANGED
@@ -2,62 +2,93 @@ from __future__ import annotations
 
 import importlib
 from logging import getLogger
-from string import Template
+from typing import TypeVar
 
-from qadence.backend import Backend
+from qadence.backend import Backend, BackendConfiguration
 from qadence.blocks.abstract import TAbstractBlock
+from qadence.engines.differentiable_backend import DifferentiableBackend
 from qadence.types import BackendName, DiffMode, Engine
 
-backends_namespace = Template("qadence.backends.$name")
+BackendClsType = TypeVar("BackendClsType", bound=Backend)
+EngineClsType = TypeVar("EngineClsType", bound=DifferentiableBackend)
 
 logger = getLogger(__name__)
 
 
-def _available_engines() -> dict:
-    """Returns a dictionary of currently installed, native qadence engines."""
-    res = {}
-    for engine in Engine.list():
-        module_path = f"qadence.engines.{engine}.differentiable_backend"
+def import_config(backend_name: str | BackendName) -> BackendConfiguration:
+    module_path = f"qadence.backends.{backend_name}.config"
+    cfg: BackendConfiguration
+    try:
+        module = importlib.import_module(module_path)
+        cfg = getattr(module, "Configuration")
+    except (ModuleNotFoundError, ImportError) as e:
+        raise type(e)(f"Failed to import backend config of {backend_name} due to {e}.") from e
+    return cfg
+
+
+def import_backend(backend_name: str | BackendName) -> Backend:
+    module_path = f"qadence.backends.{backend_name}.backend"
+    backend: Backend
+    try:
+        module = importlib.import_module(module_path)
+    except (ModuleNotFoundError, ImportError) as e:
+        # If backend is not in Qadence, search in extensions.
+        module_path = f"qadence_extensions.backends.{backend_name}.backend"
         try:
             module = importlib.import_module(module_path)
-            DifferentiableBackendCls = getattr(module, "DifferentiableBackend")
-            res[engine] = DifferentiableBackendCls
-        except (ImportError, ModuleNotFoundError):
-            pass
-    logger.debug(f"Found engines: {res.keys()}")
-    return res
+        except (ModuleNotFoundError, ImportError) as e:
+            raise type(e)
+    backend = getattr(module, "Backend")
+    return backend
 
 
-def _available_backends() -> dict:
-    """Returns a dictionary of currently installed, native qadence backends."""
-    res = {}
+def _available_backends() -> dict[BackendName, Backend]:
+    """Return a dictionary of currently installed, native qadence backends."""
+    res: dict[BackendName, Backend] = dict()
     for backend in BackendName.list():
-        module_path = f"qadence.backends.{backend}.backend"
         try:
-            module = importlib.import_module(module_path)
-            BackendCls = getattr(module, "Backend")
-            res[backend] = BackendCls
-        except (ImportError, ModuleNotFoundError):
-            pass
+            res[backend] = import_backend(backend)
+        except (ModuleNotFoundError, ImportError) as e:
+            raise type(e)(f"Failed to import backend {backend} due to {e}.") from e
     logger.debug(f"Found backends: {res.keys()}")
     return res
 
 
-def _supported_gates(name: BackendName | str) -> list[TAbstractBlock]:
-    """Returns a list of supported gates for the queried backend 'name'."""
+def import_engine(engine_name: str | Engine) -> DifferentiableBackend:
+    module_path = f"qadence.engines.{engine_name}.differentiable_backend"
+    engine: DifferentiableBackend
+    try:
+        module = importlib.import_module(module_path)
+        engine = getattr(module, "DifferentiableBackend")
+    except (ModuleNotFoundError, ImportError) as e:
+        raise type(e)
+    return engine
+
+
+def _available_engines() -> dict[Engine, DifferentiableBackend]:
+    """Return a dictionary of currently installed, native qadence engines."""
+    res: dict[Engine, DifferentiableBackend] = dict()
+    for engine in Engine.list():
+        try:
+            res[engine] = import_engine(engine)
+        except (ModuleNotFoundError, ImportError) as e:
+            raise type(e)(f"Failed to import engine {engine} due to {e}.") from e
+    logger.debug(f"Found engines: {res.keys()}")
+    return res
+
+
+def _supported_gates(backend_name: str) -> list[TAbstractBlock]:
+    """Return a list of supported gates for the queried backend 'name'."""
     from qadence import operations
 
-    name = str(BackendName(name).name.lower())
+    backend_name = BackendName(backend_name)  # Validate backend name.
+    module_path = f"qadence.backends.{backend_name}"
 
     try:
-        backend_namespace = backends_namespace.substitute(name=name)
-        module = importlib.import_module(backend_namespace)
-    except KeyError:
-        pass
-    _supported_gates = getattr(module, "supported_gates", None)
-    assert (
-        _supported_gates is not None
-    ), f"{name} backend should define a 'supported_gates' variable"
+        module = importlib.import_module(module_path)
+    except (ModuleNotFoundError, ImportError) as e:
+        raise type(e)(f"Failed to import backend module for {backend_name} due to {e}.") from e
+    _supported_gates = getattr(module, "supported_gates")
     return [getattr(operations, gate) for gate in _supported_gates]
qadence/ml_tools/config.py CHANGED
@@ -85,8 +85,12 @@ class TrainConfig:
 
 @dataclass
 class FeatureMapConfig:
-    num_features: int = 1
-    """Number of feature parameters to be encoded."""
+    num_features: int = 0
+    """
+    Number of feature parameters to be encoded.
+
+    Defaults to 0. Thus, no feature parameters are encoded.
+    """
 
     basis_set: BasisSet | dict[str, BasisSet] = BasisSet.FOURIER
     """
@@ -235,7 +239,9 @@ class FeatureMapConfig:
                 len(self.inputs) == self.num_features
             ), "Inputs list must be of same size as the number of features"
         else:
-            if self.num_features == 1:
+            if self.num_features == 0:
+                self.inputs = []
+            elif self.num_features == 1:
                 self.inputs = ["x"]
             else:
                 raise ValueError(
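
A sketch of the new default, assuming `qadence.ml_tools` in 1.7.2: with `num_features=0` the post-init assigns an empty inputs list, while `num_features=1` keeps the previous `["x"]` default:

    from qadence.ml_tools.config import FeatureMapConfig

    fm_none = FeatureMapConfig()               # num_features=0 -> inputs == []
    fm_one = FeatureMapConfig(num_features=1)  # inputs defaults to ["x"]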