qadence 1.6.3__py3-none-any.whl → 1.7.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. qadence/__init__.py +2 -2
  2. qadence/backends/api.py +47 -60
  3. qadence/backends/gpsr.py +1 -0
  4. qadence/backends/pyqtorch/backend.py +1 -2
  5. qadence/backends/pyqtorch/config.py +5 -0
  6. qadence/backends/pyqtorch/convert_ops.py +83 -10
  7. qadence/backends/utils.py +62 -7
  8. qadence/blocks/abstract.py +7 -0
  9. qadence/blocks/embedding.py +17 -12
  10. qadence/blocks/matrix.py +1 -1
  11. qadence/blocks/primitive.py +1 -1
  12. qadence/constructors/__init__.py +2 -0
  13. qadence/constructors/hamiltonians.py +38 -1
  14. qadence/draw/utils.py +1 -1
  15. qadence/execution.py +11 -3
  16. qadence/extensions.py +62 -36
  17. qadence/ml_tools/__init__.py +11 -3
  18. qadence/ml_tools/config.py +283 -2
  19. qadence/ml_tools/constructors.py +796 -0
  20. qadence/ml_tools/models.py +373 -251
  21. qadence/ml_tools/printing.py +5 -2
  22. qadence/ml_tools/saveload.py +42 -18
  23. qadence/ml_tools/train_grad.py +48 -14
  24. qadence/ml_tools/utils.py +2 -8
  25. qadence/{models/quantum_model.py → model.py} +178 -10
  26. qadence/operations/ham_evo.py +10 -0
  27. qadence/overlap.py +1 -1
  28. qadence/parameters.py +10 -1
  29. qadence/register.py +98 -22
  30. qadence/serialization.py +6 -6
  31. qadence/types.py +44 -0
  32. qadence/utils.py +2 -8
  33. {qadence-1.6.3.dist-info → qadence-1.7.1.dist-info}/METADATA +7 -6
  34. {qadence-1.6.3.dist-info → qadence-1.7.1.dist-info}/RECORD +36 -38
  35. {qadence-1.6.3.dist-info → qadence-1.7.1.dist-info}/WHEEL +1 -1
  36. qadence/finitediff.py +0 -47
  37. qadence/models/__init__.py +0 -7
  38. qadence/models/qnn.py +0 -265
  39. {qadence-1.6.3.dist-info → qadence-1.7.1.dist-info}/licenses/LICENSE +0 -0
qadence/extensions.py CHANGED
@@ -2,62 +2,88 @@ from __future__ import annotations
 
 import importlib
 from logging import getLogger
-from string import Template
+from typing import TypeVar
 
-from qadence.backend import Backend
+from qadence.backend import Backend, BackendConfiguration
 from qadence.blocks.abstract import TAbstractBlock
+from qadence.engines.differentiable_backend import DifferentiableBackend
 from qadence.types import BackendName, DiffMode, Engine
 
-backends_namespace = Template("qadence.backends.$name")
+BackendClsType = TypeVar("BackendClsType", bound=Backend)
+EngineClsType = TypeVar("EngineClsType", bound=DifferentiableBackend)
 
 logger = getLogger(__name__)
 
 
-def _available_engines() -> dict:
-    """Returns a dictionary of currently installed, native qadence engines."""
-    res = {}
-    for engine in Engine.list():
-        module_path = f"qadence.engines.{engine}.differentiable_backend"
-        try:
-            module = importlib.import_module(module_path)
-            DifferentiableBackendCls = getattr(module, "DifferentiableBackend")
-            res[engine] = DifferentiableBackendCls
-        except (ImportError, ModuleNotFoundError):
-            pass
-    logger.debug(f"Found engines: {res.keys()}")
-    return res
+def import_config(backend_name: str | BackendName) -> BackendConfiguration:
+    module_path = f"qadence.backends.{backend_name}.config"
+    cfg: BackendConfiguration
+    try:
+        module = importlib.import_module(module_path)
+        cfg = getattr(module, "Configuration")
+    except (ModuleNotFoundError, ImportError) as e:
+        raise type(e)(f"Failed to import backend config of {backend_name} due to {e}.") from e
+    return cfg
 
 
-def _available_backends() -> dict:
-    """Returns a dictionary of currently installed, native qadence backends."""
-    res = {}
+def import_backend(backend_name: str | BackendName) -> Backend:
+    module_path = f"qadence.backends.{backend_name}.backend"
+    backend: Backend
+    try:
+        module = importlib.import_module(module_path)
+        backend = getattr(module, "Backend")
+    except (ModuleNotFoundError, ImportError) as e:
+        raise type(e)(f"Failed to import backend {backend_name} due to {e}.") from e
+    return backend
+
+
+def _available_backends() -> dict[BackendName, Backend]:
+    """Return a dictionary of currently installed, native qadence backends."""
+    res: dict[BackendName, Backend] = dict()
     for backend in BackendName.list():
-        module_path = f"qadence.backends.{backend}.backend"
         try:
-            module = importlib.import_module(module_path)
-            BackendCls = getattr(module, "Backend")
-            res[backend] = BackendCls
-        except (ImportError, ModuleNotFoundError):
-            pass
+            res[backend] = import_backend(backend)
+        except (ModuleNotFoundError, ImportError) as e:
+            raise type(e)(f"Failed to import backend {backend} due to {e}.") from e
     logger.debug(f"Found backends: {res.keys()}")
     return res
 
 
-def _supported_gates(name: BackendName | str) -> list[TAbstractBlock]:
-    """Returns a list of supported gates for the queried backend 'name'."""
+def import_engine(engine_name: str | Engine) -> DifferentiableBackend:
+    module_path = f"qadence.engines.{engine_name}.differentiable_backend"
+    engine: DifferentiableBackend
+    try:
+        module = importlib.import_module(module_path)
+        engine = getattr(module, "DifferentiableBackend")
+    except (ModuleNotFoundError, ImportError) as e:
+        raise type(e)(f"Failed to import engine {engine_name} due to {e}.") from e
+    return engine
+
+
+def _available_engines() -> dict[Engine, DifferentiableBackend]:
+    """Return a dictionary of currently installed, native qadence engines."""
+    res: dict[Engine, DifferentiableBackend] = dict()
+    for engine in Engine.list():
+        try:
+            res[engine] = import_engine(engine)
+        except (ModuleNotFoundError, ImportError) as e:
+            raise type(e)(f"Failed to import engine {engine} due to {e}.") from e
+    logger.debug(f"Found engines: {res.keys()}")
+    return res
+
+
+def _supported_gates(backend_name: str) -> list[TAbstractBlock]:
+    """Return a list of supported gates for the queried backend 'name'."""
     from qadence import operations
 
-    name = str(BackendName(name).name.lower())
+    backend_name = BackendName(backend_name)  # Validate backend name.
+    module_path = f"qadence.backends.{backend_name}"
 
     try:
-        backend_namespace = backends_namespace.substitute(name=name)
-        module = importlib.import_module(backend_namespace)
-    except KeyError:
-        pass
-    _supported_gates = getattr(module, "supported_gates", None)
-    assert (
-        _supported_gates is not None
-    ), f"{name} backend should define a 'supported_gates' variable"
+        module = importlib.import_module(module_path)
+    except (ModuleNotFoundError, ImportError) as e:
+        raise type(e)(f"Failed to import backend module for {backend_name} due to {e}.") from e
+    _supported_gates = getattr(module, "supported_gates")
     return [getattr(operations, gate) for gate in _supported_gates]
 
 
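The net effect of this refactor: backend, config, and engine discovery now goes through dedicated `import_*` helpers that re-raise import failures with context instead of silently skipping them (the old `except ...: pass` is gone). A minimal sketch of the new behavior, assuming the default pyqtorch backend is installed; note that despite the instance-typed annotations, these helpers return the classes fetched via `getattr`, not instances:

```
from qadence.extensions import import_backend, import_config, import_engine
from qadence.types import BackendName, Engine

# Each helper imports a submodule and returns the class it defines.
BackendCls = import_backend(BackendName.PYQTORCH)  # qadence.backends.pyqtorch.backend.Backend
ConfigCls = import_config(BackendName.PYQTORCH)    # qadence.backends.pyqtorch.config.Configuration
EngineCls = import_engine(Engine.TORCH)            # the torch DifferentiableBackend class

# A missing backend now fails loudly instead of being skipped:
try:
    import_backend("not_a_backend")
except (ImportError, ModuleNotFoundError) as e:
    print(e)  # "Failed to import backend not_a_backend due to ..."
```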
qadence/ml_tools/__init__.py CHANGED
@@ -1,7 +1,9 @@
 from __future__ import annotations
 
-from .config import TrainConfig
+from .config import AnsatzConfig, FeatureMapConfig, TrainConfig
+from .constructors import create_ansatz, create_fm_blocks, observable_from_config
 from .data import DictDataLoader, InfiniteTensorDataset, to_dataloader
+from .models import QNN
 from .optimize_step import optimize_step as default_optimize_step
 from .parameters import get_parameters, num_parameters, set_parameters
 from .printing import print_metrics, write_tensorboard
@@ -12,10 +14,16 @@ from .train_no_grad import train as train_gradient_free
 
 # Modules to be automatically added to the qadence namespace
 __all__ = [
-    "TrainConfig",
+    "AnsatzConfig",
+    "create_ansatz",
+    "create_fm_blocks",
     "DictDataLoader",
+    "FeatureMapConfig",
+    "load_checkpoint",
+    "observable_from_config",
+    "QNN",
+    "TrainConfig",
     "train_with_grad",
     "train_gradient_free",
-    "load_checkpoint",
     "write_checkpoint",
 ]
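Together with the new `constructors.py` and the reworked `models.py`, this widens the public `qadence.ml_tools` namespace. A quick sanity check (names taken verbatim from the `__all__` above; requires qadence 1.7.1, where `QNN` has moved here from the removed `qadence.models`):

```
from qadence.ml_tools import (
    QNN,
    AnsatzConfig,
    FeatureMapConfig,
    TrainConfig,
    create_ansatz,
    create_fm_blocks,
    observable_from_config,
)
```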
qadence/ml_tools/config.py CHANGED
@@ -2,9 +2,20 @@ from __future__ import annotations
 
 import datetime
 import os
-from dataclasses import dataclass
+from dataclasses import dataclass, field, fields
+from logging import getLogger
 from pathlib import Path
-from typing import Callable, Optional
+from typing import Callable, Optional, Type
+
+from sympy import Basic
+
+from qadence.blocks.analog import AnalogBlock
+from qadence.blocks.primitive import ParametricBlock
+from qadence.operations import RX, AnalogRX
+from qadence.parameters import Parameter
+from qadence.types import AnsatzType, BasisSet, MultivariateStrategy, ReuploadScaling, Strategy
+
+logger = getLogger(__file__)
 
 
 @dataclass
@@ -70,3 +81,273 @@ class TrainConfig:
             self.trainstop_criterion = lambda x: x <= self.max_iter
         if self.validation_criterion is None:
             self.validation_criterion = lambda *x: False
+
+
+@dataclass
+class FeatureMapConfig:
+    num_features: int = 0
+    """
+    Number of feature parameters to be encoded.
+
+    Defaults to 0. Thus, no feature parameters are encoded.
+    """
+
+    basis_set: BasisSet | dict[str, BasisSet] = BasisSet.FOURIER
+    """
+    Basis set for feature encoding.
+
+    Takes qadence.BasisSet.
+    Give a single BasisSet to use the same for all features.
+    Give a dict of (str, BasisSet) where the key is the name of the variable and the
+    value is the BasisSet to use for encoding that feature.
+    BasisSet.FOURIER for Fourier encoding.
+    BasisSet.CHEBYSHEV for Chebyshev encoding.
+    """
+
+    reupload_scaling: ReuploadScaling | dict[str, ReuploadScaling] = ReuploadScaling.CONSTANT
+    """
+    Scaling for encoding the same feature on different qubits.
+
+    Scaling used to encode the same feature on different qubits in the
+    same layer of the feature maps. Takes qadence.ReuploadScaling.
+    Give a single ReuploadScaling to use the same for all features.
+    Give a dict of (str, ReuploadScaling) where the key is the name of the variable and the
+    value is the ReuploadScaling to use for encoding that feature.
+    ReuploadScaling.CONSTANT for constant scaling.
+    ReuploadScaling.TOWER for linearly increasing scaling.
+    ReuploadScaling.EXP for exponentially increasing scaling.
+    """
+
+    feature_range: tuple[float, float] | dict[str, tuple[float, float]] | None = None
+    """
+    Range of data that the input data is assumed to come from.
+
+    Give a single tuple to use the same range for all features.
+    Give a dict of (str, tuple) where the key is the name of the variable and the
+    value is the feature range to use for that feature.
+    """
+
+    target_range: tuple[float, float] | dict[str, tuple[float, float]] | None = None
+    """
+    Range of data that the data encoder assumes as its natural range.
+
+    Give a single tuple to use the same range for all features.
+    Give a dict of (str, tuple) where the key is the name of the variable and the
+    value is the target range to use for that feature.
+    """
+
+    multivariate_strategy: MultivariateStrategy = MultivariateStrategy.PARALLEL
+    """
+    The encoding strategy in case of a multivariate function.
+
+    Takes qadence.MultivariateStrategy.
+    If PARALLEL, the features are encoded in one block of rotation gates,
+    with each feature given an equal number of qubits.
+    If SERIES, the features are encoded sequentially, with an ansatz block
+    in between. PARALLEL is allowed only for a DIGITAL `feature_map_strategy`.
+    """
+
+    feature_map_strategy: Strategy = Strategy.DIGITAL
+    """
+    Strategy for the feature map.
+
+    Accepts DIGITAL, ANALOG or RYDBERG. Defaults to DIGITAL.
+    If the strategy is incompatible with the `operation` chosen, then `operation`
+    gets preference and the given strategy is ignored.
+    """
+
+    param_prefix: str | None = None
+    """
+    String prefix to create trainable parameters in the feature map.
+
+    A string prefix to create trainable parameters multiplying the feature parameter
+    inside the feature-encoding function. Note that currently this does not take into
+    account the domain of the feature-encoding function.
+    Defaults to `None`; the feature map is then not trainable.
+    Note that this is separate from the name of the parameter.
+    The user can provide a single prefix for all features, and it will be appended
+    with the appropriate feature name automatically.
+    """
+
+    num_repeats: int | dict[str, int] = 0
+    """
+    Number of feature map layers repeated in the data reuploading step.
+
+    If all features are to be repeated the same number of times, give a single
+    `int`. For a different number of repetitions per feature, provide a dict
+    of (str, int) where the key is the name of the variable and the value is the
+    number of repetitions for that feature.
+    This amounts to the number of additional reuploads. So if `num_repeats` is N,
+    the data gets uploaded N+1 times. Defaults to no repetition.
+    """
+
+    operation: Callable[[Parameter | Basic], AnalogBlock] | Type[RX] | None = None
+    """
+    Type of operation.
+
+    Choose among the analog or digital rotations, or a custom
+    callable function returning an AnalogBlock instance. If the type of operation is
+    incompatible with the `strategy` chosen, then `operation` gets preference and
+    the given strategy is ignored.
+    """
+
+    inputs: list[Basic | str] | None = None
+    """
+    List that indicates the order of variables of the tensors that are passed.
+
+    Optional if a single feature is being encoded, required otherwise. Given input tensors
+    `xs = torch.rand(batch_size, input_size:=2)`, a QNN with `inputs=["t", "x"]` will
+    assign `t, x = xs[:,0], xs[:,1]`.
+    """
+
+    def __post_init__(self) -> None:
+        if self.multivariate_strategy == MultivariateStrategy.PARALLEL and self.num_features > 1:
+            assert (
+                self.feature_map_strategy == Strategy.DIGITAL
+            ), "For `parallel` encoding of multiple features, the `feature_map_strategy` must be \
+            of `digital` type."
+
+        if self.operation is None:
+            if self.feature_map_strategy == Strategy.DIGITAL:
+                self.operation = RX
+            elif self.feature_map_strategy == Strategy.ANALOG:
+                self.operation = AnalogRX  # type: ignore[assignment]
+
+        else:
+            if self.feature_map_strategy == Strategy.DIGITAL:
+                if isinstance(self.operation, AnalogBlock):
+                    logger.warning(
+                        "The `operation` is of type `AnalogBlock` but the `feature_map_strategy` is\
+                        `digital`. The `feature_map_strategy` will be modified and given operation\
+                        will be used."
+                    )
+
+                    self.feature_map_strategy = Strategy.ANALOG
+
+            elif self.feature_map_strategy == Strategy.ANALOG:
+                if isinstance(self.operation, ParametricBlock):
+                    logger.warning(
+                        "The `operation` is a digital gate but the `feature_map_strategy` is\
+                        `analog`. The `feature_map_strategy` will be modified and given operation\
+                        will be used."
+                    )
+
+                    self.feature_map_strategy = Strategy.DIGITAL
+
+        if self.inputs is not None:
+            assert (
+                len(self.inputs) == self.num_features
+            ), "Inputs list must be of same size as the number of features"
+        else:
+            if self.num_features == 0:
+                self.inputs = []
+            elif self.num_features == 1:
+                self.inputs = ["x"]
+            else:
+                raise ValueError(
+                    """
+                    Your QNN has more than one input. Please provide a list of inputs in the order
+                    of your tensor domain. For example, if you want to pass
+                    `xs = torch.rand(batch_size, input_size:=3)` to your QNN, where
+                    ```
+                    t = xs[:,0]
+                    x = xs[:,1]
+                    y = xs[:,2]
+                    ```
+                    you have to specify
+                    ```
+                    inputs=["t", "x", "y"]
+                    ```
+                    You can also pass a list of sympy symbols.
+                    """
+                )
+
+        property_list = [
+            "basis_set",
+            "reupload_scaling",
+            "feature_range",
+            "target_range",
+            "num_repeats",
+        ]
+
+        for target_field in fields(self):
+            if target_field.name in property_list:
+                prop = getattr(self, target_field.name)
+                if isinstance(prop, dict):
+                    assert set(prop.keys()) == set(
+                        self.inputs
+                    ), f"The keys in {target_field.name} must be the same as the inputs provided. \
+                    Alternatively, provide a single value of {target_field.name} to use the same\
+                    {target_field.name} for all features."
+                else:
+                    prop = {key: prop for key in self.inputs}
+                    setattr(self, target_field.name, prop)
+
+
+@dataclass
+class AnsatzConfig:
+    depth: int = 1
+    """Number of layers of the ansatz."""
+
+    ansatz_type: AnsatzType = AnsatzType.HEA
+    """Type of ansatz.
+
+    HEA for Hardware-Efficient Ansatz.
+    IIA for Identity-Initialized Ansatz.
+    """
+
+    ansatz_strategy: Strategy = Strategy.DIGITAL
+    """Ansatz strategy.
+
+    DIGITAL for a fully digital ansatz. Required if `ansatz_type` is `iia`.
+    SDAQC for an analog entangling block.
+    RYDBERG for a fully Rydberg HEA ansatz.
+    """
+
+    strategy_args: dict = field(default_factory=dict)
+    """
+    A dictionary containing keyword arguments to the function creating the ansatz.
+
+    Details about each below.
+
+    For the DIGITAL strategy, accepts the following:
+        periodic (bool): whether the qubits should be linked periodically.
+            periodic=False is not supported in emu-c.
+        operations (list): list of operations to cycle through in the
+            digital single-qubit rotations of each layer.
+            Defaults to [RX, RY, RX] for hea and [RX, RY] for iia.
+        entangler (AbstractBlock): 2-qubit entangling operation.
+            Supports CNOT, CZ, CRX, CRY, CRZ, CPHASE. Controlled rotations
+            will have variational parameters on the rotation angles.
+            Defaults to CNOT.
+
+    For the SDAQC strategy, accepts the following:
+        operations (list): list of operations to cycle through in the
+            digital single-qubit rotations of each layer.
+            Defaults to [RX, RY, RX] for hea and [RX, RY] for iia.
+        entangler (AbstractBlock): Hamiltonian generator for the
+            analog entangling layer. The time parameter is considered variational.
+            Defaults to an NN interaction.
+
+    For the RYDBERG strategy, accepts the following:
+        addressable_detuning: whether to turn on the trainable semi-local addressing pattern
+            on the detuning (n_i terms in the Hamiltonian).
+            Defaults to True.
+        addressable_drive: whether to turn on the trainable semi-local addressing pattern
+            on the drive (sigma_i^x terms in the Hamiltonian).
+            Defaults to False.
+        tunable_phase: whether to have a tunable phase to get both sigma^x and sigma^y rotations
+            in the drive term. If False, only a sigma^x term will be included in the drive part
+            of the Hamiltonian generator.
+            Defaults to False.
+    """
+    # The default for a dataclass cannot be a mutable object without using this default_factory.
+
+    param_prefix: str = "theta"
+    """The base name of the variational parameter."""
+
+    def __post_init__(self) -> None:
+        if self.ansatz_type == AnsatzType.IIA:
+            assert (
+                self.ansatz_strategy != Strategy.RYDBERG
+            ), "Rydberg strategy not allowed for Identity-initialized ansatz."