qadence 1.6.3__py3-none-any.whl → 1.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
qadence/__init__.py CHANGED
@@ -49,7 +49,7 @@ from .exceptions import *
 from .execution import *
 from .measurements import *
 from .ml_tools import *
-from .models import *
+from .model import *
 from .noise import *
 from .operations import *
 from .overlap import *
@@ -82,7 +82,7 @@ list_of_submodules = [
     ".execution",
     ".measurements",
     ".ml_tools",
-    ".models",
+    ".model",
     ".operations",
     ".overlap",
     ".parameters",
@@ -253,7 +253,8 @@ class PyQHamiltonianEvolution(Module):
         """Approximate jacobian of the evolved operator with respect to time evolution."""
         return finitediff(
             lambda t: self._unitary(time_evolution=t, hamiltonian=self._hamiltonian(self, values)),
-            values[self.param_names[0]],
+            values[self.param_names[0]].reshape(-1, 1),
+            (0,),
         )

     def jacobian_generator(self, values: dict[str, Tensor]) -> Tensor:
@@ -280,7 +281,8 @@ class PyQHamiltonianEvolution(Module):
             lambda v: self._unitary(
                 time_evolution=self._time_evolution(values), hamiltonian=_generator(v)
             ),
-            values[self.param_names[1]],
+            values[self.param_names[1]].reshape(-1, 1),
+            (0,),
         )

     def dagger(self, values: dict[str, Tensor]) -> Tensor:
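
Both jacobian call sites adapt to the new `finitediff` signature introduced in `qadence/backends/utils.py` below: the scalar parameter tensor is reshaped into a `(batch_size, 1)` column, and `(0,)` selects that single input column as the differentiation variable. A hedged sketch of the shape convention (tensor names are illustrative):

    import torch
    t = torch.tensor([0.1, 0.2, 0.3])  # batch of 3 scalar time values, shape (3,)
    x = t.reshape(-1, 1)               # (batch_size=3, input_size=1), as finitediff expects
    # finitediff(f, x, (0,)) then takes a first-order central difference in column 0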
qadence/backends/utils.py CHANGED
@@ -20,8 +20,8 @@ from torch import (
     rand,
 )

-from qadence.types import ParamDictType
-from qadence.utils import Endianness, int_to_basis, is_qadence_shape
+from qadence.types import Endianness, ParamDictType
+from qadence.utils import int_to_basis, is_qadence_shape

 FINITE_DIFF_EPS = 1e-06
 # Dict of NumPy dtype -> torch dtype (when the correspondence exists)
@@ -152,8 +152,49 @@ def infer_batchsize(param_values: ParamDictType = None) -> int:
 # native 'jacobian' methods.


-def finitediff(f: Callable, x: Tensor, eps: float = FINITE_DIFF_EPS) -> Tensor:
-    return (f(x + eps) - f(x - eps)) / (2 * eps)  # type: ignore
+def finitediff(
+    f: Callable,
+    x: Tensor,
+    derivative_indices: tuple[int, ...],
+    eps: float = None,
+) -> Tensor:
+    """
+    Compute the finite difference of a function at a point.
+
+    Args:
+        f: The function to differentiate.
+        x: Input of size `(batch_size, input_size)`.
+        derivative_indices: Which *input* to differentiate (i.e. which variable x[:,i])
+        eps: finite difference spacing (uses `torch.finfo(x.dtype).eps ** (1 / (2 + order))`
+            as default)
+
+    Returns:
+        (Tensor): The finite difference of the function at the point `x`.
+    """
+
+    if eps is None:
+        order = len(derivative_indices)
+        eps = torch.finfo(x.dtype).eps ** (1 / (2 + order))
+
+    # compute derivative direction vector(s)
+    eps = torch.as_tensor(eps, dtype=x.dtype)
+    _eps = 1 / eps  # type: ignore[operator]
+    ev = torch.zeros_like(x)
+    i = derivative_indices[0]
+    ev[:, i] += eps
+
+    # recursive finite differencing for higher order than 3 / mixed derivatives
+    if len(derivative_indices) > 3 or len(set(derivative_indices)) > 1:
+        di = derivative_indices[1:]
+        return (finitediff(f, x + ev, di) - finitediff(f, x - ev, di)) * _eps / 2
+    elif len(derivative_indices) == 3:
+        return (f(x + 2 * ev) - 2 * f(x + ev) + 2 * f(x - ev) - f(x - 2 * ev)) * _eps**3 / 2
+    elif len(derivative_indices) == 2:
+        return (f(x + ev) + f(x - ev) - 2 * f(x)) * _eps**2
+    elif len(derivative_indices) == 1:
+        return (f(x + ev) - f(x - ev)) * _eps / 2
+    else:
+        raise ValueError("If you see this error there is a bug in the `finitediff` function.")


 def finitediff_sampling(
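
In the new signature, `derivative_indices` selects which input column to differentiate, and its length sets the derivative order: repeated indices use the higher-order central-difference formulas, mixed indices recurse. A minimal sketch under those assumptions (the function `g` is illustrative, with `finitediff` above in scope):

    import torch

    def g(x: torch.Tensor) -> torch.Tensor:
        # g(x0, x1) = x0**2 * x1, applied row-wise to a (batch, 2) input
        return (x[:, 0] ** 2 * x[:, 1]).unsqueeze(1)

    x = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
    dg_dx0 = finitediff(g, x, (0,))       # ~ 2 * x0 * x1
    d2g_dx02 = finitediff(g, x, (0, 0))   # ~ 2 * x1
    d2g_dx0x1 = finitediff(g, x, (0, 1))  # mixed derivative, ~ 2 * x0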
qadence/blocks/matrix.py CHANGED
@@ -27,7 +27,7 @@ class MatrixBlock(PrimitiveBlock):
     from qadence.circuit import QuantumCircuit
     from qadence.types import BackendName, DiffMode
     from qadence.blocks.matrix import MatrixBlock
-    from qadence.models import QuantumModel
+    from qadence.model import QuantumModel
     from qadence.operations import X, Z
     from qadence.states import random_state

@@ -27,7 +27,7 @@ class PrimitiveBlock(AbstractBlock):
     Primitive blocks represent elementary unitary operations.

    Examples are single/multi-qubit gates or Hamiltonian evolution.
-    See [`qadence.operations`](/qadence/operations.md) for a full list of
+    See [`qadence.operations`](operations.md) for a full list of
     primitive blocks.
     """

@@ -14,6 +14,7 @@ from .daqc import daqc_transform
 from .hamiltonians import (
     hamiltonian_factory,
     ising_hamiltonian,
+    ObservableConfig,
     total_magnetization,
     zz_hamiltonian,
 )
@@ -31,6 +32,7 @@ __all__ = [
     "identity_initialized_ansatz",
     "hamiltonian_factory",
     "ising_hamiltonian",
+    "ObservableConfig",
     "total_magnetization",
     "zz_hamiltonian",
     "qft",
@@ -1,15 +1,17 @@
 from __future__ import annotations

+from dataclasses import dataclass
 from logging import getLogger
 from typing import Callable, List, Type, Union

 import numpy as np
 from torch import Tensor, double, ones, rand
+from typing_extensions import Any

 from qadence.blocks import AbstractBlock, add, block_is_qubit_hamiltonian
 from qadence.operations import N, X, Y, Z
 from qadence.register import Register
-from qadence.types import Interaction, TArray
+from qadence.types import Interaction, ObservableTransform, TArray, TParameter

 logger = getLogger(__name__)

@@ -229,3 +231,38 @@ def ising_hamiltonian(
     zz_ham = zz_hamiltonian(n_qubits, z_terms=z_terms, zz_terms=zz_terms)
     x_ham = hamiltonian_factory(n_qubits, detuning=X, detuning_strength=x_terms)
     return zz_ham + x_ham
+
+
+def is_numeric(x: Any) -> bool:
+    return type(x) in (int, float, complex, np.int64, np.float64)
+
+
+@dataclass
+class ObservableConfig:
+    detuning: TDetuning
+    """
+    Single qubit detuning of the observable Hamiltonian.
+
+    Accepts single-qubit operator N, X, Y, or Z.
+    """
+    scale: TParameter = 1.0
+    """The scale by which to multiply the output of the observable."""
+    shift: TParameter = 0.0
+    """The shift to add to the output of the observable."""
+    transformation_type: ObservableTransform = ObservableTransform.NONE  # type: ignore[assignment]
+    """The type of transformation."""
+    trainable_transform: bool | None = None
+    """
+    Whether to have a trainable transformation on the output of the observable.
+
+    If None, the scale and shift are numbers.
+    If True, the scale and shift are VariationalParameter.
+    If False, the scale and shift are FeatureParameter.
+    """
+
+    def __post_init__(self) -> None:
+        if is_numeric(self.scale) and is_numeric(self.shift):
+            assert (
+                self.trainable_transform is None
+            ), f"If scale and shift are numbers, trainable_transform must be None. \
+                But got: {self.trainable_transform}"
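
A hedged construction sketch for the new `ObservableConfig` (it is exported from `qadence.constructors`, as the `__all__` hunk above shows; the `__post_init__` assertion fires if a numeric scale/shift is combined with a non-None `trainable_transform`):

    from qadence.constructors import ObservableConfig
    from qadence.operations import Z

    # numeric scale and shift: trainable_transform must stay None
    obs_cfg = ObservableConfig(detuning=Z, scale=2.0, shift=-1.0)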
qadence/draw/utils.py CHANGED
@@ -23,7 +23,7 @@ from qadence.blocks import (
 )
 from qadence.blocks.analog import ConstantAnalogRotation, InteractionBlock
 from qadence.circuit import QuantumCircuit
-from qadence.models import QuantumModel
+from qadence.model import QuantumModel
 from qadence.operations import RX, RY, RZ, SWAP, HamEvo, I
 from qadence.transpile.block import fill_identities
 from qadence.utils import format_parameter
@@ -1,7 +1,9 @@
 from __future__ import annotations

-from .config import TrainConfig
+from .config import AnsatzConfig, FeatureMapConfig, TrainConfig
+from .constructors import create_ansatz, create_fm_blocks, observable_from_config
 from .data import DictDataLoader, InfiniteTensorDataset, to_dataloader
+from .models import QNN
 from .optimize_step import optimize_step as default_optimize_step
 from .parameters import get_parameters, num_parameters, set_parameters
 from .printing import print_metrics, write_tensorboard
@@ -12,10 +14,16 @@ from .train_no_grad import train as train_gradient_free

 # Modules to be automatically added to the qadence namespace
 __all__ = [
-    "TrainConfig",
+    "AnsatzConfig",
+    "create_ansatz",
+    "create_fm_blocks",
     "DictDataLoader",
+    "FeatureMapConfig",
+    "load_checkpoint",
+    "observable_from_config",
+    "QNN",
+    "TrainConfig",
     "train_with_grad",
     "train_gradient_free",
-    "load_checkpoint",
     "write_checkpoint",
 ]
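
With the expanded `__all__`, the new QML-construction API is importable directly from `qadence.ml_tools`:

    from qadence.ml_tools import (
        QNN,
        AnsatzConfig,
        FeatureMapConfig,
        TrainConfig,
        create_ansatz,
        create_fm_blocks,
        observable_from_config,
    )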
@@ -2,9 +2,20 @@ from __future__ import annotations

 import datetime
 import os
-from dataclasses import dataclass
+from dataclasses import dataclass, field, fields
+from logging import getLogger
 from pathlib import Path
-from typing import Callable, Optional
+from typing import Callable, Optional, Type
+
+from sympy import Basic
+
+from qadence.blocks.analog import AnalogBlock
+from qadence.blocks.primitive import ParametricBlock
+from qadence.operations import RX, AnalogRX
+from qadence.parameters import Parameter
+from qadence.types import AnsatzType, BasisSet, MultivariateStrategy, ReuploadScaling, Strategy
+
+logger = getLogger(__file__)


 @dataclass
@@ -70,3 +81,267 @@ class TrainConfig:
             self.trainstop_criterion = lambda x: x <= self.max_iter
         if self.validation_criterion is None:
             self.validation_criterion = lambda *x: False
+
+
+@dataclass
+class FeatureMapConfig:
+    num_features: int = 1
+    """Number of feature parameters to be encoded."""
+
+    basis_set: BasisSet | dict[str, BasisSet] = BasisSet.FOURIER
+    """
+    Basis set for feature encoding.
+
+    Takes qadence.BasisSet.
+    Give a single BasisSet to use the same for all features.
+    Give a dict of (str, BasisSet) where the key is the name of the variable and the
+    value is the BasisSet to use for encoding that feature.
+    BasisSet.FOURIER for Fourier encoding.
+    BasisSet.CHEBYSHEV for Chebyshev encoding.
+    """
+
+    reupload_scaling: ReuploadScaling | dict[str, ReuploadScaling] = ReuploadScaling.CONSTANT
+    """
+    Scaling for encoding the same feature on different qubits.
+
+    Scaling used to encode the same feature on different qubits in the
+    same layer of the feature maps. Takes qadence.ReuploadScaling.
+    Give a single ReuploadScaling to use the same for all features.
+    Give a dict of (str, ReuploadScaling) where the key is the name of the variable and the
+    value is the ReuploadScaling to use for encoding that feature.
+    ReuploadScaling.CONSTANT for constant scaling.
+    ReuploadScaling.TOWER for linearly increasing scaling.
+    ReuploadScaling.EXP for exponentially increasing scaling.
+    """
+
+    feature_range: tuple[float, float] | dict[str, tuple[float, float]] | None = None
+    """
+    Range of data that the input data is assumed to come from.
+
+    Give a single tuple to use the same range for all features.
+    Give a dict of (str, tuple) where the key is the name of the variable and the
+    value is the feature range to use for that feature.
+    """
+
+    target_range: tuple[float, float] | dict[str, tuple[float, float]] | None = None
+    """
+    Range of data the data encoder assumes as natural range.
+
+    Give a single tuple to use the same range for all features.
+    Give a dict of (str, tuple) where the key is the name of the variable and the
+    value is the target range to use for that feature.
+    """
+
+    multivariate_strategy: MultivariateStrategy = MultivariateStrategy.PARALLEL
+    """
+    The encoding strategy in case of a multivariate function.
+
+    Takes qadence.MultivariateStrategy.
+    If PARALLEL, the features are encoded in one block of rotation gates
+    with each feature given an equal number of qubits.
+    If SERIES, the features are encoded sequentially, with an ansatz block
+    between. PARALLEL is allowed only for DIGITAL `feature_map_strategy`.
+    """
+
+    feature_map_strategy: Strategy = Strategy.DIGITAL
+    """
+    Strategy for feature map.
+
+    Accepts DIGITAL, ANALOG or RYDBERG. Defaults to DIGITAL.
+    If the strategy is incompatible with the `operation` chosen, then `operation`
+    gets preference and the given strategy is ignored.
+    """
+
+    param_prefix: str | None = None
+    """
+    String prefix to create trainable parameters in the feature map.
+
+    A string prefix to create trainable parameters multiplying the feature parameter
+    inside the feature-encoding function. Note that currently this does not take into
+    account the domain of the feature-encoding function.
+    Defaults to `None`, in which case the feature map is not trainable.
+    Note that this is separate from the name of the parameter.
+    The user can provide a single prefix for all features, and it will be appended
+    by the appropriate feature name automatically.
+    """
+
+    num_repeats: int | dict[str, int] = 0
+    """
+    Number of feature map layers repeated in the data reuploading step.
+
+    If all are to be repeated the same number of times, then a single `int` can be
+    given. For a different number of repetitions for each feature, provide a dict
+    of (str, int) where the key is the name of the variable and the value is the
+    number of repetitions for that feature.
+    This amounts to the number of additional reuploads. So if `num_repeats` is N,
+    the data gets uploaded N+1 times. Defaults to no repetition.
+    """
+
+    operation: Callable[[Parameter | Basic], AnalogBlock] | Type[RX] | None = None
+    """
+    Type of operation.
+
+    Choose among the analog or digital rotations or a custom
+    callable function returning an AnalogBlock instance. If the type of operation is
+    incompatible with the `strategy` chosen, then `operation` gets preference and
+    the given strategy is ignored.
+    """
+
+    inputs: list[Basic | str] | None = None
+    """
+    List that indicates the order of variables of the tensors that are passed.
+
+    Optional if a single feature is being encoded, required otherwise. Given input tensors
+    `xs = torch.rand(batch_size, input_size:=2)` a QNN with `inputs=["t", "x"]` will
+    assign `t, x = xs[:,0], xs[:,1]`.
+    """
+
+    def __post_init__(self) -> None:
+        if self.multivariate_strategy == MultivariateStrategy.PARALLEL and self.num_features > 1:
+            assert (
+                self.feature_map_strategy == Strategy.DIGITAL
+            ), "For `parallel` encoding of multiple features, the `feature_map_strategy` must be \
+                of `digital` type."
+
+        if self.operation is None:
+            if self.feature_map_strategy == Strategy.DIGITAL:
+                self.operation = RX
+            elif self.feature_map_strategy == Strategy.ANALOG:
+                self.operation = AnalogRX  # type: ignore[assignment]
+
+        else:
+            if self.feature_map_strategy == Strategy.DIGITAL:
+                if isinstance(self.operation, AnalogBlock):
+                    logger.warning(
+                        "The `operation` is of type `AnalogBlock` but the `feature_map_strategy` is\
+                        `digital`. The `feature_map_strategy` will be modified and the given\
+                        operation will be used."
+                    )
+
+                    self.feature_map_strategy = Strategy.ANALOG
+
+            elif self.feature_map_strategy == Strategy.ANALOG:
+                if isinstance(self.operation, ParametricBlock):
+                    logger.warning(
+                        "The `operation` is a digital gate but the `feature_map_strategy` is\
+                        `analog`. The `feature_map_strategy` will be modified and the given\
+                        operation will be used."
+                    )
+
+                    self.feature_map_strategy = Strategy.DIGITAL
+
+        if self.inputs is not None:
+            assert (
+                len(self.inputs) == self.num_features
+            ), "Inputs list must be of same size as the number of features"
+        else:
+            if self.num_features == 1:
+                self.inputs = ["x"]
+            else:
+                raise ValueError(
+                    """
+                    Your QNN has more than one input. Please provide a list of inputs in the order
+                    of your tensor domain. For example, if you want to pass
+                    `xs = torch.rand(batch_size, input_size:=3)` to your QNN, where
+                    ```
+                    t = x[:,0]
+                    x = x[:,1]
+                    y = x[:,2]
+                    ```
+                    you have to specify
+                    ```
+                    inputs=["t", "x", "y"]
+                    ```
+                    You can also pass a list of sympy symbols.
+                    """
+                )
+
+        property_list = [
+            "basis_set",
+            "reupload_scaling",
+            "feature_range",
+            "target_range",
+            "num_repeats",
+        ]
+
+        for target_field in fields(self):
+            if target_field.name in property_list:
+                prop = getattr(self, target_field.name)
+                if isinstance(prop, dict):
+                    assert set(prop.keys()) == set(
+                        self.inputs
+                    ), f"The keys in {target_field.name} must be the same as the inputs provided. \
+                        Alternatively, provide a single value of {target_field.name} to use the same\
+                        {target_field.name} for all features."
+                else:
+                    prop = {key: prop for key in self.inputs}
+                    setattr(self, target_field.name, prop)
+
+
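A sketch of how `__post_init__` normalizes per-feature options: a single value is broadcast to a dict keyed by `inputs`, while an explicit dict must cover exactly those keys (values here are illustrative):

    from qadence.ml_tools import FeatureMapConfig
    from qadence.types import BasisSet, ReuploadScaling

    fm_cfg = FeatureMapConfig(
        num_features=2,
        inputs=["t", "x"],
        basis_set=BasisSet.FOURIER,  # broadcast to {"t": FOURIER, "x": FOURIER}
        reupload_scaling={"t": ReuploadScaling.CONSTANT, "x": ReuploadScaling.TOWER},
    )
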
+@dataclass
+class AnsatzConfig:
+    depth: int = 1
+    """Number of layers of the ansatz."""
+
+    ansatz_type: AnsatzType = AnsatzType.HEA
+    """What type of ansatz.
+
+    HEA for Hardware Efficient Ansatz.
+    IIA for Identity-Initialized Ansatz.
+    """
+
+    ansatz_strategy: Strategy = Strategy.DIGITAL
+    """Ansatz strategy.
+
+    DIGITAL for fully digital ansatz. Required if `ansatz_type` is `iia`.
+    SDAQC for analog entangling block.
+    RYDBERG for fully Rydberg HEA ansatz.
+    """
+
+    strategy_args: dict = field(default_factory=dict)
+    """
+    A dictionary containing keyword arguments to the function creating the ansatz.
+
+    Details about each below.
+
+    For DIGITAL strategy, accepts the following:
+        periodic (bool): if the qubits should be linked periodically.
+            periodic=False is not supported in emu-c.
+        operations (list): list of operations to cycle through in the
+            digital single-qubit rotations of each layer.
+            Defaults to [RX, RY, RX] for hea and [RX, RY] for iia.
+        entangler (AbstractBlock): 2-qubit entangling operation.
+            Supports CNOT, CZ, CRX, CRY, CRZ, CPHASE. Controlled rotations
+            will have variational parameters on the rotation angles.
+            Defaults to CNOT.
+
+    For SDAQC strategy, accepts the following:
+        operations (list): list of operations to cycle through in the
+            digital single-qubit rotations of each layer.
+            Defaults to [RX, RY, RX] for hea and [RX, RY] for iia.
+        entangler (AbstractBlock): Hamiltonian generator for the
+            analog entangling layer. Time parameter is considered variational.
+            Defaults to NN interaction.
+
+    For RYDBERG strategy, accepts the following:
+        addressable_detuning: whether to turn on the trainable semi-local addressing pattern
+            on the detuning (n_i terms in the Hamiltonian).
+            Defaults to True.
+        addressable_drive: whether to turn on the trainable semi-local addressing pattern
+            on the drive (sigma_i^x terms in the Hamiltonian).
+            Defaults to False.
+        tunable_phase: whether to have a tunable phase to get both sigma^x and sigma^y rotations
+            in the drive term. If False, only a sigma^x term will be included in the drive part
+            of the Hamiltonian generator.
+            Defaults to False.
+    """
+    # The default for a dataclass can not be a mutable object without using this default_factory.
+
+    param_prefix: str = "theta"
+    """The base name of the variational parameter."""
+
+    def __post_init__(self) -> None:
+        if self.ansatz_type == AnsatzType.IIA:
+            assert (
+                self.ansatz_strategy != Strategy.RYDBERG
+            ), "Rydberg strategy not allowed for Identity-initialized ansatz."