qadence 1.1.1__py3-none-any.whl → 1.2.0__py3-none-any.whl
- qadence/analog/__init__.py +4 -2
- qadence/analog/addressing.py +167 -0
- qadence/analog/constants.py +59 -0
- qadence/analog/device.py +82 -0
- qadence/analog/hamiltonian_terms.py +101 -0
- qadence/analog/parse_analog.py +120 -0
- qadence/backend.py +27 -1
- qadence/backends/braket/backend.py +1 -1
- qadence/backends/pulser/__init__.py +0 -1
- qadence/backends/pulser/backend.py +30 -15
- qadence/backends/pulser/config.py +19 -10
- qadence/backends/pulser/devices.py +57 -63
- qadence/backends/pulser/pulses.py +70 -12
- qadence/backends/pyqtorch/backend.py +2 -3
- qadence/backends/pyqtorch/config.py +18 -12
- qadence/backends/pyqtorch/convert_ops.py +12 -4
- qadence/backends/pytorch_wrapper.py +2 -1
- qadence/backends/utils.py +1 -10
- qadence/blocks/abstract.py +5 -1
- qadence/blocks/analog.py +18 -9
- qadence/blocks/block_to_tensor.py +11 -0
- qadence/blocks/primitive.py +81 -9
- qadence/constructors/__init__.py +4 -0
- qadence/constructors/feature_maps.py +84 -60
- qadence/constructors/hamiltonians.py +27 -98
- qadence/constructors/rydberg_feature_maps.py +113 -0
- qadence/divergences.py +12 -0
- qadence/extensions.py +1 -6
- qadence/finitediff.py +47 -0
- qadence/mitigations/readout.py +92 -25
- qadence/models/qnn.py +88 -23
- qadence/operations.py +55 -70
- qadence/parameters.py +10 -2
- qadence/register.py +91 -43
- qadence/transpile/__init__.py +1 -0
- qadence/transpile/apply_fn.py +40 -0
- qadence/types.py +19 -1
- qadence/utils.py +35 -0
- {qadence-1.1.1.dist-info → qadence-1.2.0.dist-info}/METADATA +2 -2
- {qadence-1.1.1.dist-info → qadence-1.2.0.dist-info}/RECORD +42 -36
- {qadence-1.1.1.dist-info → qadence-1.2.0.dist-info}/WHEEL +1 -1
- qadence/analog/interaction.py +0 -198
- qadence/analog/utils.py +0 -132
- {qadence-1.1.1.dist-info → qadence-1.2.0.dist-info}/licenses/LICENSE +0 -0
qadence/constructors/rydberg_feature_maps.py
ADDED
@@ -0,0 +1,113 @@
+from __future__ import annotations
+
+from typing import Callable
+
+import numpy as np
+from sympy import Basic, Function
+
+from qadence.blocks import AnalogBlock, KronBlock, kron
+from qadence.constructors.feature_maps import fm_parameter
+from qadence.logger import get_logger
+from qadence.operations import AnalogRot, AnalogRX, AnalogRY, AnalogRZ
+from qadence.parameters import FeatureParameter, Parameter, VariationalParameter
+from qadence.types import BasisSet, ReuploadScaling, TParameter
+
+logger = get_logger(__file__)
+
+AnalogRotationTypes = [AnalogRX, AnalogRY, AnalogRZ]
+
+
+def rydberg_feature_map(
+    n_qubits: int,
+    param: str = "phi",
+    max_abs_detuning: float = 2 * np.pi * 10,
+    weights: list[float] | None = None,
+) -> KronBlock:
+    """Feature map using semi-local addressing patterns.
+
+    If no weights are specified, variational parameters are created
+    for the pattern.
+
+    Args:
+        n_qubits (int): number of qubits
+        param: the name of the feature parameter
+        max_abs_detuning: maximum value of absolute detuning for each qubit. Defaults to 10 MHz.
+        weights: a list of weights to assign to each qubit parameter in the feature map
+
+    Returns:
+        The block representing the feature map
+    """
+
+    tower_coeffs: list[float | Parameter]
+    tower_coeffs = (
+        [VariationalParameter(f"w_{param}_{i}") for i in range(n_qubits)]
+        if weights is None
+        else weights
+    )
+    tower_detuning = max_abs_detuning / (sum(tower_coeffs[i] for i in range(n_qubits)))
+
+    param = FeatureParameter(param)
+    duration = 1000 * param / tower_detuning
+    return kron(
+        AnalogRot(
+            duration=duration,
+            delta=-tower_detuning * tower_coeffs[i],
+            phase=0.0,
+            qubit_support=(i,),
+        )
+        for i in range(n_qubits)
+    )
+
+
+def rydberg_tower_feature_map(
+    n_qubits: int, param: str = "phi", max_abs_detuning: float = 2 * np.pi * 10
+) -> KronBlock:
+    weights = list(np.arange(1, n_qubits + 1))
+    return rydberg_feature_map(
+        n_qubits, param=param, max_abs_detuning=max_abs_detuning, weights=weights
+    )
+
+
+def analog_feature_map(
+    param: str = "phi",
+    op: Callable[[Parameter | Basic], AnalogBlock] = AnalogRX,
+    fm_type: BasisSet | type[Function] | str = BasisSet.FOURIER,
+    reupload_scaling: ReuploadScaling | Callable | str = ReuploadScaling.CONSTANT,
+    feature_range: tuple[float, float] | None = None,
+    target_range: tuple[float, float] | None = None,
+    multiplier: Parameter | TParameter | None = None,
+) -> AnalogBlock:
+    """Generate a fully analog feature map.
+
+    Args:
+        param: Parameter of the feature map; you can pass a string or Parameter;
+            it will be set as non-trainable (FeatureParameter) regardless.
+        op: type of operation. Choose among AnalogRX, AnalogRY, AnalogRZ or a custom
+            callable function returning an AnalogBlock instance.
+        fm_type: Basis set for data encoding; choose from `BasisSet.FOURIER` for Fourier
+            encoding, or `BasisSet.CHEBYSHEV` for Chebyshev polynomials of the first kind.
+        reupload_scaling: how the feature map scales the data that is re-uploaded. Given that
+            this feature map uses analog rotations, the reuploading works by simply
+            adding additional operations with different scaling factors in the parameter.
+            Choose from the `ReuploadScaling` enumeration (currently only CONSTANT works)
+            or provide your own function with the first argument being the given
+            operation `op` and the second argument the feature parameter.
+        feature_range: range of data that the input data is assumed to come from.
+        target_range: range of data the data encoder assumes as the natural range. For example,
+            in Chebyshev polynomials it is (-1, 1), while for Fourier it may be chosen as (0, 2*pi).
+        multiplier: overall multiplier; this is useful for reuploading the feature map serially with
+            different scalings; can be a number or parameter/expression.
+    """
+    transformed_feature = fm_parameter(
+        fm_type, param, feature_range=feature_range, target_range=target_range
+    )
+    multiplier = 1.0 if multiplier is None else Parameter(multiplier)
+
+    if callable(reupload_scaling):
+        return reupload_scaling(op, multiplier * transformed_feature)  # type: ignore[no-any-return]
+    elif reupload_scaling == ReuploadScaling.CONSTANT:
+        return op(multiplier * transformed_feature)
+    # TODO: implement tower scaling by reuploading multiple times
+    # using different analog rotations
+    else:
+        raise NotImplementedError(f"Reupload scaling {str(reupload_scaling)} not implemented!")
qadence/divergences.py
CHANGED
@@ -36,3 +36,15 @@ def js_divergence(counter_p: Counter, counter_q: Counter) -> float:
     entropy_p = shannon_entropy(counter_p)
     entropy_q = shannon_entropy(counter_q)
     return float(average_entropy - (entropy_p + entropy_q) / 2.0)
+
+
+def norm_difference(counter_p: Counter, counter_q: Counter) -> float:
+    # Normalise counters
+
+    counter_p = np.array([v for v in counter_p.values()])
+    counter_q = np.array([v for v in counter_q.values()])
+
+    prob_p = counter_p / np.sum(counter_p)
+    prob_q = counter_q / np.sum(counter_q)
+
+    return float(np.linalg.norm(prob_p - prob_q))
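A quick sketch of the new `norm_difference` helper next to the existing `js_divergence`. The counts below are made up, and both counters are assumed to list bitstrings in the same order, since the helper iterates over raw `.values()`.

```python
from collections import Counter

from qadence.divergences import js_divergence, norm_difference

# Two made-up 2-qubit measurement distributions with identical key order.
counts_p = Counter({"00": 500, "01": 250, "10": 150, "11": 100})
counts_q = Counter({"00": 480, "01": 270, "10": 160, "11": 90})

# norm_difference normalises both counters into probability vectors and
# returns the L2 norm of their difference.
print(norm_difference(counts_p, counts_q))
print(js_divergence(counts_p, counts_q))
```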
qadence/extensions.py
CHANGED
@@ -2,17 +2,12 @@ from __future__ import annotations
 
 import importlib
 from string import Template
-from typing import TypeVar
 
 from qadence.backend import Backend
-from qadence.blocks import (
-    AbstractBlock,
-)
+from qadence.blocks.abstract import TAbstractBlock
 from qadence.logger import get_logger
 from qadence.types import BackendName, DiffMode
 
-TAbstractBlock = TypeVar("TAbstractBlock", bound=AbstractBlock)
-
 backends_namespace = Template("qadence.backends.$name")
 
 logger = get_logger(__name__)
qadence/finitediff.py
ADDED
@@ -0,0 +1,47 @@
+from __future__ import annotations
+
+from typing import Callable
+
+import torch
+from torch import Tensor
+
+
+def finitediff(
+    f: Callable,
+    x: Tensor,
+    derivative_indices: tuple[int, ...],
+    eps: float = None,
+) -> Tensor:
+    """
+    Arguments:
+
+        f: Function to differentiate
+        x: Input of shape `(batch_size, input_size)`
+        derivative_indices: which *input* to differentiate (i.e. which variable x[:,i])
+        eps: finite difference spacing (uses `torch.finfo(x.dtype).eps ** (1 / (2 + order))` as a
+            default)
+    """
+
+    if eps is None:
+        order = len(derivative_indices)
+        eps = torch.finfo(x.dtype).eps ** (1 / (2 + order))
+
+    # compute derivative direction vector(s)
+    eps = torch.as_tensor(eps, dtype=x.dtype)
+    _eps = 1 / eps  # type: ignore[operator]
+    ev = torch.zeros_like(x)
+    i = derivative_indices[0]
+    ev[:, i] += eps
+
+    # recursive finite differencing for higher order than 3 / mixed derivatives
+    if len(derivative_indices) > 3 or len(set(derivative_indices)) > 1:
+        di = derivative_indices[1:]
+        return (finitediff(f, x + ev, di) - finitediff(f, x - ev, di)) * _eps / 2
+    elif len(derivative_indices) == 3:
+        return (f(x + 2 * ev) - 2 * f(x + ev) + 2 * f(x - ev) - f(x - 2 * ev)) * _eps**3 / 2
+    elif len(derivative_indices) == 2:
+        return (f(x + ev) + f(x - ev) - 2 * f(x)) * _eps**2
+    elif len(derivative_indices) == 1:
+        return (f(x + ev) - f(x - ev)) * _eps / 2
+    else:
+        raise ValueError("If you see this error there is a bug in the `finitediff` function.")
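As a sanity check on the stencils above, a small sketch comparing `finitediff` against the known analytic derivatives of f(x0, x1) = x0² · x1; the test function and input values are illustrative, not part of the diff.

```python
import torch

from qadence.finitediff import finitediff


def f(x: torch.Tensor) -> torch.Tensor:
    # f(x0, x1) = x0^2 * x1, evaluated row-wise on a (batch, 2) input.
    return (x[:, 0] ** 2 * x[:, 1]).unsqueeze(1)


x = torch.tensor([[1.5, 2.0], [0.5, -1.0]], dtype=torch.float64)

d_dx0 = finitediff(f, x, derivative_indices=(0,))       # central difference, ~ 2*x0*x1
d2_dx0 = finitediff(f, x, derivative_indices=(0, 0))    # second order, ~ 2*x1
d2_mixed = finitediff(f, x, derivative_indices=(0, 1))  # mixed, hits the recursive branch, ~ 2*x0
```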
qadence/mitigations/readout.py
CHANGED
@@ -6,17 +6,50 @@ from functools import reduce
 import numpy as np
 import numpy.typing as npt
 import torch
+from numpy.linalg import inv, matrix_rank, pinv
 from scipy.linalg import norm
 from scipy.optimize import LinearConstraint, minimize
 
 from qadence.mitigations.protocols import Mitigations
 from qadence.noise.protocols import Noise
+from qadence.types import ReadOutOptimization
 
 
 def corrected_probas(p_corr: npt.NDArray, T: npt.NDArray, p_raw: npt.NDArray) -> np.double:
     return norm(T @ p_corr.T - p_raw.T, ord=2) ** 2
 
 
+def mle_solve(p_raw: npt.NDArray) -> npt.NDArray:
+    """
+    Compute the MLE probability vector.
+
+    Algorithmic details can be found in https://arxiv.org/pdf/1106.5458.pdf, page 3.
+    """
+    # Sort p_raw by values while keeping track of indices.
+    index_sort = p_raw.argsort()
+    p_sort = p_raw[index_sort]
+    neg_sum = 0
+    breakpoint = len(p_sort) - 1
+
+    for i in range(len(p_sort)):
+        # If neg_sum cannot be distributed among the other probabilities, continue to accumulate.
+        if p_sort[i] + neg_sum / (len(p_sort) - i) < 0:
+            neg_sum += p_sort[i]
+            p_sort[i] = 0
+        # Otherwise, set the breakpoint to the current index.
+        else:
+            breakpoint = i
+            break
+    # Number of entries over which neg_sum can be distributed (includes the breakpoint).
+    size = len(p_sort) - breakpoint
+    p_sort[breakpoint:] += neg_sum / size
+
+    re_index_sort = index_sort.argsort()
+    p_corr = p_sort[re_index_sort]
+
+    return p_corr
+
+
 def renormalize_counts(corrected_counts: npt.NDArray, n_shots: int) -> npt.NDArray:
     """Renormalize counts rounding discrepancies."""
     total_counts = sum(corrected_counts)
@@ -25,51 +58,85 @@ def renormalize_counts(corrected_counts: npt.NDArray, n_shots: int) -> npt.NDArr
     corrected_counts -= counts_diff
     corrected_counts = np.where(corrected_counts < 0, 0, corrected_counts)
     sum_corrected_counts = sum(corrected_counts)
-
-
-            sum_corrected_counts, n_shots
-        )
-    else:
-        renormalization_factor = min(sum_corrected_counts, n_shots) / max(
-            sum_corrected_counts, n_shots
-        )
+
+    renormalization_factor = n_shots / sum_corrected_counts
     corrected_counts = np.rint(corrected_counts * renormalization_factor).astype(int)
     return corrected_counts
 
 
+def matrix_inv(K: npt.NDArray) -> npt.NDArray:
+    return inv(K) if matrix_rank(K) == K.shape[0] else pinv(K)
+
+
 def mitigation_minimization(
-    noise: Noise,
+    noise: Noise,
+    mitigation: Mitigations,
+    samples: list[Counter],
 ) -> list[Counter]:
     """Minimize a correction matrix subjected to stochasticity constraints.
 
     See Equation (5) in https://arxiv.org/pdf/2001.09980.pdf.
+    See page 3 in https://arxiv.org/pdf/1106.5458.pdf for the MLE implementation.
+
+    Args:
+        noise: Specifies the confusion matrix and default error probability.
+        mitigation: Selects additional mitigation options based on the noise choice.
+            For readout, the optimization options are 1. constrained, 2. mle. Default: mle.
+        samples: List of samples to be mitigated.
+
+    Returns:
+        Mitigated counts computed by the algorithm.
     """
     noise_matrices = noise.options.get("noise_matrix", noise.options["confusion_matrices"])
+    optimization_type = mitigation.options.get("optimization_type", ReadOutOptimization.MLE)
     n_qubits = len(list(samples[0].keys())[0])
     n_shots = sum(samples[0].values())
-    # Build the whole T matrix.
-    T_matrix = reduce(torch.kron, noise_matrices).detach().numpy()
     corrected_counters: list[Counter] = []
+
+    if optimization_type == ReadOutOptimization.CONSTRAINED:
+        # Build the whole T matrix.
+        T_matrix = reduce(torch.kron, noise_matrices).detach().numpy()
+
+    if optimization_type == ReadOutOptimization.MLE:
+        # Check if the matrix is singular and use the appropriate inverse.
+        noise_matrices_inv = list(map(matrix_inv, noise_matrices.numpy()))
+        T_inv = reduce(np.kron, noise_matrices_inv)
+
     for sample in samples:
         bitstring_length = 2**n_qubits
         # List of bitstrings in lexicographical order.
         ordered_bitstrings = [f"{i:0{n_qubits}b}" for i in range(bitstring_length)]
         # Array of raw probabilities.
         p_raw = np.array([sample[bs] for bs in ordered_bitstrings]) / n_shots
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+        if optimization_type == ReadOutOptimization.CONSTRAINED:
+            # Initial random guess in [0,1].
+            p_corr0 = np.random.rand(bitstring_length)
+            # Stochasticity constraints.
+            normality_constraint = LinearConstraint(
+                np.ones(bitstring_length).astype(int), lb=1.0, ub=1.0
+            )
+            positivity_constraint = LinearConstraint(
+                np.eye(bitstring_length).astype(int), lb=0.0, ub=1.0
+            )
+            constraints = [normality_constraint, positivity_constraint]
+            # Minimize the corrected probabilities.
+            res = minimize(
+                corrected_probas, p_corr0, args=(T_matrix, p_raw), constraints=constraints
+            )
+            p_corr = res.x
+
+        elif optimization_type == ReadOutOptimization.MLE:
+            # Compute the corrected probabilities using matrix inversion and run MLE.
+            p_corr = mle_solve(T_inv @ p_raw)
+        else:
+            raise NotImplementedError(
+                f"Requested method {optimization_type} does not match supported protocols."
+            )
+
+        corrected_counts = np.rint(p_corr * n_shots).astype(int)
+
         # Renormalize if total counts differ from n_shots.
         corrected_counts = renormalize_counts(corrected_counts=corrected_counts, n_shots=n_shots)
         # At this point, the count should be off by at most 2, added or subtracted to/from the
qadence/models/qnn.py
CHANGED
@@ -1,13 +1,16 @@
 from __future__ import annotations
 
+from collections import Counter
 from typing import Callable
 
+import sympy
 from torch import Tensor
 
-from qadence.backend import BackendConfiguration
+from qadence.backend import BackendConfiguration, ConvertedObservable
 from qadence.blocks.abstract import AbstractBlock
 from qadence.circuit import QuantumCircuit
 from qadence.measurements import Measurements
+from qadence.mitigations import Mitigations
 from qadence.models.quantum_model import QuantumModel
 from qadence.noise import Noise
 from qadence.types import BackendName, DiffMode, Endianness
@@ -19,22 +22,25 @@ class QNN(QuantumModel):
     Examples:
     ```python exec="on" source="material-block" result="json"
     import torch
-    from qadence import QuantumCircuit, QNN
-    from qadence import hea, feature_map, hamiltonian_factory,
+    from qadence import QuantumCircuit, QNN, Z
+    from qadence import hea, feature_map, hamiltonian_factory, kron
 
     # create the circuit
     n_qubits, depth = 2, 4
-    fm =
+    fm = kron(
+        feature_map(1, support=(0,), param="x"),
+        feature_map(1, support=(1,), param="y")
+    )
     ansatz = hea(n_qubits=n_qubits, depth=depth)
     circuit = QuantumCircuit(n_qubits, fm, ansatz)
-    obs_base = hamiltonian_factory(n_qubits, detuning
+    obs_base = hamiltonian_factory(n_qubits, detuning=Z)
 
     # the QNN will yield two outputs
     obs = [2.0 * obs_base, 4.0 * obs_base]
 
     # initialize and use the model
-    qnn = QNN(circuit, obs,
-    y = qnn
+    qnn = QNN(circuit, obs, inputs=["x", "y"])
+    y = qnn(torch.rand(3, 2))
     print(str(y)) # markdown-exec: hide
     ```
     """
@@ -49,6 +55,7 @@ class QNN(QuantumModel):
         measurement: Measurements | None = None,
         noise: Noise | None = None,
         configuration: BackendConfiguration | dict | None = None,
+        inputs: list[sympy.Basic | str] | None = None,
     ):
         """Initialize the QNN.
 
@@ -59,6 +66,9 @@ class QNN(QuantumModel):
         Args:
             circuit: The quantum circuit to use for the QNN.
             transform: A transformation applied to the output of the QNN.
+            inputs: List that indicates the order of variables of the tensors that are passed
+                to the model. Given input tensors `xs = torch.rand(batch_size, input_size:=2)` a QNN
+                with `inputs=["t", "x"]` will assign `t, x = xs[:,0], xs[:,1]`.
             backend: The chosen quantum backend.
             diff_mode: The differentiation engine to use. Choices: 'gpsr' or 'ad'.
             measurement: optional measurement protocol. If None,
@@ -67,7 +77,7 @@ class QNN(QuantumModel):
             configuration: optional configuration for the backend
         """
         super().__init__(
-            circuit
+            circuit,
             observable=observable,
             backend=backend,
             diff_mode=diff_mode,
@@ -75,12 +85,33 @@ class QNN(QuantumModel):
             configuration=configuration,
             noise=noise,
         )
-
         if self.out_features is None:
             raise ValueError("You need to provide at least one observable in the QNN constructor")
-
         self.transform = transform if transform else lambda x: x
 
+        if (inputs is not None) and (len(self.inputs) == len(inputs)):
+            self.inputs = [sympy.symbols(x) if isinstance(x, str) else x for x in inputs]  # type: ignore[union-attr]
+        elif (inputs is None) and len(self.inputs) <= 1:
+            self.inputs = [sympy.symbols(x) if isinstance(x, str) else x for x in self.inputs]  # type: ignore[union-attr]
+        else:
+            raise ValueError(
+                """
+                Your QNN has more than one input. Please provide a list of inputs in the order of
+                your tensor domain. For example, if you want to pass
+                `xs = torch.rand(batch_size, input_size:=3)` to your QNN, where
+                ```
+                t = xs[:,0]
+                x = xs[:,1]
+                y = xs[:,2]
+                ```
+                you have to specify
+                ```
+                QNN(circuit, observable, inputs=["t", "x", "y"])
+                ```
+                You can also pass a list of sympy symbols.
+                """
+            )
+
     def forward(
         self,
         values: dict[str, Tensor] | Tensor = None,
@@ -103,7 +134,7 @@ class QNN(QuantumModel):
         is instead `n_batches x n_observables`
 
         Args:
-            values
+            values: the values of the feature parameters
             state: Initial state.
             measurement: optional measurement protocol. If None,
                 use exact expectation value with a statevector simulator
@@ -114,18 +145,55 @@ class QNN(QuantumModel):
             Tensor: a tensor with the expectation value of the observables passed
             in the constructor of the model
         """
+        return self.expectation(
+            values, state=state, measurement=measurement, noise=noise, endianness=endianness
+        )
+
+    def run(
+        self,
+        values: Tensor | dict[str, Tensor] = None,
+        state: Tensor | None = None,
+        endianness: Endianness = Endianness.BIG,
+    ) -> Tensor:
+        return super().run(values=self._format_to_dict(values), state=state, endianness=endianness)
+
+    def sample(
+        self,
+        values: Tensor | dict[str, Tensor] = {},
+        n_shots: int = 1000,
+        state: Tensor | None = None,
+        noise: Noise | None = None,
+        mitigation: Mitigations | None = None,
+        endianness: Endianness = Endianness.BIG,
+    ) -> list[Counter]:
+        return super().sample(
+            values=self._format_to_dict(values),
+            n_shots=n_shots,
+            state=state,
+            noise=noise,
+            mitigation=mitigation,
+            endianness=endianness,
+        )
+
+    def expectation(
+        self,
+        values: Tensor | dict[str, Tensor] = {},
+        observable: list[ConvertedObservable] | ConvertedObservable | None = None,
+        state: Tensor | None = None,
+        measurement: Measurements | None = None,
+        noise: Noise | None = None,
+        mitigation: Mitigations | None = None,
+        endianness: Endianness = Endianness.BIG,
+    ) -> Tensor:
         if values is None:
             values = {}
-        if not isinstance(values, dict):
-            values = self._format_to_dict(values)
         if measurement is None:
             measurement = self._measurement
         if noise is None:
             noise = self._noise
-
         return self.transform(
-
-            values=values,
+            super().expectation(
+                values=self._format_to_dict(values),
                 state=state,
                 measurement=measurement,
                 endianness=endianness,
@@ -139,6 +207,9 @@ class QNN(QuantumModel):
         The tensor is assumed to have dimensions: n_batches x in_features where in_features
         corresponds to the number of input features of the QNN
         """
+        # for backwards compat...
+        if isinstance(values, dict):
+            return values
 
         if len(values.size()) == 1:
             values = values.reshape(-1, 1)
@@ -146,10 +217,4 @@ class QNN(QuantumModel):
         assert len(values.size()) == 2, msg
         assert values.size()[1] == self.in_features, msg
 
-
-        res = {}
-        for i, name in enumerate(names):
-            res[name] = values[:, i]
-        return res
-
-# TODO: Implement derivatives w.r.t. to inputs
+        return {var.name: values[:, self.inputs.index(var)] for var in self.inputs}