qadence 1.6.2__py3-none-any.whl → 1.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
qadence/finitediff.py DELETED
@@ -1,47 +0,0 @@
-from __future__ import annotations
-
-from typing import Callable
-
-import torch
-from torch import Tensor
-
-
-def finitediff(
-    f: Callable,
-    x: Tensor,
-    derivative_indices: tuple[int, ...],
-    eps: float = None,
-) -> Tensor:
-    """
-    Arguments:
-
-        f: Function to differentiate
-        x: Input of shape `(batch_size, input_size)`
-        derivative_indices: which *input* to differentiate (i.e. which variable x[:,i])
-        eps: finite difference spacing (uses `torch.finfo(x.dtype).eps ** (1 / (2 + order))` as a
-            default)
-    """
-
-    if eps is None:
-        order = len(derivative_indices)
-        eps = torch.finfo(x.dtype).eps ** (1 / (2 + order))
-
-    # compute derivative direction vector(s)
-    eps = torch.as_tensor(eps, dtype=x.dtype)
-    _eps = 1 / eps  # type: ignore[operator]
-    ev = torch.zeros_like(x)
-    i = derivative_indices[0]
-    ev[:, i] += eps
-
-    # recursive finite differencing for higher order than 3 / mixed derivatives
-    if len(derivative_indices) > 3 or len(set(derivative_indices)) > 1:
-        di = derivative_indices[1:]
-        return (finitediff(f, x + ev, di) - finitediff(f, x - ev, di)) * _eps / 2
-    elif len(derivative_indices) == 3:
-        return (f(x + 2 * ev) - 2 * f(x + ev) + 2 * f(x - ev) - f(x - 2 * ev)) * _eps**3 / 2
-    elif len(derivative_indices) == 2:
-        return (f(x + ev) + f(x - ev) - 2 * f(x)) * _eps**2
-    elif len(derivative_indices) == 1:
-        return (f(x + ev) - f(x - ev)) * _eps / 2
-    else:
-        raise ValueError("If you see this error there is a bug in the `finitediff` function.")
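For context, the removed `qadence/finitediff.py` provided a central-difference helper for batched inputs of shape `(batch_size, input_size)`. A minimal usage sketch against qadence <= 1.6.2 (the last versions that ship this module); the toy function `f` is hypothetical:

```python
import torch

# Available in qadence <= 1.6.2 only; the module is removed in 1.7.0.
from qadence.finitediff import finitediff

# Hypothetical scalar function of a batched input of shape (batch_size, input_size).
f = lambda x: (x**2).sum(dim=1, keepdim=True)
x = torch.rand(5, 2)

df_dx0 = finitediff(f, x, derivative_indices=(0,))        # first derivative w.r.t. x[:, 0]
d2f_dx0 = finitediff(f, x, derivative_indices=(0, 0))     # second derivative w.r.t. x[:, 0]
d2f_dx0dx1 = finitediff(f, x, derivative_indices=(0, 1))  # mixed derivative w.r.t. x[:, 0] and x[:, 1]
```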
qadence/models/__init__.py DELETED
@@ -1,7 +0,0 @@
-from __future__ import annotations
-
-from .qnn import QNN
-from .quantum_model import QuantumModel
-
-# Modules to be automatically added to the qadence namespace
-__all__ = ["QNN", "QuantumModel"]
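The deleted `__init__` above only re-exported `QNN` and `QuantumModel`. Code that imported through the `qadence.models` subpackage needs a different import path after this release; the top-level import already used in the `QNN` docstring below is the portable spelling. A sketch of the change (old path taken from the deleted file, new path from the docstring example):

```python
# qadence <= 1.6.2: via the models subpackage removed in this release
# from qadence.models import QNN, QuantumModel

# top-level import, as used in the QNN docstring below
from qadence import QNN, QuantumModel
```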
qadence/models/qnn.py DELETED
@@ -1,265 +0,0 @@
-from __future__ import annotations
-
-from collections import Counter
-from typing import Callable
-
-import sympy
-from torch import Tensor, nn
-
-from qadence.backend import BackendConfiguration, ConvertedObservable
-from qadence.backends.api import config_factory
-from qadence.blocks.abstract import AbstractBlock
-from qadence.circuit import QuantumCircuit
-from qadence.logger import get_logger
-from qadence.measurements import Measurements
-from qadence.mitigations import Mitigations
-from qadence.models.quantum_model import QuantumModel
-from qadence.noise import Noise
-from qadence.types import BackendName, DiffMode, Endianness
-
-logger = get_logger(__name__)
-
-
-class QNN(QuantumModel):
-    """Quantum neural network model for n-dimensional inputs.
-
-    Examples:
-    ```python exec="on" source="material-block" result="json"
-    import torch
-    from qadence import QuantumCircuit, QNN, Z
-    from qadence import hea, feature_map, hamiltonian_factory, kron
-
-    # create the circuit
-    n_qubits, depth = 2, 4
-    fm = kron(
-        feature_map(1, support=(0,), param="x"),
-        feature_map(1, support=(1,), param="y")
-    )
-    ansatz = hea(n_qubits=n_qubits, depth=depth)
-    circuit = QuantumCircuit(n_qubits, fm, ansatz)
-    obs_base = hamiltonian_factory(n_qubits, detuning=Z)
-
-    # the QNN will yield two outputs
-    obs = [2.0 * obs_base, 4.0 * obs_base]
-
-    # initialize and use the model
-    qnn = QNN(circuit, obs, inputs=["x", "y"])
-    y = qnn(torch.rand(3, 2))
-    print(str(y)) # markdown-exec: hide
-    ```
-    """
-
-    def __init__(
-        self,
-        circuit: QuantumCircuit,
-        observable: list[AbstractBlock] | AbstractBlock,
-        transform: Callable[[Tensor], Tensor] = None,  # transform output of the QNN
-        backend: BackendName = BackendName.PYQTORCH,
-        diff_mode: DiffMode = DiffMode.AD,
-        measurement: Measurements | None = None,
-        noise: Noise | None = None,
-        configuration: BackendConfiguration | dict | None = None,
-        inputs: list[sympy.Basic | str] | None = None,
-    ):
-        """Initialize the QNN.
-
-        The number of inputs is determined by the feature parameters in the input
-        quantum circuit while the number of outputs is determined by how many
-        observables are provided as input
-
-        Args:
-            circuit: The quantum circuit to use for the QNN.
-            transform: A transformation applied to the output of the QNN.
-            inputs: Tuple that indicates the order of variables of the tensors that are passed
-                to the model. Given input tensors `xs = torch.rand(batch_size, input_size:=2)` a QNN
-                with `inputs=("t", "x")` will assign `t, x = xs[:,0], xs[:,1]`.
-            backend: The chosen quantum backend.
-            diff_mode: The differentiation engine to use. Choices 'gpsr' or 'ad'.
-            measurement: optional measurement protocol. If None,
-                use exact expectation value with a statevector simulator
-            noise: A noise model to use.
-            configuration: optional configuration for the backend
-        """
-        super().__init__(
-            circuit,
-            observable=observable,
-            backend=backend,
-            diff_mode=diff_mode,
-            measurement=measurement,
-            configuration=configuration,
-            noise=noise,
-        )
-        if self.out_features is None:
-            raise ValueError("You need to provide at least one observable in the QNN constructor")
-        self.transform = transform if transform else lambda x: x
-
-        if (inputs is not None) and (len(self.inputs) == len(inputs)):
-            self.inputs = [sympy.symbols(x) if isinstance(x, str) else x for x in inputs]  # type: ignore[union-attr]
-        elif (inputs is None) and len(self.inputs) <= 1:
-            self.inputs = [sympy.symbols(x) if isinstance(x, str) else x for x in self.inputs]  # type: ignore[union-attr]
-        else:
-            raise ValueError(
-                """
-                Your QNN has more than one input. Please provide a list of inputs in the order of
-                your tensor domain. For example, if you want to pass
-                `xs = torch.rand(batch_size, input_size:=3)` to you QNN, where
-                ```
-                t = x[:,0]
-                x = x[:,1]
-                y = x[:,2]
-                ```
-                you have to specify
-                ```
-                QNN(circuit, observable, inputs=["t", "x", "y"])
-                ```
-                You can also pass a list of sympy symbols.
-                """
-            )
-
-    def forward(
-        self,
-        values: dict[str, Tensor] | Tensor = None,
-        state: Tensor | None = None,
-        measurement: Measurements | None = None,
-        noise: Noise | None = None,
-        endianness: Endianness = Endianness.BIG,
-    ) -> Tensor:
-        """Forward pass of the model.
-
-        This returns the (differentiable) expectation value of the given observable
-        operator defined in the constructor. Differently from the base QuantumModel
-        class, the QNN accepts also a tensor as input for the forward pass. The
-        tensor is expected to have shape: `n_batches x in_features` where `n_batches`
-        is the number of data points and `in_features` is the dimensionality of the problem
-
-        The output of the forward pass is the expectation value of the input
-        observable(s). If a single observable is given, the output shape is
-        `n_batches` while if multiple observables are given the output shape
-        is instead `n_batches x n_observables`
-
-        Args:
-            values: the values of the feature parameters
-            state: Initial state.
-            measurement: optional measurement protocol. If None,
-                use exact expectation value with a statevector simulator
-            noise: A noise model to use.
-            endianness: Endianness of the resulting bit strings.
-
-        Returns:
-            Tensor: a tensor with the expectation value of the observables passed
-                in the constructor of the model
-        """
-        return self.expectation(
-            values, state=state, measurement=measurement, noise=noise, endianness=endianness
-        )
-
-    def run(
-        self,
-        values: Tensor | dict[str, Tensor] = None,
-        state: Tensor | None = None,
-        endianness: Endianness = Endianness.BIG,
-    ) -> Tensor:
-        return super().run(values=self._format_to_dict(values), state=state, endianness=endianness)
-
-    def sample(
-        self,
-        values: Tensor | dict[str, Tensor] = {},
-        n_shots: int = 1000,
-        state: Tensor | None = None,
-        noise: Noise | None = None,
-        mitigation: Mitigations | None = None,
-        endianness: Endianness = Endianness.BIG,
-    ) -> list[Counter]:
-        return super().sample(
-            values=self._format_to_dict(values),
-            n_shots=n_shots,
-            state=state,
-            noise=noise,
-            mitigation=mitigation,
-            endianness=endianness,
-        )
-
-    def expectation(
-        self,
-        values: Tensor | dict[str, Tensor] = {},
-        observable: list[ConvertedObservable] | ConvertedObservable | None = None,
-        state: Tensor | None = None,
-        measurement: Measurements | None = None,
-        noise: Noise | None = None,
-        mitigation: Mitigations | None = None,
-        endianness: Endianness = Endianness.BIG,
-    ) -> Tensor:
-        if values is None:
-            values = {}
-        if measurement is None:
-            measurement = self._measurement
-        if noise is None:
-            noise = self._noise
-        return self.transform(
-            super().expectation(
-                values=self._format_to_dict(values),
-                state=state,
-                measurement=measurement,
-                endianness=endianness,
-                noise=noise,
-            )
-        )
-
-    def _format_to_dict(self, values: Tensor) -> dict[str, Tensor]:
-        """Format an input tensor into the format required by the forward pass.
-
-        The tensor is assumed to have dimensions: n_batches x in_features where in_features
-        corresponds to the number of input features of the QNN
-        """
-        # for backwards compat...
-        if isinstance(values, dict):
-            return values
-
-        if len(values.size()) == 1:
-            values = values.reshape(-1, 1)
-        msg = f"Model expects in_features={self.in_features} but got {values.size()[1]}."
-        assert len(values.size()) == 2, msg
-        assert values.size()[1] == self.in_features, msg
-
-        return {var.name: values[:, self.inputs.index(var)] for var in self.inputs}
-
-    def _to_dict(self, save_params: bool = False) -> dict:
-        d = dict()
-        try:
-            d = super()._to_dict(save_params)
-            d[self.__class__.__name__]["inputs"] = [str(i) for i in self.inputs]
-            logger.debug(f"{self.__class__.__name__} serialized to {d}.")
-        except Exception as e:
-            logger.warning(f"Unable to serialize {self.__class__.__name__} due to {e}.")
-        return d
-
-    @classmethod
-    def _from_dict(cls, d: dict, as_torch: bool = False) -> QNN:
-        from qadence.serialization import deserialize
-
-        qnn: QNN
-        try:
-            qm_dict = d[cls.__name__]
-            qnn = cls(
-                circuit=QuantumCircuit._from_dict(qm_dict["circuit"]),
-                observable=[deserialize(q_obs) for q_obs in qm_dict["observable"]],  # type: ignore[misc]
-                backend=qm_dict["backend"],
-                diff_mode=qm_dict["diff_mode"],
-                measurement=Measurements._from_dict(qm_dict["measurement"]),
-                noise=Noise._from_dict(qm_dict["noise"]),
-                configuration=config_factory(qm_dict["backend"], qm_dict["backend_configuration"]),
-                inputs=qm_dict["inputs"],
-            )
-
-            if as_torch:
-                conv_pd = nn.ParameterDict()
-                param_dict = d["param_dict"]
-                for n, param in param_dict.items():
-                    conv_pd[n] = nn.Parameter(param)
-                qnn._params = conv_pd
-                logger.debug(f"Initialized {cls.__name__} from {d}.")
-
-        except Exception as e:
-            logger.warning(f"Unable to deserialize object {d} to {cls.__name__} due to {e}.")
-
-        return qnn
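Since the removed `_format_to_dict` maps the columns of a batched tensor onto the model's input symbols, passing a `(n_batches, in_features)` tensor and passing the corresponding per-column dict are interchangeable. A small sketch reusing the circuit from the QNN docstring above, assuming qadence <= 1.6.2 (where this `QNN` implementation still ships); the top-level imports mirror the docstring example:

```python
import torch
from qadence import QNN, QuantumCircuit, Z
from qadence import hea, feature_map, hamiltonian_factory, kron

n_qubits = 2
fm = kron(
    feature_map(1, support=(0,), param="x"),
    feature_map(1, support=(1,), param="y"),
)
circuit = QuantumCircuit(n_qubits, fm, hea(n_qubits=n_qubits, depth=2))
obs = hamiltonian_factory(n_qubits, detuning=Z)
qnn = QNN(circuit, obs, inputs=["x", "y"])

xs = torch.rand(3, 2)
# A (batch, in_features) tensor and the equivalent per-column dict give the same result,
# because the tensor is internally converted to {"x": xs[:, 0], "y": xs[:, 1]}.
assert torch.allclose(qnn(xs), qnn({"x": xs[:, 0], "y": xs[:, 1]}))
```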