qadence 1.6.3__py3-none-any.whl → 1.7.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
@@ -1,320 +1,408 @@
  from __future__ import annotations

+ from collections import Counter
  from logging import getLogger
- from typing import Any, Counter, List
+ from typing import Any, Callable

- import numpy as np
+ import sympy
  import torch
- from torch import Tensor
- from torch.nn import Parameter as TorchParam
+ from torch import Tensor, nn

- from qadence.backend import ConvertedObservable
+ from qadence.backend import BackendConfiguration, ConvertedObservable
+ from qadence.backends.api import config_factory
+ from qadence.blocks.abstract import AbstractBlock
+ from qadence.circuit import QuantumCircuit
  from qadence.measurements import Measurements
- from qadence.ml_tools import promote_to_tensor
- from qadence.models import QNN, QuantumModel
+ from qadence.mitigations import Mitigations
+ from qadence.model import QuantumModel
  from qadence.noise import Noise
- from qadence.utils import Endianness
+ from qadence.register import Register
+ from qadence.types import BackendName, DiffMode, Endianness, InputDiffMode, ParamDictType


  logger = getLogger(__name__)


- def _set_fixed_operation(
-     dim: int,
-     x: float | np.ndarray | Tensor | None = None,
-     operation_name: str = "scale",
- ) -> Tensor:
-     dim = dim if dim > 0 else 1
-     if x is None:
-         if operation_name == "shift":
-             x = torch.zeros(dim)
-         elif operation_name == "scale":
-             x = torch.ones(dim)
-         else:
-             NotImplementedError
-     res = promote_to_tensor(x, requires_grad=False).squeeze(0)
-     assert (
-         res.numel() == dim
-     ), f"Number of {operation_name} values is {res.numel()}\
-         and does not match number of dimensions = {dim}."
-     return res
+ def _torch_derivative(
+     ufa: Callable, x: torch.Tensor, derivative_indices: tuple[int, ...]
+ ) -> torch.Tensor:
+     y = ufa(x)
+     for idx in derivative_indices:
+         out = torch.autograd.grad(y, x, torch.ones_like(y), create_graph=True)[0]
+         y = out[:, idx]
+     return y.reshape(-1, 1)
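The nested `torch.autograd.grad` loop above is what powers input differentiation in AD mode. A minimal standalone sketch of the same pattern on a toy analytic function, checked against the closed-form result (`f` and all shapes here are illustrative, not part of qadence):

```python
import torch

def f(x: torch.Tensor) -> torch.Tensor:
    # toy model with two inputs and one output: f(x, y) = x**2 * y, shape (batch, 1)
    return (x[:, 0] ** 2 * x[:, 1]).reshape(-1, 1)

x = torch.rand(4, 2, requires_grad=True)

# replicate the loop for d2f/dxdy: differentiate, slice column 0, differentiate again, slice column 1
y = f(x)
for idx in (0, 1):
    out = torch.autograd.grad(y, x, torch.ones_like(y), create_graph=True)[0]
    y = out[:, idx]

# analytic mixed derivative of x**2 * y is 2x, so the results should match
assert torch.allclose(y, 2 * x[:, 0])
```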


- class TransformedModule(torch.nn.Module):
-     """
-     This class accepts a torch.nn.Module or a QuantumModel/QNN.
- 
-     Wraps it with either non-trainble or trainable scaling and shifting parameters
-     for both input and output. When given a torch.nn.Module,
-     in_features and out_features need to be passed.
- 
-     Args:
-         model: The original model to transform.
-         in_features: The number of input dimensions of the model.
-         out_features: The number of output dimensions of the model.
-         input_scaling: The rescaling factor for the model input. Defaults to None.
-         input_shifting: The translation factor for the model input. Defaults to None.
-         output_scaling: The rescaling factor for the model output. Defaults to None.
-         output_shifting: The translation factor for the model output. Defaults to None.
- 
-     Example:
+ def derivative(ufa: torch.nn.Module, x: Tensor, derivative_indices: tuple[int, ...]) -> Tensor:
+     """Compute derivatives of a UFA with a single output w.r.t. its inputs.
+ 
+     The `derivative_indices` specify which derivative(s) are computed, e.g.
+     `derivative_indices=(1,2)` computes the second-order derivative w.r.t.
+     the indices `1` and `2` of the input tensor.
42
+
43
+ Arguments:
44
+ ufa: The model for which we want to compute the derivative.
45
+ x (Tensor): (batch_size, input_size) input tensor.
46
+ derivative_indices (tuple): Define which derivatives to compute.
47
+
48
+ Examples:
49
+ If we create a UFA with three inputs and denote the first, second, and third
50
+ input with `x`, `y`, and `z` we can compute the following derivatives w.r.t
51
+ to those inputs:
52
+ ```py exec="on" source="material-block"
53
+ import torch
54
+ from qadence.ml_tools.models import derivative, QNN
55
+ from qadence.ml_tools.config import FeatureMapConfig, AnsatzConfig
56
+ from qadence.constructors.hamiltonians import ObservableConfig
57
+ from qadence.operations import Z
58
+
59
+ fm_config = FeatureMapConfig(num_features=3, inputs=["x", "y", "z"])
60
+ ansatz_config = AnsatzConfig()
61
+ obs_config = ObservableConfig(detuning=Z)
62
+
63
+ f = QNN.from_configs(
64
+ register=3, fm_config=fm_config, ansatz_config=ansatz_config, obs_config=obs_config
65
+ )
66
+ inputs = torch.rand(5,3,requires_grad=True)
67
+
68
+ # df_dx
69
+ derivative(f, inputs, (0,))
70
+
71
+ # d2f_dydz
72
+ derivative(f, inputs, (1,2))
73
+
74
+ # d3fdy2dx
75
+ derivative(f, inputs, (1,1,0))
60
76
  ```
77
+ """
78
+ assert ufa.out_features == 1, "Can only call `derivative` on models with 1D output."
79
+ return ufa._derivative(x, derivative_indices)
+ 
+ 
+ def format_to_dict_fn(
+     inputs: list[sympy.Symbol | str] = [],
+ ) -> Callable[[Tensor | ParamDictType], ParamDictType]:
+     """Format an input tensor into the format required by the forward pass.
+ 
+     The tensor is assumed to have dimensions: n_batches x in_features, where in_features
+     corresponds to the number of input features of the QNN.
+     """
+     in_features = len(inputs)
+ 
+     def tensor_to_dict(values: Tensor | ParamDictType) -> ParamDictType:
+         if isinstance(values, Tensor):
+             values = values.reshape(-1, 1) if len(values.size()) == 1 else values
+             if values.shape[1] != in_features:
+                 raise ValueError(
+                     f"Model expects in_features={in_features} but got {values.shape[1]}."
+                 )
+             values = {fparam.name: values[:, inputs.index(fparam)] for fparam in inputs}  # type: ignore[union-attr]
+         return values
+ 
+     return tensor_to_dict
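`format_to_dict_fn` returns a closure that maps tensor columns to feature-parameter names in the order given by `inputs`, and passes dicts through untouched. A small usage sketch (the symbols and batch values are made up for illustration; the import path is the module shown in this file's own docstrings):

```python
import sympy
import torch
from qadence.ml_tools.models import format_to_dict_fn

# the closure maps tensor columns to parameter names in the given order
to_dict = format_to_dict_fn(inputs=list(sympy.symbols("x y")))

batch = torch.rand(5, 2)  # n_batches x in_features
values = to_dict(batch)
assert set(values) == {"x", "y"}
assert torch.equal(values["x"], batch[:, 0])

# dicts pass through unchanged, so callers can supply either form
assert to_dict({"x": batch[:, 0], "y": batch[:, 1]})["y"].shape == (5,)
```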
+ 
+ 
+ class QNN(QuantumModel):
+     """Quantum neural network model for n-dimensional inputs.
+ 
+     Examples:
+     ```python exec="on" source="material-block" result="json"
      import torch
-     from torch.nn import Parameter as TorchParam
-     from qadence.models import QNN, TransformedModule
-     from qadence.circuit import QuantumCircuit
-     from qadence.blocks import chain
-     from qadence.constructors import hamiltonian_factory, hea
-     from qadence import Parameter, QuantumCircuit, Z
- 
-     n_qubits = 2
-     phi = Parameter("phi", trainable=False)
-     fm = chain(*[RY(i, phi) for i in range(n_qubits)])
-     ansatz = hea(n_qubits=n_qubits, depth=3)
-     observable = hamiltonian_factory(n_qubits, detuning = Z)
+     from qadence import QuantumCircuit, QNN, Z
+     from qadence import hea, feature_map, hamiltonian_factory, kron
+ 
+     # create the circuit
+     n_qubits, depth = 2, 4
+     fm = kron(
+         feature_map(1, support=(0,), param="x"),
+         feature_map(1, support=(1,), param="y")
+     )
+     ansatz = hea(n_qubits=n_qubits, depth=depth)
      circuit = QuantumCircuit(n_qubits, fm, ansatz)
+     obs_base = hamiltonian_factory(n_qubits, detuning=Z)

-     model = QNN(circuit, observable, backend="pyqtorch", diff_mode="ad")
-     batch_size = 1
-     input_values = {"phi": torch.rand(batch_size, requires_grad=True)}
-     pred = model(input_values)
-     assert not torch.isnan(pred)
- 
-     transformed_model = TransformedModule(
-         model=model,
-         in_features=None,
-         out_features=None,
-         input_scaling=TorchParam(torch.tensor(1.0)),
-         input_shifting=0.0,
-         output_scaling=1.0,
-         output_shifting=TorchParam(torch.tensor(0.0))
-     )
-     pred_transformed = transformed_model(input_values)
+     # the QNN will yield two outputs
+     obs = [2.0 * obs_base, 4.0 * obs_base]
+ 
+     # initialize and use the model
+     qnn = QNN(circuit, obs, inputs=["x", "y"])
+     y = qnn(torch.rand(3, 2))
+     print(str(y))  # markdown-exec: hide
      ```
      """

      def __init__(
          self,
-         model: torch.nn.Module | QuantumModel | QNN,
-         in_features: int | None = None,
-         out_features: int | None = None,
-         input_scaling: TorchParam | float | int | torch.Tensor | None = None,
-         input_shifting: TorchParam | float | int | torch.Tensor | None = None,
-         output_scaling: TorchParam | float | int | torch.Tensor | None = None,
-         output_shifting: TorchParam | float | int | torch.Tensor | None = None,
-     ) -> None:
-         super().__init__()
-         self.model = model
-         if in_features is None and out_features is None:
-             assert isinstance(model, (QuantumModel, QNN))
-             self.in_features = model.in_features
-             self.out_features = model.out_features if model.out_features else 1
-         else:
-             self.in_features = in_features  # type: ignore[assignment]
-             self.out_features = out_features  # type: ignore[assignment]
-         if not isinstance(input_scaling, torch.Tensor):
-             self.register_buffer(
-                 "_input_scaling",
-                 _set_fixed_operation(self.in_features, input_scaling, "scale"),
-             )
-         else:
-             self._input_scaling = input_scaling
-         if not isinstance(input_shifting, torch.Tensor):
-             self.register_buffer(
-                 "_input_shifting",
-                 _set_fixed_operation(self.in_features, input_shifting, "shift"),
-             )
-         else:
-             self._input_shifting = input_shifting
-         if not isinstance(output_scaling, torch.Tensor):
-             self.register_buffer(
-                 "_output_scaling",
-                 _set_fixed_operation(self.out_features, output_scaling, "scale"),
-             )
+         circuit: QuantumCircuit,
+         observable: list[AbstractBlock] | AbstractBlock,
+         backend: BackendName = BackendName.PYQTORCH,
+         diff_mode: DiffMode = DiffMode.AD,
+         measurement: Measurements | None = None,
+         noise: Noise | None = None,
+         configuration: BackendConfiguration | dict | None = None,
+         inputs: list[sympy.Basic | str] | None = None,
+         input_diff_mode: InputDiffMode | str = InputDiffMode.AD,
+     ):
+         """Initialize the QNN.
+ 
+         The number of inputs is determined by the feature parameters in the input
+         quantum circuit, while the number of outputs is determined by how many
+         observables are provided as input.
+ 
+         Args:
+             circuit: The quantum circuit to use for the QNN.
+             observable: The observable.
+             backend: The chosen quantum backend.
+             diff_mode: The differentiation engine to use. Choices: 'gpsr' or 'ad'.
+             measurement: Optional measurement protocol. If None,
+                 use exact expectation value with a statevector simulator.
+             noise: A noise model to use.
+             configuration: Optional configuration for the backend.
+             inputs: List that indicates the order of variables of the tensors that are passed
+                 to the model. Given input tensors `xs = torch.rand(batch_size, input_size:=2)` a QNN
+                 with `inputs=["t", "x"]` will assign `t, x = xs[:,0], xs[:,1]`.
+             input_diff_mode: The differentiation mode for the input tensor.
+         """
+         super().__init__(
+             circuit,
+             observable=observable,
+             backend=backend,
+             diff_mode=diff_mode,
+             measurement=measurement,
+             configuration=configuration,
+             noise=noise,
+         )
+         if self._observable is None:
+             raise ValueError("You need to provide at least one observable in the QNN constructor")
+         if (inputs is not None) and (len(self.inputs) == len(inputs)):
+             self.inputs = [sympy.symbols(x) if isinstance(x, str) else x for x in inputs]  # type: ignore[union-attr]
+         elif (inputs is None) and len(self.inputs) <= 1:
+             self.inputs = [sympy.symbols(x) if isinstance(x, str) else x for x in self.inputs]  # type: ignore[union-attr]
          else:
-             self._output_scaling = output_scaling
-         if not isinstance(output_shifting, torch.Tensor):
-             self.register_buffer(
-                 "_output_shifting",
-                 _set_fixed_operation(self.out_features, output_shifting, "shift"),
+             raise ValueError(
+                 """
+                 Your QNN has more than one input. Please provide a list of inputs in the order
+                 of your tensor domain. For example, if you want to pass
+                 `xs = torch.rand(batch_size, input_size:=3)` to your QNN, where
+                 ```
+                 t = xs[:,0]
+                 x = xs[:,1]
+                 y = xs[:,2]
+                 ```
+                 you have to specify
+                 ```
+                 QNN(circuit, observable, inputs=["t", "x", "y"])
+                 ```
+                 You can also pass a list of sympy symbols.
+                 """
              )
+         self.format_to_dict = format_to_dict_fn(self.inputs)  # type: ignore[arg-type]
+         self.input_diff_mode = InputDiffMode(input_diff_mode)
+         if self.input_diff_mode == InputDiffMode.FD:
+             from qadence.backends.utils import finitediff
+ 
+             self.__derivative = finitediff
+         elif self.input_diff_mode == InputDiffMode.AD:
+             self.__derivative = _torch_derivative  # type: ignore[assignment]
          else:
-             self._output_shifting = output_shifting
+             raise ValueError(f"Unknown forward diff mode: {self.input_diff_mode}")

-     def _format_to_dict(self, values: Tensor) -> dict[str, Tensor]:
-         """Format an input tensor into the format required by the forward pass.
+     @classmethod
+     def from_configs(
+         cls,
+         register: int | Register,
+         fm_config: Any,
+         ansatz_config: Any,
+         obs_config: Any,
+     ) -> QNN:
+         """Create a QNN from a set of configurations.
+ 
+         Args:
+             register: The number of qubits or a register object.
+             fm_config: The configuration for the feature map.
+             ansatz_config: The configuration for the ansatz.
+             obs_config: The configuration for the observable.

-         The tensor is assumed to have dimensions: n_batches x in_features where in_features
-         corresponds to the number of input features of the QNN
-         """
+         Returns:
+             A QNN object.
+ 
+         Example:
+         ```python exec="on" source="material-block" result="json"
+         import torch
+         from qadence.ml_tools.config import AnsatzConfig, FeatureMapConfig
+         from qadence.ml_tools import QNN
+         from qadence.constructors import ObservableConfig
+         from qadence.operations import Z
+         from qadence.types import (
+             AnsatzType, BasisSet, ReuploadScaling, ObservableTransform, Strategy
+         )

-         if len(values.size()) == 1:
-             values = values.reshape(-1, 1)
-         if len(values.size()) != 2 or values.shape[1] != len(self.model.inputs):
-             raise ValueError(
-                 f"Model expects in_features={self.model.in_features} but got {values.size()[1]}."
-             )
-         names = [p.name for p in self.model.inputs]
-         res = {}
-         for i, name in enumerate(names):
-             res[name] = values[:, i]
-         return res
+         register = 4
+         fm_config = FeatureMapConfig(
+             num_features=2,
+             inputs=["x", "y"],
+             basis_set=BasisSet.FOURIER,
+             reupload_scaling=ReuploadScaling.CONSTANT,
+             feature_range={
+                 "x": (-1.0, 1.0),
+                 "y": (0.0, 1.0),
+             },
+         )
+         ansatz_config = AnsatzConfig(
+             depth=2,
+             ansatz_type=AnsatzType.HEA,
+             ansatz_strategy=Strategy.DIGITAL,
+         )
+         obs_config = ObservableConfig(
+             detuning=Z,
+             scale=5.0,
+             shift=0.0,
+             transformation_type=ObservableTransform.SCALE,
+             trainable_transform=None,
+         )
+ 
+         qnn = QNN.from_configs(register, fm_config, ansatz_config, obs_config)

-     def _transform_x(self, x: dict[str, torch.Tensor] | Tensor) -> dict[str, Tensor] | Tensor:
+         x = torch.rand(2, 2)
+         y = qnn(x)
+         print(str(y))  # markdown-exec: hide
+         ```
          """
-         X can either be a torch Tensor in when using torch.nn.Module, or a standard values dict.
+         from .constructors import build_qnn_from_configs

-         Scales and shifts the tensors in the values dict, containing Featureparameters.
-         Transformation of inputs can be used to speed up training and avoid potential issues
-         with numerical stability that can arise due to differing feature scales.
-         If none are provided, it uses 0. for shifting and 1. for scaling (hence, identity).
+         return build_qnn_from_configs(register, fm_config, ansatz_config, obs_config)

-         Arguments:
-             values: A torch Tensor or a dict containing values for Featureparameters.
+     def forward(
+         self,
+         values: dict[str, Tensor] | Tensor | None = None,
+         state: Tensor | None = None,
+         measurement: Measurements | None = None,
+         noise: Noise | None = None,
+         endianness: Endianness = Endianness.BIG,
+     ) -> Tensor:
+         """Forward pass of the model.
+ 
+         This returns the (differentiable) expectation value of the given observable
+         operator defined in the constructor. Unlike the base QuantumModel
+         class, the QNN also accepts a tensor as input for the forward pass. The
+         tensor is expected to have shape `n_batches x in_features`, where `n_batches`
+         is the number of data points and `in_features` is the dimensionality of the problem.
+ 
+         The output of the forward pass is the expectation value of the input
+         observable(s). If a single observable is given, the output shape is
+         `n_batches`, while if multiple observables are given the output shape
+         is instead `n_batches x n_observables`.
+ 
+         Args:
+             values: the values of the feature parameters
+             state: Initial state.
+             measurement: Optional measurement protocol. If None,
+                 use exact expectation value with a statevector simulator.
+             noise: A noise model to use.
+             endianness: Endianness of the resulting bit strings.

          Returns:
-             A Tensor or dict containing transformed (scaled and/or shifted) Featureparameters.
+             Tensor: a tensor with the expectation value of the observables passed
+                 in the constructor of the model
          """
- 
-         if isinstance(self.model, (QuantumModel, QNN)):
-             if not isinstance(x, dict):
-                 x = self._format_to_dict(x)
-             if self.in_features == 1:
-                 return {
-                     key: self._input_scaling * (val + self._input_shifting)
-                     for key, val in x.items()
-                 }
-             else:
-                 return {
-                     key: self._input_scaling[idx] * (val + self._input_shifting[idx])
-                     for idx, (key, val) in enumerate(x.items())
-                 }
- 
-         else:
-             assert isinstance(self.model, torch.nn.Module) and isinstance(x, Tensor)
-             return self._input_scaling * (x + self._input_shifting)
- 
-     def forward(self, x: dict[str, Tensor] | Tensor, *args: Any, **kwargs: Any) -> Tensor:
-         y = self.model(self._transform_x(x), *args, **kwargs)
-         return self._output_scaling * y + self._output_shifting
+         return self.expectation(
+             values, state=state, measurement=measurement, noise=noise, endianness=endianness
+         )
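Because `forward` simply funnels everything through `format_to_dict` into `expectation`, a tensor batch and the equivalent values dict should produce identical outputs. A short sketch, reusing the illustrative two-feature circuit from the class docstring above:

```python
import torch
from qadence import QNN, QuantumCircuit, Z
from qadence import feature_map, hamiltonian_factory, hea, kron

n_qubits = 2
fm = kron(
    feature_map(1, support=(0,), param="x"),
    feature_map(1, support=(1,), param="y"),
)
circuit = QuantumCircuit(n_qubits, fm, hea(n_qubits=n_qubits, depth=2))
qnn = QNN(circuit, hamiltonian_factory(n_qubits, detuning=Z), inputs=["x", "y"])

xs = torch.rand(3, 2)
# tensor columns are mapped to "x" and "y" via the `inputs` ordering...
y_from_tensor = qnn(xs)
# ...which is equivalent to passing the values dict explicitly
y_from_dict = qnn({"x": xs[:, 0], "y": xs[:, 1]})
assert torch.allclose(y_from_tensor, y_from_dict)
```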

      def run(
          self,
-         values: dict[str, torch.Tensor],
-         state: torch.Tensor | None = None,
+         values: Tensor | dict[str, Tensor] | None = None,
+         state: Tensor | None = None,
          endianness: Endianness = Endianness.BIG,
      ) -> Tensor:
-         return self.model.run(values=self._transform_x(values), state=state, endianness=endianness)
+         return super().run(
+             values=self.format_to_dict(values),
+             state=state,
+             endianness=endianness,
+         )

      def sample(
          self,
-         values: dict[str, torch.Tensor],
+         values: Tensor | dict[str, Tensor] = {},
          n_shots: int = 1000,
-         state: torch.Tensor | None = None,
+         state: Tensor | None = None,
          noise: Noise | None = None,
+         mitigation: Mitigations | None = None,
          endianness: Endianness = Endianness.BIG,
      ) -> list[Counter]:
-         return self.model.sample(  # type: ignore[no-any-return]
-             values=self._transform_x(values),
+         return super().sample(
+             values=self.format_to_dict(values),
              n_shots=n_shots,
              state=state,
-             endianness=endianness,
              noise=noise,
+             mitigation=mitigation,
+             endianness=endianness,
          )

      def expectation(
          self,
-         values: dict[str, torch.Tensor],
-         observable: List[ConvertedObservable] | ConvertedObservable | None = None,
-         state: torch.Tensor | None = None,
+         values: Tensor | dict[str, Tensor] = {},
+         observable: list[ConvertedObservable] | ConvertedObservable | None = None,
+         state: Tensor | None = None,
          measurement: Measurements | None = None,
          noise: Noise | None = None,
+         mitigation: Mitigations | None = None,
          endianness: Endianness = Endianness.BIG,
      ) -> Tensor:
-         """
-         Computes standard expectation.
- 
-         However, scales and shifts the output tensor of the underlying model.
-         If none are provided, it uses 0. for shifting and 1. for scaling.
-         Transformation of ouputs can be used if the magnitude
-         of the targets exceeds the domain (-1,1).
-         """
-         exp = self.model.expectation(
-             values=self._transform_x(values),
-             observable=observable if observable is not None else self.model._observable,
+         if values is None:
+             values = {}
+         if measurement is None:
+             measurement = self._measurement
+         if noise is None:
+             noise = self._noise
+         return super().expectation(
+             values=self.format_to_dict(values),
              state=state,
              measurement=measurement,
-             noise=noise,
              endianness=endianness,
+             noise=noise,
          )
-         return self._output_scaling * exp + self._output_shifting
- 
-     def _to_dict(self, save_params: bool = True) -> dict:
-         from qadence.serialization import serialize
- 
-         def store_fn(x: torch.Tensor) -> list[float]:
-             res: list[float]
-             if x.requires_grad:
-                 res = x.detach().numpy().tolist()
-             else:
-                 res = x.numpy().tolist()
-             return res  # type: ignore[no-any-return]
- 
-         _d = serialize(self.model, save_params=save_params)
- 
-         return {
-             self.__class__.__name__: _d,
-             "in_features": self.in_features,
-             "out_features": self.out_features,
-             "_input_scaling": store_fn(self._input_scaling),
-             "_output_scaling": store_fn(self._output_scaling),
-             "_input_shifting": store_fn(self._input_shifting),
-             "_output_shifting": store_fn(self._output_shifting),
-         }
+ 
+     def _derivative(self, x: Tensor, derivative_indices: tuple[int, ...]) -> Tensor:
+         return self.__derivative(self, x, derivative_indices)
+ 
+     def _to_dict(self, save_params: bool = False) -> dict:
+         d = dict()
+         try:
+             d = super()._to_dict(save_params)
+             d[self.__class__.__name__]["inputs"] = [str(i) for i in self.inputs]
+             logger.debug(f"{self.__class__.__name__} serialized to {d}.")
+         except Exception as e:
+             logger.warning(f"Unable to serialize {self.__class__.__name__} due to {e}.")
+         return d

      @classmethod
-     def _from_dict(cls, d: dict, as_torch: bool = False) -> TransformedModule:
+     def _from_dict(cls, d: dict, as_torch: bool = False) -> QNN:
          from qadence.serialization import deserialize

-         _m: QuantumModel | QNN = deserialize(d[cls.__name__], as_torch)  # type: ignore[assignment]
-         return cls(
-             _m,
-             in_features=d["in_features"],
-             out_features=d["out_features"],
-             input_scaling=torch.tensor(d["_input_scaling"]),
-             output_scaling=torch.tensor(d["_output_scaling"]),
-             input_shifting=torch.tensor(d["_input_shifting"]),
-             output_shifting=torch.tensor(d["_output_shifting"]),
-         )
- 
-     def to(self, *args: Any, **kwargs: Any) -> TransformedModule:
+         qnn: QNN
          try:
-             self.model = self.model.to(*args, **kwargs)
-             if isinstance(self.model, QuantumModel):
-                 device = self.model._circuit.native.device
-                 dtype = (
-                     torch.float64
-                     if self.model._circuit.native.dtype == torch.cdouble
-                     else torch.float32
-                 )
+             qm_dict = d[cls.__name__]
+             qnn = cls(
+                 circuit=QuantumCircuit._from_dict(qm_dict["circuit"]),
+                 observable=[deserialize(q_obs) for q_obs in qm_dict["observable"]],  # type: ignore[misc]
+                 backend=qm_dict["backend"],
+                 diff_mode=qm_dict["diff_mode"],
+                 measurement=Measurements._from_dict(qm_dict["measurement"]),
+                 noise=Noise._from_dict(qm_dict["noise"]),
+                 configuration=config_factory(qm_dict["backend"], qm_dict["backend_configuration"]),
+                 inputs=qm_dict["inputs"],
+             )
+ 
+             if as_torch:
+                 conv_pd = nn.ParameterDict()
+                 param_dict = d["param_dict"]
+                 for n, param in param_dict.items():
+                     conv_pd[n] = nn.Parameter(param)
+                 qnn._params = conv_pd
+             logger.debug(f"Initialized {cls.__name__} from {d}.")

-                 self._input_scaling = self._input_scaling.to(device=device, dtype=dtype)
-                 self._input_shifting = self._input_shifting.to(device=device, dtype=dtype)
-                 self._output_scaling = self._output_scaling.to(device=device, dtype=dtype)
-                 self._output_shifting = self._output_shifting.to(device=device, dtype=dtype)
-             elif isinstance(self.model, torch.nn.Module):
-                 self._input_scaling = self._input_scaling.to(*args, **kwargs)
-                 self._input_shifting = self._input_shifting.to(*args, **kwargs)
-                 self._output_scaling = self._output_scaling.to(*args, **kwargs)
-                 self._output_shifting = self._output_shifting.to(*args, **kwargs)
-             logger.debug(f"Moved {self} to {args}, {kwargs}.")
          except Exception as e:
-             logger.warning(f"Unable to move {self} to {args}, {kwargs} due to {e}.")
-         return self
- 
-     @property
-     def device(self) -> torch.device:
-         return (
-             self.model.device
-             if isinstance(self.model, QuantumModel)
-             else self._input_scaling.device
-         )
+             logger.warning(f"Unable to deserialize object {d} to {cls.__name__} due to {e}.")
+ 
+         return qnn
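The `_to_dict`/`_from_dict` hooks round-trip the new `inputs` attribute, so the column ordering of a QNN survives serialization. A hedged sketch, assuming `qadence.serialization.serialize` and `deserialize` (both referenced in the code above) dispatch to these hooks:

```python
from qadence import QNN, QuantumCircuit, Z
from qadence import feature_map, hamiltonian_factory, hea, kron
from qadence.serialization import deserialize, serialize

n_qubits = 2
fm = kron(
    feature_map(1, support=(0,), param="x"),
    feature_map(1, support=(1,), param="y"),
)
circuit = QuantumCircuit(n_qubits, fm, hea(n_qubits=n_qubits, depth=2))
qnn = QNN(circuit, hamiltonian_factory(n_qubits, detuning=Z), inputs=["x", "y"])

d = serialize(qnn)         # dict representation produced via QNN._to_dict
restored = deserialize(d)  # rebuilt via QNN._from_dict
assert [str(s) for s in restored.inputs] == ["x", "y"]
```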