qadence 1.7.5__py3-none-any.whl → 1.7.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as published.
- qadence/backend.py +0 -27
- qadence/backends/braket/backend.py +0 -10
- qadence/backends/gpsr.py +19 -3
- qadence/backends/horqrux/backend.py +0 -10
- qadence/backends/pulser/backend.py +31 -26
- qadence/backends/pyqtorch/backend.py +0 -10
- qadence/backends/pyqtorch/convert_ops.py +107 -120
- qadence/engines/torch/differentiable_expectation.py +9 -2
- qadence/mitigations/analog_zne.py +2 -2
- qadence/ml_tools/__init__.py +2 -1
- qadence/ml_tools/config.py +61 -3
- qadence/ml_tools/saveload.py +5 -1
- qadence/ml_tools/train_grad.py +2 -7
- qadence/ml_tools/train_no_grad.py +2 -6
- {qadence-1.7.5.dist-info → qadence-1.7.7.dist-info}/METADATA +4 -4
- {qadence-1.7.5.dist-info → qadence-1.7.7.dist-info}/RECORD +18 -18
- {qadence-1.7.5.dist-info → qadence-1.7.7.dist-info}/WHEEL +0 -0
- {qadence-1.7.5.dist-info → qadence-1.7.7.dist-info}/licenses/LICENSE +0 -0
qadence/backend.py
CHANGED
@@ -282,33 +282,6 @@ class Backend(ABC):
         """
         raise NotImplementedError
 
-    @abstractmethod
-    def run_dm(
-        self,
-        circuit: ConvertedCircuit,
-        noise: Noise,
-        param_values: dict[str, ArrayLike] = {},
-        state: Tensor | None = None,
-        endianness: Endianness = Endianness.BIG,
-    ) -> Tensor:
-        """Run a circuit and return the resulting the density matrix.
-
-        TODO: Temporary method for the purposes of noise model implementation.
-        To be removed in a later refactoring.
-
-        Arguments:
-            circuit: A converted circuit as returned by `backend.circuit`.
-            param_values: _**Already embedded**_ parameters of the circuit. See
-                [`embedding`][qadence.blocks.embedding.embedding] for more info.
-            state: Initial state.
-            endianness: Endianness of the resulting density matrix.
-
-        Returns:
-            A list of Counter objects where each key represents a bitstring
-            and its value the number of times it has been sampled from the given wave function.
-        """
-        raise NotImplementedError
-
     @abstractmethod
     def expectation(
         self,
qadence/backends/braket/backend.py
CHANGED
@@ -131,16 +131,6 @@ class Backend(BackendInterface):
         states = invert_endianness(states)
         return states
 
-    def run_dm(
-        self,
-        circuit: ConvertedCircuit,
-        noise: Noise,
-        param_values: dict[str, Tensor] = {},
-        state: Tensor | None = None,
-        endianness: Endianness = Endianness.BIG,
-    ) -> Tensor:
-        raise NotImplementedError
-
     def sample(
         self,
         circuit: ConvertedCircuit,
qadence/backends/gpsr.py
CHANGED
@@ -10,14 +10,30 @@ from qadence.types import PI
 from qadence.utils import _round_complex
 
 
-def general_psr(spectrum: Tensor, shift_prefac: float = 0.5) -> Callable:
+def general_psr(spectrum: Tensor, n_eqs: int | None = None, shift_prefac: float = 0.5) -> Callable:
+    """Define whether single_gap_psr or multi_gap_psr is used.
+
+    Args:
+        spectrum (Tensor): Spectrum of the operation we apply PSR onto.
+        n_eqs (int | None, optional): Number of equations. Defaults to None.
+            If provided, we keep the n_eqs higher spectral gaps.
+        shift_prefac (float, optional): Shift prefactor. Defaults to 0.5.
+
+    Returns:
+        Callable: single_gap_psr or multi_gap_psr function for
+            concerned operation.
+    """
     diffs = _round_complex(spectrum - spectrum.reshape(-1, 1))
     sorted_unique_spectral_gaps = torch.unique(torch.abs(torch.tril(diffs)))
 
     # We have to filter out zeros
     sorted_unique_spectral_gaps = sorted_unique_spectral_gaps[sorted_unique_spectral_gaps > 0]
-    n_eqs = len(sorted_unique_spectral_gaps)
-
+    n_eqs = (
+        len(sorted_unique_spectral_gaps)
+        if n_eqs is None
+        else min(n_eqs, len(sorted_unique_spectral_gaps))
+    )
+    sorted_unique_spectral_gaps = torch.tensor(list(sorted_unique_spectral_gaps)[:n_eqs])
 
     if n_eqs == 1:
         return single_gap_psr
qadence/backends/horqrux/backend.py
CHANGED
@@ -107,16 +107,6 @@ class Backend(BackendInterface):
         state = unhorqify(state)
         return state
 
-    def run_dm(
-        self,
-        circuit: ConvertedCircuit,
-        noise: Noise,
-        param_values: ParamDictType = {},
-        state: ArrayLike | None = None,
-        endianness: Endianness = Endianness.BIG,
-    ) -> ArrayLike:
-        raise NotImplementedError
-
     def expectation(
         self,
         circuit: ConvertedCircuit,
qadence/backends/pulser/backend.py
CHANGED
@@ -187,6 +187,7 @@ class Backend(BackendInterface):
         param_values: dict[str, Tensor] = {},
         state: Tensor | None = None,
         endianness: Endianness = Endianness.BIG,
+        noise: Noise | None = None,
     ) -> Tensor:
         vals = to_list_of_dicts(param_values)
 
@@ -197,37 +198,41 @@ class Backend(BackendInterface):
                 "specify any cloud credentials to use the .run() method"
             )
 
-        state = state if state is None else _convert_init_state(state)
-        batched_wf = np.zeros((len(vals), 2**circuit.abstract.n_qubits), dtype=np.complex128)
-
-        for i, param_values_el in enumerate(vals):
-            sequence = self.assign_parameters(circuit, param_values_el)
-            pattern = circuit.original.register.device_specs.pattern
-            if pattern is not None:
-                add_addressing_pattern(sequence, pattern)
-            sequence.measure()
-            sim_result = simulate_sequence(sequence, self.config, state, n_shots=None)
-            wf = (
-                sim_result.get_final_state(
-                    ignore_global_phase=False, normalize=True
-                )
-                .full()
-                .flatten()
-            )
-            # We flip the wavefunction coming out of pulser,
-            # essentially changing logic 0 with logic 1 in the basis states.
-            batched_wf[i] = np.flip(wf)
-
-        batched_wf_torch = torch.from_numpy(batched_wf)
-
-        if endianness != self.native_endianness:
-            from qadence.transpile import invert_endianness
-
-            batched_wf_torch = invert_endianness(batched_wf_torch)
-
-        return batched_wf_torch
-
-    def run_dm(
+        if noise is None:
+            state = state if state is None else _convert_init_state(state)
+            batched_wf = np.zeros((len(vals), 2**circuit.abstract.n_qubits), dtype=np.complex128)
+
+            for i, param_values_el in enumerate(vals):
+                sequence = self.assign_parameters(circuit, param_values_el)
+                pattern = circuit.original.register.device_specs.pattern
+                if pattern is not None:
+                    add_addressing_pattern(sequence, pattern)
+                sequence.measure()
+                sim_result = simulate_sequence(sequence, self.config, state, n_shots=None)
+                wf = (
+                    sim_result.get_final_state(  # type:ignore [union-attr]
+                        ignore_global_phase=False, normalize=True
+                    )
+                    .full()
+                    .flatten()
+                )
+                # We flip the wavefunction coming out of pulser,
+                # essentially changing logic 0 with logic 1 in the basis states.
+                batched_wf[i] = np.flip(wf)
+
+            batched_wf_torch = torch.from_numpy(batched_wf)
+
+            if endianness != self.native_endianness:
+                from qadence.transpile import invert_endianness
+
+                batched_wf_torch = invert_endianness(batched_wf_torch)
+
+            return batched_wf_torch
+
+        else:
+            return self._run_noisy(circuit, noise, param_values, state, endianness)
+
+    def _run_noisy(
         self,
         circuit: ConvertedCircuit,
         noise: Noise,
@@ -342,12 +347,12 @@ class Backend(BackendInterface):
             res = res if len(res.shape) > 0 else res.reshape(1)
             return res.real
         elif noise is not None:
-            dms = self.run_dm(
+            dms = self.run(
                 circuit=circuit,
-                noise=noise,
                 param_values=param_values,
                 state=state,
                 endianness=endianness,
+                noise=noise,
            )
             support = sorted(list(circuit.abstract.register.support))
             # TODO: There should be a better check for batched density matrices.
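In user-facing terms, the change above folds density-matrix simulation into `run`: passing a `noise` model now dispatches to the noisy path (`_run_noisy`) instead of the removed `run_dm`. A hedged before/after sketch, assuming `backend`, `conv_circuit`, `values` and `noise` are already set up:

    # qadence <= 1.7.5 (method removed in 1.7.7):
    # dm = backend.run_dm(conv_circuit, noise=noise, param_values=values)

    # qadence 1.7.7: the noise argument selects the noisy simulation path.
    dm = backend.run(conv_circuit, param_values=values, noise=noise)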
qadence/backends/pyqtorch/backend.py
CHANGED
@@ -106,16 +106,6 @@ class Backend(BackendInterface):
         state = invert_endianness(state) if endianness != self.native_endianness else state
         return state
 
-    def run_dm(
-        self,
-        circuit: ConvertedCircuit,
-        noise: Noise,
-        param_values: dict[str, Tensor] = {},
-        state: Tensor | None = None,
-        endianness: Endianness = Endianness.BIG,
-    ) -> Tensor:
-        raise NotImplementedError
-
     def _batched_expectation(
         self,
         circuit: ConvertedCircuit,
qadence/backends/pyqtorch/convert_ops.py
CHANGED
@@ -7,7 +7,6 @@ from typing import Any, Sequence, Tuple
 import pyqtorch as pyq
 import sympy
 import torch
-from pyqtorch.apply import apply_operator
 from pyqtorch.embed import Embedding
 from pyqtorch.matrices import _dagger
 from pyqtorch.time_dependent.sesolve import sesolve
@@ -15,6 +14,7 @@ from pyqtorch.utils import is_diag
 from torch import (
     Tensor,
     cdouble,
+    complex64,
     diag_embed,
     diagonal,
     exp,
@@ -45,7 +45,6 @@ from qadence.blocks import (
 )
 from qadence.blocks.block_to_tensor import (
     _block_to_tensor_embedded,
-    block_to_tensor,
 )
 from qadence.blocks.primitive import ProjectorBlock
 from qadence.blocks.utils import parameters
@@ -78,6 +77,27 @@ def is_single_qubit_chain(block: AbstractBlock) -> bool:
     )
 
 
+def extract_parameter(block: ScaleBlock | ParametricBlock, config: Configuration) -> str | Tensor:
+    """Extract the parameter as string or its tensor value.
+
+    Args:
+        block (ScaleBlock | ParametricBlock): Block to extract parameter from.
+        config (Configuration): Configuration instance.
+
+    Returns:
+        str | Tensor: Parameter value or symbol.
+    """
+    if not block.is_parametric:
+        tensor_val = tensor([block.parameters.parameter], dtype=complex64)
+        return (
+            tensor([block.parameters.parameter], dtype=float64)
+            if torch.all(tensor_val.imag == 0)
+            else tensor_val
+        )
+
+    return config.get_param_name(block)[0]
+
+
 def convert_block(
     block: AbstractBlock, n_qubits: int = None, config: Configuration = None
 ) -> Sequence[Module | Tensor | str | sympy.Expr]:
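The `extract_parameter` helper centralizes logic that was previously inlined at each gate-conversion site: parametric blocks yield their parameter name, while fixed values become tensors, complex only when needed. A standalone sketch of the value branch (plain torch; `as_scale_tensor` is an illustrative name, and `value` stands in for `block.parameters.parameter`):

    import torch

    def as_scale_tensor(value: complex) -> torch.Tensor:
        # Build a complex tensor first, then downcast to float64
        # when the imaginary part vanishes, mirroring extract_parameter.
        tensor_val = torch.tensor([value], dtype=torch.complex64)
        if torch.all(tensor_val.imag == 0):
            return torch.tensor([value.real], dtype=torch.float64)
        return tensor_val

    print(as_scale_tensor(2.0))     # tensor([2.], dtype=torch.float64)
    print(as_scale_tensor(1 + 2j))  # tensor([1.+2.j])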
@@ -94,29 +114,37 @@ def convert_block(
 
     if isinstance(block, ScaleBlock):
         scaled_ops = convert_block(block.block, n_qubits, config)
-        scale = (
-            tensor([block.parameters.parameter], dtype=float64)
-            if not block.is_parametric
-            else config.get_param_name(block)[0]
-        )
+        scale = extract_parameter(block, config)
         return [pyq.Scale(pyq.Sequence(scaled_ops), scale)]
 
     elif isinstance(block, TimeEvolutionBlock):
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        if getattr(block.generator, "is_time_dependent", False):
+            return [PyQTimeDependentEvolution(qubit_support, n_qubits, block, config)]
+        else:
+            if isinstance(block.generator, sympy.Basic):
+                generator = config.get_param_name(block)[1]
+            elif isinstance(block.generator, Tensor):
+                m = block.generator.to(dtype=cdouble)
+                generator = convert_block(
+                    MatrixBlock(
+                        m,
+                        qubit_support=qubit_support,
+                        check_unitary=False,
+                        check_hermitian=True,
+                    )
+                )[0]
+            else:
+                generator = convert_block(block.generator, n_qubits, config)[0]  # type: ignore[arg-type]
+            time_param = config.get_param_name(block)[0]
+            return [
+                pyq.HamiltonianEvolution(
+                    qubit_support=qubit_support,
+                    generator=generator,
+                    time=time_param,
+                    cache_length=0,
+                )
+            ]
+
     elif isinstance(block, MatrixBlock):
         return [pyq.primitives.Primitive(block.matrix, block.qubit_support)]
     elif isinstance(block, CompositeBlock):
@@ -142,14 +170,14 @@ def convert_block(
         if isinstance(block, U):
             op = pyq_cls(qubit_support[0], *config.get_param_name(block))
         else:
-            op = pyq_cls(qubit_support[0], config.get_param_name(block)[0])
+            op = pyq_cls(qubit_support[0], extract_parameter(block, config))
     else:
         op = pyq_cls(qubit_support[0])
         return [op]
     elif isinstance(block, tuple(two_qubit_gateset)):
         pyq_cls = getattr(pyq, block.name)
         if isinstance(block, ParametricBlock):
-            op = pyq_cls(qubit_support[0], qubit_support[1], config.get_param_name(block)[0])
+            op = pyq_cls(qubit_support[0], qubit_support[1], extract_parameter(block, config))
         else:
             op = pyq_cls(qubit_support[0], qubit_support[1])
         return [op]
@@ -157,7 +185,7 @@ def convert_block(
         block_name = block.name[1:] if block.name.startswith("M") else block.name
         pyq_cls = getattr(pyq, block_name)
         if isinstance(block, ParametricBlock):
-            op = pyq_cls(qubit_support[:-1], qubit_support[-1], config.get_param_name(block)[0])
+            op = pyq_cls(qubit_support[:-1], qubit_support[-1], extract_parameter(block, config))
         else:
             if "CSWAP" in block_name:
                 op = pyq_cls(qubit_support[:-2], qubit_support[-2:])
@@ -172,7 +200,7 @@ def convert_block(
     )
 
 
-class PyQHamiltonianEvolution(Module):
+class PyQTimeDependentEvolution(Module):
     def __init__(
         self,
         qubit_support: Tuple[int, ...],
@@ -188,50 +216,17 @@ class PyQHamiltonianEvolution(Module):
         self.hmat: Tensor
         self.config = config
 
-        if isinstance(block.generator, AbstractBlock) and not block.generator.is_parametric:
-            hmat = block_to_tensor(
-                block.generator,
-                qubit_support=self.qubit_support,
-                use_full_support=False,
-            )
-            hmat = hmat.permute(1, 2, 0)
-            self.register_buffer("hmat", hmat)
-            self._hamiltonian = lambda self, values: self.hmat
-
-        elif isinstance(block.generator, Tensor):
-            m = block.generator.to(dtype=cdouble)
-            hmat = block_to_tensor(
-                MatrixBlock(
-                    m,
-                    qubit_support=block.qubit_support,
-                    check_unitary=False,
-                    check_hermitian=True,
-                ),
-                qubit_support=self.qubit_support,
-                use_full_support=False,
-            )
-            hmat = hmat.permute(1, 2, 0)
-            self.register_buffer("hmat", hmat)
-            self._hamiltonian = lambda self, values: self.hmat
-
-        elif isinstance(block.generator, sympy.Basic):
-            self._hamiltonian = (
-                lambda self, values: values[self.param_names[1]].squeeze(3).permute(1, 2, 0)
-            )
-            # FIXME Why are we squeezing
-        else:
-
-            def _hamiltonian(self: PyQHamiltonianEvolution, values: dict[str, Tensor]) -> Tensor:
-                hmat = _block_to_tensor_embedded(
-                    block.generator,  # type: ignore[arg-type]
-                    values=values,
-                    qubit_support=self.qubit_support,
-                    use_full_support=False,
-                    device=self.device,
-                )
-                return hmat.permute(1, 2, 0)
-
-            self._hamiltonian = _hamiltonian
+        def _hamiltonian(self: PyQTimeDependentEvolution, values: dict[str, Tensor]) -> Tensor:
+            hmat = _block_to_tensor_embedded(
+                block.generator,  # type: ignore[arg-type]
+                values=values,
+                qubit_support=self.qubit_support,
+                use_full_support=False,
+                device=self.device,
+            )
+            return hmat.permute(1, 2, 0)
+
+        self._hamiltonian = _hamiltonian
 
         self._time_evolution = lambda values: values[self.param_names[0]]
         self._device: torch_device = (
@@ -322,59 +317,51 @@ class PyQHamiltonianEvolution(Module):
         values: dict[str, Tensor] | ParameterDict = dict(),
         embedding: Embedding | None = None,
     ) -> Tensor:
-        if getattr(self.block.generator, "is_time_dependent", False):
-
-            def Ht(t: Tensor | float) -> Tensor:
-                # values dict has to change with new value of t
-                # initial value of a feature parameter inside generator block
-                # has to be inferred here
-                new_vals = dict()
-                for str_expr, val in values.items():
-                    expr = sympy.sympify(str_expr)
-                    t_symb = sympy.Symbol(self._get_time_parameter())
-                    free_symbols = expr.free_symbols
-                    if t_symb in free_symbols:
-                        # create substitution list for time and feature params
-                        subs_list = [(t_symb, t)]
-
-                        if len(free_symbols) > 1:
-                            # get feature param symbols
-                            feat_symbols = free_symbols.difference(set([t_symb]))
-
-                            # get feature param values
-                            feat_vals = values["orig_param_values"]
-
-                            # update substitution list with feature param values
-                            for fs in feat_symbols:
-                                subs_list.append((fs, feat_vals[str(fs)]))
-
-                        # evaluate expression with new time param value
-                        new_vals[str_expr] = torch.tensor(float(expr.subs(subs_list)))
-                    else:
-                        # expression doesn't contain time parameter - copy it as is
-                        new_vals[str_expr] = val
-
-                # get matrix form of generator
-                hmat = _block_to_tensor_embedded(
-                    self.block.generator,  # type: ignore[arg-type]
-                    values=new_vals,
-                    qubit_support=self.qubit_support,
-                    use_full_support=False,
-                    device=self.device,
-                ).squeeze(0)
-
-                return hmat
-
-            tsave = torch.linspace(0, self.block.duration, self.config.n_steps_hevo)  # type: ignore [attr-defined]
-            result = pyqify(
-                sesolve(Ht, unpyqify(state).T[:, 0:1], tsave, self.config.ode_solver).states[-1].T
-            )
-        else:
-            result = apply_operator(
-                state,
-                self.unitary(values),
-                self.qubit_support,
-            )
+        def Ht(t: Tensor | float) -> Tensor:
+            # values dict has to change with new value of t
+            # initial value of a feature parameter inside generator block
+            # has to be inferred here
+            new_vals = dict()
+            for str_expr, val in values.items():
+                expr = sympy.sympify(str_expr)
+                t_symb = sympy.Symbol(self._get_time_parameter())
+                free_symbols = expr.free_symbols
+                if t_symb in free_symbols:
+                    # create substitution list for time and feature params
+                    subs_list = [(t_symb, t)]
+
+                    if len(free_symbols) > 1:
+                        # get feature param symbols
+                        feat_symbols = free_symbols.difference(set([t_symb]))
+
+                        # get feature param values
+                        feat_vals = values["orig_param_values"]
+
+                        # update substitution list with feature param values
+                        for fs in feat_symbols:
+                            subs_list.append((fs, feat_vals[str(fs)]))
+
+                    # evaluate expression with new time param value
+                    new_vals[str_expr] = torch.tensor(float(expr.subs(subs_list)))
+                else:
+                    # expression doesn't contain time parameter - copy it as is
+                    new_vals[str_expr] = val
+
+            # get matrix form of generator
+            hmat = _block_to_tensor_embedded(
+                self.block.generator,  # type: ignore[arg-type]
+                values=new_vals,
+                qubit_support=self.qubit_support,
+                use_full_support=False,
+                device=self.device,
+            ).squeeze(0)
+
+            return hmat
+
+        tsave = torch.linspace(0, self.block.duration, self.config.n_steps_hevo)  # type: ignore [attr-defined]
+        result = pyqify(
+            sesolve(Ht, unpyqify(state).T[:, 0:1], tsave, self.config.ode_solver).states[-1].T
+        )
 
         return result
 
@@ -386,7 +373,7 @@ class PyQHamiltonianEvolution(Module):
     def dtype(self) -> torch_dtype:
         return self._dtype
 
-    def to(self, *args: Any, **kwargs: Any) -> PyQHamiltonianEvolution:
+    def to(self, *args: Any, **kwargs: Any) -> PyQTimeDependentEvolution:
         if hasattr(self, "hmat"):
             self.hmat = self.hmat.to(*args, **kwargs)
             self._device = self.hmat.device
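Taken together, these hunks split Hamiltonian evolution in two: time-independent generators now map onto pyqtorch's native `HamiltonianEvolution`, while only generators flagged `is_time_dependent` go through the renamed `PyQTimeDependentEvolution`, which rebuilds the Hamiltonian matrix at each time step and integrates with `sesolve`. The heart of that ODE path is re-evaluating parameter expressions at each step by substituting the time symbol, as `Ht` does above. A self-contained sketch of that substitution step (sympy and torch only; names are illustrative):

    import sympy
    import torch

    def eval_at_time(str_expr: str, t_value: float, feat_vals: dict[str, float]) -> torch.Tensor:
        # Substitute the time symbol and any feature parameters, as Ht does above.
        expr = sympy.sympify(str_expr)
        t_symb = sympy.Symbol("t")
        subs_list = [(t_symb, t_value)]
        for fs in expr.free_symbols - {t_symb}:
            subs_list.append((fs, feat_vals[str(fs)]))
        return torch.tensor(float(expr.subs(subs_list)))

    # omega * sin(t) evaluated at t = pi/2 with omega = 2.0 -> approximately tensor(2.)
    print(eval_at_time("omega * sin(t)", 1.5707963, {"omega": 2.0}))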
qadence/engines/torch/differentiable_expectation.py
CHANGED
@@ -231,8 +231,15 @@ class DifferentiableExpectation:
             if shift_factor == 1:
                 param_to_psr[param_id] = psr_fn(eigenvalues, **psr_args)
             else:
-                psr_args_factor =
-
+                psr_args_factor = psr_args.copy()
+                if "shift_prefac" in psr_args_factor:
+                    if psr_args_factor["shift_prefac"] is not None:
+                        psr_args_factor["shift_prefac"] = (
+                            shift_factor * psr_args_factor["shift_prefac"]
+                        )
+                    else:
+                        psr_args_factor["shift_prefac"] = shift_factor
+                param_to_psr[param_id] = psr_fn(eigenvalues, **psr_args_factor)
         for obs in observable:
             for param_id, _ in uuid_to_eigen(obs).items():
                 # We need the embedded fixed params of the observable in the param_values dict
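A minimal numeric sketch of the `shift_prefac` rescaling above (pure dict manipulation, no qadence imports; the values are illustrative):

    psr_args = {"n_eqs": None, "shift_prefac": 0.5}
    shift_factor = 2.0

    psr_args_factor = psr_args.copy()
    if "shift_prefac" in psr_args_factor:
        if psr_args_factor["shift_prefac"] is not None:
            # Scale the existing prefactor...
            psr_args_factor["shift_prefac"] = shift_factor * psr_args_factor["shift_prefac"]
        else:
            # ...or fall back to the bare factor.
            psr_args_factor["shift_prefac"] = shift_factor

    assert psr_args_factor["shift_prefac"] == 1.0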
qadence/mitigations/analog_zne.py
CHANGED
@@ -82,7 +82,7 @@ def pulse_experiment(
         conv_circuit = backend.circuit(stretched_circuit)
         noisy_density_matrices.append(
             # Contain a single experiment result for the stretch.
-            backend.run_dm(
+            backend.run(
                 conv_circuit,
                 param_values=param_values,
                 state=state,
@@ -124,7 +124,7 @@ def noise_level_experiment(
     zne_datasets: list = []
     # Get noisy density matrices.
     conv_circuit = backend.circuit(circuit)
-    noisy_density_matrices = backend.run_dm(
+    noisy_density_matrices = backend.run(
         conv_circuit, param_values=param_values, state=state, noise=noise, endianness=endianness
     )
     # Convert observable to Numpy types compatible with QuTip simulations.
qadence/ml_tools/__init__.py
CHANGED
@@ -2,7 +2,7 @@ from __future__ import annotations
 
 from .config import AnsatzConfig, Callback, FeatureMapConfig, TrainConfig
 from .constructors import create_ansatz, create_fm_blocks, observable_from_config
-from .data import DictDataLoader, InfiniteTensorDataset, to_dataloader
+from .data import DictDataLoader, InfiniteTensorDataset, OptimizeResult, to_dataloader
 from .models import QNN
 from .optimize_step import optimize_step as default_optimize_step
 from .parameters import get_parameters, num_parameters, set_parameters
@@ -23,6 +23,7 @@ __all__ = [
     "observable_from_config",
     "QNN",
     "TrainConfig",
+    "OptimizeResult",
     "Callback",
     "train_with_grad",
     "train_gradient_free",
qadence/ml_tools/config.py
CHANGED
@@ -38,15 +38,22 @@ class Callback:
     Each callback function should take at least as first input
     an OptimizeResult instance.
 
+    Note: when setting call_after_opt to True, we skip
+    verifying iteration % called_every == 0.
+
     Attributes:
         callback (CallbackFunction): Callback function accepting an
             OptimizeResult as first argument.
         callback_condition (CallbackConditionFunction | None, optional): Function that
             conditions the call to callback. Defaults to None.
+        modify_optimize_result (CallbackFunction | dict[str, Any] | None, optional):
+            Function that modify the OptimizeResult before callback.
+            For instance, one can change the `extra` (dict) argument to be used in callback.
+            If a dict is provided, the `extra` field of OptimizeResult is updated with the dict.
         called_every (int, optional): Callback to be called each `called_every` epoch.
             Defaults to 1.
             If callback_condition is None, we set
-            callback_condition to returns True when iteration %
+            callback_condition to returns True when iteration % called_every == 0.
         call_before_opt (bool, optional): If true, callback is applied before training.
             Defaults to False.
         call_end_epoch (bool, optional): If true, callback is applied during training,
@@ -61,6 +68,7 @@ class Callback:
         self,
         callback: CallbackFunction,
         callback_condition: CallbackConditionFunction | None = None,
+        modify_optimize_result: CallbackFunction | dict[str, Any] | None = None,
         called_every: int = 1,
         call_before_opt: bool = False,
         call_end_epoch: bool = True,
@@ -74,10 +82,13 @@ class Callback:
                OptimizeResult as ifrst argument.
            callback_condition (CallbackConditionFunction | None, optional): Function that
                conditions the call to callback. Defaults to None.
+            modify_optimize_result (CallbackFunction | dict[str, Any] | None , optional):
+                Function that modify the OptimizeResult before callback. If a dict
+                is provided, this updates the `extra` field of OptimizeResult.
            called_every (int, optional): Callback to be called each `called_every` epoch.
                Defaults to 1.
                If callback_condition is None, we set
-                callback_condition to returns True when iteration %
+                callback_condition to returns True when iteration % called_every == 0.
            call_before_opt (bool, optional): If true, callback is applied before training.
                Defaults to False.
            call_end_epoch (bool, optional): If true, callback is applied during training,
@@ -102,9 +113,56 @@ class Callback:
         else:
             self.callback_condition = callback_condition
 
-    def __call__(self, opt_result: OptimizeResult) -> Any:
+        if modify_optimize_result is None:
+            self.modify_optimize_result = lambda opt_result: opt_result
+        elif isinstance(modify_optimize_result, dict):
+
+            def update_extra(opt_result: OptimizeResult) -> OptimizeResult:
+                opt_result.extra.update(modify_optimize_result)
+                return opt_result
+
+            self.modify_optimize_result = update_extra
+        else:
+            self.modify_optimize_result = modify_optimize_result
+
+    def __call__(self, opt_result: OptimizeResult, is_last_iteration: bool = False) -> Any:
+        """Apply callback if conditions are met.
+
+        Note that the current result may be modified by specifying a function
+        `modify_optimize_result` for instance to add inputs to the `extra` argument
+        of the current OptimizeResult.
+
+        Args:
+            opt_result (OptimizeResult): Current result.
+            is_last_iteration (bool, optional): When True,
+                avoid verifying modulo. Defaults to False.
+                Useful when call_after_opt is True.
+
+        Returns:
+            Any: The result of the callback.
+        """
+        opt_result = self.modify_optimize_result(opt_result)
         if opt_result.iteration % self.called_every == 0 and self.callback_condition(opt_result):
             return self.callback(opt_result)
+        if is_last_iteration and self.callback_condition(opt_result):
+            return self.callback(opt_result)
+
+
+def run_callbacks(
+    callback_iterable: list[Callback], opt_res: OptimizeResult, is_last_iteration: bool = False
+) -> None:
+    """Run a list of Callback given the current OptimizeResult.
+
+    Used in train functions.
+
+    Args:
+        callback_iterable (list[Callback]): Iterable of Callbacks
+        opt_res (OptimizeResult): Current optimization result,
+        is_last_iteration (bool, optional): Whether we reached the last iteration or not.
+            Defaults to False.
+    """
+    for callback in callback_iterable:
+        callback(opt_res, is_last_iteration)
 
 
 @dataclass
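A hedged usage sketch of the new `modify_optimize_result` hook, assuming the 1.7.7 API shown above (the printing callback and the `run_name` key are illustrative):

    from qadence.ml_tools import Callback

    # Dict form: merged into OptimizeResult.extra before the callback runs.
    tagged = Callback(
        callback=lambda opt_res: print(opt_res.iteration, opt_res.extra["run_name"]),
        modify_optimize_result={"run_name": "experiment-42"},
        called_every=10,
    )

    # Equivalent callable form: receives and returns the OptimizeResult.
    def add_tag(opt_res):
        opt_res.extra["run_name"] = "experiment-42"
        return opt_res

    tagged_fn = Callback(
        callback=lambda opt_res: print(opt_res.iteration, opt_res.extra["run_name"]),
        modify_optimize_result=add_tag,
        called_every=10,
    )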
qadence/ml_tools/saveload.py
CHANGED
@@ -72,7 +72,11 @@ def write_checkpoint(
     device = None
     try:
         # We extract the device from the pyqtorch native circuit
-        device = model.device if isinstance(model, (QNN, QuantumModel)) else next(model.parameters()).device
+        device = (
+            model.device
+            if isinstance(model, (QNN, QuantumModel))
+            else next(model.parameters()).device
+        )
         device = str(device).split(":")[0]  # in case of using several CUDA devices
     except Exception as e:
         msg = (
qadence/ml_tools/train_grad.py
CHANGED
@@ -14,7 +14,7 @@ from torch.optim import Optimizer
 from torch.utils.data import DataLoader
 from torch.utils.tensorboard import SummaryWriter
 
-from qadence.ml_tools.config import Callback, TrainConfig
+from qadence.ml_tools.config import Callback, TrainConfig, run_callbacks
 from qadence.ml_tools.data import DictDataLoader, OptimizeResult, data_to_device
 from qadence.ml_tools.optimize_step import optimize_step
 from qadence.ml_tools.printing import (
@@ -194,7 +194,6 @@ def train(
         Callback(
             lambda opt_res: print_metrics(opt_res.loss, opt_res.metrics, opt_res.iteration - 1),
             called_every=config.print_every,
-            call_after_opt=True,
         )
     ]
 
@@ -262,10 +261,6 @@ def train(
         )
     ]
 
-    def run_callbacks(callback_iterable: list[Callback], opt_res: OptimizeResult) -> None:
-        for callback in callback_iterable:
-            callback(opt_res)
-
     callbacks_before_opt = [
         callback
         for callback in callbacks
@@ -349,7 +344,7 @@ def train(
 
     # Final callbacks, by default checkpointing and writing
     callbacks_after_opt = [callback for callback in callbacks if callback.call_after_opt]
-    run_callbacks(callbacks_after_opt, opt_result)
+    run_callbacks(callbacks_after_opt, opt_result, is_last_iteration=True)
 
     # writing hyperparameters
     if config.hyperparams:
qadence/ml_tools/train_no_grad.py
CHANGED
@@ -12,7 +12,7 @@ from torch.nn import Module
 from torch.utils.data import DataLoader
 from torch.utils.tensorboard import SummaryWriter
 
-from qadence.ml_tools.config import Callback, TrainConfig
+from qadence.ml_tools.config import Callback, TrainConfig, run_callbacks
 from qadence.ml_tools.data import DictDataLoader, OptimizeResult
 from qadence.ml_tools.parameters import get_parameters, set_parameters
 from qadence.ml_tools.printing import (
@@ -160,10 +160,6 @@ def train(
         )
     ]
 
-    def run_callbacks(callback_iterable: list[Callback], opt_res: OptimizeResult) -> None:
-        for callback in callback_iterable:
-            callback(opt_res)
-
     callbacks_end_opt = [
         callback
         for callback in callbacks
@@ -192,7 +188,7 @@ def train(
 
     # Final callbacks
     callbacks_after_opt = [callback for callback in callbacks if callback.call_after_opt]
-    run_callbacks(callbacks_after_opt, opt_result)
+    run_callbacks(callbacks_after_opt, opt_result, is_last_iteration=True)
 
     # close tracker
     if config.tracking_tool == ExperimentTrackingTool.TENSORBOARD:
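Both training loops previously defined a local `run_callbacks` helper; it now lives once in `qadence.ml_tools.config` and threads through the new `is_last_iteration` flag so post-training callbacks fire regardless of the `called_every` modulo. The end-of-training pattern shared by both loops now reads as follows (an excerpt, not a standalone script: `callbacks` and `opt_result` are in scope inside `train`):

    from qadence.ml_tools.config import run_callbacks

    callbacks_after_opt = [callback for callback in callbacks if callback.call_after_opt]
    # is_last_iteration=True bypasses the iteration % called_every check.
    run_callbacks(callbacks_after_opt, opt_result, is_last_iteration=True)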
{qadence-1.7.5.dist-info → qadence-1.7.7.dist-info}/METADATA
CHANGED
@@ -1,8 +1,8 @@
 Metadata-Version: 2.3
 Name: qadence
-Version: 1.7.5
+Version: 1.7.7
 Summary: Pasqal interface for circuit-based quantum computing SDKs
-Author-email: Aleksander Wennersteen <aleksander.wennersteen@pasqal.com>, Gert-Jan Both <gert-jan.both@pasqal.com>, Niklas Heim <niklas.heim@pasqal.com>, Mario Dagrada <mario.dagrada@pasqal.com>, Vincent Elfving <vincent.elfving@pasqal.com>, Dominik Seitz <dominik.seitz@pasqal.com>, Roland Guichard <roland.guichard@pasqal.com>, "Joao P. Moutinho" <joao.moutinho@pasqal.com>, Vytautas Abramavicius <vytautas.abramavicius@pasqal.com>, Gergana Velikova <gergana.velikova@pasqal.com>, Eduardo Maschio <eduardo.maschio@pasqal.com>, Smit Chaudhary <smit.chaudhary@pasqal.com>, Ignacio Fernández Graña <ignacio.fernandez-grana@pasqal.com>, Charles Moussa <charles.moussa@pasqal.com>, Giorgio Tosti Balducci <giorgio.tosti-balducci@pasqal.com>
+Author-email: Aleksander Wennersteen <aleksander.wennersteen@pasqal.com>, Gert-Jan Both <gert-jan.both@pasqal.com>, Niklas Heim <niklas.heim@pasqal.com>, Mario Dagrada <mario.dagrada@pasqal.com>, Vincent Elfving <vincent.elfving@pasqal.com>, Dominik Seitz <dominik.seitz@pasqal.com>, Roland Guichard <roland.guichard@pasqal.com>, "Joao P. Moutinho" <joao.moutinho@pasqal.com>, Vytautas Abramavicius <vytautas.abramavicius@pasqal.com>, Gergana Velikova <gergana.velikova@pasqal.com>, Eduardo Maschio <eduardo.maschio@pasqal.com>, Smit Chaudhary <smit.chaudhary@pasqal.com>, Ignacio Fernández Graña <ignacio.fernandez-grana@pasqal.com>, Charles Moussa <charles.moussa@pasqal.com>, Giorgio Tosti Balducci <giorgio.tosti-balducci@pasqal.com>, Daniele Cucurachi <daniele.cucurachi@pasqal.com>
 License: Apache 2.0
 License-File: LICENSE
 Classifier: License :: OSI Approved :: Apache Software License
@@ -22,7 +22,7 @@ Requires-Dist: matplotlib
 Requires-Dist: nevergrad
 Requires-Dist: numpy
 Requires-Dist: openfermion
-Requires-Dist: pyqtorch==1.4.
+Requires-Dist: pyqtorch==1.4.7
 Requires-Dist: pyyaml
 Requires-Dist: rich
 Requires-Dist: scipy
@@ -57,7 +57,7 @@ Requires-Dist: mlflow; extra == 'mlflow'
 Provides-Extra: protocols
 Requires-Dist: qadence-protocols; extra == 'protocols'
 Provides-Extra: pulser
-Requires-Dist: pasqal-cloud==0.11.
+Requires-Dist: pasqal-cloud==0.11.4; extra == 'pulser'
 Requires-Dist: pulser-core==0.19.0; extra == 'pulser'
 Requires-Dist: pulser-simulation==0.19.0; extra == 'pulser'
 Provides-Extra: visualization
{qadence-1.7.5.dist-info → qadence-1.7.7.dist-info}/RECORD
CHANGED
@@ -1,5 +1,5 @@
 qadence/__init__.py,sha256=0SFU1_XZ-3WlSU1rA4W1Y0edxpZLO_sNg-YnpjlD77w,2638
-qadence/backend.py,sha256=
+qadence/backend.py,sha256=N27CRrmjkgFGhwdTJvdRKn2hKjuTwGM5t0QFzGEgvJA,13351
 qadence/circuit.py,sha256=3lQdjj_srxgk6f5M3eh3kE-Qdov4FA9TZxZZb0E1_mI,6966
 qadence/decompose.py,sha256=C4LYia_GcC9Rx3QO0ZLWTI9dN63a8WTEAXO0ZVQWuiE,5221
 qadence/divergences.py,sha256=JhpELhWSnuDvQxa9hJp_DE3EQg2Ban-Ta0mHZ_fVrHg,1832
@@ -28,19 +28,19 @@ qadence/analog/hamiltonian_terms.py,sha256=9LKidqqEMJTTdXeaxkxP_otTmcv9i4yeJ-JKC
 qadence/analog/parse_analog.py,sha256=ppvMZtsKXOIkIehCgjbdmG9n232DIycSanyuyVth5Wg,4223
 qadence/backends/__init__.py,sha256=ibm7wmZxuIoMYAQxgAx0MsfLYWOVHNWgLwyS1HjMuuI,215
 qadence/backends/api.py,sha256=NPrvtZQ4klUBabUWJ5hbTUCVoaoW9-sHVbiXxAnTt3A,2643
-qadence/backends/gpsr.py,sha256=
+qadence/backends/gpsr.py,sha256=3lcOHgt0soCiDXAyZ8DVyS8dMgUypIPwkDADds2boSE,5371
 qadence/backends/jax_utils.py,sha256=VfKhqCKknHDWZO21UFipWH_Lkiq175Z5GkP49gWjbyw,5038
 qadence/backends/utils.py,sha256=7gWiV_yJH3yyGFxwt-AQLEMLYkBX8aThvmFUlF0M2R0,8302
 qadence/backends/braket/__init__.py,sha256=eruyDZKMqkh1LE7eJ980vcrLJbia35uUX6krAP78clI,121
-qadence/backends/braket/backend.py,sha256=
+qadence/backends/braket/backend.py,sha256=HNqs4ASB1wgIaInBNifC83GDsXOfz8s6FIIvP4aY9IM,8481
 qadence/backends/braket/config.py,sha256=7cu22dmYdp48Fu760HPfxBHinaUnGmzx9MkE_EPhVN8,594
 qadence/backends/braket/convert_ops.py,sha256=DVXV7sT9sX_yGOgPKclD9KIGgmbBRuDy_e39i1Z8I1s,3417
 qadence/backends/horqrux/__init__.py,sha256=0OdVy6cq0oQggV48LO1WXdaZuSkDkz7OYNEPIkNAmfk,140
-qadence/backends/horqrux/backend.py,sha256=
+qadence/backends/horqrux/backend.py,sha256=9BqJP_av_cyLtIw07ri8gwoOXLz3b2yCzm9CmP36ZWw,8821
 qadence/backends/horqrux/config.py,sha256=xz7JlUcwW_4JAbvProbSI9hA1SXZRRAN0Hr2bvmLzfg,892
 qadence/backends/horqrux/convert_ops.py,sha256=3uG3yLq5wjfrWzFHDs0HEnd8kER91ZHVX3HCpYjOdjk,8565
 qadence/backends/pulser/__init__.py,sha256=capQ-eHqwtOeLf4mWsI0BIseAHhiLGie5cFD4-iVhUo,116
-qadence/backends/pulser/backend.py,sha256=
+qadence/backends/pulser/backend.py,sha256=bALJrLH4ZyJ24ehxilRat1LdFL7cwIpF7MSjHTXhPZQ,15621
 qadence/backends/pulser/channels.py,sha256=ZF0yEXUFHAmi3IdeXjzdTNGR5NzaRRFTiUpUGVg2sO4,329
 qadence/backends/pulser/cloud.py,sha256=0uUluvbFV9sOuCPraE-9uiVtC3Q8QaDY1IJMDi8grDM,2057
 qadence/backends/pulser/config.py,sha256=aoHDmtgq5i0Zryxenw_p3uARY0B1w-UaYvfqDmrWHM0,2175
@@ -49,9 +49,9 @@ qadence/backends/pulser/devices.py,sha256=DermLZNfmCB3SqteKVW4uhg4jp6ya1G6ptnXbB
 qadence/backends/pulser/pulses.py,sha256=F4fExIRAhLPMtVg1bhNtDihUYHxu5RExGjovk8-CQIo,11884
 qadence/backends/pulser/waveforms.py,sha256=0uz95b7rUaUUtN0tuHBZmJ0H6UBmfHST_59ozwsRCzg,2227
 qadence/backends/pyqtorch/__init__.py,sha256=0OdVy6cq0oQggV48LO1WXdaZuSkDkz7OYNEPIkNAmfk,140
-qadence/backends/pyqtorch/backend.py,sha256=
+qadence/backends/pyqtorch/backend.py,sha256=NG83pZBuL2eevIFxqioiWWBrmCMVNvcvnEAAo-gju3A,8907
 qadence/backends/pyqtorch/config.py,sha256=jK-if0OF6L_inP-oZhWI4-b8wcrOiK8-EVv3NYDOfBM,2056
-qadence/backends/pyqtorch/convert_ops.py,sha256=
+qadence/backends/pyqtorch/convert_ops.py,sha256=PNn9TVXHWMGVyEpa8DqF1eJ4uSNAhZfuP9R_oT_fxOs,14314
 qadence/blocks/__init__.py,sha256=H6jEA_CptkE-eoB4UfSbUiDszbxxhZwECV_TgoZWXoU,960
 qadence/blocks/abstract.py,sha256=QFwKPagbTrn3V4c2DHpBd-QL_mVIUXfbvyBLUdD6zw4,12023
 qadence/blocks/analog.py,sha256=ymnnlSVoW1XL05ZvnnHCqRTHuOXIEY_7E9M0PNKJZy4,10812
@@ -90,7 +90,7 @@ qadence/engines/jax/differentiable_backend.py,sha256=W5rDA8wb-ECnFWoLj4dVugF9v1l
 qadence/engines/jax/differentiable_expectation.py,sha256=poI--yV3srG3wndTcg6hk1lV63RYPJEQjypiWGzwqsk,3680
 qadence/engines/torch/__init__.py,sha256=iZFdD32ot0B0CVyC-f5dVViOBnqoalxa6M9Lj4WQuPE,160
 qadence/engines/torch/differentiable_backend.py,sha256=AWthwvKE8pCOih4dZ3tXxQX4W1ps9mBcvo7n4V9V24Y,3553
-qadence/engines/torch/differentiable_expectation.py,sha256=
+qadence/engines/torch/differentiable_expectation.py,sha256=iaWpd4Y3e_rGKt-S0TNXqqSFg5z6I_5_ZIJxjQxd7Ow,10290
 qadence/exceptions/__init__.py,sha256=BU6vWrI9mshzr1aTPm1Ticr_o_42GjTrWI4OZXhThsI,203
 qadence/exceptions/exceptions.py,sha256=4j_VJpx2sZ2Mir5BJUWu4nwb131FY1ygO4q8-XlyfRc,190
 qadence/measurements/__init__.py,sha256=RIjG9tVJMqhNzyj7maZI250Um0KgHl2PizDcKJag-JU,161
@@ -100,21 +100,21 @@ qadence/measurements/shadow.py,sha256=lYZWbBCJJh7pFXPV5jSvsyN_0g22ao3jARpKnx1jeJ
 qadence/measurements/tomography.py,sha256=8fzXhYOu_DaMiUoZzLvpP03WhuwlZ3ldkWepLUHjWqM,2665
 qadence/measurements/utils.py,sha256=CJmnSobzdeR4T4FuEpad7d-BSJ9W-wTaU9hRbveB6kY,6534
 qadence/mitigations/__init__.py,sha256=RzaxYJftePFMloGhBVSixZ8fSe-ps_Jc-EyPm6xz-bs,159
-qadence/mitigations/analog_zne.py,sha256=
+qadence/mitigations/analog_zne.py,sha256=M43TRSlPy8HjM1PQJHZUgVPXTodkVMe5KkOYFWVDa-4,7762
 qadence/mitigations/protocols.py,sha256=Jq9MyLujfTyWmc7XVUGYVRUkJT1MmZw-GgmWpVjmX2Y,1608
 qadence/mitigations/readout.py,sha256=HPfYmdjRlieUdOBMZTghFK4DRWfveM4KkDkEI0bMI0E,6262
-qadence/ml_tools/__init__.py,sha256=
-qadence/ml_tools/config.py,sha256=
+qadence/ml_tools/__init__.py,sha256=nTXcVpfSnMBWwHjU18ASIbvqfht5TIY9Zt9Wu1DATUg,1118
+qadence/ml_tools/config.py,sha256=3vXodiN_1t17vO9uZnss3qvMS9saGqTa_tAClDKQMbs,24999
 qadence/ml_tools/constructors.py,sha256=VM7VdtvQ4-4b6SBzUdDpy6fbdDAeQPrj4t2HnUlvUas,27877
 qadence/ml_tools/data.py,sha256=ubwtkNvoBf0ZTGQm2M2Lgaim2tBAiAsa9VoTRR_MWks,5175
 qadence/ml_tools/models.py,sha256=SjwAPbSl9zn9YqfmwqHc2lIXCkIpwG_ysz4jieRh7W0,16996
 qadence/ml_tools/optimize_step.py,sha256=L92-kNILrmwz20d_Xd_FIQw6SDGJYIEbFN3tSRz9eno,1835
 qadence/ml_tools/parameters.py,sha256=gew2Kq_5-RgRpaTvs8eauVhgo0sTqqDQEV6WHFEiLGM,1301
 qadence/ml_tools/printing.py,sha256=2xMhsn2j0nQdO2klLcLWY33GT_7r-Gi83Fv2M2rGQQE,4789
-qadence/ml_tools/saveload.py,sha256=
+qadence/ml_tools/saveload.py,sha256=B6709ZdqHkg6kCZJmlZhCoWaNJ4ZynJe_W2IoaexLTs,5945
 qadence/ml_tools/tensors.py,sha256=xZ9ZRzOqEaMgLUGWQf1najDmL6iLuN1ojCGVFs1Tm94,1337
-qadence/ml_tools/train_grad.py,sha256=
-qadence/ml_tools/train_no_grad.py,sha256=
+qadence/ml_tools/train_grad.py,sha256=sOQkx6aDxXrQ4HmFB1X7VKqKcOlaFthGD-5lx6gS0Jw,13502
+qadence/ml_tools/train_no_grad.py,sha256=jUjnsxvWMrAa-7NV1bewpAo6mV9grF02gwBHy0SVTws,7249
 qadence/ml_tools/utils.py,sha256=PW8FyoV0mG_DtN1U8njTDV5qxZ0EK4mnFwMAsLBArfk,1410
 qadence/noise/__init__.py,sha256=r0nR8uEZeB1M9pI2UisjWq0bjw50fPFfVGzIMev923g,147
 qadence/noise/protocols.py,sha256=-aZ06JvMnpxCeT5v5lI_RNPOLbb9Ju1Pi1AB6uAXxVE,1653
@@ -133,7 +133,7 @@ qadence/transpile/digitalize.py,sha256=iWRwYAYQsD2INHj0HNbGJriv_3fRCuBW1nDBrwtKS
 qadence/transpile/flatten.py,sha256=EdhSG5WyF56nbnxINNLqrHgY84MRM1YFjT3fR4aph5Q,3427
 qadence/transpile/invert.py,sha256=KAefHTG2AWr39aengVhXrzCtJPhrZC-ZnL6vYvmbnY0,4867
 qadence/transpile/transpile.py,sha256=6MRRkk1OS279L1fwUQjazA6qlfpbd-T_EJMKT8hAhOU,2721
-qadence-1.7.5.dist-info/METADATA,sha256=
-qadence-1.7.5.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87
-qadence-1.7.5.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
-qadence-1.7.5.dist-info/RECORD,,
+qadence-1.7.7.dist-info/METADATA,sha256=hkypZEbHl7IP8RyJRdlViMHC2hBVSXre2XAnpp1Q2JM,9986
+qadence-1.7.7.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87
+qadence-1.7.7.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+qadence-1.7.7.dist-info/RECORD,,
{qadence-1.7.5.dist-info → qadence-1.7.7.dist-info}/WHEEL
File without changes
{qadence-1.7.5.dist-info → qadence-1.7.7.dist-info}/licenses/LICENSE
File without changes