qadence 1.5.2__py3-none-any.whl → 1.6.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- qadence/__init__.py +33 -5
- qadence/backend.py +2 -2
- qadence/backends/adjoint.py +8 -4
- qadence/backends/braket/backend.py +3 -2
- qadence/backends/braket/config.py +2 -2
- qadence/backends/gpsr.py +1 -1
- qadence/backends/horqrux/backend.py +23 -31
- qadence/backends/horqrux/config.py +2 -2
- qadence/backends/pulser/backend.py +82 -45
- qadence/backends/pulser/config.py +0 -28
- qadence/backends/pulser/convert_ops.py +20 -7
- qadence/backends/pulser/pulses.py +2 -2
- qadence/backends/pyqtorch/backend.py +3 -2
- qadence/backends/pyqtorch/config.py +2 -2
- qadence/backends/pyqtorch/convert_ops.py +40 -16
- qadence/blocks/block_to_tensor.py +7 -6
- qadence/blocks/matrix.py +2 -2
- qadence/blocks/primitive.py +2 -1
- qadence/blocks/utils.py +2 -2
- qadence/circuit.py +5 -2
- qadence/constructors/__init__.py +1 -10
- qadence/constructors/ansatze.py +1 -65
- qadence/constructors/daqc/daqc.py +3 -2
- qadence/constructors/daqc/gen_parser.py +3 -2
- qadence/constructors/daqc/utils.py +3 -3
- qadence/constructors/feature_maps.py +2 -90
- qadence/constructors/hamiltonians.py +2 -6
- qadence/constructors/rydberg_feature_maps.py +2 -2
- qadence/decompose.py +2 -2
- qadence/engines/torch/differentiable_expectation.py +7 -0
- qadence/extensions.py +4 -15
- qadence/log_config.yaml +24 -0
- qadence/logger.py +9 -27
- qadence/measurements/shadow.py +3 -16
- qadence/ml_tools/config.py +11 -1
- qadence/ml_tools/models.py +10 -2
- qadence/ml_tools/printing.py +1 -3
- qadence/ml_tools/saveload.py +23 -6
- qadence/ml_tools/train_grad.py +39 -6
- qadence/ml_tools/train_no_grad.py +2 -2
- qadence/models/quantum_model.py +13 -6
- qadence/noise/readout.py +2 -3
- qadence/operations/__init__.py +0 -2
- qadence/operations/analog.py +2 -12
- qadence/operations/control_ops.py +3 -2
- qadence/operations/ham_evo.py +5 -7
- qadence/operations/parametric.py +3 -2
- qadence/operations/primitive.py +2 -2
- qadence/overlap.py +7 -12
- qadence/parameters.py +2 -2
- qadence/serialization.py +2 -2
- qadence/states.py +20 -5
- qadence/transpile/block.py +2 -2
- qadence/types.py +2 -2
- qadence/utils.py +42 -3
- {qadence-1.5.2.dist-info → qadence-1.6.1.dist-info}/METADATA +15 -9
- {qadence-1.5.2.dist-info → qadence-1.6.1.dist-info}/RECORD +59 -58
- {qadence-1.5.2.dist-info → qadence-1.6.1.dist-info}/WHEEL +0 -0
- {qadence-1.5.2.dist-info → qadence-1.6.1.dist-info}/licenses/LICENSE +0 -0
qadence/__init__.py
CHANGED
@@ -1,10 +1,43 @@
 from __future__ import annotations
 
+import logging
+import logging.config
+import os
 from importlib import import_module
+from pathlib import Path
 
+import yaml
 from torch import cdouble, set_default_dtype
 from torch import float64 as torchfloat64
 
+DEFAULT_FLOAT_DTYPE = torchfloat64
+DEFAULT_COMPLEX_DTYPE = cdouble
+set_default_dtype(DEFAULT_FLOAT_DTYPE)
+
+logging_levels = {
+    "DEBUG": logging.DEBUG,
+    "INFO": logging.INFO,
+    "WARNING": logging.WARNING,
+    "ERROR": logging.ERROR,
+    "CRITICAL": logging.CRITICAL,
+}
+LOG_CONFIG_PATH = os.environ.get("QADENCE_LOG_CONFIG", f"{Path(__file__).parent}/log_config.yaml")
+LOG_BASE_LEVEL = os.environ.get("QADENCE_LOG_LEVEL", "").upper()
+
+with open(LOG_CONFIG_PATH, "r") as stream:
+    log_config = yaml.load(stream, Loader=yaml.FullLoader)
+logging.config.dictConfig(log_config)
+
+logger: logging.Logger = logging.getLogger(__name__)
+LOG_LEVEL = logging_levels.get(LOG_BASE_LEVEL, logging.INFO)  # type: ignore[arg-type]
+logger.setLevel(LOG_LEVEL)
+[
+    h.setLevel(LOG_LEVEL)  # type: ignore[func-returns-value]
+    for h in logger.handlers
+    if h.get_name() == "console"
+]
+logger.debug(f"Qadence logger successfully setup with log level {LOG_LEVEL}")
+
 from .analog import *
 from .backend import *
 from .backends import *
@@ -28,11 +61,6 @@ from .transpile import *
 from .types import *
 from .utils import *
 
-DEFAULT_FLOAT_DTYPE = torchfloat64
-DEFAULT_COMPLEX_DTYPE = cdouble
-set_default_dtype(DEFAULT_FLOAT_DTYPE)
-
-
 """Fetch the functions defined in the __all__ of each sub-module.
 
 Import to the qadence name space. Make sure each added submodule has the respective definition:
qadence/backend.py
CHANGED
@@ -3,6 +3,7 @@ from __future__ import annotations
 from abc import ABC, abstractmethod
 from collections import Counter
 from dataclasses import dataclass, fields
+from logging import getLogger
 from typing import Any, Callable, Iterator, Tuple
 
 from openfermion import QubitOperator
@@ -20,14 +21,13 @@ from qadence.blocks import (
 )
 from qadence.blocks.analog import ConstantAnalogRotation, InteractionBlock
 from qadence.circuit import QuantumCircuit
-from qadence.logger import get_logger
 from qadence.measurements import Measurements
 from qadence.mitigations import Mitigations
 from qadence.noise import Noise
 from qadence.parameters import stringify
 from qadence.types import ArrayLike, BackendName, DiffMode, Endianness, Engine, ParamDictType
 
-logger = get_logger(__name__)
+logger = getLogger(__name__)
 
 
 @dataclass
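This `get_logger` → stdlib `getLogger` swap repeats in nearly every backend file below. Since configuration now lives in `qadence/__init__.py`, downstream code can tune any module's verbosity with plain standard-library calls; a sketch (the dotted logger name simply follows Python's module hierarchy):

import logging

import qadence  # applies log_config.yaml via dictConfig on first import

# Per-module verbosity without touching the YAML: qadence loggers are now
# ordinary stdlib loggers addressed by module name.
logging.getLogger("qadence.backends.pulser").setLevel(logging.DEBUG)
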
qadence/backends/adjoint.py
CHANGED
@@ -7,7 +7,7 @@ from pyqtorch.circuit import QuantumCircuit as PyQCircuit
 from pyqtorch.parametric import Parametric as PyQParametric
 from pyqtorch.primitive import Primitive as PyQPrimitive
 from pyqtorch.utils import inner_prod, param_dict
-from torch import Tensor, no_grad,
+from torch import Tensor, no_grad, zeros
 from torch.autograd import Function
 from torch.nn import Module
@@ -125,7 +125,7 @@ class AdjointExpectation(Function):
             ctx.projected_state, op.dagger(values), op.qubit_support
         )
     elif isinstance(op, PyQCircuit):
-        grads = [g for sub_op in op.
+        grads = [g for sub_op in op.operations[::-1] for g in _apply_adjoint(ctx, sub_op)]
     elif isinstance(op, PyQPrimitive):
         ctx.out_state = apply_operator(ctx.out_state, op.dagger(values), op.qubit_support)
         if isinstance(op, PyQParametric) and values[op.param_name].requires_grad:
@@ -147,13 +147,17 @@ class AdjointExpectation(Function):
 
         grads = list(
             reversed(
-                [
+                [
+                    grad_out * g
+                    for op in ctx.circuit.operations[::-1]
+                    for g in _apply_adjoint(ctx, op)
+                ]
             )
         )
         num_grads = len(grads)
         num_params = len(ctx.saved_tensors)
         diff = num_params - num_grads
-        grads = grads + [
+        grads = grads + [zeros(1, device=ctx.circuit.device) for _ in range(diff)]
        # Set observable grads to 0
         ctx.save_for_backward(*grads)
         return (None, None, None, None, *grads)
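The padding line keeps `backward`'s outputs aligned with `ctx.saved_tensors`: circuit parameters receive adjoint gradients, while the remaining (observable) parameters get explicit zeros, as the "Set observable grads to 0" comment notes. A toy illustration of just that step, with made-up shapes:

import torch

circuit_grads = [torch.tensor([0.3]), torch.tensor([-1.2])]  # from the adjoint sweep
num_params = 4  # len(ctx.saved_tensors): 2 circuit + 2 observable parameters
# Observable parameters are not differentiated here, so their slots are zero-filled.
grads = circuit_grads + [torch.zeros(1) for _ in range(num_params - len(circuit_grads))]
assert len(grads) == num_params
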
qadence/backends/braket/backend.py
CHANGED
@@ -2,6 +2,7 @@ from __future__ import annotations
 
 from collections import Counter
 from dataclasses import dataclass, field
+from logging import getLogger
 from typing import Any
 
 import numpy as np
@@ -15,7 +16,6 @@ from qadence.backend import ConvertedCircuit, ConvertedObservable
 from qadence.backends.utils import to_list_of_dicts
 from qadence.blocks import AbstractBlock, block_to_tensor
 from qadence.circuit import QuantumCircuit
-from qadence.logger import get_logger
 from qadence.measurements import Measurements
 from qadence.mitigations import Mitigations
 from qadence.mitigations.protocols import apply_mitigation
@@ -29,7 +29,7 @@ from qadence.utils import Endianness
 from .config import Configuration, default_passes
 from .convert_ops import convert_block
 
-logger = get_logger(__name__)
+logger = getLogger(__name__)
 
 
 def promote_parameters(parameters: dict[str, Tensor | float]) -> dict[str, float]:
@@ -56,6 +56,7 @@ class Backend(BackendInterface):
     native_endianness: Endianness = Endianness.BIG
     config: Configuration = field(default_factory=Configuration)
     engine: Engine = Engine.TORCH
+    logger.debug("Initialised")
 
     # braket specifics
     # TODO: include it in the configuration?
qadence/backends/braket/config.py
CHANGED
@@ -1,13 +1,13 @@
 from __future__ import annotations
 
 from dataclasses import dataclass, field
+from logging import getLogger
 from typing import Callable
 
 from qadence.backend import BackendConfiguration
-from qadence.logger import get_logger
 from qadence.transpile import digitalize, fill_identities
 
-logger = get_logger(__name__)
+logger = getLogger(__name__)
 
 default_passes: list[Callable] = [fill_identities, digitalize]
 
qadence/backends/gpsr.py
CHANGED
@@ -20,7 +20,7 @@ def general_psr(spectrum: Tensor, n_eqs: int | None = None, shift_prefac: float
     sorted_unique_spectral_gaps = torch.tensor(list(sorted_unique_spectral_gaps))
 
     if n_eqs == 1:
-        return
+        return single_gap_psr
     else:
         return partial(
             multi_gap_psr,
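For context, `single_gap_psr` is the closed-form shift rule used when the generator has a single spectral gap. A self-contained sketch of that formula (this is not qadence's actual function signature; `delta` is an assumed gap value):

import math

import torch

def psr_derivative(f, theta: torch.Tensor, delta: float) -> torch.Tensor:
    # Single-gap PSR: df/dtheta = delta/2 * (f(theta + s) - f(theta - s)) with s = pi/(2*delta)
    shift = math.pi / (2 * delta)
    return delta * (f(theta + shift) - f(theta - shift)) / 2

# Sanity check on f(theta) = cos(2*theta), whose exact derivative is -2*sin(2*theta).
theta = torch.tensor(0.3)
f = lambda t: torch.cos(2.0 * t)
assert torch.isclose(psr_derivative(f, theta, delta=2.0), -2.0 * torch.sin(2.0 * theta))
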
qadence/backends/horqrux/backend.py
CHANGED
@@ -2,6 +2,7 @@ from __future__ import annotations
 
 from collections import Counter
 from dataclasses import dataclass, field
+from logging import getLogger
 from typing import Any
 
 import jax
@@ -28,6 +29,8 @@ from qadence.utils import int_to_basis
 from .config import Configuration, default_passes
 from .convert_ops import HorqruxCircuit, convert_block, convert_observable
 
+logger = getLogger(__name__)
+
 
 @dataclass(frozen=True, eq=True)
 class Backend(BackendInterface):
@@ -43,6 +46,7 @@ class Backend(BackendInterface):
     native_endianness: Endianness = Endianness.BIG
     config: Configuration = field(default_factory=Configuration)
     engine: Engine = Engine.JAX
+    logger.debug("Initialised")
 
     def circuit(self, circuit: QuantumCircuit) -> ConvertedCircuit:
         passes = self.config.transpilation_passes
@@ -191,28 +195,6 @@ class Backend(BackendInterface):
         if n_shots < 1:
             raise ValueError("You can only call sample with n_shots>0.")
 
-        def _sample(
-            _probs: ArrayLike, n_shots: int, endianness: Endianness, n_qubits: int
-        ) -> Counter:
-            _logits = jax.vmap(lambda _p: jnp.log(_p / (1 - _p)))(_probs)
-
-            def _smple(accumulator: ArrayLike, i: int) -> tuple[ArrayLike, None]:
-                accumulator = accumulator.at[i].set(
-                    jax.random.categorical(jax.random.PRNGKey(i), _logits)
-                )
-                return accumulator, None
-
-            samples = jax.lax.scan(
-                _smple, jnp.empty_like(jnp.arange(n_shots)), jnp.arange(n_shots)
-            )[0]
-            return Counter(
-                {
-                    int_to_basis(k=k, n_qubits=n_qubits, endianness=endianness): count.item()
-                    for k, count in enumerate(jnp.bincount(samples))
-                    if count > 0
-                }
-            )
-
         wf = self.run(
             circuit=circuit,
             param_values=param_values,
@@ -221,16 +203,26 @@ class Backend(BackendInterface):
             unhorqify_state=False,
         )
         probs = jnp.abs(jnp.float_power(wf, 2.0)).ravel()
-
-
-
-
-
-
-        )
-
+        key = jax.random.PRNGKey(0)
+        # JAX handles pseudo random number generation by tracking an explicit state via a random key
+        # For more details, see https://jax.readthedocs.io/en/latest/random-numbers.html
+        samples = jax.vmap(
+            lambda subkey: jax.random.choice(
+                key=subkey, a=jnp.arange(0, 2**circuit.abstract.n_qubits), p=probs
+            )
+        )(jax.random.split(key, n_shots))
 
-        return
+        return [
+            Counter(
+                {
+                    int_to_basis(
+                        k=k, n_qubits=circuit.abstract.n_qubits, endianness=endianness
+                    ): count.item()
+                    for k, count in enumerate(jnp.bincount(samples))
+                    if count > 0
+                }
+            )
+        ]
 
     def assign_parameters(self, circuit: ConvertedCircuit, param_values: ParamDictType) -> Any:
         raise NotImplementedError
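The old logits-plus-`lax.scan` sampler is replaced by drawing basis-state indices directly from the wavefunction probabilities, one split subkey per shot. A standalone sketch of the same scheme (the Bell-state probabilities and the plain binary key formatting are illustrative; qadence builds the keys with `int_to_basis`):

from collections import Counter

import jax
import jax.numpy as jnp

probs = jnp.array([0.5, 0.0, 0.0, 0.5])  # e.g. a 2-qubit Bell state
n_shots = 100
key = jax.random.PRNGKey(0)
# One subkey per shot; each draw picks a basis-state index with probabilities `probs`.
samples = jax.vmap(
    lambda subkey: jax.random.choice(key=subkey, a=jnp.arange(probs.size), p=probs)
)(jax.random.split(key, n_shots))
counts = Counter(
    {format(k, "02b"): int(c) for k, c in enumerate(jnp.bincount(samples, length=probs.size)) if c > 0}
)
print(counts)  # roughly Counter({'00': 50, '11': 50})
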
qadence/backends/horqrux/config.py
CHANGED
@@ -1,18 +1,18 @@
 from __future__ import annotations
 
 from dataclasses import dataclass
+from logging import getLogger
 from typing import Callable
 
 from qadence.analog import add_background_hamiltonian
 from qadence.backend import BackendConfiguration
-from qadence.logger import get_logger
 from qadence.transpile import (
     blockfn_to_circfn,
     flatten,
     scale_primitive_blocks_only,
 )
 
-logger = get_logger(__name__)
+logger = getLogger(__name__)
 
 
 def default_passes(config: Configuration) -> list[Callable]:
qadence/backends/pulser/backend.py
CHANGED
@@ -1,7 +1,9 @@
 from __future__ import annotations
 
 from collections import Counter
+from collections.abc import Iterable
 from dataclasses import dataclass, field
+from logging import getLogger
 from typing import Any
 
 import numpy as np
@@ -10,7 +12,7 @@ import torch
 from pulser import Register as PulserRegister
 from pulser import Sequence
 from pulser_simulation import SimConfig
-from pulser_simulation.simresults import SimulationResults
+from pulser_simulation.simresults import CoherentResults, SimulationResults
 from pulser_simulation.simulation import QutipEmulator
 from torch import Tensor
 
@@ -19,7 +21,6 @@ from qadence.backend import ConvertedCircuit, ConvertedObservable
 from qadence.backends.utils import to_list_of_dicts
 from qadence.blocks import AbstractBlock
 from qadence.circuit import QuantumCircuit
-from qadence.logger import get_logger
 from qadence.measurements import Measurements
 from qadence.mitigations import Mitigations
 from qadence.mitigations.protocols import apply_mitigation
@@ -37,7 +38,7 @@ from .convert_ops import convert_observable
 from .devices import IdealDevice, RealisticDevice
 from .pulses import add_addressing_pattern, add_pulses
 
-logger = get_logger(__name__)
+logger = getLogger(__name__)
 
 
 def _convert_init_state(state: Tensor) -> np.ndarray:
@@ -70,27 +71,6 @@ def make_sequence(circ: QuantumCircuit, config: Configuration) -> Sequence:
         f"Specified device of type {device_specs.type} is not supported by the pulser backend."
     )
 
-    ########
-    # FIXME: Remove the block below in V1.5.0
-    if config.spacing is not None:
-        logger.warning(
-            "Passing register spacing in the backend configuration is deprecated. "
-            "Please pass it in the register directly, as detailed in the register tutorial."
-        )
-        # Rescales the register coordinates, as was done with the previous "spacing" argument.
-        qadence_register = qadence_register.rescale_coords(scaling=config.spacing)
-    else:
-        if qadence_register.min_distance < 4.0:
-            # Throws warning for minimum distance below 4 because the typical values used
-            # for the standard pulser device parameters is ~7-8, so this likely means the user
-            # forgot to set the spacing at register creation.
-            logger.warning(
-                "Register with distance between atoms smaller than 4µm detected. "
-                "Pulser backend no longer has a default spacing of 8µm applied to the register. "
-                "Make sure you set the desired spacing as detailed in the register tutorial."
-            )
-    ########
-
     pulser_register = create_register(qadence_register)
 
     sequence = Sequence(pulser_register, device)
@@ -160,6 +140,7 @@ class Backend(BackendInterface):
     native_endianness: Endianness = Endianness.BIG
     config: Configuration = field(default_factory=Configuration)
     engine: Engine = Engine.TORCH
+    logger.debug("Initialised")
 
     def circuit(self, circ: QuantumCircuit) -> Sequence:
         passes = self.config.transpilation_passes
@@ -250,30 +231,52 @@ class Backend(BackendInterface):
         self,
         circuit: ConvertedCircuit,
         noise: Noise,
-        param_values: dict[str, Tensor] =
+        param_values: dict[str, Tensor] = dict(),
         state: Tensor | None = None,
         endianness: Endianness = Endianness.BIG,
-    ) ->
+    ) -> Tensor:
         vals = to_list_of_dicts(param_values)
         noise_probs = noise.options.get("noise_probs", None)
         if noise_probs is None:
-            KeyError(
-
-
+            KeyError("A `noise probs` option should be passed to the <class QuantumModel>.")
+        if not (isinstance(noise_probs, float) or isinstance(noise_probs, Iterable)):
+            KeyError(
+                "A single or a range of noise probabilities"
+                " should be passed. Got {type(noise_probs)}."
+            )
 
-
-
-
-
+        def run_noisy_sim(noise_prob: float) -> Tensor:
+            batched_dm = np.zeros(
+                (len(vals), 2**circuit.abstract.n_qubits, 2**circuit.abstract.n_qubits),
+                dtype=np.complex128,
+            )
+            sim_config = {"noise": noise.protocol, noise.protocol + "_rate": noise_prob}
             self.config.sim_config = SimConfig(**sim_config)
 
             for i, param_values_el in enumerate(vals):
                 sequence = self.assign_parameters(circuit, param_values_el)
-                sim_result = simulate_sequence(sequence, self.config, state)
-
-
+                sim_result: CoherentResults = simulate_sequence(sequence, self.config, state)
+                final_state = sim_result.get_final_state().data.toarray()
+                batched_dm[i] = np.flip(final_state)
+            return torch.from_numpy(batched_dm)
 
-
+        # Pulser requires numpy types.
+        if isinstance(noise_probs, Iterable):
+            noisy_batched_dms = []
+            for noise_prob in noise_probs:
+                noisy_sim = run_noisy_sim(noise_prob)
+                if not param_values:
+                    noisy_sim = noisy_sim[0]
+                noisy_batched_dms.append(noisy_sim)
+            noisy_batched_dms = torch.stack(noisy_batched_dms)
+        else:
+            noisy_batched_dms = run_noisy_sim(noise_probs)
+
+        if endianness != self.native_endianness:
+            from qadence.transpile import invert_endianness
+
+            noisy_batched_dms = invert_endianness(noisy_batched_dms)
+        return noisy_batched_dms
 
     def sample(
         self,
@@ -327,14 +330,48 @@ class Backend(BackendInterface):
     ) -> Tensor:
         observable = observable if isinstance(observable, list) else [observable]
         if mitigation is None:
-
-
-
-
-
-
-
-
+            if noise is None:
+                state = self.run(
+                    circuit, param_values=param_values, state=state, endianness=endianness
+                )
+                support = sorted(list(circuit.abstract.register.support))
+                res_list = [
+                    obs.native(state, param_values, qubit_support=support) for obs in observable
+                ]
+                res = torch.transpose(torch.stack(res_list), 0, 1)
+                res = res if len(res.shape) > 0 else res.reshape(1)
+                return res.real
+            elif noise is not None:
+                dms = self.run_dm(
+                    circuit=circuit,
+                    noise=noise,
+                    param_values=param_values,
+                    state=state,
+                    endianness=endianness,
+                )
+                support = sorted(list(circuit.abstract.register.support))
+                # TODO: There should be a better check for batched density matrices.
+                if dms.size()[0] > 1:
+                    res_list = [
+                        [
+                            obs.native(
+                                dm.squeeze(), param_values, qubit_support=support, noise=noise
+                            )
+                            for dm in dms
+                        ]
+                        for obs in observable
+                    ]
+                    res = torch.stack(
+                        [torch.transpose(torch.stack(res), 0, -1) for res in res_list]
+                    )
+
+                else:
+                    res_list = [
+                        obs.native(dms, param_values, qubit_support=support) for obs in observable
+                    ]
+                    res = torch.transpose(torch.stack(res_list), 0, 1)
+                res = res if len(res.shape) > 0 else res.reshape(1)
+                return res.real
         elif mitigation is not None:
             logger.warning(
                 "Mitigation protocol is deprecated. Use qadence-protocols instead.",
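A hedged usage sketch of the reworked noisy path: "noise_probs" is the option `run_dm` reads, and passing an Iterable triggers one noisy simulation per probability, stacked along a leading axis. The option key and the `Noise` import come from the diffs here; the depolarizing protocol name is an assumption about the 1.6-era API:

import numpy as np

from qadence.noise import Noise

# Scan a range of noise strengths in a single call.
noise = Noise(protocol=Noise.DEPOLARIZING, options={"noise_probs": np.linspace(0.01, 0.1, 4)})
# dms = backend.run_dm(conv_circuit, noise=noise)                   # stacked density matrices
# vals = backend.expectation(conv_circuit, conv_obs, noise=noise)   # expectations per strength
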
qadence/backends/pulser/config.py
CHANGED
@@ -8,7 +8,6 @@ from pasqal_cloud.device import EmulatorType
 from pulser_simulation.simconfig import SimConfig
 
 from qadence.backend import BackendConfiguration
-from qadence.types import DeviceType, Interaction
 
 DEFAULT_CLOUD_ENV = "prod"
 
@@ -25,14 +24,6 @@ class CloudConfiguration:
 
 @dataclass
 class Configuration(BackendConfiguration):
-    device_type: DeviceType = DeviceType.IDEALIZED
-    """The type of quantum Device to use in the simulations.
-
-    FIXME: This is deprecated, the device_type is now controlled in the
-    Qadence Device, as detailed in the documentation.
-    FIXME: Remove in v1.5.0
-    """
-
     sampling_rate: float = 1.0
     """Sampling rate to be used for local simulations.
 
@@ -40,14 +31,6 @@ class Configuration(BackendConfiguration):
     to avoid any interpolation in the solving procedure
     """
 
-    spacing: Optional[float] = None
-    """Spacing that multiplies the coordinates of the register.
-
-    FIXME: This is deprecated, spacing is now controlled in the Register,
-    as detailed in the register tutorial.
-    FIXME: Remove in v1.5.0
-    """
-
     method_solv: str = "adams"
     """Solver method to pass to the Qutip solver."""
 
@@ -83,17 +66,6 @@ class Configuration(BackendConfiguration):
     FIXME: To be deprecated.
     """
 
-    interaction: Interaction = Interaction.NN
-    """Type of interaction introduced in the Hamiltonian.
-
-    Currently, only
-    NN interaction is support. XY interaction is possible but not implemented
-
-    FIXME: This is deprecated, the interaction is now controlled in the
-    Qadence Device, as detailed in the documentation.
-    FIXME: Remove in v1.5.0
-    """
-
     # configuration for cloud simulations
     cloud_configuration: Optional[CloudConfiguration] = None
 
qadence/backends/pulser/convert_ops.py
CHANGED
@@ -5,6 +5,7 @@ from typing import Sequence
 import torch
 from torch.nn import Module
 
+from qadence import Noise
 from qadence.blocks import (
     AbstractBlock,
 )
@@ -26,17 +27,29 @@ class PulserObservable(Module):
     def __init__(self, block: AbstractBlock, n_qubits: int | None):
         super().__init__()
         self.block = block
-        self.n_qubits = n_qubits
+        self.n_qubits = n_qubits if n_qubits else max(block.qubit_support) + 1
+
+        if not self.block.is_parametric:
+            block_mat = block_to_tensor(
+                self.block, {}, qubit_support=tuple(i for i in range(self.n_qubits))
+            ).squeeze(0)
+            self.register_buffer("block_mat", block_mat)
 
     def forward(
         self,
         state: torch.Tensor,
-        values: dict[str, torch.Tensor]
+        values: dict[str, torch.Tensor] = dict(),
         qubit_support: tuple | None = None,
+        noise: Noise | None = None,
         endianness: Endianness = Endianness.BIG,
     ) -> torch.Tensor:
-
-
-
-
-
+        if not self.block.is_parametric:
+            block_mat = self.block_mat
+        else:
+            block_mat = block_to_tensor(
+                self.block, values, qubit_support=qubit_support, endianness=endianness  # type: ignore [arg-type] # noqa
+            ).squeeze(0)
+        if noise is None:  # Compute expectations for state vector.
+            return torch.sum(torch.matmul(state, block_mat) * state.conj(), dim=1)
+        else:  # Compute expectations for density matrices.
+            return torch.trace(torch.matmul(block_mat, state))
qadence/backends/pulser/pulses.py
CHANGED
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 from functools import partial
+from logging import getLogger
 from typing import Union
 
 import numpy as np
@@ -20,7 +21,6 @@ from qadence.blocks.analog import (
     Interaction,
     InteractionBlock,
 )
-from qadence.logger import get_logger
 from qadence.operations import RX, RY, RZ, AnalogEntanglement
 from qadence.parameters import evaluate
 from qadence.types import PI, OpName
@@ -29,7 +29,7 @@ from .channels import GLOBAL_CHANNEL, LOCAL_CHANNEL
 from .config import Configuration
 from .waveforms import SquareWaveform
 
-logger = get_logger(__name__)
+logger = getLogger(__name__)
 
 TVar = Union[Variable, VariableItem]
 
qadence/backends/pyqtorch/backend.py
CHANGED
@@ -2,6 +2,7 @@ from __future__ import annotations
 
 from collections import Counter
 from dataclasses import dataclass, field
+from logging import getLogger
 from typing import Any
 
 import pyqtorch as pyq
@@ -18,7 +19,6 @@ from qadence.backends.utils import (
 )
 from qadence.blocks import AbstractBlock
 from qadence.circuit import QuantumCircuit
-from qadence.logger import get_logger
 from qadence.measurements import Measurements
 from qadence.mitigations.protocols import Mitigations, apply_mitigation
 from qadence.noise import Noise
@@ -36,7 +36,7 @@ from qadence.utils import infer_batchsize, int_to_basis
 from .config import Configuration, default_passes
 from .convert_ops import convert_block, convert_observable
 
-logger = get_logger(__name__)
+logger = getLogger(__name__)
 
 
 @dataclass(frozen=True, eq=True)
@@ -53,6 +53,7 @@ class Backend(BackendInterface):
     native_endianness: Endianness = Endianness.BIG
     config: Configuration = field(default_factory=Configuration)
     engine: Engine = Engine.TORCH
+    logger.debug("Initialised")
 
     def circuit(self, circuit: QuantumCircuit) -> ConvertedCircuit:
         passes = self.config.transpilation_passes
qadence/backends/pyqtorch/config.py
CHANGED
@@ -1,11 +1,11 @@
 from __future__ import annotations
 
 from dataclasses import dataclass
+from logging import getLogger
 from typing import Callable
 
 from qadence.analog import add_background_hamiltonian
 from qadence.backend import BackendConfiguration
-from qadence.logger import get_logger
 from qadence.transpile import (
     blockfn_to_circfn,
     chain_single_qubit_ops,
@@ -14,7 +14,7 @@ from qadence.transpile import (
 )
 from qadence.types import AlgoHEvo
 
-logger = get_logger(__name__)
+logger = getLogger(__name__)
 
 
 def default_passes(config: Configuration) -> list[Callable]: