qadence 1.7.8__py3-none-any.whl → 1.9.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- qadence/__init__.py +1 -1
- qadence/analog/device.py +1 -1
- qadence/analog/parse_analog.py +1 -2
- qadence/backend.py +3 -3
- qadence/backends/gpsr.py +8 -2
- qadence/backends/horqrux/backend.py +3 -3
- qadence/backends/pulser/backend.py +21 -38
- qadence/backends/pulser/convert_ops.py +2 -2
- qadence/backends/pyqtorch/backend.py +85 -10
- qadence/backends/pyqtorch/config.py +10 -3
- qadence/backends/pyqtorch/convert_ops.py +245 -233
- qadence/backends/utils.py +9 -1
- qadence/blocks/abstract.py +1 -1
- qadence/blocks/embedding.py +21 -11
- qadence/blocks/matrix.py +3 -1
- qadence/blocks/primitive.py +37 -11
- qadence/circuit.py +1 -1
- qadence/constructors/__init__.py +2 -1
- qadence/constructors/ansatze.py +176 -0
- qadence/engines/differentiable_backend.py +3 -3
- qadence/engines/jax/differentiable_backend.py +2 -2
- qadence/engines/jax/differentiable_expectation.py +2 -2
- qadence/engines/torch/differentiable_backend.py +2 -2
- qadence/engines/torch/differentiable_expectation.py +2 -2
- qadence/execution.py +14 -16
- qadence/extensions.py +1 -1
- qadence/log_config.yaml +10 -0
- qadence/measurements/shadow.py +101 -133
- qadence/measurements/tomography.py +2 -2
- qadence/measurements/utils.py +4 -4
- qadence/mitigations/analog_zne.py +8 -7
- qadence/mitigations/protocols.py +2 -2
- qadence/mitigations/readout.py +14 -5
- qadence/ml_tools/__init__.py +4 -8
- qadence/ml_tools/callbacks/__init__.py +30 -0
- qadence/ml_tools/callbacks/callback.py +451 -0
- qadence/ml_tools/callbacks/callbackmanager.py +214 -0
- qadence/ml_tools/{saveload.py → callbacks/saveload.py} +11 -11
- qadence/ml_tools/callbacks/writer_registry.py +430 -0
- qadence/ml_tools/config.py +132 -258
- qadence/ml_tools/constructors.py +2 -2
- qadence/ml_tools/data.py +7 -3
- qadence/ml_tools/loss/__init__.py +10 -0
- qadence/ml_tools/loss/loss.py +87 -0
- qadence/ml_tools/models.py +7 -7
- qadence/ml_tools/optimize_step.py +45 -10
- qadence/ml_tools/stages.py +46 -0
- qadence/ml_tools/train_utils/__init__.py +7 -0
- qadence/ml_tools/train_utils/base_trainer.py +548 -0
- qadence/ml_tools/train_utils/config_manager.py +184 -0
- qadence/ml_tools/trainer.py +692 -0
- qadence/model.py +6 -6
- qadence/noise/__init__.py +2 -2
- qadence/noise/protocols.py +188 -36
- qadence/operations/control_ops.py +37 -22
- qadence/operations/ham_evo.py +88 -26
- qadence/operations/parametric.py +32 -10
- qadence/operations/primitive.py +61 -29
- qadence/overlap.py +0 -6
- qadence/parameters.py +3 -2
- qadence/transpile/__init__.py +2 -1
- qadence/transpile/noise.py +53 -0
- qadence/types.py +39 -3
- {qadence-1.7.8.dist-info → qadence-1.9.0.dist-info}/METADATA +5 -9
- {qadence-1.7.8.dist-info → qadence-1.9.0.dist-info}/RECORD +67 -63
- {qadence-1.7.8.dist-info → qadence-1.9.0.dist-info}/WHEEL +1 -1
- qadence/backends/braket/__init__.py +0 -4
- qadence/backends/braket/backend.py +0 -234
- qadence/backends/braket/config.py +0 -22
- qadence/backends/braket/convert_ops.py +0 -116
- qadence/ml_tools/printing.py +0 -153
- qadence/ml_tools/train_grad.py +0 -395
- qadence/ml_tools/train_no_grad.py +0 -199
- qadence/noise/readout.py +0 -218
- {qadence-1.7.8.dist-info → qadence-1.9.0.dist-info}/licenses/LICENSE +0 -0
qadence/blocks/matrix.py
CHANGED
@@ -8,6 +8,7 @@ import torch
 from torch.linalg import eigvals
 
 from qadence.blocks import PrimitiveBlock
+from qadence.noise import NoiseHandler
 
 logger = getLogger(__name__)
 
@@ -64,6 +65,7 @@ class MatrixBlock(PrimitiveBlock):
         self,
         matrix: torch.Tensor | np.ndarray,
         qubit_support: tuple[int, ...],
+        noise: NoiseHandler | None = None,
         check_unitary: bool = True,
         check_hermitian: bool = False,
     ) -> None:
@@ -82,7 +84,7 @@ class MatrixBlock(PrimitiveBlock):
         if not self.is_unitary(matrix):
             logger.warning("Provided matrix is not unitary.")
         self.matrix = matrix.clone()
-        super().__init__(qubit_support)
+        super().__init__(qubit_support, noise)
 
     @cached_property
     def eigenvalues_generator(self) -> torch.Tensor:
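Illustrative usage (not part of the released diff): with this change a MatrixBlock can carry a noise model. The NoiseHandler construction below (protocol and option names) is an assumption about the 1.9.0 noise API, not taken from this diff.

import torch

from qadence.blocks.matrix import MatrixBlock
from qadence.noise import NoiseHandler
from qadence.types import NoiseProtocol  # assumed location of the protocol enum

# Pauli-X as a raw matrix, with an (assumed) bit-flip channel attached to the block.
mat = torch.tensor([[0.0, 1.0], [1.0, 0.0]], dtype=torch.cdouble)
noise = NoiseHandler(NoiseProtocol.DIGITAL.BITFLIP, {"error_probability": 0.1})
block = MatrixBlock(mat, qubit_support=(0,), noise=noise)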
qadence/blocks/primitive.py
CHANGED
@@ -11,6 +11,7 @@ from rich.panel import Panel
 from rich.tree import Tree
 
 from qadence.blocks.abstract import AbstractBlock
+from qadence.noise import NoiseHandler
 from qadence.parameters import (
     Parameter,
     ParamMap,
@@ -33,13 +34,22 @@ class PrimitiveBlock(AbstractBlock):
 
     name = "PrimitiveBlock"
 
-    def __init__(self, qubit_support: tuple[int, ...]):
+    def __init__(
+        self,
+        qubit_support: tuple[int, ...],
+        noise: NoiseHandler | None = None,
+    ):
         self._qubit_support = qubit_support
+        self._noise = noise
 
     @property
     def qubit_support(self) -> Tuple[int, ...]:
         return self._qubit_support
 
+    @property
+    def noise(self) -> NoiseHandler | None:
+        return self._noise
+
     def digital_decomposition(self) -> AbstractBlock:
         """Decomposition into purely digital gates.
 
@@ -85,11 +95,12 @@ class PrimitiveBlock(AbstractBlock):
             "type": type(self).__name__,
             "qubit_support": self.qubit_support,
             "tag": self.tag,
+            "noise": self.noise._to_dict() if self.noise is not None else None,
         }
 
     @classmethod
     def _from_dict(cls, d: dict) -> PrimitiveBlock:
-        return cls(*d["qubit_support"])
+        return cls(*d["qubit_support"], NoiseHandler._from_dict(d.get("noise")))  # type: ignore[call-arg]
 
     def __hash__(self) -> int:
         return hash(self._to_json())
@@ -196,18 +207,19 @@ class ParametricBlock(PrimitiveBlock):
             "qubit_support": self.qubit_support,
             "tag": self.tag,
             "parameters": self.parameters._to_dict(),
+            "noise": self.noise._to_dict() if self.noise is not None else None,
         }
 
     @classmethod
     def _from_dict(cls, d: dict) -> ParametricBlock:
         params = ParamMap._from_dict(d["parameters"])
         target = d["qubit_support"][0]
-        return cls(target, params)  # type: ignore[call-arg]
+        return cls(target, params, NoiseHandler._from_dict(d.get("noise")))  # type: ignore[call-arg, arg-type]
 
     def dagger(self) -> ParametricBlock:
         exprs = self.parameters.expressions()
         params = tuple(-extract_original_param_entry(param) for param in exprs)
-        return type(self)(*self.qubit_support, *params)  # type: ignore[arg-type]
+        return type(self)(*self.qubit_support, *params, self.noise)  # type: ignore[call-arg, arg-type]
 
 
 class ScaleBlock(ParametricBlock):
@@ -344,6 +356,7 @@ class TimeEvolutionBlock(ParametricBlock):
     """
 
     name = "TimeEvolutionBlock"
+    noise_operators: list = list()
 
     @property
     def has_parametrized_generator(self) -> bool:
@@ -357,14 +370,19 @@ class ControlBlock(PrimitiveBlock):
     control: tuple[int, ...]
     target: tuple[int, ...]
 
-    def __init__(self, control: tuple[int, ...], target_block: PrimitiveBlock) -> None:
+    def __init__(
+        self,
+        control: tuple[int, ...],
+        target_block: PrimitiveBlock,
+        noise: NoiseHandler | None = None,
+    ) -> None:
         self.control = control
         self.blocks = (target_block,)
         self.target = target_block.qubit_support
 
         # using tuple expansion because some control operations could
         # have multiple targets, e.g. CSWAP
-        super().__init__((*control, *self.target))  # target_block.qubit_support[0]))
+        super().__init__((*control, *self.target), noise=noise)  # target_block.qubit_support[0]))
 
     @property
     def n_controls(self) -> int:
@@ -395,13 +413,14 @@ class ControlBlock(PrimitiveBlock):
             "qubit_support": self.qubit_support,
             "tag": self.tag,
             "blocks": [b._to_dict() for b in self.blocks],
+            "noise": self.noise._to_dict() if self.noise is not None else None,
         }
 
     @classmethod
     def _from_dict(cls, d: dict) -> ControlBlock:
         control = d["qubit_support"][0]
         target = d["qubit_support"][1]
-        return cls(control, target)
+        return cls(control, target, NoiseHandler._from_dict(d.get("noise")))
 
     def dagger(self) -> ControlBlock:
         blk = deepcopy(self)
@@ -416,11 +435,16 @@ class ParametricControlBlock(ParametricBlock):
     control: tuple[int, ...] = ()
     blocks: tuple[ParametricBlock, ...]
 
-    def __init__(self, control: tuple[int, ...], target_block: ParametricBlock) -> None:
+    def __init__(
+        self,
+        control: tuple[int, ...],
+        target_block: ParametricBlock,
+        noise: NoiseHandler | None = None,
+    ) -> None:
         self.blocks = (target_block,)
         self.control = control
         self.parameters = target_block.parameters
-        super().__init__((*control, *target_block.qubit_support))
+        super().__init__((*control, *target_block.qubit_support), noise=noise)
 
     @property
     def n_controls(self) -> int:
@@ -450,6 +474,7 @@ class ParametricControlBlock(ParametricBlock):
             "qubit_support": self.qubit_support,
             "tag": self.tag,
             "blocks": [b._to_dict() for b in self.blocks],
+            "noise": self.noise._to_dict() if self.noise is not None else None,
         }
 
     @classmethod
@@ -460,7 +485,7 @@ class ParametricControlBlock(ParametricBlock):
         target = d["qubit_support"][1]
         targetblock = d["blocks"][0]
         expr = deserialize(targetblock["parameters"])
-        block = cls(control, target, expr)  # type: ignore[call-arg]
+        block = cls(control, target, NoiseHandler._from_dict(d.get("noise")), expr)  # type: ignore[call-arg]
         return block
 
     @property
@@ -497,6 +522,7 @@ class ProjectorBlock(PrimitiveBlock):
         ket: str,
         bra: str,
         qubit_support: int | tuple[int, ...],
+        noise: NoiseHandler | None = None,
     ) -> None:
         """
         Arguments:
@@ -522,4 +548,4 @@ class ProjectorBlock(PrimitiveBlock):
 
         self.ket = ket
         self.bra = bra
-        super().__init__(qubit_support)
+        super().__init__(qubit_support, noise=noise)
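Illustrative usage (not part of the released diff): primitive blocks now expose a noise property and include it in (de)serialization. That gate constructors such as X forward a noise keyword is inferred from the companion changes in qadence/operations/primitive.py; the NoiseHandler construction repeats the assumption above.

from qadence import X
from qadence.noise import NoiseHandler
from qadence.types import NoiseProtocol  # assumed location of the protocol enum

noise = NoiseHandler(NoiseProtocol.DIGITAL.BITFLIP, {"error_probability": 0.1})
noisy_x = X(0, noise=noise)

assert noisy_x.noise is noise                    # new PrimitiveBlock.noise property
assert noisy_x._to_dict()["noise"] is not None   # noise is now serialized with the block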
qadence/circuit.py
CHANGED
qadence/constructors/__init__.py
CHANGED
@@ -5,7 +5,7 @@ from .feature_maps import (
     exp_fourier_feature_map,
 )
 
-from .ansatze import hea
+from .ansatze import hea, alt
 
 from .iia import identity_initialized_ansatz
 
@@ -29,6 +29,7 @@ __all__ = [
     "feature_map",
     "exp_fourier_feature_map",
     "hea",
+    "alt",
     "identity_initialized_ansatz",
     "hamiltonian_factory",
     "ising_hamiltonian",
qadence/constructors/ansatze.py
CHANGED
@@ -320,3 +320,179 @@ def hea_bDAQC(*args: Any, **kwargs: Any) -> Any:
 
 def hea_analog(*args: Any, **kwargs: Any) -> Any:
     raise NotImplementedError
+
+
+def alt(
+    n_qubits: int,
+    m_block_qubits: int,
+    depth: int = 1,
+    param_prefix: str = "theta",
+    support: tuple[int, ...] = None,
+    strategy: Strategy = Strategy.DIGITAL,
+    **strategy_args: Any,
+) -> AbstractBlock:
+    """
+    Factory function for the alternating layer ansatz (alt).
+
+    Args:
+        n_qubits: number of qubits in the block
+        m_block_qubits: number of qubits in the local entangling block
+        depth: number of layers of the alt
+        param_prefix: the base name of the variational parameters
+        support: qubit indexes where the alt is applied
+        strategy: Strategy.Digital or Strategy.DigitalAnalog
+        **strategy_args: see below
+
+    Keyword Arguments:
+        operations (list): list of operations to cycle through in the
+            digital single-qubit rotations of each layer. Valid for
+            Digital .
+        entangler (AbstractBlock):
+            - Digital: 2-qubit entangling operation. Supports CNOT, CZ,
+            CRX, CRY, CRZ, CPHASE. Controlled rotations will have variational
+            parameters on the rotation angles.
+    """
+
+    if support is None:
+        support = tuple(range(n_qubits))
+
+    alt_func_dict = {
+        Strategy.DIGITAL: alt_digital,
+        Strategy.SDAQC: alt_sDAQC,
+        Strategy.BDAQC: alt_bDAQC,
+        Strategy.ANALOG: alt_analog,
+    }
+
+    try:
+        alt_func = alt_func_dict[strategy]
+    except KeyError:
+        raise KeyError(f"Strategy {strategy} not recognized.")
+
+    hea_block: AbstractBlock = alt_func(
+        n_qubits=n_qubits,
+        m_block_qubits=m_block_qubits,
+        depth=depth,
+        param_prefix=param_prefix,
+        support=support,
+        **strategy_args,
+    )  # type: ignore
+
+    return hea_block
+
+
+#################
+## DIGITAL ALT ##
+#################
+
+
+def _entanglers_block_digital(
+    n_qubits: int,
+    m_block_qubits: int,
+    depth: int,
+    param_prefix: str = "theta",
+    support: tuple[int, ...] = None,
+    entangler: Type[DigitalEntanglers] = CNOT,
+) -> list[AbstractBlock]:
+    if support is None:
+        support = tuple(range(n_qubits))
+    iterator = itertools.count()
+    ent_list: list[AbstractBlock] = []
+
+    for d in range(depth):
+        start_i = 0 if not d % 2 else -m_block_qubits // 2
+        ents = [
+            kron(
+                _entangler(
+                    control=support[i + j],
+                    target=support[i + j + 1],
+                    param_str=param_prefix + f"_ent_{next(iterator)}",
+                    op=entangler,
+                )
+                for j in range(start_j, m_block_qubits, 2)
+                for i in range(start_i, n_qubits, m_block_qubits)
+                if i + j + 1 < n_qubits and j + 1 < m_block_qubits and i + j >= 0
+            )
+            for start_j in [i for i in range(2) if m_block_qubits > 2 or i == 0]
+        ]
+
+        ent_list.append(chain(*ents))
+    return ent_list
+
+
+def alt_digital(
+    n_qubits: int,
+    m_block_qubits: int,
+    depth: int = 1,
+    support: tuple[int, ...] = None,
+    param_prefix: str = "theta",
+    operations: list[type[AbstractBlock]] = [RX, RY],
+    entangler: Type[DigitalEntanglers] = CNOT,
+) -> AbstractBlock:
+    """
+    Construct the digital alternating layer ansatz (ALT).
+
+    Args:
+        n_qubits (int): number of qubits in the ansatz.
+        m_block_qubits (int): number of qubits in the local entangling block.
+        depth (int): number of layers of the ALT.
+        param_prefix (str): the base name of the variational parameters
+        operations (list): list of operations to cycle through in the
+            digital single-qubit rotations of each layer.
+        support (tuple): qubit indexes where the ALT is applied.
+        entangler (AbstractBlock): 2-qubit entangling operation.
+            Supports CNOT, CZ, CRX, CRY, CRZ. Controlld rotations
+            will have variational parameters on the rotation angles.
+    """
+
+    try:
+        if entangler not in [CNOT, CZ, CRX, CRY, CRZ, CPHASE]:
+            raise ValueError(
+                "Please provide a valid two-qubit entangler operation for digital ALT."
+            )
+    except TypeError:
+        raise ValueError("Please provide a valid two-qubit entangler operation for digital ALT.")
+
+    rot_list = _rotations_digital(
+        n_qubits=n_qubits,
+        depth=depth,
+        support=support,
+        param_prefix=param_prefix,
+        operations=operations,
+    )
+
+    ent_list = _entanglers_block_digital(
+        n_qubits,
+        m_block_qubits,
+        param_prefix=param_prefix + "_ent",
+        depth=depth,
+        support=support,
+        entangler=entangler,
+    )
+
+    layers = []
+    for d in range(depth):
+        layers.append(rot_list[d])
+        layers.append(ent_list[d])
+
+    return tag(chain(*layers), "ALT")
+
+
+#################
+## sdaqc ALT ##
+#################
+def alt_sDAQC(*args: Any, **kwargs: Any) -> Any:
+    raise NotImplementedError
+
+
+#################
+## bdaqc ALT ##
+#################
+def alt_bDAQC(*args: Any, **kwargs: Any) -> Any:
+    raise NotImplementedError
+
+
+#################
+## analog ALT ##
+#################
+def alt_analog(*args: Any, **kwargs: Any) -> Any:
+    raise NotImplementedError
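Illustrative usage (not part of the released diff): the new ansatz is exported from qadence.constructors (see the __init__.py hunk above), so the digital strategy shown here can be built directly.

from qadence.constructors import alt

# 8 qubits arranged in local entangling blocks of 4, with 2 alternating layers.
ansatz = alt(n_qubits=8, m_block_qubits=4, depth=2, param_prefix="theta")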
qadence/engines/differentiable_backend.py
CHANGED
@@ -11,7 +11,7 @@ from qadence.blocks.utils import uuid_to_block
 from qadence.circuit import QuantumCircuit
 from qadence.measurements import Measurements
 from qadence.mitigations import Mitigations
-from qadence.noise import Noise
+from qadence.noise import NoiseHandler
 from qadence.types import ArrayLike, DiffMode, Endianness, Engine, ParamDictType
 
 
@@ -40,7 +40,7 @@ class DifferentiableBackend(ABC):
         param_values: ParamDictType = {},
         n_shots: int = 100,
         state: ArrayLike | None = None,
-        noise: Noise | None = None,
+        noise: NoiseHandler | None = None,
         mitigation: Mitigations | None = None,
         endianness: Endianness = Endianness.BIG,
     ) -> list[Counter]:
@@ -89,7 +89,7 @@ class DifferentiableBackend(ABC):
         param_values: ParamDictType = {},
         state: ArrayLike | None = None,
         measurement: Measurements | None = None,
-        noise: Noise | None = None,
+        noise: NoiseHandler | None = None,
         mitigation: Mitigations | None = None,
         endianness: Endianness = Endianness.BIG,
     ) -> Any:
qadence/engines/jax/differentiable_backend.py
CHANGED
@@ -7,7 +7,7 @@ from qadence.engines.differentiable_backend import (
 from qadence.engines.jax.differentiable_expectation import DifferentiableExpectation
 from qadence.measurements import Measurements
 from qadence.mitigations import Mitigations
-from qadence.noise import Noise
+from qadence.noise import NoiseHandler
 from qadence.types import ArrayLike, DiffMode, Endianness, Engine, ParamDictType
 
 
@@ -36,7 +36,7 @@ class DifferentiableBackend(DifferentiableBackendInterface):
         param_values: ParamDictType = {},
         state: ArrayLike | None = None,
         measurement: Measurements | None = None,
-        noise: Noise | None = None,
+        noise: NoiseHandler | None = None,
         mitigation: Mitigations | None = None,
         endianness: Endianness = Endianness.BIG,
     ) -> ArrayLike:
qadence/engines/jax/differentiable_expectation.py
CHANGED
@@ -14,7 +14,7 @@ from qadence.backends.jax_utils import (
 from qadence.blocks.utils import uuid_to_eigen
 from qadence.measurements import Measurements
 from qadence.mitigations import Mitigations
-from qadence.noise import Noise
+from qadence.noise import NoiseHandler
 from qadence.types import Endianness, Engine, ParamDictType
 
 
@@ -34,7 +34,7 @@ class DifferentiableExpectation:
     param_values: ParamDictType
     state: Array | None = None
     measurement: Measurements | None = None
-    noise: Noise | None = None
+    noise: NoiseHandler | None = None
     mitigation: Mitigations | None = None
     endianness: Endianness = Endianness.BIG
     engine: Engine = Engine.JAX
qadence/engines/torch/differentiable_backend.py
CHANGED
@@ -11,7 +11,7 @@ from qadence.engines.torch.differentiable_expectation import DifferentiableExpec
 from qadence.extensions import get_gpsr_fns
 from qadence.measurements import Measurements
 from qadence.mitigations import Mitigations
-from qadence.noise import Noise
+from qadence.noise import NoiseHandler
 from qadence.types import ArrayLike, DiffMode, Endianness, Engine, ParamDictType
 
 
@@ -40,7 +40,7 @@ class DifferentiableBackend(DifferentiableBackendInterface):
         param_values: ParamDictType = {},
         state: ArrayLike | None = None,
         measurement: Measurements | None = None,
-        noise: Noise | None = None,
+        noise: NoiseHandler | None = None,
         mitigation: Mitigations | None = None,
         endianness: Endianness = Endianness.BIG,
     ) -> ArrayLike:
qadence/engines/torch/differentiable_expectation.py
CHANGED
@@ -19,7 +19,7 @@ from qadence.circuit import QuantumCircuit
 from qadence.measurements import Measurements
 from qadence.mitigations import Mitigations
 from qadence.ml_tools import promote_to_tensor
-from qadence.noise import Noise
+from qadence.noise import NoiseHandler
 from qadence.types import Endianness
 
 
@@ -95,7 +95,7 @@ class DifferentiableExpectation:
     param_values: dict[str, Tensor]
     state: Tensor | None = None
     measurement: Measurements | None = None
-    noise: Noise | None = None
+    noise: NoiseHandler | None = None
     mitigation: Mitigations | None = None
     endianness: Endianness = Endianness.BIG
 
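Migration sketch (not part of the released diff): the hunks above rename the noise type from Noise to NoiseHandler across the engine interfaces. Both constructor calls below are assumptions shown only to illustrate the rename; check qadence/noise/protocols.py in each version for the real signatures.

# qadence 1.7.8 (old, assumed usage):
# from qadence.noise import Noise
# noise = Noise(Noise.READOUT)

# qadence 1.9.0 (new, assumed protocol/option names):
from qadence.noise import NoiseHandler
from qadence.types import NoiseProtocol

noise = NoiseHandler(NoiseProtocol.READOUT, {"error_probability": 0.05})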
qadence/execution.py
CHANGED
@@ -12,7 +12,7 @@ from qadence.blocks import AbstractBlock
 from qadence.circuit import QuantumCircuit
 from qadence.measurements import Measurements
 from qadence.mitigations import Mitigations
-from qadence.noise import Noise
+from qadence.noise import NoiseHandler
 from qadence.qubit_support import QubitSupport
 from qadence.register import Register
 from qadence.types import BackendName, DiffMode, Endianness
@@ -35,7 +35,7 @@ def _n_qubits_block(block: AbstractBlock) -> int:
 def run(
     x: Union[QuantumCircuit, AbstractBlock, Register, int],
     *args: Any,
-    values: dict =
+    values: Union[dict, None] = None,
     state: Tensor = None,
     backend: BackendName = BackendName.PYQTORCH,
     endianness: Endianness = Endianness.BIG,
@@ -65,7 +65,7 @@ def run(
 @run.register
 def _(
     circuit: QuantumCircuit,
-    values: dict =
+    values: Union[dict, None] = None,
     state: Tensor = None,
     backend: BackendName = BackendName.PYQTORCH,
     endianness: Endianness = Endianness.BIG,
@@ -79,7 +79,7 @@ def _(
     with no_grad():
         return bknd.run(
             circuit=conv.circuit,
-            param_values=conv.embedding_fn(conv.params, values),
+            param_values=conv.embedding_fn(conv.params, values or dict()),
             state=state,
             endianness=endianness,
         )
@@ -113,12 +113,12 @@ def _(circs: list, **kwargs: Any) -> Tensor:  # type: ignore[misc]
 def sample(
     x: Union[QuantumCircuit, AbstractBlock, Register, int],
     *args: Any,
-    values: dict =
+    values: Union[dict, None] = None,
     state: Union[Tensor, None] = None,
     n_shots: int = 100,
     backend: BackendName = BackendName.PYQTORCH,
     endianness: Endianness = Endianness.BIG,
-    noise: Union[Noise, None] = None,
+    noise: Union[NoiseHandler, None] = None,
     configuration: Union[BackendConfiguration, dict, None] = None,
 ) -> list[Counter]:
     """Convenience wrapper for the `QuantumModel.sample` method.
@@ -142,11 +142,11 @@ def sample(
 @sample.register
 def _(
     circuit: QuantumCircuit,
-    values: dict =
+    values: Union[dict, None] = None,
     state: Union[Tensor, None] = None,
     n_shots: int = 100,
     backend: BackendName = BackendName.PYQTORCH,
-    noise: Union[Noise, None] = None,
+    noise: Union[NoiseHandler, None] = None,
     endianness: Endianness = Endianness.BIG,
     configuration: Union[BackendConfiguration, dict, None] = None,
 ) -> list[Counter]:
@@ -157,7 +157,7 @@ def _(
     conv = bknd.convert(circuit)
     return bknd.sample(
         circuit=conv.circuit,
-        param_values=conv.embedding_fn(conv.params, values),
+        param_values=conv.embedding_fn(conv.params, values or dict()),
         n_shots=n_shots,
         state=state,
         noise=noise,
@@ -185,11 +185,11 @@ def _(block: AbstractBlock, **kwargs: Any) -> Tensor:
 def expectation(
     x: Union[QuantumCircuit, AbstractBlock, Register, int],
     observable: Union[list[AbstractBlock], AbstractBlock],
-    values: dict =
+    values: Union[dict, None] = None,
     state: Tensor = None,
     backend: BackendName = BackendName.PYQTORCH,
     diff_mode: Union[DiffMode, str, None] = None,
-    noise: Union[Noise, None] = None,
+    noise: Union[NoiseHandler, None] = None,
     endianness: Endianness = Endianness.BIG,
     configuration: Union[BackendConfiguration, dict, None] = None,
 ) -> Tensor:
@@ -237,19 +237,17 @@ def expectation(
 def _(
     circuit: QuantumCircuit,
     observable: Union[list[AbstractBlock], AbstractBlock],
-    values: dict =
+    values: Union[dict, None] = None,
     state: Tensor = None,
     backend: BackendName = BackendName.PYQTORCH,
     diff_mode: Union[DiffMode, str, None] = None,
     measurement: Measurements = None,
-    noise: Union[Noise, None] = None,
+    noise: Union[NoiseHandler, None] = None,
     mitigation: Mitigations = None,
     endianness: Endianness = Endianness.BIG,
     configuration: Union[BackendConfiguration, dict, None] = None,
 ) -> Tensor:
     observable = observable if isinstance(observable, list) else [observable]
-    if backend == BackendName.PYQTORCH:
-        diff_mode = DiffMode.AD
     bknd = backend_factory(backend, diff_mode=diff_mode, configuration=configuration)
     conv = bknd.convert(circuit, observable)
 
@@ -257,7 +255,7 @@ def _(
     return bknd.expectation(
         circuit=conv.circuit,
         observable=conv.observable,  # type: ignore[arg-type]
-        param_values=conv.embedding_fn(conv.params, values),
+        param_values=conv.embedding_fn(conv.params, values or dict()),
         state=state,
         measurement=measurement,
         noise=noise,
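Illustrative usage (not part of the released diff): values is now optional in run/sample/expectation, and noise takes a NoiseHandler; the handler construction repeats the earlier assumption about the 1.9.0 noise API.

from qadence import QuantumCircuit, X, Z
from qadence.execution import expectation, run, sample
from qadence.noise import NoiseHandler
from qadence.types import NoiseProtocol  # assumed location of the protocol enum

circuit = QuantumCircuit(2, X(0))

wf = run(circuit)  # no `values` needed for a parameter-free circuit
counts = sample(
    circuit,
    n_shots=100,
    noise=NoiseHandler(NoiseProtocol.DIGITAL.BITFLIP, {"error_probability": 0.1}),
)
exp = expectation(circuit, Z(0))  # diff_mode is no longer silently forced to AD for pyqtorch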
qadence/extensions.py
CHANGED
@@ -138,7 +138,7 @@ def _set_backend_config(backend: Backend, diff_mode: DiffMode) -> None:
 
     _validate_diff_mode(backend, diff_mode)
 
-    # (1) When using PSR with any backend or (2) we use the backends Pulser
+    # (1) When using PSR with any backend or (2) we use the backends Pulser,
     # we have to use gate-level parameters
 
     # We can use expression-level parameters for AD.
qadence/log_config.yaml
CHANGED
@@ -4,11 +4,17 @@ formatters:
   base:
     format: "%(levelname) -5s %(asctime)s - %(name)s: %(message)s"
     datefmt: "%Y-%m-%d %H:%M:%S"
+  empty:
+    format: "%(message)s" # Rich formatter for cleaner output
+    datefmt: "%Y-%m-%d %H:%M:%S"
 handlers:
   console:
     class: logging.StreamHandler
     formatter: base
     stream: ext://sys.stderr
+  richconsole:
+    class: rich.logging.RichHandler
+    formatter: empty
 loggers:
   qadence:
     level: INFO
@@ -22,3 +28,7 @@ loggers:
     level: INFO
     handlers: [console]
     propagate: yes
+  ml_tools:
+    level: INFO
+    handlers: [richconsole]
+    propagate: false
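Illustrative usage (not part of the released diff): a minimal way to load this configuration and reach the new ml_tools logger. qadence is expected to apply the file itself at import time; the resource path below and the presence of the version: 1 key required by dictConfig are assumptions.

import logging.config
from importlib import resources

import yaml

# Load the shipped logging config (path assumed) and apply it.
with resources.files("qadence").joinpath("log_config.yaml").open() as f:
    logging.config.dictConfig(yaml.safe_load(f))

# Messages on the `ml_tools` logger now go through rich.logging.RichHandler.
logging.getLogger("ml_tools").info("training started")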