emu-sv 2.5.1__py3-none-any.whl → 2.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
emu_sv/__init__.py
CHANGED
|
@@ -13,6 +13,7 @@ from pulser.backend import (
|
|
|
13
13
|
)
|
|
14
14
|
|
|
15
15
|
from .dense_operator import DenseOperator
|
|
16
|
+
from .sparse_operator import SparseOperator
|
|
16
17
|
from .sv_backend import SVBackend, SVConfig
|
|
17
18
|
from .state_vector import StateVector, inner
|
|
18
19
|
from .density_matrix_state import DensityMatrix
|
|
@@ -36,6 +37,7 @@ __all__ = [
|
|
|
36
37
|
"StateVector",
|
|
37
38
|
"inner",
|
|
38
39
|
"DensityMatrix",
|
|
40
|
+
"SparseOperator",
|
|
39
41
|
]
|
|
40
42
|
|
|
41
|
-
__version__ = "2.
|
|
43
|
+
__version__ = "2.6.0"
|
|
@@ -0,0 +1,221 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from functools import reduce
|
|
4
|
+
|
|
5
|
+
import torch
|
|
6
|
+
|
|
7
|
+
from typing import Sequence, Type
|
|
8
|
+
|
|
9
|
+
from emu_base import DEVICE_COUNT
|
|
10
|
+
from emu_sv.state_vector import StateVector
|
|
11
|
+
|
|
12
|
+
from pulser.backend import (
|
|
13
|
+
Operator,
|
|
14
|
+
State,
|
|
15
|
+
)
|
|
16
|
+
from pulser.backend.operator import FullOp, QuditOp
|
|
17
|
+
from pulser.backend.state import Eigenstate
|
|
18
|
+
|
|
19
|
+
# Module-wide complex dtype: every operator matrix in this module is
# converted to complex128 before use.
dtype = torch.complex128
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def sparse_add(self: torch.Tensor, other: torch.Tensor) -> torch.Tensor:
    """Add two coalesced sparse COO tensors.

    The entries of both tensors are simply concatenated; the final
    ``coalesce`` call sorts the indices and sums any duplicates, which is
    exactly element-wise addition.

    Args:
        self: a coalesced 2-D sparse COO tensor.
        other: a coalesced sparse COO tensor of the same shape.

    Returns:
        The coalesced sparse COO sum ``self + other``.
    """
    joined_indices = torch.cat((self.indices(), other.indices()), dim=1)
    joined_values = torch.cat((self.values(), other.values()))
    stacked = torch.sparse_coo_tensor(joined_indices, joined_values, size=self.shape)
    return stacked.coalesce()
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def sparse_kron(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    """Kronecker product of two 2-D sparse COO tensors.

    Args:
        a: left factor, a 2-D sparse COO tensor.
        b: right factor, a 2-D sparse COO tensor.

    Returns:
        A coalesced sparse COO tensor of shape
        ``(a.rows * b.rows, a.cols * b.cols)`` equal to ``a ⊗ b``.
    """
    a, b = a.coalesce(), b.coalesce()
    sa, sb = a.shape, b.shape
    shape = (sa[0] * sb[0], sa[1] * sb[1])
    # Each nonzero (i, j) of `a` offsets a full copy of `b`'s index grid by
    # (i * b.rows, j * b.cols); broadcasting builds all index pairs at once.
    i = (
        torch.tensor(sb).reshape(2, 1, 1) * a.indices().reshape(2, -1, 1)
        + b.indices().reshape(2, 1, -1)
    ).reshape(2, -1)
    # Outer product pairs every value of `a` with every value of `b`, in the
    # same pair order as the indices above.
    v = torch.outer(a.values(), b.values()).flatten()
    # BUG FIX: previously this passed is_coalesced=True, but the pair indices
    # are generally NOT in sorted row-major order (e.g. `a` with two nonzeros
    # in one row crossed with a multi-row `b` yields row indices 0,1,0,1),
    # which violates torch's coalesced invariant (sorted, duplicate-free).
    # Kron pairs are always duplicate-free, but they still must be sorted;
    # coalesce() establishes the invariant honestly.
    return torch.sparse_coo_tensor(i, v, shape).coalesce()
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
class SparseOperator(Operator[complex, torch.Tensor, StateVector]):
    """This operator is used to represent a sparse matrix in CSR (Compressed Sparse Row)
    format for efficient computation on the EMU-SV emulator.

    Args:
        matrix (torch.Tensor): the matrix representation of the operator;
            it is converted to complex128 and moved to the chosen device.
        gpu (bool): use GPU for computation; when True but no GPU is
            available, silently falls back to the CPU.
    """

    def __init__(
        self,
        matrix: torch.Tensor,
        *,
        gpu: bool = True,
    ):
        # gpu=True is only a request: DEVICE_COUNT (from emu_base) gates the
        # actual placement, so this never fails on CPU-only machines.
        device = "cuda" if gpu and DEVICE_COUNT > 0 else "cpu"
        # Normalize dtype/device up front so later sparse arithmetic never
        # mixes precisions or devices.
        self.matrix = matrix.to(dtype=dtype, device=device)

    def __repr__(self) -> str:
        return repr(self.matrix)

    def __add__(self, other: Operator) -> SparseOperator:
        """
        Element-wise addition of two SparseOperators.

        Args:
            other: a SparseOperator instance.

        Returns:
            A new SparseOperator representing the sum.
        """
        assert isinstance(
            other, SparseOperator
        ), "SparseOperator can only be added to another SparseOperator."
        # TODO: figure out a better algorithm.
        # self.matrix + other.matrix doesn't work on mac.
        # Workaround: convert CSR -> COO, add via concatenate + coalesce
        # (sparse_add), then convert back to CSR.
        return SparseOperator(
            sparse_add(
                self.matrix.to_sparse_coo(), other.matrix.to_sparse_coo()
            ).to_sparse_csr()
        )

    def __rmul__(self, scalar: complex) -> SparseOperator:
        """
        Scalar multiplication of the SparseOperator.

        Args:
            scalar: a number to scale the operator.

        Returns:
            A new SparseOperator scaled by the given scalar.
        """

        return SparseOperator(scalar * self.matrix)

    def __matmul__(self, other: Operator) -> SparseOperator:
        """
        Compose two SparseOperators via matrix multiplication.

        Args:
            other: a SparseOperator instance.

        Returns:
            A new SparseOperator representing the product `self @ other`.

        Raises:
            NotImplementedError: always; sparse-sparse composition is not
                implemented yet.
        """
        raise NotImplementedError()

    def apply_to(self, other: State) -> StateVector:
        """
        Apply the SparseOperator to a given StateVector.

        Args:
            other: a StateVector instance.

        Returns:
            A new StateVector after applying the operator.
        """
        assert isinstance(
            other, StateVector
        ), "SparseOperator can only be applied to a StateVector."

        # Sparse CSR matrix x dense vector product.
        return StateVector(self.matrix @ other.vector)

    def expect(self, state: State) -> torch.Tensor:
        """
        Compute the expectation value of the operator with respect to a state.

        Args:
            state: a StateVector instance.

        Returns:
            The expectation value <state|self|state> as a 0-dimensional
            tensor, moved to the CPU.
        """
        assert isinstance(
            state, StateVector
        ), "Only expectation values of StateVectors are supported."

        # vdot conjugates its first argument, giving <psi| (A |psi>).
        return torch.vdot(state.vector, self.apply_to(state).vector).cpu()

    @classmethod
    def _from_operator_repr(
        cls: Type[SparseOperator],
        *,
        eigenstates: Sequence[Eigenstate],
        n_qudits: int,
        operations: FullOp[complex],
    ) -> tuple[SparseOperator, FullOp[complex]]:
        """
        Construct a SparseOperator from an abstract operator representation.

        Args:
            eigenstates: the eigenstates of the basis to use, e.g. ("r", "g") or ("0", "1").
            n_qudits: number of qudits in the system.
            operations: a weighted sum of terms, each a list of
                (single-qudit operator, target qudits) pairs; the operator is
                either a tensor or a dict of string keys with weights.

        Returns:
            A SparseOperator corresponding to the given representation,
            together with the (unmodified) operations.
        """

        assert len(set(eigenstates)) == 2, "Only qubits are supported in EMU-SV."

        # Maps operator-string keys either to a resolved sparse tensor or to a
        # still-unresolved QuditOp; resolved lazily and memoized below.
        operators_with_tensors: dict[str, torch.Tensor | QuditOp] = dict()

        if set(eigenstates) == {"r", "g"}:
            # operators_with_tensors will now contain the basis for single qubit ops,
            # and potentially user defined strings in terms of {r, g} or {0, 1}
            operators_with_tensors |= {
                "gg": torch.tensor([[1.0, 0.0], [0.0, 0.0]], dtype=dtype).to_sparse_coo(),
                "rg": torch.tensor([[0.0, 0.0], [1.0, 0.0]], dtype=dtype).to_sparse_coo(),
                "gr": torch.tensor([[0.0, 1.0], [0.0, 0.0]], dtype=dtype).to_sparse_coo(),
                "rr": torch.tensor([[0.0, 0.0], [0.0, 1.0]], dtype=dtype).to_sparse_coo(),
            }
        elif set(eigenstates) == {"0", "1"}:
            raise NotImplementedError(
                "{'0','1'} basis is related to XY Hamiltonian, which is not implemented"
            )
        else:
            raise ValueError("An unsupported basis of eigenstates has been provided.")

        # Start from the all-zero sparse matrix over the full Hilbert space
        # (empty index and value lists) and accumulate each term into it.
        accum_res = torch.sparse_coo_tensor(
            torch.zeros(2, 0, dtype=torch.int32),
            torch.zeros(0, dtype=dtype),
            (2**n_qudits, 2**n_qudits),
        )
        for coeff, oper_torch_with_target_qubits in operations:

            def build_torch_operator_from_string(
                oper: QuditOp | torch.Tensor,
            ) -> torch.Tensor:
                # Recursively resolve a QuditOp (dict of key -> weight) into a
                # 2x2 sparse tensor; already-resolved tensors pass through.
                # NOTE: the `coeff` bound in the loop below is local to this
                # function and intentionally shadows the outer term coefficient.
                if isinstance(oper, torch.Tensor):
                    return oper

                result: torch.Tensor = torch.zeros((2, 2), dtype=dtype).to_sparse_coo()
                for opstr, coeff in oper.items():
                    tensor = build_torch_operator_from_string(
                        operators_with_tensors[opstr]
                    )
                    # Memoize the resolved tensor so each key is built once.
                    operators_with_tensors[opstr] = tensor
                    result += tensor * coeff
                return result

            # Identity on every qudit; entries are replaced below (never
            # mutated in place), so repeating one tensor object is safe.
            single_qubit_gates = [torch.eye(2, dtype=dtype).to_sparse_coo()] * n_qudits

            for operator_torch, target_qubits in oper_torch_with_target_qubits:
                factor = build_torch_operator_from_string(operator_torch)
                for target_qubit in target_qubits:
                    single_qubit_gates[target_qubit] = factor

            # This term's full-space matrix is the Kronecker product of the
            # per-qudit factors, weighted by the term coefficient.
            accum_res = sparse_add(
                accum_res, coeff * reduce(sparse_kron, single_qubit_gates)
            )

        return SparseOperator(accum_res.to_sparse_csr()), operations

    def __deepcopy__(self, memo: dict) -> SparseOperator:
        """torch CSR tensor does not deepcopy automatically"""
        cls = self.__class__
        # Clone the underlying tensor; gpu= keeps it on its current device.
        result = cls(torch.clone(self.matrix), gpu=self.matrix.is_cuda)
        memo[id(self)] = result
        return result
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: emu-sv
|
|
3
|
-
Version: 2.
|
|
3
|
+
Version: 2.6.0
|
|
4
4
|
Summary: Pasqal State Vector based pulse emulator built on PyTorch
|
|
5
5
|
Project-URL: Documentation, https://pasqal-io.github.io/emulators/
|
|
6
6
|
Project-URL: Repository, https://github.com/pasqal-io/emulators
|
|
@@ -25,7 +25,7 @@ Classifier: Programming Language :: Python :: 3.10
|
|
|
25
25
|
Classifier: Programming Language :: Python :: Implementation :: CPython
|
|
26
26
|
Classifier: Programming Language :: Python :: Implementation :: PyPy
|
|
27
27
|
Requires-Python: >=3.10
|
|
28
|
-
Requires-Dist: emu-base==2.
|
|
28
|
+
Requires-Dist: emu-base==2.6.0
|
|
29
29
|
Description-Content-Type: text/markdown
|
|
30
30
|
|
|
31
31
|
<div align="center">
|
|
@@ -1,15 +1,16 @@
|
|
|
1
|
-
emu_sv/__init__.py,sha256=
|
|
1
|
+
emu_sv/__init__.py,sha256=0ykYwInHIgbHkUig_cY9uFesmU4F8bc8D_DIV511yEo,837
|
|
2
2
|
emu_sv/custom_callback_implementations.py,sha256=_7XLIDzJ-p3DVqz-Jyv0eYbl8nih2x2p-pM4cBCLumA,6367
|
|
3
3
|
emu_sv/dense_operator.py,sha256=GvF0swsiFRqp83bpyaU_CXap2vm74-JLI5lHo-0Hbdk,5901
|
|
4
4
|
emu_sv/density_matrix_state.py,sha256=6QmLZvqEHLR64r0nD7D2jZIiAYOgciNVCjh3ywfvIs0,7243
|
|
5
5
|
emu_sv/hamiltonian.py,sha256=CqNGuWJlO2ZljK47wt130s-5uKiOldQUsC3tjwk1mKA,6106
|
|
6
6
|
emu_sv/lindblad_operator.py,sha256=pgjRNLBcvEM2-qxM8uy9wL74OtrD4A8trQeERi_AXH8,8892
|
|
7
|
+
emu_sv/sparse_operator.py,sha256=xHJapSAKaMCgT5nG0gzMXGk2fCfjHY03OTO_rysszns,7535
|
|
7
8
|
emu_sv/state_vector.py,sha256=v4rqC_qBGc5vO5EMHcHR6BdASjeKujO6_sCdd3pGd0c,9990
|
|
8
9
|
emu_sv/sv_backend.py,sha256=-soOkSEzEBK1dCKnYnbtvYjmNZtZra1_4jP3H1ROOtM,737
|
|
9
10
|
emu_sv/sv_backend_impl.py,sha256=-xWE30B5RI32nOG2pUR8lL3q-wufwvzxegiJexW5g4w,8952
|
|
10
11
|
emu_sv/sv_config.py,sha256=o1esIqflxfN1ZtdwoVBAIWlzZIf9B5X9pvsQe1zHfdg,5433
|
|
11
12
|
emu_sv/time_evolution.py,sha256=Uy3qMdt3BlLB6Aq1-o5uajRTu_3fPuBCtcusHxFPPJc,13545
|
|
12
13
|
emu_sv/utils.py,sha256=t0nMDVo6DF5bQW-vbsyRMCmvkyNxCU-v0Enmns9aOAU,1151
|
|
13
|
-
emu_sv-2.
|
|
14
|
-
emu_sv-2.
|
|
15
|
-
emu_sv-2.
|
|
14
|
+
emu_sv-2.6.0.dist-info/METADATA,sha256=YNa_iPn5JUl5N9FMdaHOwF1Pm000JwsYkLUUcKB-8FQ,3595
|
|
15
|
+
emu_sv-2.6.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
|
|
16
|
+
emu_sv-2.6.0.dist-info/RECORD,,
|
|
File without changes
|