emu-mps 1.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- emu_mps/__init__.py +38 -0
- emu_mps/algebra.py +151 -0
- emu_mps/hamiltonian.py +449 -0
- emu_mps/mpo.py +243 -0
- emu_mps/mps.py +528 -0
- emu_mps/mps_backend.py +35 -0
- emu_mps/mps_backend_impl.py +525 -0
- emu_mps/mps_config.py +64 -0
- emu_mps/noise.py +29 -0
- emu_mps/tdvp.py +209 -0
- emu_mps/utils.py +258 -0
- emu_mps-1.2.1.dist-info/METADATA +133 -0
- emu_mps-1.2.1.dist-info/RECORD +14 -0
- emu_mps-1.2.1.dist-info/WHEEL +4 -0
emu_mps/tdvp.py
ADDED
@@ -0,0 +1,209 @@
import torch

from emu_base import krylov_exp
from emu_mps import MPS, MPO
from emu_mps.utils import split_tensor


def new_right_bath(
    bath: torch.Tensor, state: torch.Tensor, op: torch.Tensor
) -> torch.Tensor:
    bath = torch.tensordot(state, bath, ([2], [2]))
    bath = torch.tensordot(op.to(bath.device), bath, ([2, 3], [1, 3]))
    return torch.tensordot(state.conj(), bath, ([1, 2], [1, 3]))


def right_baths(state: MPS, op: MPO, final_qubit: int) -> list[torch.Tensor]:
    """
    Function to compute the right baths. The three indices in a bath are as follows:
    (bond of state conj, bond of operator, bond of state).
    The baths have shape

        -xx
        -xx
        -xx

    with the index ordering (top, middle, bottom).
    Bath tensors are put on the device of the factor to the left.
    """
    state_factor = state.factors[-1]
    bath = torch.ones(1, 1, 1, device=state_factor.device, dtype=state_factor.dtype)
    baths = [bath]
    for i in range(len(state.factors) - 1, final_qubit - 1, -1):
        bath = new_right_bath(bath, state.factors[i], op.factors[i])
        bath = bath.to(state.factors[i - 1].device)
        baths.append(bath)
    return baths
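As a sanity check on the index bookkeeping above, here is a minimal shape test; the bond dimensions are made up for illustration, and it assumes the emu-mps package is importable.

```python
import torch

from emu_mps.tdvp import new_right_bath

# Hypothetical dimensions: state factor (left bond, physical, right bond),
# MPO factor (left bond, out, in, right bond), bath (top, middle, bottom).
state = torch.randn(3, 2, 2, dtype=torch.complex128)
op = torch.randn(4, 2, 2, 5, dtype=torch.complex128)
bath = torch.randn(2, 5, 2, dtype=torch.complex128)

new_bath = new_right_bath(bath, state, op)
# The new bath keeps the (top, middle, bottom) ordering, now indexed by the
# left bonds of the conjugated state, the operator and the state.
assert new_bath.shape == (3, 4, 3)
```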


def apply_effective_Hamiltonian(
    state: torch.Tensor,
    ham: torch.Tensor,
    left_bath: torch.Tensor,
    right_bath: torch.Tensor,
) -> torch.Tensor:
    """
    Computes H(psi) where

            x-        -x
            x   ||     x          ||
        H = x- -xx-   -x,  psi = -xx-
            x   ||     x
            x-        -x

    Expects the two qubit factors of the MPS precontracted,
    with one 'fat' physical index of dim 4 and index ordering
    (left bond, physical index, right bond):

           ||
        -xxxxxx-

    The Hamiltonian should have an index ordering of
    (left bond, out, in, right bond).
    The baths must have shape (top, middle, bottom).
    All tensors must be on the same device.
    """
    assert left_bath.ndim == 3 and left_bath.shape[0] == left_bath.shape[2]
    assert right_bath.ndim == 3 and right_bath.shape[0] == right_bath.shape[2]
    assert left_bath.shape[2] == state.shape[0] and right_bath.shape[2] == state.shape[2]
    assert left_bath.shape[1] == ham.shape[0] and right_bath.shape[1] == ham.shape[3]

    # The optimal contraction order depends on the details.
    # This order seems to be pretty balanced, but needs to be
    # revisited when use-cases are more well-known.
    state = torch.tensordot(left_bath, state, 1)
    state = torch.tensordot(state, ham, ([1, 2], [0, 2]))
    state = torch.tensordot(state, right_bath, ([3, 1], [1, 2]))
    return state
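A quick, self-contained shape check of the effective Hamiltonian application; all dimensions below are invented for illustration. When the MPO's out and in dimensions equal the physical dimension, the output has the same shape as the input site tensor.

```python
import torch

from emu_mps.tdvp import apply_effective_Hamiltonian

state = torch.randn(3, 2, 4, dtype=torch.complex128)       # (left bond, physical, right bond)
ham = torch.randn(5, 2, 2, 6, dtype=torch.complex128)      # (left bond, out, in, right bond)
left_bath = torch.randn(3, 5, 3, dtype=torch.complex128)   # (top, middle, bottom)
right_bath = torch.randn(4, 6, 4, dtype=torch.complex128)  # (top, middle, bottom)

out = apply_effective_Hamiltonian(state, ham, left_bath, right_bath)
assert out.shape == state.shape
```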


_TIME_CONVERSION_COEFF = 0.001  # Omega and delta are given in rad/ms, dt in ns


class EvolveConfig:
    def __init__(
        self,
        *,
        exp_tolerance: float,
        norm_tolerance: float,
        max_krylov_dim: int,
        is_hermitian: bool,
        max_error: float,
        max_rank: int
    ) -> None:
        self.exp_tolerance = exp_tolerance
        self.norm_tolerance = norm_tolerance
        self.max_krylov_dim = max_krylov_dim
        self.is_hermitian = is_hermitian
        self.max_error = (
            max_error  # FIXME: max_error and max_rank are irrelevant for evolve_single
        )
        self.max_rank = max_rank
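For reference, constructing the configuration object looks like the sketch below; the numerical values are illustrative, not package defaults.

```python
from emu_mps.tdvp import EvolveConfig

config = EvolveConfig(
    exp_tolerance=1e-10,   # passed through to emu_base.krylov_exp
    norm_tolerance=1e-10,  # passed through to emu_base.krylov_exp
    max_krylov_dim=100,    # passed through to emu_base.krylov_exp
    is_hermitian=True,     # passed through to emu_base.krylov_exp
    max_error=1e-5,        # truncation threshold used by split_tensor in evolve_pair
    max_rank=1024,         # maximum bond dimension kept by split_tensor in evolve_pair
)
```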


def evolve_pair(
    *,
    state_factors: list[torch.Tensor],
    baths: tuple[torch.Tensor, torch.Tensor],
    ham_factors: list[torch.Tensor],
    dt: float,
    orth_center_right: bool,
    config: EvolveConfig
) -> tuple[torch.Tensor, torch.Tensor]:
    """
    Time evolution of a pair of tensors of a tensor train using baths and truncated SVD.
    Returned state tensors are kept on their respective devices.
    """
    assert len(state_factors) == 2
    assert len(baths) == 2
    assert len(ham_factors) == 2

    left_state_factor, right_state_factor = state_factors
    left_bath, right_bath = baths
    left_ham_factor, right_ham_factor = ham_factors

    left_device = left_state_factor.device
    right_device = right_state_factor.device

    left_bond_dim = left_state_factor.shape[0]
    right_bond_dim = right_state_factor.shape[-1]

    # Computation is done on left_device (arbitrary)

    combined_state_factors = torch.tensordot(
        left_state_factor, right_state_factor.to(left_device), dims=1
    ).reshape(left_bond_dim, 4, right_bond_dim)

    left_ham_factor = left_ham_factor.to(left_device)
    right_ham_factor = right_ham_factor.to(left_device)

    combined_hamiltonian_factors = (
        torch.tensordot(left_ham_factor, right_ham_factor, dims=1)
        .transpose(2, 3)
        .reshape(left_ham_factor.shape[0], 4, 4, -1)
    )

    op = (
        lambda x: -_TIME_CONVERSION_COEFF
        * 1j
        * dt
        * apply_effective_Hamiltonian(
            x, combined_hamiltonian_factors, left_bath, right_bath
        )
    )

    evol = krylov_exp(
        op,
        combined_state_factors,
        exp_tolerance=config.exp_tolerance,
        norm_tolerance=config.norm_tolerance,
        max_krylov_dim=config.max_krylov_dim,
        is_hermitian=config.is_hermitian,
    ).reshape(left_bond_dim * 2, 2 * right_bond_dim)

    l, r = split_tensor(
        evol,
        max_error=config.max_error,
        max_rank=config.max_rank,
        orth_center_right=orth_center_right,
    )

    return l.reshape(left_bond_dim, 2, -1), r.reshape(-1, 2, right_bond_dim).to(
        right_device
    )
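The reshape gymnastics in `evolve_pair` can be checked in isolation with plain torch and `split_tensor`: two site tensors of shape (l, 2, m) and (m, 2, r) are merged into a "fat" tensor (l, 4, r), flattened to a (2l, 2r) matrix, split, and folded back. The sizes below are arbitrary; with a negligible `max_error` the split reproduces the pair exactly.

```python
import torch

from emu_mps.utils import split_tensor

left = torch.randn(3, 2, 5, dtype=torch.complex128)
right = torch.randn(5, 2, 4, dtype=torch.complex128)

pair = torch.tensordot(left, right, dims=1).reshape(3, 4, 4)  # (left bond, fat physical, right bond)
matrix = pair.reshape(3 * 2, 2 * 4)                           # same flattening as in evolve_pair

l, r = split_tensor(matrix, max_error=1e-12, orth_center_right=True)
new_left = l.reshape(3, 2, -1)
new_right = r.reshape(-1, 2, 4)

# The product of the two new factors reproduces the pair up to numerical noise.
recombined = torch.tensordot(new_left, new_right, dims=1).reshape(3, 4, 4)
assert torch.allclose(recombined, pair, atol=1e-10)
```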


def evolve_single(
    *,
    state_factor: torch.Tensor,
    baths: tuple[torch.Tensor, torch.Tensor],
    ham_factor: torch.Tensor,
    dt: float,
    config: EvolveConfig
) -> torch.Tensor:
    """
    Time evolution of a single tensor of a tensor train using baths.
    """
    assert len(baths) == 2

    left_bath, right_bath = baths

    op = (
        lambda x: -_TIME_CONVERSION_COEFF
        * 1j
        * dt
        * apply_effective_Hamiltonian(
            x,
            ham_factor,
            left_bath,
            right_bath,
        )
    )

    return krylov_exp(
        op,
        state_factor,
        exp_tolerance=config.exp_tolerance,
        norm_tolerance=config.norm_tolerance,
        max_krylov_dim=config.max_krylov_dim,
        is_hermitian=config.is_hermitian,
    )
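A minimal end-to-end sketch of `evolve_single` on one qubit, assuming emu-mps and its emu-base dependency are installed: trivial baths of shape (1, 1, 1), an MPO factor built from the number operator defined in `emu_mps.utils`, and made-up tolerances. Whether `is_hermitian` refers to the Hamiltonian or to the anti-Hermitian generator `-i*dt*H` is not specified by this file, so `False` is used as a conservative assumption here.

```python
import torch

from emu_mps.tdvp import EvolveConfig, evolve_single
from emu_mps.utils import n_operator

config = EvolveConfig(
    exp_tolerance=1e-12,
    norm_tolerance=1e-12,
    max_krylov_dim=50,
    is_hermitian=False,  # assumption, see the note above
    max_error=1e-5,
    max_rank=1024,
)

state_factor = torch.tensor([1.0, 1.0], dtype=torch.complex128).reshape(1, 2, 1) / 2**0.5
trivial_bath = torch.ones(1, 1, 1, dtype=torch.complex128)
ham_factor = n_operator.reshape(1, 2, 2, 1)  # (left bond, out, in, right bond)

evolved = evolve_single(
    state_factor=state_factor,
    baths=(trivial_bath, trivial_bath),
    ham_factor=ham_factor,
    dt=10.0,  # ns
    config=config,
)
assert evolved.shape == state_factor.shape
```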
emu_mps/utils.py
ADDED
@@ -0,0 +1,258 @@
from typing import List, Optional
import torch
import random
from collections import Counter

DEVICE_COUNT = torch.cuda.device_count()


def new_left_bath(
    bath: torch.Tensor, state: torch.Tensor, op: torch.Tensor
) -> torch.Tensor:
    # this order is more efficient than contracting the op first in general
    bath = torch.tensordot(bath, state.conj(), ([0], [0]))
    bath = torch.tensordot(bath, op.to(bath.device), ([0, 2], [0, 1]))
    return torch.tensordot(bath, state, ([0, 2], [0, 1]))


def _determine_cutoff_index(d: torch.Tensor, max_error: float) -> int:
    assert max_error > 0
    squared_max_error = max_error * max_error
    acc = 0
    for i in range(d.shape[0]):
        acc += d[i]
        if acc > squared_max_error:
            return i
    return 0  # type: ignore[no-any-return]


def split_tensor(
    m: torch.Tensor,
    max_error: float = 1e-5,
    max_rank: int = 1024,
    orth_center_right: bool = True,
) -> tuple[torch.Tensor, torch.Tensor]:
    """
    Computes a low-rank approximation split of m using the Eckart-Young-Mirsky theorem.
    """
    assert m.ndim == 2

    if orth_center_right:
        d, q = torch.linalg.eigh(m @ m.T.conj())
        max_bond = max(
            _determine_cutoff_index(d, max_error),
            d.shape[0] - max_rank,
        )
        left = q[:, max_bond:]
        right = q.T.conj() @ m
        right = right[max_bond:, :]
    else:
        d, q = torch.linalg.eigh(m.T.conj() @ m)
        max_bond = max(
            _determine_cutoff_index(d, max_error),
            d.shape[0] - max_rank,
        )
        right = q.T.conj()[max_bond:, :]
        left = m @ q
        left = left[:, max_bond:]

    return left, right
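A quick reconstruction check on random data (sizes are arbitrary): the two returned factors multiply back to the input, and `max_rank` caps the inner dimension of the split.

```python
import torch

from emu_mps.utils import split_tensor

m = torch.randn(6, 8, dtype=torch.complex128)

# With a negligible max_error and a generous max_rank, nothing is truncated.
left, right = split_tensor(m, max_error=1e-12, max_rank=1024)
assert torch.allclose(left @ right, m, atol=1e-10)

# max_rank bounds the inner (bond) dimension of the factorization.
left, right = split_tensor(m, max_error=1e-12, max_rank=4)
assert left.shape == (6, 4) and right.shape == (4, 8)
```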


def truncate_impl(
    factors: list[torch.Tensor],
    max_error: float = 1e-5,
    max_rank: int = 1024,
) -> None:
    """
    Eigenvalue-based truncation of a matrix product.
    An in-place operation.

    Note:
        Sweeps from right to left.
        Requires the matrix product to be orthogonalized on the last element.
        At each step moves the orthogonality center to the left while truncating.
    """
    for i in range(len(factors) - 1, 0, -1):
        factor_shape = factors[i].shape

        l, r = split_tensor(
            factors[i].reshape(factor_shape[0], -1),
            max_error=max_error,
            max_rank=max_rank,
            orth_center_right=False,
        )

        factors[i] = r.reshape(-1, *factor_shape[1:])
        factors[i - 1] = torch.tensordot(
            factors[i - 1], l.to(factors[i - 1].device), dims=1
        )
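A small shape-only sketch of the sweep: a two-site train with bond dimension 2, left-orthogonalized so the orthogonality center sits on the last factor, is truncated in place down to the bond dimension allowed by `max_rank` (the values here are illustrative).

```python
import torch

from emu_mps.utils import truncate_impl

# Build a left-orthogonal first factor via QR, putting the orthogonality center on the right.
q, r = torch.linalg.qr(torch.randn(2, 2, dtype=torch.complex128))
factors = [q.reshape(1, 2, 2), r.reshape(2, 2, 1)]

truncate_impl(factors, max_error=1e-5, max_rank=1)
assert factors[0].shape == (1, 2, 1) and factors[1].shape == (1, 2, 1)
```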


def assign_devices(tensors: List[torch.Tensor], num_gpus_to_use: int) -> None:
    """
    Evenly distributes each tensor in the list to a device.
    If num_gpus_to_use is 0, then all tensors go to CPU.
    """
    num_gpus_to_use = min(len(tensors), num_gpus_to_use)

    if num_gpus_to_use <= 0:
        for i in range(len(tensors)):
            tensors[i] = tensors[i].to("cpu")
        return

    tensors_per_device = len(tensors) // num_gpus_to_use

    if len(tensors) % num_gpus_to_use != 0:
        tensors_per_device += 1

    for i in range(len(tensors)):
        tensors[i] = tensors[i].to(f"cuda:{i // tensors_per_device}")
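The distribution pattern, sketched on five factors and two GPUs: `tensors_per_device` becomes ceil(5 / 2) = 3, so the first three tensors land on `cuda:0` and the remaining two on `cuda:1`. The CUDA branch only runs when at least two devices are present.

```python
import torch

from emu_mps.utils import assign_devices

factors = [torch.zeros(1, 2, 1, dtype=torch.complex128) for _ in range(5)]

if torch.cuda.device_count() >= 2:
    assign_devices(factors, num_gpus_to_use=2)
    assert [f.device.index for f in factors] == [0, 0, 0, 1, 1]
else:
    assign_devices(factors, num_gpus_to_use=0)  # everything stays on the CPU
    assert all(f.device.type == "cpu" for f in factors)
```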


def extended_mps_factors(
    mps_factors: list[torch.Tensor], where: list[bool]
) -> list[torch.Tensor]:
    """
    Given a valid list of MPS factors, accounting for qubits marked as `True` in `where`,
    fills the `False` positions with new qubits in the |0> state.
    """
    assert len(mps_factors) == sum(1 for b in where if b)

    bond_dimension = 1
    factor_index = 0
    result = []
    for is_factor in where:
        assert 0 <= factor_index <= len(mps_factors)

        if is_factor:
            result.append(mps_factors[factor_index])
            bond_dimension = mps_factors[factor_index].shape[2]
            factor_index += 1
        elif factor_index == len(mps_factors):
            factor = torch.zeros(bond_dimension, 2, 1, dtype=torch.complex128)
            factor[:, 0, :] = torch.eye(bond_dimension, 1)
            bond_dimension = 1
            result.append(factor)
        else:
            factor = torch.zeros(
                bond_dimension, 2, bond_dimension, dtype=torch.complex128
            )
            factor[:, 0, :] = torch.eye(bond_dimension, bond_dimension)
            result.append(factor)
    return result
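For instance (a small illustration assuming emu-mps is importable): two single-qubit factors with `where = [True, False, True]` yield a three-site factor list whose middle site is a freshly inserted |0> qubit.

```python
import torch

from emu_mps.utils import extended_mps_factors

plus = torch.tensor([1.0, 1.0], dtype=torch.complex128).reshape(1, 2, 1) / 2**0.5
factors = extended_mps_factors([plus, plus], where=[True, False, True])

assert len(factors) == 3
assert factors[1].shape == (1, 2, 1)
assert torch.allclose(factors[1][0, :, 0], torch.tensor([1.0, 0.0], dtype=torch.complex128))
```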


def extended_mpo_factors(
    mpo_factors: list[torch.Tensor], where: list[bool]
) -> list[torch.Tensor]:
    """
    Given a valid list of MPO factors, accounting for qubits marked as `True` in `where`,
    fills the `False` positions with new MPO identity factors.
    """
    assert len(mpo_factors) == sum(1 for b in where if b)

    bond_dimension = 1
    factor_index = 0
    result = []
    for is_factor in where:
        assert 0 <= factor_index <= len(mpo_factors)

        if is_factor:
            result.append(mpo_factors[factor_index])
            bond_dimension = mpo_factors[factor_index].shape[3]
            factor_index += 1
        elif factor_index == len(mpo_factors):
            factor = torch.zeros(bond_dimension, 2, 2, 1, dtype=torch.complex128)
            factor[:, 0, 0, :] = torch.eye(bond_dimension, 1)
            factor[:, 1, 1, :] = torch.eye(bond_dimension, 1)
            bond_dimension = 1
            result.append(factor)
        else:
            factor = torch.zeros(
                bond_dimension, 2, 2, bond_dimension, dtype=torch.complex128
            )
            factor[:, 0, 0, :] = torch.eye(bond_dimension, bond_dimension)
            factor[:, 1, 1, :] = torch.eye(bond_dimension, bond_dimension)
            result.append(factor)
    return result


def get_extended_site_index(
    where: list[bool], desired_index: Optional[int]
) -> Optional[int]:
    """
    Returns the index in `where` that has `desired_index` preceding True elements.

    This function is used to find the index of the orthogonality center in an MPS obtained
    with `extended_mps_factors` in the presence of dark qubits:
    `where` is the mask specifying whether qubits are well-prepared.
    `desired_index` is the index of the orthogonality center of the MPS without dark qubits.
    The return value is then the index of the orthogonality center
    in the full MPS with added dark qubits.
    """
    if desired_index is None:
        return None

    index = -1
    for extended_index, boolean_value in enumerate(where):
        if boolean_value:
            index += 1
            if index == desired_index:
                return extended_index

    raise ValueError(f"Index {desired_index} does not exist")
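For example, with `where = [True, False, True]` site 1 of the reduced MPS corresponds to site 2 of the extended one, while `None` is passed through unchanged:

```python
from emu_mps.utils import get_extended_site_index

assert get_extended_site_index([True, False, True], 0) == 0
assert get_extended_site_index([True, False, True], 1) == 2
assert get_extended_site_index([True, False, True], None) is None
```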


def readout_with_error(c: str, *, p_false_pos: float, p_false_neg: float) -> str:
    # p_false_pos = false positive, p_false_neg = false negative
    r = random.random()
    if c == "0" and r < p_false_pos:
        return "1"

    if c == "1" and r < p_false_neg:
        return "0"

    return c


def apply_measurement_errors(
    bitstrings: Counter[str], *, p_false_pos: float, p_false_neg: float
) -> Counter[str]:
    """
    Given a bag of sampled bitstrings, returns another bag of bitstrings
    sampled with readout/measurement errors.

    p_false_pos: probability of false positive
    p_false_neg: probability of false negative
    """
    result: Counter[str] = Counter()
    for bitstring, count in bitstrings.items():
        for _ in range(count):
            bitstring_with_error = "".join(
                readout_with_error(c, p_false_pos=p_false_pos, p_false_neg=p_false_neg)
                for c in bitstring
            )

            result[bitstring_with_error] += 1

    return result
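A short usage sketch with made-up error rates: the total number of shots is preserved, and zero error probabilities return the input unchanged.

```python
from collections import Counter

from emu_mps.utils import apply_measurement_errors

shots = Counter({"010": 600, "110": 400})
noisy = apply_measurement_errors(shots, p_false_pos=0.01, p_false_neg=0.05)

assert sum(noisy.values()) == 1000
assert apply_measurement_errors(shots, p_false_pos=0.0, p_false_neg=0.0) == shots
```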


n_operator = torch.tensor(
    [
        [0, 0],
        [0, 1],
    ],
    dtype=torch.complex128,
)


def tensor_trace(tensor: torch.Tensor, dim1: int, dim2: int) -> torch.Tensor:
    """
    Contract two legs of a single tensor.
    """
    assert tensor.shape[dim1] == tensor.shape[dim2], "dimensions should match"
    return tensor.diagonal(offset=0, dim1=dim1, dim2=dim2).sum(-1)
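For instance, tracing the two legs of an identity matrix yields its dimension, and tracing the out/in legs of a trivial MPO factor leaves its two bond indices:

```python
import torch

from emu_mps.utils import n_operator, tensor_trace

assert tensor_trace(torch.eye(3, dtype=torch.complex128), 0, 1) == 3
assert tensor_trace(n_operator.reshape(1, 2, 2, 1), 1, 2).shape == (1, 1)
```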
emu_mps-1.2.1.dist-info/METADATA
ADDED
@@ -0,0 +1,133 @@
Metadata-Version: 2.4
Name: emu-mps
Version: 1.2.1
Summary: Pasqal MPS based pulse emulator built on PyTorch
Author-email: Anton Quelle <anton.quelle@pasqal.com>, Mauro Mendizabal <mauro.mendizabal-pico@pasqal.com>, Stefano Grava <stefano.grava@pasqal.com>, Pablo Le Henaff <pablo.le-henaff@pasqal.com>
License: Proprietary
Classifier: License :: Other/Proprietary License
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Requires-Python: >=3.10
Requires-Dist: emu-base==1.2.0
Description-Content-Type: text/markdown

<div align="center">
<img src="docs/logos/LogoTaglineSoftGreen.svg">

# Emu-MPS
</div>

**EMU-MPS** is a Pulser backend, designed to **EMU**late the dynamics of programmable arrays of neutral atoms, with matrix product states (**MPS**). It allows users to increase the number of qubits and reduce computation time.

Join us on [Slack](https://pasqalworkspace.slack.com/archives/C07MUV5K7EU) or by [e-mail](mailto:emulation@pasqal.com) to give us feedback about how you plan to use Emu-MPS or if you require specific feature upgrades.


## Getting started

You can install from source, or download the package from the private PyPI registry that Pasqal maintains on GitLab.
For developers, we recommend installing from source; for users, we recommend installing from the registry.

**Warning:** installing emu-mps will update pulser-core.

We always recommend using a virtual environment.

<details>
<summary>Click me to see how it is done</summary>

#### Create a virtual environment using Python

```
python -m venv .venv
```

Or

```
python -m venv /path/to/new/virtual/environment
```

Replace `/path/to/new/virtual/environment` with your desired directory path.

Then activate the environment. On Linux or macOS:

```
source /path/to/new/virtual/environment/bin/activate
```

While on Windows it's

```
C:\> /path/to/new/virtual/environment/Scripts/activate
```

Remember to replace `/path/to/new/virtual/environment` with the actual path to your virtual environment. Once the environment is activated, you can clone emu_mps and install it as described below.

</details>

### Installing from the registry

When pip is configured to know about the Pasqal registry, Emu-MPS installs as

```bash
pip install emu-mps
```

When pip is not already configured, the easiest way to do so is to add a file `~/.config/pip/pip.conf` containing:

```
[global]
extra-index-url=https://gitlab.pasqal.com/api/v4/projects/597/packages/pypi/simple
possible.other.urls
```

As this shows, it is also possible to have multiple extra repositories configured. Note that the order is not important.

It is also possible to add the `extra-index-url` to the `pip install` command directly, if you prefer not to create a `pip.conf` file.

### Installing from source

Git clone this [repository](https://gitlab.pasqal.com/emulation/rydberg-atoms/emu-ct) or download it.

Then, `cd` into the root folder of the repo and type

```bash
pip install -e .
```

<details>
<summary>Guidelines for developers</summary>

We recommend using a virtual environment. Git clone the repository, then inside the `emu_mps` folder run

```bash
pip install -e .
```

Also install pytest, nbmake and pre-commit.

Do not forget to run the unit test suite by simply running the `pytest` command.

Another option is to use hatch.

#### Virtual environment with `hatch`

```bash
python -m pip install hatch
python -m hatch -v shell
```

When inside the shell with development dependencies, first install the pre-commit hook:

```
pre-commit install
```
</details>

## Check the tutorial notebooks and example scripts

For more information, you can check the tutorials and examples located in the [examples folder](https://gitlab.pasqal.com/emulation/rydberg-atoms/emu-ct/-/tree/main/examples?ref_type=heads).

## Documentation

Please check the [documentation](./docs/index.md) page for more info about contributing, the API, benchmarks, etc.


emu_mps-1.2.1.dist-info/RECORD
ADDED
@@ -0,0 +1,14 @@
emu_mps/__init__.py,sha256=0yj0gJbi5R8_-wdu7olvvJrvALFaNchusJwUCmCyt7M,646
emu_mps/algebra.py,sha256=fMkhie3iy-zEivE6G3EpJoo2o6Gde0rqrOwBUKOBkME,5384
emu_mps/hamiltonian.py,sha256=Ojb-T2bpb-Ym99IbFsTC2WqQ4C06IbcYK4VusA2pt_0,15799
emu_mps/mpo.py,sha256=RA9n50li1NJsIWCid-uOFgYV-usX4S7ZgmRdPLht8hs,8611
emu_mps/mps.py,sha256=3Oi_gT5KdY46Q3l9uGifWyrb77DUQOHxnu_O9ky7MYo,18338
emu_mps/mps_backend.py,sha256=ZCZMCsJz4-Oi0Ox2CqCOWqQWdE9ZJj6n_Jssd0g_1T0,972
emu_mps/mps_backend_impl.py,sha256=dEhBVciS7bdZ0AymbgDVyNUfprxuv-GgUoRcmKBkLHY,19512
emu_mps/mps_config.py,sha256=-ScHCNiiAwg_kMBSGOsXydjnbo_y3AqrvqpG3RzTYmU,2703
emu_mps/noise.py,sha256=h4X2EFjoC_Ok0gZ8I9wN77RANXaVehTBbjkcbY_GAmY,784
emu_mps/tdvp.py,sha256=mDStFKgbG3OXsEG-Ja8NJKsTGyLxsjNS6sQXK7B33EY,6088
emu_mps/utils.py,sha256=05s5TBE2zhsJwugbQFDCFQi7L0pVX0ukI-WIW2zN5TA,8052
emu_mps-1.2.1.dist-info/METADATA,sha256=SUITsADc02ZBCuTVgYUzWMwuyAx_AfIPN4mkC0Tt9Nw,4203
emu_mps-1.2.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
emu_mps-1.2.1.dist-info/RECORD,,