emu-mps 1.2.5__py3-none-any.whl → 1.2.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- emu_mps/__init__.py +1 -1
- emu_mps/mpo.py +1 -2
- emu_mps/mps.py +53 -43
- emu_mps/mps_backend_impl.py +34 -25
- emu_mps/mps_config.py +1 -2
- emu_mps/tdvp.py +1 -0
- emu_mps/utils.py +7 -2
- {emu_mps-1.2.5.dist-info → emu_mps-1.2.7.dist-info}/METADATA +7 -71
- emu_mps-1.2.7.dist-info/RECORD +17 -0
- emu_mps/constants.py +0 -4
- emu_mps-1.2.5.dist-info/RECORD +0 -18
- {emu_mps-1.2.5.dist-info → emu_mps-1.2.7.dist-info}/WHEEL +0 -0
emu_mps/__init__.py
CHANGED
emu_mps/mpo.py
CHANGED
@@ -6,10 +6,9 @@ import torch
 
 from emu_mps.algebra import add_factors, scale_factors, zip_right
 from emu_base.base_classes.operator import FullOp, QuditOp
-from emu_base import Operator, State
+from emu_base import Operator, State, DEVICE_COUNT
 from emu_mps.mps import MPS
 from emu_mps.utils import new_left_bath, assign_devices
-from emu_mps.constants import DEVICE_COUNT
 
 
 def _validate_operator_targets(operations: FullOp, nqubits: int) -> None:
emu_mps/mps.py
CHANGED
@@ -6,7 +6,7 @@ from typing import Any, List, Optional, Iterable
 
 import torch
 
-from emu_base import State
+from emu_base import State, DEVICE_COUNT
 from emu_mps import MPSConfig
 from emu_mps.algebra import add_factors, scale_factors
 from emu_mps.utils import (
@@ -16,7 +16,6 @@ from emu_mps.utils import (
     tensor_trace,
     n_operator,
 )
-from emu_mps.constants import DEVICE_COUNT
 
 
 class MPS(State):
@@ -190,11 +189,58 @@
         """
         self.orthogonalize(0)
 
-
-
-        bitstrings = Counter(
-
-
+        rnd_matrix = torch.rand(num_shots, self.num_sites).to(self.factors[0].device)
+
+        bitstrings: Counter[str] = Counter()
+
+        # Shots are performed in batches.
+        # Larger max_batch_size is faster but uses more memory.
+        max_batch_size = 32
+
+        shots_done = 0
+        while shots_done < num_shots:
+            batch_size = min(max_batch_size, num_shots - shots_done)
+            batched_accumulator = torch.ones(
+                batch_size, 1, dtype=torch.complex128, device=self.factors[0].device
+            )
+
+            batch_outcomes = torch.empty(batch_size, self.num_sites, dtype=torch.bool)
+
+            for qubit, factor in enumerate(self.factors):
+                batched_accumulator = torch.tensordot(
+                    batched_accumulator.to(factor.device), factor, dims=1
+                )
+
+                # Probability of measuring qubit == 0 for each shot in the batch
+                probas = (
+                    torch.linalg.vector_norm(batched_accumulator[:, 0, :], dim=1) ** 2
+                )
+
+                outcomes = (
+                    rnd_matrix[shots_done : shots_done + batch_size, qubit].to(
+                        factor.device
+                    )
+                    > probas
+                )
+                batch_outcomes[:, qubit] = outcomes
+
+                # Batch collapse qubit
+                tmp = torch.stack((~outcomes, outcomes), dim=1).to(dtype=torch.complex128)
+
+                batched_accumulator = (
+                    torch.tensordot(batched_accumulator, tmp, dims=([1], [1]))
+                    .diagonal(dim1=0, dim2=2)
+                    .transpose(1, 0)
+                )
+                batched_accumulator /= torch.sqrt(
+                    (~outcomes) * probas + outcomes * (1 - probas)
+                ).unsqueeze(1)
+
+            shots_done += batch_size
+
+            for outcome in batch_outcomes:
+                bitstrings.update(["".join("0" if x == 0 else "1" for x in outcome)])
+
         if p_false_neg > 0 or p_false_pos > 0:
             bitstrings = apply_measurement_errors(
                 bitstrings,
@@ -215,42 +261,6 @@
             torch.linalg.norm(self.factors[orthogonality_center].to("cpu")).item()
         )
 
-    def _sample_implementation(self, rnd_vector: torch.Tensor) -> str:
-        """
-        Samples this MPS once, returning the resulting bitstring.
-        """
-        assert rnd_vector.shape == (self.num_sites,)
-        assert self.orthogonality_center == 0
-
-        num_qubits = len(self.factors)
-
-        bitstring = ""
-        acc_mps_j: torch.Tensor = self.factors[0]
-
-        for qubit in range(num_qubits):
-            # comp_basis is a projector: 0 is for ket |0> and 1 for ket |1>
-            comp_basis = 0  # check if the qubit is in |0>
-            # Measure the qubit j by applying the projector onto nth comp basis state
-            tensorj_projected_n = acc_mps_j[:, comp_basis, :]
-            probability_n = (tensorj_projected_n.norm() ** 2).item()
-
-            if rnd_vector[qubit] > probability_n:
-                # the qubit is in |1>
-                comp_basis = 1
-                tensorj_projected_n = acc_mps_j[:, comp_basis, :]
-                probability_n = 1 - probability_n
-
-            bitstring += str(comp_basis)
-            if qubit < num_qubits - 1:
-                acc_mps_j = torch.tensordot(
-                    tensorj_projected_n.to(device=self.factors[qubit + 1].device),
-                    self.factors[qubit + 1],
-                    dims=1,
-                )
-                acc_mps_j /= math.sqrt(probability_n)
-
-        return bitstring
-
     def inner(self, other: State) -> float | complex:
         """
         Compute the inner product between this state and other.
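The new batched sampler above sweeps the chain once per batch of shots: contract the next factor into a per-shot accumulator, read off the probability of measuring |0>, compare it against a pre-drawn uniform random number, and collapse the accumulator onto the observed outcome before moving on. Below is a minimal single-shot sketch of the same sweep-and-collapse idea on a hand-built two-site MPS (the Bell state); `sample_once` is a hypothetical standalone helper for illustration, not the emu-mps API.

```python
import torch

def sample_once(factors: list[torch.Tensor], rng: torch.Generator) -> str:
    """Draw one bitstring from an MPS whose orthogonality center is on site 0.

    Each factor has shape (left_bond, 2, right_bond). Hypothetical helper,
    not part of emu-mps.
    """
    acc = torch.ones(1, dtype=torch.complex128)  # open left boundary
    bits = []
    for factor in factors:
        acc = torch.tensordot(acc, factor, dims=1)  # shape (2, right_bond)
        p0 = (torch.linalg.vector_norm(acc[0]) ** 2).item()  # P(qubit == 0)
        outcome = int(torch.rand(1, generator=rng).item() > p0)
        prob = p0 if outcome == 0 else 1.0 - p0
        acc = acc[outcome] / prob**0.5  # collapse and renormalize
        bits.append(str(outcome))
    return "".join(bits)

# Bell state (|00> + |11>)/sqrt(2) as a two-site, right-canonical MPS.
a1 = torch.zeros(1, 2, 2, dtype=torch.complex128)
a1[0, 0, 0] = a1[0, 1, 1] = 2**-0.5
a2 = torch.zeros(2, 2, 1, dtype=torch.complex128)
a2[0, 0, 0] = a2[1, 1, 0] = 1.0
rng = torch.Generator().manual_seed(0)
print([sample_once([a1, a2], rng) for _ in range(5)])  # only '00' and '11' appear
```

The batched code in the hunk does the same thing for a whole batch at once, which is why it keeps a (batch_size, bond) accumulator and renormalizes with the per-shot outcome probabilities.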
emu_mps/mps_backend_impl.py
CHANGED
@@ -11,9 +11,8 @@ import torch
 import time
 from pulser import Sequence
 
-from emu_base import Results, State, PulserData
+from emu_base import Results, State, PulserData, DEVICE_COUNT
 from emu_base.math.brents_root_finding import BrentsRootFinder
-from emu_mps.constants import DEVICE_COUNT
 from emu_mps.hamiltonian import make_H, update_H
 from emu_mps.mpo import MPO
 from emu_mps.mps import MPS
@@ -55,7 +54,8 @@ class MPSBackendImpl:
 
     def __init__(self, mps_config: MPSConfig, pulser_data: PulserData):
         self.config = mps_config
-        self.
+        self.target_times = pulser_data.target_times
+        self.target_time = self.target_times[1]
         self.pulser_data = pulser_data
         self.qubit_count = pulser_data.qubit_count
         assert self.qubit_count >= 2
@@ -95,18 +95,16 @@
         return pathlib.Path(os.getcwd()) / (autosave_prefix + str(uuid.uuid1()) + ".dat")
 
     def init_dark_qubits(self) -> None:
-        has_state_preparation_error
+        # has_state_preparation_error
+        if (
             self.config.noise_model is not None
             and self.config.noise_model.state_prep_error > 0.0
-        )
-
-        self.well_prepared_qubits_filter = (
-            pick_well_prepared_qubits(
+        ):
+            self.well_prepared_qubits_filter = pick_well_prepared_qubits(
                 self.config.noise_model.state_prep_error, self.qubit_count
             )
-
-
-        )
+        else:
+            self.well_prepared_qubits_filter = None
 
         if self.well_prepared_qubits_filter is not None:
             self.qubit_count = sum(1 for x in self.well_prepared_qubits_filter if x)
@@ -153,9 +151,11 @@
         too many factors are put in the Hamiltonian
         """
         self.hamiltonian = make_H(
-            interaction_matrix=
-
-
+            interaction_matrix=(
+                self.masked_interaction_matrix
+                if self.is_masked
+                else self.full_interaction_matrix
+            ),
             hamiltonian_type=self.hamiltonian_type,
             num_gpus_to_use=self.config.num_gpus_to_use,
         )
@@ -177,6 +177,12 @@
         self.right_baths = right_baths(self.state, self.hamiltonian, final_qubit=2)
         assert len(self.right_baths) == self.qubit_count - 1
 
+    def get_current_right_bath(self) -> torch.Tensor:
+        return self.right_baths[-1]
+
+    def get_current_left_bath(self) -> torch.Tensor:
+        return self.left_baths[-1]
+
     def init(self) -> None:
         self.init_dark_qubits()
         self.init_initial_state(self.config.initial_state)
@@ -197,7 +203,7 @@
         """
         assert 1 <= len(indices) <= 2
 
-        baths = (self.
+        baths = (self.get_current_left_bath(), self.get_current_right_bath())
 
         if len(indices) == 1:
             assert orth_center_right is None
@@ -269,10 +275,10 @@
             )
             self.left_baths.append(
                 new_left_bath(
-                    self.
+                    self.get_current_left_bath(),
                     self.state.factors[self.tdvp_index],
                     self.hamiltonian.factors[self.tdvp_index],
-                )
+                ).to(self.state.factors[self.tdvp_index + 1].device)
             )
             self._evolve(self.tdvp_index + 1, dt=-delta_time / 2)
             self.right_baths.pop()
@@ -298,10 +304,10 @@
         assert self.tdvp_index <= self.qubit_count - 2
         self.right_baths.append(
             new_right_bath(
-                self.
+                self.get_current_right_bath(),
                 self.state.factors[self.tdvp_index + 1],
                 self.hamiltonian.factors[self.tdvp_index + 1],
-            )
+            ).to(self.state.factors[self.tdvp_index].device)
        )
         if not self.has_lindblad_noise:
             # Free memory because it won't be used anymore
@@ -334,7 +340,6 @@
     def timestep_complete(self) -> None:
         self.fill_results()
         self.timestep_index += 1
-        self.target_time = float((self.timestep_index + 1) * self.config.dt)
         if self.is_masked and self.current_time >= self.slm_end_time:
             self.is_masked = False
             self.hamiltonian = make_H(
@@ -344,6 +349,7 @@
             )
 
         if not self.is_finished():
+            self.target_time = self.target_times[self.timestep_index + 1]
             update_H(
                 hamiltonian=self.hamiltonian,
                 omega=self.omega[self.timestep_index, :],
@@ -480,12 +486,15 @@ class NoisyMPSBackendImpl(MPSBackendImpl):
         self.aggregated_lindblad_ops = stacked.conj().transpose(1, 2) @ stacked
 
         self.lindblad_noise = compute_noise_from_lindbladians(self.lindblad_ops)
-
+
+    def set_jump_threshold(self, bound: float) -> None:
+        self.jump_threshold = random.uniform(0.0, bound)
         self.norm_gap_before_jump = self.state.norm() ** 2 - self.jump_threshold
 
     def init(self) -> None:
-        super().init()
         self.init_lindblad_noise()
+        super().init()
+        self.set_jump_threshold(1.0)
 
     def tdvp_complete(self) -> None:
         previous_time = self.current_time
@@ -516,7 +525,7 @@ class NoisyMPSBackendImpl(MPSBackendImpl):
 
         if self.root_finder.is_converged(tolerance=1):
             self.do_random_quantum_jump()
-            self.target_time =
+            self.target_time = self.target_times[self.timestep_index + 1]
             self.root_finder = None
         else:
             self.target_time = self.root_finder.get_next_abscissa()
@@ -535,11 +544,11 @@ class NoisyMPSBackendImpl(MPSBackendImpl):
         self.state.apply(jumped_qubit_index, jump_operator)
         self.state.orthogonalize(0)
         self.state *= 1 / self.state.norm()
+        self.init_baths()
 
         norm_after_normalizing = self.state.norm()
         assert math.isclose(norm_after_normalizing, 1, abs_tol=1e-10)
-        self.
-        self.norm_gap_before_jump = norm_after_normalizing**2 - self.jump_threshold
+        self.set_jump_threshold(norm_after_normalizing**2)
 
     def fill_results(self) -> None:
         # Remove the noise from self.hamiltonian for the callbacks.
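The `set_jump_threshold` refactor above centralizes the jump bookkeeping visible in this diff: a threshold is drawn uniformly below the current squared norm, the gap between the decaying norm and the threshold is tracked, and the threshold is redrawn right after each quantum jump. A minimal sketch of that bookkeeping follows; the class and method names here are illustrative stand-ins, not emu-mps APIs.

```python
import random

class JumpThresholdTracker:
    """Illustrative only: tracks when a jump is due, in the style of the
    set_jump_threshold / norm_gap_before_jump pattern shown above."""

    def __init__(self) -> None:
        self.set_jump_threshold(1.0)  # the initial state is normalized

    def set_jump_threshold(self, bound: float) -> None:
        # Draw the next threshold uniformly below the current squared norm.
        self.jump_threshold = random.uniform(0.0, bound)

    def norm_gap(self, squared_norm: float) -> float:
        # Positive while no jump is due; reaches zero when the decaying
        # squared norm crosses the threshold.
        return squared_norm - self.jump_threshold

tracker = JumpThresholdTracker()
print(tracker.norm_gap(0.9))  # > 0: keep evolving; <= 0: trigger a jump
```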
emu_mps/mps_config.py
CHANGED
emu_mps/tdvp.py
CHANGED
@@ -117,6 +117,7 @@ def evolve_pair(
 
     left_ham_factor = left_ham_factor.to(left_device)
     right_ham_factor = right_ham_factor.to(left_device)
+    right_bath = right_bath.to(left_device)
 
     combined_hamiltonian_factors = (
         torch.tensordot(left_ham_factor, right_ham_factor, dims=1)
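The added `right_bath = right_bath.to(left_device)` matters on multi-GPU runs: torch.tensordot, like most PyTorch ops, raises a RuntimeError when its operands live on different devices, so a bath coming from a neighbouring device has to be copied over before the pair contraction. A small illustration, assuming at least two CUDA devices are available:

```python
import torch

if torch.cuda.device_count() >= 2:
    a = torch.rand(4, 4, device="cuda:0")
    b = torch.rand(4, 4, device="cuda:1")
    # torch.tensordot(a, b, dims=1)  # RuntimeError: tensors on different devices
    c = torch.tensordot(a, b.to(a.device), dims=1)  # explicit copy first: OK
    print(c.device)  # cuda:0
```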
emu_mps/utils.py
CHANGED
@@ -130,13 +130,18 @@ def extended_mps_factors(
             bond_dimension = mps_factors[factor_index].shape[2]
             factor_index += 1
         elif factor_index == len(mps_factors):
-            factor = torch.zeros(
+            factor = torch.zeros(
+                bond_dimension, 2, 1, dtype=torch.complex128
+            )  # FIXME: assign device
             factor[:, 0, :] = torch.eye(bond_dimension, 1)
             bond_dimension = 1
             result.append(factor)
         else:
             factor = torch.zeros(
-                bond_dimension,
+                bond_dimension,
+                2,
+                bond_dimension,
+                dtype=torch.complex128,  # FIXME: assign device
             )
             factor[:, 0, :] = torch.eye(bond_dimension, bond_dimension)
             result.append(factor)
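The FIXME comments added above flag that these padding factors are allocated with the default (CPU) device. A possible fix, sketched here and not taken from emu-mps, is to pass `device=` so the new factor lands on the same device as the MPS factor it extends:

```python
import torch

bond_dimension = 3
# Stand-in for an existing MPS factor whose device we want to match.
neighbour = torch.zeros(bond_dimension, 2, bond_dimension, dtype=torch.complex128)

# Allocate the padding factor directly on the neighbour's device.
factor = torch.zeros(
    bond_dimension, 2, 1, dtype=torch.complex128, device=neighbour.device
)
factor[:, 0, :] = torch.eye(bond_dimension, 1, device=neighbour.device)
```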
{emu_mps-1.2.5.dist-info → emu_mps-1.2.7.dist-info}/METADATA
CHANGED
@@ -1,7 +1,10 @@
 Metadata-Version: 2.4
 Name: emu-mps
-Version: 1.2.5
+Version: 1.2.7
 Summary: Pasqal MPS based pulse emulator built on PyTorch
+Project-URL: Documentation, https://pasqal-io.github.io/emulators/
+Project-URL: Repository, https://github.com/pasqal-io/emulators
+Project-URL: Issues, https://github.com/pasqal-io/emulators/issues
 Author-email: Anton Quelle <anton.quelle@pasqal.com>, Mauro Mendizabal <mauro.mendizabal-pico@pasqal.com>, Stefano Grava <stefano.grava@pasqal.com>, Pablo Le Henaff <pablo.le-henaff@pasqal.com>
 License: PASQAL OPEN-SOURCE SOFTWARE LICENSE AGREEMENT (MIT-derived)
 
@@ -22,80 +25,13 @@ Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: Implementation :: CPython
 Classifier: Programming Language :: Python :: Implementation :: PyPy
 Requires-Python: >=3.10
-Requires-Dist: emu-base==1.2.
+Requires-Dist: emu-base==1.2.7
 Description-Content-Type: text/markdown
 
 <div align="center">
 <img src="docs/logos/LogoTaglineSoftGreen.svg">
-
-# Emu-MPS
 </div>
 
-
-
-As of this writing, Emu-MPS is provided for Linux and macOS but will not work under Windows.
-
-## Installation
-
-**Warning:** installing emu-mps will update pulser-core
-
-### Using `hatch`, `uv` or any pyproject-compatible Python manager
-
-To add `emu-mps` to your project, edit your `pyproject.toml` to add the line
-
-```toml
-"emu-mps"
-```
-
-to the list of `dependencies`.
-
-
-### Using `pip` or `pipx`
-To install the `pipy` package using `pip` or `pipx`
-
-1. Create a `venv` if that's not done yet
-
-```sh
-$ python -m venv venv
-
-```
-
-2. Enter the venv
-
-If you're running Unix:
-
-```sh
-$ . venv/bin/activate
-```
-
-If you're running Windows:
-
-```sh
-C:\> /path/to/new/virtual/environment/Scripts/activate
-```
-
-3. Install the package
-
-```sh
-$ pip install emu-mps
-# or
-$ pipx install emu-mps
-```
-
-
-Join us on [Slack](https://pasqalworkspace.slack.com/archives/C07MUV5K7EU) or by [e-mail](mailto:emulation@pasqal.com) to give us feedback about how you plan to use Emu-MPS or if you require specific feature-upgrades.
-
-## Usage
-
-For the time being, the easiest way to learn how to use this package is to look
-at the [examples](examples/emu_mps_examples) and [notebooks](https://pasqal-io.github.io/emulators/latest/).
-
-See also the [full documentation](https://github.com/pasqal-io/emulators/blob/main/docs/index.md) for
-the API, information about contributing, benchmarks, etc.
-
-
-## Getting in touch
+# Welcome to the Pasqal analog emulators
 
-
-- [GitHub Repository](https://github.com/pasqal-io/quantum-evolution-kernel) (source code, issue tracker).
-- [Professional Support](https://www.pasqal.com/contact-us/) (if you need tech support, custom licenses, a variant of this library optimized for your workload, your own QPU, remote access to a QPU, ...)
+Welcome, and please see the [GitHub pages](https://pasqal-io.github.io/emulators/) for a landing page to this repo.
emu_mps-1.2.7.dist-info/RECORD
ADDED
@@ -0,0 +1,17 @@
+emu_mps/__init__.py,sha256=KSTZWIZSKHhjt0yt8-fS23rFFcFVQciNiXBgHS0pnHU,646
+emu_mps/algebra.py,sha256=ngPtTH-j2ZCBWoaJZXlkUyIlug7dY7Q92gzfnRlpPMA,5485
+emu_mps/hamiltonian.py,sha256=LcBs6CKBb643a1e9AAVtQoUfa4L_0dIhLOKecx5OOWs,15864
+emu_mps/mpo.py,sha256=H5vkJvz4AfXfnPbvgWznBWpMUO8LnGL3_NAP3IhxZzQ,8740
+emu_mps/mps.py,sha256=J0I4oQP_F1woEKmnOqnXPOWxx2Y1addxNjosL3yhYAY,18214
+emu_mps/mps_backend.py,sha256=6fVaq-D4xyicYRjGjhqMEieC7---90LpfpbV7ZD7zkQ,2192
+emu_mps/mps_backend_impl.py,sha256=XT2HccHWd6Y1gIAs070pBxjPUPIHBl-hFCuqXJaPS-E,21256
+emu_mps/mps_config.py,sha256=ydKN0OOaWCBcNd9V-4CU5ZZ4w1FRT-bbKyZQD2WCaME,3317
+emu_mps/noise.py,sha256=h4X2EFjoC_Ok0gZ8I9wN77RANXaVehTBbjkcbY_GAmY,784
+emu_mps/tdvp.py,sha256=pIQ2NXA2Mrkp3elhqQbX3pdJVbtKkG3c5r9fFlJo7pI,5755
+emu_mps/utils.py,sha256=BqRJYAcXqprtZVJ0V_j954ON2bhTdtZiaTojsYyrWrg,8193
+emu_mps/optimatrix/__init__.py,sha256=lHWYNeThHp57ZrwTwXd0p8bNvcCv0w_AZ31iCWflBUo,226
+emu_mps/optimatrix/optimiser.py,sha256=cVMdm2r_4OpbthcQuFMrJ9rNR9WEJRga9c_lHrJFkhw,6687
+emu_mps/optimatrix/permutations.py,sha256=JRXGont8B4QgbkV9CzrA0w7uzLgBrmZ1J9aa0G52hPo,1979
+emu_mps-1.2.7.dist-info/METADATA,sha256=-yHfBZrLmNsmc-tA-Yb0KfmxULGgVrcLVUbx4F37oA4,3505
+emu_mps-1.2.7.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+emu_mps-1.2.7.dist-info/RECORD,,
emu_mps/constants.py
DELETED
emu_mps-1.2.5.dist-info/RECORD
DELETED
@@ -1,18 +0,0 @@
-emu_mps/__init__.py,sha256=EdVngqaa6GtyXmEP2aK3BfTHjYLBJSN3wR0CvltS9NQ,646
-emu_mps/algebra.py,sha256=ngPtTH-j2ZCBWoaJZXlkUyIlug7dY7Q92gzfnRlpPMA,5485
-emu_mps/constants.py,sha256=41LYkKLUCz-oxPbd-j7nUDZuhIbUrnez6prT0uR0jcE,56
-emu_mps/hamiltonian.py,sha256=LcBs6CKBb643a1e9AAVtQoUfa4L_0dIhLOKecx5OOWs,15864
-emu_mps/mpo.py,sha256=7y6q0UIfyX9igQknqtgt6nymuVcgjHlH3-Qv7N7uOZE,8769
-emu_mps/mps.py,sha256=OjG_caqPOioCdOt-bFUkOf2xuNGnKzj0LaMc3EJCHi4,17855
-emu_mps/mps_backend.py,sha256=6fVaq-D4xyicYRjGjhqMEieC7---90LpfpbV7ZD7zkQ,2192
-emu_mps/mps_backend_impl.py,sha256=HKDqUakqSs1THeeEZP5MFZaUyALnaIhrlsZTQ0Qp4qU,20867
-emu_mps/mps_config.py,sha256=MxahrPDaOpfdB6SLG1610iDUOuLR04IaCjKQRk99ICY,3346
-emu_mps/noise.py,sha256=h4X2EFjoC_Ok0gZ8I9wN77RANXaVehTBbjkcbY_GAmY,784
-emu_mps/tdvp.py,sha256=TH4CcBNczRURXYGPXndWKDs0jWXz_x9ozM961uGiSOw,5711
-emu_mps/utils.py,sha256=n9BcpuIz4Kl6EYlATaK8TKsyF-T7FTwbBo6KSAQYzl8,8066
-emu_mps/optimatrix/__init__.py,sha256=lHWYNeThHp57ZrwTwXd0p8bNvcCv0w_AZ31iCWflBUo,226
-emu_mps/optimatrix/optimiser.py,sha256=cVMdm2r_4OpbthcQuFMrJ9rNR9WEJRga9c_lHrJFkhw,6687
-emu_mps/optimatrix/permutations.py,sha256=JRXGont8B4QgbkV9CzrA0w7uzLgBrmZ1J9aa0G52hPo,1979
-emu_mps-1.2.5.dist-info/METADATA,sha256=bOtqQKZZ6ZkK9C6aV4puIipHUm5v1G9-tQrUftkIw0M,5559
-emu_mps-1.2.5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-emu_mps-1.2.5.dist-info/RECORD,,
{emu_mps-1.2.5.dist-info → emu_mps-1.2.7.dist-info}/WHEEL
File without changes