emu-mps 2.4.1__py3-none-any.whl → 2.4.2__py3-none-any.whl
- emu_mps/__init__.py +1 -1
- emu_mps/algebra.py +1 -1
- emu_mps/mpo.py +4 -4
- emu_mps/mps.py +2 -2
- emu_mps/mps_backend_impl.py +10 -6
- emu_mps/mps_config.py +7 -4
- {emu_mps-2.4.1.dist-info → emu_mps-2.4.2.dist-info}/METADATA +2 -2
- {emu_mps-2.4.1.dist-info → emu_mps-2.4.2.dist-info}/RECORD +9 -9
- {emu_mps-2.4.1.dist-info → emu_mps-2.4.2.dist-info}/WHEEL +0 -0
emu_mps/__init__.py
CHANGED
emu_mps/algebra.py
CHANGED

@@ -49,7 +49,7 @@ def add_factors(
 
 
 def scale_factors(
-    factors: list[torch.Tensor], scalar: complex, *, which: int
+    factors: list[torch.Tensor], scalar: complex | torch.Tensor, *, which: int
 ) -> list[torch.Tensor]:
     """
     Returns a new list of factors where the tensor at the given index is scaled by `scalar`.
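The only change here widens `scale_factors` to accept a zero-dimensional `torch.Tensor` as the scalar, for example one that carries gradients or lives on a GPU device. A minimal sketch of the widened behaviour, inferred from the docstring; the body below is illustrative, not the package's actual implementation:

import torch

def scale_factors(
    factors: list[torch.Tensor], scalar: complex | torch.Tensor, *, which: int
) -> list[torch.Tensor]:
    # Per the docstring: only the factor at index `which` is scaled.
    return [f * scalar if i == which else f for i, f in enumerate(factors)]

factors = [
    torch.randn(1, 2, 4, dtype=torch.complex128),
    torch.randn(4, 2, 1, dtype=torch.complex128),
]
scaled = scale_factors(factors, torch.tensor(0.5 + 0.0j), which=0)  # tensor scalar, new in 2.4.2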
emu_mps/mpo.py
CHANGED

@@ -173,10 +173,10 @@ class MPO(Operator[complex, torch.Tensor, MPS]):
         "gg": torch.tensor([[1.0, 0.0], [0.0, 0.0]], dtype=torch.complex128).view(
             1, 2, 2, 1
         ),
-        "
+        "rg": torch.tensor([[0.0, 0.0], [1.0, 0.0]], dtype=torch.complex128).view(
             1, 2, 2, 1
         ),
-        "
+        "gr": torch.tensor([[0.0, 1.0], [0.0, 0.0]], dtype=torch.complex128).view(
             1, 2, 2, 1
         ),
         "rr": torch.tensor([[0.0, 0.0], [0.0, 1.0]], dtype=torch.complex128).view(
@@ -190,10 +190,10 @@ class MPO(Operator[complex, torch.Tensor, MPS]):
         "00": torch.tensor([[1.0, 0.0], [0.0, 0.0]], dtype=torch.complex128).view(
             1, 2, 2, 1
         ),
-        "
+        "10": torch.tensor([[0.0, 0.0], [1.0, 0.0]], dtype=torch.complex128).view(
             1, 2, 2, 1
         ),
-        "
+        "01": torch.tensor([[0.0, 1.0], [0.0, 0.0]], dtype=torch.complex128).view(
             1, 2, 2, 1
         ),
         "11": torch.tensor([[0.0, 0.0], [0.0, 1.0]], dtype=torch.complex128).view(
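The removed lines are truncated by the diff viewer, but the replacement entries define the off-diagonal single-qubit operators: `[[0, 0], [1, 0]]` for `"rg"`/`"10"` and `[[0, 1], [0, 0]]` for `"gr"`/`"01"`. A quick sanity check of the new matrices, assuming the basis convention index 0 = |g⟩ = |0⟩ and index 1 = |r⟩ = |1⟩ (a convention the diff itself does not spell out):

import torch

# Assumption: index 0 = |g>, index 1 = |r>.
rg = torch.tensor([[0.0, 0.0], [1.0, 0.0]], dtype=torch.complex128)
g = torch.tensor([1.0, 0.0], dtype=torch.complex128)  # |g>
r = torch.tensor([0.0, 1.0], dtype=torch.complex128)  # |r>

assert torch.equal(rg @ g, r)  # "rg" acts as |r><g|: it sends |g> to |r>
assert torch.equal(rg @ r, torch.zeros(2, dtype=torch.complex128))  # and annihilates |r>

# The trailing .view(1, 2, 2, 1) in the source reshapes each 2x2 matrix
# into a bond-dimension-1 MPO factor.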
emu_mps/mps.py
CHANGED

@@ -394,7 +394,7 @@ class MPS(State[complex, torch.Tensor]):
         result.truncate()
         return result
 
-    def __rmul__(self, scalar: complex) -> MPS:
+    def __rmul__(self, scalar: complex | torch.Tensor) -> MPS:
         """
         Multiply an MPS by a scalar.
 
@@ -419,7 +419,7 @@ class MPS(State[complex, torch.Tensor]):
             eigenstates=self.eigenstates,
         )
 
-    def __imul__(self, scalar: complex) -> MPS:
+    def __imul__(self, scalar: complex | torch.Tensor) -> MPS:
         return self.__rmul__(scalar)
 
     @classmethod
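With `__rmul__` and `__imul__` widened the same way, an `MPS` can now be rescaled by a tensor-valued scalar as well as by a plain Python number. A hedged usage sketch; the factor-list constructor is inferred from the `MPS(...)` call sites visible in `mps_backend_impl.py` below, and the remaining constructor arguments are left at their defaults:

import torch
from emu_mps import MPS

# Three-qubit product state |ggg> as bond-dimension-1 factors.
factors = [torch.zeros(1, 2, 1, dtype=torch.complex128) for _ in range(3)]
for f in factors:
    f[0, 0, 0] = 1.0
state = MPS(factors)

state = torch.tensor(0.5 + 0.5j, dtype=torch.complex128) * state  # 0-dim tensor scalar, new in 2.4.2
state *= 2.0                                                      # plain scalar still works (__imul__ delegates to __rmul__)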
emu_mps/mps_backend_impl.py
CHANGED

@@ -155,12 +155,16 @@ class MPSBackendImpl:
             f"""To resume: `MPSBackend().resume("{self.autosave_file}")`"""
         )
         self.last_save_time = time.time()
+        requested_num_gpus = self.config.num_gpus_to_use
 
-        if
+        if requested_num_gpus is None:
+            requested_num_gpus = DEVICE_COUNT
+        elif requested_num_gpus > DEVICE_COUNT:
             self.config.logger.warning(
-                f"Requested to use {
+                f"Requested to use {requested_num_gpus} GPU(s) "
                 f"but only {DEVICE_COUNT if DEVICE_COUNT > 0 else 'cpu'} available"
             )
+        self.resolved_num_gpus = requested_num_gpus
 
     def __getstate__(self) -> dict:
         d = self.__dict__.copy()
@@ -211,7 +215,7 @@ class MPSBackendImpl:
             self.qubit_count,
             precision=self.config.precision,
             max_bond_dim=self.config.max_bond_dim,
-            num_gpus_to_use=self.
+            num_gpus_to_use=self.resolved_num_gpus,
         )
         return
 
@@ -239,7 +243,7 @@ class MPSBackendImpl:
             [f.detach().clone() for f in initial_state.factors],
             precision=self.config.precision,
             max_bond_dim=self.config.max_bond_dim,
-            num_gpus_to_use=self.
+            num_gpus_to_use=self.resolved_num_gpus,
             eigenstates=initial_state.eigenstates,
         )
         initial_state.truncate()
@@ -259,7 +263,7 @@ class MPSBackendImpl:
                 else self.full_interaction_matrix
             ),
             hamiltonian_type=self.hamiltonian_type,
-            num_gpus_to_use=self.
+            num_gpus_to_use=self.resolved_num_gpus,
         )
 
         update_H(
@@ -447,7 +451,7 @@ class MPSBackendImpl:
         self.hamiltonian = make_H(
             interaction_matrix=self.full_interaction_matrix,
             hamiltonian_type=self.hamiltonian_type,
-            num_gpus_to_use=self.
+            num_gpus_to_use=self.resolved_num_gpus,
         )
 
         if not self.is_finished():
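Taken together, these hunks compute a resolved GPU count once and pass it to every downstream `MPS`/`make_H` call site: `None` falls back to `DEVICE_COUNT` (imported from `emu_base`), while an explicit request larger than `DEVICE_COUNT` only logs a warning and is kept as-is. Restated as a standalone sketch of just that rule:

def resolve_num_gpus(requested: int | None, device_count: int) -> int:
    """Mirror of the resolution logic in the hunks above (sketch only)."""
    if requested is None:
        return device_count  # default: use whatever hardware is visible at runtime
    if requested > device_count:
        # the backend warns here but still keeps the requested value
        print(f"Requested to use {requested} GPU(s) but only {device_count or 'cpu'} available")
    return requested

assert resolve_num_gpus(None, 2) == 2
assert resolve_num_gpus(0, 2) == 0   # force CPU
assert resolve_num_gpus(4, 2) == 4   # warns, value kept unchanged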
emu_mps/mps_config.py
CHANGED

@@ -3,7 +3,6 @@ from types import MethodType
 
 import copy
 
-from emu_base import DEVICE_COUNT
 from emu_mps.mps import MPS, DEFAULT_MAX_BOND_DIM, DEFAULT_PRECISION
 from emu_mps.mpo import MPO
 from emu_mps.solver import Solver
@@ -45,8 +44,12 @@ class MPSConfig(EmulationConfig):
         The size of the krylov subspace that the Lanczos algorithm maximally builds
     extra_krylov_tolerance:
         The Lanczos algorithm uses this*precision as the convergence tolerance
-    num_gpus_to_use:
-
+    num_gpus_to_use: number of GPUs to be used in a given simulation.
+        - if it is set to a number `n > 0`, the state will be distributed across `n` GPUs.
+        - if it is set to `n = 0`, the entire simulation runs on the CPU.
+        - if it is `None` (the default value), the backend internally chooses the number of GPUs
+          based on the hardware availability during runtime.
+        As shown in the benchmarks, using multiple GPUs might
         alleviate memory pressure per GPU, but the runtime should be similar.
     optimize_qubit_ordering: Optimize the register ordering. Improves performance and
         accuracy, but disables certain features.
@@ -86,7 +89,7 @@ class MPSConfig(EmulationConfig):
         max_bond_dim: int = DEFAULT_MAX_BOND_DIM,
         max_krylov_dim: int = 100,
         extra_krylov_tolerance: float = 1e-3,
-        num_gpus_to_use: int =
+        num_gpus_to_use: int | None = None,
         optimize_qubit_ordering: bool = False,
         interaction_cutoff: float = 0.0,
         log_level: int = logging.INFO,
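The net effect on the public API: `num_gpus_to_use` now defaults to `None`, and the `DEVICE_COUNT` import moves out of the config, since the backend resolves the count at runtime instead. Hedged usage examples, assuming `MPSConfig` is importable from the package root and the remaining parameters can stay at their defaults:

from emu_mps import MPSConfig

auto = MPSConfig()                       # None (default): GPU count chosen at runtime
cpu_only = MPSConfig(num_gpus_to_use=0)  # n = 0: run the whole simulation on the CPU
two_gpus = MPSConfig(num_gpus_to_use=2)  # n > 0: distribute the state across 2 GPUs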
{emu_mps-2.4.1.dist-info → emu_mps-2.4.2.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: emu-mps
-Version: 2.4.1
+Version: 2.4.2
 Summary: Pasqal MPS based pulse emulator built on PyTorch
 Project-URL: Documentation, https://pasqal-io.github.io/emulators/
 Project-URL: Repository, https://github.com/pasqal-io/emulators
@@ -25,7 +25,7 @@ Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: Implementation :: CPython
 Classifier: Programming Language :: Python :: Implementation :: PyPy
 Requires-Python: >=3.10
-Requires-Dist: emu-base==2.4.1
+Requires-Dist: emu-base==2.4.2
 Description-Content-Type: text/markdown
 
 <div align="center">
{emu_mps-2.4.1.dist-info → emu_mps-2.4.2.dist-info}/RECORD
CHANGED

@@ -1,12 +1,12 @@
-emu_mps/__init__.py,sha256=
-emu_mps/algebra.py,sha256=
+emu_mps/__init__.py,sha256=fEovcc3ow-4zoEodViCEyn2uZWnb-uwVSG-uRBs-g7U,708
+emu_mps/algebra.py,sha256=VZ5uaX5PYWGqDCpRKKr819BLMMtT0pKDxc2HrlLCdgU,5423
 emu_mps/custom_callback_implementations.py,sha256=WeczmO6qkvBIipvXLqX45i3D7M4ovOrepusIGs6d2Ts,2420
 emu_mps/hamiltonian.py,sha256=gOPxNOBmk6jRPPjevERuCP_scGv0EKYeAJ0uxooihes,15622
-emu_mps/mpo.py,sha256=
-emu_mps/mps.py,sha256=
+emu_mps/mpo.py,sha256=-kbElxfodWh3IQa8QDor4TgLJE-zsaMRI5PZ6dZpYck,8049
+emu_mps/mps.py,sha256=1aMvY4NQSBeAwzTwHRegXncGQqm-1g05W00OimFNzt8,21630
 emu_mps/mps_backend.py,sha256=bS83qFxvdoK-c12_1WaPw6O7xUc7vdWifZNHUzNP5sM,2091
-emu_mps/mps_backend_impl.py,sha256=
-emu_mps/mps_config.py,sha256=
+emu_mps/mps_backend_impl.py,sha256=B8hi21uHFUSVJ3k5J4A4fXqyQo4sNHSb2uddN3lUYPA,30558
+emu_mps/mps_config.py,sha256=j7rho3edFzDxPO_VX7j5jc0Drw9wO2NWRymkqZ9hzmU,9128
 emu_mps/observables.py,sha256=4C_ewkd3YkJP0xghTrGUTgXUGvJRCQcetb8cU0SjMl0,1900
 emu_mps/solver.py,sha256=M9xkHhlEouTBvoPw2UYVu6kij7CO4Z1FXw_SiGFtdgo,85
 emu_mps/solver_utils.py,sha256=EnNzEaUrtTMQbrWoqOy8vyDsQwlsfQCUc2HgOp4z8dk,8680
@@ -14,6 +14,6 @@ emu_mps/utils.py,sha256=pW5N_EbbGiOviQpJCw1a0pVgEDObP_InceNaIqY5bHE,6982
 emu_mps/optimatrix/__init__.py,sha256=fBXQ7-rgDro4hcaBijCGhx3J69W96qcw5_3mWc7tND4,364
 emu_mps/optimatrix/optimiser.py,sha256=k9suYmKLKlaZ7ozFuIqvXHyCBoCtGgkX1mpen9GOdOo,6977
 emu_mps/optimatrix/permutations.py,sha256=9DDMZtrGGZ01b9F3GkzHR3paX4qNtZiPoI7Z_Kia3Lc,3727
-emu_mps-2.4.1.dist-info/METADATA,sha256=
-emu_mps-2.4.1.dist-info/WHEEL,sha256=
-emu_mps-2.4.1.dist-info/RECORD,,
+emu_mps-2.4.2.dist-info/METADATA,sha256=SMYIZXDve_o9aZ7K1EfB7V6s8FBqA_wltE5a8ecNUf4,3587
+emu_mps-2.4.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+emu_mps-2.4.2.dist-info/RECORD,,
{emu_mps-2.4.1.dist-info → emu_mps-2.4.2.dist-info}/WHEEL
File without changes