Trajectree 0.0.0__py3-none-any.whl → 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- trajectree/__init__.py +3 -0
- trajectree/fock_optics/devices.py +1 -1
- trajectree/fock_optics/light_sources.py +2 -2
- trajectree/fock_optics/measurement.py +3 -3
- trajectree/fock_optics/utils.py +6 -6
- trajectree/quimb/docs/_pygments/_pygments_dark.py +118 -0
- trajectree/quimb/docs/_pygments/_pygments_light.py +118 -0
- trajectree/quimb/docs/conf.py +158 -0
- trajectree/quimb/docs/examples/ex_mpi_expm_evo.py +62 -0
- trajectree/quimb/quimb/__init__.py +507 -0
- trajectree/quimb/quimb/calc.py +1491 -0
- trajectree/quimb/quimb/core.py +2279 -0
- trajectree/quimb/quimb/evo.py +712 -0
- trajectree/quimb/quimb/experimental/__init__.py +0 -0
- trajectree/quimb/quimb/experimental/autojittn.py +129 -0
- trajectree/quimb/quimb/experimental/belief_propagation/__init__.py +109 -0
- trajectree/quimb/quimb/experimental/belief_propagation/bp_common.py +397 -0
- trajectree/quimb/quimb/experimental/belief_propagation/d1bp.py +316 -0
- trajectree/quimb/quimb/experimental/belief_propagation/d2bp.py +653 -0
- trajectree/quimb/quimb/experimental/belief_propagation/hd1bp.py +571 -0
- trajectree/quimb/quimb/experimental/belief_propagation/hv1bp.py +775 -0
- trajectree/quimb/quimb/experimental/belief_propagation/l1bp.py +316 -0
- trajectree/quimb/quimb/experimental/belief_propagation/l2bp.py +537 -0
- trajectree/quimb/quimb/experimental/belief_propagation/regions.py +194 -0
- trajectree/quimb/quimb/experimental/cluster_update.py +286 -0
- trajectree/quimb/quimb/experimental/merabuilder.py +865 -0
- trajectree/quimb/quimb/experimental/operatorbuilder/__init__.py +15 -0
- trajectree/quimb/quimb/experimental/operatorbuilder/operatorbuilder.py +1631 -0
- trajectree/quimb/quimb/experimental/schematic.py +7 -0
- trajectree/quimb/quimb/experimental/tn_marginals.py +130 -0
- trajectree/quimb/quimb/experimental/tnvmc.py +1483 -0
- trajectree/quimb/quimb/gates.py +36 -0
- trajectree/quimb/quimb/gen/__init__.py +2 -0
- trajectree/quimb/quimb/gen/operators.py +1167 -0
- trajectree/quimb/quimb/gen/rand.py +713 -0
- trajectree/quimb/quimb/gen/states.py +479 -0
- trajectree/quimb/quimb/linalg/__init__.py +6 -0
- trajectree/quimb/quimb/linalg/approx_spectral.py +1109 -0
- trajectree/quimb/quimb/linalg/autoblock.py +258 -0
- trajectree/quimb/quimb/linalg/base_linalg.py +719 -0
- trajectree/quimb/quimb/linalg/mpi_launcher.py +397 -0
- trajectree/quimb/quimb/linalg/numpy_linalg.py +244 -0
- trajectree/quimb/quimb/linalg/rand_linalg.py +514 -0
- trajectree/quimb/quimb/linalg/scipy_linalg.py +293 -0
- trajectree/quimb/quimb/linalg/slepc_linalg.py +892 -0
- trajectree/quimb/quimb/schematic.py +1518 -0
- trajectree/quimb/quimb/tensor/__init__.py +401 -0
- trajectree/quimb/quimb/tensor/array_ops.py +610 -0
- trajectree/quimb/quimb/tensor/circuit.py +4824 -0
- trajectree/quimb/quimb/tensor/circuit_gen.py +411 -0
- trajectree/quimb/quimb/tensor/contraction.py +336 -0
- trajectree/quimb/quimb/tensor/decomp.py +1255 -0
- trajectree/quimb/quimb/tensor/drawing.py +1646 -0
- trajectree/quimb/quimb/tensor/fitting.py +385 -0
- trajectree/quimb/quimb/tensor/geometry.py +583 -0
- trajectree/quimb/quimb/tensor/interface.py +114 -0
- trajectree/quimb/quimb/tensor/networking.py +1058 -0
- trajectree/quimb/quimb/tensor/optimize.py +1818 -0
- trajectree/quimb/quimb/tensor/tensor_1d.py +4778 -0
- trajectree/quimb/quimb/tensor/tensor_1d_compress.py +1854 -0
- trajectree/quimb/quimb/tensor/tensor_1d_tebd.py +662 -0
- trajectree/quimb/quimb/tensor/tensor_2d.py +5954 -0
- trajectree/quimb/quimb/tensor/tensor_2d_compress.py +96 -0
- trajectree/quimb/quimb/tensor/tensor_2d_tebd.py +1230 -0
- trajectree/quimb/quimb/tensor/tensor_3d.py +2869 -0
- trajectree/quimb/quimb/tensor/tensor_3d_tebd.py +46 -0
- trajectree/quimb/quimb/tensor/tensor_approx_spectral.py +60 -0
- trajectree/quimb/quimb/tensor/tensor_arbgeom.py +3237 -0
- trajectree/quimb/quimb/tensor/tensor_arbgeom_compress.py +565 -0
- trajectree/quimb/quimb/tensor/tensor_arbgeom_tebd.py +1138 -0
- trajectree/quimb/quimb/tensor/tensor_builder.py +5411 -0
- trajectree/quimb/quimb/tensor/tensor_core.py +11179 -0
- trajectree/quimb/quimb/tensor/tensor_dmrg.py +1472 -0
- trajectree/quimb/quimb/tensor/tensor_mera.py +204 -0
- trajectree/quimb/quimb/utils.py +892 -0
- trajectree/quimb/tests/__init__.py +0 -0
- trajectree/quimb/tests/test_accel.py +501 -0
- trajectree/quimb/tests/test_calc.py +788 -0
- trajectree/quimb/tests/test_core.py +847 -0
- trajectree/quimb/tests/test_evo.py +565 -0
- trajectree/quimb/tests/test_gen/__init__.py +0 -0
- trajectree/quimb/tests/test_gen/test_operators.py +361 -0
- trajectree/quimb/tests/test_gen/test_rand.py +296 -0
- trajectree/quimb/tests/test_gen/test_states.py +261 -0
- trajectree/quimb/tests/test_linalg/__init__.py +0 -0
- trajectree/quimb/tests/test_linalg/test_approx_spectral.py +368 -0
- trajectree/quimb/tests/test_linalg/test_base_linalg.py +351 -0
- trajectree/quimb/tests/test_linalg/test_mpi_linalg.py +127 -0
- trajectree/quimb/tests/test_linalg/test_numpy_linalg.py +84 -0
- trajectree/quimb/tests/test_linalg/test_rand_linalg.py +134 -0
- trajectree/quimb/tests/test_linalg/test_slepc_linalg.py +283 -0
- trajectree/quimb/tests/test_tensor/__init__.py +0 -0
- trajectree/quimb/tests/test_tensor/test_belief_propagation/__init__.py +0 -0
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_d1bp.py +39 -0
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_d2bp.py +67 -0
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_hd1bp.py +64 -0
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_hv1bp.py +51 -0
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_l1bp.py +142 -0
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_l2bp.py +101 -0
- trajectree/quimb/tests/test_tensor/test_circuit.py +816 -0
- trajectree/quimb/tests/test_tensor/test_contract.py +67 -0
- trajectree/quimb/tests/test_tensor/test_decomp.py +40 -0
- trajectree/quimb/tests/test_tensor/test_mera.py +52 -0
- trajectree/quimb/tests/test_tensor/test_optimizers.py +488 -0
- trajectree/quimb/tests/test_tensor/test_tensor_1d.py +1171 -0
- trajectree/quimb/tests/test_tensor/test_tensor_2d.py +606 -0
- trajectree/quimb/tests/test_tensor/test_tensor_2d_tebd.py +144 -0
- trajectree/quimb/tests/test_tensor/test_tensor_3d.py +123 -0
- trajectree/quimb/tests/test_tensor/test_tensor_arbgeom.py +226 -0
- trajectree/quimb/tests/test_tensor/test_tensor_builder.py +441 -0
- trajectree/quimb/tests/test_tensor/test_tensor_core.py +2066 -0
- trajectree/quimb/tests/test_tensor/test_tensor_dmrg.py +388 -0
- trajectree/quimb/tests/test_tensor/test_tensor_spectral_approx.py +63 -0
- trajectree/quimb/tests/test_tensor/test_tensor_tebd.py +270 -0
- trajectree/quimb/tests/test_utils.py +85 -0
- trajectree/trajectory.py +2 -2
- {trajectree-0.0.0.dist-info → trajectree-0.0.1.dist-info}/METADATA +2 -2
- trajectree-0.0.1.dist-info/RECORD +126 -0
- trajectree-0.0.0.dist-info/RECORD +0 -16
- {trajectree-0.0.0.dist-info → trajectree-0.0.1.dist-info}/WHEEL +0 -0
- {trajectree-0.0.0.dist-info → trajectree-0.0.1.dist-info}/licenses/LICENSE +0 -0
- {trajectree-0.0.0.dist-info → trajectree-0.0.1.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,397 @@
|
|
|
1
|
+
"""Manages the spawning of mpi processes to send to the various solvers.
|
|
2
|
+
"""
|
|
3
|
+
|
|
4
|
+
import os
|
|
5
|
+
import functools
|
|
6
|
+
|
|
7
|
+
import numpy as np
|
|
8
|
+
|
|
9
|
+
from .slepc_linalg import (
|
|
10
|
+
eigs_slepc,
|
|
11
|
+
svds_slepc,
|
|
12
|
+
mfn_multiply_slepc,
|
|
13
|
+
ssolve_slepc,
|
|
14
|
+
)
|
|
15
|
+
from ..core import _NUM_THREAD_WORKERS
|
|
16
|
+
|
|
17
|
+
# Work out if already running as mpi
if (
    ("OMPI_COMM_WORLD_SIZE" in os.environ)  # OpenMPI
    or ("PMI_SIZE" in os.environ)  # MPICH
):
    # whether this process was launched via the quimb MPI entry point
    # (the launcher sets _QUIMB_MPI_LAUNCHED in the environment)
    QUIMB_MPI_LAUNCHED = "_QUIMB_MPI_LAUNCHED" in os.environ
    ALREADY_RUNNING_AS_MPI = True
    # syncro mode: every rank runs the same program rather than a
    # master/worker pool model
    USE_SYNCRO = "QUIMB_SYNCRO_MPI" in os.environ
else:
    QUIMB_MPI_LAUNCHED = False
    ALREADY_RUNNING_AS_MPI = False
    USE_SYNCRO = False

# default to not allowing mpi spawning capabilities
# NOTE: any value of QUIMB_MPI_SPAWN other than TRUE/ON/FALSE/OFF
# (case-insensitive) raises KeyError here, at import time
ALLOW_SPAWN = {
    "TRUE": True,
    "ON": True,
    "FALSE": False,
    "OFF": False,
}[os.environ.get("QUIMB_MPI_SPAWN", "False").upper()]

# Work out the desired total number of workers: take the first of these
# environment variables that is set, in priority order
for _NUM_MPI_WORKERS_VAR in (
    "QUIMB_NUM_MPI_WORKERS",
    "QUIMB_NUM_PROCS",
    "OMPI_COMM_WORLD_SIZE",
    "PMI_SIZE",
    "OMP_NUM_THREADS",
):
    if _NUM_MPI_WORKERS_VAR in os.environ:
        NUM_MPI_WORKERS = int(os.environ[_NUM_MPI_WORKERS_VAR])
        break
else:
    # none set: fall back to the number of physical cores
    import psutil

    _NUM_MPI_WORKERS_VAR = "psutil"
    NUM_MPI_WORKERS = psutil.cpu_count(logical=False)
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
def can_use_mpi_pool():
    """Whether calling `get_mpi_pool` is permitted in this process.

    True when spawning has been explicitly enabled (``QUIMB_MPI_SPAWN``)
    or when this process is itself already running under MPI.
    """
    if ALLOW_SPAWN:
        return True
    return ALREADY_RUNNING_AS_MPI
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
def bcast(result, comm, result_rank):
    """Broadcast ``result`` from rank ``result_rank`` to every rank on
    ``comm``, dispatching to buffer-based MPI communication when the result
    is a numpy array, and generic (pickle) broadcast otherwise.
    """
    me = comm.Get_rank()
    holder = me == result_rank

    # first agree across all ranks on whether the payload is a numpy array
    payload_is_array = comm.bcast(
        isinstance(result, np.ndarray) if holder else None,
        root=result_rank,
    )

    if not payload_is_array:
        # generic object -> standard pickled broadcast
        return comm.bcast(result, root=result_rank)

    # share the array metadata so receiving ranks can allocate a buffer
    meta = (result.shape, str(result.dtype)) if holder else None
    shape, dtype = comm.bcast(meta, root=result_rank)

    if not holder:
        result = np.empty(shape, dtype=dtype)

    # fast, buffer-based broadcast of the raw array data
    comm.Bcast(result, root=result_rank)

    return result
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
class SyncroFuture:
    """Minimal future-like object used in syncro mode: the result is
    computed eagerly on ``result_rank`` at submission time and only
    broadcast to the other ranks when ``result()`` is called.
    """

    def __init__(self, result, result_rank, comm):
        # the locally computed result (None on non-computing ranks)
        self._result = result
        # the rank that actually holds the real result
        self.result_rank = result_rank
        self.comm = comm

    def result(self):
        """Return the result on every rank, broadcasting it from
        ``result_rank``. Tuples containing numpy arrays are broadcast
        element-wise so the arrays use fast buffer communication.
        """
        rank = self.comm.Get_rank()

        # decide, on the holding rank, whether to broadcast element-wise:
        # only done for tuples containing at least one numpy array
        if rank == self.result_rank:
            should_it = isinstance(self._result, tuple) and any(
                isinstance(x, np.ndarray) for x in self._result
            )
            if should_it:
                iterate_over = len(self._result)
            else:
                iterate_over = 0
        else:
            iterate_over = None
        iterate_over = self.comm.bcast(iterate_over, root=self.result_rank)

        if iterate_over:
            if rank != self.result_rank:
                # placeholder tuple so all ranks iterate in lockstep
                self._result = (None,) * iterate_over

            result = tuple(
                bcast(x, self.comm, self.result_rank) for x in self._result
            )
        else:
            result = bcast(self._result, self.comm, self.result_rank)

        return result

    @staticmethod
    def cancel():
        # fix: error message previously misspelled "fashion" as "fasion"
        raise ValueError(
            "SyncroFutures cannot be cancelled - they are "
            "submitted in a parallel round-robin fashion where "
            "each worker immediately computes all its results."
        )
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
class SynchroMPIPool:
    """An object that looks like a ``concurrent.futures`` executor but actually
    hands tasks out round-robin to the MPI ranks, each of which computes its
    own share immediately, with results broadcast between ranks on demand.
    """

    def __init__(self):
        import itertools
        from mpi4py import MPI

        self.comm = MPI.COMM_WORLD
        self.size = self.comm.Get_size()
        self.rank = self.comm.Get_rank()
        # endless round-robin assignment of submitted jobs to ranks
        self.counter = itertools.cycle(range(0, self.size))
        self._max_workers = self.size

    def submit(self, fn, *args, **kwargs):
        """Assign the call to the next rank in round-robin order; only the
        matching rank evaluates it. Returns a ``SyncroFuture`` that will
        broadcast the result when asked.
        """
        target = next(self.counter)
        # compute locally only if this rank drew the job
        res = fn(*args, **kwargs) if target == self.rank else None
        return SyncroFuture(res, target, self.comm)

    def shutdown(self):
        """No-op: the 'workers' are persistent MPI ranks."""
        pass
|
|
170
|
+
|
|
171
|
+
|
|
172
|
+
class CachedPoolWithShutdown:
    """Decorator caching the wrapped pool factory's result per
    ``(num_workers, num_threads)``, shutting down the previously created
    pool whenever a differently configured one is requested.
    """

    def __init__(self, pool_fn):
        self._settings = "__UNINITIALIZED__"
        self._pool_fn = pool_fn

    def __call__(self, num_workers=None, num_threads=1):
        # normalize ``None`` to the global default so cache keys compare equal
        if num_workers is None:
            num_workers = NUM_MPI_WORKERS
        elif ALREADY_RUNNING_AS_MPI and (num_workers != NUM_MPI_WORKERS):
            # under MPI the worker count is fixed by the launch command
            raise ValueError(
                "Can't specify number of processes when running "
                "under MPI rather than spawning processes."
            )

        key = (num_workers, num_threads)
        if self._settings == key:
            # cache hit: same configuration as the current pool
            return self._pool
        if self._settings != "__UNINITIALIZED__":
            # a differently configured pool exists: retire it first
            self._pool.shutdown()
        self._pool = self._pool_fn(num_workers, num_threads)
        self._settings = key
        return self._pool
|
|
201
|
+
|
|
202
|
+
|
|
203
|
+
@CachedPoolWithShutdown
def get_mpi_pool(num_workers=None, num_threads=1):
    """Get the MPI executor pool, with specified number of processes and
    threads per process. Pools are cached and reused per settings by the
    ``CachedPoolWithShutdown`` decorator.
    """
    # trivial case: one worker at the default thread count needs no MPI
    # machinery - an ordinary single-process pool suffices
    if (num_workers == 1) and (num_threads == _NUM_THREAD_WORKERS):
        from concurrent.futures import ProcessPoolExecutor

        return ProcessPoolExecutor(1)

    # require launch through the dedicated entry point so that the MPI
    # environment is set up consistently
    if not QUIMB_MPI_LAUNCHED:
        raise RuntimeError(
            "For the moment, quimb programs using `get_mpi_pool` need to be "
            "explicitly launched using `quimb-mpi-python`."
        )

    # syncro mode: every rank runs the same code, use the round-robin pool
    if USE_SYNCRO:
        return SynchroMPIPool()

    if not can_use_mpi_pool():
        raise RuntimeError(
            "`get_mpi_pool()` cannot be explicitly called unless already "
            "running under MPI, or you set the environment variable "
            "`QUIMB_MPI_SPAWN=True`."
        )

    from mpi4py.futures import MPIPoolExecutor

    # spawn worker processes, forwarding the thread/worker settings via
    # environment variables so children configure themselves to match
    return MPIPoolExecutor(
        num_workers,
        main=False,
        env={
            "OMP_NUM_THREADS": str(num_threads),
            "QUIMB_NUM_MPI_WORKERS": str(num_workers),
            "_QUIMB_MPI_LAUNCHED": "SPAWNED",
        },
    )
|
|
240
|
+
|
|
241
|
+
|
|
242
|
+
class GetMPIBeforeCall(object):
    """Wrap a function so the correct MPI communicator is resolved just
    before it runs, with a ``comm_self`` kwarg to force ``MPI.COMM_SELF``.

    This is called by every mpi process before the function evaluation.
    """

    def __init__(self, fn):
        self.fn = fn

    def __call__(
        self, *args, comm_self=False, wait_for_workers=None, **kwargs
    ):
        """
        Parameters
        ----------
        args
            Supplied to self.fn
        comm_self : bool, optional
            Whether to force use of MPI.COMM_SELF
        wait_for_workers : int, optional
            If set, wait for the communicator to have this many workers, this
            can help to catch some errors regarding expected worker numbers.
        kwargs
            Supplied to self.fn
        """
        from mpi4py import MPI

        comm = MPI.COMM_SELF if comm_self else MPI.COMM_WORLD

        if wait_for_workers is not None:
            from time import time

            # spin (for up to 2 seconds) until the expected number of
            # ranks has joined the communicator
            deadline = time() + 2
            while comm.Get_size() != wait_for_workers:
                if time() > deadline:
                    raise RuntimeError(
                        f"Timeout while waiting for {wait_for_workers} "
                        f"workers to join comm {comm}."
                    )

        return self.fn(*args, comm=comm, **kwargs)
|
|
288
|
+
|
|
289
|
+
|
|
290
|
+
class SpawnMPIProcessesFunc(object):
    """Automatically wrap a function to be executed in parallel by a
    pool of mpi workers.

    This is only called by the master mpi process in manual mode, only by
    the (non-mpi) spawning process in automatic mode, or by all processes in
    syncro mode.
    """

    def __init__(self, fn):
        # fn must accept `comm_self` and `wait_for_workers` kwargs, e.g. a
        # `GetMPIBeforeCall`-wrapped function
        self.fn = fn

    def __call__(
        self,
        *args,
        num_workers=None,
        num_threads=1,
        mpi_pool=None,
        spawn_all=USE_SYNCRO or (not ALREADY_RUNNING_AS_MPI),
        **kwargs,
    ):
        """
        Parameters
        ----------
        args
            Supplied to `self.fn`.
        num_workers : int, optional
            How many total processes should run the function in parallel.
        num_threads : int, optional
            How many (OMP) threads each process should use.
        mpi_pool : pool-like, optional
            If supplied (default ``None``), submit the function to this
            existing pool instead of fetching one via `get_mpi_pool`.
        spawn_all : bool, optional
            Whether all the parallel processes should be spawned (True), or
            num_workers - 1, so that the current process can also do work.
        kwargs
            Supplied to `self.fn`.

        Returns
        -------
        `fn` output from the master process.
        """
        if num_workers is None:
            num_workers = NUM_MPI_WORKERS

        if (
            # user must have explicitly enabled MPI use (or already be
            # running under MPI) - otherwise run serially
            (not can_use_mpi_pool())
            or
            # no pool or communicator needed
            (num_workers == 1)
        ):
            return self.fn(*args, comm_self=True, **kwargs)

        # have each worker verify the expected communicator size
        kwargs["wait_for_workers"] = num_workers

        if mpi_pool is not None:
            pool = mpi_pool
        else:
            pool = get_mpi_pool(num_workers, num_threads)

        # the (non mpi) main process is idle while the workers compute.
        if spawn_all:
            futures = [
                pool.submit(self.fn, *args, **kwargs)
                for _ in range(num_workers)
            ]
            results = [f.result() for f in futures]

        # the master process is the master mpi process and contributes
        else:
            futures = [
                pool.submit(self.fn, *args, **kwargs)
                for _ in range(num_workers - 1)
            ]
            results = [self.fn(*args, **kwargs)] + [
                f.result() for f in futures
            ]

        # Get master result, (not always first submitted)
        return next(r for r in results if r is not None)
|
|
371
|
+
|
|
372
|
+
|
|
373
|
+
# ---------------------------------- SLEPC ---------------------------------- #

# For each slepc solver two variants are exposed:
#   *_mpi   - to be called by every MPI rank; `GetMPIBeforeCall` injects the
#             appropriate communicator as the `comm` kwarg.
#   *_spawn - called once; `SpawnMPIProcessesFunc` fans the `*_mpi` version
#             out over an MPI pool and collects the master result.
# `functools.wraps` preserves the underlying solver's name and docstring.

eigs_slepc_mpi = functools.wraps(eigs_slepc)(GetMPIBeforeCall(eigs_slepc))
eigs_slepc_spawn = functools.wraps(eigs_slepc)(
    SpawnMPIProcessesFunc(eigs_slepc_mpi)
)

svds_slepc_mpi = functools.wraps(svds_slepc)(GetMPIBeforeCall(svds_slepc))
svds_slepc_spawn = functools.wraps(svds_slepc)(
    SpawnMPIProcessesFunc(svds_slepc_mpi)
)

mfn_multiply_slepc_mpi = functools.wraps(mfn_multiply_slepc)(
    GetMPIBeforeCall(mfn_multiply_slepc)
)
mfn_multiply_slepc_spawn = functools.wraps(mfn_multiply_slepc)(
    SpawnMPIProcessesFunc(mfn_multiply_slepc_mpi)
)

ssolve_slepc_mpi = functools.wraps(ssolve_slepc)(
    GetMPIBeforeCall(ssolve_slepc)
)
ssolve_slepc_spawn = functools.wraps(ssolve_slepc)(
    SpawnMPIProcessesFunc(ssolve_slepc_mpi)
)
|
|
@@ -0,0 +1,244 @@
|
|
|
1
|
+
"""Numpy base linear algebra."""
|
|
2
|
+
|
|
3
|
+
import numpy as np
|
|
4
|
+
import numpy.linalg as nla
|
|
5
|
+
import scipy.linalg as scla
|
|
6
|
+
|
|
7
|
+
import quimb as qu
|
|
8
|
+
from .autoblock import eigensystem_autoblocked
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
# dense eigen-routines keyed on (return_vecs, isherm)
_NUMPY_EIG_FUNCS = {
    (True, True): nla.eigh,
    (True, False): nla.eig,
    (False, True): nla.eigvalsh,
    (False, False): nla.eigvals,
}


def eig_numpy(A, sort=True, isherm=True, return_vecs=True, autoblock=False):
    """Numpy based dense eigensolve.

    Parameters
    ----------
    A : array_like
        The operator to decompose.
    sort : bool, optional
        Whether to sort into ascending order.
    isherm : bool, optional
        Whether ``A`` is hermitian.
    return_vecs : bool, optional
        Whether to return the eigenvectors.
    autoblock : bool, optional
        If true, automatically identify and exploit symmetries appearing in
        the current basis as block diagonals formed via permutation of rows
        and columns.

    Returns
    -------
    evals : 1D-array
        The eigenvalues.
    evecs : qarray
        If ``return_vecs=True``, the eigenvectors.
    """
    if autoblock:
        # delegate to the symmetry-exploiting block solver
        return eigensystem_autoblocked(
            A, sort=sort, isherm=isherm, return_vecs=return_vecs
        )

    solver = _NUMPY_EIG_FUNCS[return_vecs, isherm]
    out = solver(A)

    if not return_vecs:
        if sort:
            out.sort()
        return out

    lams, vecs = out
    if sort:
        order = np.argsort(lams)
        lams = lams[order]
        vecs = vecs[:, order]
    return lams, qu.qarray(vecs)
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
def sort_inds(a, method, sigma=None):
    """Return the sorting inds of a list

    Parameters
    ----------
    a : array_like
        List to base sort on.
    method : str
        Method of sorting list, one of
        * "LM" - Largest magnitude first
        * "SM" - Smallest magnitude first
        * "SA" - Smallest algebraic first
        * "SR" - Smallest real part first
        * "SI" - Smallest imaginary part first
        * "LA" - Largest algebraic first
        * "LR" - Largest real part first
        * "LI" - Largest imaginary part first
        * "TM" - Magnitude closest to target sigma first
        * "TR" - Real part closest to target sigma first
        * "TI" - Imaginary part closest to target sigma first
    sigma : float, optional
        The target if method={"TM", "TR", or "TI"}.

    Returns
    -------
    inds : array of int
        Indices that would sort `a` based on `method`
    """
    m = method.upper()
    # compute a sort key such that ascending argsort of the key yields the
    # requested ordering
    if m == "LM":
        key = -abs(a)
    elif m == "SM":
        key = -abs(1 / a)
    elif m == "SA":
        key = a
    elif m == "SR":
        key = a.real
    elif m == "SI":
        key = a.imag
    elif m == "LA":
        key = -a
    elif m == "LR":
        key = -a.real
    elif m == "LI":
        key = -a.imag
    elif m == "TM":
        key = -1 / abs(abs(a) - sigma)
    elif m == "TR":
        key = -1 / abs(a.real - sigma)
    elif m == "TI":
        key = -1 / abs(a.imag - sigma)
    else:
        # match the KeyError the original dict lookup would raise
        raise KeyError(m)
    return np.argsort(key)
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
# dense eigen-solver routines keyed on (isherm, return_vecs, generalized);
# scipy supplies the generalized (two-operator) variants, numpy the rest
_DENSE_EIG_METHODS = {
    (True, True, False): nla.eigh,
    (True, False, False): nla.eigvalsh,
    (False, True, False): nla.eig,
    (False, False, False): nla.eigvals,
    (True, True, True): scla.eigh,
    (True, False, True): scla.eigvalsh,
    (False, True, True): scla.eig,
    (False, False, True): scla.eigvals,
}
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
def eigs_numpy(
    A,
    k,
    B=None,
    which=None,
    return_vecs=True,
    sigma=None,
    isherm=True,
    P=None,
    sort=True,
    **eig_opts,
):
    """Partial eigen-decomposition using numpy's dense linear algebra.

    Parameters
    ----------
    A : array_like or quimb.Lazy
        Operator to partially eigen-decompose.
    k : int
        Number of eigenpairs to return.
    B : array_like or quimb.Lazy
        If given, the RHS operator defining a generalized eigen problem.
    which : str, optional
        Which part of the spectrum to target.
    return_vecs : bool, optional
        Whether to return eigenvectors.
    sigma : None or float, optional
        Target eigenvalue.
    isherm : bool, optional
        Whether `a` is hermitian.
    P : array_like or quimb.Lazy
        Perform the eigensolve in the subspace defined by this projector.
    sort : bool, optional
        Whether to sort reduced list of eigenpairs into ascending order.
    eig_opts
        Settings to pass to numpy.eig... functions.

    Returns
    -------
    lk, (vk): k eigenvalues (and eigenvectors) sorted according to which
    """
    # materialize any lazily-constructed operators
    if isinstance(A, qu.Lazy):
        A = A()
    if isinstance(B, qu.Lazy):
        B = B()
    if isinstance(P, qu.Lazy):
        P = P()

    if P is not None:
        # restrict the problem to the subspace picked out by ``P``
        A = qu.dag(P) @ (A @ P)

    generalized = B is not None
    eig_fn = _DENSE_EIG_METHODS[(isherm, return_vecs, generalized)]

    if generalized:
        eig_opts["b"] = B

    # options only meaningful for iterative/partial solvers - discard them
    for opt in ("ncv", "v0", "tol", "maxiter", "EPSType"):
        eig_opts.pop(opt, None)

    dense_A = A.toarray() if qu.issparse(A) else A

    if not return_vecs:
        # full spectrum, then trim to the k values selected by `which`
        evals = eig_fn(dense_A, **eig_opts)
        keep = sort_inds(evals, method=which, sigma=sigma)[:k]
        evals = evals[keep]
        return np.sort(evals) if sort else evals

    # full decomposition, then trim to the k pairs selected by `which`
    evals, evecs = eig_fn(dense_A, **eig_opts)
    keep = sort_inds(evals, method=which, sigma=sigma)[:k]
    evals, evecs = evals[keep], evecs[:, keep]

    # optionally put the kept pairs into ascending eigenvalue order
    if sort:
        asc = np.argsort(evals)
        evals, evecs = evals[asc], evecs[:, asc]

    if P is not None:
        # lift the eigenvectors back out of the subspace
        evecs = P @ evecs

    return evals, qu.qarray(evecs)
|
|
217
|
+
|
|
218
|
+
|
|
219
|
+
def svds_numpy(a, k, return_vecs=True, **_):
    """Partial singular value decomposition using numpys (full) singular value
    decomposition.

    Parameters
    ----------
    a : array_like
        Operator to decompose.
    k : int, optional
        Number of singular value triplets to retrieve.
    return_vecs : bool, optional
        whether to return the computed vecs or values only

    Returns
    -------
    (uk,) sk (, vkt) :
        Singlar value triplets.
    """
    dense = a.toarray() if qu.issparse(a) else a

    if not return_vecs:
        # values only - skip computing the singular vectors entirely
        return nla.svd(dense, compute_uv=False)[:k]

    U, s, Vh = nla.svd(dense, compute_uv=True)
    # keep only the leading k triplets (numpy returns values descending)
    return qu.qarray(U[:, :k]), s[:k], qu.qarray(Vh[:k, :])
|