Trajectree 0.0.1__py3-none-any.whl → 0.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- trajectree/__init__.py +0 -3
- trajectree/fock_optics/devices.py +1 -1
- trajectree/fock_optics/light_sources.py +2 -2
- trajectree/fock_optics/measurement.py +9 -9
- trajectree/fock_optics/outputs.py +10 -6
- trajectree/fock_optics/utils.py +9 -6
- trajectree/sequence/swap.py +5 -4
- trajectree/trajectory.py +5 -4
- {trajectree-0.0.1.dist-info → trajectree-0.0.3.dist-info}/METADATA +2 -3
- trajectree-0.0.3.dist-info/RECORD +16 -0
- trajectree/quimb/docs/_pygments/_pygments_dark.py +0 -118
- trajectree/quimb/docs/_pygments/_pygments_light.py +0 -118
- trajectree/quimb/docs/conf.py +0 -158
- trajectree/quimb/docs/examples/ex_mpi_expm_evo.py +0 -62
- trajectree/quimb/quimb/__init__.py +0 -507
- trajectree/quimb/quimb/calc.py +0 -1491
- trajectree/quimb/quimb/core.py +0 -2279
- trajectree/quimb/quimb/evo.py +0 -712
- trajectree/quimb/quimb/experimental/__init__.py +0 -0
- trajectree/quimb/quimb/experimental/autojittn.py +0 -129
- trajectree/quimb/quimb/experimental/belief_propagation/__init__.py +0 -109
- trajectree/quimb/quimb/experimental/belief_propagation/bp_common.py +0 -397
- trajectree/quimb/quimb/experimental/belief_propagation/d1bp.py +0 -316
- trajectree/quimb/quimb/experimental/belief_propagation/d2bp.py +0 -653
- trajectree/quimb/quimb/experimental/belief_propagation/hd1bp.py +0 -571
- trajectree/quimb/quimb/experimental/belief_propagation/hv1bp.py +0 -775
- trajectree/quimb/quimb/experimental/belief_propagation/l1bp.py +0 -316
- trajectree/quimb/quimb/experimental/belief_propagation/l2bp.py +0 -537
- trajectree/quimb/quimb/experimental/belief_propagation/regions.py +0 -194
- trajectree/quimb/quimb/experimental/cluster_update.py +0 -286
- trajectree/quimb/quimb/experimental/merabuilder.py +0 -865
- trajectree/quimb/quimb/experimental/operatorbuilder/__init__.py +0 -15
- trajectree/quimb/quimb/experimental/operatorbuilder/operatorbuilder.py +0 -1631
- trajectree/quimb/quimb/experimental/schematic.py +0 -7
- trajectree/quimb/quimb/experimental/tn_marginals.py +0 -130
- trajectree/quimb/quimb/experimental/tnvmc.py +0 -1483
- trajectree/quimb/quimb/gates.py +0 -36
- trajectree/quimb/quimb/gen/__init__.py +0 -2
- trajectree/quimb/quimb/gen/operators.py +0 -1167
- trajectree/quimb/quimb/gen/rand.py +0 -713
- trajectree/quimb/quimb/gen/states.py +0 -479
- trajectree/quimb/quimb/linalg/__init__.py +0 -6
- trajectree/quimb/quimb/linalg/approx_spectral.py +0 -1109
- trajectree/quimb/quimb/linalg/autoblock.py +0 -258
- trajectree/quimb/quimb/linalg/base_linalg.py +0 -719
- trajectree/quimb/quimb/linalg/mpi_launcher.py +0 -397
- trajectree/quimb/quimb/linalg/numpy_linalg.py +0 -244
- trajectree/quimb/quimb/linalg/rand_linalg.py +0 -514
- trajectree/quimb/quimb/linalg/scipy_linalg.py +0 -293
- trajectree/quimb/quimb/linalg/slepc_linalg.py +0 -892
- trajectree/quimb/quimb/schematic.py +0 -1518
- trajectree/quimb/quimb/tensor/__init__.py +0 -401
- trajectree/quimb/quimb/tensor/array_ops.py +0 -610
- trajectree/quimb/quimb/tensor/circuit.py +0 -4824
- trajectree/quimb/quimb/tensor/circuit_gen.py +0 -411
- trajectree/quimb/quimb/tensor/contraction.py +0 -336
- trajectree/quimb/quimb/tensor/decomp.py +0 -1255
- trajectree/quimb/quimb/tensor/drawing.py +0 -1646
- trajectree/quimb/quimb/tensor/fitting.py +0 -385
- trajectree/quimb/quimb/tensor/geometry.py +0 -583
- trajectree/quimb/quimb/tensor/interface.py +0 -114
- trajectree/quimb/quimb/tensor/networking.py +0 -1058
- trajectree/quimb/quimb/tensor/optimize.py +0 -1818
- trajectree/quimb/quimb/tensor/tensor_1d.py +0 -4778
- trajectree/quimb/quimb/tensor/tensor_1d_compress.py +0 -1854
- trajectree/quimb/quimb/tensor/tensor_1d_tebd.py +0 -662
- trajectree/quimb/quimb/tensor/tensor_2d.py +0 -5954
- trajectree/quimb/quimb/tensor/tensor_2d_compress.py +0 -96
- trajectree/quimb/quimb/tensor/tensor_2d_tebd.py +0 -1230
- trajectree/quimb/quimb/tensor/tensor_3d.py +0 -2869
- trajectree/quimb/quimb/tensor/tensor_3d_tebd.py +0 -46
- trajectree/quimb/quimb/tensor/tensor_approx_spectral.py +0 -60
- trajectree/quimb/quimb/tensor/tensor_arbgeom.py +0 -3237
- trajectree/quimb/quimb/tensor/tensor_arbgeom_compress.py +0 -565
- trajectree/quimb/quimb/tensor/tensor_arbgeom_tebd.py +0 -1138
- trajectree/quimb/quimb/tensor/tensor_builder.py +0 -5411
- trajectree/quimb/quimb/tensor/tensor_core.py +0 -11179
- trajectree/quimb/quimb/tensor/tensor_dmrg.py +0 -1472
- trajectree/quimb/quimb/tensor/tensor_mera.py +0 -204
- trajectree/quimb/quimb/utils.py +0 -892
- trajectree/quimb/tests/__init__.py +0 -0
- trajectree/quimb/tests/test_accel.py +0 -501
- trajectree/quimb/tests/test_calc.py +0 -788
- trajectree/quimb/tests/test_core.py +0 -847
- trajectree/quimb/tests/test_evo.py +0 -565
- trajectree/quimb/tests/test_gen/__init__.py +0 -0
- trajectree/quimb/tests/test_gen/test_operators.py +0 -361
- trajectree/quimb/tests/test_gen/test_rand.py +0 -296
- trajectree/quimb/tests/test_gen/test_states.py +0 -261
- trajectree/quimb/tests/test_linalg/__init__.py +0 -0
- trajectree/quimb/tests/test_linalg/test_approx_spectral.py +0 -368
- trajectree/quimb/tests/test_linalg/test_base_linalg.py +0 -351
- trajectree/quimb/tests/test_linalg/test_mpi_linalg.py +0 -127
- trajectree/quimb/tests/test_linalg/test_numpy_linalg.py +0 -84
- trajectree/quimb/tests/test_linalg/test_rand_linalg.py +0 -134
- trajectree/quimb/tests/test_linalg/test_slepc_linalg.py +0 -283
- trajectree/quimb/tests/test_tensor/__init__.py +0 -0
- trajectree/quimb/tests/test_tensor/test_belief_propagation/__init__.py +0 -0
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_d1bp.py +0 -39
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_d2bp.py +0 -67
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_hd1bp.py +0 -64
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_hv1bp.py +0 -51
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_l1bp.py +0 -142
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_l2bp.py +0 -101
- trajectree/quimb/tests/test_tensor/test_circuit.py +0 -816
- trajectree/quimb/tests/test_tensor/test_contract.py +0 -67
- trajectree/quimb/tests/test_tensor/test_decomp.py +0 -40
- trajectree/quimb/tests/test_tensor/test_mera.py +0 -52
- trajectree/quimb/tests/test_tensor/test_optimizers.py +0 -488
- trajectree/quimb/tests/test_tensor/test_tensor_1d.py +0 -1171
- trajectree/quimb/tests/test_tensor/test_tensor_2d.py +0 -606
- trajectree/quimb/tests/test_tensor/test_tensor_2d_tebd.py +0 -144
- trajectree/quimb/tests/test_tensor/test_tensor_3d.py +0 -123
- trajectree/quimb/tests/test_tensor/test_tensor_arbgeom.py +0 -226
- trajectree/quimb/tests/test_tensor/test_tensor_builder.py +0 -441
- trajectree/quimb/tests/test_tensor/test_tensor_core.py +0 -2066
- trajectree/quimb/tests/test_tensor/test_tensor_dmrg.py +0 -388
- trajectree/quimb/tests/test_tensor/test_tensor_spectral_approx.py +0 -63
- trajectree/quimb/tests/test_tensor/test_tensor_tebd.py +0 -270
- trajectree/quimb/tests/test_utils.py +0 -85
- trajectree-0.0.1.dist-info/RECORD +0 -126
- {trajectree-0.0.1.dist-info → trajectree-0.0.3.dist-info}/WHEEL +0 -0
- {trajectree-0.0.1.dist-info → trajectree-0.0.3.dist-info}/licenses/LICENSE +0 -0
- {trajectree-0.0.1.dist-info → trajectree-0.0.3.dist-info}/top_level.txt +0 -0
trajectree/quimb/quimb/core.py
DELETED
|
@@ -1,2279 +0,0 @@
|
|
|
1
|
-
"""Core functions for manipulating quantum objects."""
|
|
2
|
-
|
|
3
|
-
import cmath
|
|
4
|
-
import functools
|
|
5
|
-
import itertools
|
|
6
|
-
import math
|
|
7
|
-
import os
|
|
8
|
-
from numbers import Integral
|
|
9
|
-
|
|
10
|
-
import numpy as np
|
|
11
|
-
import scipy.sparse as sp
|
|
12
|
-
|
|
13
|
-
from .utils import partition_all
|
|
14
|
-
|
|
15
|
-
try:
|
|
16
|
-
from math import prod
|
|
17
|
-
except ImportError:
|
|
18
|
-
import operator
|
|
19
|
-
|
|
20
|
-
def prod(iterable):
|
|
21
|
-
return functools.reduce(operator.mul, iterable, 1)
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
# --------------------------------------------------------------------------- #
|
|
25
|
-
# Accelerated Functions #
|
|
26
|
-
# --------------------------------------------------------------------------- #
|
|
27
|
-
|
|
28
|
-
for env_var in [
|
|
29
|
-
"QUIMB_NUM_THREAD_WORKERS",
|
|
30
|
-
"QUIMB_NUM_PROCS",
|
|
31
|
-
"OMP_NUM_THREADS",
|
|
32
|
-
]:
|
|
33
|
-
if env_var in os.environ:
|
|
34
|
-
_NUM_THREAD_WORKERS = int(os.environ[env_var])
|
|
35
|
-
break
|
|
36
|
-
else:
|
|
37
|
-
import psutil
|
|
38
|
-
|
|
39
|
-
_NUM_THREAD_WORKERS = psutil.cpu_count(logical=False)
|
|
40
|
-
|
|
41
|
-
if "NUMBA_NUM_THREADS" in os.environ:
|
|
42
|
-
if int(os.environ["NUMBA_NUM_THREADS"]) != _NUM_THREAD_WORKERS:
|
|
43
|
-
import warnings
|
|
44
|
-
|
|
45
|
-
warnings.warn(
|
|
46
|
-
"'NUMBA_NUM_THREADS' has been set elsewhere and doesn't match the "
|
|
47
|
-
"value 'quimb' has tried to set - "
|
|
48
|
-
f"{os.environ['NUMBA_NUM_THREADS']} vs {_NUM_THREAD_WORKERS}."
|
|
49
|
-
)
|
|
50
|
-
else:
|
|
51
|
-
os.environ["NUMBA_NUM_THREADS"] = str(_NUM_THREAD_WORKERS)
|
|
52
|
-
|
|
53
|
-
# need to set NUMBA_NUM_THREADS first
|
|
54
|
-
import numba # noqa
|
|
55
|
-
|
|
56
|
-
_NUMBA_CACHE = {
|
|
57
|
-
"TRUE": True,
|
|
58
|
-
"ON": True,
|
|
59
|
-
"FALSE": False,
|
|
60
|
-
"OFF": False,
|
|
61
|
-
}[os.environ.get("QUIMB_NUMBA_CACHE", "True").upper()]
|
|
62
|
-
_NUMBA_PAR = {
|
|
63
|
-
"TRUE": True,
|
|
64
|
-
"ON": True,
|
|
65
|
-
"FALSE": False,
|
|
66
|
-
"OFF": False,
|
|
67
|
-
}[os.environ.get("QUIMB_NUMBA_PARALLEL", "True").upper()]
|
|
68
|
-
|
|
69
|
-
njit = functools.partial(numba.njit, cache=_NUMBA_CACHE)
|
|
70
|
-
"""Numba no-python jit, but obeying cache setting.
|
|
71
|
-
"""
|
|
72
|
-
|
|
73
|
-
pnjit = functools.partial(numba.njit, cache=_NUMBA_CACHE, parallel=_NUMBA_PAR)
|
|
74
|
-
"""Numba no-python jit, but obeying cache setting, with optional parallel
|
|
75
|
-
target, depending on environment variable 'QUIMB_NUMBA_PARALLEL'.
|
|
76
|
-
"""
|
|
77
|
-
|
|
78
|
-
vectorize = functools.partial(numba.vectorize, cache=_NUMBA_CACHE)
|
|
79
|
-
"""Numba vectorize, but obeying cache setting.
|
|
80
|
-
"""
|
|
81
|
-
|
|
82
|
-
pvectorize = functools.partial(
|
|
83
|
-
numba.vectorize,
|
|
84
|
-
cache=_NUMBA_CACHE,
|
|
85
|
-
target="parallel" if _NUMBA_PAR else "cpu",
|
|
86
|
-
)
|
|
87
|
-
"""Numba vectorize, but obeying cache setting, with optional parallel
|
|
88
|
-
target, depending on environment variable 'QUIMB_NUMBA_PARALLEL'.
|
|
89
|
-
"""
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
class CacheThreadPool(object):
|
|
93
|
-
""" """
|
|
94
|
-
|
|
95
|
-
def __init__(self, func):
|
|
96
|
-
self._settings = "__UNINITIALIZED__"
|
|
97
|
-
self._pool_fn = func
|
|
98
|
-
|
|
99
|
-
def __call__(self, num_threads=None):
|
|
100
|
-
# convert None to default so caches the same
|
|
101
|
-
if num_threads is None:
|
|
102
|
-
num_threads = _NUM_THREAD_WORKERS
|
|
103
|
-
# first call
|
|
104
|
-
if self._settings == "__UNINITIALIZED__":
|
|
105
|
-
self._pool = self._pool_fn(num_threads)
|
|
106
|
-
self._settings = num_threads
|
|
107
|
-
# new type of pool requested
|
|
108
|
-
elif self._settings != num_threads:
|
|
109
|
-
self._pool.shutdown()
|
|
110
|
-
self._pool = self._pool_fn(num_threads)
|
|
111
|
-
self._settings = num_threads
|
|
112
|
-
return self._pool
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
@CacheThreadPool
|
|
116
|
-
def get_thread_pool(num_workers=None):
|
|
117
|
-
from concurrent.futures import ThreadPoolExecutor
|
|
118
|
-
|
|
119
|
-
return ThreadPoolExecutor(num_workers)
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
def par_reduce(fn, seq, nthreads=_NUM_THREAD_WORKERS):
|
|
123
|
-
"""Parallel reduce.
|
|
124
|
-
|
|
125
|
-
Parameters
|
|
126
|
-
----------
|
|
127
|
-
fn : callable
|
|
128
|
-
Two argument function to reduce with.
|
|
129
|
-
seq : sequence
|
|
130
|
-
Sequence to reduce.
|
|
131
|
-
nthreads : int, optional
|
|
132
|
-
The number of threads to reduce with in parallel.
|
|
133
|
-
|
|
134
|
-
Returns
|
|
135
|
-
-------
|
|
136
|
-
depends on ``fn`` and ``seq``.
|
|
137
|
-
|
|
138
|
-
Notes
|
|
139
|
-
-----
|
|
140
|
-
This has a several hundred microsecond overhead.
|
|
141
|
-
"""
|
|
142
|
-
if nthreads == 1:
|
|
143
|
-
return functools.reduce(fn, seq)
|
|
144
|
-
|
|
145
|
-
pool = get_thread_pool(nthreads) # cached
|
|
146
|
-
|
|
147
|
-
def _sfn(x):
|
|
148
|
-
"""Single call of `fn`, but accounts for the fact
|
|
149
|
-
that can be passed a single item, in which case
|
|
150
|
-
it should not perform the binary operation.
|
|
151
|
-
"""
|
|
152
|
-
if len(x) == 1:
|
|
153
|
-
return x[0]
|
|
154
|
-
return fn(*x)
|
|
155
|
-
|
|
156
|
-
def _inner_preduce(x):
|
|
157
|
-
"""Splits the sequence into pairs and possibly one
|
|
158
|
-
singlet, on each of which `fn` is performed to create
|
|
159
|
-
a new sequence.
|
|
160
|
-
"""
|
|
161
|
-
if len(x) <= 2:
|
|
162
|
-
return _sfn(x)
|
|
163
|
-
paired_x = partition_all(2, x)
|
|
164
|
-
new_x = tuple(pool.map(_sfn, paired_x))
|
|
165
|
-
return _inner_preduce(new_x)
|
|
166
|
-
|
|
167
|
-
return _inner_preduce(tuple(seq))
|
|
168
|
-
|
|
169
|
-
|
|
170
|
-
def make_immutable(mat):
|
|
171
|
-
"""Make array read only, in-place.
|
|
172
|
-
|
|
173
|
-
Parameters
|
|
174
|
-
----------
|
|
175
|
-
mat : sparse or dense array
|
|
176
|
-
Matrix to make immutable.
|
|
177
|
-
"""
|
|
178
|
-
if issparse(mat):
|
|
179
|
-
mat.data.flags.writeable = False
|
|
180
|
-
if mat.format in {"csr", "csc", "bsr"}:
|
|
181
|
-
mat.indices.flags.writeable = False
|
|
182
|
-
mat.indptr.flags.writeable = False
|
|
183
|
-
elif mat.format == "coo":
|
|
184
|
-
mat.row.flags.writeable = False
|
|
185
|
-
mat.col.flags.writeable = False
|
|
186
|
-
else:
|
|
187
|
-
mat.flags.writeable = False
|
|
188
|
-
|
|
189
|
-
|
|
190
|
-
def isclose_qarray(a, b, **kwargs):
|
|
191
|
-
"""Check if two qarrays are close. This is a simple wrapper around the
|
|
192
|
-
base numpy function, but ensures that the arrays are converted to standard
|
|
193
|
-
numpy arrays first, to avoid a call to the overridden `__and__` method.
|
|
194
|
-
|
|
195
|
-
Parameters
|
|
196
|
-
----------
|
|
197
|
-
a : qarray
|
|
198
|
-
First array.
|
|
199
|
-
b : qarray
|
|
200
|
-
Second array.
|
|
201
|
-
rtol : array_like
|
|
202
|
-
The relative tolerance parameter.
|
|
203
|
-
atol : array_like
|
|
204
|
-
The absolute tolerance parameter (see Notes).
|
|
205
|
-
equal_nan: bool
|
|
206
|
-
Whether to compare NaN's as equal. If True, NaN's in a will be
|
|
207
|
-
considered equal to NaN's in b in the output array.
|
|
208
|
-
|
|
209
|
-
Returns
|
|
210
|
-
-------
|
|
211
|
-
bool
|
|
212
|
-
"""
|
|
213
|
-
# numpy 2+ uses `&` so we convert arrays to standard ndarray first
|
|
214
|
-
return np.allclose(np.asarray(a), np.asarray(b), **kwargs)
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
_numpy_qarray_overrides = {
|
|
218
|
-
np.isclose: isclose_qarray,
|
|
219
|
-
}
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
class qarray(np.ndarray):
|
|
223
|
-
"""Thin subclass of :class:`numpy.ndarray` with some convenient quantum
|
|
224
|
-
linear algebra related methods and attributes (``.H``, ``&``, etc.), and
|
|
225
|
-
matrix-like preservation of at least 2-dimensions so as to distiguish
|
|
226
|
-
kets and bras.
|
|
227
|
-
"""
|
|
228
|
-
|
|
229
|
-
def __new__(cls, data, dtype=None, order=None):
|
|
230
|
-
return np.asarray(data, dtype=dtype, order=order).view(cls)
|
|
231
|
-
|
|
232
|
-
@property
|
|
233
|
-
def H(self):
|
|
234
|
-
if issubclass(self.dtype.type, np.complexfloating):
|
|
235
|
-
return self.conjugate().transpose()
|
|
236
|
-
else:
|
|
237
|
-
return self.transpose()
|
|
238
|
-
|
|
239
|
-
def toarray(self):
|
|
240
|
-
return np.asarray(self)
|
|
241
|
-
|
|
242
|
-
@property
|
|
243
|
-
def A(self):
|
|
244
|
-
return np.asarray(self)
|
|
245
|
-
|
|
246
|
-
def __array__(self):
|
|
247
|
-
return np.asarray(self)
|
|
248
|
-
|
|
249
|
-
def __and__(self, other):
|
|
250
|
-
return kron_dispatch(self, other)
|
|
251
|
-
|
|
252
|
-
def normalize(self, inplace=True):
|
|
253
|
-
return normalize(self, inplace=inplace)
|
|
254
|
-
|
|
255
|
-
def nmlz(self, inplace=True):
|
|
256
|
-
return normalize(self, inplace=inplace)
|
|
257
|
-
|
|
258
|
-
def chop(self, inplace=True):
|
|
259
|
-
return chop(self, inplace=inplace)
|
|
260
|
-
|
|
261
|
-
def tr(self):
|
|
262
|
-
return _trace_dense(self)
|
|
263
|
-
|
|
264
|
-
def partial_trace(self, dims, keep):
|
|
265
|
-
return partial_trace(self, dims, keep)
|
|
266
|
-
|
|
267
|
-
def ptr(self, dims, keep):
|
|
268
|
-
return partial_trace(self, dims, keep)
|
|
269
|
-
|
|
270
|
-
def __array_function__(self, func, types, args, kwargs):
|
|
271
|
-
if func not in _numpy_qarray_overrides:
|
|
272
|
-
# avoid infinite recursion
|
|
273
|
-
return super().__array_function__(func, types, args, kwargs)
|
|
274
|
-
|
|
275
|
-
return _numpy_qarray_overrides[func](*args, **kwargs)
|
|
276
|
-
|
|
277
|
-
def __str__(self):
|
|
278
|
-
current_printopts = np.get_printoptions()
|
|
279
|
-
np.set_printoptions(precision=6, linewidth=120)
|
|
280
|
-
s = super().__str__()
|
|
281
|
-
np.set_printoptions(**current_printopts)
|
|
282
|
-
return s
|
|
283
|
-
|
|
284
|
-
def __repr__(self):
|
|
285
|
-
current_printopts = np.get_printoptions()
|
|
286
|
-
np.set_printoptions(precision=6, linewidth=120)
|
|
287
|
-
s = super().__str__()
|
|
288
|
-
np.set_printoptions(**current_printopts)
|
|
289
|
-
return s
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
# --------------------------------------------------------------------------- #
|
|
293
|
-
# Decorators for standardizing output #
|
|
294
|
-
# --------------------------------------------------------------------------- #
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
def ensure_qarray(fn):
|
|
298
|
-
"""Decorator that wraps output as a ``qarray``."""
|
|
299
|
-
|
|
300
|
-
@functools.wraps(fn)
|
|
301
|
-
def qarray_fn(*args, **kwargs):
|
|
302
|
-
out = fn(*args, **kwargs)
|
|
303
|
-
if not isinstance(out, qarray):
|
|
304
|
-
return qarray(out)
|
|
305
|
-
return out
|
|
306
|
-
|
|
307
|
-
return qarray_fn
|
|
308
|
-
|
|
309
|
-
|
|
310
|
-
def realify_scalar(x, imag_tol=1e-12):
|
|
311
|
-
try:
|
|
312
|
-
return x.real if abs(x.imag) < abs(x.real) * imag_tol else x
|
|
313
|
-
except AttributeError:
|
|
314
|
-
return x
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
def realify(fn, imag_tol=1e-12):
|
|
318
|
-
"""Decorator that drops ``fn``'s output imaginary part if very small."""
|
|
319
|
-
|
|
320
|
-
@functools.wraps(fn)
|
|
321
|
-
def realified_fn(*args, **kwargs):
|
|
322
|
-
return realify_scalar(fn(*args, **kwargs), imag_tol=imag_tol)
|
|
323
|
-
|
|
324
|
-
return realified_fn
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
def zeroify(fn, tol=1e-14):
|
|
328
|
-
"""Decorator that rounds ``fn``'s output to zero if very small."""
|
|
329
|
-
|
|
330
|
-
@functools.wraps(fn)
|
|
331
|
-
def zeroified_f(*args, **kwargs):
|
|
332
|
-
x = fn(*args, **kwargs)
|
|
333
|
-
return 0.0 if abs(x) < tol else x
|
|
334
|
-
|
|
335
|
-
return zeroified_f
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
_COMPLEX_DTYPES = {"complex64", "complex128"}
|
|
339
|
-
_DOUBLE_DTYPES = {"float64", "complex128"}
|
|
340
|
-
_DTYPE_MAP = {
|
|
341
|
-
(False, False): "float32",
|
|
342
|
-
(False, True): "float64",
|
|
343
|
-
(True, False): "complex64",
|
|
344
|
-
(True, True): "complex128",
|
|
345
|
-
}
|
|
346
|
-
|
|
347
|
-
|
|
348
|
-
def common_type(*arrays):
|
|
349
|
-
"""Quick compute the minimal dtype sufficient for ``arrays``."""
|
|
350
|
-
dtypes = {array.dtype.name for array in arrays}
|
|
351
|
-
has_complex = not _COMPLEX_DTYPES.isdisjoint(dtypes)
|
|
352
|
-
has_double = not _DOUBLE_DTYPES.isdisjoint(dtypes)
|
|
353
|
-
return _DTYPE_MAP[has_complex, has_double]
|
|
354
|
-
|
|
355
|
-
|
|
356
|
-
def upcast(fn):
|
|
357
|
-
"""Decorator to make sure the types of two numpy arguments match."""
|
|
358
|
-
|
|
359
|
-
def upcasted_fn(a, b):
|
|
360
|
-
if a.dtype == b.dtype:
|
|
361
|
-
return fn(a, b)
|
|
362
|
-
else:
|
|
363
|
-
common = common_type(a, b)
|
|
364
|
-
return fn(a.astype(common), b.astype(common))
|
|
365
|
-
|
|
366
|
-
return upcasted_fn
|
|
367
|
-
|
|
368
|
-
|
|
369
|
-
# --------------------------------------------------------------------------- #
|
|
370
|
-
# Type and shape checks #
|
|
371
|
-
# --------------------------------------------------------------------------- #
|
|
372
|
-
|
|
373
|
-
|
|
374
|
-
def dag(qob):
|
|
375
|
-
"""Conjugate transpose."""
|
|
376
|
-
try:
|
|
377
|
-
return qob.H
|
|
378
|
-
except AttributeError:
|
|
379
|
-
return qob.conj().T
|
|
380
|
-
|
|
381
|
-
|
|
382
|
-
def isket(qob):
|
|
383
|
-
"""Checks if ``qob`` is in ket form -- an array column."""
|
|
384
|
-
return qob.shape[0] > 1 and qob.shape[1] == 1 # Column vector check
|
|
385
|
-
|
|
386
|
-
|
|
387
|
-
def isbra(qob):
|
|
388
|
-
"""Checks if ``qob`` is in bra form -- an array row."""
|
|
389
|
-
return qob.shape[0] == 1 and qob.shape[1] > 1 # Row vector check
|
|
390
|
-
|
|
391
|
-
|
|
392
|
-
def isop(qob):
|
|
393
|
-
"""Checks if ``qob`` is an operator."""
|
|
394
|
-
s = qob.shape
|
|
395
|
-
return len(s) == 2 and (s[0] > 1) and (s[1] > 1)
|
|
396
|
-
|
|
397
|
-
|
|
398
|
-
def isvec(qob):
|
|
399
|
-
"""Checks if ``qob`` is row-vector, column-vector or one-dimensional."""
|
|
400
|
-
shp = qob.shape
|
|
401
|
-
return len(shp) == 1 or (len(shp) == 2 and (shp[0] == 1 or shp[1] == 1))
|
|
402
|
-
|
|
403
|
-
|
|
404
|
-
def issparse(qob):
|
|
405
|
-
"""Checks if ``qob`` is explicitly sparse."""
|
|
406
|
-
return isinstance(qob, sp.spmatrix)
|
|
407
|
-
|
|
408
|
-
|
|
409
|
-
def isdense(qob):
|
|
410
|
-
"""Checks if ``qob`` is explicitly dense."""
|
|
411
|
-
return isinstance(qob, np.ndarray)
|
|
412
|
-
|
|
413
|
-
|
|
414
|
-
def isreal(qob, **allclose_opts):
|
|
415
|
-
"""Checks if ``qob`` is approximately real."""
|
|
416
|
-
data = qob.data if issparse(qob) else qob
|
|
417
|
-
|
|
418
|
-
# check dtype
|
|
419
|
-
if np.isrealobj(data):
|
|
420
|
-
return True
|
|
421
|
-
|
|
422
|
-
# else check explicitly
|
|
423
|
-
return np.allclose(data.imag, 0.0, **allclose_opts)
|
|
424
|
-
|
|
425
|
-
|
|
426
|
-
def allclose_sparse(A, B, **allclose_opts):
|
|
427
|
-
if A.shape != B.shape:
|
|
428
|
-
return False
|
|
429
|
-
|
|
430
|
-
r1, c1, v1 = sp.find(A)
|
|
431
|
-
r2, c2, v2 = sp.find(B)
|
|
432
|
-
index_match = np.array_equal(r1, r2) & np.array_equal(c1, c2)
|
|
433
|
-
|
|
434
|
-
if not index_match:
|
|
435
|
-
return False
|
|
436
|
-
|
|
437
|
-
return np.allclose(v1, v2, **allclose_opts)
|
|
438
|
-
|
|
439
|
-
|
|
440
|
-
def isherm(qob, **allclose_opts):
|
|
441
|
-
"""Checks if ``qob`` is hermitian.
|
|
442
|
-
|
|
443
|
-
Parameters
|
|
444
|
-
----------
|
|
445
|
-
qob : dense or sparse operator
|
|
446
|
-
Matrix to check.
|
|
447
|
-
|
|
448
|
-
Returns
|
|
449
|
-
-------
|
|
450
|
-
bool
|
|
451
|
-
"""
|
|
452
|
-
if issparse(qob):
|
|
453
|
-
return allclose_sparse(qob, dag(qob), **allclose_opts)
|
|
454
|
-
else:
|
|
455
|
-
return np.allclose(qob, dag(qob), **allclose_opts)
|
|
456
|
-
|
|
457
|
-
|
|
458
|
-
def ispos(qob, tol=1e-15):
|
|
459
|
-
"""Checks if the dense hermitian ``qob`` is approximately positive
|
|
460
|
-
semi-definite, using the cholesky decomposition.
|
|
461
|
-
|
|
462
|
-
Parameters
|
|
463
|
-
----------
|
|
464
|
-
qob : dense operator
|
|
465
|
-
Matrix to check.
|
|
466
|
-
|
|
467
|
-
Returns
|
|
468
|
-
-------
|
|
469
|
-
bool
|
|
470
|
-
"""
|
|
471
|
-
try:
|
|
472
|
-
np.linalg.cholesky(qob + tol * np.eye(qob.shape[0]))
|
|
473
|
-
return True
|
|
474
|
-
except np.linalg.LinAlgError:
|
|
475
|
-
return False
|
|
476
|
-
|
|
477
|
-
|
|
478
|
-
# --------------------------------------------------------------------------- #
|
|
479
|
-
# Core accelerated numeric functions #
|
|
480
|
-
# --------------------------------------------------------------------------- #
|
|
481
|
-
|
|
482
|
-
|
|
483
|
-
def _nb_complex_base(real, imag): # pragma: no cover
|
|
484
|
-
return real + 1j * imag
|
|
485
|
-
|
|
486
|
-
|
|
487
|
-
_cmplx_sigs = ["complex64(float32, float32)", "complex128(float64, float64)"]
|
|
488
|
-
_nb_complex_seq = vectorize(_cmplx_sigs)(_nb_complex_base)
|
|
489
|
-
_nb_complex_par = pvectorize(_cmplx_sigs)(_nb_complex_base)
|
|
490
|
-
|
|
491
|
-
|
|
492
|
-
def complex_array(real, imag):
|
|
493
|
-
"""Accelerated creation of complex array."""
|
|
494
|
-
if real.size > 50000:
|
|
495
|
-
return _nb_complex_par(real, imag)
|
|
496
|
-
return _nb_complex_seq(real, imag)
|
|
497
|
-
|
|
498
|
-
|
|
499
|
-
@ensure_qarray
|
|
500
|
-
@upcast
|
|
501
|
-
@njit
|
|
502
|
-
def mul_dense(x, y): # pragma: no cover
|
|
503
|
-
"""Numba-accelerated element-wise multiplication of two dense matrices."""
|
|
504
|
-
return x * y
|
|
505
|
-
|
|
506
|
-
|
|
507
|
-
def mul(x, y):
|
|
508
|
-
"""Element-wise multiplication, dispatched to correct dense or sparse
|
|
509
|
-
function.
|
|
510
|
-
|
|
511
|
-
Parameters
|
|
512
|
-
----------
|
|
513
|
-
x : dense or sparse operator
|
|
514
|
-
First array.
|
|
515
|
-
y : dense or sparse operator
|
|
516
|
-
Second array.
|
|
517
|
-
|
|
518
|
-
Returns
|
|
519
|
-
-------
|
|
520
|
-
dense or sparse operator
|
|
521
|
-
Element wise product of ``x`` and ``y``.
|
|
522
|
-
"""
|
|
523
|
-
# dispatch to sparse methods
|
|
524
|
-
if issparse(x):
|
|
525
|
-
return x.multiply(y)
|
|
526
|
-
elif issparse(y):
|
|
527
|
-
return y.multiply(x)
|
|
528
|
-
|
|
529
|
-
return mul_dense(x, y)
|
|
530
|
-
|
|
531
|
-
|
|
532
|
-
def _nb_subtract_update_base(X, c, Z): # pragma: no cover
|
|
533
|
-
return X - c * Z
|
|
534
|
-
|
|
535
|
-
|
|
536
|
-
_sbtrct_sigs = [
|
|
537
|
-
"float32(float32, float32, float32)",
|
|
538
|
-
"float32(float32, float64, float32)",
|
|
539
|
-
"float64(float64, float64, float64)",
|
|
540
|
-
"complex64(complex64, float32, complex64)",
|
|
541
|
-
"complex64(complex64, float64, complex64)",
|
|
542
|
-
"complex128(complex128, float64, complex128)",
|
|
543
|
-
]
|
|
544
|
-
_nb_subtract_update_seq = vectorize(_sbtrct_sigs)(_nb_subtract_update_base)
|
|
545
|
-
_nb_subtract_update_par = pvectorize(_sbtrct_sigs)(_nb_subtract_update_base)
|
|
546
|
-
|
|
547
|
-
|
|
548
|
-
def subtract_update_(X, c, Y):
|
|
549
|
-
"""Accelerated inplace computation of ``X -= c * Y``. This is mainly
|
|
550
|
-
for Lanczos iteration.
|
|
551
|
-
"""
|
|
552
|
-
if X.size > 2048:
|
|
553
|
-
_nb_subtract_update_par(X, c, Y, out=X)
|
|
554
|
-
else:
|
|
555
|
-
_nb_subtract_update_seq(X, c, Y, out=X)
|
|
556
|
-
|
|
557
|
-
|
|
558
|
-
def _nb_divide_update_base(X, c): # pragma: no cover
|
|
559
|
-
return X / c
|
|
560
|
-
|
|
561
|
-
|
|
562
|
-
_divd_sigs = [
|
|
563
|
-
"float32(float32, float32)",
|
|
564
|
-
"float64(float64, float64)",
|
|
565
|
-
"complex64(complex64, float32)",
|
|
566
|
-
"complex128(complex128, float64)",
|
|
567
|
-
]
|
|
568
|
-
_nb_divide_update_seq = vectorize(_divd_sigs)(_nb_divide_update_base)
|
|
569
|
-
_nb_divide_update_par = pvectorize(_divd_sigs)(_nb_divide_update_base)
|
|
570
|
-
|
|
571
|
-
|
|
572
|
-
def divide_update_(X, c, out):
|
|
573
|
-
"""Accelerated computation of ``X / c`` into ``out``."""
|
|
574
|
-
if X.size > 2048:
|
|
575
|
-
_nb_divide_update_par(X, c, out=out)
|
|
576
|
-
else:
|
|
577
|
-
_nb_divide_update_seq(X, c, out=out)
|
|
578
|
-
|
|
579
|
-
|
|
580
|
-
@pnjit # pragma: no cover
|
|
581
|
-
def _dot_csr_matvec_prange(data, indptr, indices, vec, out):
|
|
582
|
-
for i in numba.prange(vec.size):
|
|
583
|
-
isum = 0.0
|
|
584
|
-
for j in range(indptr[i], indptr[i + 1]):
|
|
585
|
-
isum += data[j] * vec[indices[j]]
|
|
586
|
-
out[i] = isum
|
|
587
|
-
|
|
588
|
-
|
|
589
|
-
def par_dot_csr_matvec(A, x):
|
|
590
|
-
"""Parallel sparse csr-matrix vector dot product.
|
|
591
|
-
|
|
592
|
-
Parameters
|
|
593
|
-
----------
|
|
594
|
-
A : scipy.sparse.csr_matrix
|
|
595
|
-
Operator.
|
|
596
|
-
x : dense vector
|
|
597
|
-
Vector.
|
|
598
|
-
|
|
599
|
-
Returns
|
|
600
|
-
-------
|
|
601
|
-
dense vector
|
|
602
|
-
Result of ``A @ x``.
|
|
603
|
-
|
|
604
|
-
Notes
|
|
605
|
-
-----
|
|
606
|
-
The main bottleneck for sparse matrix vector product is memory access,
|
|
607
|
-
as such this function is only beneficial for pretty large matrices.
|
|
608
|
-
"""
|
|
609
|
-
y = np.empty(x.size, common_type(A, x))
|
|
610
|
-
_dot_csr_matvec_prange(A.data, A.indptr, A.indices, x.ravel(), y)
|
|
611
|
-
y.shape = x.shape
|
|
612
|
-
if isinstance(x, qarray):
|
|
613
|
-
y = qarray(y)
|
|
614
|
-
return y
|
|
615
|
-
|
|
616
|
-
|
|
617
|
-
def dot_sparse(a, b):
|
|
618
|
-
"""Dot product for sparse matrix, dispatching to parallel for v large nnz."""
|
|
619
|
-
out = a @ b
|
|
620
|
-
|
|
621
|
-
if isdense(out) and (isinstance(b, qarray) or isinstance(a, qarray)):
|
|
622
|
-
out = qarray(out)
|
|
623
|
-
|
|
624
|
-
return out
|
|
625
|
-
|
|
626
|
-
|
|
627
|
-
def dot(a, b):
|
|
628
|
-
"""Matrix multiplication, dispatched to dense or sparse functions.
|
|
629
|
-
|
|
630
|
-
Parameters
|
|
631
|
-
----------
|
|
632
|
-
a : dense or sparse operator
|
|
633
|
-
First array.
|
|
634
|
-
b : dense or sparse operator
|
|
635
|
-
Second array.
|
|
636
|
-
|
|
637
|
-
Returns
|
|
638
|
-
-------
|
|
639
|
-
dense or sparse operator
|
|
640
|
-
Dot product of ``a`` and ``b``.
|
|
641
|
-
"""
|
|
642
|
-
if issparse(a) or issparse(b):
|
|
643
|
-
return dot_sparse(a, b)
|
|
644
|
-
try:
|
|
645
|
-
return a.dot(b)
|
|
646
|
-
except AttributeError:
|
|
647
|
-
return a @ b
|
|
648
|
-
|
|
649
|
-
|
|
650
|
-
@realify
|
|
651
|
-
def vdot(a, b):
|
|
652
|
-
"""Accelerated 'Hermitian' inner product of two arrays. In other words,
|
|
653
|
-
``b`` here will be conjugated by the function.
|
|
654
|
-
"""
|
|
655
|
-
return np.vdot(a.ravel(), b.ravel())
|
|
656
|
-
|
|
657
|
-
|
|
658
|
-
@realify
|
|
659
|
-
@upcast
|
|
660
|
-
@njit
|
|
661
|
-
def rdot(a, b): # pragma: no cover
|
|
662
|
-
"""Real dot product of two dense vectors.
|
|
663
|
-
|
|
664
|
-
Here, ``b`` will *not* be conjugated before the inner product.
|
|
665
|
-
"""
|
|
666
|
-
a, b = a.reshape((1, -1)), b.reshape((-1, 1))
|
|
667
|
-
return (a @ b).item()
|
|
668
|
-
|
|
669
|
-
|
|
670
|
-
@pnjit
|
|
671
|
-
def _l_diag_dot_dense_par(l, A, out): # pragma: no cover
|
|
672
|
-
for i in numba.prange(l.size):
|
|
673
|
-
out[i, :] = l[i] * A[i, :]
|
|
674
|
-
|
|
675
|
-
|
|
676
|
-
@ensure_qarray
|
|
677
|
-
def l_diag_dot_dense(diag, mat):
|
|
678
|
-
"""Dot product of diagonal matrix (with only diagonal supplied) and dense
|
|
679
|
-
matrix.
|
|
680
|
-
"""
|
|
681
|
-
|
|
682
|
-
if diag.size <= 128:
|
|
683
|
-
return mul_dense(diag.reshape(-1, 1), mat)
|
|
684
|
-
else:
|
|
685
|
-
out = np.empty_like(mat, dtype=common_type(diag, mat))
|
|
686
|
-
_l_diag_dot_dense_par(diag.ravel(), mat, out)
|
|
687
|
-
|
|
688
|
-
return out
|
|
689
|
-
|
|
690
|
-
|
|
691
|
-
def l_diag_dot_sparse(diag, mat):
|
|
692
|
-
"""Dot product of digonal matrix (with only diagonal supplied) and sparse
|
|
693
|
-
matrix.
|
|
694
|
-
"""
|
|
695
|
-
return sp.diags(diag) @ mat
|
|
696
|
-
|
|
697
|
-
|
|
698
|
-
def ldmul(diag, mat):
|
|
699
|
-
"""Accelerated left diagonal multiplication. Equivalent to
|
|
700
|
-
``numpy.diag(diag) @ mat``, but faster than numpy.
|
|
701
|
-
|
|
702
|
-
Parameters
|
|
703
|
-
----------
|
|
704
|
-
diag : vector or 1d-array
|
|
705
|
-
Vector representing the diagonal of a matrix.
|
|
706
|
-
mat : dense or sparse matrix
|
|
707
|
-
A normal (non-diagonal) matrix.
|
|
708
|
-
|
|
709
|
-
Returns
|
|
710
|
-
-------
|
|
711
|
-
dense or sparse matrix
|
|
712
|
-
Dot product of the matrix whose diagonal is ``diag`` and ``mat``.
|
|
713
|
-
"""
|
|
714
|
-
if issparse(mat):
|
|
715
|
-
return l_diag_dot_sparse(diag, mat)
|
|
716
|
-
return l_diag_dot_dense(diag, mat)
|
|
717
|
-
|
|
718
|
-
|
|
719
|
-
@pnjit
|
|
720
|
-
def _r_diag_dot_dense_par(A, l, out): # pragma: no cover
|
|
721
|
-
for i in numba.prange(l.size):
|
|
722
|
-
out[:, i] = A[:, i] * l[i]
|
|
723
|
-
|
|
724
|
-
|
|
725
|
-
@ensure_qarray
|
|
726
|
-
def r_diag_dot_dense(mat, diag):
|
|
727
|
-
"""Dot product of dense matrix and digonal matrix (with only diagonal
|
|
728
|
-
supplied).
|
|
729
|
-
"""
|
|
730
|
-
if diag.size <= 128:
|
|
731
|
-
return mul_dense(mat, diag.reshape(1, -1))
|
|
732
|
-
else:
|
|
733
|
-
out = np.empty_like(mat, dtype=common_type(diag, mat))
|
|
734
|
-
_r_diag_dot_dense_par(mat, diag.ravel(), out)
|
|
735
|
-
|
|
736
|
-
return out
|
|
737
|
-
|
|
738
|
-
|
|
739
|
-
def r_diag_dot_sparse(mat, diag):
|
|
740
|
-
"""Dot product of sparse matrix and digonal matrix (with only diagonal
|
|
741
|
-
supplied).
|
|
742
|
-
"""
|
|
743
|
-
return mat @ sp.diags(diag)
|
|
744
|
-
|
|
745
|
-
|
|
746
|
-
def rdmul(mat, diag):
|
|
747
|
-
"""Accelerated left diagonal multiplication.
|
|
748
|
-
|
|
749
|
-
Equivalent to ``mat @ numpy.diag(diag)``, but faster.
|
|
750
|
-
|
|
751
|
-
Parameters
|
|
752
|
-
----------
|
|
753
|
-
mat : dense or sparse matrix
|
|
754
|
-
A normal (non-diagonal) matrix.
|
|
755
|
-
diag : vector or 1d-array
|
|
756
|
-
Vector representing the diagonal of a matrix.
|
|
757
|
-
|
|
758
|
-
Returns
|
|
759
|
-
-------
|
|
760
|
-
dense or sparse matrix
|
|
761
|
-
Dot product of ``mat`` and the matrix whose diagonal is ``diag``.
|
|
762
|
-
"""
|
|
763
|
-
if issparse(mat):
|
|
764
|
-
return r_diag_dot_sparse(mat, diag)
|
|
765
|
-
return r_diag_dot_dense(mat, diag)
|
|
766
|
-
|
|
767
|
-
|
|
768
|
-
@pnjit
|
|
769
|
-
def _outer_par(a, b, out, m, n): # pragma: no cover
|
|
770
|
-
for i in numba.prange(m):
|
|
771
|
-
out[i, :] = a[i] * b[:]
|
|
772
|
-
|
|
773
|
-
|
|
774
|
-
@ensure_qarray
|
|
775
|
-
def outer(a, b):
|
|
776
|
-
"""Outer product between two vectors (no conjugation)."""
|
|
777
|
-
m, n = a.size, b.size
|
|
778
|
-
|
|
779
|
-
if m * n < 2**14:
|
|
780
|
-
return mul_dense(a.reshape(m, 1), b.reshape(1, n))
|
|
781
|
-
|
|
782
|
-
out = np.empty((m, n), dtype=common_type(a, b))
|
|
783
|
-
_outer_par(a.ravel(), b.ravel(), out, m, n)
|
|
784
|
-
|
|
785
|
-
return out
|
|
786
|
-
|
|
787
|
-
|
|
788
|
-
@vectorize
|
|
789
|
-
def explt(l, t): # pragma: no cover
|
|
790
|
-
"""Complex exponenital as used in solution to schrodinger equation."""
|
|
791
|
-
return cmath.exp((-1.0j * t) * l)
|
|
792
|
-
|
|
793
|
-
|
|
794
|
-
# --------------------------------------------------------------------------- #
|
|
795
|
-
# Kronecker (tensor) product #
|
|
796
|
-
# --------------------------------------------------------------------------- #
|
|
797
|
-
|
|
798
|
-
|
|
799
|
-
@njit
|
|
800
|
-
def _nb_kron_exp_seq(a, b, out, m, n, p, q): # pragma: no cover
|
|
801
|
-
for i in range(m):
|
|
802
|
-
for j in range(n):
|
|
803
|
-
ii, fi = i * p, (i + 1) * p
|
|
804
|
-
ij, fj = j * q, (j + 1) * q
|
|
805
|
-
out[ii:fi, ij:fj] = a[i, j] * b
|
|
806
|
-
|
|
807
|
-
|
|
808
|
-
@pnjit
|
|
809
|
-
def _nb_kron_exp_par(a, b, out, m, n, p, q): # pragma: no cover
|
|
810
|
-
for i in numba.prange(m):
|
|
811
|
-
for j in range(n):
|
|
812
|
-
ii, fi = i * p, (i + 1) * p
|
|
813
|
-
ij, fj = j * q, (j + 1) * q
|
|
814
|
-
out[ii:fi, ij:fj] = a[i, j] * b
|
|
815
|
-
|
|
816
|
-
|
|
817
|
-
@ensure_qarray
|
|
818
|
-
def kron_dense(a, b, par_thresh=4096):
|
|
819
|
-
m, n = a.shape
|
|
820
|
-
p, q = b.shape
|
|
821
|
-
|
|
822
|
-
out = np.empty((m * p, n * q), dtype=common_type(a, b))
|
|
823
|
-
|
|
824
|
-
if out.size > 4096:
|
|
825
|
-
_nb_kron_exp_par(a, b, out, m, n, p, q)
|
|
826
|
-
else:
|
|
827
|
-
_nb_kron_exp_seq(a, b, out, m, n, p, q)
|
|
828
|
-
|
|
829
|
-
return out
|
|
830
|
-
|
|
831
|
-
|
|
832
|
-
def kron_sparse(a, b, stype=None):
|
|
833
|
-
"""Sparse tensor (kronecker) product,
|
|
834
|
-
|
|
835
|
-
Output format can be specified or will be automatically determined.
|
|
836
|
-
"""
|
|
837
|
-
if stype is None:
|
|
838
|
-
stype = (
|
|
839
|
-
"bsr"
|
|
840
|
-
if isinstance(b, np.ndarray) or b.format == "bsr"
|
|
841
|
-
else b.format
|
|
842
|
-
if isinstance(a, np.ndarray)
|
|
843
|
-
else "csc"
|
|
844
|
-
if a.format == "csc" and b.format == "csc"
|
|
845
|
-
else "csr"
|
|
846
|
-
)
|
|
847
|
-
|
|
848
|
-
return sp.kron(a, b, format=stype)
|
|
849
|
-
|
|
850
|
-
|
|
851
|
-
def kron_dispatch(a, b, stype=None):
|
|
852
|
-
"""Kronecker product of two arrays, dispatched based on dense/sparse and
|
|
853
|
-
also size of product.
|
|
854
|
-
"""
|
|
855
|
-
if issparse(a) or issparse(b):
|
|
856
|
-
return kron_sparse(a, b, stype=stype)
|
|
857
|
-
|
|
858
|
-
return kron_dense(a, b)
|
|
859
|
-
|
|
860
|
-
|
|
861
|
-
# --------------------------------------------------------------------------- #
|
|
862
|
-
# Core Functions #
|
|
863
|
-
# --------------------------------------------------------------------------- #
|
|
864
|
-
|
|
865
|
-
_SPARSE_CONSTRUCTORS = {
|
|
866
|
-
"csr": sp.csr_matrix,
|
|
867
|
-
"bsr": sp.bsr_matrix,
|
|
868
|
-
"csc": sp.csc_matrix,
|
|
869
|
-
"coo": sp.coo_matrix,
|
|
870
|
-
}
|
|
871
|
-
|
|
872
|
-
|
|
873
|
-
def sparse_matrix(data, stype="csr", dtype=complex):
|
|
874
|
-
"""Construct a sparse matrix of a particular format.
|
|
875
|
-
|
|
876
|
-
Parameters
|
|
877
|
-
----------
|
|
878
|
-
data : array_like
|
|
879
|
-
Fed to scipy.sparse constructor.
|
|
880
|
-
stype : {'csr', 'csc', 'coo', 'bsr'}, optional
|
|
881
|
-
Sparse format.
|
|
882
|
-
|
|
883
|
-
Returns
|
|
884
|
-
-------
|
|
885
|
-
scipy sparse matrix
|
|
886
|
-
Of format ``stype``.
|
|
887
|
-
"""
|
|
888
|
-
return _SPARSE_CONSTRUCTORS[stype](data, dtype=dtype)
|
|
889
|
-
|
|
890
|
-
|
|
891
|
-
_EXPEC_METHODS = {
|
|
892
|
-
# [isop(a), isop(b), issparse(a) or issparse(b)]
|
|
893
|
-
(0, 0, 0): lambda a, b: abs(vdot(a, b)) ** 2,
|
|
894
|
-
(0, 1, 0): lambda a, b: vdot(a, b @ a),
|
|
895
|
-
(1, 0, 0): lambda a, b: vdot(b, a @ b),
|
|
896
|
-
(1, 1, 0): lambda a, b: _trace_dense(a @ b),
|
|
897
|
-
(0, 0, 1): lambda a, b: abs(dot(dag(a), b)[0, 0]) ** 2,
|
|
898
|
-
(0, 1, 1): realify(lambda a, b: dot(dag(a), dot(b, a))[0, 0]),
|
|
899
|
-
(1, 0, 1): realify(lambda a, b: dot(dag(b), dot(a, b))[0, 0]),
|
|
900
|
-
(1, 1, 1): lambda a, b: _trace_sparse(dot(a, b)),
|
|
901
|
-
}
|
|
902
|
-
|
|
903
|
-
|
|
904
|
-
def expectation(a, b):
|
|
905
|
-
"""'Expectation' between a vector/operator and another vector/operator.
|
|
906
|
-
|
|
907
|
-
The 'operator' inner product between ``a`` and ``b``, but also for vectors.
|
|
908
|
-
This means that for consistency:
|
|
909
|
-
|
|
910
|
-
- for two vectors it will be the absolute expec squared ``|<a|b><b|a>|``,
|
|
911
|
-
*not* ``<a|b>``.
|
|
912
|
-
- for a vector and an operator its will be ``<a|b|a>``
|
|
913
|
-
- for two operators it will be the Hilbert-schmidt inner product
|
|
914
|
-
``tr(A @ B)``
|
|
915
|
-
|
|
916
|
-
In this way ``expectation(a, b) == expectation(dop(a), b) ==
|
|
917
|
-
expectation(dop(a), dop(b))``.
|
|
918
|
-
|
|
919
|
-
Parameters
|
|
920
|
-
----------
|
|
921
|
-
a : vector or operator
|
|
922
|
-
First state or operator - assumed to be ket if vector.
|
|
923
|
-
b : vector or operator
|
|
924
|
-
Second state or operator - assumed to be ket if vector.
|
|
925
|
-
|
|
926
|
-
Returns
|
|
927
|
-
-------
|
|
928
|
-
x : float
|
|
929
|
-
'Expectation' of ``a`` with ``b``.
|
|
930
|
-
"""
|
|
931
|
-
return _EXPEC_METHODS[isop(a), isop(b), issparse(a) or issparse(b)](a, b)
|
|
932
|
-
|
|
933
|
-
|
|
934
|
-
expec = expectation
|
|
935
|
-
"""Alias for :func:`expectation`."""
|
|
936
|
-
|
|
937
|
-
|
|
938
|
-
def normalize(qob, inplace=True):
|
|
939
|
-
"""Normalize a quantum object.
|
|
940
|
-
|
|
941
|
-
Parameters
|
|
942
|
-
----------
|
|
943
|
-
qob : dense or sparse vector or operator
|
|
944
|
-
Quantum object to normalize.
|
|
945
|
-
inplace : bool, optional
|
|
946
|
-
Whether to act inplace on the given operator.
|
|
947
|
-
|
|
948
|
-
Returns
|
|
949
|
-
-------
|
|
950
|
-
dense or sparse vector or operator
|
|
951
|
-
Normalized quantum object.
|
|
952
|
-
"""
|
|
953
|
-
if not inplace:
|
|
954
|
-
qob = qob.copy()
|
|
955
|
-
|
|
956
|
-
if isop(qob):
|
|
957
|
-
n_factor = trace(qob)
|
|
958
|
-
else:
|
|
959
|
-
n_factor = expectation(qob, qob) ** 0.25
|
|
960
|
-
|
|
961
|
-
qob[:] /= n_factor
|
|
962
|
-
return qob
|
|
963
|
-
|
|
964
|
-
|
|
965
|
-
normalize_ = functools.partial(normalize, inplace=True)
|
|
966
|
-
|
|
967
|
-
|
|
968
|
-
def chop(qob, tol=1.0e-15, inplace=True):
|
|
969
|
-
"""Set small values of a dense or sparse array to zero.
|
|
970
|
-
|
|
971
|
-
Parameters
|
|
972
|
-
----------
|
|
973
|
-
qob : dense or sparse vector or operator
|
|
974
|
-
Quantum object to chop.
|
|
975
|
-
tol : float, optional
|
|
976
|
-
Fraction of ``max(abs(qob))`` to chop below.
|
|
977
|
-
inplace : bool, optional
|
|
978
|
-
Whether to act on input array or return copy.
|
|
979
|
-
|
|
980
|
-
Returns
|
|
981
|
-
-------
|
|
982
|
-
dense or sparse vector or operator
|
|
983
|
-
Chopped quantum object.
|
|
984
|
-
"""
|
|
985
|
-
minm = np.abs(qob).max() * tol # minimum value tolerated
|
|
986
|
-
if not inplace:
|
|
987
|
-
qob = qob.copy()
|
|
988
|
-
if issparse(qob):
|
|
989
|
-
qob.data.real[np.abs(qob.data.real) < minm] = 0.0
|
|
990
|
-
qob.data.imag[np.abs(qob.data.imag) < minm] = 0.0
|
|
991
|
-
qob.eliminate_zeros()
|
|
992
|
-
else:
|
|
993
|
-
qob.real[np.abs(qob.real) < minm] = 0.0
|
|
994
|
-
qob.imag[np.abs(qob.imag) < minm] = 0.0
|
|
995
|
-
return qob
|
|
996
|
-
|
|
997
|
-
|
|
998
|
-
chop_ = functools.partial(chop, inplace=True)
|
|
999
|
-
|
|
1000
|
-
|
|
1001
|
-
def quimbify(
|
|
1002
|
-
data,
|
|
1003
|
-
qtype=None,
|
|
1004
|
-
normalized=False,
|
|
1005
|
-
chopped=False,
|
|
1006
|
-
sparse=None,
|
|
1007
|
-
stype=None,
|
|
1008
|
-
dtype=complex,
|
|
1009
|
-
):
|
|
1010
|
-
"""Converts data to 'quantum' i.e. complex matrices, kets being columns.
|
|
1011
|
-
|
|
1012
|
-
Parameters
|
|
1013
|
-
----------
|
|
1014
|
-
data : dense or sparse array_like
|
|
1015
|
-
Array describing vector or operator.
|
|
1016
|
-
qtype : {``'ket'``, ``'bra'`` or ``'dop'``}, optional
|
|
1017
|
-
Quantum object type output type. Note that if an operator is given
|
|
1018
|
-
as ``data`` and ``'ket'`` or ``'bra'`` as ``qtype``, the operator
|
|
1019
|
-
will be unravelled into a column or row vector.
|
|
1020
|
-
sparse : bool, optional
|
|
1021
|
-
Whether to convert output to sparse a format.
|
|
1022
|
-
normalized : bool, optional
|
|
1023
|
-
Whether to normalise the output.
|
|
1024
|
-
chopped : bool, optional
|
|
1025
|
-
Whether to trim almost zero entries of the output.
|
|
1026
|
-
stype : {``'csr'``, ``'csc'``, ``'bsr'``, ``'coo'``}, optional
|
|
1027
|
-
Format of output matrix if sparse, defaults to ``'csr'``.
|
|
1028
|
-
|
|
1029
|
-
Returns
|
|
1030
|
-
-------
|
|
1031
|
-
dense or sparse vector or operator
|
|
1032
|
-
|
|
1033
|
-
Notes
|
|
1034
|
-
-----
|
|
1035
|
-
1. Will unravel an array if ``'ket'`` or ``'bra'`` given.
|
|
1036
|
-
2. Will conjugate if ``'bra'`` given.
|
|
1037
|
-
3. Will leave operators as is if ``'dop'`` given, but construct one if
|
|
1038
|
-
vector given with the assumption that it was a ket.
|
|
1039
|
-
|
|
1040
|
-
Examples
|
|
1041
|
-
--------
|
|
1042
|
-
|
|
1043
|
-
Create a ket (column vector):
|
|
1044
|
-
|
|
1045
|
-
>>> qu([1, 2j, 3])
|
|
1046
|
-
qarray([[1.+0.j],
|
|
1047
|
-
[0.+2.j],
|
|
1048
|
-
[3.+0.j]])
|
|
1049
|
-
|
|
1050
|
-
Create a single precision bra (row vector):
|
|
1051
|
-
|
|
1052
|
-
>>> qu([1, 2j, 3], qtype='bra', dtype='complex64')
|
|
1053
|
-
qarray([[1.-0.j, 0.-2.j, 3.-0.j]], dtype=complex64)
|
|
1054
|
-
|
|
1055
|
-
Create a density operator from a vector:
|
|
1056
|
-
|
|
1057
|
-
>>> qu([1, 2j, 3], qtype='dop')
|
|
1058
|
-
qarray([[1.+0.j, 0.-2.j, 3.+0.j],
|
|
1059
|
-
[0.+2.j, 4.+0.j, 0.+6.j],
|
|
1060
|
-
[3.+0.j, 0.-6.j, 9.+0.j]])
|
|
1061
|
-
|
|
1062
|
-
Create a sparse density operator:
|
|
1063
|
-
|
|
1064
|
-
>>> qu([1, 0, 0], sparse=True, qtype='dop')
|
|
1065
|
-
<3x3 sparse matrix of type '<class 'numpy.complex128'>'
|
|
1066
|
-
with 1 stored elements in Compressed Sparse Row format>
|
|
1067
|
-
"""
|
|
1068
|
-
|
|
1069
|
-
sparse_input = issparse(data)
|
|
1070
|
-
sparse_output = (
|
|
1071
|
-
(sparse)
|
|
1072
|
-
or (sparse_input and sparse is None)
|
|
1073
|
-
or (sparse is None and stype)
|
|
1074
|
-
)
|
|
1075
|
-
# Infer output sparse format from input if necessary
|
|
1076
|
-
if sparse_input and sparse_output and stype is None:
|
|
1077
|
-
stype = data.format
|
|
1078
|
-
|
|
1079
|
-
if (qtype is None) and (np.ndim(data) == 1):
|
|
1080
|
-
# assume quimbify simple list -> ket
|
|
1081
|
-
qtype = "ket"
|
|
1082
|
-
|
|
1083
|
-
if qtype is not None:
|
|
1084
|
-
# Must be dense to reshape
|
|
1085
|
-
data = qarray(data.toarray() if sparse_input else data)
|
|
1086
|
-
if qtype in ("k", "ket"):
|
|
1087
|
-
data = data.reshape((prod(data.shape), 1))
|
|
1088
|
-
elif qtype in ("b", "bra"):
|
|
1089
|
-
data = data.reshape((1, prod(data.shape))).conj()
|
|
1090
|
-
elif qtype in ("d", "r", "rho", "op", "dop") and isvec(data):
|
|
1091
|
-
data = dot(quimbify(data, "ket"), quimbify(data, "bra"))
|
|
1092
|
-
data = data.astype(dtype)
|
|
1093
|
-
|
|
1094
|
-
# Just cast as qarray
|
|
1095
|
-
elif not sparse_output:
|
|
1096
|
-
data = qarray(data.toarray() if sparse_input else data, dtype=dtype)
|
|
1097
|
-
|
|
1098
|
-
# Check if already sparse matrix, or wanted to be one
|
|
1099
|
-
if sparse_output:
|
|
1100
|
-
data = sparse_matrix(
|
|
1101
|
-
data, dtype=dtype, stype=(stype if stype is not None else "csr")
|
|
1102
|
-
)
|
|
1103
|
-
|
|
1104
|
-
# Optionally normalize and chop small components
|
|
1105
|
-
if normalized:
|
|
1106
|
-
normalize_(data)
|
|
1107
|
-
if chopped:
|
|
1108
|
-
chop_(data)
|
|
1109
|
-
|
|
1110
|
-
return data
|
|
1111
|
-
|
|
1112
|
-
|
|
1113
|
-
qu = quimbify
|
|
1114
|
-
"""Alias of :func:`quimbify`."""
|
|
1115
|
-
|
|
1116
|
-
ket = functools.partial(quimbify, qtype="ket")
|
|
1117
|
-
"""Convert an object into a ket."""
|
|
1118
|
-
|
|
1119
|
-
bra = functools.partial(quimbify, qtype="bra")
|
|
1120
|
-
"""Convert an object into a bra."""
|
|
1121
|
-
|
|
1122
|
-
dop = functools.partial(quimbify, qtype="dop")
|
|
1123
|
-
"""Convert an object into a density operator."""
|
|
1124
|
-
|
|
1125
|
-
sparse = functools.partial(quimbify, sparse=True)
|
|
1126
|
-
"""Convert an object into sparse form."""
|
|
1127
|
-
|
|
1128
|
-
|
|
1129
|
-
def infer_size(p, base=2):
|
|
1130
|
-
"""Infer the size, i.e. number of 'sites' in a state.
|
|
1131
|
-
|
|
1132
|
-
Parameters
|
|
1133
|
-
----------
|
|
1134
|
-
p : vector or operator
|
|
1135
|
-
An array representing a state with a shape attribute.
|
|
1136
|
-
base : int, optional
|
|
1137
|
-
Size of the individual states that ``p`` is composed of, e.g. this
|
|
1138
|
-
defauts 2 for qubits.
|
|
1139
|
-
|
|
1140
|
-
Returns
|
|
1141
|
-
-------
|
|
1142
|
-
int
|
|
1143
|
-
Number of composite systems.
|
|
1144
|
-
|
|
1145
|
-
Examples
|
|
1146
|
-
--------
|
|
1147
|
-
>>> infer_size(singlet() & singlet())
|
|
1148
|
-
4
|
|
1149
|
-
|
|
1150
|
-
>>> infersize(rand_rho(5**3), base=5)
|
|
1151
|
-
3
|
|
1152
|
-
"""
|
|
1153
|
-
sz = math.log(max(p.shape), base)
|
|
1154
|
-
|
|
1155
|
-
if sz % 1 > 1e-13:
|
|
1156
|
-
raise ValueError(
|
|
1157
|
-
"This state does not seem to be composed "
|
|
1158
|
-
f"of sites of equal size {base}."
|
|
1159
|
-
)
|
|
1160
|
-
|
|
1161
|
-
return int(sz)
|
|
1162
|
-
|
|
1163
|
-
|
|
1164
|
-
@realify
|
|
1165
|
-
@njit
|
|
1166
|
-
def _trace_dense(op): # pragma: no cover
|
|
1167
|
-
"""Trace of a dense operator."""
|
|
1168
|
-
x = 0.0
|
|
1169
|
-
for i in range(op.shape[0]):
|
|
1170
|
-
x += op[i, i]
|
|
1171
|
-
return x
|
|
1172
|
-
|
|
1173
|
-
|
|
1174
|
-
@realify
|
|
1175
|
-
def _trace_sparse(op):
|
|
1176
|
-
"""Trace of a sparse operator."""
|
|
1177
|
-
return np.sum(op.diagonal())
|
|
1178
|
-
|
|
1179
|
-
|
|
1180
|
-
def trace(mat):
|
|
1181
|
-
"""Trace of a dense or sparse operator.
|
|
1182
|
-
|
|
1183
|
-
Parameters
|
|
1184
|
-
----------
|
|
1185
|
-
mat : operator
|
|
1186
|
-
Operator, dense or sparse.
|
|
1187
|
-
|
|
1188
|
-
Returns
|
|
1189
|
-
-------
|
|
1190
|
-
x : float
|
|
1191
|
-
Trace of ``mat``
|
|
1192
|
-
"""
|
|
1193
|
-
return _trace_sparse(mat) if issparse(mat) else _trace_dense(mat)
|
|
1194
|
-
|
|
1195
|
-
|
|
1196
|
-
@ensure_qarray
|
|
1197
|
-
def _identity_dense(d, dtype=complex):
|
|
1198
|
-
"""Returns a dense, identity of given dimension ``d`` and type ``dtype``."""
|
|
1199
|
-
return np.eye(d, dtype=dtype)
|
|
1200
|
-
|
|
1201
|
-
|
|
1202
|
-
def _identity_sparse(d, stype="csr", dtype=complex):
|
|
1203
|
-
"""Returns a sparse, complex identity of order d."""
|
|
1204
|
-
return sp.eye(d, dtype=dtype, format=stype)
|
|
1205
|
-
|
|
1206
|
-
|
|
1207
|
-
def identity(d, sparse=False, stype="csr", dtype=complex):
|
|
1208
|
-
"""Return identity of size d in complex format, optionally sparse.
|
|
1209
|
-
|
|
1210
|
-
Parameters
|
|
1211
|
-
----------
|
|
1212
|
-
d : int
|
|
1213
|
-
Dimension of identity.
|
|
1214
|
-
sparse : bool, optional
|
|
1215
|
-
Whether to output in sparse form.
|
|
1216
|
-
stype : str, optional
|
|
1217
|
-
If sparse, what format to use.
|
|
1218
|
-
|
|
1219
|
-
Returns
|
|
1220
|
-
-------
|
|
1221
|
-
id : qarray or sparse matrix
|
|
1222
|
-
Identity operator.
|
|
1223
|
-
"""
|
|
1224
|
-
if sparse:
|
|
1225
|
-
return _identity_sparse(d, stype=stype, dtype=dtype)
|
|
1226
|
-
|
|
1227
|
-
return _identity_dense(d, dtype=dtype)
|
|
1228
|
-
|
|
1229
|
-
|
|
1230
|
-
eye = identity
|
|
1231
|
-
"""Alias for :func:`identity`."""
|
|
1232
|
-
|
|
1233
|
-
speye = functools.partial(identity, sparse=True)
|
|
1234
|
-
"""Sparse identity."""
|
|
1235
|
-
|
|
1236
|
-
|
|
1237
|
-
def _kron_core(*ops, stype=None, coo_build=False, parallel=False):
|
|
1238
|
-
"""Core kronecker product for a sequence of objects."""
|
|
1239
|
-
tmp_stype = "coo" if coo_build or stype == "coo" else None
|
|
1240
|
-
reducer = par_reduce if parallel else functools.reduce
|
|
1241
|
-
return reducer(functools.partial(kron_dispatch, stype=tmp_stype), ops)
|
|
1242
|
-
|
|
1243
|
-
|
|
1244
|
-
def dynal(x, bases):
|
|
1245
|
-
"""Generate 'dynamic decimal' for ``x`` given ``dims``.
|
|
1246
|
-
|
|
1247
|
-
Examples
|
|
1248
|
-
--------
|
|
1249
|
-
>>> dims = [13, 2, 7, 3, 10]
|
|
1250
|
-
>>> prod(dims) # total hilbert space size
|
|
1251
|
-
5460
|
|
1252
|
-
|
|
1253
|
-
>>> x = 3279
|
|
1254
|
-
>>> drep = list(dyn_bin(x, dims)) # dyn bases repr
|
|
1255
|
-
>>> drep
|
|
1256
|
-
[7, 1, 4, 0, 9]
|
|
1257
|
-
|
|
1258
|
-
>>> bs_szs = [prod(dims[i + 1:]) for i in range(len(dims))]
|
|
1259
|
-
>>> bs_szs
|
|
1260
|
-
[420, 210, 30, 10, 1]
|
|
1261
|
-
|
|
1262
|
-
>>> # reconstruct x
|
|
1263
|
-
>>> sum(d * b for d, b in zip(drep, bs_szs))
|
|
1264
|
-
3279
|
|
1265
|
-
"""
|
|
1266
|
-
bs_szs = [prod(bases[i + 1 :]) for i in range(len(bases))]
|
|
1267
|
-
|
|
1268
|
-
for b in bs_szs:
|
|
1269
|
-
div = x // b
|
|
1270
|
-
yield div
|
|
1271
|
-
x -= div * b
|
|
1272
|
-
|
|
1273
|
-
|
|
1274
|
-
def gen_matching_dynal(ri, rf, dims):
|
|
1275
|
-
"""Return the matching dynal part of ``ri`` and ``rf``, plus the first pair
|
|
1276
|
-
that don't match.
|
|
1277
|
-
"""
|
|
1278
|
-
for d1, d2 in zip(dynal(ri, dims), dynal(rf, dims)):
|
|
1279
|
-
if d1 == d2:
|
|
1280
|
-
yield (d1, d2)
|
|
1281
|
-
else:
|
|
1282
|
-
yield (d1, d2)
|
|
1283
|
-
break
|
|
1284
|
-
|
|
1285
|
-
|
|
1286
|
-
def gen_ops_maybe_sliced(ops, ix):
|
|
1287
|
-
"""Take ``ops`` and slice the first few, according to the length of ``ix``
|
|
1288
|
-
and with ``ix``, and leave the rest.
|
|
1289
|
-
"""
|
|
1290
|
-
for op, i in itertools.zip_longest(ops, ix):
|
|
1291
|
-
if i is not None:
|
|
1292
|
-
d1, d2 = i
|
|
1293
|
-
# can't slice coo matrices
|
|
1294
|
-
if sp.isspmatrix_coo(op):
|
|
1295
|
-
yield op.tocsr()[slice(d1, d2 + 1), :].tocoo()
|
|
1296
|
-
else:
|
|
1297
|
-
yield op[slice(d1, d2 + 1), :]
|
|
1298
|
-
else:
|
|
1299
|
-
yield op
|
|
1300
|
-
|
|
1301
|
-
|
|
1302
|
-
def kron(*ops, stype=None, coo_build=False, parallel=False, ownership=None):
|
|
1303
|
-
"""Tensor (kronecker) product of variable number of arguments.
|
|
1304
|
-
|
|
1305
|
-
Parameters
|
|
1306
|
-
----------
|
|
1307
|
-
ops : sequence of vectors or matrices
|
|
1308
|
-
Objects to be tensored together.
|
|
1309
|
-
stype : str, optional
|
|
1310
|
-
Desired output format if resultant object is sparse. Should be one
|
|
1311
|
-
of {``'csr'``, ``'bsr'``, ``'coo'``, ``'csc'``}. If ``None``, infer
|
|
1312
|
-
from input matrices.
|
|
1313
|
-
coo_build : bool, optional
|
|
1314
|
-
        Whether to force sparse construction to use the ``'coo'``
        format (only for sparse matrices in the first place).
    parallel : bool, optional
        Perform a parallel reduce on the operators, can be quicker.
    ownership : (int, int), optional
        If given, only construct the rows in ``range(*ownership)``. Such that
        the final operator is actually ``X[slice(*ownership), :]``. Useful for
        constructing operators in parallel, e.g. for MPI.

    Returns
    -------
    X : dense or sparse vector or operator
        Tensor product of ``ops``.

    Notes
    -----
    1. The product is performed as ``(a & (b & (c & ...)))``

    Examples
    --------
    Simple example:

    >>> a = np.array([[1, 2], [3, 4]])
    >>> b = np.array([[1., 1.1], [1.11, 1.111]])
    >>> kron(a, b)
    qarray([[1.   , 1.1  , 2.   , 2.2  ],
            [1.11 , 1.111, 2.22 , 2.222],
            [3.   , 3.3  , 4.   , 4.4  ],
            [3.33 , 3.333, 4.44 , 4.444]])

    Partial construction of rows:

    >>> ops = [rand_matrix(2, sparse=True) for _ in range(10)]
    >>> kron(*ops, ownership=(256, 512))
    <256x1024 sparse matrix of type '<class 'numpy.complex128'>'
            with 13122 stored elements in Compressed Sparse Row format>
    """
    core_kws = {"coo_build": coo_build, "stype": stype, "parallel": parallel}

    if ownership is None:
        X = _kron_core(*ops, **core_kws)
    else:
        ri, rf = ownership
        dims = [op.shape[0] for op in ops]

        D = prod(dims)
        if not ((0 <= ri < D) and (0 < rf <= D)):
            raise ValueError(f"Ownership ({ri}, {rf}) not in range [0-{D}].")

        matching_dyn = tuple(gen_matching_dynal(ri, rf - 1, dims))
        sliced_ops = list(gen_ops_maybe_sliced(ops, matching_dyn))
        X = _kron_core(*sliced_ops, **core_kws)

        # check if the kron has naturally oversliced
        if matching_dyn:
            mtchn_bs = [prod(dims[i + 1 :]) for i in range(len(matching_dyn))]
            coeffs_bases = tuple(zip(mtchn_bs, matching_dyn))
            ri_got = sum(d * b[0] for d, b in coeffs_bases)
            rf_got = sum(d * b[1] for d, b in coeffs_bases) + mtchn_bs[-1]
        else:
            ri_got, rf_got = 0, D

        # slice the desired rows only using the difference between indices
        di, df = ri - ri_got, rf - rf_got
        if di or df:
            # we can't slice 'coo' matrices -> convert to 'csr'
            if sp.isspmatrix_coo(X):
                X = X.tocsr()
            X = X[di : (None if df == 0 else df), :]

    if stype is not None:
        return X.asformat(stype)
    if coo_build or (issparse(X) and X.format == "coo"):
        return X.asformat("csr")

    return X


def kronpow(a, p, **kron_opts):
    """Returns `a` tensored with itself `p` times.

    Equivalent to ``reduce(lambda x, y: x & y, [a] * p)``.

    Parameters
    ----------
    a : dense or sparse vector or operator
        Object to tensor power.
    p : int
        Tensor power.
    kron_opts :
        Supplied to :func:`~quimb.kron`.

    Returns
    -------
    dense or sparse vector or operator
    """
    ops = (a,) * p
    return kron(*ops, **kron_opts)
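A minimal usage sketch (editorial addition, not part of the removed module) of the equivalence stated in the docstring, assuming quimb's public ``pauli`` constructor and the ``&`` kron operator patched in further down this file:

>>> import numpy as np
>>> import quimb as qu
>>> X = qu.pauli('X')
>>> np.allclose(qu.kronpow(X, 3), X & X & X)
True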


def _find_shape_of_nested_int_array(x):
    """Take a n-nested list/tuple of integers and find its array shape."""
    shape = [len(x)]
    sub_x = x[0]
    while not np.issubdtype(type(sub_x), np.integer):
        shape.append(len(sub_x))
        sub_x = sub_x[0]
    return tuple(shape)


def _dim_map_1d(sza, coos):
    for coo in coos:
        if 0 <= coo < sza:
            yield coo
        else:
            raise ValueError("One or more coordinates out of range.")


def _dim_map_1dtrim(sza, coos):
    return (coo for coo in coos if (0 <= coo < sza))


def _dim_map_1dcyclic(sza, coos):
    return (coo % sza for coo in coos)


def _dim_map_2dcyclic(sza, szb, coos):
    return (szb * (coo[0] % sza) + coo[1] % szb for coo in coos)


def _dim_map_2dtrim(sza, szb, coos):
    for coo in coos:
        x, y = coo
        if 0 <= x < sza and 0 <= y < szb:
            yield szb * x + y


def _dim_map_2d(sza, szb, coos):
    for coo in coos:
        x, y = coo
        if 0 <= x < sza and 0 <= y < szb:
            yield szb * x + y
        else:
            raise ValueError("One or more coordinates out of range.")


def _dim_map_nd(szs, coos, cyclic=False, trim=False):
    strides = [1]
    for sz in szs[-1:0:-1]:
        strides.insert(0, sz * strides[0])
    if cyclic:
        coos = ((c % sz for c, sz in zip(coo, szs)) for coo in coos)
    elif trim:
        coos = (c for c in coos if all(x == x % sz for x, sz in zip(c, szs)))
    elif not all(all(c == c % sz for c, sz in zip(coo, szs)) for coo in coos):
        raise ValueError("One or more coordinates out of range.")
    return (sum(c * m for c, m in zip(coo, strides)) for coo in coos)


_dim_mapper_methods = {
    (1, False, False): _dim_map_1d,
    (1, False, True): _dim_map_1dtrim,
    (1, True, False): _dim_map_1dcyclic,
    (2, False, False): _dim_map_2d,
    (2, False, True): _dim_map_2dtrim,
    (2, True, False): _dim_map_2dcyclic,
}
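For reference, the row-major flattening these mappers implement agrees with NumPy's ``ravel_multi_index``; a small sketch (editorial addition) using the 2D mapper defined above:

>>> import numpy as np
>>> list(_dim_map_2d(3, 4, [(1, 2)]))
[6]
>>> int(np.ravel_multi_index((1, 2), (3, 4)))
6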


def dim_map(dims, coos, cyclic=False, trim=False):
    """Flatten 2d+ dimensions and coordinates.

    Maps multi-dimensional coordinates and indices to flat arrays in a
    regular way. Wraps or deletes coordinates beyond the system size
    depending on parameters ``cyclic`` and ``trim``.

    Parameters
    ----------
    dims : nested tuple of int
        Multi-dim array of systems' internal dimensions.
    coos : list of tuples of int
        Array of coordinate tuples to convert.
    cyclic : bool, optional
        Whether to automatically wrap coordinates beyond the system size or
        delete them.
    trim : bool, optional
        If True, any coordinates beyond the dimensions will be deleted;
        overridden by ``cyclic``.

    Returns
    -------
    flat_dims : tuple
        Flattened version of ``dims``.
    inds : tuple
        Indices corresponding to the original coordinates.

    Examples
    --------

    >>> dims = [[2, 3], [4, 5]]
    >>> coords = [(0, 0), (1, 1)]
    >>> flat_dims, inds = dim_map(dims, coords)
    >>> flat_dims
    (2, 3, 4, 5)
    >>> inds
    (0, 3)

    >>> dim_map(dims, [(2, 0), (-1, 1)], cyclic=True)
    ((2, 3, 4, 5), (0, 3))
    """
    # Figure out shape of dimensions given
    if isinstance(dims, np.ndarray):
        szs = dims.shape
        ndim = dims.ndim
    else:
        szs = _find_shape_of_nested_int_array(dims)
        ndim = len(szs)

    # Ensure `coos` in right format for 1d (i.e. not single tuples)
    if ndim == 1:
        if isinstance(coos, np.ndarray):
            coos = coos.ravel()
        elif not isinstance(coos[0], Integral):
            coos = (c[0] for c in coos)

    # Map coordinates to indices
    try:
        inds = _dim_mapper_methods[(ndim, cyclic, trim)](*szs, coos)
    except KeyError:
        inds = _dim_map_nd(szs, coos, cyclic, trim)

    # Ravel dims
    while ndim > 1:
        dims = itertools.chain.from_iterable(dims)
        ndim -= 1

    return tuple(dims), tuple(inds)


def _dim_compressor(dims, inds):  # pragma: no cover
    """Helper function for ``dim_compress`` that does the heavy lifting.

    Parameters
    ----------
    dims : sequence of int
        The subsystem dimensions.
    inds : sequence of int
        The indices of the 'marked' subsystems.

    Returns
    -------
    generator of (int, int)
        Sequence of pairs of new dimension subsystem with marked flag {0, 1}.
    """
    blocksize_id = blocksize_op = 1
    autoplace_count = 0
    for i, dim in enumerate(dims):
        if dim < 0:
            if blocksize_op > 1:
                yield (blocksize_op, 1)
                blocksize_op = 1
            elif blocksize_id > 1:
                yield (blocksize_id, 0)
                blocksize_id = 1
            autoplace_count += dim
        elif i in inds:
            if blocksize_id > 1:
                yield (blocksize_id, 0)
                blocksize_id = 1
            elif autoplace_count < 0:
                yield (autoplace_count, 1)
                autoplace_count = 0
            blocksize_op *= dim
        else:
            if blocksize_op > 1:
                yield (blocksize_op, 1)
                blocksize_op = 1
            elif autoplace_count < 0:
                yield (autoplace_count, 1)
                autoplace_count = 0
            blocksize_id *= dim
    yield (
        (blocksize_op, 1)
        if blocksize_op > 1
        else (blocksize_id, 0)
        if blocksize_id > 1
        else (autoplace_count, 1)
    )
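A worked sketch (editorial addition) of what ``_dim_compressor`` yields for the ``dim_compress`` example given below: three merged blocks, the middle one flagged as 'marked', which ``dim_compress`` then turns into dims ``(8, 4, 32)`` and inds ``(1,)``:

>>> tuple(_dim_compressor([2] * 10, [3, 4]))
((8, 0), (4, 1), (32, 0))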


def dim_compress(dims, inds):
    """Compress neighbouring subsystem dimensions.

    Take some dimensions and target indices and compress both, i.e.
    merge adjacent dimensions that are both either in ``dims`` or not. For
    example, if tensoring an operator onto a single site, with many sites
    the identity, treat these as single large identities.

    Parameters
    ----------
    dims : tuple of int
        List of system's dimensions - 1d or flattened (e.g. with
        ``dim_map``).
    inds : tuple of int
        List of target indices, i.e. dimensions not to merge.

    Returns
    -------
    dims : tuple of int
        New compressed dimensions.
    inds : tuple of int
        New indexes corresponding to the compressed dimensions. These are
        guaranteed to now be alternating i.e. either (0, 2, ...) or
        (1, 3, ...).

    Examples
    --------
    >>> dims = [2] * 10
    >>> inds = [3, 4]
    >>> compressed_dims, compressed_inds = dim_compress(dims, inds)
    >>> compressed_dims
    (8, 4, 32)
    >>> compressed_inds
    (1,)
    """
    if isinstance(inds, Integral):
        inds = (inds,)

    dims, inds = zip(*_dim_compressor(dims, inds))
    inds = tuple(i for i, b in enumerate(inds) if b)

    return dims, inds


def ikron(
    ops,
    dims,
    inds,
    sparse=None,
    stype=None,
    coo_build=False,
    parallel=False,
    ownership=None,
):
    """Tensor an operator into a larger space by padding with identities.

    Automatically placing a large operator over several dimensions is allowed
    and a list of operators can be given which are then placed cyclically.

    Parameters
    ----------
    ops : operator or sequence of operators
        Operator(s) to place into the tensor space. If more than one, these
        are cyclically placed at each of the ``dims`` specified by ``inds``.
    dims : sequence of int or nested sequences of int
        The subsystem dimensions. If treated as an array, should have the same
        number of dimensions as the system.
    inds : tuple of int, or sequence of tuple of int
        Indices, or coordinates, of the dimensions to place operator(s) on.
        Each dimension specified can be smaller than the size of ``op`` (as
        long as it factorizes it).
    sparse : bool, optional
        Whether to construct the new operator in sparse form.
    stype : str, optional
        If sparse, which format to use for the output.
    coo_build : bool, optional
        Whether to build the intermediary matrices using the ``'coo'``
        format - can be faster to build sparse in this way, then
        convert to chosen format, including dense.
    parallel : bool, optional
        Whether to build the operator in parallel using threads (only good
        for big (d > 2**16) operators).
    ownership : (int, int), optional
        If given, only construct the rows in ``range(*ownership)``. Such that
        the final operator is actually ``X[slice(*ownership), :]``. Useful for
        constructing operators in parallel, e.g. for MPI.

    Returns
    -------
    qarray or sparse matrix
        Operator such that ops act on ``dims[inds]``.

    See Also
    --------
    kron, pkron

    Examples
    --------
    Place an operator between two identities:

    >>> IZI = ikron(pauli('z'), [2, 2, 2], 1)
    >>> np.allclose(IZI, eye(2) & pauli('z') & eye(2))
    True

    Overlay a large operator on several sites:

    >>> rho_ab = rand_rho(4)
    >>> rho_abc = ikron(rho_ab, [5, 2, 2, 7], [1, 2])  # overlay both 2s
    >>> rho_abc.shape
    (140, 140)

    Place an operator at specified sites, regardless of size:

    >>> A = rand_herm(5)
    >>> ikron(A, [2, -1, 2, -1, 2, -1], [1, 3, 5]).shape
    (1000, 1000)

    Create a two site interaction (note the coefficient `jx` we only need to
    multiply into a single input operator):

    >>> Sx = spin_operator('X')
    >>> jx = 0.123
    >>> jSxSx = ikron([jx * Sx, Sx], [2, 2, 2, 2], [0, 3])
    >>> np.allclose(jSxSx, jx * (Sx & eye(2) & eye(2) & Sx))
    True
    """
    # TODO: test 2d+ dims and coos
    # TODO: simplify with compress coords?
    # TODO: allow -1 in dims to auto place *without* ind? one or other

    # Make sure `ops` is a list
    if isinstance(ops, (np.ndarray, sp.spmatrix)):
        ops = (ops,)

    dtype = common_type(*ops)

    # Make sure dimensions and coordinates have been flattened.
    if np.ndim(dims) > 1:
        dims, inds = dim_map(dims, inds)
    # Make sure `inds` is a list
    elif np.ndim(inds) == 0:
        inds = (inds,)

    # Infer sparsity from list of ops
    if sparse is None:
        sparse = any(issparse(op) for op in ops)

    # Create a sorted list of operators with their matching index
    inds, ops = zip(*sorted(zip(inds, itertools.cycle(ops))))
    inds, ops = set(inds), iter(ops)

    # can't slice "coo" format so use "csr" if ownership specified
    eye_kws = {
        "sparse": sparse,
        "stype": "csr" if ownership else "coo",
        "dtype": dtype,
    }

    def gen_ops():
        cff_id = 1  # keeps track of compressing adjacent identities
        cff_ov = 1  # keeps track of overlaying op on multiple dimensions
        for ind, dim in enumerate(dims):
            # check if op should be placed here
            if ind in inds:
                # check if need preceding identities
                if cff_id > 1:
                    yield eye(cff_id, **eye_kws)
                    cff_id = 1  # reset cumulative identity size

                # check if first subsystem in placement block
                if cff_ov == 1:
                    op = next(ops)
                    sz_op = op.shape[0]

                # final dim (of block or total) -> place op
                if cff_ov * dim == sz_op or dim == -1:
                    yield op
                    cff_ov = 1
                # accumulate sub-dims
                else:
                    cff_ov *= dim

            # check if midway through placing operator over several subsystems
            elif cff_ov > 1:
                cff_ov *= dim

            # else accumulate adjacent identities
            else:
                cff_id *= dim

        # check if trailing identity needed
        if cff_id > 1:
            yield eye(cff_id, **eye_kws)

    return kron(
        *gen_ops(),
        stype=stype,
        coo_build=coo_build,
        parallel=parallel,
        ownership=ownership,
    )


@ensure_qarray
def _permute_dense(p, dims, perm):
    """Permute the subsystems of a dense array."""
    p, perm = np.asarray(p), np.asarray(perm)
    d = prod(dims)

    if isop(p):
        return (
            p.reshape([*dims, *dims])
            .transpose([*perm, *(perm + len(dims))])
            .reshape([d, d])
        )

    return p.reshape(dims).transpose(perm).reshape([d, 1])


def _permute_sparse(a, dims, perm):
    """Permute the subsystems of a sparse matrix."""
    perm, dims = np.asarray(perm), np.asarray(dims)

    # New dimensions & stride (i.e. product of preceding dimensions)
    new_dims = dims[perm]
    odim_stride = np.multiply.accumulate(dims[::-1])[::-1] // dims
    ndim_stride = np.multiply.accumulate(new_dims[::-1])[::-1] // new_dims

    # Range of possible coordinates for each subsys
    coos = (tuple(range(dim)) for dim in dims)

    # Complete basis using coordinates for current and new dimensions
    basis = np.asarray(tuple(itertools.product(*coos, repeat=1)))
    oinds = np.sum(odim_stride * basis, axis=1)
    ninds = np.sum(ndim_stride * basis[:, perm], axis=1)

    # Construct permutation matrix and apply it to state
    perm_mat = sp.coo_matrix((np.ones(a.shape[0]), (ninds, oinds))).tocsr()
    if isop(a):
        return dot(dot(perm_mat, a), dag(perm_mat))
    return dot(perm_mat, a)


def permute(p, dims, perm):
    """Permute the subsystems of a state or operator.

    Parameters
    ----------
    p : vector or operator
        State or operator to permute.
    dims : tuple of int
        Internal dimensions of the system.
    perm : tuple of int
        New order of indexes ``range(len(dims))``.

    Returns
    -------
    pp : vector or operator
        Permuted state or operator.

    See Also
    --------
    pkron

    Examples
    --------

    >>> IX = speye(2) & pauli('X', sparse=True)
    >>> XI = permute(IX, dims=[2, 2], perm=[1, 0])
    >>> np.allclose(XI.toarray(), pauli('X') & eye(2))
    True
    """
    if issparse(p):
        return _permute_sparse(p, dims, perm)
    return _permute_dense(p, dims, perm)


def pkron(op, dims, inds, **ikron_opts):
    # TODO: multiple ops
    # TODO: coo map, coo compress
    # TODO: sparse, stype, coo_build?
    """Advanced, padded tensor product.

    Construct an operator such that ``op`` acts on ``dims[inds]``, and allow it
    to be arbitrarily split and reversed etc., in other words, permute and then
    tensor it into a larger space.

    Parameters
    ----------
    op : matrix-like or tuple of matrix-like
        Operator to place into the tensor space.
    dims : tuple of int
        Dimensions of tensor space.
    inds : tuple of int
        Indices of the dimensions to place operators on. If multiple
        operators are specified, ``inds[1]`` corresponds to ``ops[1]`` and
        so on.
    sparse : bool, optional
        Whether to construct the new operator in sparse form.
    stype : str, optional
        If sparse, which format to use for the output.
    coo_build : bool, optional
        Whether to build the intermediary matrices using the ``'coo'``
        format - can be faster to build sparse in this way, then
        convert to chosen format, including dense.

    Returns
    -------
    operator
        Operator such that ops act on ``dims[inds]``.

    See Also
    --------
    ikron, permute

    Examples
    --------

    Here we take an operator that acts on spins 0 and 1 with X and Z, and
    transform it to act on spins 2 and 0 -- i.e. reverse it and sandwich an
    identity between the two sites it acts on.

    >>> XZ = pauli('X') & pauli('Z')
    >>> ZIX = pkron(XZ, dims=[2, 3, 2], inds=[2, 0])
    >>> np.allclose(ZIX, pauli('Z') & eye(3) & pauli('X'))
    True
    """
    dims, inds = np.asarray(dims), np.asarray(inds)

    # total number of subsystems and size
    n = len(dims)
    sz = prod(dims)

    # dimensions of space where op should be placed, and its total size
    dims_in = dims[inds]
    sz_in = prod(dims_in)

    # construct pre-permuted full operator
    b = ikron(op, [sz_in, sz // sz_in], 0, **ikron_opts)

    # inverse of inds
    if len(dims) == len(inds):
        inds_out, dims_out = (), ()
    else:
        inds_out, dims_out = zip(
            *((i, x) for i, x in enumerate(dims) if i not in inds)
        )

    # current order and dimensions of system
    p = [*inds, *inds_out]
    dims_cur = (*dims_in, *dims_out)

    # find inverse permutation
    ip = np.empty(n, dtype=np.int32)
    ip[p] = np.arange(n)

    return permute(b, dims_cur, ip)


def ind_complement(inds, n):
    """Return the indices below ``n`` not contained in ``inds``."""
    return tuple(i for i in range(n) if i not in inds)


def itrace(a, axes=(0, 1)):
    """General tensor trace, i.e. multiple contractions, for a dense array.

    Parameters
    ----------
    a : numpy.ndarray
        Tensor to trace.
    axes : (2,) int or (2,) array of int
        - (2,) int: Perform trace on the two indices listed.
        - (2,) array of int: Trace out first sequence of indices with second
          sequence indices.

    Returns
    -------
    numpy.ndarray
        The tensor remaining after tracing out the specified axes.

    See Also
    --------
    trace, partial_trace

    Examples
    --------
    Trace out a single pair of dimensions:

    >>> a = randn(2, 3, 4, 2, 3, 4)
    >>> itrace(a, axes=(0, 3)).shape
    (3, 4, 3, 4)

    Trace out multiple dimensions:

    >>> itrace(a, axes=([1, 2], [4, 5])).shape
    (2, 2)
    """
    # Single index pair to trace out
    if isinstance(axes[0], Integral):
        return np.trace(a, axis1=axes[0], axis2=axes[1])
    elif len(axes[0]) == 1:
        return np.trace(a, axis1=axes[0][0], axis2=axes[1][0])

    # Multiple index pairs to trace out
    gone = set()
    for axis1, axis2 in zip(*axes):
        # Modify indices to adjust for traced out dimensions
        mod1 = sum(x < axis1 for x in gone)
        mod2 = sum(x < axis2 for x in gone)
        gone |= {axis1, axis2}
        a = np.trace(a, axis1=axis1 - mod1, axis2=axis2 - mod2)
    return a


@ensure_qarray
def _partial_trace_dense(p, dims, keep):
    """Perform partial trace of a dense matrix."""
    if isinstance(keep, Integral):
        keep = (keep,)
    if isvec(p):  # p = psi
        p = np.asarray(p).reshape(dims)
        lose = ind_complement(keep, len(dims))
        p = np.tensordot(p, p.conj(), (lose, lose))
        d = int(p.size**0.5)
        return p.reshape((d, d))
    else:
        p = np.asarray(p).reshape((*dims, *dims))
        total_dims = len(dims)
        lose = ind_complement(keep, total_dims)
        lose2 = tuple(ind + total_dims for ind in lose)
        p = itrace(p, (lose, lose2))
        d = int(p.size**0.5)
        return p.reshape((d, d))


def _trace_lose(p, dims, lose):
    """Simple partial trace where the single subsystem at ``lose``
    is traced out.
    """
    p = p if isop(p) else dot(p, dag(p))
    dims = np.asarray(dims)
    e = dims[lose]
    a = prod(dims[:lose])
    b = prod(dims[lose + 1 :])
    rhos = np.zeros(shape=(a * b, a * b), dtype=np.complex128)
    for i in range(a * b):
        for j in range(i, a * b):
            i_i = e * b * (i // b) + (i % b)
            i_f = e * b * (i // b) + (i % b) + (e - 1) * b + 1
            j_i = e * b * (j // b) + (j % b)
            j_f = e * b * (j // b) + (j % b) + (e - 1) * b + 1
            rhos[i, j] = trace(p[i_i:i_f:b, j_i:j_f:b])
            if j != i:
                rhos[j, i] = rhos[i, j].conjugate()
    return rhos


def _trace_keep(p, dims, keep):
    """Simple partial trace where the single subsystem
    at ``keep`` is kept.
    """
    p = p if isop(p) else dot(p, dag(p))
    dims = np.asarray(dims)
    s = dims[keep]
    a = prod(dims[:keep])
    b = prod(dims[keep + 1 :])
    rhos = np.zeros(shape=(s, s), dtype=np.complex128)
    for i in range(s):
        for j in range(i, s):
            for k in range(a):
                i_i = b * i + s * b * k
                i_f = b * i + s * b * k + b
                j_i = b * j + s * b * k
                j_f = b * j + s * b * k + b
                rhos[i, j] += trace(p[i_i:i_f, j_i:j_f])
            if j != i:
                rhos[j, i] = rhos[i, j].conjugate()
    return rhos


def _partial_trace_simple(p, dims, keep):
    """Simple partial trace made up of consecutive single subsystem partial
    traces, augmented by 'compressing' the dimensions each time.
    """
    p = p if isop(p) else dot(p, dag(p))
    dims, keep = dim_compress(dims, keep)
    if len(keep) == 1:
        return _trace_keep(p, dims, *keep)
    lmax = max(enumerate(dims), key=lambda ix: (ix[0] not in keep) * ix[1])[0]
    p = _trace_lose(p, dims, lmax)
    dims = (*dims[:lmax], *dims[lmax + 1 :])
    keep = {(ind if ind < lmax else ind - 1) for ind in keep}
    return _partial_trace_simple(p, dims, keep)


def partial_trace(p, dims, keep):
    """Partial trace of a dense or sparse state.

    Parameters
    ----------
    p : ket or density operator
        State to perform partial trace on - can be sparse.
    dims : sequence of int or nested sequences of int
        The subsystem dimensions. If treated as an array, should have the same
        number of dimensions as the system.
    keep : int, sequence of int or sequence of tuple[int]
        Index or indices of subsystem(s) to keep. If a sequence of integer
        tuples, each should be a coordinate such that the length matches the
        number of dimensions of the system.

    Returns
    -------
    rho : qarray
        Density operator of subsystem dimensions ``dims[keep]``.

    See Also
    --------
    itrace

    Examples
    --------
    Trace out single subsystem of a ket:

    >>> psi = bell_state('psi-')
    >>> ptr(psi, [2, 2], keep=0)  # expect identity
    qarray([[ 0.5+0.j,  0.0+0.j],
            [ 0.0+0.j,  0.5+0.j]])

    Trace out multiple subsystems of a density operator:

    >>> rho_abc = rand_rho(3 * 4 * 5)
    >>> rho_ab = partial_trace(rho_abc, [3, 4, 5], keep=[0, 1])
    >>> rho_ab.shape
    (12, 12)

    Trace out qutrits from a 2D system:

    >>> psi_abcd = rand_ket(3 ** 4)
    >>> dims = [[3, 3],
    ...         [3, 3]]
    >>> keep = [(0, 0), (1, 1)]
    >>> rho_ac = partial_trace(psi_abcd, dims, keep)
    >>> rho_ac.shape
    (9, 9)
    """
    # map 2D+ systems into flat Hilbert space
    try:
        ndim = dims.ndim
    except AttributeError:
        ndim = len(_find_shape_of_nested_int_array(dims))

    if ndim >= 2:
        dims, keep = dim_map(dims, keep)

    if issparse(p):
        return _partial_trace_simple(p, dims, keep)

    return _partial_trace_dense(p, dims, keep)


# --------------------------------------------------------------------------- #
#                                MONKEY-PATCHES                                #
# --------------------------------------------------------------------------- #


nmlz = normalize
"""Alias for :func:`normalize`."""

tr = trace
"""Alias for :func:`trace`."""

ptr = partial_trace
"""Alias for :func:`partial_trace`."""

sp.csr_matrix.nmlz = nmlz

sp.csr_matrix.tr = _trace_sparse
sp.csc_matrix.tr = _trace_sparse
sp.coo_matrix.tr = _trace_sparse
sp.bsr_matrix.tr = _trace_sparse

sp.csr_matrix.ptr = _partial_trace_simple
sp.csc_matrix.ptr = _partial_trace_simple
sp.coo_matrix.ptr = _partial_trace_simple
sp.bsr_matrix.ptr = _partial_trace_simple

sp.csr_matrix.__and__ = kron_dispatch
sp.bsr_matrix.__and__ = kron_dispatch
sp.csc_matrix.__and__ = kron_dispatch
sp.coo_matrix.__and__ = kron_dispatch
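The ``__and__`` patches above are what make the ``&`` syntax used throughout the docstrings work for scipy sparse operands as well as dense ``qarray``s; a brief sketch (editorial addition):

>>> import quimb as qu
>>> (qu.pauli('Z', sparse=True) & qu.speye(2)).shape
(4, 4)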


if not hasattr(sp.csr_matrix, "H"):
    # scipy >=1.14 removed the .H attribute

    def sparse_hermitian_conjugate(self):
        return self.conjugate().transpose()

    sp.csr_matrix.H = property(sparse_hermitian_conjugate)
    sp.csc_matrix.H = property(sparse_hermitian_conjugate)
    sp.coo_matrix.H = property(sparse_hermitian_conjugate)
    sp.bsr_matrix.H = property(sparse_hermitian_conjugate)
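A quick check (editorial addition) that the restored ``.H`` attribute behaves as the conjugate transpose, whether supplied by scipy itself or by the patch above:

>>> import numpy as np
>>> import quimb as qu
>>> A = qu.rand_matrix(2, sparse=True)
>>> np.allclose(A.H.toarray(), A.toarray().conj().T)
True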


def csr_mulvec_wrap(fn):
    """Dispatch sparse csr-vector multiplication to parallel method."""

    @functools.wraps(fn)
    def csr_mul_vector(A, x):
        if A.nnz > 50000 and _NUM_THREAD_WORKERS > 1:
            return par_dot_csr_matvec(A, x)
        else:
            y = fn(A, x)
            if isinstance(x, qarray):
                y = qarray(y)
            return y

    return csr_mul_vector


def sp_mulvec_wrap(fn):
    """Scipy sparse doesn't call __array_finalize__ so need to explicitly
    make sure qarray input -> qarray output.
    """

    @functools.wraps(fn)
    def qarrayed_fn(self, other):
        out = fn(self, other)
        if isinstance(other, qarray):
            out = qarray(out)
        return out

    return qarrayed_fn
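These wrappers change only the dispatch (threaded matvec for large csr matrices) and the return type, not the numerical result; a minimal sketch (editorial addition) of the observable effect for a small matrix, where the wrapped path is just scipy's own matvec returned as a ``qarray``:

>>> import quimb as qu
>>> A = qu.rand_matrix(8, sparse=True)
>>> x = qu.rand_ket(8)
>>> y = A @ x
>>> y.shape
(8, 1)
>>> isinstance(y, qu.qarray)
True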


try:
    # scipy>=1.13
    sp.csr_matrix._matmul_vector = csr_mulvec_wrap(
        sp.csr_matrix._matmul_vector
    )
    sp.csc_matrix._matmul_vector = sp_mulvec_wrap(sp.csc_matrix._matmul_vector)
    sp.coo_matrix._matmul_vector = sp_mulvec_wrap(sp.coo_matrix._matmul_vector)
    sp.bsr_matrix._matmul_vector = sp_mulvec_wrap(sp.bsr_matrix._matmul_vector)

    sp.csr_matrix._matmul_multivector = sp_mulvec_wrap(
        sp.csr_matrix._matmul_multivector
    )
    sp.csc_matrix._matmul_multivector = sp_mulvec_wrap(
        sp.csc_matrix._matmul_multivector
    )
    sp.coo_matrix._matmul_multivector = sp_mulvec_wrap(
        sp.coo_matrix._matmul_multivector
    )
    sp.bsr_matrix._matmul_multivector = sp_mulvec_wrap(
        sp.bsr_matrix._matmul_multivector
    )
except AttributeError:
    # scipy <=1.12
    sp.csr_matrix._mul_vector = csr_mulvec_wrap(sp.csr_matrix._mul_vector)
    sp.csc_matrix._mul_vector = sp_mulvec_wrap(sp.csc_matrix._mul_vector)
    sp.coo_matrix._mul_vector = sp_mulvec_wrap(sp.coo_matrix._mul_vector)
    sp.bsr_matrix._mul_vector = sp_mulvec_wrap(sp.bsr_matrix._mul_vector)

    sp.csr_matrix._mul_multivector = sp_mulvec_wrap(
        sp.csr_matrix._mul_multivector
    )
    sp.csc_matrix._mul_multivector = sp_mulvec_wrap(
        sp.csc_matrix._mul_multivector
    )
    sp.coo_matrix._mul_multivector = sp_mulvec_wrap(
        sp.coo_matrix._mul_multivector
    )
    sp.bsr_matrix._mul_multivector = sp_mulvec_wrap(
        sp.bsr_matrix._mul_multivector
    )