Trajectree 0.0.0__py3-none-any.whl → 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- trajectree/__init__.py +3 -0
- trajectree/fock_optics/devices.py +1 -1
- trajectree/fock_optics/light_sources.py +2 -2
- trajectree/fock_optics/measurement.py +3 -3
- trajectree/fock_optics/utils.py +6 -6
- trajectree/quimb/docs/_pygments/_pygments_dark.py +118 -0
- trajectree/quimb/docs/_pygments/_pygments_light.py +118 -0
- trajectree/quimb/docs/conf.py +158 -0
- trajectree/quimb/docs/examples/ex_mpi_expm_evo.py +62 -0
- trajectree/quimb/quimb/__init__.py +507 -0
- trajectree/quimb/quimb/calc.py +1491 -0
- trajectree/quimb/quimb/core.py +2279 -0
- trajectree/quimb/quimb/evo.py +712 -0
- trajectree/quimb/quimb/experimental/__init__.py +0 -0
- trajectree/quimb/quimb/experimental/autojittn.py +129 -0
- trajectree/quimb/quimb/experimental/belief_propagation/__init__.py +109 -0
- trajectree/quimb/quimb/experimental/belief_propagation/bp_common.py +397 -0
- trajectree/quimb/quimb/experimental/belief_propagation/d1bp.py +316 -0
- trajectree/quimb/quimb/experimental/belief_propagation/d2bp.py +653 -0
- trajectree/quimb/quimb/experimental/belief_propagation/hd1bp.py +571 -0
- trajectree/quimb/quimb/experimental/belief_propagation/hv1bp.py +775 -0
- trajectree/quimb/quimb/experimental/belief_propagation/l1bp.py +316 -0
- trajectree/quimb/quimb/experimental/belief_propagation/l2bp.py +537 -0
- trajectree/quimb/quimb/experimental/belief_propagation/regions.py +194 -0
- trajectree/quimb/quimb/experimental/cluster_update.py +286 -0
- trajectree/quimb/quimb/experimental/merabuilder.py +865 -0
- trajectree/quimb/quimb/experimental/operatorbuilder/__init__.py +15 -0
- trajectree/quimb/quimb/experimental/operatorbuilder/operatorbuilder.py +1631 -0
- trajectree/quimb/quimb/experimental/schematic.py +7 -0
- trajectree/quimb/quimb/experimental/tn_marginals.py +130 -0
- trajectree/quimb/quimb/experimental/tnvmc.py +1483 -0
- trajectree/quimb/quimb/gates.py +36 -0
- trajectree/quimb/quimb/gen/__init__.py +2 -0
- trajectree/quimb/quimb/gen/operators.py +1167 -0
- trajectree/quimb/quimb/gen/rand.py +713 -0
- trajectree/quimb/quimb/gen/states.py +479 -0
- trajectree/quimb/quimb/linalg/__init__.py +6 -0
- trajectree/quimb/quimb/linalg/approx_spectral.py +1109 -0
- trajectree/quimb/quimb/linalg/autoblock.py +258 -0
- trajectree/quimb/quimb/linalg/base_linalg.py +719 -0
- trajectree/quimb/quimb/linalg/mpi_launcher.py +397 -0
- trajectree/quimb/quimb/linalg/numpy_linalg.py +244 -0
- trajectree/quimb/quimb/linalg/rand_linalg.py +514 -0
- trajectree/quimb/quimb/linalg/scipy_linalg.py +293 -0
- trajectree/quimb/quimb/linalg/slepc_linalg.py +892 -0
- trajectree/quimb/quimb/schematic.py +1518 -0
- trajectree/quimb/quimb/tensor/__init__.py +401 -0
- trajectree/quimb/quimb/tensor/array_ops.py +610 -0
- trajectree/quimb/quimb/tensor/circuit.py +4824 -0
- trajectree/quimb/quimb/tensor/circuit_gen.py +411 -0
- trajectree/quimb/quimb/tensor/contraction.py +336 -0
- trajectree/quimb/quimb/tensor/decomp.py +1255 -0
- trajectree/quimb/quimb/tensor/drawing.py +1646 -0
- trajectree/quimb/quimb/tensor/fitting.py +385 -0
- trajectree/quimb/quimb/tensor/geometry.py +583 -0
- trajectree/quimb/quimb/tensor/interface.py +114 -0
- trajectree/quimb/quimb/tensor/networking.py +1058 -0
- trajectree/quimb/quimb/tensor/optimize.py +1818 -0
- trajectree/quimb/quimb/tensor/tensor_1d.py +4778 -0
- trajectree/quimb/quimb/tensor/tensor_1d_compress.py +1854 -0
- trajectree/quimb/quimb/tensor/tensor_1d_tebd.py +662 -0
- trajectree/quimb/quimb/tensor/tensor_2d.py +5954 -0
- trajectree/quimb/quimb/tensor/tensor_2d_compress.py +96 -0
- trajectree/quimb/quimb/tensor/tensor_2d_tebd.py +1230 -0
- trajectree/quimb/quimb/tensor/tensor_3d.py +2869 -0
- trajectree/quimb/quimb/tensor/tensor_3d_tebd.py +46 -0
- trajectree/quimb/quimb/tensor/tensor_approx_spectral.py +60 -0
- trajectree/quimb/quimb/tensor/tensor_arbgeom.py +3237 -0
- trajectree/quimb/quimb/tensor/tensor_arbgeom_compress.py +565 -0
- trajectree/quimb/quimb/tensor/tensor_arbgeom_tebd.py +1138 -0
- trajectree/quimb/quimb/tensor/tensor_builder.py +5411 -0
- trajectree/quimb/quimb/tensor/tensor_core.py +11179 -0
- trajectree/quimb/quimb/tensor/tensor_dmrg.py +1472 -0
- trajectree/quimb/quimb/tensor/tensor_mera.py +204 -0
- trajectree/quimb/quimb/utils.py +892 -0
- trajectree/quimb/tests/__init__.py +0 -0
- trajectree/quimb/tests/test_accel.py +501 -0
- trajectree/quimb/tests/test_calc.py +788 -0
- trajectree/quimb/tests/test_core.py +847 -0
- trajectree/quimb/tests/test_evo.py +565 -0
- trajectree/quimb/tests/test_gen/__init__.py +0 -0
- trajectree/quimb/tests/test_gen/test_operators.py +361 -0
- trajectree/quimb/tests/test_gen/test_rand.py +296 -0
- trajectree/quimb/tests/test_gen/test_states.py +261 -0
- trajectree/quimb/tests/test_linalg/__init__.py +0 -0
- trajectree/quimb/tests/test_linalg/test_approx_spectral.py +368 -0
- trajectree/quimb/tests/test_linalg/test_base_linalg.py +351 -0
- trajectree/quimb/tests/test_linalg/test_mpi_linalg.py +127 -0
- trajectree/quimb/tests/test_linalg/test_numpy_linalg.py +84 -0
- trajectree/quimb/tests/test_linalg/test_rand_linalg.py +134 -0
- trajectree/quimb/tests/test_linalg/test_slepc_linalg.py +283 -0
- trajectree/quimb/tests/test_tensor/__init__.py +0 -0
- trajectree/quimb/tests/test_tensor/test_belief_propagation/__init__.py +0 -0
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_d1bp.py +39 -0
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_d2bp.py +67 -0
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_hd1bp.py +64 -0
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_hv1bp.py +51 -0
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_l1bp.py +142 -0
- trajectree/quimb/tests/test_tensor/test_belief_propagation/test_l2bp.py +101 -0
- trajectree/quimb/tests/test_tensor/test_circuit.py +816 -0
- trajectree/quimb/tests/test_tensor/test_contract.py +67 -0
- trajectree/quimb/tests/test_tensor/test_decomp.py +40 -0
- trajectree/quimb/tests/test_tensor/test_mera.py +52 -0
- trajectree/quimb/tests/test_tensor/test_optimizers.py +488 -0
- trajectree/quimb/tests/test_tensor/test_tensor_1d.py +1171 -0
- trajectree/quimb/tests/test_tensor/test_tensor_2d.py +606 -0
- trajectree/quimb/tests/test_tensor/test_tensor_2d_tebd.py +144 -0
- trajectree/quimb/tests/test_tensor/test_tensor_3d.py +123 -0
- trajectree/quimb/tests/test_tensor/test_tensor_arbgeom.py +226 -0
- trajectree/quimb/tests/test_tensor/test_tensor_builder.py +441 -0
- trajectree/quimb/tests/test_tensor/test_tensor_core.py +2066 -0
- trajectree/quimb/tests/test_tensor/test_tensor_dmrg.py +388 -0
- trajectree/quimb/tests/test_tensor/test_tensor_spectral_approx.py +63 -0
- trajectree/quimb/tests/test_tensor/test_tensor_tebd.py +270 -0
- trajectree/quimb/tests/test_utils.py +85 -0
- trajectree/trajectory.py +2 -2
- {trajectree-0.0.0.dist-info → trajectree-0.0.1.dist-info}/METADATA +2 -2
- trajectree-0.0.1.dist-info/RECORD +126 -0
- trajectree-0.0.0.dist-info/RECORD +0 -16
- {trajectree-0.0.0.dist-info → trajectree-0.0.1.dist-info}/WHEEL +0 -0
- {trajectree-0.0.0.dist-info → trajectree-0.0.1.dist-info}/licenses/LICENSE +0 -0
- {trajectree-0.0.0.dist-info → trajectree-0.0.1.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,1631 @@
|
|
|
1
|
+
"""Tools for defining and constructing sparse operators with:
|
|
2
|
+
|
|
3
|
+
* arbitrary geometries,
|
|
4
|
+
* numba acceleration,
|
|
5
|
+
* support for reduced bases,
|
|
6
|
+
* efficient parallelization,
|
|
7
|
+
|
|
8
|
+
and optionally producing:
|
|
9
|
+
|
|
10
|
+
* sparse matrix form
|
|
11
|
+
* matrix product operators,
|
|
12
|
+
* dict of local gates form
|
|
13
|
+
* VMC 'coupled configs' form
|
|
14
|
+
|
|
15
|
+
Currently only supports composing operators which are sums of products of
|
|
16
|
+
diagonal or anti-diagonal real dimension 2 operators.
|
|
17
|
+
|
|
18
|
+
TODO::
|
|
19
|
+
|
|
20
|
+
- [ ] fix sparse matrix being built in opposite direction
|
|
21
|
+
- [ ] product of operators generator (e.g. for PEPS DMRG)
|
|
22
|
+
- [ ] complex and single precision support (lower priority)
|
|
23
|
+
- [ ] support for non-diagonal and qudit operators (lower priority)
|
|
24
|
+
|
|
25
|
+
DONE::
|
|
26
|
+
|
|
27
|
+
- [x] use compact bitbasis
|
|
28
|
+
- [x] design interface for HilbertSpace / OperatorBuilder interaction
|
|
29
|
+
- [x] automatic symbolic jordan wigner transformation
|
|
30
|
+
- [x] numba accelerated coupled config
|
|
31
|
+
- [x] general definition and automatic 'symbolic' jordan wigner
|
|
32
|
+
- [x] multithreaded sparse matrix construction
|
|
33
|
+
- [x] LocalHam generator (e.g. for simple update, normal PEPS algs)
|
|
34
|
+
- [x] automatic MPO generator
|
|
35
|
+
|
|
36
|
+
"""
|
|
37
|
+
|
|
38
|
+
import operator
|
|
39
|
+
import functools
|
|
40
|
+
|
|
41
|
+
import numpy as np
|
|
42
|
+
from numba import njit
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
@njit
def get_local_size(n, rank, world_size):
    """Return how many of ``n`` global items are assigned to ``rank``.

    The ``n`` items are spread as evenly as possible over ``world_size``
    ranks: every rank receives ``n // world_size`` items and the first
    ``n % world_size`` ranks each receive one extra.
    """
    base, leftover = divmod(n, world_size)
    if rank < leftover:
        return base + 1
    return base
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
@njit
def get_local_range(n, rank, world_size):
    """Return the half-open interval ``(start, stop)`` of global indices
    owned by ``rank``, consistent with ``get_local_size``.
    """
    start = 0
    # accumulate the sizes of all ranks below this one
    for lower_rank in range(rank):
        start += get_local_size(n, lower_rank, world_size)
    stop = start + get_local_size(n, rank, world_size)
    return start, stop
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
def parse_edges_to_unique(edges):
    """Collect the distinct sites and canonicalized edges from ``edges``.

    Each edge ``(i, j)`` is normalized so the smaller site comes first,
    then duplicates are discarded.

    Parameters
    ----------
    edges : Iterable[tuple[hashable, hashable]]
        The edges to parse.

    Returns
    -------
    sites : list of hashable
        The unique sites in the edges, sorted.
    edges : list of (hashable, hashable)
        The unique edges, sorted.
    """
    canonical_edges = {((j, i) if j < i else (i, j)) for i, j in edges}
    all_sites = {s for edge in canonical_edges for s in edge}
    return sorted(all_sites), sorted(canonical_edges)
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
class HilbertSpace:
    """Map a collection of 'sites' (any sequence of sortable, hashable
    objects) into a 'register', i.e. a linearly indexed range, optionally
    under a custom ordering. From this one can compute the size of the
    Hilbert space, including number conserving subspaces, and generate a
    compact 'bitbasis' with which to construct sparse operators.

    Parameters
    ----------
    sites : int or sequence of hashable objects
        The sites to map into a linear register. If an integer, simply use
        ``range(sites)``.
    order : callable or sequence of hashable objects, optional
        If provided, use this to order the sites. If a callable, it should
        be a sorting key. If a sequence, it should be a permutation of the
        sites, and ``key=order.index`` will be used.
    """

    def __init__(self, sites, order=None):
        if isinstance(sites, int):
            sites = range(sites)
        if (order is not None) and not callable(order):
            # a sequence was supplied: order sites by position within it
            order = order.index
        self._order = order
        self._build_mappings(sites)

    def _build_mappings(self, sites):
        # sort under the current key and rebuild both lookup directions
        self._sites = tuple(sorted(sites, key=self._order))
        self._mapping_inv = dict(enumerate(self._sites))
        self._mapping = {s: i for i, s in self._mapping_inv.items()}

    def set_ordering(self, order=None):
        """Re-sort the existing sites, as if ``order`` had been supplied at
        construction.
        """
        if (order is not None) and not callable(order):
            order = order.index
        self._order = order
        self._build_mappings(self._sites)

    @classmethod
    def from_edges(cls, edges, order=None):
        """Construct a HilbertSpace from a set of edges, which are pairs of
        sites.
        """
        sites, _ = parse_edges_to_unique(edges)
        return cls(sites, order=order)

    def site_to_reg(self, site):
        """Convert a site to a linear register index."""
        return self._mapping[site]

    def reg_to_site(self, reg):
        """Convert a linear register index back to a site."""
        return self._mapping_inv[reg]

    def has_site(self, site):
        """Check if this HilbertSpace contains a given site."""
        return site in self._mapping

    def config_to_bit(self, config):
        """Encode a 'configuration' as a bit.

        Parameters
        ----------
        config : dict[hashable, int]
            A dictionary mapping sites to their occupation number / spin.

        Returns
        -------
        bit : int
            The bit corresponding to this configuration.
        """
        bit = 0
        for site, occupied in config.items():
            if occupied:
                # occupied sites set the bit at their register position
                bit |= 1 << self.site_to_reg(site)
        return bit

    def config_to_flatconfig(self, config):
        """Turn a configuration into a flat configuration, assuming the
        order given by this ``HilbertSpace``.

        Parameters
        ----------
        config : dict[hashable, int]
            A dictionary mapping sites to their occupation number / spin.

        Returns
        -------
        flatconfig : ndarray[uint8]
            A flat configuration, with the occupation number of each site
            in the order given by this ``HilbertSpace``.
        """
        flatconfig = np.empty(self.nsites, dtype=np.uint8)
        for site, occupation in config.items():
            flatconfig[self.site_to_reg(site)] = occupation
        return flatconfig

    def bit_to_config(self, bit):
        """Decode a bit to a configuration.

        Parameters
        ----------
        bit : int
            The bit to decode.

        Returns
        -------
        config : dict[hashable, int]
            A dictionary mapping sites to their occupation number / spin.
        """
        return {
            site: (bit >> reg) & 1 for site, reg in self._mapping.items()
        }

    def flatconfig_to_config(self, flatconfig):
        """Turn a flat configuration into a configuration, assuming the
        order given by this ``HilbertSpace``.

        Parameters
        ----------
        flatconfig : ndarray[uint8]
            A flat configuration, with the occupation number of each site
            in the order given by this ``HilbertSpace``.

        Returns
        -------
        config : dict[hashable, int]
            A dictionary mapping sites to their occupation number / spin.
        """
        return {
            self.reg_to_site(reg): flatconfig[reg]
            for reg in range(self.nsites)
        }

    def rand_config(self, k=None):
        """Get a random configuration, optionally requiring it has ``k``
        bits set.
        """
        if k is None:
            return {site: np.random.randint(2) for site in self.sites}
        # draw a uniform rank within the fixed-k subspace, then unrank it
        r = np.random.randint(self.get_size(k))
        return self.bit_to_config(rank_to_bit(r, self.nsites, k))

    @property
    def sites(self):
        """The ordered tuple of all sites in the Hilbert space."""
        return self._sites

    @property
    def nsites(self):
        """The total number of sites in the Hilbert space."""
        return len(self._sites)

    def get_size(self, *k):
        """Compute the size of this Hilbert space, optionally taking into
        account number / z-spin conservation.

        Parameters
        ----------
        k : int or tuple of (int, int)
            If provided, compute the size of number conserving subspace(s)::

                - If a single int, compute the size of the subspace with
                  ``k`` particles / up states: ``comb(nsites, k)``.
                - If a tuple of (int, int), compute the size of the
                  subspace of the product of spaces where each pair (n, k)
                  corresponds to n sites with k particles / up states. The
                  sum of every n should equal ``nsites``.
        """
        if not k:
            # unrestricted: each site contributes a factor of 2
            return 2**self.nsites

        if (len(k) == 1) and isinstance(k[0], int):
            # a single integer - k particles over all sites
            return comb(self.nsites, k[0])

        size = 1
        nsites_counted = 0
        for subspace_n, subspace_k in k:
            size *= comb(subspace_n, subspace_k)
            nsites_counted += subspace_n

        if nsites_counted != self.nsites:
            raise ValueError("`k` must sum to the number of sites")

        return size

    def get_bitbasis(self, *k, dtype=np.int64):
        """Get a basis for the Hilbert space, in terms of an integer
        bitarray, optionally taking into account number / z-spin
        conservation.

        Parameters
        ----------
        k : int or tuple of (int, int)
            If provided, get the basis for a number conserving subspace(s)::

                - If a single int, compute the size of the subspace with
                  ``k`` particles / up states: ``comb(nsites, k)``.
                - If a tuple of (int, int), compute the size of the
                  subspace of the product of spaces where each pair (n, k)
                  corresponds to n sites with k particles / up states. The
                  sum of every n should equal ``nsites``.
        dtype : np.dtype, optional
            The dtype of the bitarray, should be an integer type with at
            least ``nsites`` bits.

        Returns
        -------
        bits : numpy.ndarray
            The basis, each integer element being a binary representation
            of a configuration.
        """
        if not k:
            # the full basis: every integer with up to nsites bits
            return np.arange(1 << self.nsites, dtype=dtype)

        if (len(k) == 1) and isinstance(k[0], int):
            # normalize the bare-integer form to the tuple-of-pairs form
            k = ((self.nsites, k[0]),)

        return get_number_bitbasis(*k, dtype=dtype)

    def __repr__(self):
        return (
            f"HilbertSpace(nsites={self.nsites}, total_size={self.get_size()})"
        )
|
|
318
|
+
|
|
319
|
+
|
|
320
|
+
# Definitions of the supported single-site (dimension 2) operators, keyed by
# name. Each operator is stored sparsely as ``{col: (row, coeff)}``, i.e. the
# 2x2 matrix has entry ``coeff`` at ``[row, col]`` (see ``get_mat``), with at
# most one entry per column.
_OPMAP = {
    "I": {0: (0, 1.0), 1: (1, 1.0)},
    # pauli matrices
    "x": {0: (1, 1.0), 1: (0, 1.0)},
    "y": {0: (1, 1.0j), 1: (0, -1.0j)},
    "z": {0: (0, 1.0), 1: (1, -1.0)},
    # spin 1/2 matrices (scaled paulis)
    "sx": {0: (1, 0.5), 1: (0, 0.5)},
    "sy": {0: (1, 0.5j), 1: (0, -0.5j)},
    "sz": {0: (0, 0.5), 1: (1, -0.5)},
    # creation / annihilation operators
    "+": {0: (1, 1.0)},
    "-": {1: (0, 1.0)},
    # number, symmetric number, and hole operators
    "n": {1: (1, 1.0)},
    "sn": {0: (0, -0.5), 1: (1, 0.5)},
    "h": {0: (0, 1.0)},
}
|
|
338
|
+
|
|
339
|
+
|
|
340
|
+
@functools.lru_cache(maxsize=None)
def get_mat(op, dtype=None):
    """Return the dense 2x2 matrix for the named single-site operator
    ``op``, as defined in ``_OPMAP``.

    When ``dtype`` is not given it defaults to complex128 if any
    coefficient in the sparse definition is complex, else float64. The
    returned array is marked read-only, since the cache shares one object
    between all callers.
    """
    entries = _OPMAP[op]

    if dtype is None:
        has_complex = any(
            isinstance(coeff, complex) for (_, coeff) in entries.values()
        )
        dtype = np.complex128 if has_complex else np.float64

    mat = np.zeros((2, 2), dtype=dtype)
    for col, (row, val) in entries.items():
        mat[row, col] = val

    # freeze the shared, cached object
    mat.flags.writeable = False
    return mat
|
|
356
|
+
|
|
357
|
+
|
|
358
|
+
@functools.lru_cache(maxsize=2**14)
def simplify_single_site_ops(coeff, ops):
    """Simplify a sequence of operators acting on the same site.

    Parameters
    ----------
    coeff : float or complex
        The coefficient of the operator sequence.
    ops : tuple of str
        The operator sequence.

    Returns
    -------
    new_coeff : float or complex
        The simplified coefficient.
    new_op : str
        The single, simplified operator that the sequence maps to, up to
        scalar multiplication.

    Examples
    --------

    >>> simplify_single_site_ops(1.0, ('+', 'z', 'z', 'z', 'z', '-'))
    (1.0, 'n')

    >>> simplify_single_site_ops(1.0, ("x", "y", "z"))
    (-1j, 'I')

    """

    if len(ops) == 1:
        # nothing to combine
        return coeff, ops[0]

    # multiply all the 2x2 matrices together
    combo_mat = functools.reduce(operator.matmul, map(get_mat, ops))
    # use the largest-magnitude entry as the overall scale of the product
    combo_coeff = combo_mat.flat[np.argmax(np.abs(combo_mat))]

    if combo_coeff == 0.0:
        # null-term: the product is identically zero (e.g. '++' or '--')
        return 0, None

    # find the reference operator whose matrix matches, up to scale,
    # comparing both sides normalized by their dominant entry
    for op in _OPMAP:
        ref_mat = get_mat(op)
        ref_coeff = ref_mat.flat[np.argmax(np.abs(ref_mat))]
        if (
            (combo_mat / combo_coeff).round(12)
            == (ref_mat / ref_coeff).round(12)
        ).all():
            break
    else:
        # no supported operator matches the product
        raise ValueError(f"No match found for '{ops}'")

    # NOTE(review): the scale is applied as ref_coeff / combo_coeff rather
    # than its reciprocal -- this is self-consistent with the doctest
    # examples above, but worth confirming against the intended operator
    # ordering convention.
    coeff *= ref_coeff / combo_coeff
    if coeff.imag == 0.0:
        # drop a spurious complex type when the result is purely real
        coeff = coeff.real

    return coeff, op
|
|
416
|
+
|
|
417
|
+
|
|
418
|
+
class SparseOperatorBuilder:
|
|
419
|
+
"""Object for building operators with sparse structure. Specifically,
|
|
420
|
+
a sum of terms, where each term is a product of operators, where each of
|
|
421
|
+
these local operators acts on a single site and has at most one entry per
|
|
422
|
+
row.
|
|
423
|
+
|
|
424
|
+
Parameters
|
|
425
|
+
----------
|
|
426
|
+
terms : sequence, optional
|
|
427
|
+
The terms to initialize the builder with. ``add_term`` is simply called
|
|
428
|
+
on each of these.
|
|
429
|
+
hilbert_space : HilbertSpace
|
|
430
|
+
The Hilbert space to build the operator in. If this is not supplied
|
|
431
|
+
then a minimal Hilbert space will be constructed from the sites used,
|
|
432
|
+
when required.
|
|
433
|
+
"""
|
|
434
|
+
|
|
435
|
+
def __init__(self, terms=(), hilbert_space=None):
|
|
436
|
+
self._term_store = {}
|
|
437
|
+
self._sites_used = set()
|
|
438
|
+
self._hilbert_space = hilbert_space
|
|
439
|
+
self._coupling_map = None
|
|
440
|
+
for term in terms:
|
|
441
|
+
self.add_term(*term)
|
|
442
|
+
|
|
443
|
+
@property
|
|
444
|
+
def sites_used(self):
|
|
445
|
+
"""A tuple of the sorted coordinates/sites seen so far."""
|
|
446
|
+
return tuple(sorted(self._sites_used))
|
|
447
|
+
|
|
448
|
+
    @property
    def nsites(self):
        """The total number of coordinates/sites seen so far."""
        # delegates to the (possibly lazily constructed) Hilbert space
        return self.hilbert_space.nsites
|
|
452
|
+
|
|
453
|
+
@property
|
|
454
|
+
def terms(self):
|
|
455
|
+
"""A tuple of the simplified terms seen so far."""
|
|
456
|
+
return tuple((coeff, ops) for ops, coeff in self._term_store.items())
|
|
457
|
+
|
|
458
|
+
    @property
    def nterms(self):
        """The total number of distinct (simplified) terms seen so far."""
        return len(self._term_store)
|
|
462
|
+
|
|
463
|
+
@property
|
|
464
|
+
def locality(self):
|
|
465
|
+
"""The locality of the operator, the maximum support of any term."""
|
|
466
|
+
return max(len(ops) for ops in self._term_store)
|
|
467
|
+
|
|
468
|
+
@property
|
|
469
|
+
def hilbert_space(self):
|
|
470
|
+
"""The Hilbert space of the operator. Created from the sites seen
|
|
471
|
+
so far if not supplied at construction.
|
|
472
|
+
"""
|
|
473
|
+
if self._hilbert_space is None:
|
|
474
|
+
self._hilbert_space = HilbertSpace(self.sites_used)
|
|
475
|
+
return self._hilbert_space
|
|
476
|
+
|
|
477
|
+
    @property
    def coupling_map(self):
        # lazily built and cached representation of all terms suitable for
        # the numba kernels, keyed by register index rather than site
        # (see ``build_coupling_numba`` for the exact structure)
        if self._coupling_map is None:
            self._coupling_map = build_coupling_numba(
                self._term_store, self.hilbert_space.site_to_reg
            )
        return self._coupling_map
|
|
484
|
+
|
|
485
|
+
    def site_to_reg(self, site):
        """Get the register / linear index of coordinate ``site``."""
        return self.hilbert_space.site_to_reg(site)
|
|
488
|
+
|
|
489
|
+
    def reg_to_site(self, reg):
        """Get the site of register / linear index ``reg``."""
        return self.hilbert_space.reg_to_site(reg)
|
|
492
|
+
|
|
493
|
+
    def add_term(self, *coeff_ops):
        """Add a term to the operator.

        Parameters
        ----------
        coeff : float, optional
            The overall coefficient of the term.
        ops : sequence of tuple[str, hashable]
            The operators of the term, together with the sites they act on.
            Each term should be a pair of ``(operator, site)``, where
            ``operator`` can be:

                - ``'x'``, ``'y'``, ``'z'``: Pauli matrices
                - ``'sx'``, ``'sy'``, ``'sz'``: spin operators (i.e. scaled
                  Pauli matrices)
                - ``'+'``, ``'-'``: creation/annihilation operators
                - ``'n'``, ``'sn'``, or ``'h'``: number, symmetric
                  number (n - 1/2) and hole (1 - n) operators

            And ``site`` is a hashable object that represents the site that
            the operator acts on.

        """
        if isinstance(coeff_ops[0], (tuple, list)):
            # no leading scalar supplied - assume coeff is 1.0 and treat
            # every positional argument as an (operator, site) pair
            coeff = 1
            ops = coeff_ops
        else:
            coeff, *ops = coeff_ops

        if coeff == 0.0:
            # null-term: a zero coefficient contributes nothing
            return

        # collect operators acting on the same site, preserving their order
        collected = {}
        for op, site in ops:
            # check that the site is valid if the Hilbert space is known
            if (
                self._hilbert_space is not None
            ) and not self._hilbert_space.has_site(site):
                raise ValueError(f"Site {site} not in the Hilbert space.")
            self._sites_used.add(site)
            collected.setdefault(site, []).append(op)

        # simplify operators acting on the same site & don't add null-terms;
        # note the coefficient accumulates across sites as each product is
        # reduced to a single canonical operator
        simplified_ops = []
        for site, collected_ops in collected.items():
            coeff, op = simplify_single_site_ops(coeff, tuple(collected_ops))

            if op is None:
                # null-term (e.g. '++' or '--'): the whole product vanishes
                return

            if op != "I":
                # only need to record non-identity operators
                simplified_ops.append((op, site))

        key = tuple(simplified_ops)

        # merge with any existing identical term, dropping the entry
        # entirely if the combined coefficient cancels to zero
        new_coeff = self._term_store.pop(key, 0.0) + coeff
        if new_coeff != 0.0:
            self._term_store[key] = new_coeff
|
|
563
|
+
|
|
564
|
+
    def __iadd__(self, term):
        # support ``builder += (coeff, (op, site), ...)`` syntax
        self.add_term(*term)
        return self
|
|
567
|
+
|
|
568
|
+
def jordan_wigner_transform(self):
|
|
569
|
+
"""Transform the terms in this operator by pre-prending pauli Z
|
|
570
|
+
strings to all creation and annihilation operators, and then
|
|
571
|
+
simplifying the resulting terms.
|
|
572
|
+
"""
|
|
573
|
+
# TODO: check if transform has been applied already
|
|
574
|
+
# TODO: store untransformed terms, so we can re-order at will
|
|
575
|
+
|
|
576
|
+
old_term_store = self._term_store.copy()
|
|
577
|
+
self._term_store.clear()
|
|
578
|
+
|
|
579
|
+
for term, coeff in old_term_store.items():
|
|
580
|
+
if not term:
|
|
581
|
+
self.add_term(coeff, *term)
|
|
582
|
+
continue
|
|
583
|
+
|
|
584
|
+
ops, site = zip(*term)
|
|
585
|
+
if {"+", "-"}.intersection(ops):
|
|
586
|
+
new_term = []
|
|
587
|
+
|
|
588
|
+
for op, site in term:
|
|
589
|
+
reg = self.site_to_reg(site)
|
|
590
|
+
if op in {"+", "-"}:
|
|
591
|
+
for r in range(reg):
|
|
592
|
+
site_below = self.reg_to_site(r)
|
|
593
|
+
new_term.append(("z", site_below))
|
|
594
|
+
new_term.append((op, site))
|
|
595
|
+
|
|
596
|
+
self.add_term(coeff, *new_term)
|
|
597
|
+
else:
|
|
598
|
+
self.add_term(coeff, *term)
|
|
599
|
+
|
|
600
|
+
    def build_coo_data(self, *k, parallel=False):
        """Build the raw data for a sparse matrix in COO format. Optionally
        in a reduced k basis and in parallel.

        Parameters
        ----------
        k : int or tuple of (int, int)
            If provided, get the basis for a number conserving subspace(s)::

                - If a single int, compute the size of the subspace with
                  ``k`` particles / up states: ``comb(nsites, k)``.
                - If a tuple of (int, int), compute the size of the subspace
                  of the product of spaces where each pair (n, k) corresponds
                  to n sites with k particles / up states. The sum of every n
                  should equal ``nsites``.

        parallel : bool, optional
            Whether to build the matrix in parallel (multi-threaded).

        Returns
        -------
        coo : array
            The data entries for the sparse matrix in COO format.
        cis : array
            The row indices for the sparse matrix in COO format.
        cjs : array
            The column indices for the sparse matrix in COO format.
        N : int
            The total number of basis states.
        """
        hs = self.hilbert_space
        # enumerate the (possibly number-conserving) basis as integer bits
        bits = hs.get_bitbasis(*k)
        coupling_map = self.coupling_map
        # the heavy lifting happens in the numba-compiled kernel
        coo, cis, cjs = build_coo_numba(bits, coupling_map, parallel=parallel)
        return coo, cis, cjs, bits.size
|
|
635
|
+
|
|
636
|
+
def build_sparse_matrix(self, *k, stype="csr", parallel=False):
    """Construct this operator as a ``scipy.sparse`` matrix, optionally
    restricted to a number conserving subspace and built multi-threaded.

    Parameters
    ----------
    k : int or tuple of (int, int)
        If provided, restrict to a number conserving subspace(s): a single
        int selects the subspace with ``k`` particles / up states, while
        pairs ``(n, k)`` select the product of such subspaces.
    stype : str, optional
        The sparse format to return, e.g. ``"csr"``, ``"csc"``, ``"coo"``.
    parallel : bool, optional
        Whether to build the matrix in parallel (multi-threaded).

    Returns
    -------
    scipy.sparse matrix
    """
    import scipy.sparse as sp

    # gather the raw COO data then assemble the matrix
    data, rows, cols, dim = self.build_coo_data(*k, parallel=parallel)
    mat = sp.coo_matrix((data, (rows, cols)), shape=(dim, dim))
    # convert to the requested format if it isn't already COO
    return mat if stype == "coo" else mat.asformat(stype)
|
|
666
|
+
|
|
667
|
+
def build_dense(self):
    """Get the dense (`numpy.ndarray`) matrix representation of this
    operator, by first building it sparse in COO format and densifying.
    """
    return self.build_sparse_matrix(stype="coo").toarray()
|
|
673
|
+
|
|
674
|
+
def build_local_terms(self):
    """Get a dictionary of local terms, where each key is a sorted tuple
    of sites, and each value is the local matrix representation of the
    operator on those sites. For use with e.g. tensor network algorithms.

    Note terms acting on the same sites are summed together and the size of
    each local matrix is exponential in the locality of that term.

    Returns
    -------
    Hk : dict[tuple[hashable], numpy.ndarray]
        The local terms.
    """
    local_terms = {}
    for term, coeff in self._term_store.items():
        ops, sites = zip(*term)
        # kron the single site operators together and scale by the coeff
        mats = [get_mat(op) for op in ops]
        block = coeff * functools.reduce(np.kron, mats)
        # accumulate into any existing term on the same sites
        if sites in local_terms:
            local_terms[sites] += block
        else:
            local_terms[sites] = block.copy()
    return local_terms
|
|
698
|
+
|
|
699
|
+
def config_coupling(self, config):
    """Get a list of other configurations coupled to ``config`` by this
    operator, and the corresponding coupling coefficients. This is for
    use with VMC for example.

    Parameters
    ----------
    config : dict[site, int]
        The configuration to get the coupling for.

    Returns
    -------
    coupled_configs : list[dict[site, int]]
        Each distinct configuration coupled to ``config``.
    coeffs: list[float]
        The corresponding coupling coefficients.
    """
    hs = self.hilbert_space
    # encode the configuration as an integer bitstring
    bi = hs.config_to_bit(config)
    coupled_bits, coeffs = coupled_bits_numba(bi, self.coupling_map)
    # decode each coupled bitstring back into a configuration
    coupled_configs = [hs.bit_to_config(bj) for bj in coupled_bits]
    return coupled_configs, coeffs
|
|
721
|
+
|
|
722
|
+
def show(self, filler="."):
    """Print an ascii representation of the terms in this operator.

    Each term is printed as one row: a column per site (``filler`` marks
    identity), with the term's operator labels placed at their registers
    and the signed coefficient appended.

    Parameters
    ----------
    filler : str, optional
        The character used to mark sites the term does not act on.
    """
    print(self)
    # note: the enumeration index was unused, so iterate items directly
    for term, coeff in self._term_store.items():
        s = [f"{filler} "] * self.nsites
        for op, site in term:
            s[self.site_to_reg(site)] = f"{op:<2}"
        print("".join(s), f"{coeff:+}")
|
|
730
|
+
|
|
731
|
+
def build_state_machine_greedy(self):
    """Greedily build a 'finite state machine' directed graph describing
    this operator as an MPO: nodes are ``(register, rail)`` pairs, and each
    edge carries a single-site operator label (plus possibly a coefficient)
    to be placed in one MPO tensor. Shared left/right operator strings are
    merged to keep the number of rails (the bond dimension) small.

    Returns
    -------
    networkx.DiGraph
        With graph attributes ``nsites``, ``num_rails`` and
        ``max_num_rails``.
    """
    # XXX: optimal method : https://arxiv.org/abs/2006.02056

    import networkx as nx

    # - nodes of the state machine are a 2D grid of (register, 'rail'),
    # with the maximum number of rails giving the eventual bond dimension
    # - there are N + 1 registers for N sites
    # - each edge from (r, i) to (r + 1, j) represents a term that will be
    # placed in the rth MPO tensor at entry like W[i, j, :, :] = op
    # - each node has either a single inwards or outwards edge
    G = nx.DiGraph()
    G.add_node((0, 0))

    # count how many rails are at each register
    num_rails = [1] + [0] * self.nsites

    # track which terms pass through each edge and vice versa
    edges_to_terms = {}
    terms_to_edges = {}
    # so that we can place all the coefficients at the end
    coeffs_to_place = {}

    # NOTE: the helpers below close over the loop variables `op`, `t`,
    # `reg`, `current_node` and `string` of the main loop further down -
    # they must only be called from within that loop.

    def new_edge(a, b):
        # need to track which terms pass through this edge so we can
        # place the coefficient somewhere unique at the end
        G.add_edge(a, b, op=op, weight=1, coeff=None)
        edges_to_terms.setdefault((a, b), set()).add(t)
        terms_to_edges.setdefault(t, []).append((a, b))

    def check_right():
        # check if can **right share**
        # - check all existing potential next nodes
        # - current op must match or not exist
        # - right strings must match
        # - must be single output node
        for rail in range(num_rails[reg + 1]):
            cand_node = (reg + 1, rail)
            if G.out_degree(cand_node) > 1:
                continue

            if G.nodes[cand_node]["out_string"] != string[reg + 1 :]:
                continue

            e = (current_node, cand_node)
            if e not in G.edges:
                new_edge(current_node, cand_node)
                return cand_node
            else:
                if G.edges[e]["op"] != op:
                    continue
                # reuse the existing matching edge
                G.edges[e]["weight"] += 1
                edges_to_terms.setdefault(e, set()).add(t)
                terms_to_edges.setdefault(t, []).append(e)
                return cand_node

        # XXX: if we can right share, don't need to do anything
        # more since whole remaining string is shared?

    def check_left():
        # check if can **left share**
        # - check all out edges
        # - current op must match AND
        # - must be single input node
        for e in G.edges(current_node):
            cand_node = e[1]
            if G.in_degree(cand_node) <= 1:
                if G.edges[e]["op"] == op:
                    G.edges[e]["weight"] += 1
                    edges_to_terms.setdefault(e, set()).add(t)
                    terms_to_edges.setdefault(t, []).append(e)
                    return cand_node

    def create_new():
        # create a new rail at the next register
        next_node = (reg + 1, num_rails[reg + 1])
        num_rails[reg + 1] += 1
        new_edge(current_node, next_node)
        return next_node

    for t, (term, coeff) in enumerate(self._term_store.items()):
        # build full string for this term including identity ops
        rmap = {self.site_to_reg(site): op for op, site in term}
        string = tuple(rmap.get(r, "I") for r in range(self.nsites))

        current_node = (0, 0)
        for reg, op in enumerate(string):
            # prefer right sharing, then left sharing, else a new rail
            cand_node = check_right()
            if cand_node is not None:
                # can share right part of string
                current_node = cand_node
            else:
                cand_node = check_left()
                if cand_node is not None:
                    # can share left part of string
                    current_node = cand_node
                else:
                    # have to create new node
                    current_node = create_new()

            if G.out_degree(current_node) <= 1:
                # record what the right matching string is
                G.nodes[current_node]["out_string"] = string[reg + 1 :]
            else:
                # ambiguous - node no longer right-shareable
                G.nodes[current_node]["out_string"] = None

        if coeff != 1.0:
            # record that we still need to place coeff somewhere
            coeffs_to_place[t] = coeff

    G.graph["nsites"] = self.nsites
    G.graph["num_rails"] = tuple(num_rails)
    G.graph["max_num_rails"] = max(num_rails)

    # how many terms pass through each edge
    edge_scores = {e: len(ts) for e, ts in edges_to_terms.items()}

    # the least congested edge a term passes through
    term_scores = {
        t: min(edge_scores[e] for e in es)
        for t, es in terms_to_edges.items()
    }

    def place_coeff(edge, coeff):
        G.edges[edge]["coeff"] = coeff
        edge_scores.pop(edge)
        # every term passing through this edge is multiplied by this coeff
        for t in edges_to_terms[edge]:
            new_coeff = coeffs_to_place.pop(t, 1.0) / coeff
            if new_coeff != 1.0:
                # if another term doesn't have matching coeff, still need
                # to place the updated coeff
                coeffs_to_place[t] = new_coeff

    while coeffs_to_place:
        # get the remaining term with the maximum congestion
        t = max(coeffs_to_place, key=term_scores.get)
        # get the least congested edge it passes through
        best = min(terms_to_edges[t], key=edge_scores.get)
        # place it and update everything
        place_coeff(best, coeffs_to_place[t])

    return G
|
|
874
|
+
|
|
875
|
+
def draw_state_machine(
    self,
    method="greedy",
    figsize="auto",
):
    """Draw the MPO state machine graph with matplotlib: each edge is an
    arrow labelled with its operator (and coefficient, if placed there),
    with arrow width growing logarithmically with the edge weight.

    Parameters
    ----------
    method : str, optional
        How to build the state machine; only ``"greedy"`` is supported.
    figsize : "auto" or tuple, optional
        Figure size; "auto" scales with the number of sites and rails.
    """
    import math
    from matplotlib import pyplot as plt
    from quimb.schematic import auto_colors

    if method == "greedy":
        G = self.build_state_machine_greedy()
    else:
        raise ValueError(f"Unknown method {method}")

    def labelled_arrow(ax, p1, p2, label, color, width):
        # draw an arrow p1 -> p2 with `label` rotated along its direction
        angle = math.atan2(p2[1] - p1[1], p2[0] - p1[0])
        ax.annotate(
            "",
            xy=p2,
            xycoords="data",
            xytext=p1,
            textcoords="data",
            arrowprops=dict(
                arrowstyle="->",
                color=color,
                alpha=0.75,
                linewidth=width,
            ),
        )
        p_middle = (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2
        ax.text(
            *p_middle,
            label,
            color=color,
            ha="center",
            va="center",
            rotation=angle * 180 / math.pi,
            alpha=1.0,
            transform_rotates_text=True,
        )

    if figsize == "auto":
        width = G.graph["nsites"]
        # maximum number of nodes on any rail
        height = G.graph["max_num_rails"] / 2
        figsize = (width, height)

    fig, ax = plt.subplots(figsize=figsize)
    fig.patch.set_alpha(0.0)
    ax.set_axis_off()
    ax.set_xlim(-0.5, G.graph["nsites"] + 0.5)
    ax.set_ylim(-0.5, G.graph["max_num_rails"] + 0.5)

    # draw each edge as a colored, labelled node
    ops = sorted(
        {data["op"] for _, _, data in G.edges(data=True)} - {"I"}, key=str
    )
    all_colors = auto_colors(len(ops))
    colors = {op: c for op, c in zip(ops, all_colors)}
    # identity edges are drawn in neutral grey
    colors["I"] = "grey"
    for n1, n2, data in G.edges(data=True):
        color = colors[data["op"]]
        width = math.log2(1 + data["weight"])
        label = data["op"]
        if data.get("coeff", None) is not None:
            label += f" * {data['coeff']}"
        label += "\n"
        labelled_arrow(ax, n1, n2, label, color, width)

    # label which MPO site along the bottom
    for i in range(G.graph["nsites"]):
        ax.text(
            i + 0.5,
            -1.0,
            "$W_{" + str(i) + "}$",
            ha="center",
            va="center",
            color=(0.5, 0.5, 0.5),
            fontsize=12,
        )

    plt.show()
    plt.close(fig)
|
|
958
|
+
|
|
959
|
+
def build_mpo(self, method="greedy", **mpo_opts):
    """Build a matrix product operator (MPO) from the state machine graph:
    each graph edge from ``(reg, i)`` to ``(reg + 1, j)`` places its
    (possibly coefficient-scaled) operator matrix at ``W[reg][i, j, :, :]``.

    Parameters
    ----------
    method : str, optional
        How to build the state machine; only ``"greedy"`` is supported.
    mpo_opts
        Forwarded to ``quimb.tensor.MatrixProductOperator``.

    Returns
    -------
    quimb.tensor.MatrixProductOperator
    """
    import numpy as np
    import quimb as qu
    import quimb.tensor as qtn

    if method == "greedy":
        G = self.build_state_machine_greedy()
    else:
        raise ValueError(f"Unknown method {method}.")

    # one (dl, dr, 2, 2) tensor per site, bond dims from the rail counts
    # NOTE(review): physical dimension is hard-coded to 2 (qubit/spinless
    # fermion sites)
    Wts = [
        np.zeros((dl, dr, 2, 2), dtype=float)
        for dl, dr in qu.utils.pairwise(G.graph["num_rails"])
    ]

    for node_a, node_b, data in G.edges(data=True):
        op = data["op"]
        # the coeff placement step may have attached a coefficient here
        coeff = data.get("coeff", None)
        if coeff is not None:
            mat = coeff * get_mat(op)
        else:
            mat = get_mat(op)

        rega, raila = node_a
        _, railb = node_b
        Wts[rega][raila, railb, :, :] = mat

    # contract away the trivial outer bond indices
    Wts[0] = Wts[0].sum(axis=0)
    Wts[-1] = Wts[-1].sum(axis=1)

    return qtn.MatrixProductOperator(Wts, **mpo_opts)
|
|
990
|
+
|
|
991
|
+
def __repr__(self):
    """Short summary of the operator builder.

    Note: the original concatenated an extra ``")"`` fragment, producing a
    doubled closing parenthesis (``...locality=2))``); the stray fragment
    is removed here.
    """
    return (
        f"{self.__class__.__name__}"
        f"(nsites={self.nsites}, "
        f"nterms={self.nterms}, "
        f"locality={self.locality})"
    )
|
|
999
|
+
|
|
1000
|
+
|
|
1001
|
+
@njit
def get_nth_bit(val, n):
    """Get the nth bit of val.

    Examples
    --------

    >>> get_nth_bit(0b101, 1)
    0
    """
    # mask out bit n, then shift it down to position 0
    return (val & (1 << n)) >> n
|
|
1012
|
+
|
|
1013
|
+
|
|
1014
|
+
@njit
def flip_nth_bit(val, n):
    """Flip (toggle) the nth bit of val.

    Examples
    --------

    >>> bin(flip_nth_bit(0b101, 1))
    '0b111'
    """
    # XOR with a single-bit mask toggles exactly that bit
    return val ^ (1 << n)
|
|
1025
|
+
|
|
1026
|
+
|
|
1027
|
+
@njit
def comb(n, k):
    """Compute the binomial coefficient n choose k.

    Computed multiplicatively, dividing after each multiplication so every
    intermediate value stays an exact integer. Returns 0 when k > n.
    """
    result = 1
    for i in range(1, k + 1):
        # multiply by (n - i + 1) before dividing by i keeps this exact
        result = result * (n - i + 1) // i
    return result
|
|
1035
|
+
|
|
1036
|
+
|
|
1037
|
+
@njit
def get_all_equal_weight_bits(n, k, dtype=np.int64):
    """Get an array of all 'bits' (integers), with n bits, and k of them set.

    The integers are generated in increasing order, starting from the
    smallest value with k bits set, using the 'next lexicographic bit
    permutation' trick (Gosper's hack).
    """
    if k == 0:
        # only the empty configuration
        return np.array([0], dtype=dtype)

    m = comb(n, k)
    b = np.empty(m, dtype=dtype)
    i = 0
    # smallest integer with k bits set: 0b0...011...1
    val = (1 << k) - 1
    while val < (1 << (n)):
        b[i] = val
        # Gosper's hack: compute the next integer with the same popcount
        c = val & -val
        r = val + c
        val = (((r ^ val) >> 2) // c) | r
        i += 1
    return b
|
|
1054
|
+
|
|
1055
|
+
|
|
1056
|
+
@njit
def bit_to_rank(b, n, k):
    """Given a bitstring b, return the rank of the bitstring in the
    basis of all bitstrings of length n with k bits set. Adapted from
    https://dlbeer.co.nz/articles/kwbs.html.
    """
    # c tracks how many strings remain in the current subtree
    c = comb(n, k)
    r = 0
    while n:
        # number of remaining strings whose current (highest) bit is 0
        c0 = c * (n - k) // n
        if (b >> n - 1) & 1:
            # bit set: skip past all strings with this bit unset
            r += c0
            k -= 1
            c -= c0
        else:
            c = c0
        n -= 1
    return r
|
|
1074
|
+
|
|
1075
|
+
|
|
1076
|
+
@njit
def rank_to_bit(r, n, k):
    """Given a rank r, return the bitstring of length n with k bits set
    that has rank r in the basis of all bitstrings of length n with k
    bits set. Adapted from https://dlbeer.co.nz/articles/kwbs.html.

    This is the inverse of ``bit_to_rank``.
    """
    b = 0
    # c tracks how many strings remain in the current subtree
    c = comb(n, k)
    while n:
        # number of remaining strings whose current (highest) bit is 0
        c0 = c * (n - k) // n
        if r >= c0:
            # rank falls in the right subtree: set this bit
            b |= 1 << (n - 1)
            r -= c0
            k -= 1
            c -= c0
        else:
            c = c0
        n -= 1
    return b
|
|
1095
|
+
|
|
1096
|
+
|
|
1097
|
+
@njit
def _recursively_fill_flatconfigs(flatconfigs, n, k, c, r):
    """Fill rows ``r : r + c`` of ``flatconfigs`` with every length-n,
    k-bits-set configuration, in rank order, writing column ``-n``
    (the bit currently being decided) at each recursion level.
    """
    # c0 = number of configurations with the current bit unset
    c0 = c * (n - k) // n
    # set the entries of the left binary subtree
    flatconfigs[r : r + c0, -n] = 0
    # set the entries of the right binary subtree
    flatconfigs[r + c0 : r + c, -n] = 1
    if n > 1:
        # process each subtree recursively
        _recursively_fill_flatconfigs(flatconfigs, n - 1, k, c0, r)
        _recursively_fill_flatconfigs(
            flatconfigs, n - 1, k - 1, c - c0, r + c0
        )
|
|
1110
|
+
|
|
1111
|
+
|
|
1112
|
+
@njit
def get_all_equal_weight_flatconfigs(n, k):
    """Get every flat configuration of length n with k bits set.

    Returns a ``(comb(n, k), n)`` array of uint8, one configuration per
    row, in rank order.
    """
    c = comb(n, k)
    flatconfigs = np.empty((c, n), dtype=np.uint8)
    # fill all rows in-place via the recursive subtree helper
    _recursively_fill_flatconfigs(flatconfigs, n, k, c, 0)
    return flatconfigs
|
|
1119
|
+
|
|
1120
|
+
|
|
1121
|
+
@njit
def flatconfig_to_bit(flatconfig):
    """Given a flat configuration, return the corresponding bitstring.

    Entry ``i`` of the configuration maps to bit ``i`` of the integer.
    """
    b = 0
    for pos in range(len(flatconfig)):
        if flatconfig[pos]:
            b |= 1 << pos
    return b
|
|
1129
|
+
|
|
1130
|
+
|
|
1131
|
+
@njit
def flatconfig_to_rank(flatconfig, n, k):
    """Given a flat configuration ``flatconfig``, return the rank of the
    bitstring in the basis of all bitstrings of length ``n`` with ``k`` bits
    set. Adapted from https://dlbeer.co.nz/articles/kwbs.html.
    """
    # c tracks how many strings remain in the current subtree
    c = comb(n, k)
    r = 0
    while n:
        # number of remaining strings whose current entry is 0
        c0 = c * (n - k) // n
        if flatconfig[-n]:
            # entry set: skip past all strings with this entry unset
            r += c0
            k -= 1
            c -= c0
        else:
            c = c0
        n -= 1
    return r
|
|
1149
|
+
|
|
1150
|
+
|
|
1151
|
+
@njit
def rank_to_flatconfig(r, n, k):
    """Given a rank ``r``, return the flat configuration of length ``n``
    with ``k`` bits set that has rank ``r`` in the basis of all bitstrings of
    length ``n`` with ``k`` bits set. Adapted from
    https://dlbeer.co.nz/articles/kwbs.html.

    This is the inverse of ``flatconfig_to_rank``.
    """
    flatconfig = np.zeros(n, dtype=np.uint8)
    # c tracks how many strings remain in the current subtree
    c = comb(n, k)
    while n:
        # number of remaining strings whose current entry is 0
        c0 = c * (n - k) // n
        if r >= c0:
            # rank falls in the right subtree: set this entry
            flatconfig[-n] = 1
            r -= c0
            k -= 1
            c -= c0
        else:
            c = c0
        n -= 1
    return flatconfig
|
|
1171
|
+
|
|
1172
|
+
|
|
1173
|
+
@njit
def product_of_bits(b1, b2, n2, dtype=np.int64):
    """Get the outer product of two bit arrays.

    Each output entry concatenates one element of ``b1`` (shifted into the
    high bits) with one element of ``b2`` (the low ``n2`` bits).

    Parameters
    ----------
    b1, b2 : array
        The bit arrays to take the outer product of.
    n2 : int
        The number of bits in ``b2``
    """
    out = np.empty(len(b1) * len(b2), dtype=dtype)
    idx = 0
    for x in b1:
        # hoist the shift - it is constant over the inner loop
        hi = x << n2
        for y in b2:
            out[idx] = hi | y
            idx += 1
    return out
|
|
1191
|
+
|
|
1192
|
+
|
|
1193
|
+
def get_number_bitbasis(*nk_pairs, dtype=np.int64):
    """Create a bit basis with number conservation.

    Parameters
    ----------
    nk_pairs : sequence of (int, int)
        Each element is a pair (n, k) where n is the number of bits, and k is
        the number of bits that are set. The basis will be the product of all
        supplied pairs.
    dtype : numpy dtype, optional
        Integer dtype of the returned basis array.

    Returns
    -------
    basis : ndarray
        An array of integers, each representing a bit string. The size will be
        ``prod(comb(n, k) for n, k in nk_pairs)``.

    Examples
    --------

    A single number conserving basis:

    >>> for i, b in enumerate(get_number_bitbasis((4, 2))):
    >>>     print(f"{i}: {b:0>4b}")
    0: 0011
    1: 0101
    2: 0110
    3: 1001
    4: 1010
    5: 1100

    A product of two number conserving bases, e.g. n_up and n_down:

    >>> for b in get_number_bitbasis((3, 2), (3, 1)):
    >>>     print(f"{b:0>6b}")
    011001
    011010
    011100
    101001
    101010
    101100
    110001
    110010
    110100

    """
    configs = None
    for n, k in nk_pairs:
        # all n-bit integers with exactly k bits set, in increasing order
        next_configs = get_all_equal_weight_bits(n, k, dtype=dtype)
        if configs is None:
            configs = next_configs
        else:
            # combine with the basis so far: shift previous configs up by
            # n bits and concatenate every pairing
            configs = product_of_bits(configs, next_configs, n, dtype=dtype)
    return configs
|
|
1246
|
+
|
|
1247
|
+
|
|
1248
|
+
@njit
def build_bitmap(configs):
    """Build a map of bits to linear indices, suitable for use with numba."""
    bitmap = {}
    for i, b in enumerate(configs):
        bitmap[b] = i
    return bitmap
|
|
1252
|
+
|
|
1253
|
+
|
|
1254
|
+
def build_coupling_numba(term_store, site_to_reg):
    """Create a sparse nested dictionary of how each term couples each
    local site configuration to which other local site configuration, and
    with what coefficient, suitable for use with numba.

    Parameters
    ----------
    term_store : dict[term, coeff]
        The terms of the operator.
    site_to_reg : callable
        A function that maps a site to a linear register index.

    Returns
    -------
    coupling_map : numba.typed.Dict
        A nested numba dictionary of the form
        ``{term: {reg: {bit_in: (bit_out, coeff), ...}, ...}, ...}``.
    """
    from numba.core import types
    from numba.typed import Dict

    # declare the nested typed-dict value types from the inside out:
    # innermost value is (coupled bit, coefficient)
    ty_xj = types.Tuple((types.int64, types.float64))
    ty_xi = types.DictType(types.int64, ty_xj)
    ty_site = types.DictType(types.int64, ty_xi)
    coupling_map = Dict.empty(types.int64, ty_site)

    # for term t ...
    for t, (term, coeff) in enumerate(term_store.items()):
        first = True
        # which couples sites with product of ops ...
        for op, site in term:
            reg = site_to_reg(site)
            # -> bit `xi` at `reg` is coupled to `xj` with coeff `cij`
            #        :     reg
            # ...10010...  xi=0 ->
            # ...10110...  xj=1  with coeff cij
            for xi, (xj, cij) in _OPMAP[op].items():
                if first:
                    # absorb overall coefficient into first coupling
                    cij = coeff * cij

                # populate just the term/reg/bit maps we need
                coupling_map.setdefault(
                    t, Dict.empty(types.int64, ty_xi)
                ).setdefault(reg, Dict.empty(types.int64, ty_xj))[xi] = (
                    xj,
                    cij,
                )
            first = False

    return coupling_map
|
|
1305
|
+
|
|
1306
|
+
|
|
1307
|
+
def build_coupling(term_store, site_to_reg):
    """Create a sparse nested dictionary of how each term couples each
    local site configuration to which other local site configuration, and
    with what coefficient. Pure-python counterpart of
    ``build_coupling_numba``.

    Parameters
    ----------
    term_store : dict[term, coeff]
        The terms of the operator.
    site_to_reg : callable
        A function that maps a site to a linear register index.

    Returns
    -------
    coupling_map : dict
        A nested dictionary of the form
        ``{term: {reg: {bit_in: (bit_out, coeff), ...}, ...}, ...}``.
    """
    coupling_map = dict()
    # for term t ...
    for t, (term, coeff) in enumerate(term_store.items()):
        first = True
        # which couples sites with product of ops ...
        for op, site in term:
            reg = site_to_reg(site)
            # -> bit `xi` at `reg` is coupled to `xj` with coeff `cij`
            #        :     reg
            # ...10010...  xi=0 ->
            # ...10110...  xj=1  with coeff cij
            for xi, (xj, cij) in _OPMAP[op].items():
                if first:
                    # absorb overall coefficient into first coupling
                    cij = coeff * cij

                # populate just the term/reg/bit maps we need
                coupling_map.setdefault(t, {}).setdefault(reg, {})[xi] = (
                    xj,
                    cij,
                )
            first = False

    return coupling_map
|
|
1332
|
+
|
|
1333
|
+
|
|
1334
|
+
@njit(nogil=True)
def coupled_flatconfigs_numba(flatconfig, coupling_map):
    """Get the coupled flat configurations for a given flat configuration
    and coupling map.

    Parameters
    ----------
    flatconfig : ndarray[uint8]
        The flat configuration to get the coupled configurations for.
    coupling_map : numba.typed.Dict
        A nested numba dictionary of the form
        ``{term: {reg: {bit_in: (bit_out, coeff), ...}, ...}, ...}``.

    Returns
    -------
    coupled_flatconfigs : ndarray[uint8]
        A list of coupled flat configurations, each with the corresponding
        coefficient.
    coeffs : ndarray[float64]
        The coefficients for each coupled flat configuration.
    """
    buf_ptr = 0
    bjs = np.empty((len(coupling_map), flatconfig.size), dtype=np.uint8)
    cijs = np.empty(len(coupling_map), dtype=np.float64)
    for coupling_t in coupling_map.values():
        # hij accumulates the product of coefficients across registers
        # (the original reused `cij` here, squaring the last register's
        # coefficient and dropping earlier ones - cf. the accumulator in
        # ``_build_coo_numba_core``)
        hij = 1.0
        bj = flatconfig.copy()
        for reg, coupling_t_reg in coupling_t.items():
            xi = flatconfig[reg]
            if xi not in coupling_t_reg:
                # zero coupling - whole branch dead
                break
            # update coeff and config
            xj, cij = coupling_t_reg[xi]
            hij *= cij
            if xi != xj:
                bj[reg] = xj
        else:
            # no break - all terms survived
            bjs[buf_ptr, :] = bj
            cijs[buf_ptr] = hij
            buf_ptr += 1
    return bjs[:buf_ptr], cijs[:buf_ptr]
|
|
1378
|
+
|
|
1379
|
+
|
|
1380
|
+
@njit(nogil=True)
def coupled_bits_numba(bi, coupling_map):
    """Get the bitstrings coupled to ``bi`` by ``coupling_map``, and the
    corresponding coefficients. Couplings that reach the same bitstring
    are summed into a single entry.

    Parameters
    ----------
    bi : int
        The input bitstring.
    coupling_map : numba.typed.Dict
        A nested numba dictionary of the form
        ``{term: {reg: {bit_in: (bit_out, coeff), ...}, ...}, ...}``.

    Returns
    -------
    bjs : ndarray[int64]
        The distinct coupled bitstrings.
    cijs : ndarray[float64]
        The summed coefficient for each coupled bitstring.
    """
    buf_ptr = 0
    bjs = np.empty(len(coupling_map), dtype=np.int64)
    cijs = np.empty(len(coupling_map), dtype=np.float64)
    bitmap = {}

    for coupling_t in coupling_map.values():
        # hij accumulates the product of coefficients across registers
        # (the original reused `cij` here, squaring the last register's
        # coefficient and dropping earlier ones - cf. the accumulator in
        # ``_build_coo_numba_core``)
        hij = 1.0
        bj = bi
        for reg, coupling_t_reg in coupling_t.items():
            xi = get_nth_bit(bi, reg)
            if xi not in coupling_t_reg:
                # zero coupling - whole branch dead
                break
            # update coeff and config
            xj, cij = coupling_t_reg[xi]
            hij *= cij
            if xi != xj:
                bj = flip_nth_bit(bj, reg)
        else:
            # no break - all terms survived
            if bj in bitmap:
                # already seen this config - just add coeff
                loc = bitmap[bj]
                cijs[loc] += hij
            else:
                # TODO: check performance of numba exception catching
                bjs[buf_ptr] = bj
                cijs[buf_ptr] = hij
                bitmap[bj] = buf_ptr
                buf_ptr += 1

    return bjs[:buf_ptr], cijs[:buf_ptr]
|
|
1414
|
+
|
|
1415
|
+
|
|
1416
|
+
@njit(nogil=True)
def _build_coo_numba_core(bits, coupling_map, bitmap=None):
    """Compute the COO data (values, row indices, column indices) of the
    operator described by ``coupling_map`` over the basis states ``bits``,
    growing the output buffers geometrically as needed.
    """
    # the bit map is needed if we only have a partial set of `bits`, which
    # might couple to other bits that are not in `bits` -> we need to look up
    # the linear register of the coupled bit
    if bitmap is None:
        bitmap = {bi: ci for ci, bi in enumerate(bits)}

    # start with one slot per basis state and double on overflow
    buf_size = len(bits)
    data = np.empty(buf_size, dtype=np.float64)
    cis = np.empty(buf_size, dtype=np.int64)
    cjs = np.empty(buf_size, dtype=np.int64)
    buf_ptr = 0

    for bi in bits:
        ci = bitmap[bi]
        for coupling_t in coupling_map.values():
            # hij accumulates the matrix element for this term acting on bi
            hij = 1.0
            bj = bi
            for reg, coupling_t_reg in coupling_t.items():
                xi = get_nth_bit(bi, reg)
                if xi not in coupling_t_reg:
                    # zero coupling - whole branch dead
                    break
                # update coeff and config
                xj, cij = coupling_t_reg[xi]
                hij *= cij
                if xi != xj:
                    bj = flip_nth_bit(bj, reg)
            else:
                # didn't break out of loop
                if buf_ptr >= buf_size:
                    # need to double our storage
                    data = np.concatenate((data, np.empty_like(data)))
                    cis = np.concatenate((cis, np.empty_like(cis)))
                    cjs = np.concatenate((cjs, np.empty_like(cjs)))
                    buf_size *= 2
                data[buf_ptr] = hij
                cis[buf_ptr] = ci
                cjs[buf_ptr] = bitmap[bj]
                buf_ptr += 1

    return data[:buf_ptr], cis[:buf_ptr], cjs[:buf_ptr]
|
|
1459
|
+
|
|
1460
|
+
|
|
1461
|
+
def build_coo_numba(bits, coupling_map, parallel=False):
    """Build an operator in COO form, using the basis ``bits`` and the
    ``coupling_map``, optionally multithreaded.

    Parameters
    ----------
    bits : array
        An array of integers, each representing a bit string.
    coupling_map : Dict[int, Dict[int, Dict[int, Tuple[int, float]]]]
        A nested numba dictionary of couplings. The outermost key is the term
        index, the next key is the register, and the innermost key is the bit
        index. The value is a tuple of (coupled bit index, coupling
        coefficient).
    parallel : bool or int, optional
        Whether to parallelize the computation. If an integer is given, it
        specifies the number of threads to use.

    Returns
    -------
    data : array
        The non-zero elements of the operator.
    cis : array
        The row indices of the non-zero elements.
    cjs : array
        The column indices of the non-zero elements.
    """
    if not parallel:
        # serial fast path - delegate straight to the numba core
        return _build_coo_numba_core(bits, coupling_map)

    from quimb import get_thread_pool

    if parallel is True:
        # let the pool pick its default worker count
        n_thread_workers = None
    elif isinstance(parallel, int):
        n_thread_workers = parallel
    else:
        raise ValueError(f"Unknown parallel option {parallel}.")

    pool = get_thread_pool(n_thread_workers)
    n_thread_workers = pool._max_workers

    # need a global mapping of bits to linear indices
    kws = dict(coupling_map=coupling_map, bitmap=build_bitmap(bits))

    # launch the threads! note we distribute in cyclic fashion as the sparsity
    # can be concentrated in certain ranges and we want each thread to have
    # roughly the same amount of work to do
    fs = [
        pool.submit(
            _build_coo_numba_core, bits=bits[i::n_thread_workers], **kws
        )
        for i in range(n_thread_workers)
    ]

    # gather and concatenate the results (probably some memory overhead here)
    data = []
    cis = []
    cjs = []
    for f in fs:
        d, ci, cj = f.result()
        data.append(d)
        cis.append(ci)
        cjs.append(cj)

    data = np.concatenate(data)
    cis = np.concatenate(cis)
    cjs = np.concatenate(cjs)

    return data, cis, cjs
|
|
1530
|
+
|
|
1531
|
+
|
|
1532
|
+
# -------------------------- specific hamiltonians -------------------------- #
|
|
1533
|
+
|
|
1534
|
+
|
|
1535
|
+
def fermi_hubbard_from_edges(edges, t=1.0, U=1.0, mu=0.0):
    """Build a spinful Fermi-Hubbard Hamiltonian on the graph given by
    ``edges``, with the Jordan-Wigner transformation already applied.

    The terms added are:

    - hopping ``-t * (c+_{s,a} c-_{s,b} + c+_{s,b} c-_{s,a})`` for each
      edge ``(a, b)`` and each spin ``s`` in (↑, ↓); skipped entirely
      when ``t == 0`` so no zero-amplitude terms are stored
    - on-site interaction ``U * n_↑ n_↓`` for every site
    - chemical potential ``-mu * (n_↑ + n_↓)`` for every site

    Parameters
    ----------
    edges : Iterable[tuple[hashable, hashable]]
        The edges, as pairs of hashable 'sites', that define the graph.
        Deduplicated via ``parse_edges_to_unique``, which also supplies
        the set of unique sites.
    t : float, optional
        The hopping amplitude.
    U : float, optional
        The on-site interaction strength.
    mu : float, optional
        The chemical potential.

    Returns
    -------
    H : SparseOperatorBuilder
    """
    H = SparseOperatorBuilder()
    sites, edges = parse_edges_to_unique(edges)

    if t != 0.0:
        for cooa, coob in edges:
            # hopping, both directions, for each spin species
            for s in "↑↓":
                H += -t, ("+", (s, cooa)), ("-", (s, coob))
                H += -t, ("+", (s, coob)), ("-", (s, cooa))

    for coo in sites:
        # interaction
        H += U, ("n", ("↑", coo)), ("n", ("↓", coo))

        # chemical potential
        H += -mu, ("n", ("↑", coo))
        H += -mu, ("n", ("↓", coo))

    # map fermionic operators to spin operators with JW strings
    H.jordan_wigner_transform()
    return H
|
|
1557
|
+
|
|
1558
|
+
|
|
1559
|
+
def fermi_hubbard_spinless_from_edges(edges, t=1.0, mu=0.0):
    """Build a spinless Fermi-Hubbard (free-fermion hopping) Hamiltonian
    on the graph given by ``edges``, with the Jordan-Wigner transformation
    already applied.

    The terms added are:

    - hopping ``-t * (c+_a c-_b + c+_b c-_a)`` for each edge ``(a, b)``;
      skipped entirely when ``t == 0`` so no zero-amplitude terms are
      stored (matching ``fermi_hubbard_from_edges``)
    - chemical potential ``-mu * n`` for every site

    Parameters
    ----------
    edges : Iterable[tuple[hashable, hashable]]
        The edges, as pairs of hashable 'sites', that define the graph.
        Deduplicated via ``parse_edges_to_unique``, which also supplies
        the set of unique sites.
    t : float, optional
        The hopping amplitude.
    mu : float, optional
        The chemical potential.

    Returns
    -------
    H : SparseOperatorBuilder
    """
    H = SparseOperatorBuilder()
    sites, edges = parse_edges_to_unique(edges)

    # guard like the spinful version: don't store zero-amplitude hoppings
    if t != 0.0:
        for cooa, coob in edges:
            # hopping, both directions
            H += -t, ("+", cooa), ("-", coob)
            H += -t, ("+", coob), ("-", cooa)

    # chemical potential
    for coo in sites:
        H += -mu, ("n", coo)

    # map fermionic operators to spin operators with JW strings
    H.jordan_wigner_transform()
    return H
|
|
1575
|
+
|
|
1576
|
+
|
|
1577
|
+
def heisenberg_from_edges(edges, j=1.0, b=0.0, hilbert_space=None):
    """Create a Heisenberg Hamiltonian on the graph defined by ``edges``.

    Parameters
    ----------
    edges : Iterable[tuple[hashable, hashable]]
        The edges, as pairs of hashable 'sites', that define the graph.
        Multiple edges are allowed, and will be treated as a single edge.
    j : float or tuple[float, float, float], optional
        The Heisenberg exchange coupling constant(s). A single float is
        broadcast to all three of the xx, yy and zz terms; a 3-tuple gives
        each separately. Positive ``j`` is antiferromagnetic coupling.
    b : float or tuple[float, float, float], optional
        The magnetic field strength(s). A single float is interpreted as a
        z-field only; a 3-tuple gives the x, y and z fields separately.
    hilbert_space : HilbertSpace, optional
        The Hilbert space to use. If not given, one will be constructed
        automatically from the edges.

    Returns
    -------
    H : SparseOperatorBuilder
    """

    def _expand(value, scalar_case):
        # a length-3 sequence unpacks directly; a scalar raises TypeError
        # and is broadcast according to ``scalar_case``
        try:
            x, y, z = value
        except TypeError:
            x, y, z = scalar_case(value)
        return x, y, z

    jx, jy, jz = _expand(j, lambda s: (s, s, s))
    bx, by, bz = _expand(b, lambda s: (0, 0, s))

    builder = SparseOperatorBuilder(hilbert_space=hilbert_space)
    all_sites, unique_edges = parse_edges_to_unique(edges)

    # isotropic-in-plane couplings allow a real-valued representation
    real_xy = jx == jy

    for site_a, site_b in unique_edges:
        if real_xy:
            # express sx.sx + sy.sy via ladder operators to keep things real
            builder += jx / 2, ("+", site_a), ("-", site_b)
            builder += jx / 2, ("-", site_a), ("+", site_b)
        else:
            builder += jx, ("sx", site_a), ("sx", site_b)
            builder += jy, ("sy", site_a), ("sy", site_b)

        builder += jz, ("sz", site_a), ("sz", site_b)

    for site in all_sites:
        for strength, op in ((bx, "sx"), (by, "sy"), (bz, "sz")):
            builder += strength, (op, site)

    return builder
|