Trajectree 0.0.1__py3-none-any.whl → 0.0.3__py3-none-any.whl

This diff shows the changes between publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
Files changed (124)
  1. trajectree/__init__.py +0 -3
  2. trajectree/fock_optics/devices.py +1 -1
  3. trajectree/fock_optics/light_sources.py +2 -2
  4. trajectree/fock_optics/measurement.py +9 -9
  5. trajectree/fock_optics/outputs.py +10 -6
  6. trajectree/fock_optics/utils.py +9 -6
  7. trajectree/sequence/swap.py +5 -4
  8. trajectree/trajectory.py +5 -4
  9. {trajectree-0.0.1.dist-info → trajectree-0.0.3.dist-info}/METADATA +2 -3
  10. trajectree-0.0.3.dist-info/RECORD +16 -0
  11. trajectree/quimb/docs/_pygments/_pygments_dark.py +0 -118
  12. trajectree/quimb/docs/_pygments/_pygments_light.py +0 -118
  13. trajectree/quimb/docs/conf.py +0 -158
  14. trajectree/quimb/docs/examples/ex_mpi_expm_evo.py +0 -62
  15. trajectree/quimb/quimb/__init__.py +0 -507
  16. trajectree/quimb/quimb/calc.py +0 -1491
  17. trajectree/quimb/quimb/core.py +0 -2279
  18. trajectree/quimb/quimb/evo.py +0 -712
  19. trajectree/quimb/quimb/experimental/__init__.py +0 -0
  20. trajectree/quimb/quimb/experimental/autojittn.py +0 -129
  21. trajectree/quimb/quimb/experimental/belief_propagation/__init__.py +0 -109
  22. trajectree/quimb/quimb/experimental/belief_propagation/bp_common.py +0 -397
  23. trajectree/quimb/quimb/experimental/belief_propagation/d1bp.py +0 -316
  24. trajectree/quimb/quimb/experimental/belief_propagation/d2bp.py +0 -653
  25. trajectree/quimb/quimb/experimental/belief_propagation/hd1bp.py +0 -571
  26. trajectree/quimb/quimb/experimental/belief_propagation/hv1bp.py +0 -775
  27. trajectree/quimb/quimb/experimental/belief_propagation/l1bp.py +0 -316
  28. trajectree/quimb/quimb/experimental/belief_propagation/l2bp.py +0 -537
  29. trajectree/quimb/quimb/experimental/belief_propagation/regions.py +0 -194
  30. trajectree/quimb/quimb/experimental/cluster_update.py +0 -286
  31. trajectree/quimb/quimb/experimental/merabuilder.py +0 -865
  32. trajectree/quimb/quimb/experimental/operatorbuilder/__init__.py +0 -15
  33. trajectree/quimb/quimb/experimental/operatorbuilder/operatorbuilder.py +0 -1631
  34. trajectree/quimb/quimb/experimental/schematic.py +0 -7
  35. trajectree/quimb/quimb/experimental/tn_marginals.py +0 -130
  36. trajectree/quimb/quimb/experimental/tnvmc.py +0 -1483
  37. trajectree/quimb/quimb/gates.py +0 -36
  38. trajectree/quimb/quimb/gen/__init__.py +0 -2
  39. trajectree/quimb/quimb/gen/operators.py +0 -1167
  40. trajectree/quimb/quimb/gen/rand.py +0 -713
  41. trajectree/quimb/quimb/gen/states.py +0 -479
  42. trajectree/quimb/quimb/linalg/__init__.py +0 -6
  43. trajectree/quimb/quimb/linalg/approx_spectral.py +0 -1109
  44. trajectree/quimb/quimb/linalg/autoblock.py +0 -258
  45. trajectree/quimb/quimb/linalg/base_linalg.py +0 -719
  46. trajectree/quimb/quimb/linalg/mpi_launcher.py +0 -397
  47. trajectree/quimb/quimb/linalg/numpy_linalg.py +0 -244
  48. trajectree/quimb/quimb/linalg/rand_linalg.py +0 -514
  49. trajectree/quimb/quimb/linalg/scipy_linalg.py +0 -293
  50. trajectree/quimb/quimb/linalg/slepc_linalg.py +0 -892
  51. trajectree/quimb/quimb/schematic.py +0 -1518
  52. trajectree/quimb/quimb/tensor/__init__.py +0 -401
  53. trajectree/quimb/quimb/tensor/array_ops.py +0 -610
  54. trajectree/quimb/quimb/tensor/circuit.py +0 -4824
  55. trajectree/quimb/quimb/tensor/circuit_gen.py +0 -411
  56. trajectree/quimb/quimb/tensor/contraction.py +0 -336
  57. trajectree/quimb/quimb/tensor/decomp.py +0 -1255
  58. trajectree/quimb/quimb/tensor/drawing.py +0 -1646
  59. trajectree/quimb/quimb/tensor/fitting.py +0 -385
  60. trajectree/quimb/quimb/tensor/geometry.py +0 -583
  61. trajectree/quimb/quimb/tensor/interface.py +0 -114
  62. trajectree/quimb/quimb/tensor/networking.py +0 -1058
  63. trajectree/quimb/quimb/tensor/optimize.py +0 -1818
  64. trajectree/quimb/quimb/tensor/tensor_1d.py +0 -4778
  65. trajectree/quimb/quimb/tensor/tensor_1d_compress.py +0 -1854
  66. trajectree/quimb/quimb/tensor/tensor_1d_tebd.py +0 -662
  67. trajectree/quimb/quimb/tensor/tensor_2d.py +0 -5954
  68. trajectree/quimb/quimb/tensor/tensor_2d_compress.py +0 -96
  69. trajectree/quimb/quimb/tensor/tensor_2d_tebd.py +0 -1230
  70. trajectree/quimb/quimb/tensor/tensor_3d.py +0 -2869
  71. trajectree/quimb/quimb/tensor/tensor_3d_tebd.py +0 -46
  72. trajectree/quimb/quimb/tensor/tensor_approx_spectral.py +0 -60
  73. trajectree/quimb/quimb/tensor/tensor_arbgeom.py +0 -3237
  74. trajectree/quimb/quimb/tensor/tensor_arbgeom_compress.py +0 -565
  75. trajectree/quimb/quimb/tensor/tensor_arbgeom_tebd.py +0 -1138
  76. trajectree/quimb/quimb/tensor/tensor_builder.py +0 -5411
  77. trajectree/quimb/quimb/tensor/tensor_core.py +0 -11179
  78. trajectree/quimb/quimb/tensor/tensor_dmrg.py +0 -1472
  79. trajectree/quimb/quimb/tensor/tensor_mera.py +0 -204
  80. trajectree/quimb/quimb/utils.py +0 -892
  81. trajectree/quimb/tests/__init__.py +0 -0
  82. trajectree/quimb/tests/test_accel.py +0 -501
  83. trajectree/quimb/tests/test_calc.py +0 -788
  84. trajectree/quimb/tests/test_core.py +0 -847
  85. trajectree/quimb/tests/test_evo.py +0 -565
  86. trajectree/quimb/tests/test_gen/__init__.py +0 -0
  87. trajectree/quimb/tests/test_gen/test_operators.py +0 -361
  88. trajectree/quimb/tests/test_gen/test_rand.py +0 -296
  89. trajectree/quimb/tests/test_gen/test_states.py +0 -261
  90. trajectree/quimb/tests/test_linalg/__init__.py +0 -0
  91. trajectree/quimb/tests/test_linalg/test_approx_spectral.py +0 -368
  92. trajectree/quimb/tests/test_linalg/test_base_linalg.py +0 -351
  93. trajectree/quimb/tests/test_linalg/test_mpi_linalg.py +0 -127
  94. trajectree/quimb/tests/test_linalg/test_numpy_linalg.py +0 -84
  95. trajectree/quimb/tests/test_linalg/test_rand_linalg.py +0 -134
  96. trajectree/quimb/tests/test_linalg/test_slepc_linalg.py +0 -283
  97. trajectree/quimb/tests/test_tensor/__init__.py +0 -0
  98. trajectree/quimb/tests/test_tensor/test_belief_propagation/__init__.py +0 -0
  99. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_d1bp.py +0 -39
  100. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_d2bp.py +0 -67
  101. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_hd1bp.py +0 -64
  102. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_hv1bp.py +0 -51
  103. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_l1bp.py +0 -142
  104. trajectree/quimb/tests/test_tensor/test_belief_propagation/test_l2bp.py +0 -101
  105. trajectree/quimb/tests/test_tensor/test_circuit.py +0 -816
  106. trajectree/quimb/tests/test_tensor/test_contract.py +0 -67
  107. trajectree/quimb/tests/test_tensor/test_decomp.py +0 -40
  108. trajectree/quimb/tests/test_tensor/test_mera.py +0 -52
  109. trajectree/quimb/tests/test_tensor/test_optimizers.py +0 -488
  110. trajectree/quimb/tests/test_tensor/test_tensor_1d.py +0 -1171
  111. trajectree/quimb/tests/test_tensor/test_tensor_2d.py +0 -606
  112. trajectree/quimb/tests/test_tensor/test_tensor_2d_tebd.py +0 -144
  113. trajectree/quimb/tests/test_tensor/test_tensor_3d.py +0 -123
  114. trajectree/quimb/tests/test_tensor/test_tensor_arbgeom.py +0 -226
  115. trajectree/quimb/tests/test_tensor/test_tensor_builder.py +0 -441
  116. trajectree/quimb/tests/test_tensor/test_tensor_core.py +0 -2066
  117. trajectree/quimb/tests/test_tensor/test_tensor_dmrg.py +0 -388
  118. trajectree/quimb/tests/test_tensor/test_tensor_spectral_approx.py +0 -63
  119. trajectree/quimb/tests/test_tensor/test_tensor_tebd.py +0 -270
  120. trajectree/quimb/tests/test_utils.py +0 -85
  121. trajectree-0.0.1.dist-info/RECORD +0 -126
  122. {trajectree-0.0.1.dist-info → trajectree-0.0.3.dist-info}/WHEEL +0 -0
  123. {trajectree-0.0.1.dist-info → trajectree-0.0.3.dist-info}/licenses/LICENSE +0 -0
  124. {trajectree-0.0.1.dist-info → trajectree-0.0.3.dist-info}/top_level.txt +0 -0
trajectree/quimb/quimb/experimental/belief_propagation/d1bp.py (deleted)
@@ -1,316 +0,0 @@
- """Belief propagation for standard tensor networks. This:
-
- - assumes no hyper indices, only standard bonds.
- - assumes a single ('dense') tensor per site
- - works directly on the '1-norm' i.e. scalar tensor network
-
- This is the simplest version of belief propagation, and is useful for
- simple investigations.
- """
-
- import autoray as ar
-
- from quimb.tensor.contraction import array_contract
- from quimb.utils import oset
-
- from .bp_common import (
-     BeliefPropagationCommon,
-     combine_local_contractions,
- )
- from .hd1bp import (
-     compute_all_tensor_messages_tree,
- )
-
-
- def initialize_messages(tn, fill_fn=None):
-
-     backend = ar.infer_backend(next(t.data for t in tn))
-     _sum = ar.get_lib_fn(backend, "sum")
-
-     messages = {}
-     for ix, tids in tn.ind_map.items():
-         if len(tids) != 2:
-             continue
-         tida, tidb = tids
-
-         for tid_from, tid_to in [(tida, tidb), (tidb, tida)]:
-             t_from = tn.tensor_map[tid_from]
-             if fill_fn is not None:
-                 d = t_from.ind_size(ix)
-                 m = fill_fn((d,))
-             else:
-                 m = array_contract(
-                     arrays=(t_from.data,),
-                     inputs=(tuple(range(t_from.ndim)),),
-                     output=(t_from.inds.index(ix),),
-                 )
-             messages[ix, tid_to] = m / _sum(m)
-
-     return messages
-
-
- class D1BP(BeliefPropagationCommon):
-     """Dense (as in one tensor per site) 1-norm (as in for 'classical' systems)
-     belief propagation algorithm. Allows message reuse. This version assumes no
-     hyper indices (i.e. a standard tensor network). This is the simplest
-     version of belief propagation.
-
-     Parameters
-     ----------
-     tn : TensorNetwork
-         The tensor network to run BP on.
-     messages : dict[(str, int), array_like], optional
-         The initial messages to use, effectively defaults to all ones if not
-         specified.
-     damping : float, optional
-         The damping factor to use, 0.0 means no damping.
-     update : {'sequential', 'parallel'}, optional
-         Whether to update messages sequentially or in parallel.
-     local_convergence : bool, optional
-         Whether to allow messages to locally converge - i.e. if all their
-         input messages have converged then stop updating them.
-     fill_fn : callable, optional
-         If specified, use this function to fill in the initial messages.
-
-     Attributes
-     ----------
-     tn : TensorNetwork
-         The target tensor network.
-     messages : dict[(str, int), array_like]
-         The current messages. The key is a tuple of the index and tensor id
-         that the message is being sent to.
-     key_pairs : dict[(str, int), (str, int)]
-         A dictionary mapping the key of a message to the key of the message
-         propagating in the opposite direction.
-     """
-
-     def __init__(
-         self,
-         tn,
-         messages=None,
-         damping=0.0,
-         update="sequential",
-         local_convergence=True,
-         message_init_function=None,
-     ):
-         self.tn = tn
-         self.damping = damping
-         self.local_convergence = local_convergence
-         self.update = update
-
-         self.backend = next(t.backend for t in tn)
-         _abs = ar.get_lib_fn(self.backend, "abs")
-         _sum = ar.get_lib_fn(self.backend, "sum")
-
-         def _normalize(x):
-             return x / _sum(x)
-
-         def _distance(x, y):
-             return _sum(_abs(x - y))
-
-         self._normalize = _normalize
-         self._distance = _distance
-
-         if messages is None:
-             self.messages = initialize_messages(self.tn, message_init_function)
-         else:
-             self.messages = messages
-
-         # record which messages touch which tids, for efficient updates
-         self.touched = oset()
-         self.key_pairs = {}
-         for ix, tids in tn.ind_map.items():
-             if len(tids) != 2:
-                 continue
-             tida, tidb = tids
-             self.key_pairs[ix, tidb] = (ix, tida)
-             self.key_pairs[ix, tida] = (ix, tidb)
-
-     def iterate(self, tol=5e-6):
-         if (not self.local_convergence) or (not self.touched):
-             # assume if asked to iterate that we want to check all messages
-             self.touched = oset(self.tn.tensor_map)
-
-         ncheck = len(self.touched)
-         nconv = 0
-         max_mdiff = -1.0
-         new_touched = oset()
-
-         def _compute_ms(tid):
-             t = self.tn.tensor_map[tid]
-             new_ms = compute_all_tensor_messages_tree(
-                 t.data,
-                 [self.messages[ix, tid] for ix in t.inds],
-                 self.backend,
-             )
-             new_ms = [self._normalize(m) for m in new_ms]
-             new_ks = [self.key_pairs[ix, tid] for ix in t.inds]
-
-             return new_ks, new_ms
-
-         def _update_m(key, data):
-             nonlocal nconv, max_mdiff
-
-             m = self.messages[key]
-             if self.damping != 0.0:
-                 data = (1 - self.damping) * data + self.damping * m
-
-             mdiff = float(self._distance(m, data))
-             if mdiff > tol:
-                 # mark destination tid for update
-                 new_touched.add(key[1])
-             else:
-                 nconv += 1
-
-             max_mdiff = max(max_mdiff, mdiff)
-             self.messages[key] = data
-
-         if self.update == "sequential":
-             # compute each new message and immediately re-insert it
-             while self.touched:
-                 tid = self.touched.pop()
-                 keys, new_ms = _compute_ms(tid)
-                 for key, data in zip(keys, new_ms):
-                     _update_m(key, data)
-
-         elif self.update == "parallel":
-             new_data = {}
-             # compute all new messages
-             while self.touched:
-                 tid = self.touched.pop()
-                 keys, new_ms = _compute_ms(tid)
-                 for key, data in zip(keys, new_ms):
-                     new_data[key] = data
-             # insert all new messages
-             for key, data in new_data.items():
-                 _update_m(key, data)
-
-         self.touched = new_touched
-         return nconv, ncheck, max_mdiff
-
-     def normalize_messages(self):
-         """Normalize all messages such that for each bond `<m_i|m_j> = 1` and
-         `<m_i|m_i> = <m_j|m_j>` (but in general != 1).
-         """
-         for ix, tids in self.tn.ind_map.items():
-             if len(tids) != 2:
-                 continue
-             tida, tidb = tids
-             mi = self.messages[ix, tida]
-             mj = self.messages[ix, tidb]
-             nij = abs(mi @ mj)**0.5
-             nii = (mi @ mi)**0.25
-             njj = (mj @ mj)**0.25
-             self.messages[ix, tida] = mi / (nij * nii / njj)
-             self.messages[ix, tidb] = mj / (nij * njj / nii)
-
-     def get_gauged_tn(self):
-         """Gauge the original TN by inserting the BP-approximated transfer
-         matrix eigenvectors, which may be complex. The BP-contraction of this
-         gauged network is then simply the product of zeroth entries of each
-         tensor.
-         """
-         tng = self.tn.copy()
-         for ind, tids in self.tn.ind_map.items():
-             tida, tidb = tids
-             ka = (ind, tida)
-             kb = (ind, tidb)
-             ma = self.messages[ka]
-             mb = self.messages[kb]
-
-             el, ev = ar.do('linalg.eig', ar.do('outer', ma, mb))
-             k = ar.do('argsort', -ar.do('abs', el))
-             ev = ev[:, k]
-             Uinv = ev
-             U = ar.do('linalg.inv', ev)
-             tng._insert_gauge_tids(U, tida, tidb, Uinv)
-         return tng
-
-     def contract(self, strip_exponent=False):
-         tvals = []
-         for tid, t in self.tn.tensor_map.items():
-             arrays = [t.data]
-             inputs = [tuple(range(t.ndim))]
-             for i, ix in enumerate(t.inds):
-                 m = self.messages[ix, tid]
-                 arrays.append(m)
-                 inputs.append((i,))
-             tvals.append(
-                 array_contract(
-                     arrays=arrays,
-                     inputs=inputs,
-                     output=(),
-                 )
-             )
-
-         mvals = []
-         for ix, tids in self.tn.ind_map.items():
-             if len(tids) != 2:
-                 continue
-             tida, tidb = tids
-             mvals.append(
-                 self.messages[ix, tida] @ self.messages[ix, tidb]
-             )
-
-         return combine_local_contractions(
-             tvals, mvals, self.backend, strip_exponent=strip_exponent
-         )
-
-
-
- def contract_d1bp(
-     tn,
-     max_iterations=1000,
-     tol=5e-6,
-     damping=0.0,
-     update="sequential",
-     local_convergence=True,
-     strip_exponent=False,
-     info=None,
-     progbar=False,
-     **contract_opts,
- ):
-     """Estimate the contraction of standard tensor network ``tn`` using dense
-     1-norm belief propagation.
-
-     Parameters
-     ----------
-     tn : TensorNetwork
-         The tensor network to contract, it should have no dangling or hyper
-         indices.
-     max_iterations : int, optional
-         The maximum number of iterations to run for.
-     tol : float, optional
-         The convergence tolerance for messages.
-     damping : float, optional
-         The damping parameter to use, defaults to no damping.
-     update : {'sequential', 'parallel'}, optional
-         Whether to update messages sequentially or in parallel.
-     local_convergence : bool, optional
-         Whether to allow messages to locally converge - i.e. if all their
-         input messages have converged then stop updating them.
-     strip_exponent : bool, optional
-         Whether to strip the exponent from the final result. If ``True``
-         then the returned result is ``(mantissa, exponent)``.
-     info : dict, optional
-         If specified, update this dictionary with information about the
-         belief propagation run.
-     progbar : bool, optional
-         Whether to show a progress bar.
-     """
-     bp = D1BP(
-         tn,
-         damping=damping,
-         local_convergence=local_convergence,
-         update=update,
-         **contract_opts,
-     )
-     bp.run(
-         max_iterations=max_iterations,
-         tol=tol,
-         info=info,
-         progbar=progbar,
-     )
-     return bp.contract(
-         strip_exponent=strip_exponent,
-     )
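
For context, the module removed above supplies the D1BP message-passing class and the contract_d1bp convenience wrapper for estimating the contraction of a closed tensor network. The sketch below shows roughly how that wrapper is driven; it assumes the upstream quimb package, its TN2D_rand builder, and the quimb.experimental.belief_propagation.d1bp import path (assumptions about the surrounding API, not something recorded in this diff), rather than the vendored copy deleted here.

    import quimb.tensor as qtn
    # assumed upstream location of the same module; the vendored copy above
    # lived under trajectree/quimb/quimb/experimental/belief_propagation/
    from quimb.experimental.belief_propagation.d1bp import contract_d1bp

    # a small closed 2D tensor network with non-negative entries (TN2D_rand is
    # an assumed quimb builder; any TensorNetwork with no dangling or hyper
    # indices would do, per the docstring above)
    tn = qtn.TN2D_rand(4, 4, D=3, dist="uniform", seed=42)

    # BP estimate of the scalar contraction; `info` is updated with run details
    info = {}
    z_bp = contract_d1bp(tn, max_iterations=500, tol=1e-6, damping=0.1, info=info)

    # the same estimate returned as (mantissa, exponent) to avoid overflow
    mantissa, exponent = contract_d1bp(tn, strip_exponent=True)

    # the network is small enough to contract exactly for comparison
    z_exact = tn.contract()
    print(z_bp, z_exact)

Non-negative entries are chosen here only because 1-norm BP tends to be best behaved on positive networks; damping and the update schedule are the main knobs exposed when plain iteration fails to converge.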