tensorcircuit-nightly 1.3.0.dev20250902__py3-none-any.whl → 1.3.0.dev20250904__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of tensorcircuit-nightly might be problematic.
- tensorcircuit/__init__.py +1 -1
- tensorcircuit/abstractcircuit.py +1 -0
- tensorcircuit/backends/abstract_backend.py +33 -5
- tensorcircuit/backends/jax_backend.py +3 -0
- tensorcircuit/backends/numpy_backend.py +3 -0
- tensorcircuit/backends/pytorch_backend.py +3 -0
- tensorcircuit/backends/tensorflow_backend.py +3 -0
- tensorcircuit/basecircuit.py +127 -75
- tensorcircuit/circuit.py +54 -52
- tensorcircuit/cons.py +1 -0
- tensorcircuit/densitymatrix.py +13 -8
- tensorcircuit/gates.py +28 -22
- tensorcircuit/mpscircuit.py +89 -59
- tensorcircuit/quantum.py +274 -105
- tensorcircuit/results/counts.py +45 -31
- tensorcircuit/simplify.py +3 -1
- tensorcircuit/timeevol.py +42 -33
- {tensorcircuit_nightly-1.3.0.dev20250902.dist-info → tensorcircuit_nightly-1.3.0.dev20250904.dist-info}/METADATA +1 -1
- {tensorcircuit_nightly-1.3.0.dev20250902.dist-info → tensorcircuit_nightly-1.3.0.dev20250904.dist-info}/RECORD +22 -22
- {tensorcircuit_nightly-1.3.0.dev20250902.dist-info → tensorcircuit_nightly-1.3.0.dev20250904.dist-info}/WHEEL +0 -0
- {tensorcircuit_nightly-1.3.0.dev20250902.dist-info → tensorcircuit_nightly-1.3.0.dev20250904.dist-info}/licenses/LICENSE +0 -0
- {tensorcircuit_nightly-1.3.0.dev20250902.dist-info → tensorcircuit_nightly-1.3.0.dev20250904.dist-info}/top_level.txt +0 -0
tensorcircuit/quantum.py
CHANGED
@@ -9,6 +9,7 @@ import math
 import os
 from functools import partial, reduce
 from operator import matmul, mul, or_
+from collections import Counter
 from typing import (
     Any,
     Callable,
@@ -31,7 +32,7 @@ from tensornetwork.network_operations import (
     remove_node,
 )

-from .cons import backend, contractor, dtypestr, npdtype, rdtypestr
+from .cons import backend, contractor, dtypestr, npdtype, rdtypestr, _ALPHABET
 from .gates import Gate, num_to_tensor
 from .utils import arg_alias

@@ -56,6 +57,91 @@ def get_all_nodes(edges: Iterable[Edge]) -> List[Node]:
     return nodes


+def onehot_d_tensor(_k: Union[int, Tensor], d: int = 2) -> Tensor:
+    """
+    Construct a one-hot vector (or matrix) of local dimension ``d``.
+
+    :param _k: index or indices to set as 1. Can be an int or a backend Tensor.
+    :type _k: int or Tensor
+    :param d: local dimension (number of categories), defaults to 2
+    :type d: int, optional
+    :return: one-hot encoded vector (shape [d]) or matrix (shape [len(_k), d])
+    :rtype: Tensor
+    """
+    if isinstance(_k, int):
+        vec = backend.one_hot(_k, d)
+    else:
+        vec = backend.one_hot(backend.cast(_k, "int32"), d)
+    return backend.cast(vec, dtypestr)
+
+
+def _decode_basis_label(label: str, n: int, dim: int) -> List[int]:
+    """
+    Decode a string basis label into a list of integer digits.
+
+    The label is interpreted in base-``dim`` using characters ``0–9A–Z``.
+    Only dimensions up to 36 are supported.
+
+    :param label: basis label string, e.g. "010" or "A9F"
+    :type label: str
+    :param n: number of sites (expected length of the label)
+    :type n: int
+    :param dim: local dimension (2 <= dim <= 36)
+    :type dim: int
+    :return: list of integer digits of length ``n``, each in ``[0, dim-1]``
+    :rtype: List[int]
+
+    :raises NotImplementedError: if ``dim > 36``
+    :raises ValueError: if the label length mismatches ``n``,
+        or contains invalid/out-of-range characters
+    """
+    if dim > 36:
+        raise NotImplementedError(
+            f"String basis label supports d<=36 (0–9A–Z). Got dim={dim}. "
+            "Use an integer array/tensor of length n instead."
+        )
+    s = label.upper()
+    if len(s) != n:
+        raise ValueError(f"Basis label length mismatch: expect {n}, got {len(s)}")
+    digits = []
+    for ch in s:
+        if ch not in _ALPHABET:
+            raise ValueError(
+                f"Invalid character '{ch}' in basis label (allowed 0–9A–Z)."
+            )
+        v = _ALPHABET.index(ch)
+        if v >= dim:
+            raise ValueError(
+                f"Digit '{ch}' (= {v}) out of range for base-d with dim={dim}."
+            )
+        digits.append(v)
+    return digits
+
+
+def _infer_num_sites(D: int, dim: int) -> int:
+    """
+    Infer the number of sites (n) from a Hilbert space dimension D
+    and local dimension d, assuming D = d**n.
+
+    :param D: total Hilbert space dimension (int)
+    :param dim: local dimension per site (int)
+    :return: n such that D == d**n
+    :raises ValueError: if D is not an exact power of d
+    """
+    if not (isinstance(D, int) and D > 0):
+        raise ValueError(f"D must be a positive integer, got {D}")
+    if not (isinstance(dim, int) and dim >= 2):
+        raise ValueError(f"d must be an integer >= 2, got {dim}")
+
+    tmp, n = D, 0
+    while tmp % dim == 0 and tmp > 1:
+        tmp //= dim
+        n += 1
+    if tmp != 1:
+        raise ValueError(f"Dimension {D} is not a power of local dim {dim}")
+    return n
+
+
 def _reachable(nodes: List[AbstractNode]) -> List[AbstractNode]:
     if not nodes:
         raise ValueError("Reachable requires at least 1 node.")
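The three helpers above do the qudit bookkeeping added in this release: a backend one-hot constructor, a base-d (0-9A-Z) label decoder, and inference of n from D = d**n. A minimal pure-NumPy-free sketch of the same arithmetic, independent of the tensorcircuit backend (the names here are illustrative, and _ALPHABET is assumed to be the usual 0-9A-Z string):

# Illustrative sketch only; mirrors the logic of _infer_num_sites and _decode_basis_label above.
_ALPHABET = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"  # assumption about the constant imported from .cons

def infer_num_sites(D: int, dim: int) -> int:
    # peel off factors of dim until D is exhausted
    tmp, n = D, 0
    while tmp % dim == 0 and tmp > 1:
        tmp //= dim
        n += 1
    if tmp != 1:
        raise ValueError(f"{D} is not a power of {dim}")
    return n

def decode_basis_label(label: str, n: int, dim: int) -> list:
    # base-dim digits of a 0-9A-Z label, most significant first
    digits = [_ALPHABET.index(ch) for ch in label.upper()]
    assert len(digits) == n and all(v < dim for v in digits)
    return digits

print(infer_num_sites(27, 3))           # 3 qutrits, since 3**3 == 27
print(decode_basis_label("A9", 2, 12))  # [10, 9] for two 12-level qudits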
@@ -2150,7 +2236,10 @@ def entanglement_entropy(state: Tensor, cut: Union[int, List[int]]) -> Tensor:


 def reduced_wavefunction(
-    state: Tensor,
+    state: Tensor,
+    cut: List[int],
+    measure: Optional[List[int]] = None,
+    dim: Optional[int] = None,
 ) -> Tensor:
     """
     Compute the reduced wavefunction from the quantum state ``state``.
@@ -2165,20 +2254,22 @@ reduced_wavefunction(
     :type measure: List[int]
     :return: _description_
     :rtype: Tensor
+    :param dim: dimension of qudit system
+    :type dim: int
     """
+    dim = 2 if dim is None else dim
     if measure is None:
         measure = [0 for _ in cut]
-    s = backend.
+    s = backend.reshaped(state, dim)
     n = len(backend.shape_tuple(s))
     s_node = Gate(s)
     end_nodes = []
     for c, m in zip(cut, measure):
-
-        backend.
-
-            backend.convert_to_tensor(np.array([0.0, 1.0])), dtypestr
+        oh = backend.cast(
+            backend.one_hot(backend.cast(backend.convert_to_tensor(m), "int32"), dim),
+            dtypestr,
         )
-        end_node = Gate(
+        end_node = Gate(backend.convert_to_tensor(oh))
         end_nodes.append(end_node)
         s_node[c] ^ end_node[0]
     new_node = contractor(
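reduced_wavefunction now contracts each measured site with a one-hot vector of length dim instead of a hard-coded length-2 vector. A rough NumPy analogue of that projection (not the library call itself):

# NumPy sketch of projecting measured qudit sites out of a state vector.
import numpy as np

d, n = 3, 3                              # three qutrits
psi = np.arange(d**n, dtype=complex)     # toy, unnormalized amplitudes
psi = psi.reshape([d] * n)

cut, measure = [1], [2]                  # contract site 1 with the one-hot vector |2>
idx = [slice(None)] * n
for c, m in zip(cut, measure):
    idx[c] = m
reduced = psi[tuple(idx)].reshape(-1)    # amplitudes over the remaining two sites
print(reduced.shape)                     # (9,)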
@@ -2193,8 +2284,9 @@ def reduced_density_matrix(
     cut: Union[int, List[int]],
     p: Optional[Tensor] = None,
     normalize: bool = True,
+    dim: Optional[int] = None,
 ) -> Union[Tensor, QuOperator]:
-    """
+    r"""
     Compute the reduced density matrix from the quantum state ``state``.

     :param state: The quantum state in form of Tensor or QuOperator.
@@ -2206,8 +2298,12 @@
     :type p: Optional[Tensor]
     :return: The reduced density matrix.
     :rtype: Union[Tensor, QuOperator]
-    :normalize: if True, returns a trace 1 density matrix. Otherwise does not normalize.
+    :param normalize: if True, returns a trace 1 density matrix. Otherwise, does not normalize.
+    :type normalize: bool
+    :param dim: dimension of qudit system
+    :type dim: int
     """
+    dim = 2 if dim is None else dim
     if isinstance(cut, list) or isinstance(cut, tuple) or isinstance(cut, set):
         traceout = list(cut)
     else:
@@ -2220,21 +2316,19 @@ def reduced_density_matrix(
         return state.partial_trace(traceout)
     if len(state.shape) == 2 and state.shape[0] == state.shape[1]:
         # density operator
-
-        # traceout = sorted(traceout)[::-1]
-        freedom = int(np.log2(freedomexp) / 2)
-        # traceout2 = [i + freedom for i in traceout]
+        freedom = _infer_num_sites(state.shape[0], dim)
         left = traceout + [i for i in range(freedom) if i not in traceout]
         right = [i + freedom for i in left]
-
+
+        rho = backend.reshape(state, [dim] * (2 * freedom))
         rho = backend.transpose(rho, perm=left + right)
         rho = backend.reshape(
             rho,
             [
-
-
-
-
+                dim ** len(traceout),
+                dim ** (freedom - len(traceout)),
+                dim ** len(traceout),
+                dim ** (freedom - len(traceout)),
             ],
         )
         if p is None:
@@ -2247,20 +2341,20 @@ def reduced_density_matrix(
             p = backend.reshape(p, [-1])
             rho = backend.einsum("a,aiaj->ij", p, rho)
         rho = backend.reshape(
-            rho, [
+            rho, [dim ** (freedom - len(traceout)), dim ** (freedom - len(traceout))]
         )
         if normalize:
             rho /= backend.trace(rho)

     else:
         w = state / backend.norm(state)
-
-        freedom =
+        size = int(backend.sizen(state))
+        freedom = _infer_num_sites(size, dim)
         perm = [i for i in range(freedom) if i not in traceout]
         perm = perm + traceout
-        w = backend.reshape(w, [
+        w = backend.reshape(w, [dim for _ in range(freedom)])
         w = backend.transpose(w, perm=perm)
-        w = backend.reshape(w, [-1,
+        w = backend.reshape(w, [-1, dim ** len(traceout)])
         if p is None:
             rho = w @ backend.adjoint(w)
         else:
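With the dim argument, reduced_density_matrix reshapes the input into [dim] * n legs (or [dim] * 2n for a density operator) before tracing; the partial trace itself is unchanged. A small NumPy check of the pure-state branch for qutrits:

import numpy as np

d, n = 3, 2
psi = np.random.rand(d**n) + 1j * np.random.rand(d**n)
psi /= np.linalg.norm(psi)
w = psi.reshape([d] * n)                    # [dim] * n legs, as in the hunk above
rho1 = np.einsum("ki,kj->ij", w, w.conj())  # trace out site 0, keep site 1
print(rho1.shape, np.trace(rho1).real)      # (3, 3) and ~1.0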
@@ -2403,7 +2497,9 @@ def truncated_free_energy(


 @op2tensor
-def partial_transpose(
+def partial_transpose(
+    rho: Tensor, transposed_sites: List[int], dim: Optional[int] = None
+) -> Tensor:
     """
     _summary_

@@ -2411,10 +2507,13 @@ def partial_transpose(rho: Tensor, transposed_sites: List[int]) -> Tensor:
     :type rho: Tensor
     :param transposed_sites: sites int list to be transposed
     :type transposed_sites: List[int]
+    :param dim: dimension of qudit system
+    :type dim: int
     :return: _description_
     :rtype: Tensor
     """
-
+    dim = 2 if dim is None else dim
+    rho = backend.reshaped(rho, dim)
     rho_node = Gate(rho)
     n = len(rho.shape) // 2
     left_edges = []
@@ -2432,7 +2531,9 @@ def partial_transpose(rho: Tensor, transposed_sites: List[int]) -> Tensor:


 @op2tensor
-def entanglement_negativity(
+def entanglement_negativity(
+    rho: Tensor, transposed_sites: List[int], dim: Optional[int] = None
+) -> Tensor:
     """
     _summary_

@@ -2440,17 +2541,21 @@ def entanglement_negativity(rho: Tensor, transposed_sites: List[int]) -> Tensor:
     :type rho: Tensor
     :param transposed_sites: _description_
     :type transposed_sites: List[int]
+    :param dim: dimension of qudit system
+    :type dim: int
     :return: _description_
     :rtype: Tensor
     """
-    rhot = partial_transpose(rho, transposed_sites)
+    rhot = partial_transpose(rho, transposed_sites, dim=dim)
     es = backend.eigvalsh(rhot)
     rhot_m = backend.sum(backend.abs(es))
     return (rhot_m - 1.0) / 2.0


 @op2tensor
-def log_negativity(
+def log_negativity(
+    rho: Tensor, transposed_sites: List[int], base: str = "e", dim: Optional[int] = None
+) -> Tensor:
     """
     _summary_

@@ -2460,10 +2565,13 @@ def log_negativity(rho: Tensor, transposed_sites: List[int], base: str = "e") ->
     :type transposed_sites: List[int]
     :param base: whether use 2 based log or e based log, defaults to "e"
     :type base: str, optional
+    :param dim: dimension of qudit system
+    :type dim: int
     :return: _description_
     :rtype: Tensor
     """
-
+    dim = 2 if dim is None else dim
+    rhot = partial_transpose(rho, transposed_sites, dim)
     es = backend.eigvalsh(rhot)
     rhot_m = backend.sum(backend.abs(es))
     een = backend.log(rhot_m)
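partial_transpose, entanglement_negativity, and log_negativity only thread dim through to the same reshape; the negativity formula (sum of the absolute eigenvalues of the partial transpose, minus one, over two) is unchanged. A NumPy sanity check for a maximally entangled pair of qutrits, where the negativity should equal (d - 1)/2:

import numpy as np

d = 3
psi = np.eye(d).reshape(-1) / np.sqrt(d)                # sum_i |ii> / sqrt(d)
rho = np.outer(psi, psi.conj()).reshape(d, d, d, d)     # indices (iA, iB, jA, jB)
rho_pt = rho.transpose(0, 3, 2, 1).reshape(d**2, d**2)  # partial transpose on subsystem B
es = np.linalg.eigvalsh(rho_pt)
print((np.abs(es).sum() - 1.0) / 2.0)                   # (d - 1) / 2 = 1.0 for d = 3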
@@ -2549,7 +2657,9 @@ def double_state(h: Tensor, beta: float = 1) -> Tensor:


 @op2tensor
-def mutual_information(
+def mutual_information(
+    s: Tensor, cut: Union[int, List[int]], dim: Optional[int] = None
+) -> Tensor:
     """
     Mutual information between AB subsystem described by ``cut``.

@@ -2557,9 +2667,12 @@ def mutual_information(s: Tensor, cut: Union[int, List[int]]) -> Tensor:
     :type s: Tensor
     :param cut: The AB subsystem.
     :type cut: Union[int, List[int]]
+    :param dim: The diagonal matrix in form of Tensor.
+    :type dim: Tensor
     :return: The mutual information between AB subsystem described by ``cut``.
     :rtype: Tensor
     """
+    dim = 2 if dim is None else dim
     if isinstance(cut, list) or isinstance(cut, tuple) or isinstance(cut, set):
         traceout = list(cut)
     else:
@@ -2567,22 +2680,22 @@ def mutual_information(s: Tensor, cut: Union[int, List[int]]) -> Tensor:

     if len(s.shape) == 2 and s.shape[0] == s.shape[1]:
         # mixed state
-        n =
+        n = _infer_num_sites(s.shape[0], dim=dim)
         hab = entropy(s)

         # subsystem a
-        rhoa = reduced_density_matrix(s, traceout)
+        rhoa = reduced_density_matrix(s, traceout, dim=dim)
         ha = entropy(rhoa)

         # need subsystem b as well
         other = tuple(i for i in range(n) if i not in traceout)
-        rhob = reduced_density_matrix(s, other)  # type: ignore
+        rhob = reduced_density_matrix(s, other, dim=dim)  # type: ignore
         hb = entropy(rhob)

     # pure system
     else:
         hab = 0.0
-        rhoa = reduced_density_matrix(s, traceout)
+        rhoa = reduced_density_matrix(s, traceout, dim=dim)
         ha = hb = entropy(rhoa)

     return ha + hb - hab
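mutual_information keeps the S(A) + S(B) - S(AB) definition and only changes how n and the reduced density matrices are obtained. For a pure maximally entangled two-qutrit state the result should be 2 ln d; a NumPy cross-check:

import numpy as np

def vn_entropy(rho):
    es = np.linalg.eigvalsh(rho)
    es = es[es > 1e-12]
    return float(-(es * np.log(es)).sum())

d = 3
w = np.eye(d) / np.sqrt(d)                    # amplitudes of sum_i |ii> / sqrt(d)
rho_a = w @ w.conj().T                        # = I/d after tracing out B
rho_b = w.conj().T @ w
print(vn_entropy(rho_a) + vn_entropy(rho_b))  # 2 * ln(3) ~ 2.197; S(AB) = 0 for a pure state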
@@ -2591,7 +2704,9 @@ def mutual_information(s: Tensor, cut: Union[int, List[int]]) -> Tensor:
 # measurement results and transformations and correlations below


-def count_s2d(
+def count_s2d(
+    srepr: Tuple[Tensor, Tensor], n: int, dim: Optional[int] = None
+) -> Tensor:
     """
     measurement shots results, sparse tuple representation to dense representation
     count_vector to count_tuple
@@ -2600,11 +2715,14 @@ def count_s2d(srepr: Tuple[Tensor, Tensor], n: int) -> Tensor:
     :type srepr: Tuple[Tensor, Tensor]
     :param n: number of qubits
     :type n: int
+    :param dim: [description], defaults to None
+    :type dim: int, optional
     :return: [description]
     :rtype: Tensor
     """
+    dim = 2 if dim is None else dim
     return backend.scatter(
-        backend.cast(backend.zeros([
+        backend.cast(backend.zeros([dim**n]), srepr[1].dtype),
         backend.reshape(srepr[0], [-1, 1]),
         srepr[1],
     )
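count_s2d scatters a sparse (indices, counts) pair into a dense vector whose length is now dim**n rather than 2**n. The scatter is equivalent to the following NumPy assignment:

import numpy as np

d, n = 3, 2
indices = np.array([0, 4, 8])                # observed basis states as linear indices
counts = np.array([5, 3, 2])
dense = np.zeros(d**n, dtype=counts.dtype)   # count_vector of length d**n = 9
dense[indices] = counts
print(dense)                                 # [5 0 0 0 3 0 0 0 2]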
@@ -2647,117 +2765,146 @@ def count_d2s(drepr: Tensor, eps: float = 1e-7) -> Tuple[Tensor, Tensor]:
 count_t2v = count_d2s


-def sample_int2bin(sample: Tensor, n: int) -> Tensor:
+def sample_int2bin(sample: Tensor, n: int, dim: Optional[int] = None) -> Tensor:
     """
-
+    Convert linear-index samples to per-site digits (base-d).

-    :param sample:
+    :param sample: shape [trials], integers in [0, d**n)
     :type sample: Tensor
-    :param n: number of
+    :param n: number of sites
     :type n: int
-    :
+    :param dim: local dimension, defaults to 2
+    :type dim: int, optional
+    :return: shape [trials, n], entries in [0, d-1]
     :rtype: Tensor
     """
-
-
-
-
-
+    dim = 2 if dim is None else dim
+    if dim == 2:
+        return backend.mod(
+            backend.right_shift(sample[..., None], backend.reverse(backend.arange(n))),
+            2,
+        )
+    else:
+        pos = backend.reverse(backend.arange(n))
+        base = backend.power(dim, pos)
+        digits = backend.mod(
+            backend.floor_divide(sample[..., None], base),  # ⌊sample / d**pos⌋
+            dim,
+        )
+        return backend.cast(digits, "int32")


-def sample_bin2int(sample: Tensor, n: int) -> Tensor:
+def sample_bin2int(sample: Tensor, n: int, dim: Optional[int] = None) -> Tensor:
     """
     bin sample to int sample

     :param sample: in shape [trials, n] of elements (0, 1)
     :type sample: Tensor
-    :param n: number of
+    :param n: number of sites
     :type n: int
+    :param dim: local dimension, defaults to 2
+    :type dim: int, optional
     :return: in shape [trials]
     :rtype: Tensor
     """
-
+    dim = 2 if dim is None else dim
+    power = backend.convert_to_tensor([dim**j for j in reversed(range(n))])
     return backend.sum(sample * power, axis=-1)


 def sample2count(
-    sample: Tensor,
+    sample: Tensor,
+    n: int,
+    jittable: bool = True,
+    dim: Optional[int] = None,
 ) -> Tuple[Tensor, Tensor]:
     """
-    sample_int to count_tuple
+    sample_int to count_tuple (indices, counts), size = d**n

-    :param sample:
+    :param sample: linear-index samples, shape [shots]
     :type sample: Tensor
-    :param n:
+    :param n: number of sites
     :type n: int
-    :param jittable:
-    :type jittable: bool
-    :
+    :param jittable: whether to return fixed-size outputs (backend dependent)
+    :type jittable: bool
+    :param dim: local dimension per site, default 2 (qubit)
+    :type dim: int, optional
+    :return: (unique_indices, counts)
     :rtype: Tuple[Tensor, Tensor]
     """
-
+    dim = 2 if dim is None else dim
+    size = dim**n
     if not jittable:
         results = backend.unique_with_counts(sample)  # non-jittable
-    else:  # jax specified
-        results = backend.unique_with_counts(sample, size=
+    else:  # jax specified / fixed-size
+        results = backend.unique_with_counts(sample, size=size, fill_value=-1)
     return results


-def count_vector2dict(
+def count_vector2dict(
+    count: Tensor, n: int, key: str = "bin", dim: Optional[int] = None
+) -> Dict[Any, int]:
     """
-
+    Convert count_vector to count_dict_bin or count_dict_int.
+    For d>10 cases, a base-d string (0-9A-Z) is used.

-    :param count: tensor in shape [
+    :param count: tensor in shape [d**n]
     :type count: Tensor
-    :param n: number of
+    :param n: number of sites
     :type n: int
     :param key: can be "int" or "bin", defaults to "bin"
     :type key: str, optional
-    :
-    :
+    :param dim: local dimension (default 2)
+    :type dim: int, optional
+    :return: mapping from configuration to count
+    :rtype: Dict[Any, int]
     """
     from .interfaces import which_backend

+    dim = 2 if dim is None else dim
     b = which_backend(count)
-
+    out_int = {i: b.numpy(count[i]).item() for i in range(dim**n)}
     if key == "int":
-        return
+        return out_int
     else:
-
-        for k, v in
-            kn =
-
-        return
+        out_str = {}
+        for k, v in out_int.items():
+            kn = np.base_repr(k, base=dim).zfill(n)
+            out_str[kn] = v
+        return out_str


 def count_tuple2dict(
-    count: Tuple[Tensor, Tensor], n: int, key: str = "bin"
+    count: Tuple[Tensor, Tensor], n: int, key: str = "bin", dim: Optional[int] = None
 ) -> Dict[Any, int]:
     """
     count_tuple to count_dict_bin or count_dict_int

-    :param count: count_tuple format
+    :param count: count_tuple format (indices, counts)
     :type count: Tuple[Tensor, Tensor]
-    :param n: number of qubits
+    :param n: number of sites (qubits or qudits)
     :type n: int
     :param key: can be "int" or "bin", defaults to "bin"
     :type key: str, optional
+    :param dim: local dimension, defaults to 2
+    :type dim: int, optional
     :return: count_dict
-    :rtype:
+    :rtype: Dict[Any, int]
     """
-
+    dim = 2 if dim is None else dim
+    out_int = {
         backend.numpy(i).item(): backend.numpy(j).item()
         for i, j in zip(count[0], count[1])
         if i >= 0
     }
     if key == "int":
-        return
+        return out_int
     else:
-
-        for k, v in
-            kn =
-
-        return
+        out_str = {}
+        for k, v in out_int.items():
+            kn = np.base_repr(k, base=dim).zfill(n)
+            out_str[kn] = v
+        return out_str


 @partial(arg_alias, alias_dict={"counts": ["shots"], "format": ["format_"]})
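The sample/count conversions generalize from bit tricks to base-d arithmetic: sample_int2bin divides by powers of d, sample_bin2int multiplies them back, and the dict converters label keys with np.base_repr (0-9A-Z) padded to n digits. The same round trip in plain NumPy:

import numpy as np

d, n = 3, 4
samples = np.array([0, 5, 26, 80])                # linear indices in [0, d**n)
pos = np.arange(n)[::-1]
digits = (samples[:, None] // d**pos) % d         # base-d digits, most significant first
back = (digits * d**pos).sum(axis=-1)             # inverse transform
assert np.array_equal(back, samples)
print([np.base_repr(s, base=d).zfill(n) for s in samples])  # ['0000', '0012', '0222', '2222']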
@@ -2769,8 +2916,9 @@ def measurement_counts(
     random_generator: Optional[Any] = None,
     status: Optional[Tensor] = None,
     jittable: bool = False,
+    dim: Optional[int] = None,
 ) -> Any:
-    """
+    r"""
     Simulate the measuring of each qubit of ``p`` in the computational basis,
     thus producing output like that of ``qiskit``.

@@ -2785,6 +2933,7 @@ def measurement_counts(
     "count_tuple": # (np.array([0]), np.array([2]))

     "count_dict_bin": # {"00": 2, "01": 0, "10": 0, "11": 0}
+    / for cases d\in [10, 36], "10" -> "A", ..., "35" -> "Z"

     "count_dict_int": # {0: 2, 1: 0, 2: 0, 3: 0}

@@ -2836,21 +2985,22 @@ def measurement_counts(
         state /= backend.norm(state)
         pi = backend.real(backend.conj(state) * state)
         pi = backend.reshape(pi, [-1])
-
-
+
+    local_d = 2 if dim is None else dim
+    total_dim = int(backend.shape_tuple(pi)[0])
+    n = _infer_num_sites(total_dim, local_d)
+
     if (counts is None) or counts <= 0:
         if format == "count_vector":
             return pi
         elif format == "count_tuple":
             return count_d2s(pi)
         elif format == "count_dict_bin":
-            return count_vector2dict(pi, n, key="bin")
+            return count_vector2dict(pi, n, key="bin", dim=local_d)
         elif format == "count_dict_int":
-            return count_vector2dict(pi, n, key="int")
+            return count_vector2dict(pi, n, key="int", dim=local_d)
         else:
-            raise ValueError(
-                "unsupported format %s for analytical measurement" % format
-            )
+            raise ValueError(f"unsupported format {format} for analytical measurement")
     else:
         raw_counts = backend.probability_sample(
             counts, pi, status=status, g=random_generator
@@ -2861,7 +3011,7 @@ def measurement_counts(
         # raw_counts = backend.stateful_randc(
         #     random_generator, a=drange, shape=counts, p=pi
         # )
-        return sample2all(raw_counts, n, format=format, jittable=jittable)
+        return sample2all(raw_counts, n, format=format, jittable=jittable, dim=local_d)


 measurement_results = measurement_counts
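In measurement_counts the number of sites is no longer derived from log2 of the state size; it is inferred from the probability vector length and the local dimension. For shots <= 0 the function keeps returning the analytic probability vector, now of length dim**n. A toy qutrit illustration of that analytic branch:

import numpy as np

d, n = 3, 2
psi = np.zeros(d**n, dtype=complex)
psi[0] = psi[4] = 1 / np.sqrt(2)      # (|00> + |11>) / sqrt(2) on two qutrits
pi = np.abs(psi) ** 2                 # the "count_vector" returned for shots <= 0
print(len(pi), np.round(pi, 2))       # 9, with weight 0.5 on indices 0 and 4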
@@ -2869,45 +3019,64 @@ measurement_results = measurement_counts

 @partial(arg_alias, alias_dict={"format": ["format_"]})
 def sample2all(
-    sample: Tensor,
+    sample: Tensor,
+    n: int,
+    format: str = "count_vector",
+    jittable: bool = False,
+    dim: Optional[int] = None,
 ) -> Any:
     """
-    transform ``sample_int`` or ``sample_bin``
+    transform ``sample_int`` or ``sample_bin`` results to other forms specified by ``format``

-    :param sample: measurement shots results in ``sample_int`` or ``sample_bin``
+    :param sample: measurement shots results in ``sample_int`` (shape [shots]) or ``sample_bin`` (shape [shots, n])
     :type sample: Tensor
-    :param n: number of
+    :param n: number of sites
     :type n: int
-    :param format: see
-        defaults to "count_vector"
+    :param format: see :py:meth:`tensorcircuit.quantum.measurement_results`, defaults to "count_vector"
     :type format: str, optional
     :param jittable: only applicable to count transformation in jax backend, defaults to False
     :type jittable: bool, optional
+    :param dim: local dimension (2 for qubit; >2 for qudit), defaults to 2
+    :type dim: Optional[int]
     :return: measurement results specified as ``format``
     :rtype: Any
     """
+    dim = 2 if dim is None else int(dim)
+    n_max_d = int(32 / np.log2(dim))
+    if n > n_max_d:
+        assert (
+            len(backend.shape_tuple(sample)) == 2
+        ), f"n>{n_max_d} is only supported for ``sample_bin``"
+        if format == "sample_bin":
+            return sample
+        if format == "count_dict_bin":
+            binary_strings = ["".join(map(str, shot)) for shot in sample]
+            return dict(Counter(binary_strings))
+        raise ValueError(f"n={n} is too large for measurement representaion: {format}")
+
     if len(backend.shape_tuple(sample)) == 1:
         sample_int = sample
-        sample_bin = sample_int2bin(sample, n)
+        sample_bin = sample_int2bin(sample, n, dim=dim)
     elif len(backend.shape_tuple(sample)) == 2:
-        sample_int = sample_bin2int(sample, n)
+        sample_int = sample_bin2int(sample, n, dim=dim)
         sample_bin = sample
     else:
         raise ValueError("unrecognized tensor shape for sample")
+
     if format == "sample_int":
         return sample_int
     elif format == "sample_bin":
         return sample_bin
     else:
-        count_tuple = sample2count(sample_int, n, jittable)
+        count_tuple = sample2count(sample_int, n, jittable=jittable, dim=dim)
         if format == "count_tuple":
             return count_tuple
         elif format == "count_vector":
-            return count_s2d(count_tuple, n)
+            return count_s2d(count_tuple, n, dim=dim)
         elif format == "count_dict_bin":
-            return count_tuple2dict(count_tuple, n, key="bin")
+            return count_tuple2dict(count_tuple, n, key="bin", dim=dim)
         elif format == "count_dict_int":
-            return count_tuple2dict(count_tuple, n, key="int")
+            return count_tuple2dict(count_tuple, n, key="int", dim=dim)
         else:
             raise ValueError(
                 "unsupported format %s for finite shots measurement" % format