tensorcircuit-nightly 1.0.2.dev20250108__py3-none-any.whl → 1.4.0.dev20251103__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of tensorcircuit-nightly might be problematic.

Files changed (76)
  1. tensorcircuit/__init__.py +18 -2
  2. tensorcircuit/about.py +46 -0
  3. tensorcircuit/abstractcircuit.py +4 -0
  4. tensorcircuit/analogcircuit.py +413 -0
  5. tensorcircuit/applications/layers.py +1 -1
  6. tensorcircuit/applications/van.py +1 -1
  7. tensorcircuit/backends/abstract_backend.py +320 -7
  8. tensorcircuit/backends/cupy_backend.py +3 -1
  9. tensorcircuit/backends/jax_backend.py +102 -4
  10. tensorcircuit/backends/jax_ops.py +110 -1
  11. tensorcircuit/backends/numpy_backend.py +49 -3
  12. tensorcircuit/backends/pytorch_backend.py +92 -3
  13. tensorcircuit/backends/tensorflow_backend.py +102 -3
  14. tensorcircuit/basecircuit.py +157 -98
  15. tensorcircuit/circuit.py +115 -57
  16. tensorcircuit/cloud/local.py +1 -1
  17. tensorcircuit/cloud/quafu_provider.py +1 -1
  18. tensorcircuit/cloud/tencent.py +1 -1
  19. tensorcircuit/compiler/simple_compiler.py +2 -2
  20. tensorcircuit/cons.py +142 -21
  21. tensorcircuit/densitymatrix.py +43 -14
  22. tensorcircuit/experimental.py +387 -129
  23. tensorcircuit/fgs.py +282 -81
  24. tensorcircuit/gates.py +66 -22
  25. tensorcircuit/interfaces/__init__.py +1 -3
  26. tensorcircuit/interfaces/jax.py +189 -0
  27. tensorcircuit/keras.py +3 -3
  28. tensorcircuit/mpscircuit.py +154 -65
  29. tensorcircuit/quantum.py +868 -152
  30. tensorcircuit/quditcircuit.py +733 -0
  31. tensorcircuit/quditgates.py +618 -0
  32. tensorcircuit/results/counts.py +147 -20
  33. tensorcircuit/results/readout_mitigation.py +4 -1
  34. tensorcircuit/shadows.py +1 -1
  35. tensorcircuit/simplify.py +3 -1
  36. tensorcircuit/stabilizercircuit.py +479 -0
  37. tensorcircuit/templates/__init__.py +2 -0
  38. tensorcircuit/templates/blocks.py +2 -2
  39. tensorcircuit/templates/hamiltonians.py +174 -0
  40. tensorcircuit/templates/lattice.py +1789 -0
  41. tensorcircuit/timeevol.py +896 -0
  42. tensorcircuit/translation.py +10 -3
  43. tensorcircuit/utils.py +7 -0
  44. {tensorcircuit_nightly-1.0.2.dev20250108.dist-info → tensorcircuit_nightly-1.4.0.dev20251103.dist-info}/METADATA +73 -23
  45. tensorcircuit_nightly-1.4.0.dev20251103.dist-info/RECORD +96 -0
  46. {tensorcircuit_nightly-1.0.2.dev20250108.dist-info → tensorcircuit_nightly-1.4.0.dev20251103.dist-info}/WHEEL +1 -1
  47. {tensorcircuit_nightly-1.0.2.dev20250108.dist-info → tensorcircuit_nightly-1.4.0.dev20251103.dist-info}/top_level.txt +0 -1
  48. tensorcircuit_nightly-1.0.2.dev20250108.dist-info/RECORD +0 -115
  49. tests/__init__.py +0 -0
  50. tests/conftest.py +0 -67
  51. tests/test_backends.py +0 -1031
  52. tests/test_calibrating.py +0 -149
  53. tests/test_channels.py +0 -365
  54. tests/test_circuit.py +0 -1699
  55. tests/test_cloud.py +0 -219
  56. tests/test_compiler.py +0 -147
  57. tests/test_dmcircuit.py +0 -555
  58. tests/test_ensemble.py +0 -72
  59. tests/test_fgs.py +0 -310
  60. tests/test_gates.py +0 -156
  61. tests/test_interfaces.py +0 -429
  62. tests/test_keras.py +0 -160
  63. tests/test_miscs.py +0 -277
  64. tests/test_mpscircuit.py +0 -341
  65. tests/test_noisemodel.py +0 -156
  66. tests/test_qaoa.py +0 -86
  67. tests/test_qem.py +0 -152
  68. tests/test_quantum.py +0 -526
  69. tests/test_quantum_attr.py +0 -42
  70. tests/test_results.py +0 -347
  71. tests/test_shadows.py +0 -160
  72. tests/test_simplify.py +0 -46
  73. tests/test_templates.py +0 -218
  74. tests/test_torchnn.py +0 -99
  75. tests/test_van.py +0 -102
  76. {tensorcircuit_nightly-1.0.2.dev20250108.dist-info → tensorcircuit_nightly-1.4.0.dev20251103.dist-info/licenses}/LICENSE +0 -0
tensorcircuit/quantum.py CHANGED
@@ -1,43 +1,38 @@
1
1
  """
2
2
  Quantum state and operator class backend by tensornetwork
3
-
4
- :IMPORT:
5
-
6
- .. code-block:: python
7
-
8
- import tensorcircuit.quantum as qu
9
3
  """
10
4
 
11
5
  # pylint: disable=invalid-name
12
6
 
13
7
  import logging
8
+ import math
14
9
  import os
15
10
  from functools import partial, reduce
16
11
  from operator import matmul, mul, or_
12
+ from collections import Counter
17
13
  from typing import (
18
14
  Any,
19
15
  Callable,
20
16
  Collection,
21
17
  Dict,
18
+ Iterable,
22
19
  List,
23
20
  Optional,
24
21
  Sequence,
25
- Set,
26
22
  Tuple,
27
23
  Union,
28
24
  )
29
25
 
30
26
  import numpy as np
27
+ import tensornetwork as tn
31
28
  from tensornetwork.network_components import AbstractNode, CopyNode, Edge, Node, connect
32
29
  from tensornetwork.network_operations import (
33
30
  copy,
34
- get_all_nodes,
35
31
  get_subgraph_dangling,
36
- reachable,
37
32
  remove_node,
38
33
  )
39
34
 
40
- from .cons import backend, contractor, dtypestr, npdtype, rdtypestr
35
+ from .cons import backend, contractor, dtypestr, npdtype, rdtypestr, _ALPHABET
41
36
  from .gates import Gate, num_to_tensor
42
37
  from .utils import arg_alias
43
38
 
@@ -51,6 +46,157 @@ logger = logging.getLogger(__name__)
51
46
  # For the reason of adoption instead of direct import: see https://github.com/google/TensorNetwork/issues/950
52
47
 
53
48
 
49
+ def get_all_nodes(edges: Iterable[Edge]) -> List[Node]:
50
+ """Return the set of nodes connected to edges."""
51
+ nodes = []
52
+ for edge in edges:
53
+ if edge.node1 is not None and edge.node1 not in nodes:
54
+ nodes.append(edge.node1)
55
+ if edge.node2 is not None and edge.node2 not in nodes:
56
+ nodes.append(edge.node2)
57
+ return nodes
58
+
59
+
60
+ def onehot_d_tensor(_k: Union[int, Tensor], d: int = 2) -> Tensor:
61
+ """
62
+ Construct a one-hot vector (or matrix) of local dimension ``d``.
63
+
64
+ :param _k: index or indices to set as 1. Can be an int or a backend Tensor.
65
+ :type _k: int or Tensor
66
+ :param d: local dimension (number of categories), defaults to 2
67
+ :type d: int, optional
68
+ :return: one-hot encoded vector (shape [d]) or matrix (shape [len(_k), d])
69
+ :rtype: Tensor
70
+ """
71
+ if isinstance(_k, int):
72
+ vec = backend.one_hot(_k, d)
73
+ else:
74
+ vec = backend.one_hot(backend.cast(_k, "int32"), d)
75
+ return backend.cast(vec, dtypestr)
76
+
77
+
78
+ def _decode_basis_label(label: str, n: int, dim: int) -> List[int]:
79
+ """
80
+ Decode a string basis label into a list of integer digits.
81
+
82
+ The label is interpreted in base-``dim`` using characters ``0-9A-Z``.
83
+ Only dimensions up to 36 are supported.
84
+
85
+ :param label: basis label string, e.g. "010" or "A9F"
86
+ :type label: str
87
+ :param n: number of sites (expected length of the label)
88
+ :type n: int
89
+ :param dim: local dimension (2 <= dim <= 36)
90
+ :type dim: int
91
+ :return: list of integer digits of length ``n``, each in ``[0, dim-1]``
92
+ :rtype: List[int]
93
+
94
+ :raises NotImplementedError: if ``dim > 36``
95
+ :raises ValueError: if the label length mismatches ``n``,
96
+ or contains invalid/out-of-range characters
97
+ """
98
+ if dim > 36:
99
+ raise NotImplementedError(
100
+ f"String basis label supports d<=36 (0-9A-Z). Got dim={dim}. "
101
+ "Use an integer array/tensor of length n instead."
102
+ )
103
+ s = label.upper()
104
+ if len(s) != n:
105
+ raise ValueError(f"Basis label length mismatch: expect {n}, got {len(s)}")
106
+ digits = []
107
+ for ch in s:
108
+ if ch not in _ALPHABET:
109
+ raise ValueError(
110
+ f"Invalid character '{ch}' in basis label (allowed 0-9A-Z)."
111
+ )
112
+ v = _ALPHABET.index(ch)
113
+ if v >= dim:
114
+ raise ValueError(
115
+ f"Digit '{ch}' (= {v}) out of range for base-d with dim={dim}."
116
+ )
117
+ digits.append(v)
118
+ return digits
119
+
120
+
121
+ def _infer_num_sites(D: int, dim: int) -> int:
122
+ """
123
+ Infer the number of sites (n) from a Hilbert space dimension D
124
+ and local dimension d, assuming D = d**n.
125
+
126
+ :param D: total Hilbert space dimension (int)
127
+ :param dim: local dimension per site (int)
128
+ :return: n such that D == d**n
129
+ :raises ValueError: if D is not an exact power of d
130
+ """
131
+ if not (isinstance(D, int) and D > 0):
132
+ raise ValueError(f"D must be a positive integer, got {D}")
133
+ if not (isinstance(dim, int) and dim >= 2):
134
+ raise ValueError(f"d must be an integer >= 2, got {dim}")
135
+
136
+ tmp, n = D, 0
137
+ while tmp % dim == 0 and tmp > 1:
138
+ tmp //= dim
139
+ n += 1
140
+ if tmp != 1:
141
+ raise ValueError(f"Dimension {D} is not a power of local dim {dim}")
142
+ return n
143
+
144
+
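# A minimal standalone sketch of the 0-9A-Z label convention documented in
# _decode_basis_label and of the D = d**n check in _infer_num_sites; the helper
# names below are illustrative only, and the alphabet ordering is assumed to
# match cons._ALPHABET (digits followed by uppercase letters).
import string

_ALPHA = string.digits + string.ascii_uppercase  # "0123456789ABCDEFGHIJ...Z"

def decode_label(label: str) -> list:
    # one integer digit per character, interpreted in base <= len(_ALPHA)
    return [_ALPHA.index(ch) for ch in label.upper()]

# decode_label("A9F") -> [10, 9, 15]; with dim=16 every digit is in range,
# and since 16**3 == 4096, _infer_num_sites(4096, 16) returns 3.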
145
+ def _reachable(nodes: List[AbstractNode]) -> List[AbstractNode]:
146
+ if not nodes:
147
+ raise ValueError("Reachable requires at least 1 node.")
148
+ node_que = []
149
+ node_que.extend(nodes)
150
+ seen_nodes = []
151
+ i = 0
152
+ while i < len(node_que):
153
+ node = node_que[i]
154
+ if node not in seen_nodes:
155
+ seen_nodes.append(node)
156
+ for e in sorted(node.edges, key=id): # Sort edges by id for deterministic order
157
+ for n in sorted([n for n in e.get_nodes() if n is not None], key=id):
158
+ if n not in seen_nodes and n not in node_que[i + 1 :]:
159
+ node_que.append(n)
160
+ i += 1
161
+ return sorted(seen_nodes, key=lambda node: getattr(node, "_stable_id_", -1))
162
+
163
+
164
+ def reachable(
165
+ inputs: Union[AbstractNode, Iterable[AbstractNode], Edge, Iterable[Edge]],
166
+ ) -> List[AbstractNode]:
167
+ """Computes all nodes reachable from `node` or `edge.node1` by connected edges.
168
+
169
+ Args:
170
+ inputs: A `AbstractNode`/`Edge` or collection of `AbstractNodes`/`Edges`
171
+
172
+ Returns:
173
+ A list of `AbstractNode` objects that can be reached from `node`
174
+ via connected edges.
175
+
176
+ Raises:
177
+ TypeError: If inputs contains elements other than `Edge` or `Node`.
178
+ """
179
+ if isinstance(inputs, AbstractNode):
180
+ inputs = [inputs]
181
+ if isinstance(inputs, Edge):
182
+ inputs = [inputs.node1]
183
+
184
+ processed_inputs = []
185
+ for inp in inputs:
186
+ if isinstance(inp, AbstractNode):
187
+ if inp not in processed_inputs:
188
+ processed_inputs.append(inp)
189
+ elif isinstance(inp, Edge):
190
+ if inp.node1 not in processed_inputs:
191
+ processed_inputs.append(inp.node1)
192
+ else:
193
+ raise TypeError(
194
+ f"input to `reachable` has to be an iterable of "
195
+ f"Nodes or Edges, got {type(inp)} instead."
196
+ )
197
+ return _reachable(processed_inputs)
198
+
199
+
54
200
  # general conventions left (first) out, right (then) in
55
201
 
56
202
 
@@ -299,8 +445,8 @@ class QuOperator:
299
445
  )
300
446
  self.out_edges = list(out_edges)
301
447
  self.in_edges = list(in_edges)
302
- self.ignore_edges = set(ignore_edges) if ignore_edges else set()
303
- self.ref_nodes = set(ref_nodes) if ref_nodes else set()
448
+ self.ignore_edges = list(ignore_edges) if ignore_edges else list()
449
+ self.ref_nodes = list(ref_nodes) if ref_nodes else list()
304
450
  self.check_network()
305
451
 
306
452
  @classmethod
@@ -390,9 +536,9 @@ class QuOperator:
390
536
  return cls(out_edges, in_edges)
391
537
 
392
538
  @property
393
- def nodes(self) -> Set[AbstractNode]:
539
+ def nodes(self) -> List[AbstractNode]:
394
540
  """All tensor-network nodes involved in the operator."""
395
- return reachable(get_all_nodes(self.out_edges + self.in_edges) | self.ref_nodes) # type: ignore
541
+ return reachable(get_all_nodes(self.out_edges + self.in_edges) + self.ref_nodes) # type: ignore
396
542
 
397
543
  @property
398
544
  def in_space(self) -> List[int]:
@@ -442,7 +588,7 @@ class QuOperator:
442
588
  "ignore_edges contains non-dangling edge: {}".format(str(e))
443
589
  )
444
590
 
445
- known_edges = set(self.in_edges) | set(self.out_edges) | self.ignore_edges
591
+ known_edges = set(self.in_edges) | set(self.out_edges) | set(self.ignore_edges)
446
592
  all_dangling_edges = get_subgraph_dangling(self.nodes)
447
593
  if known_edges != all_dangling_edges:
448
594
  raise ValueError(
@@ -602,10 +748,10 @@ class QuOperator:
602
748
  return self.__mul__(other)
603
749
 
604
750
  def tensor_product(self, other: "QuOperator") -> "QuOperator":
605
- """
751
+ r"""
606
752
  Tensor product with another operator.
607
753
  Given two operators `A` and `B`, produces a new operator `AB` representing
608
- :math:`A B`. The `out_edges` (`in_edges`) of `AB` is simply the
754
+ :math:`A \otimes B`. The `out_edges` (`in_edges`) of `AB` is simply the
609
755
  concatenation of the `out_edges` (`in_edges`) of `A.copy()` with that of
610
756
  `B.copy()`:
611
757
  `new_out_edges = [*out_edges_A_copy, *out_edges_B_copy]`
@@ -670,16 +816,16 @@ class QuOperator:
670
816
  nodes_dict, dangling_edges_dict = eliminate_identities(self.nodes)
671
817
  self.in_edges = [dangling_edges_dict[e] for e in self.in_edges]
672
818
  self.out_edges = [dangling_edges_dict[e] for e in self.out_edges]
673
- self.ignore_edges = set(dangling_edges_dict[e] for e in self.ignore_edges)
674
- self.ref_nodes = set(nodes_dict[n] for n in self.ref_nodes if n in nodes_dict)
819
+ self.ignore_edges = list(dangling_edges_dict[e] for e in self.ignore_edges)
820
+ self.ref_nodes = list(nodes_dict[n] for n in self.ref_nodes if n in nodes_dict)
675
821
  self.check_network()
676
822
  if final_edge_order:
677
823
  final_edge_order = [dangling_edges_dict[e] for e in final_edge_order]
678
- self.ref_nodes = set(
824
+ self.ref_nodes = list(
679
825
  [contractor(self.nodes, output_edge_order=final_edge_order)]
680
826
  )
681
827
  else:
682
- self.ref_nodes = set([contractor(self.nodes, ignore_edge_order=True)])
828
+ self.ref_nodes = list([contractor(self.nodes, ignore_edge_order=True)])
683
829
  return self
684
830
 
685
831
  def eval(
@@ -1019,7 +1165,7 @@ class QuScalar(QuOperator):
1019
1165
  :rtype: QuScalar
1020
1166
  """
1021
1167
  n = Node(tensor)
1022
- return cls(set([n]))
1168
+ return cls(list([n]))
1023
1169
 
1024
1170
 
1025
1171
  def ps2xyz(ps: List[int]) -> Dict[str, List[int]]:
@@ -1092,33 +1238,281 @@ def generate_local_hamiltonian(
1092
1238
  return hop
1093
1239
 
1094
1240
 
1095
- def tn2qop(tn_mpo: Any) -> QuOperator:
1241
+ # TODO(@Charlespkuer): Add more conversion functions for other packages
1242
+ def extract_tensors_from_qop(qop: QuOperator) -> Tuple[List[Node], bool, int]:
1096
1243
  """
1097
- Convert MPO in TensorNetwork package to QuOperator.
1244
+ Extract and sort tensors from QuOperator for conversion to other tensor network formats.
1098
1245
 
1099
- :param tn_mpo: MPO in the form of TensorNetwork package
1100
- :type tn_mpo: ``tn.matrixproductstates.mpo.*``
1101
- :return: MPO in the form of QuOperator
1246
+ :param qop: Input QuOperator to extract tensors from
1247
+ :type qop: QuOperator
1248
+ :return: Tuple containing (sorted_nodes, is_mps, nwires) where:
1249
+ - sorted_nodes: List of Node objects sorted in linear chain order
1250
+ - is_mps: Boolean flag indicating if the structure is MPS (True) or MPO (False)
1251
+ - nwires: Integer number of physical edges/qubits in the system
1252
+ :rtype: Tuple[List[Node], bool, int]
1253
+ """
1254
+ is_mps = len(qop.in_edges) == 0
1255
+ nwires = len(qop.out_edges)
1256
+ if not is_mps and len(qop.in_edges) != nwires:
1257
+ raise ValueError(
1258
+ "MPO must have the same number of input and output edges. "
1259
+ f"Got {len(qop.in_edges)} and {nwires}."
1260
+ )
1261
+
1262
+ # Collect all nodes from edges
1263
+ nodes_for_sorting = qop.nodes
1264
+ if len(nodes_for_sorting) != nwires:
1265
+ raise ValueError(f"Number of nodes does not match number of wires.")
1266
+
1267
+ # Find endpoint nodes
1268
+ endpoint_nodes = set()
1269
+ physical_edges = set(qop.out_edges) if is_mps else set(qop.in_edges + qop.out_edges)
1270
+ if is_mps:
1271
+ rank_2_nodes = {node for node in nodes_for_sorting if len(node.edges) == 2}
1272
+ if len(rank_2_nodes) == 2:
1273
+ endpoint_nodes = rank_2_nodes
1274
+
1275
+ if not endpoint_nodes:
1276
+ endpoint_nodes = {edge.node1 for edge in qop.ignore_edges if edge.node1}
1277
+
1278
+ if not endpoint_nodes and len(nodes_for_sorting) > 1:
1279
+ virtual_bond_counts = {}
1280
+ virtual_bond_dim_sums = {}
1281
+
1282
+ for node in nodes_for_sorting:
1283
+ virtual_bonds = 0
1284
+ virtual_dim_sum = 0
1285
+
1286
+ for edge in node.edges:
1287
+ if edge not in physical_edges and not edge.is_dangling():
1288
+ virtual_bonds += 1
1289
+ virtual_dim_sum += edge.dimension
1290
+
1291
+ virtual_bond_counts[node] = virtual_bonds
1292
+ virtual_bond_dim_sums[node] = virtual_dim_sum
1293
+
1294
+ min_dim_sum = min(virtual_bond_dim_sums.values())
1295
+ min_dim_nodes = {
1296
+ node
1297
+ for node, dim_sum in virtual_bond_dim_sums.items()
1298
+ if dim_sum == min_dim_sum
1299
+ }
1300
+
1301
+ if len(min_dim_nodes) == 2:
1302
+ endpoint_nodes = min_dim_nodes
1303
+
1304
+ if not endpoint_nodes:
1305
+ if len(nodes_for_sorting) == 1:
1306
+ raise ValueError("Cannot determine chain structure: only one node found.")
1307
+ elif len(nodes_for_sorting) >= 2:
1308
+ raise ValueError(f"Cannot identify endpoint nodes for your nodes.")
1309
+
1310
+ # Sort nodes along the chain
1311
+ sorted_nodes: list[Node] = []
1312
+ if endpoint_nodes and len(endpoint_nodes) >= 1:
1313
+ current = next(iter(endpoint_nodes))
1314
+ while current and len(sorted_nodes) < nwires:
1315
+ sorted_nodes.append(current)
1316
+ current = next(
1317
+ (
1318
+ e.node2 if e.node1 is current else e.node1
1319
+ for e in current.edges
1320
+ if not e.is_dangling()
1321
+ and e not in physical_edges
1322
+ and (e.node2 if e.node1 is current else e.node1) not in sorted_nodes
1323
+ ),
1324
+ None,
1325
+ )
1326
+
1327
+ if not sorted_nodes:
1328
+ raise ValueError("No valid chain structure found in the QuOperator. ")
1329
+ if len(sorted_nodes) > 0 and len(qop.ignore_edges) > 0:
1330
+ if sorted_nodes[0] is not qop.ignore_edges[0].node1:
1331
+ sorted_nodes = sorted_nodes[::-1]
1332
+
1333
+ return sorted_nodes, is_mps, nwires
1334
+
1335
+
1336
+ def tenpy2qop(tenpy_obj: Any) -> QuOperator:
1337
+ """
1338
+ Converts a TeNPy MPO or MPS to a TensorCircuit QuOperator.
1339
+ Axis ordering and boundary conditions are handled so that the result
1340
+ is compatible with `eval_matrix`.
1341
+
1342
+ :param tenpy_obj: An MPO or MPS object from the TeNPy package.
1343
+ :type tenpy_obj: Union[tenpy.networks.mpo.MPO, tenpy.networks.mps.MPS]
1344
+ :return: The corresponding state or operator as a QuOperator.
1102
1345
  :rtype: QuOperator
1103
1346
  """
1104
- tn_mpo = tn_mpo.tensors
1105
- nwires = len(tn_mpo)
1106
- mpo = []
1107
- for i in range(nwires):
1108
- mpo.append(Node(tn_mpo[i]))
1347
+ # MPO objects have _W attribute containing tensor list (documented in tenpy.networks.mpo.MPO)
1348
+ # MPS objects have _B attribute containing tensor list (documented in tenpy.networks.mps.MPS)
1349
+ # These are internal attributes that store the actual tensor data for each site
1350
+ # Reference: https://tenpy.readthedocs.io/en/latest/reference/tenpy.networks.mpo.html
1351
+ # https://tenpy.readthedocs.io/en/latest/reference/tenpy.networks.mps.html
1352
+ is_mpo = hasattr(tenpy_obj, "_W")
1353
+ tenpy_tensors = tenpy_obj._W if is_mpo else tenpy_obj._B
1354
+ nwires = len(tenpy_tensors)
1355
+ if nwires == 0:
1356
+ return quantum_constructor([], [], [])
1357
+
1358
+ nodes = []
1359
+ if is_mpo:
1360
+ original_tensors_obj = tenpy_tensors
1361
+
1362
+ for i, W_obj in enumerate(original_tensors_obj):
1363
+ arr = W_obj.to_ndarray()
1364
+ labels = W_obj.get_leg_labels()
1365
+ wL_idx = labels.index("wL")
1366
+ p_idx = labels.index("p")
1367
+ p_star_idx = labels.index("p*")
1368
+ wR_idx = labels.index("wR")
1369
+
1370
+ arr_reordered = arr.transpose((wL_idx, p_idx, p_star_idx, wR_idx))
1371
+ if nwires == 1:
1372
+ arr_reordered = arr_reordered[[0], :, :, :]
1373
+ arr_reordered = arr_reordered[:, :, :, [-1]]
1374
+ else:
1375
+ if i == 0:
1376
+ arr_reordered = arr_reordered[[0], :, :, :]
1377
+ elif i == nwires - 1:
1378
+ arr_reordered = arr_reordered[:, :, :, [-1]]
1379
+
1380
+ node = Node(
1381
+ arr_reordered, name=f"mpo_{i}", axis_names=["wL", "p", "p*", "wR"]
1382
+ )
1383
+ nodes.append(node)
1384
+
1385
+ if nwires > 1:
1386
+ for i in range(nwires - 1):
1387
+ nodes[i][3] ^ nodes[i + 1][0]
1388
+
1389
+ out_edges = [n[2] for n in nodes]
1390
+ in_edges = [n[1] for n in nodes]
1391
+ ignore_edges = [nodes[0][0], nodes[-1][3]]
1392
+ else: # MPS
1393
+ for i in range(nwires):
1394
+ B_obj = tenpy_obj.get_B(i)
1395
+ arr = B_obj.to_ndarray()
1396
+ labels = B_obj.get_leg_labels()
1397
+ vL_idx = labels.index("vL")
1398
+ p_idx = labels.index("p")
1399
+ vR_idx = labels.index("vR")
1400
+ arr_reordered = arr.transpose((vL_idx, p_idx, vR_idx))
1401
+ node = Node(arr_reordered, name=f"mps_{i}", axis_names=["vL", "p", "vR"])
1402
+ nodes.append(node)
1403
+
1404
+ if nwires > 1:
1405
+ for i in range(nwires - 1):
1406
+ nodes[i][2] ^ nodes[i + 1][0]
1407
+
1408
+ out_edges = [n[1] for n in nodes]
1409
+ in_edges = []
1410
+ ignore_edges = [nodes[0][0], nodes[-1][2]]
1411
+
1412
+ qop = quantum_constructor(out_edges, in_edges, [], ignore_edges)
1109
1413
 
1110
- for i in range(nwires - 1):
1111
- connect(mpo[i][1], mpo[i + 1][0])
1112
- # TODO(@refraction-ray): whether in and out edge is in the correct order require further check
1113
- qop = quantum_constructor(
1114
- [mpo[i][-1] for i in range(nwires)], # out_edges
1115
- [mpo[i][-2] for i in range(nwires)], # in_edges
1116
- [],
1117
- [mpo[0][0], mpo[-1][1]], # ignore_edges
1118
- )
1119
1414
  return qop
1120
1415
 
1121
1416
 
1417
+ def qop2tenpy(qop: QuOperator) -> Any:
1418
+ """
1419
+ Convert TensorCircuit QuOperator to MPO or MPS from TeNPy.
1420
+
1421
+ Requirements: QuOperator must represent valid MPS/MPO structure:
1422
+ - Linear chain topology with open boundaries only
1423
+ - MPS: no input edges, consistent virtual bonds, rank-3 (or rank-4 with empty input edges) tensors
1424
+ - MPO: equal input/output edges, rank-4 tensors
1425
+ - Cyclic boundary conditions NOT supported
1426
+
1427
+ :param qop: The corresponding state/operator as a QuOperator.
1428
+ :type qop: QuOperator
1429
+ :return: MPO or MPS object from the TeNPy package.
1430
+ :rtype: Union[tenpy.networks.mpo.MPO, tenpy.networks.mps.MPS]
1431
+ """
1432
+ try:
1433
+ from tenpy.networks import MPO, MPS, Site
1434
+ from tenpy.linalg import np_conserved as npc
1435
+ from tenpy.linalg import LegCharge
1436
+ except ImportError:
1437
+ raise ImportError("Please install TeNPy package to use this function.")
1438
+
1439
+ sorted_nodes, is_mps, nwires = extract_tensors_from_qop(qop)
1440
+
1441
+ physical_dim = qop.out_edges[0].dimension if is_mps else qop.in_edges[0].dimension
1442
+ sites = [Site(LegCharge.from_trivial(physical_dim), "q") for _ in range(nwires)]
1443
+
1444
+ # MPS Conversion
1445
+ if is_mps:
1446
+ tensors = []
1447
+ for i, node in enumerate(sorted_nodes):
1448
+ tensor = np.asarray(node.tensor)
1449
+ if tensor.ndim == 3:
1450
+ if i == 0:
1451
+ if tensor.shape[0] > 1:
1452
+ tensor = tensor[0:1, :, :]
1453
+ elif i == len(sorted_nodes) - 1:
1454
+ if tensor.shape[2] > 1:
1455
+ tensor = tensor[:, :, 0:1]
1456
+ tensors.append(
1457
+ npc.Array.from_ndarray(
1458
+ tensor,
1459
+ legcharges=[LegCharge.from_trivial(s) for s in tensor.shape],
1460
+ labels=["vL", "p", "vR"],
1461
+ )
1462
+ )
1463
+
1464
+ SVs = (
1465
+ [np.ones([1])]
1466
+ + [np.ones(tensors[i].get_leg("vR").ind_len) for i in range(nwires - 1)]
1467
+ + [np.ones([1])]
1468
+ )
1469
+ return MPS(sites, tensors, SVs, bc="finite")
1470
+
1471
+ # MPO Conversion
1472
+ raw_tensors = [np.asarray(node.tensor) for node in sorted_nodes]
1473
+
1474
+ if nwires == 1:
1475
+ chi = 1
1476
+ IdL = IdR = 0
1477
+ reconstructed_tensors = raw_tensors
1478
+ else:
1479
+ chi = max(
1480
+ raw_tensors[0].shape[3] if raw_tensors[0].ndim > 3 else 1,
1481
+ raw_tensors[-1].shape[0] if raw_tensors[-1].ndim > 3 else 1,
1482
+ )
1483
+ IdL = 0
1484
+ IdR = chi - 1 if chi > 1 else 0
1485
+
1486
+ reconstructed_tensors = []
1487
+ for i, tensor in enumerate(raw_tensors):
1488
+ if i == 0 and tensor.shape[0] < chi:
1489
+ new_shape = (chi,) + tensor.shape[1:]
1490
+ padded_tensor = np.zeros(new_shape, dtype=tensor.dtype)
1491
+ padded_tensor[IdL, ...] = tensor[0, ...]
1492
+ reconstructed_tensors.append(padded_tensor)
1493
+ elif i == nwires - 1 and len(tensor.shape) > 3 and tensor.shape[3] < chi:
1494
+ new_shape = tensor.shape[:3] + (chi,)
1495
+ padded_tensor = np.zeros(new_shape, dtype=tensor.dtype)
1496
+ padded_tensor[..., IdR] = tensor[..., 0]
1497
+ reconstructed_tensors.append(padded_tensor)
1498
+ else:
1499
+ reconstructed_tensors.append(tensor)
1500
+
1501
+ tenpy_Ws = []
1502
+ for tensor in reconstructed_tensors:
1503
+ labels = ["wL", "wR", "p", "p*"]
1504
+ tensor = np.transpose(tensor, (0, 3, 1, 2))
1505
+ tenpy_Ws.append(
1506
+ npc.Array.from_ndarray(
1507
+ tensor,
1508
+ legcharges=[LegCharge.from_trivial(s) for s in tensor.shape],
1509
+ labels=labels,
1510
+ )
1511
+ )
1512
+
1513
+ return MPO(sites, tenpy_Ws, bc="finite", IdL=IdL, IdR=IdR)
1514
+
1515
+
1122
1516
  def quimb2qop(qb_mpo: Any) -> QuOperator:
1123
1517
  """
1124
1518
  Convert MPO in Quimb package to QuOperator.
@@ -1160,6 +1554,245 @@ def quimb2qop(qb_mpo: Any) -> QuOperator:
1160
1554
  return qop
1161
1555
 
1162
1556
 
1557
+ def qop2quimb(qop: QuOperator) -> Any:
1558
+ """
1559
+ Convert QuOperator to MPO or MPS in Quimb package.
1560
+
1561
+ Requirements: QuOperator must represent valid MPS/MPO structure:
1562
+ - Linear chain topology with open boundaries only
1563
+ - MPS: no input edges, consistent virtual bonds between adjacent tensors
1564
+ - MPO: equal input/output edges, rank-4 tensors
1565
+ - Edge connectivity: each internal node connected to exactly 2 neighbors
1566
+ - Cyclic boundary conditions NOT supported
1567
+
1568
+ :param qop: MPO or MPS in the form of QuOperator
1569
+ :type qop: QuOperator
1570
+ :return: MPO or MPS in the form of Quimb package
1571
+ :rtype: quimb.tensor.tensor_gen.MatrixProductOperator
1572
+ """
1573
+ try:
1574
+ import quimb.tensor as qtn
1575
+ except ImportError:
1576
+ raise ImportError("Please install Quimb package to use this function.")
1577
+
1578
+ sorted_nodes, is_mps, _ = extract_tensors_from_qop(qop)
1579
+
1580
+ quimb_tensors = []
1581
+ node_map = {node: i for i, node in enumerate(sorted_nodes)}
1582
+
1583
+ for i, node in enumerate(sorted_nodes):
1584
+ tensor_data = node.tensor
1585
+ inds: List[str] = []
1586
+
1587
+ for axis, edge in enumerate(node.edges):
1588
+ if edge in qop.out_edges:
1589
+ site_index = qop.out_edges.index(edge)
1590
+ inds.append(f"k{site_index}")
1591
+ elif edge in qop.in_edges and not is_mps:
1592
+ site_index = qop.in_edges.index(edge)
1593
+ inds.append(f"b{site_index}")
1594
+ elif edge in qop.ignore_edges:
1595
+ if i == 0:
1596
+ inds.append("_left_dangling")
1597
+ elif i == len(sorted_nodes) - 1:
1598
+ inds.append("_right_dangling")
1599
+ else:
1600
+ inds.append(f"_ignore_{i}_{axis}")
1601
+ else:
1602
+ neighbor = edge.node1 if edge.node2 is node else edge.node2
1603
+ if neighbor in node_map:
1604
+ j = node_map[neighbor]
1605
+ left, right = min(i, j), max(i, j)
1606
+ inds.append(f"v{left}_{right}")
1607
+ else:
1608
+ inds.append(f"_unconnected_{i}_{axis}")
1609
+
1610
+ quimb_tensors.append(qtn.Tensor(tensor_data, inds=inds, tags=f"I{i}"))
1611
+
1612
+ tn = qtn.TensorNetwork(quimb_tensors)
1613
+
1614
+ if is_mps:
1615
+ return tn.as_network(qtn.MatrixProductState)
1616
+ else:
1617
+ return tn.as_network(qtn.MatrixProductOperator)
1618
+
1619
+
1620
+ def tn2qop(tn_obj: Any) -> QuOperator:
1621
+ """
1622
+ Convert MPO or MPS in TensorNetwork package to QuOperator.
1623
+
1624
+ :param tn_obj: MPO or MPS in the form of TensorNetwork package
1625
+ :type tn_obj: ``tn.matrixproductstates.mpo.*`` or ``tn.FiniteMPS``
1626
+ :return: MPO or MPS in the form of QuOperator
1627
+ :rtype: QuOperator
1628
+ """
1629
+ tn_tensors = tn_obj.tensors
1630
+ nwires = len(tn_tensors)
1631
+
1632
+ if nwires == 0:
1633
+ return quantum_constructor([], [], [])
1634
+
1635
+ is_mps = all(len(t.shape) <= 3 for t in tn_tensors)
1636
+
1637
+ nodes = []
1638
+ for i in range(nwires):
1639
+ nodes.append(Node(tn_tensors[i], name=f"tensor_{i}"))
1640
+
1641
+ if is_mps:
1642
+ for i in range(nwires - 1):
1643
+ connect(nodes[i][-1], nodes[i + 1][0])
1644
+
1645
+ out_edges = []
1646
+ for i, node in enumerate(nodes):
1647
+ if len(node.edges) == 2:
1648
+ physical_edge = next(e for e in node.edges if e.is_dangling())
1649
+ out_edges.append(physical_edge)
1650
+ else:
1651
+ out_edges.append(node[1])
1652
+
1653
+ in_edges = []
1654
+
1655
+ ignore_edges = []
1656
+ left_dangling = next(
1657
+ (e for e in nodes[0].edges if e.is_dangling() and e not in out_edges), None
1658
+ )
1659
+ right_dangling = next(
1660
+ (e for e in nodes[-1].edges if e.is_dangling() and e not in out_edges), None
1661
+ )
1662
+
1663
+ if left_dangling:
1664
+ ignore_edges.append(left_dangling)
1665
+ if right_dangling:
1666
+ ignore_edges.append(right_dangling)
1667
+
1668
+ else:
1669
+ for i in range(nwires - 1):
1670
+ connect(nodes[i][1], nodes[i + 1][0])
1671
+
1672
+ out_edges = [nodes[i][-1] for i in range(nwires)]
1673
+ in_edges = [nodes[i][-2] for i in range(nwires)]
1674
+ ignore_edges = [nodes[0][0], nodes[-1][1]]
1675
+
1676
+ qop = quantum_constructor(
1677
+ out_edges,
1678
+ in_edges,
1679
+ [],
1680
+ ignore_edges,
1681
+ )
1682
+ return qop
1683
+
1684
+
1685
+ def qop2tn(qop: QuOperator) -> Any:
1686
+ """
1687
+ Convert QuOperator back to MPO or MPS in TensorNetwork package.
1688
+
1689
+ :param qop: MPO or MPS in the form of QuOperator
1690
+ :return: MPO or MPS in the form of TensorNetwork
1691
+ :rtype: Union[tn.FiniteMPS, tn.matrixproductstates.mpo.FiniteMPO]
1692
+ """
1693
+ sorted_nodes, is_mps, _ = extract_tensors_from_qop(qop)
1694
+
1695
+ tensors = [node.tensor for node in sorted_nodes]
1696
+
1697
+ if is_mps:
1698
+ return tn.FiniteMPS(tensors, canonicalize=False)
1699
+ else:
1700
+ return tn.matrixproductstates.mpo.FiniteMPO(tensors)
1701
+
1702
+
1703
+ # TODO(@refraction-ray): Z2 analogy or more general analogies for the following u1 functions
1704
+
1705
+
1706
+ def u1_inds(n: int, m: int) -> Tensor:
1707
+ """
1708
+ Generate all combination indices of m down spins in n sites.
1709
+
1710
+ .. code-block:: python
1711
+
1712
+ print(u1_inds(5, 1))
1713
+ # [1, 2, 4, 8, 16]
1714
+
1715
+
1716
+ :param n: number of total sites
1717
+ :type n: int
1718
+ :param m: number of down spins (1 in 0, 1)
1719
+ :type m: int
1720
+ :return: index tensor
1721
+ :rtype: Tensor
1722
+ """
1723
+ # m down spins
1724
+ num_combinations = math.comb(n, m)
1725
+ inds = np.zeros([num_combinations], dtype="int64")
1726
+ if m == 0:
1727
+ inds[0] = 0
1728
+ return inds
1729
+ combination = (1 << m) - 1
1730
+
1731
+ for i in range(num_combinations):
1732
+ inds[i] = combination
1733
+
1734
+ # Find the next combination using Gosper's Hack
1735
+ u = combination & -combination
1736
+ v = u + combination
1737
+ combination = v + (((v ^ combination) // u) >> 2)
1738
+ return backend.convert_to_tensor(inds)
1739
+
1740
+
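# A short pure-Python sketch of the Gosper's-hack enumeration used in u1_inds
# (illustrative helper name, standard library only).
import math

def u1_inds_py(n: int, m: int) -> list:
    if m == 0:
        return [0]
    out, c = [], (1 << m) - 1          # smallest integer with m bits set
    for _ in range(math.comb(n, m)):
        out.append(c)
        u = c & -c                      # lowest set bit
        v = u + c
        c = v + (((v ^ c) // u) >> 2)   # next integer with the same popcount
    return out

# u1_inds_py(5, 1) -> [1, 2, 4, 8, 16]   (matches the docstring example above)
# u1_inds_py(4, 2) -> [3, 5, 6, 9, 10, 12]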
1741
+ def u1_mask(n: int, m: int) -> Tensor:
1742
+ """
1743
+ Return a 1d array of size 2**n filled with zeros,
1744
+ with ones only at the elements corresponding to configurations with m down spins
1745
+
1746
+ :param n: number of total sites
1747
+ :type n: int
1748
+ :param m: number of down spins (1 in 0, 1)
1749
+ :type m: int
1750
+ :return: mask tensor of size 2**n
1751
+ :rtype: Tensor
1752
+ """
1753
+ inds = u1_inds(n, m)
1754
+ m = backend.scatter(
1755
+ backend.zeros([2**n]),
1756
+ backend.reshape(inds, [-1, 1]),
1757
+ backend.ones([math.comb(n, m)]),
1758
+ )
1759
+ return m
1760
+
1761
+
1762
+ def u1_project(s: Tensor, n: int, m: int) -> Tensor:
1763
+ """
1764
+ Project a state s to the subspace with m down spins in n sites
1765
+
1766
+ :param s: input state of size 2**n
1767
+ :type s: Tensor
1768
+ :param n: number of total sites
1769
+ :type n: int
1770
+ :param m: number of down spins (1 in 0, 1)
1771
+ :type m: int
1772
+ :return: projected state of size C_n^m
1773
+ :rtype: Tensor
1774
+ """
1775
+ return backend.gather1d(s, u1_inds(n, m))
1776
+
1777
+
1778
+ def u1_enlarge(s: Tensor, n: int, m: int) -> Tensor:
1779
+ """
1780
+ Enlarge a state s in the subspace with m down spins in n sites to
1781
+ the full Hilbert space wavefunction of size 2**n
1782
+
1783
+ :param s: input state of size C_n^m
1784
+ :type s: Tensor
1785
+ :param n: number of total sites
1786
+ :type n: int
1787
+ :param m: number of down spins (1 in 0, 1)
1788
+ :type m: int
1789
+ :return: enlarged state of size 2**n
1790
+ :rtype: Tensor
1791
+ """
1792
+ inds = u1_inds(n, m)
1793
+ return backend.scatter(backend.zeros([2**n]), backend.reshape(inds, [-1, 1]), s)
1794
+
1795
+
1163
1796
  def heisenberg_hamiltonian(
1164
1797
  g: Graph,
1165
1798
  hzz: float = 1.0,
@@ -1281,7 +1914,7 @@ def PauliStringSum2Dense(
1281
1914
  return sparsem.todense()
1282
1915
  sparsem = backend.coo_sparse_matrix_from_numpy(sparsem)
1283
1916
  densem = backend.to_dense(sparsem)
1284
- return densem
1917
+ return backend.convert_to_tensor(densem)
1285
1918
 
1286
1919
 
1287
1920
  # already implemented as backend method
@@ -1603,7 +2236,10 @@ def entanglement_entropy(state: Tensor, cut: Union[int, List[int]]) -> Tensor:
1603
2236
 
1604
2237
 
1605
2238
  def reduced_wavefunction(
1606
- state: Tensor, cut: List[int], measure: Optional[List[int]] = None
2239
+ state: Tensor,
2240
+ cut: List[int],
2241
+ measure: Optional[List[int]] = None,
2242
+ dim: Optional[int] = None,
1607
2243
  ) -> Tensor:
1608
2244
  """
1609
2245
  Compute the reduced wavefunction from the quantum state ``state``.
@@ -1618,20 +2254,22 @@ def reduced_wavefunction(
1618
2254
  :type measure: List[int]
1619
2255
  :return: _description_
1620
2256
  :rtype: Tensor
2257
+ :param dim: dimension of qudit system
2258
+ :type dim: int
1621
2259
  """
2260
+ dim = 2 if dim is None else dim
1622
2261
  if measure is None:
1623
2262
  measure = [0 for _ in cut]
1624
- s = backend.reshape2(state)
2263
+ s = backend.reshaped(state, dim)
1625
2264
  n = len(backend.shape_tuple(s))
1626
2265
  s_node = Gate(s)
1627
2266
  end_nodes = []
1628
2267
  for c, m in zip(cut, measure):
1629
- rt = backend.cast(backend.convert_to_tensor(1 - m), dtypestr) * backend.cast(
1630
- backend.convert_to_tensor(np.array([1.0, 0.0])), dtypestr
1631
- ) + backend.cast(backend.convert_to_tensor(m), dtypestr) * backend.cast(
1632
- backend.convert_to_tensor(np.array([0.0, 1.0])), dtypestr
2268
+ oh = backend.cast(
2269
+ backend.one_hot(backend.cast(backend.convert_to_tensor(m), "int32"), dim),
2270
+ dtypestr,
1633
2271
  )
1634
- end_node = Gate(rt)
2272
+ end_node = Gate(backend.convert_to_tensor(oh))
1635
2273
  end_nodes.append(end_node)
1636
2274
  s_node[c] ^ end_node[0]
1637
2275
  new_node = contractor(
@@ -1646,8 +2284,9 @@ def reduced_density_matrix(
1646
2284
  cut: Union[int, List[int]],
1647
2285
  p: Optional[Tensor] = None,
1648
2286
  normalize: bool = True,
2287
+ dim: Optional[int] = None,
1649
2288
  ) -> Union[Tensor, QuOperator]:
1650
- """
2289
+ r"""
1651
2290
  Compute the reduced density matrix from the quantum state ``state``.
1652
2291
 
1653
2292
  :param state: The quantum state in form of Tensor or QuOperator.
@@ -1659,8 +2298,12 @@ def reduced_density_matrix(
1659
2298
  :type p: Optional[Tensor]
1660
2299
  :return: The reduced density matrix.
1661
2300
  :rtype: Union[Tensor, QuOperator]
1662
- :normalize: if True, returns a trace 1 density matrix. Otherwise does not normalize.
2301
+ :param normalize: if True, returns a trace 1 density matrix. Otherwise, does not normalize.
2302
+ :type normalize: bool
2303
+ :param dim: dimension of qudit system
2304
+ :type dim: int
1663
2305
  """
2306
+ dim = 2 if dim is None else dim
1664
2307
  if isinstance(cut, list) or isinstance(cut, tuple) or isinstance(cut, set):
1665
2308
  traceout = list(cut)
1666
2309
  else:
@@ -1673,21 +2316,19 @@ def reduced_density_matrix(
1673
2316
  return state.partial_trace(traceout)
1674
2317
  if len(state.shape) == 2 and state.shape[0] == state.shape[1]:
1675
2318
  # density operator
1676
- freedomexp = backend.sizen(state)
1677
- # traceout = sorted(traceout)[::-1]
1678
- freedom = int(np.log2(freedomexp) / 2)
1679
- # traceout2 = [i + freedom for i in traceout]
2319
+ freedom = _infer_num_sites(state.shape[0], dim)
1680
2320
  left = traceout + [i for i in range(freedom) if i not in traceout]
1681
2321
  right = [i + freedom for i in left]
1682
- rho = backend.reshape(state, [2 for _ in range(2 * freedom)])
2322
+
2323
+ rho = backend.reshape(state, [dim] * (2 * freedom))
1683
2324
  rho = backend.transpose(rho, perm=left + right)
1684
2325
  rho = backend.reshape(
1685
2326
  rho,
1686
2327
  [
1687
- 2 ** len(traceout),
1688
- 2 ** (freedom - len(traceout)),
1689
- 2 ** len(traceout),
1690
- 2 ** (freedom - len(traceout)),
2328
+ dim ** len(traceout),
2329
+ dim ** (freedom - len(traceout)),
2330
+ dim ** len(traceout),
2331
+ dim ** (freedom - len(traceout)),
1691
2332
  ],
1692
2333
  )
1693
2334
  if p is None:
@@ -1700,20 +2341,20 @@ def reduced_density_matrix(
1700
2341
  p = backend.reshape(p, [-1])
1701
2342
  rho = backend.einsum("a,aiaj->ij", p, rho)
1702
2343
  rho = backend.reshape(
1703
- rho, [2 ** (freedom - len(traceout)), 2 ** (freedom - len(traceout))]
2344
+ rho, [dim ** (freedom - len(traceout)), dim ** (freedom - len(traceout))]
1704
2345
  )
1705
2346
  if normalize:
1706
2347
  rho /= backend.trace(rho)
1707
2348
 
1708
2349
  else:
1709
2350
  w = state / backend.norm(state)
1710
- freedomexp = backend.sizen(state)
1711
- freedom = int(np.log(freedomexp) / np.log(2))
2351
+ size = int(backend.sizen(state))
2352
+ freedom = _infer_num_sites(size, dim)
1712
2353
  perm = [i for i in range(freedom) if i not in traceout]
1713
2354
  perm = perm + traceout
1714
- w = backend.reshape(w, [2 for _ in range(freedom)])
2355
+ w = backend.reshape(w, [dim for _ in range(freedom)])
1715
2356
  w = backend.transpose(w, perm=perm)
1716
- w = backend.reshape(w, [-1, 2 ** len(traceout)])
2357
+ w = backend.reshape(w, [-1, dim ** len(traceout)])
1717
2358
  if p is None:
1718
2359
  rho = w @ backend.adjoint(w)
1719
2360
  else:
@@ -1762,13 +2403,13 @@ def free_energy(
1762
2403
 
1763
2404
  def renyi_entropy(rho: Union[Tensor, QuOperator], k: int = 2) -> Tensor:
1764
2405
  """
1765
- Compute the Rényi entropy of order :math:`k` by given density matrix.
2406
+ Compute the Renyi entropy of order :math:`k` by given density matrix.
1766
2407
 
1767
2408
  :param rho: The density matrix in form of Tensor or QuOperator.
1768
2409
  :type rho: Union[Tensor, QuOperator]
1769
- :param k: The order of Rényi entropy, default is 2.
2410
+ :param k: The order of Renyi entropy, default is 2.
1770
2411
  :type k: int, optional
1771
- :return: The :math:`k` th order of Rényi entropy.
2412
+ :return: The :math:`k` th order of Renyi entropy.
1772
2413
  :rtype: Tensor
1773
2414
  """
1774
2415
  s = 1 / (1 - k) * backend.real(backend.log(trace_product(*[rho for _ in range(k)])))
@@ -1782,7 +2423,7 @@ def renyi_free_energy(
1782
2423
  k: int = 2,
1783
2424
  ) -> Tensor:
1784
2425
  """
1785
- Compute the Rényi free energy of the corresponding density matrix and Hamiltonian.
2426
+ Compute the Renyi free energy of the corresponding density matrix and Hamiltonian.
1786
2427
 
1787
2428
  :Example:
1788
2429
 
@@ -1799,9 +2440,9 @@ def renyi_free_energy(
1799
2440
  :type h: Union[Tensor, QuOperator]
1800
2441
  :param beta: Constant for the optimization, default is 1.
1801
2442
  :type beta: float, optional
1802
- :param k: The order of Rényi entropy, default is 2.
2443
+ :param k: The order of Renyi entropy, default is 2.
1803
2444
  :type k: int, optional
1804
- :return: The :math:`k` th order of Rényi entropy.
2445
+ :return: The :math:`k` th order of Renyi entropy.
1805
2446
  :rtype: Tensor
1806
2447
  """
1807
2448
  energy = backend.real(trace_product(rho, h))
@@ -1856,7 +2497,9 @@ def truncated_free_energy(
1856
2497
 
1857
2498
 
1858
2499
  @op2tensor
1859
- def partial_transpose(rho: Tensor, transposed_sites: List[int]) -> Tensor:
2500
+ def partial_transpose(
2501
+ rho: Tensor, transposed_sites: List[int], dim: Optional[int] = None
2502
+ ) -> Tensor:
1860
2503
  """
1861
2504
  _summary_
1862
2505
 
@@ -1864,10 +2507,13 @@ def partial_transpose(rho: Tensor, transposed_sites: List[int]) -> Tensor:
1864
2507
  :type rho: Tensor
1865
2508
  :param transposed_sites: sites int list to be transposed
1866
2509
  :type transposed_sites: List[int]
2510
+ :param dim: dimension of qudit system
2511
+ :type dim: int
1867
2512
  :return: _description_
1868
2513
  :rtype: Tensor
1869
2514
  """
1870
- rho = backend.reshape2(rho)
2515
+ dim = 2 if dim is None else dim
2516
+ rho = backend.reshaped(rho, dim)
1871
2517
  rho_node = Gate(rho)
1872
2518
  n = len(rho.shape) // 2
1873
2519
  left_edges = []
@@ -1885,7 +2531,9 @@ def partial_transpose(rho: Tensor, transposed_sites: List[int]) -> Tensor:
1885
2531
 
1886
2532
 
1887
2533
  @op2tensor
1888
- def entanglement_negativity(rho: Tensor, transposed_sites: List[int]) -> Tensor:
2534
+ def entanglement_negativity(
2535
+ rho: Tensor, transposed_sites: List[int], dim: Optional[int] = None
2536
+ ) -> Tensor:
1889
2537
  """
1890
2538
  _summary_
1891
2539
 
@@ -1893,17 +2541,21 @@ def entanglement_negativity(rho: Tensor, transposed_sites: List[int]) -> Tensor:
1893
2541
  :type rho: Tensor
1894
2542
  :param transposed_sites: _description_
1895
2543
  :type transposed_sites: List[int]
2544
+ :param dim: dimension of qudit system
2545
+ :type dim: int
1896
2546
  :return: _description_
1897
2547
  :rtype: Tensor
1898
2548
  """
1899
- rhot = partial_transpose(rho, transposed_sites)
2549
+ rhot = partial_transpose(rho, transposed_sites, dim=dim)
1900
2550
  es = backend.eigvalsh(rhot)
1901
2551
  rhot_m = backend.sum(backend.abs(es))
1902
2552
  return (rhot_m - 1.0) / 2.0
1903
2553
 
1904
2554
 
1905
2555
  @op2tensor
1906
- def log_negativity(rho: Tensor, transposed_sites: List[int], base: str = "e") -> Tensor:
2556
+ def log_negativity(
2557
+ rho: Tensor, transposed_sites: List[int], base: str = "e", dim: Optional[int] = None
2558
+ ) -> Tensor:
1907
2559
  """
1908
2560
  _summary_
1909
2561
 
@@ -1913,10 +2565,13 @@ def log_negativity(rho: Tensor, transposed_sites: List[int], base: str = "e") ->
1913
2565
  :type transposed_sites: List[int]
1914
2566
  :param base: whether use 2 based log or e based log, defaults to "e"
1915
2567
  :type base: str, optional
2568
+ :param dim: dimension of qudit system
2569
+ :type dim: int
1916
2570
  :return: _description_
1917
2571
  :rtype: Tensor
1918
2572
  """
1919
- rhot = partial_transpose(rho, transposed_sites)
2573
+ dim = 2 if dim is None else dim
2574
+ rhot = partial_transpose(rho, transposed_sites, dim)
1920
2575
  es = backend.eigvalsh(rhot)
1921
2576
  rhot_m = backend.sum(backend.abs(es))
1922
2577
  een = backend.log(rhot_m)
@@ -2002,7 +2657,9 @@ def double_state(h: Tensor, beta: float = 1) -> Tensor:
2002
2657
 
2003
2658
 
2004
2659
  @op2tensor
2005
- def mutual_information(s: Tensor, cut: Union[int, List[int]]) -> Tensor:
2660
+ def mutual_information(
2661
+ s: Tensor, cut: Union[int, List[int]], dim: Optional[int] = None
2662
+ ) -> Tensor:
2006
2663
  """
2007
2664
  Mutual information between AB subsystem described by ``cut``.
2008
2665
 
@@ -2010,9 +2667,12 @@ def mutual_information(s: Tensor, cut: Union[int, List[int]]) -> Tensor:
2010
2667
  :type s: Tensor
2011
2668
  :param cut: The AB subsystem.
2012
2669
  :type cut: Union[int, List[int]]
2670
+ :param dim: local dimension of each site (2 for qubits), defaults to 2
2671
+ :type dim: int, optional
2013
2672
  :return: The mutual information between AB subsystem described by ``cut``.
2014
2673
  :rtype: Tensor
2015
2674
  """
2675
+ dim = 2 if dim is None else dim
2016
2676
  if isinstance(cut, list) or isinstance(cut, tuple) or isinstance(cut, set):
2017
2677
  traceout = list(cut)
2018
2678
  else:
@@ -2020,22 +2680,22 @@ def mutual_information(s: Tensor, cut: Union[int, List[int]]) -> Tensor:
2020
2680
 
2021
2681
  if len(s.shape) == 2 and s.shape[0] == s.shape[1]:
2022
2682
  # mixed state
2023
- n = int(np.log2(backend.sizen(s)) / 2)
2683
+ n = _infer_num_sites(s.shape[0], dim=dim)
2024
2684
  hab = entropy(s)
2025
2685
 
2026
2686
  # subsystem a
2027
- rhoa = reduced_density_matrix(s, traceout)
2687
+ rhoa = reduced_density_matrix(s, traceout, dim=dim)
2028
2688
  ha = entropy(rhoa)
2029
2689
 
2030
2690
  # need subsystem b as well
2031
2691
  other = tuple(i for i in range(n) if i not in traceout)
2032
- rhob = reduced_density_matrix(s, other) # type: ignore
2692
+ rhob = reduced_density_matrix(s, other, dim=dim) # type: ignore
2033
2693
  hb = entropy(rhob)
2034
2694
 
2035
2695
  # pure system
2036
2696
  else:
2037
2697
  hab = 0.0
2038
- rhoa = reduced_density_matrix(s, traceout)
2698
+ rhoa = reduced_density_matrix(s, traceout, dim=dim)
2039
2699
  ha = hb = entropy(rhoa)
2040
2700
 
2041
2701
  return ha + hb - hab
@@ -2044,7 +2704,9 @@ def mutual_information(s: Tensor, cut: Union[int, List[int]]) -> Tensor:
2044
2704
  # measurement results and transformations and correlations below
2045
2705
 
2046
2706
 
2047
- def count_s2d(srepr: Tuple[Tensor, Tensor], n: int) -> Tensor:
2707
+ def count_s2d(
2708
+ srepr: Tuple[Tensor, Tensor], n: int, dim: Optional[int] = None
2709
+ ) -> Tensor:
2048
2710
  """
2049
2711
  measurement shots results, sparse tuple representation to dense representation
2050
2712
  count_vector to count_tuple
@@ -2053,11 +2715,14 @@ def count_s2d(srepr: Tuple[Tensor, Tensor], n: int) -> Tensor:
2053
2715
  :type srepr: Tuple[Tensor, Tensor]
2054
2716
  :param n: number of qubits
2055
2717
  :type n: int
2718
+ :param dim: local dimension of each site, defaults to None (treated as 2)
2719
+ :type dim: int, optional
2056
2720
  :return: [description]
2057
2721
  :rtype: Tensor
2058
2722
  """
2723
+ dim = 2 if dim is None else dim
2059
2724
  return backend.scatter(
2060
- backend.cast(backend.zeros([2**n]), srepr[1].dtype),
2725
+ backend.cast(backend.zeros([dim**n]), srepr[1].dtype),
2061
2726
  backend.reshape(srepr[0], [-1, 1]),
2062
2727
  srepr[1],
2063
2728
  )
@@ -2100,117 +2765,146 @@ def count_d2s(drepr: Tensor, eps: float = 1e-7) -> Tuple[Tensor, Tensor]:
2100
2765
  count_t2v = count_d2s
2101
2766
 
2102
2767
 
2103
- def sample_int2bin(sample: Tensor, n: int) -> Tensor:
2768
+ def sample_int2bin(sample: Tensor, n: int, dim: Optional[int] = None) -> Tensor:
2104
2769
  """
2105
- int sample to bin sample
2770
+ Convert linear-index samples to per-site digits (base-d).
2106
2771
 
2107
- :param sample: in shape [trials] of int elements in the range [0, 2**n)
2772
+ :param sample: shape [trials], integers in [0, d**n)
2108
2773
  :type sample: Tensor
2109
- :param n: number of qubits
2774
+ :param n: number of sites
2110
2775
  :type n: int
2111
- :return: in shape [trials, n] of element (0, 1)
2776
+ :param dim: local dimension, defaults to 2
2777
+ :type dim: int, optional
2778
+ :return: shape [trials, n], entries in [0, d-1]
2112
2779
  :rtype: Tensor
2113
2780
  """
2114
- confg = backend.mod(
2115
- backend.right_shift(sample[..., None], backend.reverse(backend.arange(n))),
2116
- 2,
2117
- )
2118
- return confg
2781
+ dim = 2 if dim is None else dim
2782
+ if dim == 2:
2783
+ return backend.mod(
2784
+ backend.right_shift(sample[..., None], backend.reverse(backend.arange(n))),
2785
+ 2,
2786
+ )
2787
+ else:
2788
+ pos = backend.reverse(backend.arange(n))
2789
+ base = backend.power(dim, pos)
2790
+ digits = backend.mod(
2791
+ backend.floor_divide(sample[..., None], base), # ⌊sample / d**pos⌋
2792
+ dim,
2793
+ )
2794
+ return backend.cast(digits, "int32")
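# A minimal NumPy sketch of the base-d digit decomposition implemented in the
# branch above (most significant site first); the helper name is illustrative.
import numpy as np

def int2digits(sample: np.ndarray, n: int, d: int = 2) -> np.ndarray:
    pos = np.arange(n)[::-1]                  # exponents n-1, ..., 1, 0
    return (sample[..., None] // d**pos) % d

# int2digits(np.array([5]), n=3, d=2) -> [[1, 0, 1]]
# int2digits(np.array([5]), n=3, d=3) -> [[0, 1, 2]]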
2119
2795
 
2120
2796
 
2121
- def sample_bin2int(sample: Tensor, n: int) -> Tensor:
2797
+ def sample_bin2int(sample: Tensor, n: int, dim: Optional[int] = None) -> Tensor:
2122
2798
  """
2123
2799
  bin sample to int sample
2124
2800
 
2125
2801
  :param sample: in shape [trials, n] of elements (0, 1)
2126
2802
  :type sample: Tensor
2127
- :param n: number of qubits
2803
+ :param n: number of sites
2128
2804
  :type n: int
2805
+ :param dim: local dimension, defaults to 2
2806
+ :type dim: int, optional
2129
2807
  :return: in shape [trials]
2130
2808
  :rtype: Tensor
2131
2809
  """
2132
- power = backend.convert_to_tensor([2**j for j in reversed(range(n))])
2810
+ dim = 2 if dim is None else dim
2811
+ power = backend.convert_to_tensor([dim**j for j in reversed(range(n))])
2133
2812
  return backend.sum(sample * power, axis=-1)
2134
2813
 
2135
2814
 
2136
2815
  def sample2count(
2137
- sample: Tensor, n: int, jittable: bool = True
2816
+ sample: Tensor,
2817
+ n: int,
2818
+ jittable: bool = True,
2819
+ dim: Optional[int] = None,
2138
2820
  ) -> Tuple[Tensor, Tensor]:
2139
2821
  """
2140
- sample_int to count_tuple
2822
+ sample_int to count_tuple (indices, counts), size = d**n
2141
2823
 
2142
- :param sample: _description_
2824
+ :param sample: linear-index samples, shape [shots]
2143
2825
  :type sample: Tensor
2144
- :param n: _description_
2826
+ :param n: number of sites
2145
2827
  :type n: int
2146
- :param jittable: _description_, defaults to True
2147
- :type jittable: bool, optional
2148
- :return: _description_
2828
+ :param jittable: whether to return fixed-size outputs (backend dependent)
2829
+ :type jittable: bool
2830
+ :param dim: local dimension per site, default 2 (qubit)
2831
+ :type dim: int, optional
2832
+ :return: (unique_indices, counts)
2149
2833
  :rtype: Tuple[Tensor, Tensor]
2150
2834
  """
2151
- d = 2**n
2835
+ dim = 2 if dim is None else dim
2836
+ size = dim**n
2152
2837
  if not jittable:
2153
2838
  results = backend.unique_with_counts(sample) # non-jittable
2154
- else: # jax specified
2155
- results = backend.unique_with_counts(sample, size=d, fill_value=-1)
2839
+ else: # jax specified / fixed-size
2840
+ results = backend.unique_with_counts(sample, size=size, fill_value=-1)
2156
2841
  return results
2157
2842
 
2158
2843
 
2159
- def count_vector2dict(count: Tensor, n: int, key: str = "bin") -> Dict[Any, int]:
2844
+ def count_vector2dict(
2845
+ count: Tensor, n: int, key: str = "bin", dim: Optional[int] = None
2846
+ ) -> Dict[Any, int]:
2160
2847
  """
2161
- convert_vector to count_dict_bin or count_dict_int
2848
+ Convert count_vector to count_dict_bin or count_dict_int.
2849
+ For d>10 cases, a base-d string (0-9A-Z) is used.
2162
2850
 
2163
- :param count: tensor in shape [2**n]
2851
+ :param count: tensor in shape [d**n]
2164
2852
  :type count: Tensor
2165
- :param n: number of qubits
2853
+ :param n: number of sites
2166
2854
  :type n: int
2167
2855
  :param key: can be "int" or "bin", defaults to "bin"
2168
2856
  :type key: str, optional
2169
- :return: _description_
2170
- :rtype: _type_
2857
+ :param dim: local dimension (default 2)
2858
+ :type dim: int, optional
2859
+ :return: mapping from configuration to count
2860
+ :rtype: Dict[Any, int]
2171
2861
  """
2172
2862
  from .interfaces import which_backend
2173
2863
 
2864
+ dim = 2 if dim is None else dim
2174
2865
  b = which_backend(count)
2175
- d = {i: b.numpy(count[i]).item() for i in range(2**n)}
2866
+ out_int = {i: b.numpy(count[i]).item() for i in range(dim**n)}
2176
2867
  if key == "int":
2177
- return d
2868
+ return out_int
2178
2869
  else:
2179
- dn = {}
2180
- for k, v in d.items():
2181
- kn = str(bin(k))[2:].zfill(n)
2182
- dn[kn] = v
2183
- return dn
2870
+ out_str = {}
2871
+ for k, v in out_int.items():
2872
+ kn = np.base_repr(k, base=dim).zfill(n)
2873
+ out_str[kn] = v
2874
+ return out_str
2184
2875
 
2185
2876
 
2186
2877
  def count_tuple2dict(
2187
- count: Tuple[Tensor, Tensor], n: int, key: str = "bin"
2878
+ count: Tuple[Tensor, Tensor], n: int, key: str = "bin", dim: Optional[int] = None
2188
2879
  ) -> Dict[Any, int]:
2189
2880
  """
2190
2881
  count_tuple to count_dict_bin or count_dict_int
2191
2882
 
2192
- :param count: count_tuple format
2883
+ :param count: count_tuple format (indices, counts)
2193
2884
  :type count: Tuple[Tensor, Tensor]
2194
- :param n: number of qubits
2885
+ :param n: number of sites (qubits or qudits)
2195
2886
  :type n: int
2196
2887
  :param key: can be "int" or "bin", defaults to "bin"
2197
2888
  :type key: str, optional
2889
+ :param dim: local dimension, defaults to 2
2890
+ :type dim: int, optional
2198
2891
  :return: count_dict
2199
- :rtype: _type_
2892
+ :rtype: Dict[Any, int]
2200
2893
  """
2201
- d = {
2894
+ dim = 2 if dim is None else dim
2895
+ out_int = {
2202
2896
  backend.numpy(i).item(): backend.numpy(j).item()
2203
2897
  for i, j in zip(count[0], count[1])
2204
2898
  if i >= 0
2205
2899
  }
2206
2900
  if key == "int":
2207
- return d
2901
+ return out_int
2208
2902
  else:
2209
- dn = {}
2210
- for k, v in d.items():
2211
- kn = str(bin(k))[2:].zfill(n)
2212
- dn[kn] = v
2213
- return dn
2903
+ out_str = {}
2904
+ for k, v in out_int.items():
2905
+ kn = np.base_repr(k, base=dim).zfill(n)
2906
+ out_str[kn] = v
2907
+ return out_str
2214
2908
 
2215
2909
 
2216
2910
  @partial(arg_alias, alias_dict={"counts": ["shots"], "format": ["format_"]})
@@ -2222,8 +2916,9 @@ def measurement_counts(
2222
2916
  random_generator: Optional[Any] = None,
2223
2917
  status: Optional[Tensor] = None,
2224
2918
  jittable: bool = False,
2919
+ dim: Optional[int] = None,
2225
2920
  ) -> Any:
2226
- """
2921
+ r"""
2227
2922
  Simulate the measuring of each qubit of ``p`` in the computational basis,
2228
2923
  thus producing output like that of ``qiskit``.
2229
2924
 
@@ -2238,6 +2933,7 @@ def measurement_counts(
2238
2933
  "count_tuple": # (np.array([0]), np.array([2]))
2239
2934
 
2240
2935
  "count_dict_bin": # {"00": 2, "01": 0, "10": 0, "11": 0}
2936
+ (for d up to 36, digit values 10, ..., 35 are written as "A", ..., "Z")
2241
2937
 
2242
2938
  "count_dict_int": # {0: 2, 1: 0, 2: 0, 3: 0}
2243
2939
 
@@ -2289,21 +2985,22 @@ def measurement_counts(
2289
2985
  state /= backend.norm(state)
2290
2986
  pi = backend.real(backend.conj(state) * state)
2291
2987
  pi = backend.reshape(pi, [-1])
2292
- d = int(backend.shape_tuple(pi)[0])
2293
- n = int(np.log(d) / np.log(2) + 1e-8)
2988
+
2989
+ local_d = 2 if dim is None else dim
2990
+ total_dim = int(backend.shape_tuple(pi)[0])
2991
+ n = _infer_num_sites(total_dim, local_d)
2992
+
2294
2993
  if (counts is None) or counts <= 0:
2295
2994
  if format == "count_vector":
2296
2995
  return pi
2297
2996
  elif format == "count_tuple":
2298
2997
  return count_d2s(pi)
2299
2998
  elif format == "count_dict_bin":
2300
- return count_vector2dict(pi, n, key="bin")
2999
+ return count_vector2dict(pi, n, key="bin", dim=local_d)
2301
3000
  elif format == "count_dict_int":
2302
- return count_vector2dict(pi, n, key="int")
3001
+ return count_vector2dict(pi, n, key="int", dim=local_d)
2303
3002
  else:
2304
- raise ValueError(
2305
- "unsupported format %s for analytical measurement" % format
2306
- )
3003
+ raise ValueError(f"unsupported format {format} for analytical measurement")
2307
3004
  else:
2308
3005
  raw_counts = backend.probability_sample(
2309
3006
  counts, pi, status=status, g=random_generator
@@ -2314,7 +3011,7 @@ def measurement_counts(
2314
3011
  # raw_counts = backend.stateful_randc(
2315
3012
  # random_generator, a=drange, shape=counts, p=pi
2316
3013
  # )
2317
- return sample2all(raw_counts, n, format=format, jittable=jittable)
3014
+ return sample2all(raw_counts, n, format=format, jittable=jittable, dim=local_d)
2318
3015
 
2319
3016
 
2320
3017
  measurement_results = measurement_counts
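# A hedged usage sketch for the new ``dim`` argument of measurement_counts
# (qutrit example, d=3, two sites); the call below is assumed from the updated
# signature and is not taken from the package's own examples.
#
#   import numpy as np
#   import tensorcircuit as tc
#   psi = np.zeros(9); psi[5] = 1.0   # the base-3 basis state |12>
#   tc.quantum.measurement_counts(psi, counts=-1, format="count_dict_bin", dim=3)
#   # -> {"00": 0.0, ..., "12": 1.0, ..., "22": 0.0}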
@@ -2322,45 +3019,64 @@ measurement_results = measurement_counts
2322
3019
 
2323
3020
  @partial(arg_alias, alias_dict={"format": ["format_"]})
2324
3021
  def sample2all(
2325
- sample: Tensor, n: int, format: str = "count_vector", jittable: bool = False
3022
+ sample: Tensor,
3023
+ n: int,
3024
+ format: str = "count_vector",
3025
+ jittable: bool = False,
3026
+ dim: Optional[int] = None,
2326
3027
  ) -> Any:
2327
3028
  """
2328
- transform ``sample_int`` or ``sample_bin`` form results to other forms specified by ``format``
3029
+ transform ``sample_int`` or ``sample_bin`` results to other forms specified by ``format``
2329
3030
 
2330
- :param sample: measurement shots results in ``sample_int`` or ``sample_bin`` format
3031
+ :param sample: measurement shots results in ``sample_int`` (shape [shots]) or ``sample_bin`` (shape [shots, n])
2331
3032
  :type sample: Tensor
2332
- :param n: number of qubits
3033
+ :param n: number of sites
2333
3034
  :type n: int
2334
- :param format: see the doc in the doc in :py:meth:`tensorcircuit.quantum.measurement_results`,
2335
- defaults to "count_vector"
3035
+ :param format: see :py:meth:`tensorcircuit.quantum.measurement_results`, defaults to "count_vector"
2336
3036
  :type format: str, optional
2337
3037
  :param jittable: only applicable to count transformation in jax backend, defaults to False
2338
3038
  :type jittable: bool, optional
3039
+ :param dim: local dimension (2 for qubit; >2 for qudit), defaults to 2
3040
+ :type dim: Optional[int]
2339
3041
  :return: measurement results specified as ``format``
2340
3042
  :rtype: Any
2341
3043
  """
3044
+ dim = 2 if dim is None else int(dim)
3045
+ n_max_d = int(32 / np.log2(dim))
3046
+ if n > n_max_d:
3047
+ assert (
3048
+ len(backend.shape_tuple(sample)) == 2
3049
+ ), f"n>{n_max_d} is only supported for ``sample_bin``"
3050
+ if format == "sample_bin":
3051
+ return sample
3052
+ if format == "count_dict_bin":
3053
+ binary_strings = ["".join(map(str, shot)) for shot in sample]
3054
+ return dict(Counter(binary_strings))
3055
+ raise ValueError(f"n={n} is too large for measurement representaion: {format}")
3056
+
2342
3057
  if len(backend.shape_tuple(sample)) == 1:
2343
3058
  sample_int = sample
2344
- sample_bin = sample_int2bin(sample, n)
3059
+ sample_bin = sample_int2bin(sample, n, dim=dim)
2345
3060
  elif len(backend.shape_tuple(sample)) == 2:
2346
- sample_int = sample_bin2int(sample, n)
3061
+ sample_int = sample_bin2int(sample, n, dim=dim)
2347
3062
  sample_bin = sample
2348
3063
  else:
2349
3064
  raise ValueError("unrecognized tensor shape for sample")
3065
+
2350
3066
  if format == "sample_int":
2351
3067
  return sample_int
2352
3068
  elif format == "sample_bin":
2353
3069
  return sample_bin
2354
3070
  else:
2355
- count_tuple = sample2count(sample_int, n, jittable)
3071
+ count_tuple = sample2count(sample_int, n, jittable=jittable, dim=dim)
2356
3072
  if format == "count_tuple":
2357
3073
  return count_tuple
2358
3074
  elif format == "count_vector":
2359
- return count_s2d(count_tuple, n)
3075
+ return count_s2d(count_tuple, n, dim=dim)
2360
3076
  elif format == "count_dict_bin":
2361
- return count_tuple2dict(count_tuple, n, key="bin")
3077
+ return count_tuple2dict(count_tuple, n, key="bin", dim=dim)
2362
3078
  elif format == "count_dict_int":
2363
- return count_tuple2dict(count_tuple, n, key="int")
3079
+ return count_tuple2dict(count_tuple, n, key="int", dim=dim)
2364
3080
  else:
2365
3081
  raise ValueError(
2366
3082
  "unsupported format %s for finite shots measurement" % format