tensorcircuit-nightly 1.2.0.dev20250326__py3-none-any.whl → 1.4.0.dev20251128__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.
Files changed (77)
  1. tensorcircuit/__init__.py +5 -1
  2. tensorcircuit/abstractcircuit.py +4 -0
  3. tensorcircuit/analogcircuit.py +413 -0
  4. tensorcircuit/applications/layers.py +1 -1
  5. tensorcircuit/applications/van.py +1 -1
  6. tensorcircuit/backends/abstract_backend.py +312 -5
  7. tensorcircuit/backends/cupy_backend.py +3 -1
  8. tensorcircuit/backends/jax_backend.py +100 -4
  9. tensorcircuit/backends/jax_ops.py +108 -0
  10. tensorcircuit/backends/numpy_backend.py +49 -3
  11. tensorcircuit/backends/pytorch_backend.py +92 -3
  12. tensorcircuit/backends/tensorflow_backend.py +102 -3
  13. tensorcircuit/basecircuit.py +157 -98
  14. tensorcircuit/circuit.py +115 -57
  15. tensorcircuit/cloud/local.py +1 -1
  16. tensorcircuit/cloud/quafu_provider.py +1 -1
  17. tensorcircuit/cloud/tencent.py +1 -1
  18. tensorcircuit/compiler/simple_compiler.py +2 -2
  19. tensorcircuit/cons.py +105 -23
  20. tensorcircuit/densitymatrix.py +16 -11
  21. tensorcircuit/experimental.py +733 -153
  22. tensorcircuit/fgs.py +254 -73
  23. tensorcircuit/gates.py +66 -22
  24. tensorcircuit/interfaces/jax.py +5 -3
  25. tensorcircuit/interfaces/tensortrans.py +6 -2
  26. tensorcircuit/interfaces/torch.py +14 -4
  27. tensorcircuit/keras.py +3 -3
  28. tensorcircuit/mpscircuit.py +154 -65
  29. tensorcircuit/quantum.py +698 -134
  30. tensorcircuit/quditcircuit.py +733 -0
  31. tensorcircuit/quditgates.py +618 -0
  32. tensorcircuit/results/counts.py +131 -18
  33. tensorcircuit/results/readout_mitigation.py +4 -1
  34. tensorcircuit/shadows.py +1 -1
  35. tensorcircuit/simplify.py +3 -1
  36. tensorcircuit/stabilizercircuit.py +29 -17
  37. tensorcircuit/templates/__init__.py +2 -0
  38. tensorcircuit/templates/blocks.py +2 -2
  39. tensorcircuit/templates/hamiltonians.py +174 -0
  40. tensorcircuit/templates/lattice.py +1789 -0
  41. tensorcircuit/timeevol.py +896 -0
  42. tensorcircuit/translation.py +10 -3
  43. tensorcircuit/utils.py +7 -0
  44. {tensorcircuit_nightly-1.2.0.dev20250326.dist-info → tensorcircuit_nightly-1.4.0.dev20251128.dist-info}/METADATA +66 -29
  45. tensorcircuit_nightly-1.4.0.dev20251128.dist-info/RECORD +96 -0
  46. {tensorcircuit_nightly-1.2.0.dev20250326.dist-info → tensorcircuit_nightly-1.4.0.dev20251128.dist-info}/WHEEL +1 -1
  47. {tensorcircuit_nightly-1.2.0.dev20250326.dist-info → tensorcircuit_nightly-1.4.0.dev20251128.dist-info}/top_level.txt +0 -1
  48. tensorcircuit_nightly-1.2.0.dev20250326.dist-info/RECORD +0 -118
  49. tests/__init__.py +0 -0
  50. tests/conftest.py +0 -67
  51. tests/test_backends.py +0 -1035
  52. tests/test_calibrating.py +0 -149
  53. tests/test_channels.py +0 -409
  54. tests/test_circuit.py +0 -1699
  55. tests/test_cloud.py +0 -219
  56. tests/test_compiler.py +0 -147
  57. tests/test_dmcircuit.py +0 -555
  58. tests/test_ensemble.py +0 -72
  59. tests/test_fgs.py +0 -310
  60. tests/test_gates.py +0 -156
  61. tests/test_interfaces.py +0 -562
  62. tests/test_keras.py +0 -160
  63. tests/test_miscs.py +0 -282
  64. tests/test_mpscircuit.py +0 -341
  65. tests/test_noisemodel.py +0 -156
  66. tests/test_qaoa.py +0 -86
  67. tests/test_qem.py +0 -152
  68. tests/test_quantum.py +0 -549
  69. tests/test_quantum_attr.py +0 -42
  70. tests/test_results.py +0 -380
  71. tests/test_shadows.py +0 -160
  72. tests/test_simplify.py +0 -46
  73. tests/test_stabilizer.py +0 -217
  74. tests/test_templates.py +0 -218
  75. tests/test_torchnn.py +0 -99
  76. tests/test_van.py +0 -102
  77. {tensorcircuit_nightly-1.2.0.dev20250326.dist-info → tensorcircuit_nightly-1.4.0.dev20251128.dist-info}/licenses/LICENSE +0 -0
tensorcircuit/results/counts.py CHANGED
@@ -6,12 +6,26 @@ from typing import Any, Dict, Optional, Sequence

  import numpy as np

+ from ..quantum import _decode_basis_label

  Tensor = Any
  ct = Dict[str, int]


  def reverse_count(count: ct) -> ct:
+     """
+     Reverse the bit string keys in a count dictionary.
+
+     :param count: A dictionary mapping bit strings to counts
+     :type count: ct
+     :return: A new dictionary with reversed bit string keys
+     :rtype: ct
+
+     :Example:
+
+     >>> reverse_count({"01": 10, "10": 20})
+     {'10': 10, '01': 20}
+     """
      ncount = {}
      for k, v in count.items():
          ncount[k[::-1]] = v
@@ -19,15 +33,56 @@ def reverse_count(count: ct) -> ct:


  def sort_count(count: ct) -> ct:
+     """
+     Sort the count dictionary by counts in descending order.
+
+     :param count: A dictionary mapping bit strings to counts
+     :type count: ct
+     :return: A new dictionary sorted by count values (descending)
+     :rtype: ct
+
+     :Example:
+
+     >>> sort_count({"00": 5, "01": 15, "10": 10})
+     {'01': 15, '10': 10, '00': 5}
+     """
      return {k: v for k, v in sorted(count.items(), key=lambda item: -item[1])}


  def normalized_count(count: ct) -> Dict[str, float]:
+     """
+     Normalize the count dictionary to represent probabilities.
+
+     :param count: A dictionary mapping bit strings to counts
+     :type count: ct
+     :return: A new dictionary with probabilities instead of counts
+     :rtype: Dict[str, float]
+
+     :Example:
+
+     >>> normalized_count({"00": 5, "01": 15})
+     {'00': 0.25, '01': 0.75}
+     """
      shots = sum([v for k, v in count.items()])
      return {k: v / shots for k, v in count.items()}


  def marginal_count(count: ct, keep_list: Sequence[int]) -> ct:
+     """
+     Compute the marginal distribution of a count dictionary over specified qubits.
+
+     :param count: A dictionary mapping bit strings to counts
+     :type count: ct
+     :param keep_list: List of qubit indices to keep in the marginal distribution
+     :type keep_list: Sequence[int]
+     :return: A new count dictionary with marginal distribution
+     :rtype: ct
+
+     :Example:
+
+     >>> marginal_count({"001": 10, "110": 20}, [0, 2])
+     {'01': 10, '10': 20}
+     """
      import qiskit

      count = reverse_count(count)
@@ -35,34 +90,87 @@ def marginal_count(count: ct, keep_list: Sequence[int]) -> ct:
      return reverse_count(ncount)


- def count2vec(count: ct, normalization: bool = True) -> Tensor:
-     nqubit = len(list(count.keys())[0])
-     probability = [0] * 2**nqubit
-     shots = sum([v for k, v in count.items()])
+ def count2vec(
+     count: ct, normalization: bool = True, dim: Optional[int] = None
+ ) -> Tensor:
+     """
+     Convert a dictionary of counts (with string keys) to a probability/count vector.
+
+     Support:
+     - base-d string (d <= 36), characters taken from 0-9A-Z (case-insensitive)
+       For example:
+       qubit: '0101'
+       qudit: '012' or '09A' (A represents 10, which means [0, 9, 10])
+
+     :param count: A dictionary mapping bit strings to counts
+     :type count: ct
+     :param normalization: Whether to normalize the counts to probabilities, defaults to True
+     :type normalization: bool, optional
+     :param dim: Dimensionality of the vector, defaults to 2
+     :type dim: int, optional
+     :return: Probability vector as numpy array
+     :rtype: Tensor
+
+     :Example:
+
+     >>> count2vec({"00": 2, "10": 3, "11": 5})
+     array([0.2, 0. , 0.3, 0.5])
+     """
+
+     if not count:
+         return np.array([], dtype=float)
+
+     dim = 2 if dim is None else dim
+
+     n = len(next(iter(count)).upper())
+     prob = np.zeros(dim**n, dtype=float)
+     shots = float(sum(count.values())) if normalization else 1.0
+     if shots == 0:
+         return prob
+
+     powers = [dim**p for p in range(n)][::-1]
      for k, v in count.items():
-         if normalization is True:
-             v /= shots  # type: ignore
-         probability[int(k, 2)] = v
-     return np.array(probability)
+         digits = _decode_basis_label(k, n, dim)
+         idx = sum(dig * p for dig, p in zip(digits, powers))
+         prob[idx] = (v / shots) if normalization else v
+
+     return prob
+

+ def vec2count(vec: Tensor, prune: bool = False, dim: Optional[int] = None) -> ct:
+     """
+     Map a count/probability vector of length D to a dictionary with base-d string keys (0-9A-Z).
+     Only generate string keys when d <= 36; if d is inferred to be > 36, raise a NotImplementedError.

- def vec2count(vec: Tensor, prune: bool = False) -> ct:
-     from ..quantum import count_vector2dict
+     :param vec: A one-dimensional vector of length D = d**n
+     :param prune: Whether to prune near-zero elements (threshold 1e-8)
+     :param dim: Dimensionality of the vector, defaults to 2
+     :return: {base-d string key: value}, key length n
+     """
+     from ..quantum import count_vector2dict, _infer_num_sites

+     dim = 2 if dim is None else dim
      if isinstance(vec, list):
          vec = np.array(vec)
-     n = int(np.log(vec.shape[0]) / np.log(2) + 1e-9)
-     c = count_vector2dict(vec, n, key="bin")
-     if prune is True:
-         nc = c.copy()
-         for k, v in c.items():
-             if np.abs(v) < 1e-8:
-                 del nc[k]
-         return nc
+     n = _infer_num_sites(int(vec.shape[0]), dim)
+     c: ct = count_vector2dict(vec, n, key="bin", dim=dim)  # type: ignore
+     if prune:
+         c = {k: v for k, v in c.items() if np.abs(v) >= 1e-8}
+
      return c


  def kl_divergence(c1: ct, c2: ct) -> float:
+     """
+     Compute the Kullback-Leibler divergence between two count distributions.
+
+     :param c1: First count dictionary
+     :type c1: ct
+     :param c2: Second count dictionary
+     :type c2: ct
+     :return: KL divergence value
+     :rtype: float
+     """
      eps = 1e-4  # typical value for inverse of the total shots
      c1 = normalized_count(c1)  # type: ignore
      c2 = normalized_count(c2)  # type: ignore
@@ -113,6 +221,11 @@ def merge_count(*counts: ct) -> ct:
      :type counts: ct
      :return: Merged count dictionary
      :rtype: ct
+
+     :Example:
+
+     >>> merge_count({"00": 10, "01": 20}, {"00": 5, "10": 15})
+     {'00': 15, '01': 20, '10': 15}
      """
      merged: ct = {}
      for count in counts:
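Both vector helpers keep their qubit behavior and gain an optional dim argument for qudits. A minimal usage sketch; the first output is the docstring example above, the others follow from the base-d decoding shown in this hunk but are not re-verified here:

import numpy as np
from tensorcircuit.results.counts import count2vec, vec2count

# qubit counts over 10 shots -> normalized probability vector (dim defaults to 2)
count2vec({"00": 2, "10": 3, "11": 5})
# array([0.2, 0. , 0.3, 0.5])

# qutrit counts -> vector of length 3**2; the key "12" decodes to index 1 * 3 + 2 = 5
count2vec({"01": 1, "12": 1}, dim=3, normalization=False)

# inverse direction, dropping near-zero entries
vec2count(np.array([0.2, 0.0, 0.3, 0.5]), prune=True)
# expected: {'00': 0.2, '10': 0.3, '11': 0.5}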
tensorcircuit/results/readout_mitigation.py CHANGED
@@ -723,7 +723,10 @@ class ReadoutMit:
          cals = self._form_cals(qubits)
          M = M3MatVec(dict(counts), cals, distance)
          L = spla.LinearOperator(
-             (M.num_elems, M.num_elems), matvec=M.matvec, rmatvec=M.rmatvec
+             (M.num_elems, M.num_elems),
+             matvec=M.matvec,
+             rmatvec=M.rmatvec,
+             dtype=np.float64,
          )
          diags = M.get_diagonal()

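Side note on the added dtype=np.float64: when dtype is omitted, scipy's LinearOperator infers it by calling the supplied matvec on a dummy vector, so pinning the dtype skips that probing call and any mis-inferred type. A minimal stand-alone sketch of the call shape, using a plain callable rather than M3MatVec:

import numpy as np
import scipy.sparse.linalg as spla

# operator acting as 2 * identity on R^4; dtype is given explicitly
L = spla.LinearOperator((4, 4), matvec=lambda x: 2.0 * x, dtype=np.float64)
x, info = spla.gmres(L, np.ones(4))
# x is approximately [0.5, 0.5, 0.5, 0.5]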
tensorcircuit/shadows.py CHANGED
@@ -334,7 +334,7 @@ def entropy_shadow(


  def renyi_entropy_2(snapshots: Tensor, sub: Optional[Sequence[int]] = None) -> Tensor:
      r"""To calculate the second order Renyi entropy of a subsystem from snapshot, please refer to
-     Brydges, T. et al. Science 364, 260263 (2019). This function is not jitable.
+     Brydges, T. et al. Science 364, 260-263 (2019). This function is not jitable.

      :param snapshots: shape = (ns, repeat, nq)
      :type: Tensor
tensorcircuit/simplify.py CHANGED
@@ -121,7 +121,9 @@ def _split_two_qubit_gate(
      if fixed_choice == 2:  # swap one
          return n3, n4, True  # swap
      s2 = n3.tensor.shape[-1]
-     if (s1 >= 4) and (s2 >= 4):
+     if (s1 >= n[0].dimension * n[2].dimension) and (
+         s2 >= n[1].dimension * n[3].dimension
+     ):
          # jax jit unspport split_node with trun_err anyway
          # tf function doesn't work either, though I believe it may work on tf side
          # CANNOT DONE(@refraction-ray): tf.function version with trun_err set
tensorcircuit/stabilizercircuit.py CHANGED
@@ -96,10 +96,12 @@ class StabilizerCircuit(AbstractCircuit):

          if name.lower() in self.gate_map:
              # self._stim_circuit.append(gate_map[name.lower()], list(index))
-             instruction = f"{self.gate_map[name.lower()]} {' '.join(map(str, index))}"
+             gn = self.gate_map[name.lower()]
+             instruction = f"{gn} {' '.join(map(str, index))}"
              self._stim_circuit.append_from_stim_program_text(instruction)
              # append is much slower
-             self.current_sim.do(stim.Circuit(instruction))
+             # self.current_sim.do(stim.Circuit(instruction))
+             getattr(self.current_sim, gn.lower())(*index)
          else:
              raise ValueError(f"Gate {name} is not supported in stabilizer simulation")

@@ -147,14 +149,16 @@ class StabilizerCircuit(AbstractCircuit):

      def measure(self, *index: int, with_prob: bool = False) -> Tensor:
          """
-         Measure qubits in Z basis.
+         Measure qubits in the Z basis.

-         :param index: Indices of qubits to measure
+         :param index: Indices of the qubits to measure.
          :type index: int
-         :param with_prob: Return probability of measurement outcome, defaults to False
+         :param with_prob: If True, returns the theoretical probability of the measurement outcome.
+             defaults to False
          :type with_prob: bool, optional
-         :return: Measurement results and probability (if with_prob=True)
-         :rtype: Union[np.ndarray, Tuple[np.ndarray, float]]
+         :return: A tensor containing the measurement results.
+             If `with_prob` is True, a tuple containing the results and the probability is returned.
+         :rtype: Tensor
          """
          # Convert negative indices

@@ -162,20 +166,28 @@ class StabilizerCircuit(AbstractCircuit):

          # Add measurement instructions
          s1 = self.current_simulator().copy()
-         m = s1.measure_many(*index)
          # Sample once from the circuit using sampler

-         # TODO(@refraction-ray): correct probability
+         if with_prob:
+             num_random_measurements = 0
+             for i in index:
+                 if s1.peek_z(i) == 0:
+                     num_random_measurements += 1
+             probability = (0.5) ** num_random_measurements
+
+         m = s1.measure_many(*index)
+         if with_prob:
+             return m, probability
          return m

      def cond_measurement(self, index: int) -> Tensor:
          """
-         Measure qubits in Z basis with state collapse.
+         Measure a single qubit in the Z basis and collapse the state.

-         :param index: Index of qubit to measure
+         :param index: The index of the qubit to measure.
          :type index: int
-         :return: Measurement results and probability (if with_prob=True)
-         :rtype: Union[np.ndarray, Tuple[np.ndarray, float]]
+         :return: The measurement result (0 or 1).
+         :rtype: Tensor
          """
          # Convert negative indices

@@ -191,12 +203,12 @@ class StabilizerCircuit(AbstractCircuit):

      def cond_measure_many(self, *index: int) -> Tensor:
          """
-         Measure qubits in Z basis with state collapse.
+         Measure multiple qubits in the Z basis and collapse the state.

-         :param index: Index of qubit to measure
+         :param index: The indices of the qubits to measure.
          :type index: int
-         :return: Measurement results and probability (if with_prob=True)
-         :rtype: Union[np.ndarray, Tuple[np.ndarray, float]]
+         :return: A tensor containing the measurement results.
+         :rtype: Tensor
          """
          # Convert negative indices

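A minimal sketch of the new with_prob bookkeeping above. The constructor call StabilizerCircuit(2) and the h gate method are assumed from the usual tensorcircuit circuit API and are not shown in this diff:

from tensorcircuit.stabilizercircuit import StabilizerCircuit

c = StabilizerCircuit(2)
c.h(0)  # qubit 0 -> |+>, qubit 1 stays in |0>
m, p = c.measure(0, 1, with_prob=True)
# peek_z(0) == 0 (outcome undetermined) while peek_z(1) != 0 (deterministic),
# so exactly one random measurement is counted and p == 0.5 ** 1 == 0.5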
tensorcircuit/templates/__init__.py CHANGED
@@ -5,5 +5,7 @@ from . import dataset
  from . import graphs
  from . import measurements
  from . import conversions
+ from . import lattice
+ from . import hamiltonians

  costfunctions = measurements
tensorcircuit/templates/blocks.py CHANGED
@@ -91,7 +91,7 @@ def QAOA_block(
                  e2,
                  unitary=G._zz_matrix,
                  theta=paramzz * g[e1][e2].get("weight", 1.0),
-                 **kws
+                 **kws,
              )
      else:
          i = 0
@@ -157,7 +157,7 @@ def qft(
      *index: int,
      do_swaps: bool = True,
      inverse: bool = False,
-     insert_barriers: bool = False
+     insert_barriers: bool = False,
  ) -> Circuit:
      """
      This function applies quantum fourier transformation (QFT) to the selected circuit lines
tensorcircuit/templates/hamiltonians.py ADDED
@@ -0,0 +1,174 @@
+ from typing import Any, List, Tuple, Union
+ import numpy as np
+ from ..cons import dtypestr, backend
+ from ..quantum import PauliStringSum2COO
+ from .lattice import AbstractLattice
+
+
+ def _create_empty_sparse_matrix(shape: Tuple[int, int]) -> Any:
+     """
+     Helper function to create a backend-agnostic empty sparse matrix.
+     """
+     indices = backend.convert_to_tensor(backend.zeros((0, 2), dtype="int32"))
+     values = backend.convert_to_tensor(backend.zeros((0,), dtype=dtypestr))  # type: ignore
+     return backend.coo_sparse_matrix(indices=indices, values=values, shape=shape)  # type: ignore
+
+
+ def heisenberg_hamiltonian(
+     lattice: AbstractLattice,
+     j_coupling: Union[float, List[float], Tuple[float, ...]] = 1.0,
+     interaction_scope: str = "neighbors",
+ ) -> Any:
+     r"""
+     Generates the sparse matrix of the Heisenberg Hamiltonian for a given lattice.
+
+     The Heisenberg Hamiltonian is defined as:
+     :math:`H = J\sum_{i,j} (X_i X_j + Y_i Y_j + Z_i Z_j)`
+     where the sum is over a specified set of interacting pairs {i,j}.
+
+     :param lattice: An instance of a class derived from AbstractLattice,
+         which provides the geometric information of the system.
+     :type lattice: AbstractLattice
+     :param j_coupling: The coupling constants. Can be a single float for an
+         isotropic model (Jx=Jy=Jz) or a list/tuple of 3 floats for an
+         anisotropic model (Jx, Jy, Jz). Defaults to 1.0.
+     :type j_coupling: Union[float, List[float], Tuple[float, ...]], optional
+     :param interaction_scope: Defines the range of interactions.
+         - "neighbors": Includes only nearest-neighbor pairs (default).
+         - "all": Includes all unique pairs of sites.
+     :type interaction_scope: str, optional
+     :return: The Hamiltonian as a backend-agnostic sparse matrix.
+     :rtype: Any
+     """
+     num_sites = lattice.num_sites
+     if interaction_scope == "neighbors":
+         neighbor_pairs = lattice.get_neighbor_pairs(k=1, unique=True)
+     elif interaction_scope == "all":
+         neighbor_pairs = lattice.get_all_pairs()
+     else:
+         raise ValueError(
+             f"Invalid interaction_scope: '{interaction_scope}'. "
+             "Must be 'neighbors' or 'all'."
+         )
+
+     if isinstance(j_coupling, (float, int)):
+         js = [float(j_coupling)] * 3
+     else:
+         if len(j_coupling) != 3:
+             raise ValueError("j_coupling must be a float or a list/tuple of 3 floats.")
+         js = [float(j) for j in j_coupling]
+
+     if not neighbor_pairs:
+         return _create_empty_sparse_matrix(shape=(2**num_sites, 2**num_sites))
+     if num_sites == 0:
+         raise ValueError("Cannot generate a Hamiltonian for a lattice with zero sites.")
+
+     pauli_map = {"X": 1, "Y": 2, "Z": 3}
+
+     ls: List[List[int]] = []
+     weights: List[float] = []
+
+     pauli_terms = ["X", "Y", "Z"]
+     for i, j in neighbor_pairs:
+         for idx, pauli_char in enumerate(pauli_terms):
+             if abs(js[idx]) > 1e-9:
+                 string = [0] * num_sites
+                 string[i] = pauli_map[pauli_char]
+                 string[j] = pauli_map[pauli_char]
+                 ls.append(string)
+                 weights.append(js[idx])
+
+     hamiltonian_matrix = PauliStringSum2COO(ls, weight=weights, numpy=False)
+
+     return hamiltonian_matrix
+
+
+ def rydberg_hamiltonian(
+     lattice: AbstractLattice, omega: float, delta: float, c6: float
+ ) -> Any:
+     r"""
+     Generates the sparse matrix of the Rydberg atom array Hamiltonian.
+
+     The Hamiltonian is defined as:
+     .. math::
+
+         H = \sum_i \frac{\Omega}{2} X_i
+         - \sum_i \frac{\delta}{2} \bigl(1 - Z_i \bigr)
+         + \sum_{i<j} \frac{V_{ij}}{4} \bigl(1 - Z_i \bigr)\bigl(1 - Z_j \bigr)
+
+         = \sum_i \frac{\Omega}{2} X_i
+         + \sum_i \frac{\delta}{2} Z_i
+         + \sum_{i<j} \frac{V_{ij}}{4}\,\bigl(Z_i Z_j - Z_i - Z_j \bigr)
+
+     where :math:`V_{ij} = C6 / |r_i - r_j|^6`.
+
+     Note: Constant energy offset terms (proportional to the identity operator)
+     are ignored in this implementation.
+
+     :param lattice: An instance of a class derived from AbstractLattice,
+         which provides site coordinates and the distance matrix.
+     :type lattice: AbstractLattice
+     :param omega: The Rabi frequency (Ω) of the driving laser field.
+     :type omega: float
+     :param delta: The laser detuning (δ).
+     :type delta: float
+     :param c6: The Van der Waals interaction coefficient (C6).
+     :type c6: float
+     :return: The Hamiltonian as a backend-agnostic sparse matrix.
+     :rtype: Any
+     """
+     num_sites = lattice.num_sites
+     if num_sites == 0:
+         raise ValueError("Cannot generate a Hamiltonian for a lattice with zero sites.")
+
+     pauli_map = {"X": 1, "Y": 2, "Z": 3}
+     ls: List[List[int]] = []
+     weights: List[float] = []
+
+     for i in range(num_sites):
+         x_string = [0] * num_sites
+         x_string[i] = pauli_map["X"]
+         ls.append(x_string)
+         weights.append(omega / 2.0)
+
+     z_coefficients = np.zeros(num_sites)
+
+     for i in range(num_sites):
+         z_coefficients[i] += delta / 2.0
+
+     dist_matrix = lattice.distance_matrix
+
+     for i in range(num_sites):
+         for j in range(i + 1, num_sites):
+             distance = dist_matrix[i, j]
+
+             if distance < 1e-9:
+                 continue
+
+             interaction_strength = c6 / (distance**6)
+             coefficient = interaction_strength / 4.0
+
+             zz_string = [0] * num_sites
+             zz_string[i] = pauli_map["Z"]
+             zz_string[j] = pauli_map["Z"]
+             ls.append(zz_string)
+             weights.append(coefficient)
+
+             # The interaction term V_ij * n_i * n_j, when expanded using
+             # n_i = (1-Z_i)/2, becomes (V_ij/4)*(I - Z_i - Z_j + Z_i*Z_j).
+             # This contributes a positive term (+V_ij/4) to the ZZ interaction,
+             # but negative terms (-V_ij/4) to the single-site Z_i and Z_j operators.
+
+             z_coefficients[i] -= coefficient
+             z_coefficients[j] -= coefficient
+
+     for i in range(num_sites):
+         if abs(z_coefficients[i]) > 1e-9:
+             z_string = [0] * num_sites
+             z_string[i] = pauli_map["Z"]
+             ls.append(z_string)
+             weights.append(z_coefficients[i])  # type: ignore
+
+     hamiltonian_matrix = PauliStringSum2COO(ls, weight=weights, numpy=False)
+
+     return hamiltonian_matrix
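A minimal usage sketch for the two builders. The TwoSiteLattice stand-in below is hypothetical (not part of the package) and only duck-types the lattice attributes read above (num_sites, get_neighbor_pairs, get_all_pairs, distance_matrix); in practice a lattice from tensorcircuit.templates.lattice would be passed instead:

import numpy as np
from tensorcircuit.templates.hamiltonians import (
    heisenberg_hamiltonian,
    rydberg_hamiltonian,
)


class TwoSiteLattice:
    # hypothetical stand-in: two sites a unit distance apart
    num_sites = 2
    distance_matrix = np.array([[0.0, 1.0], [1.0, 0.0]])

    def get_neighbor_pairs(self, k=1, unique=True):
        return [(0, 1)]

    def get_all_pairs(self):
        return [(0, 1)]


lat = TwoSiteLattice()
h_xxz = heisenberg_hamiltonian(lat, j_coupling=(1.0, 1.0, 0.5))  # anisotropic XXZ on the single bond
h_ryd = rydberg_hamiltonian(lat, omega=1.0, delta=0.5, c6=1.0)  # both are 4 x 4 sparse matrices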