emu-mps 2.4.1__py3-none-any.whl → 2.4.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
emu_mps/__init__.py CHANGED
@@ -36,4 +36,4 @@ __all__ = [
     "EntanglementEntropy",
 ]
 
-__version__ = "2.4.1"
+__version__ = "2.4.3"
emu_mps/algebra.py CHANGED
@@ -49,7 +49,7 @@ def add_factors(
 
 
 def scale_factors(
-    factors: list[torch.Tensor], scalar: complex, *, which: int
+    factors: list[torch.Tensor], scalar: complex | torch.Tensor, *, which: int
 ) -> list[torch.Tensor]:
     """
     Returns a new list of factors where the tensor at the given index is scaled by `scalar`.
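
For context, the widened annotation means the scalar may now be a zero-dimensional torch.Tensor (for example a weight computed on-device) as well as a Python complex. A minimal sketch of the call, with placeholder factor shapes that are illustrative only:

    import torch
    from emu_mps.algebra import scale_factors

    # Three toy factors with shape (left_bond, physical, right_bond); values are placeholders.
    factors = [torch.randn(1, 2, 1, dtype=torch.complex128) for _ in range(3)]

    # Scale only the factor at index `which`; the scalar may now also be a
    # zero-dimensional tensor instead of a plain Python complex.
    scaled = scale_factors(factors, torch.tensor(0.5 + 0.0j), which=1)
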
emu_mps/hamiltonian.py CHANGED
@@ -4,35 +4,41 @@ to the Hamiltonian of a neutral atoms quantum processor.
 """
 
 from abc import abstractmethod, ABC
-from typing import Iterator
+from typing import Iterator, Literal, Union
 
+from pulser.channels.base_channel import States
 from emu_base import HamiltonianType
 import torch
 from emu_mps.mpo import MPO
 
+
 dtype = torch.complex128
 
 
+Eigenstate = Union[States, Literal["0", "1"]]
+
+
 class Operators:
     id = torch.eye(2, dtype=dtype)
+    id_3x3 = torch.eye(3, dtype=dtype)
     n = torch.tensor([[0.0, 0.0], [0.0, 1.0]], dtype=dtype)
     creation = torch.tensor([[0.0, 1.0], [0.0, 0.0]], dtype=dtype)
     sx = torch.tensor([[0.0, 0.5], [0.5, 0.0]], dtype=dtype)
     sy = torch.tensor([[0.0, -0.5j], [0.5j, 0.0]], dtype=dtype)
-    pu = torch.tensor([[0.0, 0.0], [0.0, 1.0]], dtype=dtype)
 
 
 class HamiltonianMPOFactors(ABC):
-    def __init__(self, interaction_matrix: torch.Tensor):
+    def __init__(self, interaction_matrix: torch.Tensor, dim: int = 2):
         assert interaction_matrix.ndim == 2, "interaction matrix is not a matrix"
         assert (
             interaction_matrix.shape[0] == interaction_matrix.shape[1]
         ), "interaction matrix is not square"
-
+        self.dim = dim
         self.interaction_matrix = interaction_matrix.clone()
         self.interaction_matrix.fill_diagonal_(0.0)  # or assert
         self.qubit_count = self.interaction_matrix.shape[0]
         self.middle = self.qubit_count // 2
+        self.identity = Operators.id if self.dim == 2 else Operators.id_3x3
 
     def __iter__(self) -> Iterator[torch.Tensor]:
         yield self.first_factor()
@@ -72,10 +78,12 @@ class HamiltonianMPOFactors(ABC):
 
 class RydbergHamiltonianMPOFactors(HamiltonianMPOFactors):
     def first_factor(self) -> torch.Tensor:
         has_right_interaction = self.interaction_matrix[0, 1:].any()
-        fac = torch.zeros(1, 2, 2, 3 if has_right_interaction else 2, dtype=dtype)
-        fac[0, :, :, 1] = Operators.id
+        fac = torch.zeros(
+            1, self.dim, self.dim, 3 if has_right_interaction else 2, dtype=dtype
+        )
+        fac[0, :, :, 1] = self.identity
         if has_right_interaction:
-            fac[0, :, :, 2] = Operators.n
+            fac[0, :2, :2, 2] = Operators.n
 
         return fac
 
@@ -86,18 +94,18 @@ class RydbergHamiltonianMPOFactors(HamiltonianMPOFactors):
 
         fac = torch.zeros(
             int(current_left_interactions.sum().item() + 2),
-            2,
-            2,
+            self.dim,
+            self.dim,
             int(left_interactions_to_keep.sum().item() + int(has_right_interaction) + 2),
             dtype=dtype,
         )
 
-        fac[0, :, :, 0] = Operators.id
-        fac[1, :, :, 1] = Operators.id
+        fac[0, :, :, 0] = self.identity
+        fac[1, :, :, 1] = self.identity
         if has_right_interaction:
-            fac[1, :, :, -1] = Operators.n
+            fac[1, :2, :2, -1] = Operators.n
 
-        fac[2:, :, :, 0] = (
+        fac[2:, :2, :2, 0] = (
             self.interaction_matrix[:n][current_left_interactions, n, None, None]
             * Operators.n
         )
@@ -106,7 +114,7 @@ class RydbergHamiltonianMPOFactors(HamiltonianMPOFactors):
         j = 2
         for current_left_interaction in current_left_interactions.nonzero().flatten():
             if left_interactions_to_keep[current_left_interaction]:
-                fac[i, :, :, j] = Operators.id
+                fac[i, :, :, j] = self.identity
                 j += 1
             i += 1
         return fac
@@ -118,21 +126,21 @@ class RydbergHamiltonianMPOFactors(HamiltonianMPOFactors):
 
         fac = torch.zeros(
             int(current_left_interactions.sum().item() + 2),
-            2,
-            2,
+            self.dim,
+            self.dim,
             int(current_right_interactions.sum().item() + 2),
             dtype=dtype,
         )
 
-        fac[0, :, :, 0] = Operators.id
-        fac[1, :, :, 1] = Operators.id
+        fac[0, :, :, 0] = self.identity
+        fac[1, :, :, 1] = self.identity
 
-        fac[2:, :, :, 0] = (
+        fac[2:, :2, :2, 0] = (
             self.interaction_matrix[:n][current_left_interactions, n, None, None]
             * Operators.n
         )
 
-        fac[1, :, :, 2:] = self.interaction_matrix[n + 1 :][
+        fac[1, :2, :2, 2:] = self.interaction_matrix[n + 1 :][
             None, None, current_right_interactions, n
         ] * Operators.n.unsqueeze(-1)
 
@@ -140,7 +148,7 @@ class RydbergHamiltonianMPOFactors(HamiltonianMPOFactors):
             self.interaction_matrix[:n, n + 1 :][current_left_interactions, :][
                 :, None, None, current_right_interactions
             ]
-            * Operators.id[None, ..., None]
+            * self.identity[None, ..., None]
         )
 
         return fac
@@ -152,18 +160,18 @@ class RydbergHamiltonianMPOFactors(HamiltonianMPOFactors):
 
         fac = torch.zeros(
             int(right_interactions_to_keep.sum().item() + int(has_left_interaction) + 2),
-            2,
-            2,
+            self.dim,
+            self.dim,
             int(current_right_interactions.sum().item() + 2),
             dtype=dtype,
         )
 
-        fac[0, :, :, 0] = Operators.id
-        fac[1, :, :, 1] = Operators.id
+        fac[0, :, :, 0] = self.identity
+        fac[1, :, :, 1] = self.identity
         if has_left_interaction:
-            fac[2, :, :, 0] = Operators.n
+            fac[2, :2, :2, 0] = Operators.n
 
-        fac[1, :, :, 2:] = self.interaction_matrix[n + 1 :][
+        fac[1, :2, :2, 2:] = self.interaction_matrix[n + 1 :][
             None, None, current_right_interactions, n
         ] * Operators.n.unsqueeze(-1)
 
@@ -171,20 +179,22 @@ class RydbergHamiltonianMPOFactors(HamiltonianMPOFactors):
         j = 2
         for current_right_interaction in current_right_interactions.nonzero().flatten():
             if right_interactions_to_keep[current_right_interaction]:
-                fac[i, :, :, j] = Operators.id
+                fac[i, :, :, j] = self.identity
                 i += 1
             j += 1
         return fac
 
     def last_factor(self) -> torch.Tensor:
         has_left_interaction = self.interaction_matrix[-1, :-1].any()
-        fac = torch.zeros(3 if has_left_interaction else 2, 2, 2, 1, dtype=dtype)
-        fac[0, :, :, 0] = Operators.id
+        fac = torch.zeros(
+            3 if has_left_interaction else 2, self.dim, self.dim, 1, dtype=dtype
+        )
+        fac[0, :, :, 0] = self.identity
         if has_left_interaction:
             if self.qubit_count >= 3:
-                fac[2, :, :, 0] = Operators.n
+                fac[2, :2, :2, 0] = Operators.n
             else:
-                fac[2, :, :, 0] = self.interaction_matrix[0, 1] * Operators.n
+                fac[2, :2, :2, 0] = self.interaction_matrix[0, 1] * Operators.n
 
         return fac
 
@@ -192,11 +202,13 @@
 class XYHamiltonianMPOFactors(HamiltonianMPOFactors):
     def first_factor(self) -> torch.Tensor:
         has_right_interaction = self.interaction_matrix[0, 1:].any()
-        fac = torch.zeros(1, 2, 2, 4 if has_right_interaction else 2, dtype=dtype)
-        fac[0, :, :, 1] = Operators.id
+        fac = torch.zeros(
+            1, self.dim, self.dim, 4 if has_right_interaction else 2, dtype=dtype
+        )
+        fac[0, :, :, 1] = self.identity
         if has_right_interaction:
-            fac[0, :, :, 2] = Operators.creation
-            fac[0, :, :, 3] = Operators.creation.T
+            fac[0, :2, :2, 2] = Operators.creation
+            fac[0, :2, :2, 3] = Operators.creation.T
 
         return fac
 
@@ -207,8 +219,8 @@ class XYHamiltonianMPOFactors(HamiltonianMPOFactors):
 
         fac = torch.zeros(
             int(2 * current_left_interactions.sum().item() + 2),
-            2,
-            2,
+            self.dim,
+            self.dim,
             int(
                 2 * left_interactions_to_keep.sum().item()
                 + 2 * int(has_right_interaction)
@@ -217,17 +229,17 @@ class XYHamiltonianMPOFactors(HamiltonianMPOFactors):
             dtype=dtype,
         )
 
-        fac[0, :, :, 0] = Operators.id
-        fac[1, :, :, 1] = Operators.id
+        fac[0, :, :, 0] = self.identity
+        fac[1, :, :, 1] = self.identity
         if has_right_interaction:
-            fac[1, :, :, -2] = Operators.creation
-            fac[1, :, :, -1] = Operators.creation.T
+            fac[1, :2, :2, -2] = Operators.creation
+            fac[1, :2, :2, -1] = Operators.creation.T
 
-        fac[2::2, :, :, 0] = (
+        fac[2::2, :2, :2, 0] = (
             self.interaction_matrix[:n][current_left_interactions, n, None, None]
             * Operators.creation.T
         )
-        fac[3::2, :, :, 0] = (
+        fac[3::2, :2, :2, 0] = (
             self.interaction_matrix[:n][current_left_interactions, n, None, None]
             * Operators.creation
         )
@@ -236,8 +248,8 @@ class XYHamiltonianMPOFactors(HamiltonianMPOFactors):
         j = 2
         for current_left_interaction in current_left_interactions.nonzero().flatten():
             if left_interactions_to_keep[current_left_interaction]:
-                fac[i, :, :, j] = Operators.id
-                fac[i + 1, :, :, j + 1] = Operators.id
+                fac[i, :, :, j] = self.identity
+                fac[i + 1, :, :, j + 1] = self.identity
                 j += 2
             i += 2
         return fac
@@ -249,28 +261,28 @@ class XYHamiltonianMPOFactors(HamiltonianMPOFactors):
 
         fac = torch.zeros(
             int(2 * current_left_interactions.sum().item() + 2),
-            2,
-            2,
+            self.dim,
+            self.dim,
             int(2 * current_right_interactions.sum().item() + 2),
             dtype=dtype,
         )
 
-        fac[0, :, :, 0] = Operators.id
-        fac[1, :, :, 1] = Operators.id
+        fac[0, :, :, 0] = self.identity
+        fac[1, :, :, 1] = self.identity
 
-        fac[2::2, :, :, 0] = (
+        fac[2::2, :2, :2, 0] = (
             self.interaction_matrix[:n][current_left_interactions, n, None, None]
             * Operators.creation.T
         )
-        fac[3::2, :, :, 0] = (
+        fac[3::2, :2, :2, 0] = (
             self.interaction_matrix[:n][current_left_interactions, n, None, None]
             * Operators.creation
         )
 
-        fac[1, :, :, 2::2] = self.interaction_matrix[n + 1 :][
+        fac[1, :2, :2, 2::2] = self.interaction_matrix[n + 1 :][
             None, None, current_right_interactions, n
         ] * Operators.creation.unsqueeze(-1)
-        fac[1, :, :, 3::2] = self.interaction_matrix[n + 1 :][
+        fac[1, :2, :2, 3::2] = self.interaction_matrix[n + 1 :][
             None, None, current_right_interactions, n
         ] * Operators.creation.T.unsqueeze(-1)
 
@@ -278,13 +290,13 @@ class XYHamiltonianMPOFactors(HamiltonianMPOFactors):
             self.interaction_matrix[:n, n + 1 :][current_left_interactions, :][
                 :, None, None, current_right_interactions
             ]
-            * Operators.id[None, ..., None]
+            * self.identity[None, ..., None]
         )
         fac[3::2, :, :, 3::2] = (
             self.interaction_matrix[:n, n + 1 :][current_left_interactions, :][
                 :, None, None, current_right_interactions
             ]
-            * Operators.id[None, ..., None]
+            * self.identity[None, ..., None]
         )
 
         return fac
@@ -300,22 +312,22 @@ class XYHamiltonianMPOFactors(HamiltonianMPOFactors):
                 + 2 * int(has_left_interaction)
                 + 2
             ),
-            2,
-            2,
+            self.dim,
+            self.dim,
             int(2 * current_right_interactions.sum().item() + 2),
             dtype=dtype,
         )
 
-        fac[0, :, :, 0] = Operators.id
-        fac[1, :, :, 1] = Operators.id
+        fac[0, :, :, 0] = self.identity
+        fac[1, :, :, 1] = self.identity
         if has_left_interaction:
-            fac[2, :, :, 0] = Operators.creation.T
-            fac[3, :, :, 0] = Operators.creation
+            fac[2, :2, :2, 0] = Operators.creation.T
+            fac[3, :2, :2, 0] = Operators.creation
 
-        fac[1, :, :, 2::2] = self.interaction_matrix[n + 1 :][
+        fac[1, :2, :2, 2::2] = self.interaction_matrix[n + 1 :][
             None, None, current_right_interactions, n
         ] * Operators.creation.unsqueeze(-1)
-        fac[1, :, :, 3::2] = self.interaction_matrix[n + 1 :][
+        fac[1, :2, :2, 3::2] = self.interaction_matrix[n + 1 :][
             None, None, current_right_interactions, n
         ] * Operators.creation.T.unsqueeze(-1)
 
@@ -323,23 +335,25 @@ class XYHamiltonianMPOFactors(HamiltonianMPOFactors):
         j = 2
         for current_right_interaction in current_right_interactions.nonzero().flatten():
             if right_interactions_to_keep[current_right_interaction]:
-                fac[i, :, :, j] = Operators.id
-                fac[i + 1, :, :, j + 1] = Operators.id
+                fac[i, :, :, j] = self.identity
+                fac[i + 1, :, :, j + 1] = self.identity
                 i += 2
             j += 2
         return fac
 
     def last_factor(self) -> torch.Tensor:
         has_left_interaction = self.interaction_matrix[-1, :-1].any()
-        fac = torch.zeros(4 if has_left_interaction else 2, 2, 2, 1, dtype=dtype)
-        fac[0, :, :, 0] = Operators.id
+        fac = torch.zeros(
+            4 if has_left_interaction else 2, self.dim, self.dim, 1, dtype=dtype
+        )
+        fac[0, :, :, 0] = self.identity
         if has_left_interaction:
             if self.qubit_count >= 3:
-                fac[2, :, :, 0] = Operators.creation.T
-                fac[3, :, :, 0] = Operators.creation
+                fac[2, :2, :2, 0] = Operators.creation.T
+                fac[3, :2, :2, 0] = Operators.creation
             else:
-                fac[2, :, :, 0] = self.interaction_matrix[0, 1] * Operators.creation.T
-                fac[3, :, :, 0] = self.interaction_matrix[0, 1] * Operators.creation
+                fac[2, :2, :2, 0] = self.interaction_matrix[0, 1] * Operators.creation.T
+                fac[3, :2, :2, 0] = self.interaction_matrix[0, 1] * Operators.creation
 
         return fac
 
@@ -348,6 +362,7 @@ def make_H(
     *,
     interaction_matrix: torch.Tensor,  # depends on Hamiltonian Type
     hamiltonian_type: HamiltonianType,
+    dim: int = 2,
     num_gpus_to_use: int | None,
 ) -> MPO:
     r"""
@@ -357,53 +372,58 @@ def make_H(
     The Hamiltonian H is given by:
     H = ∑ⱼΩⱼ[cos(ϕⱼ)σˣⱼ + sin(ϕⱼ)σʸⱼ] - ∑ⱼΔⱼnⱼ + ∑ᵢ﹥ⱼC⁶/rᵢⱼ⁶ nᵢnⱼ
 
-    If noise is considered, the Hamiltonian includes an additional term to support
-    the Monte Carlo WaveFunction algorithm:
+    If noise is considered, the Hamiltonian includes an additional term to
+    support the Monte Carlo WaveFunction algorithm:
     H = ∑ⱼΩⱼ[cos(ϕⱼ)σˣⱼ + sin(ϕⱼ)σʸⱼ] - ∑ⱼΔⱼnⱼ + ∑ᵢ﹥ⱼC⁶/rᵢⱼ⁶ nᵢnⱼ - 0.5i∑ₘ ∑ᵤ Lₘᵘ⁺ Lₘᵘ
-    where Lₘᵘ are the Lindblad operators representing the noise, m for noise channel
-    and u for the number of atoms
+    where Lₘᵘ are the Lindblad operators representing the noise,
+    m for noise channel and u for the number of atoms
 
-    make_H constructs an MPO of the appropriate size, but the single qubit terms are left at zero.
+    make_H constructs an MPO of the appropriate size, but the single qubit
+    terms are left at zero.
     To fill in the appropriate values, call update_H
 
     Args:
-        interaction_matrix (torch.Tensor): The interaction matrix describing the interactions
-            between qubits.
-        num_gpus_to_use (int): how many gpus to put the Hamiltonian on. See utils.assign_devices
+        interaction_matrix (torch.Tensor): The interaction matrix describing
+            the interactions between qubits.
+        hamiltonian_type: whether to use XY or Rydberg interation
+        dim: dimension of the basis (2 or 3)
+        num_gpus_to_use (int): how many gpus to put the Hamiltonian on.
+            See utils.assign_devices
     Returns:
-        MPO: A Matrix Product Operator (MPO) representing the specified Hamiltonian.
+        MPO: A Matrix Product Operator (MPO) representing the specified
+            Hamiltonian.
 
     Note:
         For more information about the Hamiltonian and its usage, refer to the
        [Pulser documentation](https://pulser.readthedocs.io/en/stable/conventions.html#hamiltonians).
 
     """
+
     if hamiltonian_type == HamiltonianType.Rydberg:
         return MPO(
-            list(RydbergHamiltonianMPOFactors(interaction_matrix)),
+            list(RydbergHamiltonianMPOFactors(interaction_matrix, dim=dim)),
             num_gpus_to_use=num_gpus_to_use,
         )
 
     if hamiltonian_type == HamiltonianType.XY:
         return MPO(
-            list(XYHamiltonianMPOFactors(interaction_matrix)),
+            list(XYHamiltonianMPOFactors(interaction_matrix, dim=dim)),
            num_gpus_to_use=num_gpus_to_use,
         )
 
-    raise ValueError(f"Unsupported hamiltonian type {hamiltonian_type}")
-
 
 def update_H(
     hamiltonian: MPO,
     omega: torch.Tensor,
     delta: torch.Tensor,
     phi: torch.Tensor,
-    noise: torch.Tensor = torch.zeros(2, 2),
+    noise: torch.Tensor,
 ) -> None:
     """
     The single qubit operators in the Hamiltonian,
-    corresponding to the omega, delta, phi parameters and the aggregated Lindblad operators
-    have a well-determined position in the factors of the Hamiltonian.
+    corresponding to the omega, delta, phi parameters and the aggregated
+    Lindblad operators have a well-determined position in the factors of
+    the Hamiltonian.
     This function updates this part of the factors to update the
     Hamiltonian with new parameters without rebuilding the entire thing.
     See make_H for details about the Hamiltonian.
@@ -416,20 +436,24 @@ def update_H(
         phi (torch.Tensor): The phase ϕⱼ corresponding to each qubit.
         noise (torch.Tensor, optional): The single-qubit noise
             term -0.5i∑ⱼLⱼ†Lⱼ applied to all qubits.
-            This can be computed using the `compute_noise_from_lindbladians` function.
+            This can be computed using the `compute_noise_from_lindbladians`
+            function.
             Defaults to a zero tensor.
     """
 
-    assert noise.shape == (2, 2)
+    assert noise.shape == (2, 2) or (3, 3)
     nqubits = omega.size(dim=0)
 
     a = torch.tensordot(omega * torch.cos(phi), Operators.sx, dims=0)
-    c = torch.tensordot(delta, Operators.pu, dims=0)
+    c = torch.tensordot(delta, Operators.n, dims=0)
     b = torch.tensordot(omega * torch.sin(phi), Operators.sy, dims=0)
 
-    single_qubit_terms = a + b - c + noise
     factors = hamiltonian.factors
 
+    single_qubit_terms = torch.stack(nqubits * [noise])
+
+    single_qubit_terms[:, :2, :2] += a + b - c
+
     factors[0][0, :, :, 0] = single_qubit_terms[0]
     for i in range(1, nqubits):
         factors[i][1, :, :, 0] = single_qubit_terms[i]
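
For orientation, a minimal sketch of how the new dim argument threads through make_H and update_H; the interaction values are placeholders, and the explicit zero noise tensor mirrors what the backend now passes since noise is no longer defaulted:

    import torch
    from emu_base import HamiltonianType
    from emu_mps.hamiltonian import make_H, update_H

    nqubits = 3
    interaction = torch.rand(nqubits, nqubits)
    interaction = (interaction + interaction.T) / 2  # symmetric placeholder couplings

    # Build the MPO with a 3-level local basis; single-qubit terms start at zero.
    h = make_H(
        interaction_matrix=interaction,
        hamiltonian_type=HamiltonianType.Rydberg,
        dim=3,
        num_gpus_to_use=0,  # keep everything on the CPU for this sketch
    )

    # Fill in the drive terms; pass a zero tensor of the local dimension when there is no noise.
    update_H(
        h,
        omega=torch.ones(nqubits),
        delta=torch.zeros(nqubits),
        phi=torch.zeros(nqubits),
        noise=torch.zeros(3, 3, dtype=torch.complex128),
    )
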
emu_mps/mpo.py CHANGED
@@ -10,6 +10,8 @@ from pulser.backend.operator import FullOp, QuditOp
 from emu_mps.mps import MPS, DEFAULT_MAX_BOND_DIM, DEFAULT_PRECISION
 from emu_mps.utils import new_left_bath, assign_devices
 
+dtype = torch.complex128
+
 
 
 class MPO(Operator[complex, torch.Tensor, MPS]):
@@ -30,7 +32,8 @@ class MPO(Operator[complex, torch.Tensor, MPS]):
             raise ValueError("For 1 qubit states, do state vector")
         if factors[0].shape[0] != 1 or factors[-1].shape[-1] != 1:
             raise ValueError(
-                "The dimension of the left (right) link of the first (last) tensor should be 1"
+                "The dimension of the left (right) link of the first (last) "
+                "tensor should be 1"
             )
         assert all(
             factors[i - 1].shape[-1] == factors[i].shape[0]
@@ -164,42 +167,75 @@ class MPO(Operator[complex, torch.Tensor, MPS]):
     """
 
         basis = set(eigenstates)
-
+        dim = len(basis)
         operators_with_tensors: dict[str, torch.Tensor | QuditOp]
         if basis == {"r", "g"}:
-            # operators_with_tensors will now contain the basis for single qubit ops,
-            # and potentially user defined strings in terms of these
+            # operators_with_tensors will now contain the basis for single
+            # qubit ops, and potentially user defined strings in terms of these
            operators_with_tensors = {
-                "gg": torch.tensor([[1.0, 0.0], [0.0, 0.0]], dtype=torch.complex128).view(
-                    1, 2, 2, 1
+                "gg": torch.tensor([[1.0, 0.0], [0.0, 0.0]], dtype=dtype).view(
+                    1, dim, dim, 1
                 ),
-                "gr": torch.tensor([[0.0, 0.0], [1.0, 0.0]], dtype=torch.complex128).view(
+                "rg": torch.tensor([[0.0, 0.0], [1.0, 0.0]], dtype=dtype).view(
                     1, 2, 2, 1
                 ),
-                "rg": torch.tensor([[0.0, 1.0], [0.0, 0.0]], dtype=torch.complex128).view(
+                "gr": torch.tensor([[0.0, 1.0], [0.0, 0.0]], dtype=dtype).view(
                     1, 2, 2, 1
                 ),
-                "rr": torch.tensor([[0.0, 0.0], [0.0, 1.0]], dtype=torch.complex128).view(
-                    1, 2, 2, 1
+                "rr": torch.tensor([[0.0, 0.0], [0.0, 1.0]], dtype=dtype).view(
+                    1, dim, dim, 1
                 ),
             }
         elif basis == {"0", "1"}:
-            # operators_with_tensors will now contain the basis for single qubit ops,
-            # and potentially user defined strings in terms of these
+            # operators_with_tensors will now contain the basis for single
+            # qubit ops, and potentially user defined strings in terms of these
            operators_with_tensors = {
-                "00": torch.tensor([[1.0, 0.0], [0.0, 0.0]], dtype=torch.complex128).view(
-                    1, 2, 2, 1
+                "00": torch.tensor([[1.0, 0.0], [0.0, 0.0]], dtype=dtype).view(
+                    1, dim, dim, 1
                 ),
-                "01": torch.tensor([[0.0, 0.0], [1.0, 0.0]], dtype=torch.complex128).view(
+                "10": torch.tensor([[0.0, 0.0], [1.0, 0.0]], dtype=dtype).view(
                     1, 2, 2, 1
                 ),
-                "10": torch.tensor([[0.0, 1.0], [0.0, 0.0]], dtype=torch.complex128).view(
+                "01": torch.tensor([[0.0, 1.0], [0.0, 0.0]], dtype=dtype).view(
                     1, 2, 2, 1
                 ),
-                "11": torch.tensor([[0.0, 0.0], [0.0, 1.0]], dtype=torch.complex128).view(
-                    1, 2, 2, 1
+                "11": torch.tensor([[0.0, 0.0], [0.0, 1.0]], dtype=dtype).view(
+                    1, dim, dim, 1
                 ),
             }
+        elif basis == {"r", "g", "x"}:
+            # operators_with_tensors will now contain the basis for single
+            # qubit ops, and potentially user defined strings in terms of these
+            operators_with_tensors = {
+                "gg": torch.tensor(
+                    [[1.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], dtype=dtype
+                ).view(1, dim, dim, 1),
+                "gr": torch.tensor(
+                    [[0.0, 1.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], dtype=dtype
+                ).view(1, dim, dim, 1),
+                "rg": torch.tensor(
+                    [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 0.0]], dtype=dtype
+                ).view(1, dim, dim, 1),
+                "rr": torch.tensor(
+                    [[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]], dtype=dtype
+                ).view(1, dim, dim, 1),
+                "xx": torch.tensor(
+                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], dtype=dtype
+                ).view(1, dim, dim, 1),
+                "xg": torch.tensor(
+                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [1.0, 0.0, 0.0]], dtype=dtype
+                ).view(1, dim, dim, 1),
+                "xr": torch.tensor(
+                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 1.0, 0.0]], dtype=dtype
+                ).view(1, dim, dim, 1),
+                "gx": torch.tensor(
+                    [[0.0, 0.0, 1.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], dtype=dtype
+                ).view(1, dim, dim, 1),
+                "rx": torch.tensor(
+                    [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0]], dtype=dtype
+                ).view(1, dim, dim, 1),
+            }
+
         else:
             raise ValueError("Unsupported basis provided")
 
@@ -212,16 +248,14 @@ class MPO(Operator[complex, torch.Tensor, MPS]):
             if isinstance(op, torch.Tensor):
                 return op
 
-            result = torch.zeros(1, 2, 2, 1, dtype=torch.complex128)
+            result = torch.zeros(1, dim, dim, 1, dtype=dtype)
             for opstr, coeff in op.items():
                 tensor = replace_operator_string(operators_with_tensors[opstr])
                 operators_with_tensors[opstr] = tensor
                 result += tensor * coeff
             return result
 
-        factors = [
-            torch.eye(2, 2, dtype=torch.complex128).view(1, 2, 2, 1)
-        ] * n_qudits
+        factors = [torch.eye(dim, dim, dtype=dtype).view(1, dim, dim, 1)] * n_qudits
 
         for op in tensorop:
             factor = replace_operator_string(op[0])
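
The 2.4.3 tables follow one consistent convention: a two-letter string "ab" denotes the outer product |a⟩⟨b|, with basis ordering g=0, r=1 (and x=2 in the three-level case); the swapped "gr"/"rg" and "01"/"10" entries in 2.4.1 are what is being corrected. A short sketch reproducing the same tensors from that rule (the helper name is ours, not part of the package):

    import torch

    dtype = torch.complex128

    def ketbra(a: int, b: int, dim: int) -> torch.Tensor:
        """Build |a><b| in a `dim`-level basis, shaped as a (1, dim, dim, 1) MPO factor."""
        op = torch.zeros(dim, dim, dtype=dtype)
        op[a, b] = 1.0
        return op.view(1, dim, dim, 1)

    # Basis ordering used by the tables above: g -> 0, r -> 1, x -> 2.
    index = {"g": 0, "r": 1, "x": 2}

    # e.g. "gr" is |g><r|: a single 1 in row g, column r, matching the 2.4.3 entry.
    gr = ketbra(index["g"], index["r"], dim=3)
    assert gr[0, 0, 1, 0] == 1.0
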
emu_mps/mps.py CHANGED
@@ -394,7 +394,7 @@ class MPS(State[complex, torch.Tensor]):
         result.truncate()
         return result
 
-    def __rmul__(self, scalar: complex) -> MPS:
+    def __rmul__(self, scalar: complex | torch.Tensor) -> MPS:
         """
         Multiply an MPS by a scalar.
 
@@ -419,7 +419,7 @@ class MPS(State[complex, torch.Tensor]):
             eigenstates=self.eigenstates,
         )
 
-    def __imul__(self, scalar: complex) -> MPS:
+    def __imul__(self, scalar: complex | torch.Tensor) -> MPS:
         return self.__rmul__(scalar)
 
     @classmethod
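
A minimal sketch of the widened scalar multiplication; the two-site product-state factors are illustrative, and we assume the MPS constructor accepts a bare factor list with its default options:

    import torch
    from emu_mps import MPS

    # |00> as a two-site MPS: each factor has shape (left_bond, physical, right_bond).
    ground = torch.tensor([1.0, 0.0], dtype=torch.complex128).view(1, 2, 1)
    psi = MPS([ground.clone(), ground.clone()])

    # The scalar may now be a 0-dim torch.Tensor (e.g. a weight computed on-device)
    # as well as a plain Python complex.
    psi = torch.tensor(0.5 + 0.0j) * psi   # dispatches to MPS.__rmul__
    psi *= 2.0                             # MPS.__imul__ still accepts plain scalars
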
emu_mps/mps_backend_impl.py CHANGED
@@ -42,6 +42,8 @@ from emu_mps.utils import (
     new_left_bath,
 )
 
+dtype = torch.complex128
+
 
 
 class Statistics(Observable):
@@ -117,7 +119,8 @@ class MPSBackendImpl:
         self.phi = pulser_data.phi
         self.timestep_count: int = self.omega.shape[0]
         self.has_lindblad_noise = pulser_data.has_lindblad_noise
-        self.lindblad_noise = torch.zeros(2, 2, dtype=torch.complex128)
+        self.dim = pulser_data.dim
+        self.lindblad_noise = torch.zeros(self.dim, self.dim, dtype=dtype)
         self.qubit_permutation = (
             optimat.minimize_bandwidth(pulser_data.full_interaction_matrix)
             if self.config.optimize_qubit_ordering
@@ -126,6 +129,7 @@ class MPSBackendImpl:
         self.full_interaction_matrix = optimat.permute_tensor(
             pulser_data.full_interaction_matrix, self.qubit_permutation
         )
+
         self.masked_interaction_matrix = optimat.permute_tensor(
             pulser_data.masked_interaction_matrix, self.qubit_permutation
         )
@@ -155,12 +159,16 @@ class MPSBackendImpl:
             f"""To resume: `MPSBackend().resume("{self.autosave_file}")`"""
         )
         self.last_save_time = time.time()
+        requested_num_gpus = self.config.num_gpus_to_use
 
-        if self.config.num_gpus_to_use > DEVICE_COUNT:
+        if requested_num_gpus is None:
+            requested_num_gpus = DEVICE_COUNT
+        elif requested_num_gpus > DEVICE_COUNT:
             self.config.logger.warning(
-                f"Requested to use {self.config.num_gpus_to_use} GPU(s) "
+                f"Requested to use {requested_num_gpus} GPU(s) "
                 f"but only {DEVICE_COUNT if DEVICE_COUNT > 0 else 'cpu'} available"
             )
+        self.resolved_num_gpus = requested_num_gpus
 
 
     def __getstate__(self) -> dict:
@@ -211,7 +219,7 @@ class MPSBackendImpl:
                 self.qubit_count,
                 precision=self.config.precision,
                 max_bond_dim=self.config.max_bond_dim,
-                num_gpus_to_use=self.config.num_gpus_to_use,
+                num_gpus_to_use=self.resolved_num_gpus,
             )
             return
 
@@ -239,7 +247,7 @@ class MPSBackendImpl:
             [f.detach().clone() for f in initial_state.factors],
             precision=self.config.precision,
            max_bond_dim=self.config.max_bond_dim,
-            num_gpus_to_use=self.config.num_gpus_to_use,
+            num_gpus_to_use=self.resolved_num_gpus,
             eigenstates=initial_state.eigenstates,
         )
         initial_state.truncate()
@@ -259,7 +267,7 @@ class MPSBackendImpl:
                 else self.full_interaction_matrix
             ),
            hamiltonian_type=self.hamiltonian_type,
-            num_gpus_to_use=self.config.num_gpus_to_use,
+            num_gpus_to_use=self.resolved_num_gpus,
         )
 
         update_H(
@@ -272,9 +280,7 @@ class MPSBackendImpl:
 
     def init_baths(self) -> None:
         self.left_baths = [
-            torch.ones(
-                1, 1, 1, dtype=torch.complex128, device=self.state.factors[0].device
-            )
+            torch.ones(1, 1, 1, dtype=dtype, device=self.state.factors[0].device)
         ]
         self.right_baths = right_baths(self.state, self.hamiltonian, final_qubit=2)
         assert len(self.right_baths) == self.qubit_count - 1
@@ -447,7 +453,8 @@ class MPSBackendImpl:
         self.hamiltonian = make_H(
             interaction_matrix=self.full_interaction_matrix,
             hamiltonian_type=self.hamiltonian_type,
-            num_gpus_to_use=self.config.num_gpus_to_use,
+            dim=self.dim,
+            num_gpus_to_use=self.resolved_num_gpus,
         )
 
         if not self.is_finished():
@@ -682,6 +689,7 @@ class NoisyMPSBackendImpl(MPSBackendImpl):
             omega=self.omega[self.timestep_index - 1, :],  # Meh
             delta=self.delta[self.timestep_index - 1, :],
             phi=self.phi[self.timestep_index - 1, :],
+            noise=torch.zeros(self.dim, self.dim, dtype=dtype),  # no noise
         )
 
         super().fill_results()
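
In short, the backend now resolves the GPU count itself instead of reading DEVICE_COUNT at configuration time. A standalone paraphrase of the rule above (not the package code itself):

    import torch

    DEVICE_COUNT = torch.cuda.device_count()

    def resolve_num_gpus(requested: int | None) -> int:
        """None means 'use whatever is available at runtime'; too large a request is clamped with a warning."""
        if requested is None:
            return DEVICE_COUNT
        if requested > DEVICE_COUNT:
            print(f"Requested {requested} GPU(s) but only "
                  f"{DEVICE_COUNT if DEVICE_COUNT > 0 else 'cpu'} available")
        return requested
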
emu_mps/mps_config.py CHANGED
@@ -3,7 +3,6 @@ from types import MethodType
 
 import copy
 
-from emu_base import DEVICE_COUNT
 from emu_mps.mps import MPS, DEFAULT_MAX_BOND_DIM, DEFAULT_PRECISION
 from emu_mps.mpo import MPO
 from emu_mps.solver import Solver
@@ -45,8 +44,12 @@ class MPSConfig(EmulationConfig):
            The size of the krylov subspace that the Lanczos algorithm maximally builds
         extra_krylov_tolerance:
             The Lanczos algorithm uses this*precision as the convergence tolerance
-        num_gpus_to_use: During the simulation, distribute the state over this many GPUs
-            0=all factors to cpu. As shown in the benchmarks, using multiple GPUs might
+        num_gpus_to_use: number of GPUs to be used in a given simulation.
+            - if it is set to a number `n > 0`, the state will be distributed across `n` GPUs.
+            - if it is set to `n = 0`, the entire simulation runs on the CPU.
+            - if it is `None` (the default value), the backend internally chooses the number of GPUs
+              based on the hardware availability during runtime.
+            As shown in the benchmarks, using multiple GPUs might
             alleviate memory pressure per GPU, but the runtime should be similar.
         optimize_qubit_ordering: Optimize the register ordering. Improves performance and
             accuracy, but disables certain features.
@@ -86,7 +89,7 @@ class MPSConfig(EmulationConfig):
         max_bond_dim: int = DEFAULT_MAX_BOND_DIM,
         max_krylov_dim: int = 100,
         extra_krylov_tolerance: float = 1e-3,
-        num_gpus_to_use: int = DEVICE_COUNT,
+        num_gpus_to_use: int | None = None,
         optimize_qubit_ordering: bool = False,
         interaction_cutoff: float = 0.0,
         log_level: int = logging.INFO,
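
A short sketch of the new default in use, assuming MPSConfig is importable from the package root as in the emu-mps documentation; the constructor keywords are those shown above:

    from emu_mps import MPSConfig

    # Leave num_gpus_to_use at None (the new default) to let the backend pick the
    # GPU count when the run starts, or pin it explicitly:
    config_auto = MPSConfig()                    # resolved against the detected device count at runtime
    config_cpu = MPSConfig(num_gpus_to_use=0)    # force the whole simulation onto the CPU
    config_two = MPSConfig(num_gpus_to_use=2)    # distribute the state over two GPUs
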
emu_mps/solver_utils.py CHANGED
@@ -5,7 +5,7 @@ from emu_base import krylov_exp
 from emu_base.math.krylov_energy_min import krylov_energy_minimization
 from emu_base.utils import deallocate_tensor
 from emu_mps import MPS, MPO
-from emu_mps.utils import split_tensor
+from emu_mps.utils import split_matrix
 from emu_mps.mps_config import MPSConfig
 
 
@@ -178,11 +178,12 @@ def evolve_pair(
         is_hermitian=is_hermitian,
     ).view(left_bond_dim * 2, 2 * right_bond_dim)
 
-    l, r = split_tensor(
+    l, r = split_matrix(
         evol,
         max_error=config.precision,
         max_rank=config.max_bond_dim,
         orth_center_right=orth_center_right,
+        preserve_norm=not is_hermitian,  # only relevant for computing jump times
     )
 
     return l.view(left_bond_dim, 2, -1), r.view(-1, 2, right_bond_dim).to(right_device)
@@ -262,7 +263,7 @@
     )
     updated_state = updated_state.view(left_bond_dim * 2, 2 * right_bond_dim)
 
-    l, r = split_tensor(
+    l, r = split_matrix(
         updated_state,
         max_error=config.precision,
         max_rank=config.max_bond_dim,
emu_mps/utils.py CHANGED
@@ -23,11 +23,12 @@ def _determine_cutoff_index(d: torch.Tensor, max_error: float) -> int:
     return 0  # type: ignore[no-any-return]
 
 
-def split_tensor(
+def split_matrix(
     m: torch.Tensor,
     max_error: float = 1e-5,
     max_rank: int = 1024,
     orth_center_right: bool = True,
+    preserve_norm: bool = False,
 ) -> tuple[torch.Tensor, torch.Tensor]:
     """
     Computes a low-rank approximation split of m using the Eckart-Young-Mirsky theorem.
@@ -41,8 +42,11 @@
             d.shape[0] - max_rank,
         )
         left = q[:, max_bond:]
-        right = q.T.conj() @ m
-        right = right[max_bond:, :]
+        right = left.T.conj() @ m
+        if preserve_norm:
+            old_norm2 = torch.sum(d)
+            new_norm2 = torch.sum(d[max_bond:])
+            right *= torch.sqrt(old_norm2 / new_norm2)
     else:
         d, q = torch.linalg.eigh(m.T.conj() @ m)
         max_bond = max(
@@ -50,8 +54,11 @@
             d.shape[0] - max_rank,
         )
         right = q[:, max_bond:].T.conj_physical()
-        left = m @ q
-        left = left[:, max_bond:]
+        left = m @ q[:, max_bond:]
+        if preserve_norm:
+            old_norm2 = torch.sum(d)
+            new_norm2 = torch.sum(d[max_bond:])
+            left *= torch.sqrt(old_norm2 / new_norm2)
 
     return left, right
 
@@ -71,7 +78,7 @@
     for i in range(len(factors) - 1, 0, -1):
         factor_shape = factors[i].shape
 
-        l, r = split_tensor(
+        l, r = split_matrix(
            factors[i].view(factor_shape[0], -1),
            max_error=precision,
            max_rank=max_bond_dim,
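
For intuition, the norm rescaling added to split_matrix can be reproduced with plain torch: after truncating to the leading eigenvectors of m @ m.conj().T, multiplying the kept part by sqrt(old_norm² / new_norm²) restores the Frobenius norm of m. A standalone sketch (not the package function itself):

    import torch

    def split_keep_norm(m: torch.Tensor, rank: int) -> tuple[torch.Tensor, torch.Tensor]:
        """Rank-`rank` split of m with the Frobenius norm rescaled onto `right`."""
        d, q = torch.linalg.eigh(m @ m.T.conj())   # eigenvalues in ascending order
        left = q[:, -rank:]                        # keep the leading eigenvectors
        right = left.T.conj() @ m
        old_norm2 = torch.sum(d)                   # == |m|_F^2 (sum of all eigenvalues)
        new_norm2 = torch.sum(d[-rank:])           # squared norm retained after truncation
        right = right * torch.sqrt(old_norm2 / new_norm2)
        return left, right

    m = torch.randn(6, 4, dtype=torch.complex128)
    l, r = split_keep_norm(m, rank=2)
    # The truncated product keeps the original Frobenius norm.
    assert torch.isclose(torch.linalg.norm(l @ r), torch.linalg.norm(m))
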
emu_mps-2.4.1.dist-info/METADATA → emu_mps-2.4.3.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: emu-mps
-Version: 2.4.1
+Version: 2.4.3
 Summary: Pasqal MPS based pulse emulator built on PyTorch
 Project-URL: Documentation, https://pasqal-io.github.io/emulators/
 Project-URL: Repository, https://github.com/pasqal-io/emulators
@@ -25,7 +25,7 @@ Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: Implementation :: CPython
 Classifier: Programming Language :: Python :: Implementation :: PyPy
 Requires-Python: >=3.10
-Requires-Dist: emu-base==2.4.1
+Requires-Dist: emu-base==2.4.3
 Description-Content-Type: text/markdown
 
 <div align="center">
emu_mps-2.4.3.dist-info/RECORD ADDED
@@ -0,0 +1,19 @@
+emu_mps/__init__.py,sha256=avViacgzWmSfTtWIUvoxlaE_1p7LRXPb9Hh5sWv8ND4,708
+emu_mps/algebra.py,sha256=VZ5uaX5PYWGqDCpRKKr819BLMMtT0pKDxc2HrlLCdgU,5423
+emu_mps/custom_callback_implementations.py,sha256=WeczmO6qkvBIipvXLqX45i3D7M4ovOrepusIGs6d2Ts,2420
+emu_mps/hamiltonian.py,sha256=OM0bPNZV7J5Egk6aTUwt4GaWqiUOP68ujZBTuqlBY1k,16289
+emu_mps/mpo.py,sha256=WmGDGkCMhlODmydd0b09YcSRlsk6Bg5xYQ4rXSNJvnY,9703
+emu_mps/mps.py,sha256=1aMvY4NQSBeAwzTwHRegXncGQqm-1g05W00OimFNzt8,21630
+emu_mps/mps_backend.py,sha256=bS83qFxvdoK-c12_1WaPw6O7xUc7vdWifZNHUzNP5sM,2091
+emu_mps/mps_backend_impl.py,sha256=LMvi7KpfnsNmbiwQHuCtoA2lsK3Pf1vETuipL7BlG5U,30688
+emu_mps/mps_config.py,sha256=j7rho3edFzDxPO_VX7j5jc0Drw9wO2NWRymkqZ9hzmU,9128
+emu_mps/observables.py,sha256=4C_ewkd3YkJP0xghTrGUTgXUGvJRCQcetb8cU0SjMl0,1900
+emu_mps/solver.py,sha256=M9xkHhlEouTBvoPw2UYVu6kij7CO4Z1FXw_SiGFtdgo,85
+emu_mps/solver_utils.py,sha256=CklWjVvhngpyElpJyfUTncr4ErBP6VbUE3Z9nf8s3mM,8762
+emu_mps/utils.py,sha256=rL75H55hB5lDMjy8a_O2PpJq51iZKjSx91X4euxB3mY,7293
+emu_mps/optimatrix/__init__.py,sha256=fBXQ7-rgDro4hcaBijCGhx3J69W96qcw5_3mWc7tND4,364
+emu_mps/optimatrix/optimiser.py,sha256=k9suYmKLKlaZ7ozFuIqvXHyCBoCtGgkX1mpen9GOdOo,6977
+emu_mps/optimatrix/permutations.py,sha256=9DDMZtrGGZ01b9F3GkzHR3paX4qNtZiPoI7Z_Kia3Lc,3727
+emu_mps-2.4.3.dist-info/METADATA,sha256=hq605tOeTSfyZa9cNwSkVUH72t5ZTHfLo19Kll9-sxU,3587
+emu_mps-2.4.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+emu_mps-2.4.3.dist-info/RECORD,,
emu_mps-2.4.1.dist-info/RECORD DELETED
@@ -1,19 +0,0 @@
-emu_mps/__init__.py,sha256=R4Fw8r-_GEeckaxyQ9leUKVfLoukcrwvaTQsTbzxhbs,708
-emu_mps/algebra.py,sha256=vi3d4xOBEsfQ7gsjYNns9AMLjJXXXppYVEThavNvAg0,5408
-emu_mps/custom_callback_implementations.py,sha256=WeczmO6qkvBIipvXLqX45i3D7M4ovOrepusIGs6d2Ts,2420
-emu_mps/hamiltonian.py,sha256=gOPxNOBmk6jRPPjevERuCP_scGv0EKYeAJ0uxooihes,15622
-emu_mps/mpo.py,sha256=2HNwN4Fz04QIVfPcPaMmt2q89ZBxN3K-vVeiFkOtqzs,8049
-emu_mps/mps.py,sha256=KEXrLdqhi5EvpdX-9J38ZfrRdt3oDmcGsVLghMPUfQw,21600
-emu_mps/mps_backend.py,sha256=bS83qFxvdoK-c12_1WaPw6O7xUc7vdWifZNHUzNP5sM,2091
-emu_mps/mps_backend_impl.py,sha256=Pcbn27lhg3n3Lzo3CGwhlWPqPi-8YV1ntkgl901AoUs,30400
-emu_mps/mps_config.py,sha256=QmwgU8INEnxrxZkhboYIsHGwml0UhgPkddT5zh8KVBU,8867
-emu_mps/observables.py,sha256=4C_ewkd3YkJP0xghTrGUTgXUGvJRCQcetb8cU0SjMl0,1900
-emu_mps/solver.py,sha256=M9xkHhlEouTBvoPw2UYVu6kij7CO4Z1FXw_SiGFtdgo,85
-emu_mps/solver_utils.py,sha256=EnNzEaUrtTMQbrWoqOy8vyDsQwlsfQCUc2HgOp4z8dk,8680
-emu_mps/utils.py,sha256=pW5N_EbbGiOviQpJCw1a0pVgEDObP_InceNaIqY5bHE,6982
-emu_mps/optimatrix/__init__.py,sha256=fBXQ7-rgDro4hcaBijCGhx3J69W96qcw5_3mWc7tND4,364
-emu_mps/optimatrix/optimiser.py,sha256=k9suYmKLKlaZ7ozFuIqvXHyCBoCtGgkX1mpen9GOdOo,6977
-emu_mps/optimatrix/permutations.py,sha256=9DDMZtrGGZ01b9F3GkzHR3paX4qNtZiPoI7Z_Kia3Lc,3727
-emu_mps-2.4.1.dist-info/METADATA,sha256=2SUt6GLqhz_6-2ewuVqDH9xvz3gx1ZROU975intvuwk,3587
-emu_mps-2.4.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-emu_mps-2.4.1.dist-info/RECORD,,