emu-mps 2.5.2__py3-none-any.whl → 2.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
emu_mps/__init__.py CHANGED
@@ -36,4 +36,4 @@ __all__ = [
36
36
  "EntanglementEntropy",
37
37
  ]
38
38
 
39
- __version__ = "2.5.2"
39
+ __version__ = "2.7.0"
emu_mps/mpo.py CHANGED
@@ -17,10 +17,25 @@ class MPO(Operator[complex, torch.Tensor, MPS]):
17
17
  """
18
18
  Matrix Product Operator.
19
19
 
20
- Each tensor has 4 dimensions ordered as such: (left bond, output, input, right bond).
20
+ Each tensor is 4 dimensions with axes ordered as
21
+ (left_bond, phys_out, phys_in, right_bond). When contracting an MPO with
22
+ an MPS as H|ψ⟩, phys_in contracts with the MPS physical index, while
23
+ phys_out becomes the physical index of the resulting MPS.
21
24
 
22
25
  Args:
23
- factors: the tensors making up the MPO
26
+ factors: List of 4D tensors with shape (Dl, d_out, d_in, Dr).
27
+ Neighboring tensors must satisfy Dr[i] == Dl[i+1].
28
+ num_gpus_to_use: Number of GPUs to use for placing MPO factors
29
+ (implementation-dependent placement). If None, uses all available
30
+ GPUs.
31
+
32
+ Returns:
33
+ MPO: A matrix product operator constructed from the provided factors.
34
+
35
+ Raises:
36
+ ValueError: If any factor is not 4D or if neighboring bond dimensions
37
+ do not match.
38
+ RuntimeError: If requested GPUs are not available.
24
39
  """
25
40
 
26
41
  def __init__(
emu_mps/mps.py CHANGED
@@ -23,11 +23,35 @@ DEFAULT_MAX_BOND_DIM = 1024
23
23
 
24
24
  class MPS(State[complex, torch.Tensor]):
25
25
  """
26
- Matrix Product State, aka tensor train.
27
-
26
+ Matrix Product State, a.k.a. tensor train.
28
27
  Each tensor has 3 dimensions ordered as such: (left bond, site, right bond).
28
+ Only qubits and qutrits (using the leakage state: 'x') are supported. This
29
+ constructor creates an MPS directly from a list of tensors.
29
30
 
30
- Only qubits are supported.
31
+ Args:
32
+ factors: the tensors for each site. WARNING: for efficiency, this list
33
+ of tensors IS NOT DEEP-COPIED. Therefore, the new MPS object is not
34
+ necessarily the exclusive owner of the list and its tensors.
35
+ As a consequence, beware of potential external modifications
36
+ affecting the list or the tensors.
37
+ You are responsible for deciding whether to pass its own exclusive
38
+ copy of the data to this constructor, or some shared objects.
39
+ orthogonality_center: the orthogonality center of the MPS, or None
40
+ (in which case it will be orthogonalized when needed)
41
+ precision: the threshold for truncating singular values during SVD
42
+ operations. Any singular value below this threshold will be
43
+ discarded, effectively reducing the bond dimension and improving
44
+ computational efficiency.
45
+ Check [precision in config](advanced/config.md#precision)
46
+ max_bond_dim: the maximum bond dimension to allow for this MPS
47
+ num_gpus_to_use: number of GPUs to use for placing MPS factors.
48
+ - If set to 0, all factors are placed on CPU.
49
+ - If set to None, factors retain their current device assignment.
50
+ - Otherwise, factors are distributed across the specified number of
51
+ GPUs.
52
+ eigenstates: the basis states for each qudit (['0','1'] or ['r','g'])
53
+ or qutrit ['g','r','x'], where 'x' is the leakage state
54
+ (default: ['0','1'])
31
55
  """
32
56
 
33
57
  def __init__(
@@ -41,25 +65,6 @@ class MPS(State[complex, torch.Tensor]):
41
65
  num_gpus_to_use: Optional[int] = DEVICE_COUNT,
42
66
  eigenstates: Sequence[Eigenstate] = ("r", "g"),
43
67
  ):
44
- """
45
- This constructor creates a MPS directly from a list of tensors. It is
46
- for internal use only.
47
-
48
- Args:
49
- factors: the tensors for each site
50
- WARNING: for efficiency in a lot of use cases, this list of tensors
51
- IS NOT DEEP-COPIED. Therefore, the new MPS object is not necessarily
52
- the exclusive owner of the list and its tensors. As a consequence,
53
- beware of potential external modifications affecting the list or the tensors.
54
- You are responsible for deciding whether to pass its own exclusive copy
55
- of the data to this constructor, or some shared objects.
56
- orthogonality_center: the orthogonality center of the MPS, or None (in which case
57
- it will be orthogonalized when needed)
58
- precision: the precision with which to keep this MPS
59
- max_bond_dim: the maximum bond dimension to allow for this MPS
60
- num_gpus_to_use: distribute the factors over this many GPUs
61
- 0=all factors to cpu, None=keep the existing device assignment.
62
- """
63
68
  super().__init__(eigenstates=eigenstates)
64
69
  self.precision = precision
65
70
  self.max_bond_dim = max_bond_dim
@@ -555,8 +560,8 @@ class MPS(State[complex, torch.Tensor]):
555
560
  ) -> torch.Tensor:
556
561
  """
557
562
  Efficiently compute the symmetric correlation matrix
558
- C_ij = <self|operator_i operator_j|self>
559
- in basis ("r", "g").
563
+ $C_{ij} = \\langle \\text{self}|\\text{operator}_i \\text{operator}_j|\\text{self}\\rangle$
564
+ in basis ("r", "g"), ("0","1"), and ("r","g","x").
560
565
 
561
566
  Args:
562
567
  operator: a 2x2 (or 3x3) Torch tensor to use
@@ -609,13 +614,22 @@ class MPS(State[complex, torch.Tensor]):
609
614
 
610
615
  def inner(left: MPS, right: MPS) -> torch.Tensor:
611
616
  """
612
- Wrapper around MPS.inner.
617
+ Computes the inner product ⟨left|right⟩ between two MPS states
618
+ (convenience wrapper for MPS.inner). Both MPS must have the same number of
619
+ sites and the same local (physical) dimension at each site.
613
620
 
614
621
  Args:
615
- left: the anti-linear argument
616
- right: the linear argument
622
+ left: Left state (conjugated in the inner product).
623
+ right: Right state (not conjugated).
617
624
 
618
625
  Returns:
619
- the inner product
626
+ A scalar torch.Tensor equal to ⟨left|right⟩ (typically complex-valued).
627
+ Use result.item() to convert to a Python number.
628
+
629
+ Raises:
630
+ ValueError: If the MPS are incompatible (e.g., different lengths or
631
+ dimensions).
632
+ RuntimeError: If tensors are on incompatible
633
+ devices/dtypes (as raised by PyTorch).
620
634
  """
621
635
  return left.inner(right)
emu_mps/mps_backend.py CHANGED
@@ -1,6 +1,6 @@
1
- from pulser.backend import EmulatorBackend, Results
1
+ from pulser.backend import EmulatorBackend, Results, BitStrings
2
2
  from emu_mps.mps_config import MPSConfig
3
- from emu_base import init_logging
3
+ from emu_base import init_logging, PulserData
4
4
  from emu_mps.mps_backend_impl import create_impl, MPSBackendImpl
5
5
  import pickle
6
6
  import os
@@ -12,9 +12,13 @@ class MPSBackend(EmulatorBackend):
12
12
  """
13
13
  A backend for emulating Pulser sequences using Matrix Product States (MPS),
14
14
  aka tensor trains.
15
+
16
+ Args:
17
+ config (MPSConfig): Configuration for the MPS backend.
18
+
15
19
  """
16
20
 
17
- default_config = MPSConfig()
21
+ default_config = MPSConfig(observables=[BitStrings(evaluation_times=[1.0])])
18
22
 
19
23
  @staticmethod
20
24
  def resume(autosave_file: str | pathlib.Path) -> Results:
@@ -34,9 +38,9 @@ class MPSBackend(EmulatorBackend):
34
38
 
35
39
  impl.autosave_file = autosave_file
36
40
  impl.last_save_time = time.time()
37
- init_logging(impl.config.log_level, impl.config.log_file)
41
+ logger = init_logging(impl.config.log_level, impl.config.log_file)
38
42
 
39
- impl.config.logger.warning(
43
+ logger.warning(
40
44
  f"Resuming simulation from file {autosave_file}\n"
41
45
  f"Saving simulation state every {impl.config.autosave_dt} seconds"
42
46
  )
@@ -52,15 +56,22 @@ class MPSBackend(EmulatorBackend):
52
56
  """
53
57
  assert isinstance(self._config, MPSConfig)
54
58
 
55
- impl = create_impl(self._sequence, self._config)
56
- impl.init() # This is separate from the constructor for testing purposes.
57
-
58
- results = self._run(impl)
59
-
60
- return impl.permute_results(results, self._config.optimize_qubit_ordering)
59
+ pulser_data = PulserData(
60
+ sequence=self._sequence, config=self._config, dt=self._config.dt
61
+ )
62
+ results = []
63
+ for sequence_data in pulser_data.get_sequences():
64
+ impl = create_impl(sequence_data, self._config)
65
+ impl.init() # This is separate from the constructor for testing purposes.
66
+ result = self._run(impl)
67
+ results.append(
68
+ impl.permute_results(result, self._config.optimize_qubit_ordering)
69
+ )
70
+ return Results.aggregate(results)
61
71
 
62
72
  @staticmethod
63
73
  def _run(impl: MPSBackendImpl) -> Results:
74
+ impl.fill_results() # at t == 0 for pulser compatibility
64
75
  while not impl.is_finished():
65
76
  impl.progress()
66
77
 
@@ -6,6 +6,7 @@ import random
6
6
  import time
7
7
  import typing
8
8
  import uuid
9
+ import logging
9
10
 
10
11
  from copy import deepcopy
11
12
  from collections import Counter
@@ -14,10 +15,9 @@ from types import MethodType
14
15
  from typing import Any, Optional
15
16
 
16
17
  import torch
17
- from pulser import Sequence
18
18
  from pulser.backend import EmulationConfig, Observable, Results, State
19
19
 
20
- from emu_base import DEVICE_COUNT, PulserData, get_max_rss
20
+ from emu_base import DEVICE_COUNT, SequenceData, get_max_rss
21
21
  from emu_base.math.brents_root_finding import BrentsRootFinder
22
22
  from emu_base.utils import deallocate_tensor
23
23
 
@@ -72,7 +72,7 @@ class Statistics(Observable):
72
72
  duration = self.data[-1]
73
73
  max_mem = get_max_rss(state.factors[0].is_cuda)
74
74
 
75
- config.logger.info(
75
+ logging.getLogger("emulators").info(
76
76
  f"step = {len(self.data)}/{self.timestep_count}, "
77
77
  + f"χ = {state.get_max_bond_dim()}, "
78
78
  + f"|ψ| = {state.get_memory_footprint():.3f} MB, "
@@ -107,7 +107,7 @@ class MPSBackendImpl:
107
107
  target_time: float
108
108
  results: Results
109
109
 
110
- def __init__(self, mps_config: MPSConfig, pulser_data: PulserData):
110
+ def __init__(self, mps_config: MPSConfig, pulser_data: SequenceData):
111
111
  self.config = mps_config
112
112
  self.target_times = pulser_data.target_times
113
113
  self.target_time = self.target_times[1]
@@ -118,7 +118,7 @@ class MPSBackendImpl:
118
118
  self.delta = pulser_data.delta
119
119
  self.phi = pulser_data.phi
120
120
  self.timestep_count: int = self.omega.shape[0]
121
- self.has_lindblad_noise = pulser_data.has_lindblad_noise
121
+ self.has_lindblad_noise = len(pulser_data.lindblad_ops) > 0
122
122
  self.eigenstates = pulser_data.eigenstates
123
123
  self.dim = pulser_data.dim
124
124
  self.lindblad_noise = torch.zeros(self.dim, self.dim, dtype=dtype)
@@ -146,7 +146,7 @@ class MPSBackendImpl:
146
146
  atom_order=optimat.permute_tuple(
147
147
  pulser_data.qubit_ids, self.qubit_permutation
148
148
  ),
149
- total_duration=self.target_times[-1],
149
+ total_duration=int(self.target_times[-1]),
150
150
  )
151
151
  self.statistics = Statistics(
152
152
  evaluation_times=[t / self.target_times[-1] for t in self.target_times],
@@ -154,7 +154,7 @@ class MPSBackendImpl:
154
154
  timestep_count=self.timestep_count,
155
155
  )
156
156
  self.autosave_file = self._get_autosave_filepath(self.config.autosave_prefix)
157
- self.config.logger.debug(
157
+ logging.getLogger("emulators").debug(
158
158
  f"""Will save simulation state to file "{self.autosave_file.name}"
159
159
  every {self.config.autosave_dt} seconds.\n"""
160
160
  f"""To resume: `MPSBackend().resume("{self.autosave_file}")`"""
@@ -165,7 +165,7 @@ class MPSBackendImpl:
165
165
  if requested_num_gpus is None:
166
166
  requested_num_gpus = DEVICE_COUNT
167
167
  elif requested_num_gpus > DEVICE_COUNT:
168
- self.config.logger.warning(
168
+ logging.getLogger("emulators").warning(
169
169
  f"Requested to use {requested_num_gpus} GPU(s) "
170
170
  f"but only {DEVICE_COUNT if DEVICE_COUNT > 0 else 'cpu'} available"
171
171
  )
@@ -193,8 +193,8 @@ class MPSBackendImpl:
193
193
 
194
194
  def init_dark_qubits(self) -> None:
195
195
  # has_state_preparation_error
196
- if self.config.noise_model.state_prep_error > 0.0:
197
- bad_atoms = self.pulser_data.hamiltonian.bad_atoms
196
+ if self.pulser_data.noise_model.state_prep_error > 0.0:
197
+ bad_atoms = self.pulser_data.bad_atoms
198
198
  self.well_prepared_qubits_filter = torch.logical_not(
199
199
  torch.tensor(list(bool(x) for x in bad_atoms.values()))
200
200
  )
@@ -500,59 +500,80 @@ class MPSBackendImpl:
500
500
 
501
501
  self.last_save_time = time.time()
502
502
 
503
- self.config.logger.debug(
503
+ logging.getLogger("emulators").debug(
504
504
  f"Saved simulation state in file {self.autosave_file} ({autosave_filesize}MB)"
505
505
  )
506
506
 
507
+ def _is_evaluation_time(
508
+ self,
509
+ observable: Observable,
510
+ t: float,
511
+ tolerance: float = 1e-10,
512
+ ) -> bool:
513
+ """Return True if ``t`` is a genuine sampling time for this observable.
514
+
515
+ Filters out nearby points that are close to, but not in, the
516
+ observable's evaluation times (within ``tolerance``).
517
+ Prevents false matches by using Pulser's tolerance
518
+ tol = 0.5 / total_duration. (deep inside pulser Observable class)
519
+ """
520
+ times = observable.evaluation_times
521
+
522
+ is_observable_eval_time = (
523
+ times is not None
524
+ and self.config.is_time_in_evaluation_times(t, times, tol=tolerance)
525
+ )
526
+
527
+ is_default_eval_time = self.config.is_evaluation_time(t, tol=tolerance)
528
+
529
+ return is_observable_eval_time or is_default_eval_time
530
+
507
531
  def fill_results(self) -> None:
508
532
  normalized_state = 1 / self.state.norm() * self.state
509
533
 
510
- current_time_int: int = round(self.current_time)
511
534
  fractional_time = self.current_time / self.target_times[-1]
512
- assert abs(self.current_time - current_time_int) < 1e-10
513
535
 
514
- if self.well_prepared_qubits_filter is None:
515
- for callback in self.config.observables:
516
- callback(
517
- self.config,
518
- fractional_time,
519
- normalized_state,
520
- self.hamiltonian,
521
- self.results,
522
- )
536
+ callbacks_for_current_time_step = [
537
+ callback
538
+ for callback in self.config.observables
539
+ if self._is_evaluation_time(callback, fractional_time)
540
+ ]
541
+ if not callbacks_for_current_time_step:
523
542
  return
524
543
 
525
- full_mpo, full_state = None, None
526
- for callback in self.config.observables:
527
- time_tol = 0.5 / self.target_times[-1] + 1e-10
528
- if (
529
- callback.evaluation_times is not None
530
- and self.config.is_time_in_evaluation_times(
531
- fractional_time, callback.evaluation_times, tol=time_tol
544
+ if self.well_prepared_qubits_filter is None:
545
+ state = normalized_state
546
+ hamiltonian = self.hamiltonian
547
+ else:
548
+ # Only do this potentially expensive step once and when needed.
549
+ full_mpo = MPO(
550
+ extended_mpo_factors(
551
+ self.hamiltonian.factors, self.well_prepared_qubits_filter
532
552
  )
533
- ) or self.config.is_evaluation_time(fractional_time, tol=time_tol):
534
-
535
- if full_mpo is None or full_state is None:
536
- # Only do this potentially expensive step once and when needed.
537
- full_mpo = MPO(
538
- extended_mpo_factors(
539
- self.hamiltonian.factors, self.well_prepared_qubits_filter
540
- )
541
- )
542
- full_state = MPS(
543
- extended_mps_factors(
544
- normalized_state.factors,
545
- self.well_prepared_qubits_filter,
546
- ),
547
- num_gpus_to_use=None, # Keep the already assigned devices.
548
- orthogonality_center=get_extended_site_index(
549
- self.well_prepared_qubits_filter,
550
- normalized_state.orthogonality_center,
551
- ),
552
- eigenstates=normalized_state.eigenstates,
553
- )
554
-
555
- callback(self.config, fractional_time, full_state, full_mpo, self.results)
553
+ )
554
+ full_state = MPS(
555
+ extended_mps_factors(
556
+ normalized_state.factors,
557
+ self.well_prepared_qubits_filter,
558
+ ),
559
+ num_gpus_to_use=None, # Keep the already assigned devices.
560
+ orthogonality_center=get_extended_site_index(
561
+ self.well_prepared_qubits_filter,
562
+ normalized_state.orthogonality_center,
563
+ ),
564
+ eigenstates=normalized_state.eigenstates,
565
+ )
566
+ state = full_state
567
+ hamiltonian = full_mpo
568
+
569
+ for callback in callbacks_for_current_time_step:
570
+ callback(
571
+ self.config,
572
+ fractional_time,
573
+ state,
574
+ hamiltonian,
575
+ self.results,
576
+ )
556
577
 
557
578
  def permute_results(self, results: Results, permute: bool) -> Results:
558
579
  if permute:
@@ -608,12 +629,12 @@ class NoisyMPSBackendImpl(MPSBackendImpl):
608
629
  norm_gap_before_jump: float
609
630
  root_finder: Optional[BrentsRootFinder]
610
631
 
611
- def __init__(self, config: MPSConfig, pulser_data: PulserData):
632
+ def __init__(self, config: MPSConfig, pulser_data: SequenceData):
612
633
  super().__init__(config, pulser_data)
613
634
  self.lindblad_ops = pulser_data.lindblad_ops
614
635
  self.root_finder = None
615
636
 
616
- assert self.has_lindblad_noise
637
+ assert self.lindblad_ops
617
638
 
618
639
  def init_lindblad_noise(self) -> None:
619
640
  stacked = torch.stack(self.lindblad_ops)
@@ -685,7 +706,7 @@ class NoisyMPSBackendImpl(MPSBackendImpl):
685
706
  assert math.isclose(norm_after_normalizing, 1, abs_tol=1e-10)
686
707
  self.set_jump_threshold(norm_after_normalizing**2)
687
708
 
688
- def fill_results(self) -> None:
709
+ def remove_noise_from_hamiltonian(self) -> None:
689
710
  # Remove the noise from self.hamiltonian for the callbacks.
690
711
  # Since update_H is called at the start of do_time_step this is safe.
691
712
  update_H(
@@ -696,22 +717,24 @@ class NoisyMPSBackendImpl(MPSBackendImpl):
696
717
  noise=torch.zeros(self.dim, self.dim, dtype=dtype), # no noise
697
718
  )
698
719
 
699
- super().fill_results()
720
+ def timestep_complete(self) -> None:
721
+ self.remove_noise_from_hamiltonian()
722
+ super().timestep_complete()
700
723
 
701
724
 
702
725
  class DMRGBackendImpl(MPSBackendImpl):
703
726
  def __init__(
704
727
  self,
705
728
  mps_config: MPSConfig,
706
- pulser_data: PulserData,
729
+ pulser_data: SequenceData,
707
730
  energy_tolerance: float = 1e-5,
708
731
  max_sweeps: int = 2000,
709
732
  ):
710
733
 
711
- if mps_config.noise_model.noise_types != ():
734
+ if pulser_data.noise_model.noise_types != ():
712
735
  raise NotImplementedError(
713
736
  "DMRG solver does not currently support noise types"
714
- f"you are using: {mps_config.noise_model.noise_types}"
737
+ f"you are using: {pulser_data.noise_model.noise_types}"
715
738
  )
716
739
  super().__init__(mps_config, pulser_data)
717
740
  self.previous_energy: Optional[float] = None
@@ -810,11 +833,10 @@ class DMRGBackendImpl(MPSBackendImpl):
810
833
  self.current_energy = None
811
834
 
812
835
 
813
- def create_impl(sequence: Sequence, config: MPSConfig) -> MPSBackendImpl:
814
- pulser_data = PulserData(sequence=sequence, config=config, dt=config.dt)
836
+ def create_impl(data: SequenceData, config: MPSConfig) -> MPSBackendImpl:
815
837
 
816
- if pulser_data.has_lindblad_noise:
817
- return NoisyMPSBackendImpl(config, pulser_data)
838
+ if data.lindblad_ops:
839
+ return NoisyMPSBackendImpl(config, data)
818
840
  if config.solver == Solver.DMRG:
819
- return DMRGBackendImpl(config, pulser_data)
820
- return MPSBackendImpl(config, pulser_data)
841
+ return DMRGBackendImpl(config, data)
842
+ return MPSBackendImpl(config, data)
emu_mps/mps_config.py CHANGED
@@ -20,7 +20,6 @@ from pulser.backend import (
20
20
  Energy,
21
21
  EnergySecondMoment,
22
22
  EnergyVariance,
23
- BitStrings,
24
23
  EmulationConfig,
25
24
  )
26
25
  import logging
@@ -29,8 +28,8 @@ import pathlib
29
28
 
30
29
  class MPSConfig(EmulationConfig):
31
30
  """
32
- The configuration of the emu-mps MPSBackend. The kwargs passed to this class
33
- are passed on to the base class.
31
+ The configuration of the emu-mps MPSBackend. The kwargs passed to this
32
+ class are passed on to the base class.
34
33
  See the API for that class for a list of available options.
35
34
 
36
35
  Args:
@@ -38,26 +37,34 @@ class MPSConfig(EmulationConfig):
38
37
  only calculated if the evaluation_times are divisible by dt.
39
38
  precision: Up to what precision the state is truncated.
40
39
  Defaults to `1e-5`.
41
- max_bond_dim: The maximum bond dimension that the state is allowed to have.
40
+ max_bond_dim: The maximum bond dimension that the state is allowed
41
+ to have.
42
42
  Defaults to `1024`.
43
43
  max_krylov_dim:
44
- The size of the krylov subspace that the Lanczos algorithm maximally builds
44
+ The size of the krylov subspace that the Lanczos algorithm
45
+ maximally builds
45
46
  extra_krylov_tolerance:
46
- The Lanczos algorithm uses this*precision as the convergence tolerance
47
+ The Lanczos algorithm uses this*precision as the convergence
48
+ tolerance
47
49
  num_gpus_to_use: number of GPUs to be used in a given simulation.
48
- - if it is set to a number `n > 0`, the state will be distributed across `n` GPUs.
50
+ - if it is set to a number `n > 0`, the state will be distributed
51
+ across `n` GPUs.
49
52
  - if it is set to `n = 0`, the entire simulation runs on the CPU.
50
- - if it is `None` (the default value), the backend internally chooses the number of GPUs
53
+ - if it is `None` (the default value), the backend internally
54
+ chooses the number of GPUs
51
55
  based on the hardware availability during runtime.
52
56
  As shown in the benchmarks, using multiple GPUs might
53
- alleviate memory pressure per GPU, but the runtime should be similar.
54
- optimize_qubit_ordering: Optimize the register ordering. Improves performance and
55
- accuracy, but disables certain features.
56
- interaction_cutoff: Set interaction coefficients below this value to `0`.
57
- Potentially improves runtime and memory consumption.
58
- log_level: How much to log. Set to `logging.WARN` to get rid of the timestep info.
57
+ alleviate memory pressure per GPU, but the runtime should
58
+ be similar.
59
+ optimize_qubit_ordering: Optimize the register ordering. Improves
60
+ performance and accuracy, but disables certain features.
61
+ interaction_cutoff: Set interaction coefficients Uᵢⱼ below this value
62
+ to `0.0`. Potentially improves runtime and memory consumption.
63
+ log_level: How much to log. Set to `logging.WARN` to get rid of the
64
+ timestep info.
59
65
  log_file: If specified, log to this file rather than stdout.
60
- autosave_prefix: filename prefix for autosaving simulation state to file
66
+ autosave_prefix: filename prefix for autosaving simulation state to
67
+ file
61
68
  autosave_dt: Minimum time interval in seconds between two autosaves.
62
69
  Saving the simulation state is only possible at specific times,
63
70
  therefore this interval is only a lower bound.
@@ -69,11 +76,13 @@ class MPSConfig(EmulationConfig):
69
76
  kwargs: Arguments that are passed to the base class
70
77
 
71
78
  Examples:
72
- >>> num_gpus_to_use = 2 #use 2 gpus if available, otherwise 1 or cpu
73
- >>> dt = 1 #this will impact the runtime
74
- >>> precision = 1e-6 #smaller dt requires better precision, generally
75
- >>> MPSConfig(num_gpus_to_use=num_gpus_to_use, dt=dt, precision=precision,
76
- >>> with_modulation=True) #the last arg is taken from the base class
79
+ ```python
80
+ num_gpus_to_use = 2 #use 2 gpus if available, otherwise 1 or cpu
81
+ dt = 1.0 #this will impact the runtime
82
+ precision = 1e-6 #smaller dt requires better precision, generally
83
+ MPSConfig(num_gpus_to_use=num_gpus_to_use, dt=dt, precision=precision,
84
+ with_modulation=True) #the last arg is taken from the base class
85
+ ```
77
86
  """
78
87
 
79
88
  # Whether to warn if unexpected kwargs are received
@@ -84,7 +93,7 @@ class MPSConfig(EmulationConfig):
84
93
  def __init__(
85
94
  self,
86
95
  *,
87
- dt: int = 10,
96
+ dt: float = 10.0,
88
97
  precision: float = DEFAULT_PRECISION,
89
98
  max_bond_dim: int = DEFAULT_MAX_BOND_DIM,
90
99
  max_krylov_dim: int = 100,
@@ -95,11 +104,10 @@ class MPSConfig(EmulationConfig):
95
104
  log_level: int = logging.INFO,
96
105
  log_file: pathlib.Path | None = None,
97
106
  autosave_prefix: str = "emu_mps_save_",
98
- autosave_dt: int = 600, # 10 minutes
107
+ autosave_dt: float = float("inf"), # disable autosave by default
99
108
  solver: Solver = Solver.TDVP,
100
109
  **kwargs: Any,
101
110
  ):
102
- kwargs.setdefault("observables", [BitStrings(evaluation_times=[1.0])])
103
111
  super().__init__(
104
112
  dt=dt,
105
113
  precision=precision,
@@ -116,20 +124,36 @@ class MPSConfig(EmulationConfig):
116
124
  solver=solver,
117
125
  **kwargs,
118
126
  )
127
+ logger = init_logging(log_level, log_file)
119
128
 
120
129
  MIN_AUTOSAVE_DT = 10
121
130
  assert (
122
131
  self.autosave_dt > MIN_AUTOSAVE_DT
123
132
  ), f"autosave_dt must be larger than {MIN_AUTOSAVE_DT} seconds"
124
133
 
134
+ MIN_KRYLOV_TOL = 1.0e-12 # keep numerical stability
135
+ prod_tol = precision * extra_krylov_tolerance
136
+ if prod_tol < MIN_KRYLOV_TOL:
137
+ new_extra_krylov_tolerance = MIN_KRYLOV_TOL / precision
138
+ logger.warning(
139
+ f"Requested Lanczos convergence tolerance "
140
+ f"(precision * extra_krylov_tolerance = {prod_tol:.2e}) "
141
+ f"is below minimum threshold {MIN_KRYLOV_TOL:.2e}. "
142
+ f"Automatically adjusting extra_krylov_tolerance from "
143
+ f"{extra_krylov_tolerance:.2e} to {new_extra_krylov_tolerance:.2e} "
144
+ f"to maintain numerical stability."
145
+ )
146
+ else:
147
+ new_extra_krylov_tolerance = extra_krylov_tolerance
148
+ self._backend_options["extra_krylov_tolerance"] = new_extra_krylov_tolerance
149
+
125
150
  self.monkeypatch_observables()
126
- self.logger = init_logging(log_level, log_file)
127
151
 
128
152
  if (self.noise_model.runs != 1 and self.noise_model.runs is not None) or (
129
153
  self.noise_model.samples_per_run != 1
130
154
  and self.noise_model.samples_per_run is not None
131
155
  ):
132
- self.logger.warning(
156
+ logger.warning(
133
157
  "Warning: The runs and samples_per_run values of the NoiseModel are ignored!"
134
158
  )
135
159
  self._backend_options[
@@ -178,7 +202,7 @@ class MPSConfig(EmulationConfig):
178
202
  energy_mps_impl, obs_copy
179
203
  )
180
204
  obs_list.append(obs_copy)
181
- self.observables = tuple(obs_list)
205
+ self._backend_options["observables"] = tuple(obs_list)
182
206
 
183
207
  def check_permutable_observables(self) -> bool:
184
208
  allowed_permutable_obs = set(
@@ -196,7 +220,7 @@ class MPSConfig(EmulationConfig):
196
220
  actual_obs = set([obs._base_tag for obs in self.observables])
197
221
  not_allowed = actual_obs.difference(allowed_permutable_obs)
198
222
  if not_allowed:
199
- self.logger.warning(
223
+ logging.getLogger("emulators").warning(
200
224
  f"emu-mps allows only {allowed_permutable_obs} observables with"
201
225
  " `optimize_qubit_ordering = True`."
202
226
  f" you provided unsupported {not_allowed}"
emu_mps/solver.py CHANGED
@@ -2,5 +2,16 @@ from enum import Enum
2
2
 
3
3
 
4
4
  class Solver(str, Enum):
5
+ """Available MPS solvers used by emu-mps. Use these values to
6
+ select the algorithm for time evolution.
7
+ By default TDVP is used. In order to use DMRG, set the
8
+ `solver` argument of `MPSConfig` to "dmrg" or `Solver.DMRG`.
9
+
10
+ Args:
11
+
12
+ - Solver.TDVP: Time-Dependent Variational Principle solver.
13
+ - Solver.DMRG: Density Matrix Renormalization Group solver.
14
+ """
15
+
5
16
  TDVP = "tdvp"
6
17
  DMRG = "dmrg"
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: emu-mps
3
- Version: 2.5.2
3
+ Version: 2.7.0
4
4
  Summary: Pasqal MPS based pulse emulator built on PyTorch
5
5
  Project-URL: Documentation, https://pasqal-io.github.io/emulators/
6
6
  Project-URL: Repository, https://github.com/pasqal-io/emulators
@@ -25,7 +25,7 @@ Classifier: Programming Language :: Python :: 3.10
25
25
  Classifier: Programming Language :: Python :: Implementation :: CPython
26
26
  Classifier: Programming Language :: Python :: Implementation :: PyPy
27
27
  Requires-Python: >=3.10
28
- Requires-Dist: emu-base==2.5.2
28
+ Requires-Dist: emu-base==2.7.0
29
29
  Description-Content-Type: text/markdown
30
30
 
31
31
  <div align="center">
@@ -1,19 +1,19 @@
1
- emu_mps/__init__.py,sha256=c-uUpZ6sKJuu0iXuCH5Rle7jHpyadqG_j9qgNH9t5bQ,708
1
+ emu_mps/__init__.py,sha256=mJrKjY6IyvYEI0h5KlmMGUwnkWKBa3VphrqFIOJZkiY,708
2
2
  emu_mps/algebra.py,sha256=VZ5uaX5PYWGqDCpRKKr819BLMMtT0pKDxc2HrlLCdgU,5423
3
3
  emu_mps/custom_callback_implementations.py,sha256=WeczmO6qkvBIipvXLqX45i3D7M4ovOrepusIGs6d2Ts,2420
4
4
  emu_mps/hamiltonian.py,sha256=OM0bPNZV7J5Egk6aTUwt4GaWqiUOP68ujZBTuqlBY1k,16289
5
- emu_mps/mpo.py,sha256=WmGDGkCMhlODmydd0b09YcSRlsk6Bg5xYQ4rXSNJvnY,9703
6
- emu_mps/mps.py,sha256=aIWbd3s4c2lqhzR1IukxOAmRK7kfUFBPHkOOCS3dcPM,21747
7
- emu_mps/mps_backend.py,sha256=dXcW5Fa2AAL_WlHUgr30WS2l3qiDJgBVebiVpyYWpPM,2064
8
- emu_mps/mps_backend_impl.py,sha256=WyB1PpC61rzrnr_c3epVZOhDtXLOdhQLsMgUtZQWCYg,30851
9
- emu_mps/mps_config.py,sha256=btlygf6VDFna9TQus-zOmUqB6pfe_aoQmNoivfvxABk,8251
5
+ emu_mps/mpo.py,sha256=ZNwfTXhyBJ7UCSoMbNT2iqelINlHOtQrfzgZGEiz6Vc,10402
6
+ emu_mps/mps.py,sha256=zlke09Lvk2IHJOPYdn2zYAiPWKCDw0mU8t80KwyzIc0,22889
7
+ emu_mps/mps_backend.py,sha256=UZOtHBlpU5VkE8C51VgVEaHVAnLT_TCndLQvApZZeZ4,2566
8
+ emu_mps/mps_backend_impl.py,sha256=cFFQQb4gyLRcv9kDmpQXXXtmKqV_QlLqrfmL0AaP0_k,31227
9
+ emu_mps/mps_config.py,sha256=ZwEXWHUkrUSUIPTWUCPqEMQBzQOufuX_HO9t_Cm93mQ,9182
10
10
  emu_mps/observables.py,sha256=4C_ewkd3YkJP0xghTrGUTgXUGvJRCQcetb8cU0SjMl0,1900
11
- emu_mps/solver.py,sha256=M9xkHhlEouTBvoPw2UYVu6kij7CO4Z1FXw_SiGFtdgo,85
11
+ emu_mps/solver.py,sha256=RpfNZf0ezl8boSWIwSQUJiDkqRLKy3vrlutza8cEcRI,470
12
12
  emu_mps/solver_utils.py,sha256=Q1SY8E3Kipe_RfKE8lAMRfD4mSG2VkkkPmk-fU7eAgY,8852
13
13
  emu_mps/utils.py,sha256=rL75H55hB5lDMjy8a_O2PpJq51iZKjSx91X4euxB3mY,7293
14
14
  emu_mps/optimatrix/__init__.py,sha256=fBXQ7-rgDro4hcaBijCGhx3J69W96qcw5_3mWc7tND4,364
15
15
  emu_mps/optimatrix/optimiser.py,sha256=k9suYmKLKlaZ7ozFuIqvXHyCBoCtGgkX1mpen9GOdOo,6977
16
16
  emu_mps/optimatrix/permutations.py,sha256=9DDMZtrGGZ01b9F3GkzHR3paX4qNtZiPoI7Z_Kia3Lc,3727
17
- emu_mps-2.5.2.dist-info/METADATA,sha256=7i0nH79Bhe-S22wxLyixahqIo94zoCFvQHFyJxKXMps,3587
18
- emu_mps-2.5.2.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
19
- emu_mps-2.5.2.dist-info/RECORD,,
17
+ emu_mps-2.7.0.dist-info/METADATA,sha256=070Tn1jQRcwee0V3gNfoz49ni9EtL4eFfgBWSG5LyKI,3587
18
+ emu_mps-2.7.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
19
+ emu_mps-2.7.0.dist-info/RECORD,,