emu-base 2.5.2__py3-none-any.whl → 2.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
emu_base/__init__.py CHANGED
@@ -1,5 +1,5 @@
  from .constants import DEVICE_COUNT
- from .pulser_adapter import PulserData, HamiltonianType
+ from .pulser_adapter import PulserData, HamiltonianType, SequenceData
  from .math.brents_root_finding import find_root_brents
  from .math.krylov_exp import krylov_exp, DEFAULT_MAX_KRYLOV_DIM
  from .jump_lindblad_operators import compute_noise_from_lindbladians
@@ -12,6 +12,7 @@ __all__ = [
  "compute_noise_from_lindbladians",
  "matmul_2x2_with_batched",
  "PulserData",
+ "SequenceData",
  "find_root_brents",
  "krylov_exp",
  "HamiltonianType",
@@ -22,4 +23,4 @@ __all__ = [
  "init_logging",
  ]

- __version__ = "2.5.2"
+ __version__ = "2.7.0"
emu_base/jump_lindblad_operators.py CHANGED
@@ -52,18 +52,17 @@ def get_lindblad_operators(
  return [depolarizing_x, depolarizing_y, depolarizing_z]

  if noise_type == "eff_noise":
- if not all(
- isinstance(op, torch.Tensor) and op.shape == (dim, dim)
- for op in noise_model.eff_noise_opers
- ):
+ torch_ops = [
+ torch.tensor(op, dtype=torch.complex128) for op in noise_model.eff_noise_opers
+ ]
+ if not all(op.shape == (dim, dim) for op in torch_ops):
  raise ValueError(
- f"Only {dim} by {dim} effective noise operator matrices are "
- "supported and it should be given as torch tensors "
+ f"Only {dim} by {dim} effective noise operator matrices are supported"
  )

  lindblad_ops = [ # lindblad operators with XY pulser basis are fine
- math.sqrt(rate) * torch.as_tensor(op)
- for rate, op in zip(noise_model.eff_noise_rates, noise_model.eff_noise_opers)
+ math.sqrt(rate) * op
+ for rate, op in zip(noise_model.eff_noise_rates, torch_ops)
  ]

  # pulser ising basis changing to emu-mps ising basis
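
Note: the practical effect of the hunk above is that effective-noise operators no longer have to be provided as torch tensors. A minimal sketch of the new conversion path, with illustrative operator and rate values (not taken from the package):

import math
import torch

# A single 2x2 effective noise operator given as a plain nested list.
eff_noise_opers = [[[0.0, 1.0], [0.0, 0.0]]]
eff_noise_rates = [0.05]

torch_ops = [torch.tensor(op, dtype=torch.complex128) for op in eff_noise_opers]
assert all(op.shape == (2, 2) for op in torch_ops)  # same shape check as the new code, with dim == 2

lindblad_ops = [math.sqrt(rate) * op for rate, op in zip(eff_noise_rates, torch_ops)]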
emu_base/math/pchip_torch.py ADDED
@@ -0,0 +1,269 @@
+ """
+ Reference:
+ See page 98 (a few pages are sufficient) in:
+ C. Moler, Numerical Computing with MATLAB, 2004.
+
+ PCHIP - Piecewise Cubic Hermite Interpolating Polynomial is a 1D, C¹-continuous
+ interpolation method. It builds a separate cubic polynomial P_i(x) on each
+ interval [x[i], x[i+1]] and is designed to preserve the shape of the data
+ (informally: to look visually pleasing), e.g., it avoids overshoot near sharp changes.
+
+ The cubic polynomials P_i(x) are expressed in Hermite form, which means:
+ (1) they match the data values
+ P(x[i]) == y[i],
+ P(x[i+1]) == y[i+1].
+ (2) they match derivatives
+ P'(x[i]) == d[i],
+ P'(x[i+1]) == d[i+1],
+ where d[i] is a derivative estimate better than the first-order finite difference
+ Δ[i] = (y[i+1]-y[i])/(x[i+1]-x[i]). How d[i] is computed and what "better"
+ means is explained below.
+
+ What distinguishes PCHIP from generic cubic spline interpolators is how PCHIP
+ selects the knot derivatives d[i]. PCHIP computes:
+ (1) Secant slopes (finite differences) on each interval:
+ Δ[i] = (y[i+1] - y[i]) / (x[i+1] - x[i]).
+
+ (2) The knot derivatives d[i] as a harmonic mean of the neighboring slopes
+ Δ[i-1] and Δ[i] when they have the same sign:
+
+ 1/d[i] = 0.5 * (1/Δ[i-1] + 1/Δ[i]) [PCHIP interpolator]
+
+ which tends to look better visually than
+ - arithmetic mean d[i] = 0.5 * (Δ[i-1] + Δ[i]) [Catmull–Rom], or
+ - finite difference d[i] = Δ[i] [Linear interpolator].
+
+ If Δ[i-1] and Δ[i] have opposite signs (or either is zero), x[i] is a
+ discrete local minimum or maximum. In this case PCHIP sets
+
+ d[i] = 0
+
+ so the curve flattens at x[i], which helps prevent overshoot and
+ preserves the shape of the data.
+ (See images https://matthodges.com/posts/2024-08-08-spline-pchip/)
+
+ If the grid is non-uniform (h[i-1] = x[i] - x[i-1] and h[i] = x[i+1] - x[i]
+ are different), the neighboring secant slopes Δ[i-1] and Δ[i] do not contribute
+ equally. PCHIP therefore combines them using a weighted harmonic mean:
+
+ (w1+w2)/d[i] = w1/Δ[i-1] + w2/Δ[i],
+
+ w1 = 2h[i] + h[i-1], w2 = h[i] + 2h[i-1].
+ For uniform spacing the weights are equal and this reduces to the simple harmonic mean above.
+
+ Given x[i], y[i] and d[i], one can compute the cubic polynomial
+ P(t) = p0 + p1 t + p2 t^2 + p3 t^3, 0 <= t <= h[i], on each
+ interval [x[i], x[i+1]]. To find the coefficients (p0, p1, p2, p3), solve:
+
+ P(0) = y[i], P'(0) = d[i]
+ P(h) = y[i+1], P'(h) = d[i+1], where h = x[i+1] - x[i].
+
+ This gives:
+ p0 = y[i]
+ p1 = d[i]
+ p2 = (3Δ[i] - 2d[i] - d[i+1]) / h
+ p3 = (d[i] + d[i+1] - 2Δ[i]) / h²
+
+ Extrapolation:
+ points outside the interpolation interval [x[0], x[-1]] are evaluated by
+ extending the boundary cubic polynomial. For x < x[0] we use the first
+ interval polynomial P_0; for x > x[-1] we use the polynomial of the last
+ interval.
+
+ Algorithm outline:
+ (1) Compute interval widths h[i] and secant slopes Δ[i].
+ (2) Compute knot derivatives d[i] using the PCHIP rules above.
+ (3) For each interval [x[i], x[i+1]], form the cubic Hermite polynomial
+ P(x) using y[i], y[i+1], d[i], and d[i+1].
+ (4) Evaluate P(x) at desired query points xq.
+
+
+ Note: Endpoint derivatives d[i] (at x[0] and x[-1]) are computed differently
+ from the interior derivatives. At the boundaries we use a one-sided 3-point method
+ (see “Three-point methods”):
+ https://en.wikipedia.org/wiki/Numerical_differentiation
+
+ Using Taylor expansions for y[i+1] and y[i+2] gives:
+ Δ[i] ≈ d[i] + f'' * h[i] / 2 + ...
+ Δ[i+1] ≈ d[i] + f'' * (2h[i] + h[i+1]) / 2 + ...
+
+ Eliminating f'' yields the endpoint estimate:
+ d[i] ≈ (Δ[i] * (2h[i] + h[i+1]) - Δ[i+1] * h[i]) / (h[i] + h[i+1])
+ """
+
+ import torch
+
+
+ def _weighted_harmonic_mean(
+ delta_l: torch.Tensor,
+ delta_r: torch.Tensor,
+ h_l: torch.Tensor,
+ h_r: torch.Tensor,
+ ) -> torch.Tensor:
+
+ w_l = h_l + 2.0 * h_r
+ w_r = 2.0 * h_l + h_r
+ return (w_l + w_r) / (w_l / delta_l + w_r / delta_r)
+
+
+ def _endpoint_slope(
+ delta_l: torch.Tensor,
+ delta_r: torch.Tensor,
+ h_l: torch.Tensor,
+ h_r: torch.Tensor,
+ ) -> torch.Tensor:
+ """
+ At the boundaries we use a one-sided 3-point method
+ (see “Three-point methods”):
+ https://en.wikipedia.org/wiki/Numerical_differentiation
+
+ Using Taylor expansions for y[i+1] and y[i+2] gives:
+ Δ[i] ≈ d[i] + f'' * h[i] / 2 + ...
+ Δ[i+1] ≈ d[i] + f'' * (2h[i] + h[i+1]) / 2 + ...
+
+ Eliminating f'' yields the endpoint estimate:
+ d[i] ≈ (Δ[i] * (2h[i] + h[i+1]) - Δ[i+1] * h[i]) / (h[i] + h[i+1])
+ """
+ w1 = 2.0 * h_l + h_r
+ return (w1 * delta_l - h_l * delta_r) / (h_l + h_r)
+
+
+ def _limit_endpoint(
+ d_end: torch.Tensor,
+ s_l: torch.Tensor,
+ s_r: torch.Tensor,
+ ) -> torch.Tensor:
+ # If the derivative points opposite to the first secant, zero it
+ mask_sign_change = d_end * s_l < 0
+ d_end = torch.where(mask_sign_change, torch.zeros_like(d_end), d_end)
+
+ # If the secants switch sign, cap the magnitude to 3*|s_l|
+ mask_sign_change = s_l * s_r < 0
+ mask_cap = mask_sign_change & (torch.abs(d_end) > 3.0 * torch.abs(s_l))
+ return torch.where(mask_cap, 3.0 * s_l, d_end)
+
+
+ def _pchip_derivatives(
+ h: torch.Tensor,
+ delta: torch.Tensor,
+ ) -> torch.Tensor:
+ """
+ Compute PCHIP knot derivatives d[i] from interval widths h[i]
+ and secant slopes Δ[i] (``delta``).
+
+ Interior derivatives use a weighted harmonic mean of neighboring secants
+ when they have the same sign
+ (w1+w2)/d[i] = w1/Δ[i-1] + w2/Δ[i]
+ (otherwise set d[i]=0 to preserve shape). Endpoint derivatives are
+ estimated with a one-sided 3-point formula and then limited to prevent
+ overshoot.
+ """
+ n = h.numel() + 1
+ d = torch.zeros((n,), dtype=delta.dtype, device=delta.device)
+
+ # Two points: straight line
+ if n == 2:
+ d.fill_(delta[0])
+ return d
+
+ # Interior points
+ delta_l, delta_r = delta[:-1], delta[1:]
+ h_l, h_r = h[:-1], h[1:]
+
+ mask_same_sign = (delta_l * delta_r) > 0 # excludes zeros + sign changes
+ dh = _weighted_harmonic_mean(delta_l, delta_r, h_l, h_r)
+ d[1:-1] = torch.where(mask_same_sign, dh, torch.zeros_like(dh))
+
+ # Endpoints (one-sided + limiter)
+ d0 = _endpoint_slope(delta[0], delta[1], h[0], h[1])
+ dn = _endpoint_slope(delta[-1], delta[-2], h[-1], h[-2])
+
+ d[0] = _limit_endpoint(d0, delta[0], delta[1])
+ d[-1] = _limit_endpoint(dn, delta[-1], delta[-2])
+
+ return d
+
+
+ def _polynomial_coeffs(
+ y: torch.Tensor,
+ h: torch.Tensor,
+ delta: torch.Tensor,
+ d: torch.Tensor,
+ ) -> torch.Tensor:
+ """
+ For each interval x ∈ [x[i], x[i+1]] build the cubic in the local coordinate
+ t(x) = x - x[i]:
+
+ P(t) = p0 + p1 t + p2 t^2 + p3 t^3, 0 <= t <= h[i]
+
+ The coefficients (p0, p1, p2, p3) are determined by matching value and slope
+ at both endpoints:
+
+ P(0) = y[i], P'(0) = d[i]
+ P(h) = y[i+1], P'(h) = d[i+1], where h = x[i+1] - x[i].
+
+ This gives:
+ p0 = y[i]
+ p1 = d[i]
+ p2 = (3Δ[i] - 2d[i] - d[i+1]) / h
+ p3 = (d[i] + d[i+1] - 2Δ[i]) / h²
+ """
+ p0 = y[:-1]
+ p1 = d[:-1]
+ p2 = (3.0 * delta - 2.0 * d[:-1] - d[1:]) / h
+ p3 = (d[:-1] + d[1:] - 2.0 * delta) / (h * h)
+ return torch.stack([p0, p1, p2, p3], dim=-1) # (N-1, 4)
+
+
+ class PCHIP1D:
+ """
+ 1D PCHIP interpolator (PyTorch).
+
+ - Shape-preserving, C¹ piecewise-cubic Hermite interpolant.
+ """
+
+ def __init__(self, x: torch.Tensor, y: torch.Tensor):
+ self.x, self.y = self._validate_xy(x, y)
+
+ h = self.x[1:] - self.x[:-1]
+ delta = (self.y[1:] - self.y[:-1]) / h
+
+ d = _pchip_derivatives(h, delta)
+ self._coeffs = _polynomial_coeffs(self.y, h, delta, d)
+
+ @staticmethod
+ def _validate_xy(
+ x: torch.Tensor,
+ y: torch.Tensor,
+ ) -> tuple[torch.Tensor, torch.Tensor]:
+ x = torch.as_tensor(x)
+ if not x.is_floating_point():
+ raise TypeError("x must be a floating point tensor")
+
+ y = torch.as_tensor(y, dtype=x.dtype, device=x.device)
+ if not y.is_floating_point():
+ raise TypeError("y must be a floating point tensor")
+
+ if x.ndim != 1 or y.ndim != 1:
+ raise ValueError("x and y must be 1D tensors")
+ if x.numel() != y.numel():
+ raise ValueError("x and y must have the same length")
+ if x.numel() < 2:
+ raise ValueError("Need at least 2 points")
+ if not torch.all(x[1:] > x[:-1]):
+ raise ValueError("x must be strictly increasing")
+
+ return x, y
+
+ def _interval_index(self, xq: torch.Tensor) -> torch.Tensor:
+ i = torch.searchsorted(self.x, xq, right=True) - 1
+ return i.clamp(0, self.x.numel() - 2)
+
+ def __call__(self, xq: torch.Tensor) -> torch.Tensor:
+ xq = torch.as_tensor(xq, dtype=self.x.dtype, device=self.x.device)
+
+ i = self._interval_index(xq)
+ t = xq - self.x[i]
+
+ p0, p1, p2, p3 = self._coeffs[i].unbind(-1)
+ return p0 + t * (p1 + t * (p2 + t * p3))
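
Note: a minimal usage sketch of the interpolator added above; the sample values are illustrative only, not taken from the package.

import torch
from emu_base.math.pchip_torch import PCHIP1D

t = torch.arange(6, dtype=torch.float64)                 # sample grid 0, 1, ..., 5
amp = torch.tensor([0.0, 1.0, 4.0, 4.0, 1.0, 0.0], dtype=torch.float64)

interp = PCHIP1D(t, amp)
t_mid = 0.5 * (t[:-1] + t[1:])                           # midpoints, as used by the midpoint sampling below
print(interp(t_mid))                                     # shape-preserving values between the samples
print(interp(torch.tensor([5.5], dtype=torch.float64)))  # beyond t[-1]: extrapolates the last interval's cubic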
emu_base/pulser_adapter.py CHANGED
@@ -1,14 +1,17 @@
- from typing import Sequence
+ from typing import Sequence, Iterator
+ from dataclasses import dataclass
  from enum import Enum
- import torch
  import math
+ import torch
  import pulser
  from pulser.sampler import SequenceSamples
  from pulser.noise_model import NoiseModel
  from pulser.register.base_register import QubitId
  from pulser.backend.config import EmulationConfig
  from pulser._hamiltonian_data import HamiltonianData
+ from pulser.channels.base_channel import States
  from emu_base.jump_lindblad_operators import get_lindblad_operators
+ from emu_base.math.pchip_torch import PCHIP1D


  class HamiltonianType(Enum):
@@ -38,128 +41,178 @@ def _get_all_lindblad_noise_operators(
  ]


- def _get_target_times(
- sequence: pulser.Sequence, config: EmulationConfig, dt: int
- ) -> list[int]:
- sequence_duration = sequence.get_duration(include_fall_time=config.with_modulation)
+ def _unique_observable_times(
+ config: EmulationConfig,
+ ) -> set[float]:
+ """Collect unique evaluation times in [0, 1] for all observables."""
+ observable_times: set[float] = set()

- observable_times = set(range(0, sequence_duration, dt))
- observable_times.add(sequence_duration)
  for obs in config.observables:
- times: Sequence[float]
  if obs.evaluation_times is not None:
- times = obs.evaluation_times
- elif config.default_evaluation_times != "Full":
- times = config.default_evaluation_times.tolist() # type: ignore[union-attr,assignment]
- observable_times |= set([round(time * sequence_duration) for time in times])
+ observable_times |= set(obs.evaluation_times)
+ elif not isinstance(config.default_evaluation_times, str): # != "Full"
+ observable_times |= set(config.default_evaluation_times.tolist())
+ else:
+ raise ValueError(
+ f"default config {config.default_evaluation_times} is not supported."
+ )
+
+ return observable_times
+

- target_times: list[int] = list(observable_times)
- target_times.sort()
+ def _get_target_times(
+ sequence: pulser.Sequence,
+ config: EmulationConfig,
+ dt: float,
+ ) -> list[float]:
+ """Compute the sorted absolute times to sample the sequence.
+
+ Combines a uniform grid with step ``dt`` and any extra observable times,
+ then converts everything to absolute times over the sequence duration.
+ """
+ duration = float(sequence.get_duration(include_fall_time=config.with_modulation))
+ n_steps = math.floor(duration / dt)
+ evolution_times_rel: set[float] = {
+ i * float(dt) / duration for i in range(n_steps + 1)
+ }
+ evolution_times_rel.add(1.0)
+ target_times_rel = evolution_times_rel | _unique_observable_times(config)
+ target_times: list[float] = sorted({t * duration for t in target_times_rel})
  return target_times


  def _extract_omega_delta_phi(
  noisy_samples: SequenceSamples,
  qubit_ids: tuple[str, ...],
- target_times: list[int],
+ target_times: Sequence[float],
  ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
- sequence_dict = noisy_samples.to_nested_dict(all_local=True, samples_type="tensor")[
- "Local"
- ]
- nsamples = len(target_times) - 1
- omega = torch.zeros(
- nsamples,
- len(qubit_ids),
- dtype=torch.complex128,
- )
- delta = torch.zeros(
- nsamples,
- len(qubit_ids),
- dtype=torch.complex128,
- )
- phi = torch.zeros(
- nsamples,
- len(qubit_ids),
- dtype=torch.complex128,
- )
- max_duration = noisy_samples.max_duration
+ """
+ Extract per-qubit laser parameters (Ω, δ, phase) from Pulser samples.
+
+ Pulser stores samples on the discrete grid t = 0, 1, ..., T-1
+ (with dt = 1.0), i.e. it does not provide values exactly
+ at t = T = pulse_duration. Pulser effectively assumes
+ Ω(T) = δ(T) = phase(T) = 0. For midpoint discretization we therefore
+ interpolate (and implicitly extrapolate near the end) to obtain
+ Ω(t_mid), δ(t_mid), and phase(t_mid) at t_mid = (t_k + t_{k+1}) / 2.

- if "ground-rydberg" in sequence_dict and len(sequence_dict) == 1:
+ We evaluate the laser parameters at time midpoints to benefit from
+ a midpoint scheme.
+ https://en.wikipedia.org/wiki/Midpoint_method
+ """
+ sequence_dict = noisy_samples.to_nested_dict(
+ all_local=True,
+ samples_type="tensor",
+ )["Local"]
+ if len(sequence_dict) != 1:
+ raise ValueError("Only single interaction type is supported.")
+
+ if "ground-rydberg" in sequence_dict:
  locals_a_d_p = sequence_dict["ground-rydberg"]
- elif "XY" in sequence_dict and len(sequence_dict) == 1:
+ elif "XY" in sequence_dict:
  locals_a_d_p = sequence_dict["XY"]
  else:
- raise ValueError("Only `ground-rydberg` and `mw_global` channels are supported.")
+ raise ValueError(
+ "Only `ground-rydberg` and `mw_global`(XY) channels are supported."
+ )
  qubit_ids_filtered = [qid for qid in qubit_ids if qid in locals_a_d_p]
- for i in range(nsamples):
- t = (target_times[i] + target_times[i + 1]) / 2
- # The sampled values correspond to the start of each interval
- # To maximize the order of the solver, we need the values in the middle
- if math.ceil(t) < max_duration:
- # If we're not the final step, approximate this using linear
- # interpolation
- # Note that for dt even, t1=t2
- t1 = math.floor(t)
- t2 = math.ceil(t)
- for q_pos, q_id in enumerate(qubit_ids_filtered):
- omega[i, q_pos] = (
- locals_a_d_p[q_id]["amp"][t1] + locals_a_d_p[q_id]["amp"][t2]
- ) / 2.0
- delta[i, q_pos] = (
- locals_a_d_p[q_id]["det"][t1] + locals_a_d_p[q_id]["det"][t2]
- ) / 2.0
- phi[i, q_pos] = (
- locals_a_d_p[q_id]["phase"][t1] + locals_a_d_p[q_id]["phase"][t2]
- ) / 2.0

- else:
- # We're in the final step and dt=1, approximate this using linear extrapolation
- # we can reuse omega_1 and omega_2 from before
- for q_pos, q_id in enumerate(qubit_ids_filtered):
- delta[i, q_pos] = (
- 3.0 * locals_a_d_p[q_id]["det"][t2] - locals_a_d_p[q_id]["det"][t1]
- ) / 2.0
- phi[i, q_pos] = (
- 3.0 * locals_a_d_p[q_id]["phase"][t2]
- - locals_a_d_p[q_id]["phase"][t1]
- ) / 2.0
- omega[i, q_pos] = max(
- (3.0 * locals_a_d_p[q_id]["amp"][t2] - locals_a_d_p[q_id]["amp"][t1])
- / 2.0,
- 0.0,
+ target_t = torch.as_tensor(target_times, dtype=torch.float64)
+ t_mid = 0.5 * (target_t[:-1] + target_t[1:])
+
+ shape = (t_mid.numel(), len(qubit_ids_filtered))
+ omega_mid = torch.zeros(shape, dtype=torch.float64, device=t_mid.device)
+ delta_mid = torch.zeros(shape, dtype=torch.float64, device=t_mid.device)
+ phi_mid = torch.zeros(shape, dtype=torch.float64, device=t_mid.device)
+
+ assert noisy_samples.max_duration == target_times[-1]
+ t_grid = torch.arange(target_times[-1], dtype=torch.float64)
+
+ laser_by_data = {
+ "amp": omega_mid,
+ "det": delta_mid,
+ "phase": phi_mid,
+ }
+ for name, data_mid in laser_by_data.items():
+ for q_pos, q_id in enumerate(qubit_ids_filtered):
+ signal = torch.as_tensor(locals_a_d_p[q_id][name])
+ if torch.is_complex(signal) and not torch.allclose(
+ signal.imag, torch.zeros_like(signal.imag)
+ ):
+ raise ValueError(f"Input {name} has non-zero imaginary part.")
+
+ pchip = PCHIP1D(t_grid, signal.real)
+ data_mid[:, q_pos] = pchip(t_mid)
+ if name == "amp":
+ data_mid[-1, q_pos] = torch.where(
+ data_mid[-1, q_pos] > 0,
+ data_mid[-1, q_pos],
+ 0,
  )

- return omega, delta, phi
+ omega_c, delta_c, phi_c = (
+ arr.to(torch.complex128) for arr in (omega_mid, delta_mid, phi_mid)
+ )
+ return omega_c, delta_c, phi_c


- class PulserData:
- slm_end_time: float
- full_interaction_matrix: torch.Tensor
- masked_interaction_matrix: torch.Tensor
+ @dataclass(frozen=True)
+ class SequenceData:
  omega: torch.Tensor
  delta: torch.Tensor
  phi: torch.Tensor
+ full_interaction_matrix: torch.Tensor
+ masked_interaction_matrix: torch.Tensor
+ bad_atoms: dict[str, bool]
+ lindblad_ops: list[torch.Tensor]
+ noise_model: pulser.NoiseModel
+ qubit_ids: tuple[QubitId, ...]
+ target_times: list[float]
+ eigenstates: list[States]
+ qubit_count: int
+ dim: int
+ hamiltonian_type: HamiltonianType
+ slm_end_time: float
+
+
+ class PulserData:
+ target_times: list[float]
+ slm_end_time: float
+ full_interaction_matrix: torch.Tensor | None
  hamiltonian_type: HamiltonianType
  lindblad_ops: list[torch.Tensor]
  qubit_ids: tuple[QubitId, ...]
+ noise_model: pulser.NoiseModel
+ interaction_cutoff: float
+ eigenstates: list[States]
+ qubit_count: int
+ dim: int

- def __init__(self, *, sequence: pulser.Sequence, config: EmulationConfig, dt: int):
+ def __init__(self, *, sequence: pulser.Sequence, config: EmulationConfig, dt: float):
+ self._sequence = sequence
  self.qubit_ids = sequence.register.qubit_ids
  self.qubit_count = len(self.qubit_ids)
  self.target_times = _get_target_times(sequence=sequence, config=config, dt=dt)
+ self.noise_model = (
+ sequence.device.default_noise_model
+ if config.prefer_device_noise_model
+ else config.noise_model
+ )
+
+ if not self.noise_model:
+ self.noise_model = NoiseModel()
+
  self.hamiltonian = HamiltonianData.from_sequence(
  sequence,
  with_modulation=config.with_modulation,
- noise_model=config.noise_model,
+ noise_model=self.noise_model,
+ n_trajectories=config.n_trajectories,
  )

- self.omega, self.delta, self.phi = _extract_omega_delta_phi(
- self.hamiltonian.noisy_samples, self.qubit_ids, self.target_times
- )
- self.eigenstates = self.hamiltonian.eigenbasis
+ self.eigenstates = self.hamiltonian.basis_data.eigenbasis

- int_type = self.hamiltonian.interaction_type
- self.dim = self.hamiltonian.dim
+ int_type = self.hamiltonian.basis_data.interaction_type
+ self.dim = self.hamiltonian.basis_data.dim
  if int_type == "ising": # for local and global
  self.hamiltonian_type = HamiltonianType.Rydberg
  elif int_type == "XY":
@@ -168,33 +221,65 @@ class PulserData:
  raise ValueError(f"Unsupported basis: {int_type}")

  self.lindblad_ops = _get_all_lindblad_noise_operators(
- config.noise_model, dim=self.dim, interact_type=int_type
+ self.noise_model, dim=self.dim, interact_type=int_type
  )
  self.has_lindblad_noise: bool = self.lindblad_ops != []

+ self.full_interaction_matrix = None
  if config.interaction_matrix is not None:
  assert len(config.interaction_matrix) == self.qubit_count, (
  "The number of qubits in the register should be the same as the size of "
  "the interaction matrix"
  )
-
  self.full_interaction_matrix = config.interaction_matrix.as_tensor()
- else:
- self.full_interaction_matrix = (
- self.hamiltonian.noisy_interaction_matrix.as_tensor()
- )
-
- self.full_interaction_matrix[
- torch.abs(self.full_interaction_matrix) < config.interaction_cutoff
- ] = 0.0
- self.masked_interaction_matrix = self.full_interaction_matrix.clone()

+ self.interaction_cutoff = config.interaction_cutoff
  self.slm_end_time = (
  sequence._slm_mask_time[1] if len(sequence._slm_mask_time) > 1 else 0.0
  )

- # disable interaction for SLM masked qubits
- slm_targets = list(sequence._slm_mask_targets)
- for target in sequence.register.find_indices(slm_targets):
- self.masked_interaction_matrix[target] = 0.0
- self.masked_interaction_matrix[:, target] = 0.0
+ def get_sequences(self) -> Iterator[SequenceData]:
+ for samples in self.hamiltonian.noisy_samples:
+ full_interaction_matrix = (
+ self.full_interaction_matrix
+ if self.full_interaction_matrix is not None
+ else samples.trajectory.interaction_matrix.as_tensor()
+ )
+
+ full_interaction_matrix = full_interaction_matrix.clone()
+
+ full_interaction_matrix[
+ torch.abs(full_interaction_matrix) < self.interaction_cutoff
+ ] = 0.0
+
+ masked_interaction_matrix = full_interaction_matrix.clone()
+
+ # disable interaction for SLM masked qubits
+
+ slm_targets = list(self._sequence._slm_mask_targets)
+ for target in self._sequence.register.find_indices(slm_targets):
+ masked_interaction_matrix[target] = 0.0
+ masked_interaction_matrix[:, target] = 0.0
+
+ omega, delta, phi = _extract_omega_delta_phi(
+ samples.samples, self.qubit_ids, self.target_times
+ )
+
+ for _ in range(samples.reps):
+ yield SequenceData(
+ omega,
+ delta,
+ phi,
+ full_interaction_matrix,
+ masked_interaction_matrix,
+ samples.trajectory.bad_atoms,
+ self.lindblad_ops,
+ self.noise_model,
+ self.qubit_ids,
+ self.target_times,
+ self.eigenstates,
+ self.qubit_count,
+ self.dim,
+ self.hamiltonian_type,
+ self.slm_end_time,
+ )
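
Note: a hypothetical sketch of how the reworked classes fit together; `sequence` and `config` (a pulser.Sequence and an EmulationConfig) are assumed to exist, and the dt value is illustrative.

from emu_base import PulserData

data = PulserData(sequence=sequence, config=config, dt=10.0)
for traj in data.get_sequences():
    # One SequenceData is yielded per noise-trajectory repetition (samples.reps).
    print(traj.omega.shape)                    # (number of time intervals, number of sampled qubits)
    print(traj.full_interaction_matrix.shape)  # (qubit_count, qubit_count)
    print(traj.bad_atoms, traj.slm_end_time)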
emu_base/utils.py CHANGED
@@ -12,8 +12,29 @@ if unix_like:


  def init_logging(log_level: int, log_file: Path | None) -> logging.Logger:
+ """Create and return a configured logger for the emulators package.
+
+ This configures a logger named "emulators" and ensures it does not
+ propagate messages to ancestor loggers. Any existing handlers attached to
+ this logger are removed before the new handler is added.
+
+ Behavior
+ - If `log_file` is None, a StreamHandler writing to stdout is used.
+ - If `log_file` is a Path, a FileHandler is created (mode='w'), which
+ overwrites the file on each call.
+ - The handler's level is set to `log_level` and uses a simple
+ "%(message)s" formatter.
+
+ Args:
+ log_level (int): Logging level (e.g. logging.INFO).
+ log_file (Path | None): Path to a log file, or None to log to stdout.
+
+ Returns:
+ logging.Logger: The configured logger instance named "emulators".
+ """
  logger = logging.getLogger("emulators")
  logger.propagate = False
+ logger.setLevel(logging.DEBUG)

  handler: logging.Handler
  if log_file is None:
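
Note: a minimal usage sketch of the documented behavior; the log file name and levels are illustrative.

import logging
from pathlib import Path
from emu_base import init_logging

logger = init_logging(logging.INFO, None)        # StreamHandler on stdout
logger.info("emulation started")

# Or log to a file instead; the FileHandler uses mode='w', so the file is
# overwritten on each call.
logger = init_logging(logging.DEBUG, Path("emu.log"))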
emu_base-2.5.2.dist-info/METADATA → emu_base-2.7.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: emu-base
- Version: 2.5.2
+ Version: 2.7.0
  Summary: Pasqal base classes for emulators
  Project-URL: Documentation, https://pasqal-io.github.io/emulators/
  Project-URL: Repository, https://github.com/pasqal-io/emulators
@@ -25,7 +25,7 @@ Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: Implementation :: CPython
  Classifier: Programming Language :: Python :: Implementation :: PyPy
  Requires-Python: >=3.10
- Requires-Dist: pulser-core==1.6.*
+ Requires-Dist: pulser-core[torch]>=1.7.0rc2
  Requires-Dist: torch>=2.9.0
  Description-Content-Type: text/markdown

emu_base-2.5.2.dist-info/RECORD → emu_base-2.7.0.dist-info/RECORD CHANGED
@@ -1,14 +1,15 @@
- emu_base/__init__.py,sha256=Hwz7RJiKKY-lie2PIQIK-KaOVe0uGhyarI21w_PPqeE,759
+ emu_base/__init__.py,sha256=GCfEvZoxqxv5V9TIni0NyUyWSP83PxL1toiMQfOirok,793
  emu_base/constants.py,sha256=41LYkKLUCz-oxPbd-j7nUDZuhIbUrnez6prT0uR0jcE,56
- emu_base/jump_lindblad_operators.py,sha256=F_C-1aruXY_SLzw0m_HD5loqRGdjgsLHEzttlMzWNEw,3015
- emu_base/pulser_adapter.py,sha256=QyvJVWmAQDpFBc68-cnndNBkY1rvdDeEXXPse-afQ68,7369
- emu_base/utils.py,sha256=kXfRFq31r_dfLbBC0SHiPpvf7Wll4behd2vqQJsTrVo,3553
+ emu_base/jump_lindblad_operators.py,sha256=tL2DDG7kG4GthzZckzLpZlBzEFHLt0O_mut8Qm0Ub3w,2958
+ emu_base/pulser_adapter.py,sha256=muiyEthtTOgSv3zYXczgHbXF9Mx64lpOpk9o0leMbpg,10171
+ emu_base/utils.py,sha256=50Nbm95xXUmnJVA-r1Pd5z4pzEIjKUZs385xesE5sxA,4413
  emu_base/math/__init__.py,sha256=6BbIytYV5uC-e5jLMtIErkcUl_PvfSNnhmVFY9Il8uQ,97
  emu_base/math/brents_root_finding.py,sha256=AVx6L1Il6rpPJWrLJ7cn6oNmJyZOPRgEaaZaubC9lsU,3711
  emu_base/math/double_krylov.py,sha256=X16dyCbyzdP7fFK-hmKS03Q-DJtC6TZ8sJrGTJ6akIc,3708
  emu_base/math/krylov_energy_min.py,sha256=iR4hmE0eXptbAg3opikd5d4Zv7dhnDrawH-n_4KG-cc,4009
  emu_base/math/krylov_exp.py,sha256=mGFddVQ8mEbwypbZtnlRPFpi4Nf8JZT6OKLHloIwCDQ,3934
  emu_base/math/matmul.py,sha256=lEAnV0b5z_f1xEA-9p-WXxA8bM3QbShiHdXQ3ZkZFcQ,877
- emu_base-2.5.2.dist-info/METADATA,sha256=Nvrz_okEVXHqTG2aYZmjLBlK57DP3jz2YtHCaiJaexc,3604
- emu_base-2.5.2.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
- emu_base-2.5.2.dist-info/RECORD,,
+ emu_base/math/pchip_torch.py,sha256=Pl95A6BVLts7wuuJSeuXZOIfqpdXgxn3ZOtDxORwOcE,8951
+ emu_base-2.7.0.dist-info/METADATA,sha256=SHTj8BJK97soOWGvOV0aSkILS9NMkpwZxh7O_trhjt0,3614
+ emu_base-2.7.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ emu_base-2.7.0.dist-info/RECORD,,