sdf-xarray 0.2.0-cp312-cp312-win_amd64.whl → 0.3.2-cp312-cp312-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lib/SDFC_14.4.7/sdfc.lib CHANGED
Binary file
sdf_xarray/__init__.py CHANGED
@@ -1,12 +1,17 @@
+ import contextlib
  import os
- import pathlib
  import re
  from collections import Counter, defaultdict
+ from collections.abc import Callable, Iterable
+ from importlib.metadata import version
  from itertools import product
- from typing import Iterable
+ from os import PathLike as os_PathLike
+ from pathlib import Path
+ from typing import ClassVar

  import numpy as np
  import xarray as xr
+ from packaging.version import Version
  from xarray.backends import AbstractDataStore, BackendArray, BackendEntrypoint
  from xarray.backends.file_manager import CachingFileManager
  from xarray.backends.locks import ensure_lock
@@ -14,9 +19,23 @@ from xarray.core import indexing
  from xarray.core.utils import close_on_error, try_read_magic_number_from_path
  from xarray.core.variable import Variable

+ # NOTE: Do not delete these lines, otherwise the "epoch" dataset and dataarray
+ # accessors will not be imported when the user imports sdf_xarray
+ import sdf_xarray.dataset_accessor
  import sdf_xarray.plotting  # noqa: F401

- from .sdf_interface import Constant, SDFFile
+ # NOTE: This attempts to initialise with the "pint" accessor if the user
+ # has installed the package
+ with contextlib.suppress(ImportError):
+     import pint_xarray  # noqa: F401
+
+ from .sdf_interface import Constant, SDFFile  # type: ignore # noqa: PGH003
+
+ # TODO Remove this once the new kwarg options are fully implemented
+ if Version(version("xarray")) >= Version("2025.8.0"):
+     xr.set_options(use_new_combine_kwarg_defaults=True)
+
+ PathLike = str | os_PathLike


  def _rename_with_underscore(name: str) -> str:
@@ -48,24 +67,81 @@ def _process_latex_name(variable_name: str) -> str:
      return variable_name


- def combine_datasets(path_glob: Iterable | str, **kwargs) -> xr.Dataset:
-     """Combine all datasets using a single time dimension"""
+ def _resolve_glob(path_glob: PathLike | Iterable[PathLike]):
+     """
+     Normalise input path_glob into a sorted list of absolute, resolved Path objects.
+     """
+
+     try:
+         p = Path(path_glob)
+         paths = list(p.parent.glob(p.name)) if p.name == "*.sdf" else list(p)
+     except TypeError:
+         paths = list({Path(p) for p in path_glob})
+
+     paths = sorted(p.resolve() for p in paths)
+     if not paths:
+         raise FileNotFoundError(f"No files matched pattern or input: {path_glob!r}")
+     return paths
+
+
+ def purge_unselected_data_vars(ds: xr.Dataset, data_vars: list[str]) -> xr.Dataset:
+     """
+     If the user has exclusively requested only certain variables be
+     loaded in then we purge all other variables and dimensions
+     """
+     existing_data_vars = set(ds.data_vars.keys())
+     vars_to_keep = set(data_vars) & existing_data_vars
+     vars_to_drop = existing_data_vars - vars_to_keep
+     ds = ds.drop_vars(vars_to_drop)
+
+     existing_dims = set(ds.sizes)
+     dims_to_keep = set()
+     for var in vars_to_keep:
+         dims_to_keep.update(ds[var].coords._names)
+         dims_to_keep.update(ds[var].dims)
+
+     coords_to_drop = existing_dims - dims_to_keep
+     return ds.drop_dims(coords_to_drop)
+
+
+ def combine_datasets(
+     path_glob: Iterable | str, data_vars: list[str], **kwargs
+ ) -> xr.Dataset:
+     """
+     Combine all datasets using a single time dimension, optionally extract
+     data from only the listed data_vars
+     """
+
+     if data_vars is not None:
+         return xr.open_mfdataset(
+             path_glob,
+             join="outer",
+             coords="different",
+             compat="no_conflicts",
+             combine="nested",
+             concat_dim="time",
+             preprocess=SDFPreprocess(data_vars=data_vars),
+             **kwargs,
+         )

      return xr.open_mfdataset(
          path_glob,
-         data_vars="minimal",
-         coords="minimal",
-         compat="override",
+         data_vars="all",
+         coords="different",
+         compat="no_conflicts",
+         join="outer",
          preprocess=SDFPreprocess(),
          **kwargs,
      )


  def open_mfdataset(
-     path_glob: Iterable | str | pathlib.Path | pathlib.Path.glob,
+     path_glob: Iterable | str | Path | Callable[..., Iterable[Path]],
      *,
      separate_times: bool = False,
      keep_particles: bool = False,
+     probe_names: list[str] | None = None,
+     data_vars: list[str] | None = None,
  ) -> xr.Dataset:
      """Open a set of EPOCH SDF files as one `xarray.Dataset`

@@ -95,20 +171,36 @@ def open_mfdataset(
          different output frequencies
      keep_particles :
          If ``True``, also load particle data (this may use a lot of memory!)
+     probe_names :
+         List of EPOCH probe names
+     data_vars :
+         List of data vars to load in (if not specified, loads in all variables)
      """

-     # TODO: This is not very robust, look at how xarray.open_mfdataset does it
-     if isinstance(path_glob, str):
-         path_glob = pathlib.Path().glob(path_glob)
-
-     # Coerce to list because we might need to use the sequence multiple times
-     path_glob = sorted(list(path_glob))
+     path_glob = _resolve_glob(path_glob)

      if not separate_times:
-         return combine_datasets(path_glob, keep_particles=keep_particles)
+         return combine_datasets(
+             path_glob,
+             data_vars=data_vars,
+             keep_particles=keep_particles,
+             probe_names=probe_names,
+         )

-     time_dims, var_times_map = make_time_dims(path_glob)
-     all_dfs = [xr.open_dataset(f, keep_particles=keep_particles) for f in path_glob]
+     _, var_times_map = make_time_dims(path_glob)
+
+     all_dfs = []
+     for f in path_glob:
+         ds = xr.open_dataset(f, keep_particles=keep_particles, probe_names=probe_names)
+
+         # If the data_vars are specified then only load them in and disregard the rest.
+         # If there are no remaining data variables then skip adding the dataset to the list.
+         if data_vars is not None:
+             ds = purge_unselected_data_vars(ds, data_vars)
+             if not ds.data_vars:
+                 continue
+
+         all_dfs.append(ds)

      for df in all_dfs:
          for da in df:
@@ -125,7 +217,11 @@ def open_mfdataset(
          )

      return xr.combine_by_coords(
-         all_dfs, data_vars="minimal", combine_attrs="drop_conflicts"
+         all_dfs,
+         coords="different",
+         combine_attrs="drop_conflicts",
+         join="outer",
+         compat="no_conflicts",
      )


@@ -146,14 +242,12 @@ def make_time_dims(path_glob):
      )

      # Count the unique set of lists of times
-     times_count = Counter((tuple(v) for v in vars_count.values()))
+     times_count = Counter(tuple(v) for v in vars_count.values())

      # Give each set of times a unique name
      time_dims = {}
-     count = 0
-     for t in times_count:
+     for count, t in enumerate(times_count):
          time_dims[f"time{count}"] = t
-         count += 1

      # Map each variable to the name of its time dimension
      var_times_map = {}
@@ -205,19 +299,28 @@ class SDFDataStore(AbstractDataStore):
      """Store for reading and writing data via the SDF library."""

      __slots__ = (
-         "lock",
-         "drop_variables",
-         "keep_particles",
          "_filename",
          "_manager",
+         "drop_variables",
+         "keep_particles",
+         "lock",
+         "probe_names",
      )

-     def __init__(self, manager, drop_variables=None, keep_particles=False, lock=None):
+     def __init__(
+         self,
+         manager,
+         drop_variables=None,
+         keep_particles=False,
+         lock=None,
+         probe_names=None,
+     ):
          self._manager = manager
          self._filename = self.ds.filename
          self.drop_variables = drop_variables
          self.keep_particles = keep_particles
          self.lock = ensure_lock(lock)
+         self.probe_names = probe_names

      @classmethod
      def open(
@@ -226,6 +329,7 @@ class SDFDataStore(AbstractDataStore):
          lock=None,
          drop_variables=None,
          keep_particles=False,
+         probe_names=None,
      ):
          if isinstance(filename, os.PathLike):
              filename = os.fspath(filename)
@@ -236,6 +340,7 @@ class SDFDataStore(AbstractDataStore):
              lock=lock,
              drop_variables=drop_variables,
              keep_particles=keep_particles,
+             probe_names=probe_names,
          )

      def _acquire(self, needs_lock=True):
@@ -249,12 +354,21 @@ class SDFDataStore(AbstractDataStore):
      def acquire_context(self, needs_lock=True):
          return self._manager.acquire_context(needs_lock)

-     def load(self):
+     def load(self):  # noqa: PLR0912, PLR0915
          # Drop any requested variables
          if self.drop_variables:
+             # Build a mapping from underscored names to real variable names
+             name_map = {_rename_with_underscore(var): var for var in self.ds.variables}
+
              for variable in self.drop_variables:
-                 # TODO: nicer error handling
-                 self.ds.variables.pop(variable)
+                 key = _rename_with_underscore(variable)
+                 original_name = name_map.get(key)
+
+                 if original_name is None:
+                     raise KeyError(
+                         f"Variable '{variable}' not found (interpreted as '{key}')."
+                     )
+                 self.ds.variables.pop(original_name)

          # These two dicts are global metadata about the run or file
          attrs = {**self.ds.header, **self.ds.run_info}
@@ -274,8 +388,7 @@ class SDFDataStore(AbstractDataStore):
          def _process_grid_name(grid_name: str, transform_func) -> str:
              """Apply the given transformation function and then rename with underscores."""
              transformed_name = transform_func(grid_name)
-             renamed_name = _rename_with_underscore(transformed_name)
-             return renamed_name
+             return _rename_with_underscore(transformed_name)

          for key, value in self.ds.grids.items():
              if "cpu" in key.lower():
@@ -310,6 +423,8 @@ class SDFDataStore(AbstractDataStore):
              # Had some problems with these variables, so just ignore them for now
              if "cpu" in key.lower():
                  continue
+             if "boundary" in key.lower():
+                 continue
              if "output file" in key.lower():
                  continue

@@ -336,7 +451,28 @@ class SDFDataStore(AbstractDataStore):

              if value.is_point_data:
                  # Point (particle) variables are 1D
-                 var_coords = (f"ID_{_process_grid_name(key, _grid_species_name)}",)
+
+                 # Particle data does not maintain a fixed dimension size
+                 # throughout the simulation. An example of a particle name comes
+                 # in the form of `Particles/Px/Ion_H`, which is then modified
+                 # using `_process_grid_name()` into `Ion_H`. This is fine, as the
+                 # other components of the momentum (`Py`, `Pz`) will have the same
+                 # size, since they represent the same bunch of particles.
+
+                 # Probes, however, have names in the form of `Electron_Front_Probe/Px`,
+                 # which are changed to just `Px`. This is fine when there is only one
+                 # probe in the system, but when there are multiple they will have
+                 # conflicting sizes, so we can't keep the name as simply `Px`; we
+                 # instead set their dimension to the full name `Electron_Front_Probe_Px`.
+                 is_probe_name_match = self.probe_names is not None and any(
+                     name in key for name in self.probe_names
+                 )
+                 name_processor = (
+                     _rename_with_underscore
+                     if is_probe_name_match
+                     else _grid_species_name
+                 )
+                 var_coords = (f"ID_{_process_grid_name(key, name_processor)}",)
              else:
                  # These are DataArrays

@@ -403,8 +539,9 @@ class SDFEntrypoint(BackendEntrypoint):
          *,
          drop_variables=None,
          keep_particles=False,
+         probe_names=None,
      ):
-         if isinstance(filename_or_obj, pathlib.Path):
+         if isinstance(filename_or_obj, Path):
              # sdf library takes a filename only
              # TODO: work out if we need to deal with file handles
              filename_or_obj = str(filename_or_obj)
@@ -413,33 +550,68 @@ class SDFEntrypoint(BackendEntrypoint):
              filename_or_obj,
              drop_variables=drop_variables,
              keep_particles=keep_particles,
+             probe_names=probe_names,
          )
          with close_on_error(store):
              return store.load()

-     open_dataset_parameters = ["filename_or_obj", "drop_variables", "keep_particles"]
+     open_dataset_parameters: ClassVar[list[str]] = [
+         "filename_or_obj",
+         "drop_variables",
+         "keep_particles",
+         "probe_names",
+     ]

      def guess_can_open(self, filename_or_obj):
          magic_number = try_read_magic_number_from_path(filename_or_obj)
          if magic_number is not None:
              return magic_number.startswith(b"SDF1")

-         try:
-             _, ext = os.path.splitext(filename_or_obj)
-         except TypeError:
-             return False
-         return ext in {".sdf", ".SDF"}
+         return Path(filename_or_obj).suffix in {".sdf", ".SDF"}

      description = "Use .sdf files in Xarray"

-     url = "https://epochpic.github.io/documentation/visualising_output/python.html"
+     url = "https://epochpic.github.io/documentation/visualising_output/python_beam.html"


  class SDFPreprocess:
-     """Preprocess SDF files for xarray ensuring matching job ids and sets time dimension"""
+     """Preprocess SDF files for xarray, ensuring matching job ids and setting
+     the time dimension.
+
+     This class is used as a 'preprocess' function within ``xr.open_mfdataset``. It
+     performs three main duties on each individual file's Dataset:
+
+     1. Checks for a **matching job ID** across all files to ensure dataset consistency.
+     2. **Filters** the Dataset to keep only the variables specified in `data_vars`
+        and their required coordinates.
+     3. **Expands dimensions** to include a single 'time' coordinate, preparing the
+        Dataset for concatenation.
+
+     EPOCH can output variables at different intervals, so some SDF files
+     may not contain the requested variable. We combine this data into one
+     dataset by concatenating across the time dimension.
+
+     The combination is performed using ``join="outer"`` (in the calling
+     ``open_mfdataset`` function), meaning that the final combined dataset will
+     contain the variable across the entire time span, with NaNs filling the time
+     steps where the variable was absent in the individual file.
+
+     With large SDF files, this filtering method will save on memory consumption when
+     compared to loading all variables from all files before concatenation.

-     def __init__(self):
+     Parameters
+     ----------
+     data_vars :
+         A list of data variables to load in (if not specified, loads
+         in all variables)
+     """
+
+     def __init__(
+         self,
+         data_vars: list[str] | None = None,
+     ):
          self.job_id: int | None = None
+         self.data_vars = data_vars

      def __call__(self, ds: xr.Dataset) -> xr.Dataset:
          if self.job_id is None:
@@ -450,17 +622,23 @@ class SDFPreprocess:
                  f"Mismatching job ids (got {ds.attrs['jobid1']}, expected {self.job_id})"
              )

-         ds = ds.expand_dims(time=[ds.attrs["time"]])
+         # If the user has exclusively requested only certain variables be
+         # loaded in then we purge all other variables and coordinates
+         if self.data_vars:
+             ds = purge_unselected_data_vars(ds, self.data_vars)
+
+         time_val = ds.attrs.get("time", np.nan)
+         ds = ds.expand_dims(time=[time_val])
          ds = ds.assign_coords(
              time=(
                  "time",
-                 [ds.attrs["time"]],
+                 [time_val],
                  {"units": "s", "long_name": "Time", "full_name": "time"},
              )
          )
          # Particles' spatial coordinates also evolve in time
          for coord, value in ds.coords.items():
              if value.attrs.get("point_data", False):
-                 ds.coords[coord] = value.expand_dims(time=[ds.attrs["time"]])
+                 ds.coords[coord] = value.expand_dims(time=[time_val])

          return ds
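Taken together, the `__init__.py` changes add two opt-in loading controls: `data_vars` to restrict what gets read, and `probe_names` to disambiguate per-probe particle dimensions. A minimal usage sketch of the 0.3.x API (the file glob and the variable/probe names here are illustrative, not taken from the diff):

```python
from sdf_xarray import open_mfdataset

# Load only the listed variables; purge_unselected_data_vars() drops all
# other variables and any dimensions only they used.
ds = open_mfdataset(
    "run/*.sdf",
    data_vars=["Electric_Field_Ex", "Derived_Number_Density_Electron"],
)

# With several probes in one deck, naming them keeps each probe's
# point-data dimension distinct (e.g. ID_Electron_Front_Probe_Px
# rather than a clashing ID_Px).
ds_probes = open_mfdataset(
    "run/*.sdf",
    keep_particles=True,
    probe_names=["Electron_Front_Probe"],
)
```

Note also that `drop_variables` now accepts underscored names and raises a descriptive `KeyError` for unknown variables, rather than failing on a raw `pop`.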
sdf_xarray/_version.py CHANGED
@@ -1,16 +1,34 @@
- # file generated by setuptools_scm
+ # file generated by setuptools-scm
  # don't change, don't track in version control
+
+ __all__ = [
+     "__version__",
+     "__version_tuple__",
+     "version",
+     "version_tuple",
+     "__commit_id__",
+     "commit_id",
+ ]
+
  TYPE_CHECKING = False
  if TYPE_CHECKING:
-     from typing import Tuple, Union
+     from typing import Tuple
+     from typing import Union
+
      VERSION_TUPLE = Tuple[Union[int, str], ...]
+     COMMIT_ID = Union[str, None]
  else:
      VERSION_TUPLE = object
+     COMMIT_ID = object

  version: str
  __version__: str
  __version_tuple__: VERSION_TUPLE
  version_tuple: VERSION_TUPLE
+ commit_id: COMMIT_ID
+ __commit_id__: COMMIT_ID
+
+ __version__ = version = '0.3.2'
+ __version_tuple__ = version_tuple = (0, 3, 2)

- __version__ = version = '0.2.0'
- __version_tuple__ = version_tuple = (0, 2, 0)
+ __commit_id__ = commit_id = 'g331520e50'
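The regenerated `_version.py` now exposes the build's commit id alongside the version. A quick introspection sketch, with the values as shipped in this wheel:

```python
# Read the generated version metadata directly from the module.
from sdf_xarray._version import __commit_id__, __version__

print(__version__)    # "0.3.2"
print(__commit_id__)  # "g331520e50"
```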
sdf_xarray/dataset_accessor.py ADDED
@@ -0,0 +1,73 @@
+ from typing import Union
+
+ import xarray as xr
+
+
+ @xr.register_dataset_accessor("epoch")
+ class EpochAccessor:
+     def __init__(self, xarray_obj: xr.Dataset):
+         # The xarray object is the Dataset, which we store as self._ds
+         self._ds = xarray_obj
+
+     def rescale_coords(
+         self,
+         multiplier: float,
+         unit_label: str,
+         coord_names: Union[str, list[str]],
+     ) -> xr.Dataset:
+         """
+         Rescales specified X and Y coordinates in the Dataset by a given multiplier
+         and updates the unit label attribute.
+
+         Parameters
+         ----------
+         multiplier : float
+             The factor by which to multiply the coordinate values (e.g., 1e6 for meters to microns).
+         unit_label : str
+             The new unit label for the coordinates (e.g., "µm").
+         coord_names : str or list of str
+             The name(s) of the coordinate variable(s) to rescale.
+             If a string, only that coordinate is rescaled.
+             If a list, all listed coordinates are rescaled.
+
+         Returns
+         -------
+         xr.Dataset
+             A new Dataset with the updated and rescaled coordinates.
+
+         Examples
+         --------
+         # Convert X, Y, and Z from meters to microns
+         >>> ds_in_microns = ds.epoch.rescale_coords(1e6, "µm", coord_names=["X_Grid", "Y_Grid", "Z_Grid"])
+
+         # Convert only X to millimeters
+         >>> ds_in_mm = ds.epoch.rescale_coords(1000, "mm", coord_names="X_Grid")
+         """
+
+         ds = self._ds
+         new_coords = {}
+
+         if isinstance(coord_names, str):
+             # Convert single string to a list
+             coords_to_process = [coord_names]
+         elif isinstance(coord_names, list):
+             # Use the provided list
+             coords_to_process = coord_names
+         else:
+             coords_to_process = list(coord_names)
+
+         for coord_name in coords_to_process:
+             if coord_name not in ds.coords:
+                 raise ValueError(
+                     f"Coordinate '{coord_name}' not found in the Dataset. Cannot rescale."
+                 )
+
+             coord_original = ds[coord_name]
+
+             coord_rescaled = coord_original * multiplier
+             coord_rescaled.attrs = coord_original.attrs.copy()
+             coord_rescaled.attrs["units"] = unit_label
+
+             new_coords[coord_name] = coord_rescaled
+
+         return ds.assign_coords(new_coords)
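This accessor is registered as a side effect of importing the package (hence the NOTE in `__init__.py` about not deleting the import). A short usage sketch, reusing the coordinate name from the docstring example:

```python
import xarray as xr

import sdf_xarray  # noqa: F401 -- importing registers the "epoch" accessor

ds = xr.open_dataset("0010.sdf")

# Multiply the coordinate values by 1e6 and relabel the units; a new
# Dataset is returned and the original is left untouched.
ds_um = ds.epoch.rescale_coords(1e6, "µm", coord_names="X_Grid")
```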
sdf_xarray/plotting.py CHANGED
@@ -10,60 +10,82 @@ if TYPE_CHECKING:
      from matplotlib.animation import FuncAnimation


- def get_frame_title(data: xr.DataArray, frame: int, display_sdf_name: bool) -> str:
+ def get_frame_title(
+     data: xr.DataArray,
+     frame: int,
+     display_sdf_name: bool = False,
+     title_custom: str | None = None,
+ ) -> str:
      """Generate the title for a frame"""
-     sdf_name = f", {frame:04d}.sdf" if display_sdf_name else ""
+     # Adds custom text to the start of the title, if specified
+     title_custom = "" if title_custom is None else f"{title_custom}, "
+     # Adds the time and associated units to the title
      time = data["time"][frame].to_numpy()
-     return f"t = {time:.2e}s{sdf_name}"

+     time_units = data["time"].attrs.get("units", False)
+     time_units_formatted = f" [{time_units}]" if time_units else ""
+     title_time = f"time = {time:.2e}{time_units_formatted}"

- def calculate_window_velocity_and_edges(
-     data: xr.DataArray, x_axis_coord: str
- ) -> tuple[float, tuple[float, float], np.ndarray]:
-     """Calculate the moving window's velocity and initial edges.
+     # Adds the sdf name to the title, if specified
+     title_sdf = f", {frame:04d}.sdf" if display_sdf_name else ""
+     return f"{title_custom}{title_time}{title_sdf}"

-     1. Finds a lineout of the target atribute in the x coordinate of the first frame
-     2. Removes the NaN values to isolate the simulation window
-     3. Produces the index size of the window, indexed at zero
-     4. Uses distance moved and final time of the simulation to calculate velocity and initial xlims
-     """
-     time_since_start = data["time"].values - data["time"].values[0]
-     initial_window_edge = (0, 0)
-     target_lineout = data.values[0, :, 0]
-     target_lineout_window = target_lineout[~np.isnan(target_lineout)]
-     x_grid = data[x_axis_coord].values
-     window_size_index = target_lineout_window.size - 1

-     velocity_window = (x_grid[-1] - x_grid[window_size_index]) / time_since_start[-1]
-     initial_window_edge = (x_grid[0], x_grid[window_size_index])
-     return velocity_window, initial_window_edge, time_since_start
+ def calculate_window_boundaries(
+     data: xr.DataArray, xlim: tuple[float, float] | False = False
+ ) -> np.ndarray:
+     """Calculate the boundaries of a moving window frame. If the user specifies
+     xlim, this will be used as the initial boundaries and the window will move
+     along accordingly.
+     """
+     x_grid = data["X_Grid_mid"].values
+     x_half_cell = (x_grid[1] - x_grid[0]) / 2
+     N_frames = data["time"].size

+     # Find the window boundaries by finding the first and last non-NaN values
+     # in the 0th lineout along the x-axis.
+     window_boundaries = np.zeros((N_frames, 2))
+     for i in range(N_frames):
+         # Check if data is 1D
+         if data.ndim == 2:
+             target_lineout = data[i].values
+         # Check if data is 2D
+         if data.ndim == 3:
+             target_lineout = data[i, :, 0].values
+         x_grid_non_nan = x_grid[~np.isnan(target_lineout)]
+         window_boundaries[i, 0] = x_grid_non_nan[0] - x_half_cell
+         window_boundaries[i, 1] = x_grid_non_nan[-1] + x_half_cell

- def compute_global_limits(data: xr.DataArray) -> tuple[float, float]:
-     """Remove all NaN values from the target data to calculate the 1st and 99th percentiles,
-     excluding extreme outliers.
-     """
-     values_no_nan = data.values[~np.isnan(data.values)]
-     global_min = np.percentile(values_no_nan, 1)
-     global_max = np.percentile(values_no_nan, 99)
-     return global_min, global_max
+     # The user's choice for the initial window edge supersedes the one calculated
+     if xlim:
+         window_boundaries = window_boundaries + xlim - window_boundaries[0]
+     return window_boundaries


- def is_1d(data: xr.DataArray) -> bool:
-     """Check if the data is 1D."""
-     return len(data.shape) == 2
+ def compute_global_limits(
+     data: xr.DataArray,
+     min_percentile: float = 0,
+     max_percentile: float = 100,
+ ) -> tuple[float, float]:
+     """Remove all NaN values from the target data to calculate the global minimum
+     and maximum of the data. User-defined percentiles can remove extreme outliers.
+     """

+     # Removes NaN values, needed for moving windows
+     values_no_nan = data.values[~np.isnan(data.values)]

- def is_2d(data: xr.DataArray) -> bool:
-     """Check if the data is 2D or 3D."""
-     return len(data.shape) == 3
+     # Finds the global minimum and maximum of the plot, based on the percentile of the data
+     global_min = np.percentile(values_no_nan, min_percentile)
+     global_max = np.percentile(values_no_nan, max_percentile)
+     return global_min, global_max


- def generate_animation(
+ def animate(
      data: xr.DataArray,
+     fps: float = 10,
+     min_percentile: float = 0,
+     max_percentile: float = 100,
+     title: str | None = None,
      display_sdf_name: bool = False,
-     fps: int = 10,
-     move_window: bool = False,
      ax: plt.Axes | None = None,
      **kwargs,
  ) -> FuncAnimation:
@@ -71,17 +93,18 @@ def generate_animation(

      Parameters
      ---------
-     dataset
-         The dataset containing the simulation data
-     target_attribute
-         The attribute to plot for each timestep
-     display_sdf_name
-         Display the sdf file name in the animation title
+     data
+         The dataarray containing the target data
      fps
          Frames per second for the animation (default: 10)
-     move_window
-         If the simulation has a moving window, the animation will move along
-         with it (default: False)
+     min_percentile
+         Minimum percentile of the data (default: 0)
+     max_percentile
+         Maximum percentile of the data (default: 100)
+     title
+         Custom title to add to the plot.
+     display_sdf_name
+         Display the sdf file name in the animation title
      ax
          Matplotlib axes on which to plot.
      kwargs
@@ -89,18 +112,28 @@

      Examples
      --------
-     >>> generate_animation(dataset["Derived_Number_Density_Electron"])
+     >>> dataset["Derived_Number_Density_Electron"].epoch.animate()
      """
-     import matplotlib.pyplot as plt
-     from matplotlib.animation import FuncAnimation
+     import matplotlib.pyplot as plt  # noqa: PLC0415
+     from matplotlib.animation import FuncAnimation  # noqa: PLC0415
+
+     kwargs_original = kwargs.copy()

      if ax is None:
          _, ax = plt.subplots()

      N_frames = data["time"].size
-     global_min, global_max = compute_global_limits(data)
+     global_min, global_max = compute_global_limits(data, min_percentile, max_percentile)

-     if is_2d(data):
+     # Initialise plot and set y-limits for 1D data
+     if data.ndim == 2:
+         kwargs.setdefault("x", "X_Grid_mid")
+         plot = data.isel(time=0).plot(ax=ax, **kwargs)
+         ax.set_title(get_frame_title(data, 0, display_sdf_name, title))
+         ax.set_ylim(global_min, global_max)
+
+     # Initialise plot and set colour bar for 2D data
+     if data.ndim == 3:
          kwargs["norm"] = plt.Normalize(vmin=global_min, vmax=global_max)
          kwargs["add_colorbar"] = False
          # Set default x and y coordinates for 2D data if not provided
@@ -109,44 +142,32 @@

      # Initialize the plot with the first timestep
      plot = data.isel(time=0).plot(ax=ax, **kwargs)
-     ax.set_title(get_frame_title(data, 0, display_sdf_name))
+     ax.set_title(get_frame_title(data, 0, display_sdf_name, title))

      # Add colorbar
-     long_name = data.attrs.get("long_name")
-     units = data.attrs.get("units")
-     plt.colorbar(plot, ax=ax, label=f"{long_name} [${units}$]")
-
-     # Initialise plot and set y-limits for 1D data
-     if is_1d(data):
-         plot = data.isel(time=0).plot(ax=ax, **kwargs)
-         ax.set_title(get_frame_title(data, 0, display_sdf_name))
-         ax.set_ylim(global_min, global_max)
+     if kwargs_original.get("add_colorbar", True):
+         long_name = data.attrs.get("long_name")
+         units = data.attrs.get("units")
+         plt.colorbar(plot, ax=ax, label=f"{long_name} [${units}$]")

+     # Check if there is a moving window by finding NaNs in the data
+     move_window = np.isnan(np.sum(data.values))
      if move_window:
-         window_velocity, window_initial_edge, time_since_start = (
-             calculate_window_velocity_and_edges(data, kwargs["x"])
-         )
-
-         # User's choice for initial window edge supersedes the one calculated
-         if "xlim" in kwargs:
-             window_initial_edge = kwargs["xlim"]
+         window_boundaries = calculate_window_boundaries(data, kwargs.get("xlim", False))

      def update(frame):
          # Set the xlim for each frame in the case of a moving window
          if move_window:
-             kwargs["xlim"] = (
-                 window_initial_edge[0] + window_velocity * time_since_start[frame],
-                 window_initial_edge[1] * 0.99
-                 + window_velocity * time_since_start[frame],
-             )
+             kwargs["xlim"] = window_boundaries[frame]

          # Update plot for the new frame
          ax.clear()
+
          data.isel(time=frame).plot(ax=ax, **kwargs)
-         ax.set_title(get_frame_title(data, frame, display_sdf_name))
+         ax.set_title(get_frame_title(data, frame, display_sdf_name, title))

-         # # Update y-limits for 1D data
-         if is_1d(data):
+         # Update y-limits for 1D data
+         if data.ndim == 2:
              ax.set_ylim(global_min, global_max)

      return FuncAnimation(
@@ -181,4 +202,4 @@ class EpochAccessor:
          >>> ani = ds["Electric_Field_Ey"].epoch.animate()
          >>> ani.save("myfile.mp4")
          """
-         return generate_animation(self._obj, *args, **kwargs)
+         return animate(self._obj, *args, **kwargs)
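The renamed `animate()` replaces the old `move_window` flag with automatic NaN-based window detection, and adds percentile clipping of the colour/y limits plus a custom title. A usage sketch built on the docstring example (the file glob, 1/99 percentiles, and title text are illustrative):

```python
import xarray as xr
from sdf_xarray import SDFPreprocess

with xr.open_mfdataset("*.sdf", preprocess=SDFPreprocess()) as ds:
    # Clip the colour scale to the 1st-99th percentiles and prepend a
    # custom title; a moving window is detected automatically from NaNs.
    ani = ds["Electric_Field_Ey"].epoch.animate(
        fps=15,
        min_percentile=1,
        max_percentile=99,
        title="My run",
    )
    ani.save("myfile.mp4")
```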
sdf_xarray/sdf_interface.pyx CHANGED
@@ -39,7 +39,7 @@ cdef class Block:

  @dataclasses.dataclass
  cdef class Variable(Block):
-     units: tuple[str] | None
+     units: str | None
      mult: float | None
      grid: str | None
      grid_mid: str | None
sdf_xarray-0.3.2.dist-info/METADATA ADDED
@@ -0,0 +1,176 @@
+ Metadata-Version: 2.4
+ Name: sdf-xarray
+ Version: 0.3.2
+ Summary: Provides a backend for xarray to read SDF files as created by the EPOCH plasma PIC code.
+ Author-Email: Peter Hill <peter.hill@york.ac.uk>, Joel Adams <joel.adams@york.ac.uk>, Shaun Doherty <shaun.doherty@york.ac.uk>, Chris Herdman <chris.herdman@york.ac.uk>
+ License-Expression: BSD-3-Clause
+ Classifier: Development Status :: 5 - Production/Stable
+ Classifier: Intended Audience :: Science/Research
+ Classifier: Topic :: Scientific/Engineering
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Python
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Requires-Python: <3.14,>=3.10
+ Requires-Dist: numpy>=2.0.0
+ Requires-Dist: xarray>=2024.1.0
+ Requires-Dist: dask>=2024.7.1
+ Provides-Extra: docs
+ Requires-Dist: sphinx>=5.3; extra == "docs"
+ Requires-Dist: sphinx_autodoc_typehints>=1.19; extra == "docs"
+ Requires-Dist: sphinx-book-theme>=0.4.0rc1; extra == "docs"
+ Requires-Dist: sphinx-argparse-cli>=1.10.0; extra == "docs"
+ Requires-Dist: sphinx-inline-tabs; extra == "docs"
+ Requires-Dist: pickleshare; extra == "docs"
+ Requires-Dist: ipython; extra == "docs"
+ Requires-Dist: matplotlib; extra == "docs"
+ Requires-Dist: pint; extra == "docs"
+ Requires-Dist: pint-xarray; extra == "docs"
+ Requires-Dist: myst-parser; extra == "docs"
+ Provides-Extra: test
+ Requires-Dist: pytest>=3.3.0; extra == "test"
+ Requires-Dist: dask[complete]; extra == "test"
+ Requires-Dist: matplotlib; extra == "test"
+ Provides-Extra: lint
+ Requires-Dist: ruff; extra == "lint"
+ Provides-Extra: build
+ Requires-Dist: cibuildwheel[uv]; extra == "build"
+ Provides-Extra: jupyter
+ Requires-Dist: dask[diagnostics]; extra == "jupyter"
+ Requires-Dist: ipykernel>=6.29.5; extra == "jupyter"
+ Provides-Extra: pint
+ Requires-Dist: pint; extra == "pint"
+ Requires-Dist: pint-xarray; extra == "pint"
+ Description-Content-Type: text/markdown
+
+ # sdf-xarray
+
+ ![Dynamic TOML Badge](https://img.shields.io/badge/dynamic/toml?url=https%3A%2F%2Fraw.githubusercontent.com%2Fepochpic%2Fsdf-xarray%2Frefs%2Fheads%2Fmain%2Fpyproject.toml&query=%24.project.requires-python&label=python&logo=python)
+ [![Available on PyPI](https://img.shields.io/pypi/v/sdf-xarray?color=blue&logo=pypi)](https://pypi.org/project/sdf-xarray/)
+ [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.15351323.svg)](https://doi.org/10.5281/zenodo.15351323)
+ ![Build/Publish](https://github.com/epochpic/sdf-xarray/actions/workflows/build_publish.yml/badge.svg)
+ ![Tests](https://github.com/epochpic/sdf-xarray/actions/workflows/tests.yml/badge.svg)
+ [![Read the Docs](https://img.shields.io/readthedocs/sdf-xarray?logo=readthedocs&link=https%3A%2F%2Fsdf-xarray.readthedocs.io%2F)](https://sdf-xarray.readthedocs.io)
+ [![Formatted with black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/python/black)
+
+ sdf-xarray provides a backend for [xarray](https://xarray.dev) to read SDF files as created by
+ [EPOCH](https://epochpic.github.io) using the [SDF-C](https://github.com/epochpic/SDF_C) library.
+ Part of [BEAM](#broad-epoch-analysis-modules-beam) (Broad EPOCH Analysis Modules).
+
+ > [!IMPORTANT]
+ > To install this package make sure you are using one of the Python versions listed above.
+
+ ## Installation
+
+ Install from PyPI with:
+
+ ```bash
+ pip install sdf-xarray
+ ```
+
+ > [!NOTE]
+ > For use within jupyter notebooks, run this additional command after installation:
+ >
+ > ```bash
+ > pip install "sdf-xarray[jupyter]"
+ > ```
+
+ or from a local checkout:
+
+ ```bash
+ git clone https://github.com/epochpic/sdf-xarray.git
+ cd sdf-xarray
+ pip install .
+ ```
+
+ We recommend switching to [uv](https://docs.astral.sh/uv/) to manage packages.
+
+ ## Usage
+
+ ### Single file loading
+
+ ```python
+ import xarray as xr
+
+ df = xr.open_dataset("0010.sdf")
+
+ print(df["Electric_Field_Ex"])
+
+ # <xarray.DataArray 'Electric_Field_Ex' (X_x_px_deltaf_electron_beam: 16)> Size: 128B
+ # [16 values with dtype=float64]
+ # Coordinates:
+ #   * X_x_px_deltaf_electron_beam (X_x_px_deltaf_electron_beam) float64 128B 1...
+ # Attributes:
+ #     units:     V/m
+ #     full_name: "Electric Field/Ex"
+ ```
+
+ ### Multi-file loading
+
+ To open a whole simulation at once, pass `preprocess=sdf_xarray.SDFPreprocess()`
+ to `xarray.open_mfdataset`:
+
+ ```python
+ import xarray as xr
+ from sdf_xarray import SDFPreprocess
+
+ with xr.open_mfdataset("*.sdf", preprocess=SDFPreprocess()) as ds:
+     print(ds)
+
+ # Dimensions:
+ # time: 301, X_Grid_mid: 128, ...
+ # Coordinates: (9) ...
+ # Data variables: (18) ...
+ # Indexes: (9) ...
+ # Attributes: (22) ...
+ ```
+
+ `SDFPreprocess` checks that all the files are from the same simulation, and
+ ensures there's a `time` dimension so the files are correctly concatenated.
+
+ If your simulation has multiple `output` blocks so that not all variables are
+ output at every time step, then those variables will have `NaN` values at the
+ corresponding time points.
+
+ For more in-depth documentation please visit: <https://sdf-xarray.readthedocs.io/>
+
+ ## Citing
+
+ If sdf-xarray contributes to a project that leads to publication, please acknowledge this by citing sdf-xarray. This can be done by clicking the "cite this repository" button located near the top right of this page.
+
+ ## Contributing
+
+ We welcome contributions to the BEAM ecosystem! Whether it's reporting issues, suggesting features, or submitting pull requests, your input helps improve these tools for the community.
+
+ ### How to Contribute
+
+ There are many ways to get involved:
+ - **Report bugs**: Found something not working as expected? Open an issue with as much detail as possible.
+ - **Request a feature**: Got an idea for a new feature or enhancement? Open a feature request on [GitHub Issues](https://github.com/epochpic/sdf-xarray/issues)!
+ - **Improve the documentation**: We aim to keep our docs clear and helpful—if something's missing or unclear, feel free to suggest edits.
+ - **Submit code changes**: Bug fixes, refactoring, or new features are welcome.
+
+ All code is automatically linted, formatted, and tested via GitHub Actions.
+
+ To run checks locally before opening a pull request, see [CONTRIBUTING.md](CONTRIBUTING.md) or the [readthedocs documentation](https://sdf-xarray.readthedocs.io/en/latest/contributing.html)
+
+ ## Broad EPOCH Analysis Modules (BEAM)
+
+ ![BEAM logo](./BEAM.png)
+
+ **BEAM** is a collection of independent yet complementary open-source tools for analysing EPOCH simulations, designed to be modular so researchers can adopt only the components they require without being constrained by a rigid framework. In line with the **FAIR principles — Findable**, **Accessible**, **Interoperable**, and **Reusable** — each package is openly published with clear documentation and versioning (Findable), distributed via public repositories (Accessible), designed to follow common standards for data structures and interfaces (Interoperable), and includes licensing and metadata to support long-term use and adaptation (Reusable). The packages are as follows:
+
+ - [sdf-xarray](https://github.com/epochpic/sdf-xarray): Reading and processing SDF files and converting them to [xarray](https://docs.xarray.dev/en/stable/).
+ - [epydeck](https://github.com/epochpic/epydeck): Input deck reader and writer.
+ - [epyscan](https://github.com/epochpic/epyscan): Create campaigns over a given parameter space using various sampling methods.
+
+ ## PlasmaFAIR
+
+ ![PlasmaFAIR logo](PlasmaFAIR.svg)
+
+ Originally developed by [PlasmaFAIR](https://plasmafair.github.io), EPSRC Grant EP/V051822/1
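The new metadata also declares a `pint` extra, matching the guarded `pint_xarray` import added to `__init__.py`. A sketch of what that enables, assuming `pip install "sdf-xarray[pint]"` (`.pint.quantify()` is pint-xarray's standard entry point, not an sdf-xarray API):

```python
import xarray as xr

import sdf_xarray  # noqa: F401 -- also imports pint_xarray when installed

ds = xr.open_dataset("0010.sdf")

# Attach units parsed from each variable's "units" attribute, enabling
# unit-aware arithmetic and conversions.
ds = ds.pint.quantify()
```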
sdf_xarray-0.2.0.dist-info/RECORD → sdf_xarray-0.3.2.dist-info/RECORD CHANGED
@@ -6,19 +6,20 @@ include/SDFC_14.4.7/sdf_list_type.h,sha256=Quu8v0-SEsQuJpGtEZnm09tAyXqWNitx0sXl5
  include/SDFC_14.4.7/sdf_vector_type.h,sha256=dbKjhzRRsvhzrnTwVjtVlvnuisEnRMKY-vvdm94ok_Q,1595
  include/SDFC_14.4.7/stack_allocator.h,sha256=L7U9vmGiVSw3VQLIv9EzTaVq7JbFxs9aNonKStTkUSg,1335
  include/SDFC_14.4.7/uthash.h,sha256=rIyy_-ylY6S_7WaZCCC3VtvXaC9q37rFyA0f1U9xc4w,63030
- lib/SDFC_14.4.7/sdfc.lib,sha256=gjj_61MNoqCeyyxSisEd89aC23ZU86x-exBgcxWimy8,350320
+ lib/SDFC_14.4.7/sdfc.lib,sha256=VyuxkhB3q8QOeICxMhp3a7jpi7GXvHRmIwFKCSHSyrA,350158
  lib/SDFC_14.4.7/SDFCConfig.cmake,sha256=IOA1eusC-KvUK4LNTEiOAmEdaPH1ZvNvbYPgiG1oZio,802
  lib/SDFC_14.4.7/SDFCConfigVersion.cmake,sha256=pN7Qqyf04s3izw7PYQ0XK6imvmhaVegSdR_nEl3Ok_o,2830
  lib/SDFC_14.4.7/SDFCTargets-release.cmake,sha256=G4zdx5PyjePigeD_a6rmZAxbk7L8Nf0klUnV78Lm2fI,828
  lib/SDFC_14.4.7/SDFCTargets.cmake,sha256=OVt1Gm8n7Ew4fiTmA9yHoef3vIIGwsXUZfqeG9p9Bys,4152
- sdf_xarray/__init__.py,sha256=VASjtRYa7gd6oSPgtpMhJhCNMeMI_WUby904ETEyMpM,17435
- sdf_xarray/_version.py,sha256=dwm-k_Z-jVJh3XvUUihA4YH_iqS5EfkubVd6suUFZB0,427
+ sdf_xarray/__init__.py,sha256=obgAD4Aecvvpd8GkxLIAiIagSaY0bFVP2Q397N48_5g,24201
+ sdf_xarray/_version.py,sha256=bmLiJYnZTISDv_NDGANk6QDMSY0XTk0CwXXKhbOvW3Y,746
  sdf_xarray/csdf.pxd,sha256=ADPjAuHsodAvdOz96Z_XlFF7VL3KmVaXcTifWDP3rK0,4205
- sdf_xarray/plotting.py,sha256=pCKZ01sAT0VLEhnWrX6LN5OSJnzcIkRA3joN7f62giM,6238
- sdf_xarray/sdf_interface.cp312-win_amd64.pyd,sha256=zSdMTuvmOMVx3ao-nxQnsP4PVYGNEqHzHzvo9cQSAsQ,379392
- sdf_xarray/sdf_interface.pyx,sha256=3XRFlrC1e_HkJrU8-i3fMz8DlyUxZgt9wTx_QkGE_TQ,11982
- sdf_xarray-0.2.0.dist-info/METADATA,sha256=G0TiE2vkUccaBSwRMM56Pj82xDCdDm9ngpfbTNNAFhw,6589
- sdf_xarray-0.2.0.dist-info/WHEEL,sha256=GgB_RydHGtp7zP9kXrVRu7kuGtdM7WtO3JhH95Vv87o,106
- sdf_xarray-0.2.0.dist-info/entry_points.txt,sha256=gP7BIQpXNg6vIf7S7p-Rw_EJZTC1X50BsVTkK7dA7g0,57
- sdf_xarray-0.2.0.dist-info/licenses/LICENCE,sha256=dsqtZx65gUc4vyNA4JKHTelIFuzWf-HVNi0h1c-lXNI,1517
- sdf_xarray-0.2.0.dist-info/RECORD,,
+ sdf_xarray/dataset_accessor.py,sha256=TvnVMBefnT1d94Bkllhd-__O3ittzpaVjZKfze-3WQ4,2484
+ sdf_xarray/plotting.py,sha256=PnbEspR4XkA5SHkpoFKA2G7BYj5J3mVgR1TEeGol6Vw,7041
+ sdf_xarray/sdf_interface.cp312-win_amd64.pyd,sha256=08xrwtYkgFqyN5GCr4sV5QP3g0mOozAPMg1DCVAqZm8,360960
+ sdf_xarray/sdf_interface.pyx,sha256=PFC6upg14OZBqiGInLgBoxztIIKBk-HOh3WC9Ro4YUw,11975
+ sdf_xarray-0.3.2.dist-info/METADATA,sha256=xvADFsOdsd5EzaZbVYGOUgmEMe4RzrTDF9IbyijadqE,7624
+ sdf_xarray-0.3.2.dist-info/WHEEL,sha256=chqeLhPBtPdrOoreR34YMcofSk3yWDQhkrsDJ2n48LU,106
+ sdf_xarray-0.3.2.dist-info/entry_points.txt,sha256=gP7BIQpXNg6vIf7S7p-Rw_EJZTC1X50BsVTkK7dA7g0,57
+ sdf_xarray-0.3.2.dist-info/licenses/LICENCE,sha256=aHWuyELjtzIL1jTXFHTbI3tr9vyVyhnw3I9_QYPdEX8,1515
+ sdf_xarray-0.3.2.dist-info/RECORD,,
sdf_xarray-0.2.0.dist-info/WHEEL → sdf_xarray-0.3.2.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: scikit-build-core 0.10.7
+ Generator: scikit-build-core 0.11.6
  Root-Is-Purelib: false
  Tag: cp312-cp312-win_amd64

sdf_xarray-0.2.0.dist-info/licenses/LICENCE → sdf_xarray-0.3.2.dist-info/licenses/LICENCE CHANGED
@@ -1,4 +1,4 @@
- Copyright 2024, Peter Hill, Joel Adams, PlasmaFAIR team
+ Copyright 2024, Peter Hill, Joel Adams, epochpic team

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are
sdf_xarray-0.2.0.dist-info/METADATA REMOVED
@@ -1,190 +0,0 @@
- Metadata-Version: 2.1
- Name: sdf-xarray
- Version: 0.2.0
- Summary: Provides a backend for xarray to read SDF files as created by the EPOCH plasma PIC code.
- Author-Email: Peter Hill <peter.hill@york.ac.uk>, Joel Adams <joel.adams@york.ac.uk>, Shaun Doherty <shaun.doherty@york.ac.uk>
- License: Copyright 2024, Peter Hill, Joel Adams, PlasmaFAIR team
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
-
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the copyright holder nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- Requires-Python: >=3.10
- Requires-Dist: numpy>=2.0.0
- Requires-Dist: xarray>=2024.1.0
- Requires-Dist: dask>=2024.7.1
- Requires-Dist: cython>=3.0
- Requires-Dist: sphinx>=5.3; extra == "docs"
- Requires-Dist: sphinx_autodoc_typehints>=1.19; extra == "docs"
- Requires-Dist: sphinx-book-theme>=0.4.0rc1; extra == "docs"
- Requires-Dist: sphinx-argparse-cli>=1.10.0; extra == "docs"
- Requires-Dist: sphinx-inline-tabs; extra == "docs"
- Requires-Dist: pickleshare; extra == "docs"
- Requires-Dist: ipython; extra == "docs"
- Requires-Dist: matplotlib; extra == "docs"
- Requires-Dist: pytest>=3.3.0; extra == "test"
- Requires-Dist: dask[complete]; extra == "test"
- Requires-Dist: ruff; extra == "lint"
- Requires-Dist: cibuildwheel[uv]; extra == "build"
- Requires-Dist: dask[diagnostics]; extra == "jupyter"
- Requires-Dist: ipykernel>=6.29.5; extra == "jupyter"
- Provides-Extra: docs
- Provides-Extra: test
- Provides-Extra: lint
- Provides-Extra: build
- Provides-Extra: jupyter
- Description-Content-Type: text/markdown
-
- # sdf-xarray
-
- ![PyPI](https://img.shields.io/pypi/v/sdf-xarray?color=blue)
- ![Build/Publish](https://github.com/PlasmaFAIR/sdf-xarray/actions/workflows/build_publish.yml/badge.svg)
- ![Tests](https://github.com/PlasmaFAIR/sdf-xarray/actions/workflows/tests.yml/badge.svg)
-
- `sdf-xarray` provides a backend for [xarray](https://xarray.dev) to
- read SDF files as created by the [EPOCH](https://epochpic.github.io)
- plasma PIC code using the [SDF-C](https://github.com/Warwick-Plasma/SDF_C) library.
-
- ## Installation
-
- Install from PyPI with:
-
- ```bash
- pip install sdf-xarray
- ```
-
- > [!NOTE]
- > For use within jupyter notebooks, run this additional command after installation:
- >
- > ```bash
- > pip install "sdf-xarray[jupyter]"
- > ```
-
- or from a local checkout:
-
- ```bash
- git clone https://github.com/PlasmaFAIR/sdf-xarray.git
- cd sdf-xarray
- pip install .
- ```
-
- We recommend switching to [uv](https://docs.astral.sh/uv/) to manage packages.
-
- ## Usage
-
- For more in depth documentation please visit <https://sdf-xarray.readthedocs.io/>
-
- `sdf-xarray` is a backend for xarray, and so is usable directly from
- xarray:
-
- ### Single file loading
-
- ```python
- import xarray as xr
-
- df = xr.open_dataset("0010.sdf")
-
- print(df["Electric_Field_Ex"])
-
- # <xarray.DataArray 'Electric_Field_Ex' (X_x_px_deltaf_electron_beam: 16)> Size: 128B
- # [16 values with dtype=float64]
- # Coordinates:
- #   * X_x_px_deltaf_electron_beam (X_x_px_deltaf_electron_beam) float64 128B 1...
- # Attributes:
- #     units:     V/m
- #     full_name: "Electric Field/Ex"
- ```
-
- To open a whole simulation at once, pass `preprocess=sdf_xarray.SDFPreprocess()`
- to `xarray.open_mfdataset`:
-
- ```python
- import xarray as xr
- from sdf_xarray import SDFPreprocess
-
- with xr.open_mfdataset("*.sdf", preprocess=SDFPreprocess()) as ds:
-     print(ds)
-
- # Dimensions:
- # time: 301, X_Grid_mid: 128, ...
- # Coordinates: (9) ...
- # Data variables: (18) ...
- # Indexes: (9) ...
- # Attributes: (22) ...
- ```
-
- `SDFPreprocess` checks that all the files are from the same simulation, as
- ensures there's a `time` dimension so the files are correctly concatenated.
-
- If your simulation has multiple `output` blocks so that not all variables are
- output at every time step, then those variables will have `NaN` values at the
- corresponding time points.
-
- Alternatively, we can create a separate time dimensions for each `output` block
- (essentially) using `sdf_xarray.open_mfdataset` with `separate_times=True`:
-
- ```python
- from sdf_xarray import open_mfdataset
-
- with open_mfdataset("*.sdf", separate_times=True) as ds:
-     print(ds)
-
- # Dimensions:
- # time0: 301, time1: 31, time2: 61, X_Grid_mid: 128, ...
- # Coordinates: (12) ...
- # Data variables: (18) ...
- # Indexes: (9) ...
- # Attributes: (22) ...
- ```
-
- This is better for memory consumption, at the cost of perhaps slightly less
- friendly comparisons between variables on different time coordinates.
-
- ### Reading particle data
-
- By default, particle data isn't kept as it takes up a lot of space. Pass
- `keep_particles=True` as a keyword argument to `open_dataset` (for single files)
- or `open_mfdataset` (for multiple files):
-
- ```python
- df = xr.open_dataset("0010.sdf", keep_particles=True)
- ```
-
- ### Loading SDF files directly
-
- For debugging, sometimes it's useful to see the raw SDF files:
-
- ```python
- from sdf_xarray import SDFFile
-
- with SDFFile("0010.sdf") as sdf_file:
-     print(sdf_file.variables["Electric Field/Ex"])
-
- # Variable(_id='ex', name='Electric Field/Ex', dtype=dtype('float64'), ...
-
- print(sdf_file.variables["Electric Field/Ex"].data)
-
- # [ 0.00000000e+00 0.00000000e+00 0.00000000e+00 ... -4.44992788e+12 1.91704994e+13 0.00000000e+00]
- ```