sdf-xarray 0.4.0__cp314-cp314t-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
sdf_xarray/__init__.py ADDED
@@ -0,0 +1,645 @@
1
+ import contextlib
2
+ import os
3
+ import re
4
+ from collections import Counter, defaultdict
5
+ from collections.abc import Callable, Iterable
6
+ from importlib.metadata import version
7
+ from itertools import product
8
+ from os import PathLike as os_PathLike
9
+ from pathlib import Path
10
+ from typing import ClassVar
11
+
12
+ import numpy as np
13
+ import xarray as xr
14
+ from packaging.version import Version
15
+ from xarray.backends import AbstractDataStore, BackendArray, BackendEntrypoint
16
+ from xarray.backends.file_manager import CachingFileManager
17
+ from xarray.backends.locks import ensure_lock
18
+ from xarray.core import indexing
19
+ from xarray.core.utils import close_on_error, try_read_magic_number_from_path
20
+ from xarray.core.variable import Variable
21
+
22
+ # NOTE: Do not delete these lines, otherwise the "epoch" dataset and dataarray
23
+ # accessors will not be imported when the user imports sdf_xarray
24
+ import sdf_xarray.dataset_accessor
25
+ import sdf_xarray.download
26
+ import sdf_xarray.plotting # noqa: F401
27
+
28
+ # NOTE: This attempts to initialise with the "pint" accessor if the user
29
+ # has installed the package
30
+ with contextlib.suppress(ImportError):
31
+ import pint_xarray # noqa: F401
32
+
33
+ from .sdf_interface import Constant, SDFFile # type: ignore # noqa: PGH003
34
+
35
+ # TODO Remove this once the new kwarg options are fully implemented
36
+ if Version(version("xarray")) >= Version("2025.8.0"):
37
+ xr.set_options(use_new_combine_kwarg_defaults=True)
38
+
39
+ PathLike = str | os_PathLike
40
+
41
+
42
+ def _rename_with_underscore(name: str) -> str:
43
+ """A lot of the variable names have spaces, forward slashes and dashes in them, which
44
+ are not valid in netCDF names, so we replace them with underscores."""
45
+ return name.replace("/", "_").replace(" ", "_").replace("-", "_")
46
+
47
+
48
+ def _process_latex_name(variable_name: str) -> str:
49
+ """Converts variable names to LaTeX format where possible
50
+ using the following rules:
51
+ - Ex -> $E_x$
52
+ - Ey -> $E_y$
53
+ - Ez -> $E_z$
54
+
55
+ This repeats for B, J and P. The affix (prefix + suffix) is only
56
+ replaced when it appears as a whole word, i.e. surrounded by spaces
57
+ or at the start or end of the string. This avoids changing variable
58
+ names that merely contain these affixes as part of a longer word.
59
+ """
60
+ prefixes = ["E", "B", "J", "P"]
61
+ suffixes = ["x", "y", "z"]
62
+ for prefix, suffix in product(prefixes, suffixes):
63
+ # Match affix with preceding space and trailing space or end of string
64
+ affix_pattern = rf"\b{prefix}{suffix}\b"
65
+ # Insert LaTeX format while preserving spaces
66
+ replacement = rf"${prefix}_{suffix}$"
67
+ variable_name = re.sub(affix_pattern, replacement, variable_name)
68
+ return variable_name
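+ # A minimal illustration of the rule above (the names here are only examples):
+ # _process_latex_name("Electric Field Ex") -> "Electric Field $E_x$"
+ # _process_latex_name("Px_Ion_H") is left unchanged, since "Px" is not a
+ # standalone word there.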
69
+
70
+
71
+ def _resolve_glob(path_glob: PathLike | Iterable[PathLike]):
72
+ """
73
+ Normalise input path_glob into a sorted list of absolute, resolved Path objects.
74
+ """
75
+
76
+ try:
77
+ p = Path(path_glob)
78
+ paths = list(p.parent.glob(p.name)) if p.name == "*.sdf" else list(p)
79
+ except TypeError:
80
+ paths = list({Path(p) for p in path_glob})
81
+
82
+ paths = sorted(p.resolve() for p in paths)
83
+ if not paths:
84
+ raise FileNotFoundError(f"No files matched pattern or input: {path_glob!r}")
85
+ return paths
86
+
87
+
88
+ def purge_unselected_data_vars(ds: xr.Dataset, data_vars: list[str]) -> xr.Dataset:
89
+ """
90
+ If the user has exclusively requested only certain variables be
91
+ loaded in then we purge all other variables and dimensions
92
+ """
93
+ existing_data_vars = set(ds.data_vars.keys())
94
+ vars_to_keep = set(data_vars) & existing_data_vars
95
+ vars_to_drop = existing_data_vars - vars_to_keep
96
+ ds = ds.drop_vars(vars_to_drop)
97
+
98
+ existing_dims = set(ds.sizes)
99
+ dims_to_keep = set()
100
+ for var in vars_to_keep:
101
+ dims_to_keep.update(ds[var].coords._names)
102
+ dims_to_keep.update(ds[var].dims)
103
+
104
+ coords_to_drop = existing_dims - dims_to_keep
105
+ return ds.drop_dims(coords_to_drop)
106
+
107
+
108
+ def combine_datasets(
109
+ path_glob: Iterable | str, data_vars: list[str], **kwargs
110
+ ) -> xr.Dataset:
111
+ """
112
+ Combine all datasets using a single time dimension, optionally extract
113
+ data from only the listed data_vars
114
+ """
115
+
116
+ if data_vars is not None:
117
+ return xr.open_mfdataset(
118
+ path_glob,
119
+ join="outer",
120
+ coords="different",
121
+ compat="no_conflicts",
122
+ combine="nested",
123
+ concat_dim="time",
124
+ preprocess=SDFPreprocess(data_vars=data_vars),
125
+ **kwargs,
126
+ )
127
+
128
+ return xr.open_mfdataset(
129
+ path_glob,
130
+ data_vars="all",
131
+ coords="different",
132
+ compat="no_conflicts",
133
+ join="outer",
134
+ preprocess=SDFPreprocess(),
135
+ **kwargs,
136
+ )
137
+
138
+
139
+ def open_mfdataset(
140
+ path_glob: Iterable | str | Path | Callable[..., Iterable[Path]],
141
+ *,
142
+ separate_times: bool = False,
143
+ keep_particles: bool = False,
144
+ probe_names: list[str] | None = None,
145
+ data_vars: list[str] | None = None,
146
+ ) -> xr.Dataset:
147
+ """Open a set of EPOCH SDF files as one `xarray.Dataset`
148
+
149
+ EPOCH can output variables at different periods, so each individual
150
+ SDF file from one EPOCH run may have different variables in it. In
151
+ order to combine all files into one `xarray.Dataset`, we need to
152
+ concatenate variables across their time dimension.
153
+
154
+ We have two choices:
155
+
156
+ 1. One time dimension where some variables may not be defined at all time
157
+ points, and so will be filled with NaNs at missing points; or
158
+ 2. Multiple time dimensions, one for each output frequency
159
+
160
+ The second option is better for memory consumption, as the missing data with
161
+ the first option still takes up space. However, proper lazy-loading may
162
+ mitigate this.
163
+
164
+ The ``separate_times`` argument can be used to switch between these choices.
165
+
166
+ Parameters
167
+ ----------
168
+ path_glob :
169
+ List of filenames or string glob pattern
170
+ separate_times :
171
+ If ``True``, create separate time dimensions for variables defined at
172
+ different output frequencies
173
+ keep_particles :
174
+ If ``True``, also load particle data (this may use a lot of memory!)
175
+ probe_names :
176
+ List of EPOCH probe names
177
+ data_vars :
178
+ List of data variables to load in (if not specified, all variables are loaded)
179
+ """
180
+
181
+ path_glob = _resolve_glob(path_glob)
182
+
183
+ if not separate_times:
184
+ return combine_datasets(
185
+ path_glob,
186
+ data_vars=data_vars,
187
+ keep_particles=keep_particles,
188
+ probe_names=probe_names,
189
+ )
190
+
191
+ _, var_times_map = make_time_dims(path_glob)
192
+
193
+ all_dfs = []
194
+ for f in path_glob:
195
+ ds = xr.open_dataset(f, keep_particles=keep_particles, probe_names=probe_names)
196
+
197
+ # If the data_vars are specified then only load them in and disregard the rest.
198
+ # If there are no remaining data variables then skip adding the dataset to list
199
+ if data_vars is not None:
200
+ ds = purge_unselected_data_vars(ds, data_vars)
201
+ if not ds.data_vars:
202
+ continue
203
+
204
+ all_dfs.append(ds)
205
+
206
+ for df in all_dfs:
207
+ for da in df:
208
+ df[da] = df[da].expand_dims(
209
+ dim={var_times_map[str(da)]: [df.attrs["time"]]}
210
+ )
211
+ for coord in df.coords:
212
+ if df.coords[coord].attrs.get("point_data", False):
213
+ # We need to undo our renaming of the coordinates
214
+ base_name = coord.split("_", maxsplit=1)[-1]
215
+ sdf_coord_name = f"Grid_{base_name}"
216
+ df.coords[coord] = df.coords[coord].expand_dims(
217
+ dim={var_times_map[sdf_coord_name]: [df.attrs["time"]]}
218
+ )
219
+
220
+ return xr.combine_by_coords(
221
+ all_dfs,
222
+ coords="different",
223
+ combine_attrs="drop_conflicts",
224
+ join="outer",
225
+ compat="no_conflicts",
226
+ )
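+ # Sketch of typical usage (the file pattern and variable names below are
+ # placeholders, not part of this module):
+ # ds_all = open_mfdataset("run/*.sdf")
+ # ds_ex = open_mfdataset("run/*.sdf", data_vars=["Electric_Field_Ex"])
+ # ds_split = open_mfdataset("run/*.sdf", separate_times=True)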
227
+
228
+
229
+ def make_time_dims(path_glob):
230
+ """Extract the distinct set of time arrays from a collection of
231
+ SDF files, along with a mapping from variable names to their time
232
+ dimension.
233
+ """
234
+ # Map variable names to list of times
235
+ vars_count = defaultdict(list)
236
+ for f in path_glob:
237
+ with SDFFile(str(f)) as sdf_file:
238
+ for key in sdf_file.variables:
239
+ vars_count[_rename_with_underscore(key)].append(sdf_file.header["time"])
240
+ for grid in sdf_file.grids.values():
241
+ vars_count[_rename_with_underscore(grid.name)].append(
242
+ sdf_file.header["time"]
243
+ )
244
+
245
+ # Count the unique set of lists of times
246
+ times_count = Counter(tuple(v) for v in vars_count.values())
247
+
248
+ # Give each set of times a unique name
249
+ time_dims = {}
250
+ for count, t in enumerate(times_count):
251
+ time_dims[f"time{count}"] = t
252
+
253
+ # Map each variable to the name of its time dimension
254
+ var_times_map = {}
255
+ for key, value in vars_count.items():
256
+ v_tuple = tuple(value)
257
+ for time_name, time_dim in time_dims.items():
258
+ if v_tuple == time_dim:
259
+ var_times_map[key] = time_name
260
+ break
261
+ else:
262
+ raise ValueError(f"Didn't find time dim for {key!r} with {v_tuple}")
263
+
264
+ return time_dims, var_times_map
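+ # Illustrative shape of the return values (times and names are made up):
+ # time_dims == {"time0": (0.0, 1e-14, 2e-14), "time1": (0.0, 2e-14)}
+ # var_times_map == {"Electric_Field_Ex": "time0", "Derived_Number_Density": "time1"}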
265
+
266
+
267
+ class SDFBackendArray(BackendArray):
268
+ """Adapater class required for lazy loading"""
269
+
270
+ __slots__ = ("datastore", "dtype", "shape", "variable_name")
271
+
272
+ def __init__(self, variable_name, datastore):
273
+ self.datastore = datastore
274
+ self.variable_name = variable_name
275
+
276
+ array = self.get_array()
277
+ self.shape = array.shape
278
+ self.dtype = array.dtype
279
+
280
+ def get_array(self, needs_lock=True):
281
+ with self.datastore.acquire_context(needs_lock) as ds:
282
+ return ds.variables[self.variable_name]
283
+
284
+ def __getitem__(self, key: indexing.ExplicitIndexer) -> np.typing.ArrayLike:
285
+ return indexing.explicit_indexing_adapter(
286
+ key,
287
+ self.shape,
288
+ indexing.IndexingSupport.OUTER,
289
+ self._raw_indexing_method,
290
+ )
291
+
292
+ def _raw_indexing_method(self, key: tuple) -> np.typing.ArrayLike:
293
+ # thread-safe method that accesses the data on disk
294
+ with self.datastore.acquire_context():
295
+ original_array = self.get_array(needs_lock=False)
296
+ return original_array.data[key]
297
+
298
+
299
+ class SDFDataStore(AbstractDataStore):
300
+ """Store for reading and writing data via the SDF library."""
301
+
302
+ __slots__ = (
303
+ "_filename",
304
+ "_manager",
305
+ "drop_variables",
306
+ "keep_particles",
307
+ "lock",
308
+ "probe_names",
309
+ )
310
+
311
+ def __init__(
312
+ self,
313
+ manager,
314
+ drop_variables=None,
315
+ keep_particles=False,
316
+ lock=None,
317
+ probe_names=None,
318
+ ):
319
+ self._manager = manager
320
+ self._filename = self.ds.filename
321
+ self.drop_variables = drop_variables
322
+ self.keep_particles = keep_particles
323
+ self.lock = ensure_lock(lock)
324
+ self.probe_names = probe_names
325
+
326
+ @classmethod
327
+ def open(
328
+ cls,
329
+ filename,
330
+ lock=None,
331
+ drop_variables=None,
332
+ keep_particles=False,
333
+ probe_names=None,
334
+ ):
335
+ if isinstance(filename, os.PathLike):
336
+ filename = os.fspath(filename)
337
+
338
+ manager = CachingFileManager(SDFFile, filename, lock=lock)
339
+ return cls(
340
+ manager,
341
+ lock=lock,
342
+ drop_variables=drop_variables,
343
+ keep_particles=keep_particles,
344
+ probe_names=probe_names,
345
+ )
346
+
347
+ def _acquire(self, needs_lock=True):
348
+ with self._manager.acquire_context(needs_lock) as ds:
349
+ return ds
350
+
351
+ @property
352
+ def ds(self):
353
+ return self._acquire()
354
+
355
+ def acquire_context(self, needs_lock=True):
356
+ return self._manager.acquire_context(needs_lock)
357
+
358
+ def load(self): # noqa: PLR0912, PLR0915
359
+ # Drop any requested variables
360
+ if self.drop_variables:
361
+ # Build a mapping from underscored names to real variable names
362
+ name_map = {_rename_with_underscore(var): var for var in self.ds.variables}
363
+
364
+ for variable in self.drop_variables:
365
+ key = _rename_with_underscore(variable)
366
+ original_name = name_map.get(key)
367
+
368
+ if original_name is None:
369
+ raise KeyError(
370
+ f"Variable '{variable}' not found (interpreted as '{key}')."
371
+ )
372
+ self.ds.variables.pop(original_name)
373
+
374
+ # These two dicts are global metadata about the run or file
375
+ attrs = {**self.ds.header, **self.ds.run_info}
376
+
377
+ data_vars = {}
378
+ coords = {}
379
+
380
+ def _norm_grid_name(grid_name: str) -> str:
381
+ """There may be multiple grids all with the same coordinate names, so
382
+ drop the "Grid/" from the start, and append the rest to the
383
+ dimension name. This lets us disambiguate them all. Probably"""
384
+ return grid_name.split("/", maxsplit=1)[-1]
385
+
386
+ def _grid_species_name(grid_name: str) -> str:
387
+ return grid_name.split("/")[-1]
388
+
389
+ def _process_grid_name(grid_name: str, transform_func) -> str:
390
+ """Apply the given transformation function and then rename with underscores."""
391
+ transformed_name = transform_func(grid_name)
392
+ return _rename_with_underscore(transformed_name)
393
+
394
+ for key, value in self.ds.grids.items():
395
+ if "cpu" in key.lower():
396
+ # Had some problems with these variables, so just ignore them for now
397
+ continue
398
+
399
+ if not self.keep_particles and value.is_point_data:
400
+ continue
401
+
402
+ base_name = _process_grid_name(value.name, _norm_grid_name)
403
+
404
+ for label, coord, unit in zip(value.labels, value.data, value.units):
405
+ full_name = f"{label}_{base_name}"
406
+ dim_name = (
407
+ f"ID_{_process_grid_name(key, _grid_species_name)}"
408
+ if value.is_point_data
409
+ else full_name
410
+ )
411
+ coords[full_name] = (
412
+ dim_name,
413
+ coord,
414
+ {
415
+ "long_name": label.replace("_", " "),
416
+ "units": unit,
417
+ "point_data": value.is_point_data,
418
+ "full_name": value.name,
419
+ },
420
+ )
421
+
422
+ # Read and convert SDF variables and meshes to xarray DataArrays and Coordinates
423
+ for key, value in self.ds.variables.items():
424
+ # Had some problems with these variables, so just ignore them for now
425
+ if "cpu" in key.lower():
426
+ continue
427
+ if "boundary" in key.lower():
428
+ continue
429
+ if "output file" in key.lower():
430
+ continue
431
+
432
+ if not self.keep_particles and value.is_point_data:
433
+ continue
434
+
435
+ if isinstance(value, Constant) or value.grid is None:
436
+ # We don't have a grid, either because it's just a
437
+ # scalar, or because it's an array over something
438
+ # else. We have no more information, so just make up
439
+ # some (hopefully) unique dimension names
440
+ shape = getattr(value.data, "shape", ())
441
+ dims = [f"dim_{key}_{n}" for n, _ in enumerate(shape)]
442
+ base_name = _rename_with_underscore(key)
443
+
444
+ data_attrs = {}
445
+ data_attrs["full_name"] = key
446
+ data_attrs["long_name"] = base_name.replace("_", " ")
447
+ if value.units is not None:
448
+ data_attrs["units"] = value.units
449
+
450
+ data_vars[base_name] = Variable(dims, value.data, attrs=data_attrs)
451
+ continue
452
+
453
+ if value.is_point_data:
454
+ # Point (particle) variables are 1D
455
+
456
+ # Particle data does not maintain a fixed dimension size
457
+ # throughout the simulation. An example of a particle name comes
458
+ # in the form of `Particles/Px/Ion_H` which is then modified
459
+ # using `_process_grid_name()` into `Ion_H`. This is fine as the
460
+ # other components of the momentum (`Py`, `Pz`) will have the same
461
+ # size as they represent the same bunch of particles.
462
+
463
+ # Probes however have names in the form of `Electron_Front_Probe/Px`
464
+ # which are changed to just `Px`; this is fine when there is only one
465
+ # probe in the system but when there are multiple they will have
466
+ # conflicting sizes, so we can't keep the name as simply `Px`; instead we
467
+ # set their dimension to the full name `Electron_Front_Probe_Px`.
468
+ is_probe_name_match = self.probe_names is not None and any(
469
+ name in key for name in self.probe_names
470
+ )
471
+ name_processor = (
472
+ _rename_with_underscore
473
+ if is_probe_name_match
474
+ else _grid_species_name
475
+ )
476
+ var_coords = (f"ID_{_process_grid_name(key, name_processor)}",)
477
+ else:
478
+ # These are DataArrays
479
+
480
+ # SDF makes matching up the coordinates a bit convoluted. Each
481
+ # dimension on a variable can be defined either on "grid" or
482
+ # "grid_mid", and the only way to tell which one is to compare the
483
+ # variable's dimension sizes for each grid. We do this by making a
484
+ # nested dict that looks something like:
485
+ #
486
+ # {"X": {129: "X_Grid", 129: "X_Grid_mid"}}
487
+ #
488
+ # Then we can look up the dimension label and size to get *our* name
489
+ # for the corresponding coordinate
490
+ dim_size_lookup = defaultdict(dict)
491
+ grid = self.ds.grids[value.grid]
492
+ grid_base_name = _process_grid_name(grid.name, _norm_grid_name)
493
+ for dim_size, dim_name in zip(grid.shape, grid.labels):
494
+ dim_size_lookup[dim_name][dim_size] = f"{dim_name}_{grid_base_name}"
495
+
496
+ grid_mid = self.ds.grids[value.grid_mid]
497
+ grid_mid_base_name = _process_grid_name(grid_mid.name, _norm_grid_name)
498
+ for dim_size, dim_name in zip(grid_mid.shape, grid_mid.labels):
499
+ dim_size_lookup[dim_name][
500
+ dim_size
501
+ ] = f"{dim_name}_{grid_mid_base_name}"
502
+
503
+ var_coords = [
504
+ dim_size_lookup[dim_name][dim_size]
505
+ for dim_name, dim_size in zip(grid.labels, value.shape)
506
+ ]
507
+
508
+ # TODO: error handling here? other attributes?
509
+ base_name = _rename_with_underscore(key)
510
+ long_name = _process_latex_name(base_name.replace("_", " "))
511
+ data_attrs = {
512
+ "units": value.units,
513
+ "point_data": value.is_point_data,
514
+ "full_name": key,
515
+ "long_name": long_name,
516
+ }
517
+ lazy_data = indexing.LazilyIndexedArray(SDFBackendArray(key, self))
518
+ data_vars[base_name] = Variable(var_coords, lazy_data, data_attrs)
519
+
520
+ # TODO: might need to decode if mult is set?
521
+
522
+ # # see also conventions.decode_cf_variables
523
+ # vars, attrs, coords = my_decode_variables(
524
+ # vars, attrs, decode_times, decode_timedelta, decode_coords
525
+ # )
526
+
527
+ ds = xr.Dataset(data_vars, attrs=attrs, coords=coords)
528
+ ds.set_close(self.ds.close)
529
+
530
+ return ds
531
+
532
+ def close(self, **kwargs):
533
+ self._manager.close(**kwargs)
534
+
535
+
536
+ class SDFEntrypoint(BackendEntrypoint):
537
+ def open_dataset(
538
+ self,
539
+ filename_or_obj,
540
+ *,
541
+ drop_variables=None,
542
+ keep_particles=False,
543
+ probe_names=None,
544
+ ):
545
+ if isinstance(filename_or_obj, Path):
546
+ # sdf library takes a filename only
547
+ # TODO: work out if we need to deal with file handles
548
+ filename_or_obj = str(filename_or_obj)
549
+
550
+ store = SDFDataStore.open(
551
+ filename_or_obj,
552
+ drop_variables=drop_variables,
553
+ keep_particles=keep_particles,
554
+ probe_names=probe_names,
555
+ )
556
+ with close_on_error(store):
557
+ return store.load()
558
+
559
+ open_dataset_parameters: ClassVar[list[str]] = [
560
+ "filename_or_obj",
561
+ "drop_variables",
562
+ "keep_particles",
563
+ "probe_names",
564
+ ]
565
+
566
+ def guess_can_open(self, filename_or_obj):
567
+ magic_number = try_read_magic_number_from_path(filename_or_obj)
568
+ if magic_number is not None:
569
+ return magic_number.startswith(b"SDF1")
570
+
571
+ return Path(filename_or_obj).suffix in {".sdf", ".SDF"}
572
+
573
+ description = "Use .sdf files in Xarray"
574
+
575
+ url = "https://epochpic.github.io/documentation/visualising_output/python_beam.html"
576
+
577
+
578
+ class SDFPreprocess:
579
+ """Preprocess SDF files for xarray ensuring matching job ids and sets
580
+ time dimension.
581
+
582
+ This class is used as a 'preprocess' function within ``xr.open_mfdataset``. It
583
+ performs three main duties on each individual file's Dataset:
584
+
585
+ 1. Checks for a **matching job ID** across all files to ensure dataset consistency.
586
+ 2. **Filters** the Dataset to keep only the variables specified in `data_vars`
587
+ and their required coordinates.
588
+ 3. **Expands dimensions** to include a single 'time' coordinate, preparing the
589
+ Dataset for concatenation.
590
+
591
+ EPOCH can output variables at different intervals, so some SDF files
592
+ may not contain the requested variable. We combine this data into one
593
+ dataset by concatenating across the time dimension.
594
+
595
+ The combination is performed using ``join="outer"`` (in the calling ``open_mfdataset`` function),
596
+ meaning that the final combined dataset will contain the variable across the
597
+ entire time span, with NaNs filling the time steps where the variable was absent in
598
+ the individual file.
599
+
600
+ With large SDF files, this filtering method will save on memory consumption when
601
+ compared to loading all variables from all files before concatenation.
602
+
603
+ Parameters
604
+ ----------
605
+ data_vars :
606
+ A list of data variables to load in (if not specified, all
607
+ variables are loaded)
608
+ """
609
+
610
+ def __init__(
611
+ self,
612
+ data_vars: list[str] | None = None,
613
+ ):
614
+ self.job_id: int | None = None
615
+ self.data_vars = data_vars
616
+
617
+ def __call__(self, ds: xr.Dataset) -> xr.Dataset:
618
+ if self.job_id is None:
619
+ self.job_id = ds.attrs["jobid1"]
620
+
621
+ if self.job_id != ds.attrs["jobid1"]:
622
+ raise ValueError(
623
+ f"Mismatching job ids (got {ds.attrs['jobid1']}, expected {self.job_id})"
624
+ )
625
+
626
+ # If the user has exclusively requested only certain variables be
627
+ # loaded in then we purge all other variables and coordinates
628
+ if self.data_vars:
629
+ ds = purge_unselected_data_vars(ds, self.data_vars)
630
+
631
+ time_val = ds.attrs.get("time", np.nan)
632
+ ds = ds.expand_dims(time=[time_val])
633
+ ds = ds.assign_coords(
634
+ time=(
635
+ "time",
636
+ [time_val],
637
+ {"units": "s", "long_name": "Time", "full_name": "time"},
638
+ )
639
+ )
640
+ # Particles' spatial coordinates also evolve in time
641
+ for coord, value in ds.coords.items():
642
+ if value.attrs.get("point_data", False):
643
+ ds.coords[coord] = value.expand_dims(time=[time_val])
644
+
645
+ return ds
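+ # Sketch of how this preprocessor is wired into xarray (this mirrors
+ # combine_datasets() above; the glob and variable name are placeholders):
+ # ds = xr.open_mfdataset(
+ #     "run/*.sdf",
+ #     combine="nested",
+ #     concat_dim="time",
+ #     preprocess=SDFPreprocess(data_vars=["Electric_Field_Ex"]),
+ # )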
sdf_xarray/_version.py ADDED
@@ -0,0 +1,34 @@
1
+ # file generated by setuptools-scm
2
+ # don't change, don't track in version control
3
+
4
+ __all__ = [
5
+ "__version__",
6
+ "__version_tuple__",
7
+ "version",
8
+ "version_tuple",
9
+ "__commit_id__",
10
+ "commit_id",
11
+ ]
12
+
13
+ TYPE_CHECKING = False
14
+ if TYPE_CHECKING:
15
+ from typing import Tuple
16
+ from typing import Union
17
+
18
+ VERSION_TUPLE = Tuple[Union[int, str], ...]
19
+ COMMIT_ID = Union[str, None]
20
+ else:
21
+ VERSION_TUPLE = object
22
+ COMMIT_ID = object
23
+
24
+ version: str
25
+ __version__: str
26
+ __version_tuple__: VERSION_TUPLE
27
+ version_tuple: VERSION_TUPLE
28
+ commit_id: COMMIT_ID
29
+ __commit_id__: COMMIT_ID
30
+
31
+ __version__ = version = '0.4.0'
32
+ __version_tuple__ = version_tuple = (0, 4, 0)
33
+
34
+ __commit_id__ = commit_id = 'gc5cdb3bf9'