pyvlasiator 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,959 @@
1
+ from xml.etree import ElementTree
2
+ import numpy as np
3
+ import os
4
+ from collections import namedtuple
5
+ from pyvlasiator.vlsv.variables import units_predefined
6
+
7
+
8
class VMeshInfo:
    """Velocity-space mesh metadata for one particle population."""

    def __init__(
        self,
        vblocks: np.ndarray,
        vblock_size: np.ndarray,
        vmin: np.ndarray,
        vmax: np.ndarray,
        dv: np.ndarray,
    ) -> None:
        # Geometry of the velocity mesh: block counts, cells per block,
        # velocity extents and cell widths.
        self.vblocks = vblocks
        self.vblock_size = vblock_size
        self.vmin = vmin
        self.vmax = vmax
        self.dv = dv
        # Filled lazily by Vlsv.init_cellswithVDF: cell IDs that store a VDF
        # and the number of velocity blocks per such cell.
        self.cellwithVDF = np.empty(0, dtype=np.uint64)
        self.nblock_C = np.empty(0, dtype=np.int64)

        # Freeze the geometry arrays so callers cannot mutate shared state.
        for frozen in (vblocks, vblock_size, vmin, vmax, dv):
            frozen.flags.writeable = False
30
+
31
+
32
# Variable metadata (unit strings and LaTeX markup) parsed from the VLSV
# footer; returned by Vlsv.read_variable_meta.
VarInfo = namedtuple(
    "VarInfo", ["unit", "unitLaTeX", "variableLaTeX", "unitConversion"]
)
# A bare string *before* an assignment is a no-op statement, not a docstring;
# attach the documentation to the type explicitly instead.
VarInfo.__doc__ = "Variable information from the VLSV footer."
36
+
37
+
38
+ class Vlsv:
39
+ def __init__(self, filename: str):
40
+ self.dir, self.name = os.path.split(filename)
41
+ self.fid = open(filename, "rb")
42
+ self.xmlroot = ElementTree.fromstring("<VLSV></VLSV>")
43
+ self.celldict = {}
44
+ self.maxamr = -1
45
+ self.vg_indexes_on_fg = np.array([]) # SEE: map_vg_onto_fg(self)
46
+
47
+ self._read_xml_footer()
48
+
49
+ if self.has_parameter(name="time"): # Vlasiator 5.0+
50
+ self.time = self.read(name="time", tag="PARAMETER")
51
+ elif self.has_parameter(name="t"):
52
+ self.time = self.read(name="t", tag="PARAMETER")
53
+ else:
54
+ self.time = -1.0
55
+
56
+ # Check if the file is using new or old VLSV format
57
+ # Read parameters
58
+ meshName = "SpatialGrid"
59
+ bbox = self.read(tag="MESH_BBOX", mesh=meshName)
60
+ if bbox is None:
61
+ try:
62
+ # Vlasiator 4- files where the mesh is defined with parameters
63
+ self.ncells = np.array(
64
+ (
65
+ self.read_parameter("xcells_ini"),
66
+ self.read_parameter("ycells_ini"),
67
+ self.read_parameter("zcells_ini"),
68
+ ),
69
+ dtype=int,
70
+ )
71
+ self.block_size = np.array((1, 1, 1), dtype=int)
72
+ self.coordmin = np.array(
73
+ (
74
+ self.read_parameter("xmin"),
75
+ self.read_parameter("ymin"),
76
+ self.read_parameter("zmin"),
77
+ ),
78
+ dtype=float,
79
+ )
80
+ self.coordmax = np.array(
81
+ (
82
+ self.read_parameter("xmax"),
83
+ self.read_parameter("ymax"),
84
+ self.read_parameter("zmax"),
85
+ ),
86
+ dtype=float,
87
+ )
88
+ except: # dummy values
89
+ self.ncells = np.array((1, 1, 1), dtype=int)
90
+ self.block_size = np.array((1, 1, 1), dtype=int)
91
+ self.coordmin = np.array((0.0, 0.0, 0.0), dtype=float)
92
+ self.coordmax = np.array((1.0, 1.0, 1.0), dtype=float)
93
+
94
+ else:
95
+ # Vlasiator 5+ file
96
+ nodeX = self.read(tag="MESH_NODE_CRDS_X", mesh=meshName)
97
+ nodeY = self.read(tag="MESH_NODE_CRDS_Y", mesh=meshName)
98
+ nodeZ = self.read(tag="MESH_NODE_CRDS_Z", mesh=meshName)
99
+ self.ncells = np.fromiter((i for i in bbox[0:3]), dtype=int)
100
+ self.block_size = np.fromiter((i for i in bbox[3:6]), dtype=int)
101
+ self.coordmin = np.array((nodeX[0], nodeY[0], nodeZ[0]), dtype=float)
102
+ self.coordmax = np.array((nodeX[-1], nodeY[-1], nodeZ[-1]), dtype=float)
103
+
104
+ self.dcoord = np.fromiter(
105
+ ((self.coordmax[i] - self.coordmin[i]) / self.ncells[i] for i in range(3)),
106
+ dtype=float,
107
+ )
108
+
109
+ self.ncells.flags.writeable = False
110
+ self.block_size.flags.writeable = False
111
+ self.coordmin.flags.writeable = False
112
+ self.coordmax.flags.writeable = False
113
+ self.dcoord.flags.writeable = False
114
+
115
+ self.meshes = {}
116
+
117
+ # Iterate through the XML tree, find all populations
118
+ self.species = []
119
+
120
+ for child in self.xmlroot.findall("BLOCKIDS"):
121
+ if "name" in child.attrib:
122
+ popname = child.attrib["name"]
123
+ else:
124
+ popname = "avgs"
125
+
126
+ if not popname in self.species:
127
+ self.species.append(popname)
128
+
129
+ bbox = self.read(tag="MESH_BBOX", mesh=popname)
130
+ if bbox is None:
131
+ if self.read_parameter("vxblocks_ini") is not None:
132
+ # Vlasiator 4- files where the mesh is defined with parameters
133
+ vblocks = np.array(
134
+ (
135
+ self.read_parameter("vxblocks_ini"),
136
+ self.read_parameter("vyblocks_ini"),
137
+ self.read_parameter("vzblocks_ini"),
138
+ ),
139
+ dtype=int,
140
+ )
141
+ vblock_size = np.array((4, 4, 4), dtype=int)
142
+ vmin = np.array(
143
+ (
144
+ self.read_parameter("vxmin"),
145
+ self.read_parameter("vymin"),
146
+ self.read_parameter("vzmin"),
147
+ ),
148
+ dtype=float,
149
+ )
150
+ vmax = np.array(
151
+ (
152
+ self.read_parameter("vxmax"),
153
+ self.read_parameter("vymax"),
154
+ self.read_parameter("vzmax"),
155
+ ),
156
+ dtype=float,
157
+ )
158
+
159
+ else: # no velocity space
160
+ vblocks = np.array((0, 0, 0), dtype=int)
161
+ vblock_size = np.array((4, 4, 4), dtype=int)
162
+ vmin = np.array((0.0, 0.0, 0.0), dtype=float)
163
+ vmax = np.array((0.0, 0.0, 0.0), dtype=float)
164
+ dv = np.array((1.0, 1.0, 1.0), dtype=float)
165
+
166
+ else: # Vlasiator 5+ file with bounding box
167
+ nodeX = self.read(tag="MESH_NODE_CRDS_X", mesh=popname)
168
+ nodeY = self.read(tag="MESH_NODE_CRDS_Y", mesh=popname)
169
+ nodeZ = self.read(tag="MESH_NODE_CRDS_Z", mesh=popname)
170
+
171
+ vblocks = np.array((*bbox[0:3],), dtype=int)
172
+ vblock_size = np.array((*bbox[3:6],), dtype=int)
173
+ vmin = np.array((nodeX[0], nodeY[0], nodeZ[0]), dtype=float)
174
+ vmax = np.array((nodeX[-1], nodeY[-1], nodeZ[-1]), dtype=float)
175
+
176
+ dv = np.fromiter(
177
+ ((vmax[i] - vmin[i]) / vblocks[i] / vblock_size[i] for i in range(3)),
178
+ dtype=float,
179
+ )
180
+
181
+ self.meshes[popname] = VMeshInfo(vblocks, vblock_size, vmin, vmax, dv)
182
+
183
+ # Precipitation energy bins
184
+ i = 0
185
+ energybins = []
186
+ binexists = True
187
+ while binexists:
188
+ binexists = self.has_parameter(
189
+ f"{popname}_PrecipitationCentreEnergy{i}"
190
+ )
191
+ if binexists:
192
+ binvalue = self.read_parameter(
193
+ f"{popname}_PrecipitationCentreEnergy{i}"
194
+ )
195
+ energybins.append(binvalue)
196
+ i += 1
197
+ if i > 1:
198
+ self.precipitationenergybins[popname] = energybins
199
+
200
+ self.variable = [
201
+ node.attrib["name"] for node in self.xmlroot.findall("VARIABLE")
202
+ ]
203
+
204
+ cellid = self.read(mesh="SpatialGrid", name="CellID", tag="VARIABLE")
205
+ self.cellindex = np.argsort(cellid)
206
+ self.celldict = {cid: i for (i, cid) in enumerate(cellid)}
207
+
208
+ self.maxamr = self.getmaxrefinement(cellid)
209
+
210
+ self.nodecellwithVDF = self.xmlroot.findall("CELLSWITHBLOCKS")
211
+
212
+ if len(self.nodecellwithVDF) == 0:
213
+ self.hasvdf = False
214
+ else:
215
+ self.hasvdf = self.nodecellwithVDF[0].attrib["arraysize"] != "0"
216
+
217
+ def __repr__(self) -> str:
218
+ str = (
219
+ f"File : {self.name}\n"
220
+ f"Time : {self.time:.4f}\n"
221
+ f"Dimension : {self.ndims()}\n"
222
+ f"Max AMR lvl: {self.maxamr}\n"
223
+ f"Has VDF : {self.hasvdf}\n"
224
+ f"Variables : {self.variable}\n"
225
+ )
226
+ return str
227
+
228
+ def ndims(self) -> int:
229
+ """Get the spatial dimension of data."""
230
+ return sum(i > 1 for i in self.ncells)
231
+
232
    def _read_xml_footer(self) -> None:
        """Read the XML footer of the VLSV file.

        VLSV layout: an 8-byte endianness marker, then an 8-byte little-endian
        offset to the XML footer, which extends from that offset to EOF.
        Replaces ``self.xmlroot`` with the parsed footer.
        """
        fid = self.fid
        # first 8 bytes indicate endianness
        endianness_offset = 8
        fid.seek(endianness_offset)
        # offset of the XML file
        uint64_byte_amount = 8
        offset = int.from_bytes(fid.read(uint64_byte_amount), "little", signed=True)
        fid.seek(offset)
        # The footer runs to the end of the file.
        xmlstring = fid.read()
        self.xmlroot = ElementTree.fromstring(xmlstring)
244
+
245
    def read(
        self,
        name: str = "",
        tag: str = "",
        mesh: str = "",
        cellids=-1,
    ) -> np.ndarray:
        """
        Read data of name, tag, and mesh from the vlsv file.

        This is the general reading function for all types of variables in VLSV files.

        Parameters
        ----------
        cellids : int or list of int
            If -1 then all data is read. If nonzero then only the vector for the specified
            cell id or cellids is read.
        Returns
        -------
        numpy.ndarray
            Returns ``None`` implicitly when ``name`` is empty and no footer
            entry matches (used by callers probing for optional tags).
        """
        if not tag and not name:
            raise ValueError("Must provide either tag or name")

        # Footer names are matched case-insensitively.
        name = name.lower()

        fid = self.fid

        # "pop/var" selects a per-population variable; otherwise use a dummy
        # population name. NOTE(review): popname/varname are currently unused
        # below — presumably reserved for the data reducers TODO.
        if "/" in name:
            popname, varname = name.split("/")
        else:
            popname, varname = "pop", name

        # TODO: add data reducers

        for child in self.xmlroot:
            if tag and child.tag != tag:
                continue
            if name and "name" in child.attrib and child.attrib["name"].lower() != name:
                continue
            if mesh and "mesh" in child.attrib and child.attrib["mesh"] != mesh:
                continue
            if child.tag == tag:
                # Array layout from the footer; the element text holds the
                # byte offset of the raw data within the file.
                vsize = int(child.attrib["vectorsize"])
                asize = int(child.attrib["arraysize"])
                dsize = int(child.attrib["datasize"])
                dtype = child.attrib["datatype"]
                variable_offset = int(child.text)

                # Select efficient method to read data based on number of cells
                if hasattr(cellids, "__len__"):
                    ncellids = len(cellids)
                    # Read multiple specified cells
                    # For reading a large amount of single cells, it'll be faster to
                    # read all data from the file and sort afterwards.
                    arraydata = []
                    if ncellids > 5000:
                        result_size = ncellids
                        read_size = asize
                        read_offsets = [0]
                    else:  # Read multiple cell ids one-by-one
                        result_size = ncellids
                        read_size = 1
                        read_offsets = [
                            self.celldict[cid] * dsize * vsize for cid in cellids
                        ]
                else:
                    if cellids < 0:  # all cells
                        result_size = asize
                        read_size = asize
                        read_offsets = [0]
                    else:  # parameter or single cell
                        result_size = 1
                        read_size = 1
                        read_offsets = [self.celldict[cellids] * dsize * vsize]

                for r_offset in read_offsets:
                    use_offset = int(variable_offset + r_offset)
                    fid.seek(use_offset)

                    # Map the footer's (datatype, datasize) pair onto a numpy
                    # dtype; ``dtype`` is rebound from str to a numpy type here.
                    if dtype == "float" and dsize == 4:
                        dtype = np.float32
                    elif dtype == "float" and dsize == 8:
                        dtype = np.float64
                    elif dtype == "int" and dsize == 4:
                        dtype = np.int32
                    elif dtype == "int" and dsize == 8:
                        dtype = np.int64
                    elif dtype == "uint" and dsize == 4:
                        dtype = np.uint32
                    elif dtype == "uint" and dsize == 8:
                        dtype = np.uint64

                    data = np.fromfile(fid, dtype, count=vsize * read_size)

                    if len(read_offsets) != 1:
                        arraydata.append(data)

                if len(read_offsets) == 1 and result_size < read_size:
                    # Many single cell IDs requested
                    # Pick the elements corresponding to the requested cells
                    for cid in cellids:
                        append_offset = self.celldict[cid] * vsize
                        arraydata.append(data[append_offset : append_offset + vsize])
                    data = np.squeeze(np.array(arraydata))
                elif len(read_offsets) != 1:
                    # Not so many single cell IDs requested
                    data = np.squeeze(np.array(arraydata))

                # Vector quantities come back as (result_size, vsize).
                if vsize > 1:
                    data = data.reshape(result_size, vsize)

                if result_size == 1:
                    return data[0]
                else:
                    return data

        if name:
            raise NameError(
                name
                + "/"
                + tag
                + "/"
                + mesh
                + " not found in .vlsv file or in data reducers!"
            )
371
+
372
    def read_variable(
        self, name: str, cellids: int | list[int] | np.ndarray = -1, sorted: bool = True
    ) -> np.ndarray:
        """
        Read variables as numpy arrays from the open vlsv file.

        Parameters
        ----------
        cellids : int or list[int] or np.ndarray
            If -1 then all data is read. If nonzero then only the vector for the specified
            cell id or cellids is read.
        sorted : bool
            If the returned array is sorted by cell IDs. Only applied for full arrays.
        Returns
        -------
        numpy.ndarray
        """

        # FSgrid variables live on the uniform field-solver grid and do not
        # support per-cell selection.
        if self.has_variable(name) and name.startswith("fg_"):
            if not cellids == -1:
                raise ValueError("CellID requests not supported for FSgrid.")
            return self.read_fg_variable(name=name)

        # Ionosphere variables are delegated to a dedicated reader
        # (defined elsewhere in this module).
        if self.has_variable(name) and name.startswith("ig_"):
            if not cellids == -1:
                raise ValueError("CellID requests not supported for ionosphere.")
            return self.read_ionosphere_variable(name=name)

        raw = self.read(
            mesh="SpatialGrid",
            name=name,
            tag="VARIABLE",
        cellids=cellids,
        ) if False else self.read(
            mesh="SpatialGrid",
            name=name,
            tag="VARIABLE",
            cellids=cellids,
        )
        if hasattr(cellids, "__len__"):  # part of cells requested
            return np.float32(raw)

        if sorted:
            # Reorder file order -> ascending cell ID order.
            if raw.ndim == 1:
                v = raw[self.cellindex]
            else:
                v = raw[self.cellindex, :]
            if v.dtype == np.float64:  # 32-bit is enough for analysis
                v = np.float32(v)
        else:
            v = raw

        return v
420
+
421
    def read_fg_variable(self, name: str):
        """Read an FSgrid variable and reassemble it onto the full-resolution grid.

        FSgrid data is written per MPI rank; this reconstructs the writers'
        domain decomposition and places each rank's chunk into a single
        (nx, ny, nz[, ncomp]) float32 array.
        """
        raw = self.read(
            mesh="fsgrid",
            name=name,
            tag="VARIABLE",
        )

        # FSgrid spans the whole domain at the finest AMR resolution.
        bbox = tuple(ncell * 2**self.maxamr for ncell in self.ncells)

        # Determine fsgrid domain decomposition
        nIORanks = self.read_parameter("numWritingRanks")  # Int32

        if raw.ndim > 1:
            v = np.empty((*bbox, raw.shape[-1]), dtype=np.float32)
        else:
            v = np.empty(bbox, dtype=np.float32)

        def getDomainDecomposition(globalsize, nproc: int) -> list[int]:
            """Obtain decomposition of this grid over the given number of processors.
            Reference: fsgrid.hpp
            """
            domainDecomp = (1, 1, 1)
            minValue = 1e20
            for i in range(1, min(nproc, globalsize[0]) + 1):
                iBox = max(globalsize[0] / i, 1.0)
                for j in range(1, min(nproc, globalsize[1]) + 1):
                    if i * j > nproc:
                        break
                    jBox = max(globalsize[1] / j, 1.0)
                    for k in range(1, min(nproc, globalsize[2]) + 1):
                        if i * j * k > nproc:
                            continue
                        kBox = max(globalsize[2] / k, 1.0)
                        # Cost: weighted volume plus communicated face areas.
                        v = (
                            10 * iBox * jBox * kBox
                            + ((jBox * kBox) if i > 1 else 0)
                            + ((iBox * kBox) if j > 1 else 0)
                            + ((iBox * jBox) if k > 1 else 0)
                        )
                        if i * j * k == nproc and v < minValue:
                            minValue = v
                            domainDecomp = (i, j, k)

            return domainDecomp

        def calcLocalStart(globalCells, nprocs: int, lcells: int) -> int:
            # Starting global index of rank ``lcells`` along one axis.
            ncells = globalCells // nprocs
            remainder = globalCells % nprocs
            lstart = (
                lcells * (ncells + 1)
                if lcells < remainder
                else lcells * ncells + remainder
            )

            return lstart

        def calcLocalSize(globalCells, nprocs: int, lcells: int) -> int:
            # Cell count owned by rank ``lcells`` along one axis; the first
            # ``remainder`` ranks get one extra cell.
            ncells = globalCells // nprocs
            remainder = globalCells % nprocs
            lsize = ncells + 1 if lcells < remainder else ncells

            return lsize

        fgDecomposition = getDomainDecomposition(bbox, nIORanks)

        offsetnow = 0

        for i in range(nIORanks):
            # Rank index -> (x, y, z) position in the decomposition
            # (z varies fastest).
            xyz = (
                i // fgDecomposition[2] // fgDecomposition[1],
                i // fgDecomposition[2] % fgDecomposition[1],
                i % fgDecomposition[2],
            )

            lsize = tuple(
                map(
                    lambda i: calcLocalSize(bbox[i], fgDecomposition[i], xyz[i]),
                    range(0, 3),
                )
            )

            lstart = tuple(
                map(
                    lambda i: calcLocalStart(bbox[i], fgDecomposition[i], xyz[i]),
                    range(0, 3),
                )
            )

            offsetnext = offsetnow + np.prod(lsize)
            lend = tuple(st + si for st, si in zip(lstart, lsize))

            # Reorder data: each rank's chunk is Fortran-ordered on disk.
            if raw.ndim > 1:
                v[lstart[0] : lend[0], lstart[1] : lend[1], lstart[2] : lend[2], :] = (
                    raw[offsetnow:offsetnext, :].reshape(
                        *lsize, raw.shape[-1], order="F"
                    )
                )
            else:
                v[lstart[0] : lend[0], lstart[1] : lend[1], lstart[2] : lend[2]] = raw[
                    offsetnow:offsetnext
                ].reshape(*lsize, order="F")

            offsetnow = offsetnext

        return v
527
+
528
+ def read_variable_meta(self, var: str):
529
+ unit, unitLaTeX, variableLaTeX, unitConversion = "", "", "", ""
530
+
531
+ if var in units_predefined:
532
+ unit, variableLaTeX, unitLaTeX = units_predefined[var]
533
+ elif self.has_variable(var): # For Vlasiator 5 files, MetaVLSV is included
534
+ for child in self.xmlroot:
535
+ if "name" in child.attrib and child.attrib["name"] == var:
536
+ if not "unit" in child.attrib:
537
+ break
538
+ else:
539
+ unit = child.attrib["unit"]
540
+ unitLaTeX = child.attrib["unitLaTeX"]
541
+ variableLaTeX = child.attrib["variableLaTeX"]
542
+ unitConversion = child.attrib["unitConversion"]
543
+
544
+ return VarInfo(unit, unitLaTeX, variableLaTeX, unitConversion)
545
+
546
+ def read_parameter(self, name: str):
547
+ return self.read(name=name, tag="PARAMETER")
548
+
549
+ def read_vcells(self, cellid: int, species: str = "proton"):
550
+ """Read raw velocity block data.
551
+
552
+ Parameters
553
+ ----------
554
+ cellid :
555
+ Cell ID of the cell whose velocity blocks are read.
556
+ species : str
557
+ Population required.
558
+
559
+ Returns
560
+ -------
561
+ numpy.ndarray
562
+ A numpy array with block ids and their data.
563
+ """
564
+
565
+ fid = self.fid
566
+ mesh = self.meshes[species]
567
+ vblock_size = mesh.vblock_size
568
+
569
+ self.init_cellswithVDF(species)
570
+
571
+ # Check that cells have VDF stored
572
+ try:
573
+ cellWithVDFIndex = np.where(mesh.cellwithVDF == cellid)[0][0]
574
+ nblocks = mesh.nblock_C[cellWithVDFIndex]
575
+ except:
576
+ raise ValueError(f"Cell ID {cellid} does not store VDF!")
577
+ # Offset position to vcell storage
578
+ offset_v = np.sum(mesh.nblock_C[0:cellWithVDFIndex], initial=0)
579
+
580
+ # Read raw VDF
581
+ for node in self.xmlroot.findall("BLOCKVARIABLE"):
582
+ if node.attrib["name"] == species:
583
+ dsize = int(node.attrib["datasize"])
584
+ offset = int(node.text)
585
+ break
586
+
587
+ bsize = np.prod(vblock_size)
588
+ fid.seek(offset_v * bsize * dsize + offset)
589
+ T = np.float32 if dsize == 4 else np.float64
590
+ data = np.fromfile(
591
+ fid,
592
+ dtype=T,
593
+ count=bsize * nblocks,
594
+ ).reshape(nblocks, bsize)
595
+
596
+ # Read block IDs
597
+ for node in self.xmlroot.findall("BLOCKIDS"):
598
+ if node.attrib["name"] == species:
599
+ dsize = int(node.attrib["datasize"])
600
+ offset = int(node.text)
601
+ break
602
+
603
+ fid.seek(offset_v * dsize + offset)
604
+ T = np.int32 if dsize == 4 else np.int64
605
+ blockIDs = np.fromfile(fid, dtype=T, count=nblocks)
606
+
607
+ # Velocity cell IDs and distributions (ordered by blocks)
608
+ vcellids = np.empty(bsize * nblocks, dtype=np.int32)
609
+ vcellf = np.empty(bsize * nblocks, dtype=np.float32)
610
+
611
+ for i, bid in enumerate(blockIDs):
612
+ for j in range(bsize):
613
+ index_ = i * bsize + j
614
+ vcellids[index_] = j + bsize * bid
615
+ vcellf[index_] = data[i, j]
616
+
617
+ return vcellids, vcellf
618
+
619
    def init_cellswithVDF(self, species: str = "proton") -> None:
        """Lazily load which cells store a VDF for ``species``.

        Fills ``mesh.cellwithVDF`` (cell IDs) and ``mesh.nblock_C`` (velocity
        block counts per cell), then drops cells that report zero blocks.
        No-op if already initialized.
        """
        fid = self.fid
        mesh = self.meshes[species]
        if not np.any(mesh.cellwithVDF):
            # Cell IDs that carry a distribution (CELLSWITHBLOCKS footer entry).
            for node in self.nodecellwithVDF:
                if node.attrib["name"] == species:
                    asize = int(node.attrib["arraysize"])
                    offset = int(node.text)
                    fid.seek(offset)
                    cellwithVDF = np.fromfile(fid, dtype=np.uint64, count=asize)
                    mesh.cellwithVDF = cellwithVDF
                    break

            # Number of velocity blocks stored for each of those cells.
            for node in self.xmlroot.findall("BLOCKSPERCELL"):
                if node.attrib["name"] == species:
                    asize = int(node.attrib["arraysize"])
                    dsize = int(node.attrib["datasize"])
                    offset = int(node.text)
                    fid.seek(offset)
                    T = np.int32 if dsize == 4 else np.int64
                    nblock_C = np.fromfile(fid, dtype=T, count=asize).astype(np.int64)
                    mesh.nblock_C = nblock_C
                    break

            # Discard cells with zero blocks — they hold no usable VDF.
            mesh.cellwithVDF = np.delete(mesh.cellwithVDF, np.where(mesh.nblock_C == 0))
644
+
645
+ def _has_attribute(self, attribute: str, name: str) -> bool:
646
+ """Check if a given attribute exists in the xml."""
647
+ for child in self.xmlroot:
648
+ if child.tag == attribute and "name" in child.attrib:
649
+ if child.attrib["name"].lower() == name.lower():
650
+ return True
651
+ return False
652
+
653
    def has_variable(self, name: str) -> bool:
        """Return True if the footer lists a VARIABLE named ``name`` (case-insensitive)."""
        return self._has_attribute("VARIABLE", name)
655
+
656
    def has_parameter(self, name: str) -> bool:
        """Return True if the footer lists a PARAMETER named ``name`` (case-insensitive)."""
        return self._has_attribute("PARAMETER", name)
658
+
659
+ def getmaxrefinement(self, cellid: np.ndarray):
660
+ """Get the maximum spatial refinement level."""
661
+ ncell = np.prod(self.ncells)
662
+ maxamr, cid = 0, ncell
663
+ while cid < max(cellid):
664
+ maxamr += 1
665
+ cid += ncell * 8**maxamr
666
+
667
+ return maxamr
668
+
669
    def getcell(self, loc: np.ndarray | tuple[int, ...] | list[int]):
        """Return the 1-based cell ID containing spatial location ``loc``.

        Starts with the coarsest-level candidate and descends refinement
        levels until the candidate ID exists in ``self.celldict``.

        Raises
        ------
        ValueError
            If any coordinate of ``loc`` lies outside the domain.
        """
        coordmin, coordmax = self.coordmin, self.coordmax
        dcoord = self.dcoord
        ncells = self.ncells
        celldict = self.celldict
        maxamr = self.maxamr

        for i in range(3):
            if not coordmin[i] < loc[i] < coordmax[i]:
                raise ValueError(f"{i} coordinate out of bound!")

        # Level-0 integer indices of the containing cell.
        indices = np.fromiter(
            ((loc[i] - coordmin[i]) // dcoord[i] for i in range(3)), dtype=int
        )

        # Candidate ID on the coarsest level (IDs are 1-based).
        cid = (
            indices[0] + indices[1] * ncells[0] + indices[2] * ncells[0] * ncells[1] + 1
        )

        ncells_lowerlevel = 0
        ncell = np.prod(ncells)

        for ilevel in range(maxamr):
            if cid in celldict:
                break
            # IDs of all coarser levels precede this level's ID range.
            ncells_lowerlevel += (8**ilevel) * ncell
            ratio = 2 ** (ilevel + 1)
            indices = np.fromiter(
                (
                    np.floor((loc[i] - coordmin[i]) / dcoord[i] * ratio)
                    for i in range(3)
                ),
                dtype=int,
            )
            cid = (
                ncells_lowerlevel
                + indices[0]
                + ratio * ncells[0] * indices[1]
                + ratio**2 * ncells[0] * ncells[1] * indices[2]
                + 1
            )

        return cid
712
+
713
+ def getvcellcoordinates(
714
+ self, vcellids: np.ndarray, species: str = "proton"
715
+ ) -> np.ndarray:
716
+ mesh = self.meshes[species]
717
+ vblocks = mesh.vblocks
718
+ vblock_size = mesh.vblock_size
719
+ dv = mesh.dv
720
+ vmin = mesh.vmin
721
+
722
+ bsize = np.prod(vblock_size)
723
+ blockid = np.fromiter((cid // bsize for cid in vcellids), dtype=int)
724
+ # Get block coordinates
725
+ blockInd = [
726
+ np.array(
727
+ (
728
+ bid % vblocks[0],
729
+ bid // vblocks[0] % vblocks[1],
730
+ bid // (vblocks[0] * vblocks[1]),
731
+ ),
732
+ dtype=int,
733
+ )
734
+ for bid in blockid
735
+ ]
736
+ blockCoord = [
737
+ np.array(
738
+ (
739
+ bInd[0] * dv[0] * vblock_size[0] + vmin[0],
740
+ bInd[1] * dv[1] * vblock_size[1] + vmin[1],
741
+ bInd[2] * dv[2] * vblock_size[2] + vmin[2],
742
+ ),
743
+ dtype=float,
744
+ )
745
+ for bInd in blockInd
746
+ ]
747
+ # Get cell indices
748
+ vcellblockids = np.fromiter((vid % bsize for vid in vcellids), dtype=int)
749
+ cellidxyz = np.array(
750
+ [
751
+ np.array(
752
+ (
753
+ cid % vblock_size[0],
754
+ cid // vblock_size[0] % vblock_size[1],
755
+ cid // (vblock_size[0] * vblock_size[1]),
756
+ ),
757
+ dtype=int,
758
+ )
759
+ for cid in vcellblockids
760
+ ]
761
+ )
762
+ # Get cell coordinates
763
+ cellCoords = np.array(
764
+ [
765
+ np.fromiter(
766
+ (
767
+ blockCoord[i][j] + (cellidxyz[i][j] + 0.5) * dv[j]
768
+ for j in range(3)
769
+ ),
770
+ dtype=float,
771
+ )
772
+ for i in range(len(vcellids))
773
+ ]
774
+ )
775
+
776
+ return cellCoords
777
+
778
    def getnearestcellwithvdf(self, id: int, species: str = "proton"):
        """Return the ID of the VDF-carrying cell nearest to cell ``id``.

        Distance is Euclidean between cell centers.

        Raises
        ------
        ValueError
            If the file stores no distribution for ``species``.
        """
        self.init_cellswithVDF(species)
        cells = self.meshes[species].cellwithVDF
        if not np.any(cells):
            raise ValueError(f"No distribution saved in {self.name}")
        coords_orig = self.getcellcoordinates(id)
        coords = [self.getcellcoordinates(cid) for cid in cells]
        # Squared distance suffices for argmin; the sqrt is skipped.
        min_ = np.argmin(np.sum(np.square(coords - coords_orig), axis=1))

        return cells[min_]
788
+
789
+ def getcellcoordinates(self, cid: int):
790
+ ncells = self.ncells
791
+ coordmin, coordmax = self.coordmin, self.coordmax
792
+ cid -= 1 # for easy divisions
793
+
794
+ ncells_refmax = list(ncells)
795
+ reflevel = 0
796
+ subtraction = np.prod(ncells) * (2**reflevel) ** 3
797
+ # sizes on the finest level
798
+ while cid >= subtraction:
799
+ cid -= subtraction
800
+ reflevel += 1
801
+ subtraction *= 8
802
+ ncells_refmax[0] *= 2
803
+ ncells_refmax[1] *= 2
804
+ ncells_refmax[2] *= 2
805
+
806
+ indices = np.array(
807
+ (
808
+ cid % ncells_refmax[0],
809
+ cid // ncells_refmax[0] % ncells_refmax[1],
810
+ cid // (ncells_refmax[0] * ncells_refmax[1]),
811
+ ),
812
+ dtype=int,
813
+ )
814
+
815
+ coords = np.fromiter(
816
+ (
817
+ coordmin[i]
818
+ + (indices[i] + 0.5) * (coordmax[i] - coordmin[i]) / ncells_refmax[i]
819
+ for i in range(3)
820
+ ),
821
+ dtype=float,
822
+ )
823
+
824
+ return coords
825
+
826
    def getslicecell(
        self, sliceoffset: float, dir: int, minCoord: float, maxCoord: float
    ):
        """Find the cells of a 2D cut through the AMR mesh.

        Parameters
        ----------
        sliceoffset : float
            Offset of the cut plane from ``minCoord`` along axis ``dir``.
        dir : int
            Normal axis of the cut (0, 1, or 2).
        minCoord, maxCoord : float
            Domain extent along ``dir``.

        Returns
        -------
        (idlist, indexlist)
            Cell IDs intersecting the plane and their positions within the
            cell-ID-sorted data arrays.
        """
        if not dir in (0, 1, 2):
            raise ValueError(f"Unknown slice direction {dir}")

        ncells, maxamr, celldict = self.ncells, self.maxamr, self.celldict
        nsize = ncells[dir]
        sliceratio = sliceoffset / (maxCoord - minCoord)
        if not (0.0 <= sliceratio <= 1.0):
            raise ValueError("slice plane index out of bound!")

        # Find the ids
        nlen = 0
        ncell = np.prod(ncells)
        # number of cells up to each refinement level
        lvlC = np.fromiter((ncell * 8**ilvl for ilvl in range(maxamr + 1)), dtype=int)
        lvlAccum = np.add.accumulate(lvlC)
        nStart = np.insert(lvlAccum, 0, 0)

        indexlist = np.empty(0, dtype=int)
        idlist = np.empty(0, dtype=int)

        cellidsorted = np.fromiter(celldict.keys(), dtype=int)
        cellidsorted.sort()

        for ilvl in range(maxamr + 1):
            # IDs belonging to refinement level ilvl lie in (nLow, nHigh].
            nLow, nHigh = nStart[ilvl], nStart[ilvl + 1]
            idfirst_ = np.searchsorted(cellidsorted, nLow + 1)
            idlast_ = np.searchsorted(cellidsorted, nHigh, side="right")

            ids = cellidsorted[idfirst_:idlast_]

            ix, iy, iz = getindexes(ilvl, ncells[0], ncells[1], nLow, ids)

            if dir == 0:
                coords = ix
            elif dir == 1:
                coords = iy
            else:
                coords = iz

            # Find the cut plane index for each refinement level (0-based)
            depth = int(np.floor(sliceratio * nsize * 2**ilvl))
            # Find the needed elements to create the cut and save the results
            elements = coords == depth
            indexlist = np.append(indexlist, np.arange(nlen, nlen + len(ids))[elements])
            idlist = np.append(idlist, ids[elements])

            nlen += len(ids)

        return idlist, indexlist
878
+
879
    def refineslice(self, idlist: np.ndarray, data: np.ndarray, normal: int):
        """Upsample AMR slice data onto a uniform grid at maximum refinement.

        Parameters
        ----------
        idlist : np.ndarray
            Sorted cell IDs of the slice (as returned by ``getslicecell``).
        data : np.ndarray
            Data values for those cells, in the same order.
        normal : int
            Normal axis of the slice (0, 1, or 2).

        Returns
        -------
        np.ndarray
            2D array shaped (dims[1], dims[0]) for direct use with matplotlib.
        """
        ncells, maxamr = self.ncells, self.maxamr

        dims = _getdim2d(ncells, maxamr, normal)
        # meshgrid-like 2D input for matplotlib
        dpoints = np.empty((dims[1], dims[0]), dtype=data.dtype)

        # Create the plot grid
        ncell = np.prod(ncells)
        nHigh, nLow = ncell, 0

        for i in range(maxamr + 1):
            # Slice cells belonging to refinement level i have IDs in (nLow, nHigh].
            idfirst_ = np.searchsorted(idlist, nLow + 1)
            idlast_ = np.searchsorted(idlist, nHigh, side="right")

            ids = idlist[idfirst_:idlast_]
            d = data[idfirst_:idlast_]

            ix, iy, iz = getindexes(i, ncells[0], ncells[1], nLow, ids)

            # Get the correct coordinate values and the widths for the plot
            if normal == 0:
                a, b = iy, iz
            elif normal == 1:
                a, b = ix, iz
            elif normal == 2:
                a, b = ix, iy

            # Insert the data values into dpoints: each level-i cell covers a
            # refineRatio x refineRatio patch of fine-grid pixels.
            refineRatio = 2 ** (maxamr - i)
            iRange = range(refineRatio)
            X, Y = np.meshgrid(iRange, iRange, indexing="ij")
            coords = np.empty((len(a), 2 ** (2 * (maxamr - i)), 2), dtype=int)

            for ic, (ac, bc) in enumerate(zip(a, b)):
                for ir in range(2 ** (2 * (maxamr - i))):
                    index_ = np.unravel_index(ir, (refineRatio, refineRatio))
                    coords[ic, ir] = [
                        ac * refineRatio + X[index_],
                        bc * refineRatio + Y[index_],
                    ]

            # Write the (constant) cell value into every covered pixel;
            # note dpoints is indexed (row=2nd axis, col=1st axis).
            for ic, dc in enumerate(d):
                for ir in range(2 ** (2 * (maxamr - i))):
                    dpoints[coords[ic, ir, 1], coords[ic, ir, 0]] = dc

            nLow = nHigh
            nHigh += ncell * 8 ** (i + 1)

        return dpoints
929
+
930
+
931
+ def _getdim2d(ncells: tuple, maxamr: int, normal: int):
932
+ ratio = 2**maxamr
933
+ if normal == 0:
934
+ i1, i2 = 1, 2
935
+ elif normal == 1:
936
+ i1, i2 = 0, 2
937
+ elif normal == 2:
938
+ i1, i2 = 0, 1
939
+ dims = (ncells[i1] * ratio, ncells[i2] * ratio)
940
+
941
+ return dims
942
+
943
+
944
def getindexes(
    ilevel: int, xcells: int, ycells: int, nCellUptoLowerLvl: int, ids: np.ndarray
):
    """Decompose 1-based cell IDs into (ix, iy, iz) indices on level ``ilevel``.

    ``xcells``/``ycells`` are the level-0 cell counts and
    ``nCellUptoLowerLvl`` is the total number of cells on all coarser levels.
    """
    ratio = 2**ilevel
    nx = xcells * ratio              # cells along x on this level
    slicesize = nx * ycells * ratio  # cells in one z-plane

    # 0-based offset of each id within this refinement level.
    local = ids - nCellUptoLowerLvl - 1

    iz = local // slicesize
    remainder = local - iz * slicesize  # offset within the z-plane
    iy = remainder // nx
    ix = remainder - iy * nx

    return ix, iy, iz