pyramis 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pyramis-0.1.0/PKG-INFO ADDED
@@ -0,0 +1,27 @@
1
+ Metadata-Version: 2.4
2
+ Name: pyramis
3
+ Version: 0.1.0
4
+ Summary: A python-based ramses analyzer for minimalist
5
+ Author-email: San Han <san.han@iap.fr>
6
+ License: MIT
7
+ Project-URL: Repository, https://github.com/sanhan/pyramis
8
+ Keywords: ramses
9
+ Requires-Python: >=3.11
10
+ Description-Content-Type: text/markdown
11
+ Requires-Dist: h5py>=3.12.1
12
+ Requires-Dist: numpy>=1.26.4
13
+
14
+ # PYthon-based Ramses Analyzer MInimaliSt
15
+ A minimalist version of [Ramses Universal Reader](https://github.com/sanhancluster/pyramis.git), to provide key essential features for management and analysis of the [RAMSES](https://github.com/ramses-organisation/ramses) simulation data.
16
+
17
+ ## Installing
18
+ ### Using pip
19
+ ```bash
20
+ git clone https://github.com/sanhancluster/pyramis.git
21
+ cd pyramis
22
+ pip install -e .
23
+ ```
24
+ ### Using conda
25
+ ```bash
26
+ conda
27
+ ```
@@ -0,0 +1,14 @@
1
+ # PYthon-based Ramses Analyzer MInimaliSt
2
+ A minimalist version of [Ramses Universal Reader](https://github.com/sanhancluster/pyramis.git), to provide key essential features for management and analysis of the [RAMSES](https://github.com/ramses-organisation/ramses) simulation data.
3
+
4
+ ## Installing
5
+ ### Using pip
6
+ ```bash
7
+ git clone https://github.com/sanhancluster/pyramis.git
8
+ cd pyramis
9
+ pip install -e .
10
+ ```
11
+ ### Using conda
12
+ ```bash
13
+ conda
14
+ ```
@@ -0,0 +1,28 @@
1
+ [build-system]
2
+ requires = ["setuptools>=61"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "pyramis"
7
+ version = "0.1.0"
8
+ description = "A python-based ramses analyzer for minimalist"
9
+ authors = [
10
+ {name="San Han", email="san.han@iap.fr"}
11
+ ]
12
+ requires-python = ">=3.11"
13
+ license = {text = "MIT"}
14
+ dependencies = [
15
+ "h5py>=3.12.1",
16
+ "numpy>=1.26.4"
17
+ ]
18
+ readme = "README.md"
19
+ keywords = ["ramses"]
20
+
21
+ [tool.setuptools]
22
+ package-dir = {"" = "src"}
23
+
24
+ [tool.setuptools.packages.find]
25
+ where = ["src"]
26
+
27
+ [project.urls]
28
+ Repository = "https://github.com/sanhan/pyramis"
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
@@ -0,0 +1,6 @@
1
+ from .config_module import load_config
2
+ config = load_config()
3
+
4
+ from .basic import *
5
+
6
+ __all__ = ["config"]
@@ -0,0 +1,96 @@
1
+ import numpy as np
2
+ from . import config
3
+ from scipy.integrate import cumulative_trapezoid
4
+ from types import SimpleNamespace
5
+
6
+ DIM_KEYS = config['DIM_KEYS']
7
+
8
+ cgs_unit = SimpleNamespace(**config['CGS_UNIT'])
9
+
10
def get_vector(data: np.ndarray, name_format: str='{axis}', axis=-1) -> np.ndarray:
    """
    Stack per-dimension fields of a structured array into a single vector array.

    Parameters
    ----------
    data : np.ndarray
        Structured array holding one field per spatial dimension; field names
        are produced by `name_format`.
    name_format : str
        Format string for the field names. '{axis}' is replaced by each entry
        of DIM_KEYS (e.g. 'v{axis}' yields 'vx', 'vy', 'vz').
    axis : int
        Output axis along which the components are stacked.
    """
    # The loop variable is deliberately NOT named `axis`: the original code
    # shadowed the parameter inside the comprehension, and the f-string wrapper
    # around str.format was redundant.
    components = [data[name_format.format(axis=key)] for key in DIM_KEYS]
    return np.stack(components, axis=axis)
12
+
13
+
14
def get_position(data: np.ndarray, axis=-1) -> np.ndarray:
    """Return the positions stored in `data` as vectors (fields named after DIM_KEYS)."""
    return get_vector(data, '{axis}', axis=axis)
16
+
17
+
18
def get_velocity(data: np.ndarray, axis=-1) -> np.ndarray:
    """Return the velocities stored in `data` as vectors (fields 'v'+DIM_KEYS)."""
    return get_vector(data, 'v{axis}', axis=axis)
20
+
21
+
22
def get_cosmo_table(H0: float, omega_m: float, omega_l: float, omega_k=None, omega_r=None, nbins=5000, aexp_min=1E-4, aexp_max=10.0) -> np.ndarray:
    """
    Build a conversion table between aexp, conformal time, age and redshift.

    't_sc' is the conformal (super-comoving) time scale used internally by
    RAMSES cosmological runs, zeroed at aexp = 1. 'age' is the age of the
    universe in Gyr, integrated from aexp_min.

    Parameters
    ----------
    H0 : float
        Hubble constant at z=0 in km/s/Mpc.
    omega_m : float
        Matter density parameter at z=0.
    omega_l : float
        Dark energy density parameter at z=0.
    omega_k : float, optional
        Curvature density parameter; defaults to 1 - omega_m - omega_l - omega_r.
    omega_r : float, optional
        Radiation density parameter; defaults to 0.
    nbins : int, optional
        Number of (log-spaced in aexp) rows in the table, by default 5000.
    aexp_min, aexp_max : float, optional
        Expansion-factor range covered by the table.

    Returns
    -------
    np.recarray
        Record array with fields 'aexp', 't_sc', 'age' and 'z'.
    """
    # NOTE: the original defined an inner helper E(aexp) that was immediately
    # shadowed by the array `E` below and never used; it has been removed.
    if omega_r is None:
        omega_r = 0.0

    if omega_k is None:
        omega_k = 1.0 - omega_m - omega_l - omega_r

    # Integrate on a uniform grid in x = ln(aexp).
    x = np.linspace(np.log(aexp_min), np.log(aexp_max), nbins)
    aexp = np.exp(x)
    # Dimensionless Hubble rate E(a) = H(a)/H0.
    E = np.sqrt(omega_m * aexp**-3 + omega_l + omega_k * aexp**-2 + omega_r * aexp**-4)

    # dt_sc/dx = 1/(a E); shift so that t_sc(aexp = 1) = 0.
    dtsc_over_dx = np.exp(-x) / E
    tsc = cumulative_trapezoid(dtsc_over_dx, x, initial=0.0)
    tsc = tsc - np.interp(1.0, aexp, tsc)

    # dt/dx = 1/(H0 E); unit factors convert H0 from km/s/Mpc so `age` is in Gyr.
    dt_over_dx = 1. / (H0 * cgs_unit.km / cgs_unit.Mpc * E * cgs_unit.Gyr)
    age = cumulative_trapezoid(dt_over_dx, x, initial=0.0)
    z = 1.0 / aexp - 1.0
    table = np.rec.fromarrays([aexp, tsc, age, z], dtype=[('aexp', 'f8'), ('t_sc', 'f8'), ('age', 'f8'), ('z', 'f8')])

    return table
61
+
62
+
63
def cosmo_convert(table, x, xname, yname):
    """
    Convert between cosmological quantities by interpolating in a table.

    Parameters
    ----------
    table : structured array or mapping of 1-D arrays
        Conversion table such as the one built by `get_cosmo_table`.
    x : float or array-like
        Value(s) of the `xname` column to convert.
    xname, yname : str
        Source and target column names.

    Raises
    ------
    ValueError
        If any value of `x` falls outside the tabulated range of `xname`.
    """
    x_arr = np.asarray(table[xname])
    y_arr = np.asarray(table[yname])

    # np.interp requires a monotonically increasing abscissa. Columns such as
    # 'z' are tabulated in descending order (aexp ascending), which previously
    # broke both the bounds check and the interpolation — flip in that case.
    if x_arr.size > 1 and x_arr[0] > x_arr[-1]:
        x_arr = x_arr[::-1]
        y_arr = y_arr[::-1]

    if np.any(x < x_arr[0]) or np.any(x > x_arr[-1]):
        raise ValueError(f"{xname} out of bounds: valid range [{x_arr[0]}, {x_arr[-1]}]")

    y = np.interp(x, x_arr, y_arr)
    return y
72
+
73
+
74
def uniform_digitize(values, lim, nbins):
    """
    Digitize `values` into `nbins` uniform bins spanning `lim`.

    Faster than np.digitize for uniform bins; results may differ from
    np.digitize exactly at the bin edges.

    Parameters
    ----------
    values : array-like
        The input values to digitize.
    lim : array-like
        The limits for the bins; lim[..., 0] is the lower edge and
        lim[..., 1] the upper edge.
    nbins : int
        The number of bins.

    Returns
    -------
    array-like
        Bin indices in [0, nbins + 1]; 0 and nbins + 1 act as the
        underflow/overflow bins.
    """
    lower, upper = lim[..., 0], lim[..., 1]
    fraction = (values - lower) / (upper - lower)
    indices = (fraction * nbins + 1).astype(int)
    return np.clip(indices, 0, nbins + 1)
@@ -0,0 +1,21 @@
1
+ import tomllib
2
+ from pathlib import Path
3
+ import multiprocessing as mp
4
+
5
+ CONFIG_PATH = Path(__file__).parent / "config.toml"
6
+ BASE_CONFIG_PATH = Path(__file__).parent / "config_base.toml"
7
+
8
def load_config(path: str | Path = CONFIG_PATH) -> dict:
    """Load configuration from a TOML file, layered on top of the base config.

    The base config (config_base.toml) is always loaded first; top-level keys
    from `path` then override it. If `path` does not exist, the base config
    alone is returned.

    Parameters
    ----------
    path : str | Path
        User configuration file, by default CONFIG_PATH.

    Returns
    -------
    dict
        Merged configuration dictionary.
    """
    path = Path(path)

    # Fall back to the base config when the user config is absent (loading the
    # base file twice is harmless and keeps the flow uniform).
    if not path.exists():
        path = BASE_CONFIG_PATH

    with BASE_CONFIG_PATH.open("rb") as f:
        config = tomllib.load(f)

    # NOTE: shallow merge — nested tables from the user config replace the
    # corresponding base tables wholesale rather than merging key-by-key.
    with path.open("rb") as f:
        config.update(tomllib.load(f))
    return config
@@ -0,0 +1,99 @@
1
+ from typing import Union
2
+ import numpy as np
3
+
4
+ from .geometry import DIM_KEYS, Box, Region
5
+ from .utils import hilbert3d
6
+ from . import config
7
+
8
+ DEFAULT_LEVEL_SUBDIVIDE = config['DEFAULT_LEVEL_SUBDIVIDE']
9
+
10
def domain_slice(data, domain_list, bounds):
    """
    Concatenate the slices of `data` belonging to the selected domains.

    Parameters:
        data (array-like): The data to be sliced.
        domain_list (array-like): List of domain indices willing to be sliced.
        bounds (array-like): Array contains domain boundaries.
    """
    starts = bounds[domain_list]
    ends = bounds[domain_list + 1]
    pieces = [data[lo:hi] for lo, hi in zip(starts, ends)]
    return np.concatenate(pieces)
22
+
23
+
24
def compute_chunk_list_from_hilbert(region: Union[Region, np.ndarray, list], hilbert_boundary, level_hilbert, boxlen: float=1.0, level_divide=None, level_subdivide: int=DEFAULT_LEVEL_SUBDIVIDE, ndim: int=3) -> np.ndarray:
    """
    Computes the list of chunk indices that intersect with the given region based on 3-dimensional Hilbert curve partitioning.

    Parameters
    ----------
    region : Region or np.ndarray
        The spatial region of interest, either as a Region instance or a (ndim, 2) ndarray representing a bounding box.
    hilbert_boundary : np.ndarray
        Array of Hilbert boundary keys defining the chunk partitions; must be sorted ascending.
    level_hilbert : int
        The Hilbert curve level used for partitioning.
    boxlen : float
        The size of the entire box in which the Hilbert curve is defined.
    level_divide : int, optional
        The level at which to divide the Hilbert curve for chunking. If None, it is computed based on the region size.
    level_subdivide : int
        Additional subdivision level to refine the chunking.
    ndim : int
        Number of spatial dimensions (the Hilbert key computation assumes 3).

    Returns
    -------
    np.ndarray
        Sorted unique indices of the chunks that may intersect the region.
    """
    assert_ascending(hilbert_boundary)
    if isinstance(region, Region):
        bounding_box = region.bounding_box.box
    elif isinstance(region, (np.ndarray, list)) and np.shape(region) == (ndim, 2):
        bounding_box = region
        region = Box(bounding_box)
    else:
        raise ValueError("region must be either a Region instance or a (ndim, 2) ndarray representing a bounding box.")

    if level_divide is None:
        # Choose a grid level fine enough that one cell is no larger than the
        # region's smallest extent, then refine further by level_subdivide.
        level_divide = -int(np.floor(np.log2(np.min(bounding_box[:, 1] - bounding_box[:, 0]) / boxlen))) + level_subdivide
    level_divide = np.minimum(level_divide, level_hilbert)
    grid_size = boxlen * np.exp2(-level_divide)

    min_idx = np.floor(bounding_box[:, 0] / grid_size).astype(np.int64)
    max_idx = np.ceil(bounding_box[:, 1] / grid_size).astype(np.int64)

    grid_x, grid_y, grid_z = np.meshgrid(
        np.arange(min_idx[0], max_idx[0]),
        np.arange(min_idx[1], max_idx[1]),
        np.arange(min_idx[2], max_idx[2]),
    )
    grid_points = np.stack([grid_x.ravel(), grid_y.ravel(), grid_z.ravel()], axis=-1)

    if not isinstance(region, Box):
        # For non-box regions, drop grid cells whose centers fall outside.
        grid_points = grid_points[region.contains((grid_points + 0.5) * grid_size, size=grid_size/2)]
    # Each coarse cell at level_divide covers a contiguous span of fine Hilbert
    # keys; compute the key once per cell (the original evaluated hilbert3d twice).
    hilbert_keys = hilbert3d(grid_points, bit_length=level_divide)
    key_span = np.exp2(ndim * (level_hilbert - level_divide))
    hilbert_keys_min = hilbert_keys * key_span
    hilbert_keys_max = (hilbert_keys + 1) * key_span
    chunk_indices_min = np.searchsorted(hilbert_boundary, hilbert_keys_min, side='right') - 1
    chunk_indices_max = np.searchsorted(hilbert_boundary, hilbert_keys_max, side='left') - 1

    chunk_indices = np.unique(np.concatenate([np.arange(start, end + 1) for start, end in zip(chunk_indices_min, chunk_indices_max)]))
    return np.sort(chunk_indices)
76
+
77
+
78
def assert_ascending(arr, msg="Array is not sorted in ascending order."):
    """Raise ValueError with `msg` unless `arr` is sorted in non-decreasing order."""
    is_sorted = np.all(arr[1:] >= arr[:-1])
    if not is_sorted:
        raise ValueError(msg)
81
+
82
+
83
def str_to_tuple(input_data):
    """Parse a comma-separated string of integers into a tuple, e.g. '1,2' -> (1, 2)."""
    parts = input_data.split(',')
    return tuple(int(part) for part in parts)
85
+
86
+
87
def quad_to_f16(by):
    """
    Convert a little-endian IEEE 754 quadruple-precision (binary128) byte
    buffer into a numpy.float128 array (binary128 itself is not supported by
    numpy, so each record is decoded manually).

    Parameters
    ----------
    by : bytes-like or uint8 array
        Raw binary128 data; total length must be a multiple of 16 bytes.

    Returns
    -------
    np.ndarray
        One np.float128 element per 16-byte record.

    NOTE(review): only normal numbers are decoded correctly — subnormals,
    infinities and NaNs get the implicit leading bit regardless (same as the
    original implementation); confirm inputs are normal floats.
    """
    # source: https://stackoverflow.com/questions/52568037/reading-16-byte-fortran-floats-into-python-from-a-file
    # Fixes vs original: removed the unused `out = []` accumulator, and decode
    # per-record — np.float128(...) on a size>1 object array raises, so the
    # original crashed for buffers holding more than one quad.
    records = np.reshape(by, (-1, 16))
    result = np.empty(len(records), dtype=np.float128)
    two = np.float128(2.0)
    for i, raw in enumerate(records):
        bits = int.from_bytes(raw, byteorder='little')
        sign = -1.0 if (bits >> 127) & 1 else 1.0
        exponent = ((bits >> 112) & 0x7FFF) - 16383
        # Re-attach the implicit leading 1 above the 112-bit fraction.
        significand = np.float128((bits & ((1 << 112) - 1)) | (1 << 112))
        result[i] = sign * significand * two ** np.float128(exponent - 112)
    return result
@@ -0,0 +1,139 @@
1
+ import numpy as np
2
+ from . import config
3
+
4
+ DIM_KEYS = config['DIM_KEYS']
5
+
6
class Region():
    """Abstract base class for spatial regions used to select simulation data."""

    def evaluate(self, data):
        """Apply the region test when `data` is an ndarray of 3-D points; otherwise None."""
        if not isinstance(data, np.ndarray):
            return None
        if data.shape[-1] != 3:
            return None
        return self.contains(data)

    def contains(self, points, size=0):
        """Boolean mask of points (cells of side `size`) inside the region."""
        raise NotImplementedError()

    def contains_data(self, data, size=0):
        """Like `contains`, but reads coordinates from per-axis fields of `data`."""
        raise NotImplementedError()

    @property
    def center(self):
        """Geometric center of the region."""
        raise NotImplementedError()

    @property
    def bounding_box(self):
        """Smallest axis-aligned Box enclosing the region."""
        raise NotImplementedError()

    # Instances are callable: region(data) is region.evaluate(data).
    __call__ = evaluate
26
+
27
+
28
class Box(Region):
    """Axis-aligned box region stored as a (ndim, 2) array of [min, max] per axis."""

    def __init__(self, box):
        self.box = np.asarray(box)

    def set_center(self, center, extent=None):
        """Recenter the box on `center`; optionally resize it to `extent`."""
        center = np.asarray(center)
        extent = self.extent if extent is None else extent
        half = np.asarray(extent) / 2
        self.box = np.stack([center - half, center + half], axis=-1)

    @property
    def extent(self) -> np.ndarray:
        """Per-axis side lengths."""
        return self.box[:, 1] - self.box[:, 0]

    @property
    def center(self) -> np.ndarray:
        """Per-axis midpoint."""
        return np.mean(self.box, axis=-1)

    @property
    def bounding_box(self) -> "Box":
        """A box is its own bounding box."""
        return self

    def contains(self, points, size=0):
        """Mask of points whose cell of side `size` overlaps the box."""
        half_size = np.asarray(size / 2)[..., np.newaxis]
        above_lower = self.box[:, 0] <= points + half_size
        below_upper = points - half_size <= self.box[:, 1]
        return np.all(above_lower & below_upper, axis=-1)

    def contains_data(self, data, size=0):
        """Like `contains`, reading coordinates from the fields named in DIM_KEYS."""
        half_size = np.asarray(size / 2)
        mask = np.ones(len(data), dtype=bool)
        for i, key in enumerate(DIM_KEYS):
            lo, hi = self.box[i, 0], self.box[i, 1]
            mask &= (lo <= data[key] + half_size) & (data[key] - half_size <= hi)
        return mask

    def __getitem__(self, key):
        return self.box[key]
74
+
75
+
76
class Sphere(Region):
    """Spherical region defined by a center point and a radius."""

    def __init__(self, center, radius: float):
        self._center = np.asarray(center)
        self.radius = radius

    @property
    def center(self) -> np.ndarray:
        return self._center

    @property
    def bounding_box(self) -> "Box":
        """Axis-aligned box tightly enclosing the sphere."""
        box = Box(None)
        box.set_center(self.center, self.radius * 2)
        return box

    def contains(self, points, size=0):
        # NOTE(review): the radius is shrunk by the full `size` here, whereas
        # Box.contains expands by size/2 — confirm this asymmetry is intended.
        distance = np.linalg.norm(points - self.center, axis=-1)
        return distance <= self.radius - size

    def contains_data(self, data, size=0):
        """Membership test reading coordinates from the fields named in DIM_KEYS."""
        squared = np.zeros(len(data), dtype=float)
        for i, key in enumerate(DIM_KEYS):
            squared += (data[key] - self.center[i])**2
        return np.sqrt(squared) <= self.radius - size
105
+
106
+
107
class Spheroid(Region):
    """Axis-aligned ellipsoidal region defined by a center and per-axis radii."""

    def __init__(self, center, radii: np.ndarray):
        self._center = np.asarray(center)
        self.radii = np.asarray(radii)

    @property
    def center(self) -> np.ndarray:
        """Geometric center of the spheroid."""
        return self._center

    @property
    def bounding_box(self) -> "Box":
        """Axis-aligned box tightly enclosing the spheroid."""
        box = Box(None)
        box.set_center(self.center, self.radii * 2)
        return box

    def contains(self, points, size=0):
        """Mask of points inside the spheroid (radii shrunk by `size`).

        NOTE(review): each radius is reduced by the full `size`, whereas
        Box.contains expands by size/2 — confirm this asymmetry is intended.
        """
        center = self.center
        radii = self.radii
        # Normalize coordinates so the shrunken spheroid maps to the unit sphere.
        normed = (points - center) / (radii - size)
        dist = np.linalg.norm(normed, axis=-1)
        mask = dist <= 1
        return mask

    def contains_data(self, data, size=0):
        """Like `contains`, reading coordinates from the fields named in DIM_KEYS.

        NOTE(review): compares squared distance to 1 (equivalent to the sqrt
        form in `contains` since both sides are non-negative).
        """
        center = self.center
        radii = self.radii

        dist2 = np.zeros(len(data), dtype=float)
        for i, key in enumerate(DIM_KEYS):
            normed = (data[key] - center[i]) / (radii[i] - size)
            dist2 += normed**2
        mask = dist2 <= 1
        return mask