curdf-0.1.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
curdf-0.1.0/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 Joseph Hart
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
curdf-0.1.0/PKG-INFO ADDED
@@ -0,0 +1,20 @@
1
+ Metadata-Version: 2.4
2
+ Name: cuRDF
3
+ Version: 0.1.0
4
+ Summary: GPU-accelerated radial distribution functions using Toolkit-Ops + PyTorch with MDAnalysis and ASE adapters.
5
+ Requires-Python: >=3.10
6
+ License-File: LICENSE
7
+ Requires-Dist: torch
8
+ Requires-Dist: numpy
9
+ Requires-Dist: nvalchemiops
10
+ Provides-Extra: analysis
11
+ Requires-Dist: MDAnalysis; extra == "analysis"
12
+ Requires-Dist: ase; extra == "analysis"
13
+ Requires-Dist: matplotlib; extra == "analysis"
14
+ Provides-Extra: dev
15
+ Requires-Dist: pytest; extra == "dev"
16
+ Provides-Extra: docs
17
+ Requires-Dist: sphinx; extra == "docs"
18
+ Requires-Dist: sphinx-rtd-theme; extra == "docs"
19
+ Requires-Dist: myst-parser; extra == "docs"
20
+ Dynamic: license-file
curdf-0.1.0/README.md ADDED
@@ -0,0 +1,83 @@
1
+ # cuRDF
2
+
3
+ [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.1085332119.svg)](https://doi.org/10.5281/zenodo.1085332119)
4
+
5
+
6
+ CUDA-accelerated radial distribution functions using NVIDIA ALCHEMI Toolkit-Ops O(N) neighbor lists and PyTorch. Compatible with ASE and MDAnalysis.
7
+
8
+ [![PyPI version](https://badge.fury.io/py/curdf.svg)](https://badge.fury.io/py/curdf)
9
+ [![Tests](https://github.com/josephhart/amorphous-carbon/actions/workflows/tests.yml/badge.svg)](https://github.com/josephhart/amorphous-carbon/actions/workflows/tests.yml)
10
+
11
+ ## Install (editable)
12
+ ```
13
+ pip install -e .
14
+ ```
15
+ Add `[analysis]` extras if you want MDAnalysis/ASE/matplotlib:
16
+ ```
17
+ pip install -e .[analysis]
18
+ ```
19
+
20
+ ## Library usage
21
+ ```python
22
+ import curdf
23
+ import MDAnalysis as mda
24
+
25
+ u = mda.Universe("top.data", "traj.dcd")
26
+ bins, gr = curdf.rdf_from_mdanalysis(u, selection="name C", r_min=1.0, r_max=8.0, nbins=200)
27
+ ```
28
+
29
+ ASE (single structure or trajectory: XYZ/extxyz/ASE .traj):
30
+ ```python
31
+ from ase.io import read
32
+ from curdf import rdf_from_ase
33
+
34
+ atoms = read("structure.xyz")
35
+ bins, gr = rdf_from_ase(atoms, selection=None, r_min=1.0, r_max=8.0, nbins=200) # selection=None -> all atoms
36
+ ```
37
+
38
+ Cross-species (ASE): provide index lists for group A and group B:
39
+ ```python
40
+ bins, gr = rdf_from_ase(atoms, selection=[0,1,2], selection_b=[3,4,5], r_min=1.0, r_max=8.0, nbins=200, half_fill=False)
41
+ ```
42
+
43
+ MDAnalysis (explicit dependency required; also supports LAMMPS dump):
44
+ ```python
45
+ import MDAnalysis as mda
46
+ from curdf import rdf_from_mdanalysis
47
+
48
+ u = mda.Universe("top.data", "traj.dcd")
49
+ bins, gr = rdf_from_mdanalysis(u, selection="name C", r_min=1.0, r_max=8.0, nbins=200)
50
+ ```
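+
+ Cross-species with MDAnalysis works the same way via `selection_b` (a sketch; the atom selections are illustrative):
+ ```python
+ bins, gr = rdf_from_mdanalysis(u, selection="name C", selection_b="name H", r_min=1.0, r_max=8.0, nbins=200)
+ ```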
51
+
52
+ ## CLI
53
+ ASE (XYZ/extxyz/ASE .traj):
54
+ ```
55
+ rdf-gpu --format ase --ase-file structure.xyz --selection 0,1,2 --r-max 8 --nbins 200 --device cuda
56
+ ```
57
+
58
+ Cross-species via CLI (ASE indices or MDAnalysis selections):
59
+ ```
60
+ rdf-gpu --format ase --ase-file structure.xyz --selection-a 0,1,2 --selection-b 3,4,5 --r-max 8 --nbins 200 --device cuda --ordered-pairs
61
+ ```
62
+ (`--selection-b` automatically disables half-fill so pairs are ordered; `--ordered-pairs` is therefore optional here.)
63
+
64
+ LAMMPS dump (lammpstrj) via MDAnalysis:
65
+ ```
66
+ rdf-gpu --format lammps-dump --trajectory dump.lammpstrj --selection "all" --r-max 8 --nbins 200 --device cuda
67
+ ```
68
+
69
+ MDAnalysis:
70
+ ```
71
+ rdf-gpu --format mdanalysis --topology top.data --trajectory traj.dcd --selection "name C" --r-max 8 --nbins 200 --device cuda --out results/rdf.npz --plot results/rdf.png
72
+ ```
73
+
74
+ `--ordered-pairs` switches to counting ordered pairs (disables half-fill). `--no-wrap` skips wrapping positions into the cell, for coordinates you have already wrapped upstream.
75
+
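+ For example, counting ordered pairs on coordinates that were already wrapped upstream could look like this (`wrapped.extxyz` is a placeholder file name):
+ ```
+ rdf-gpu --format ase --ase-file wrapped.extxyz --selection-a 0,1,2 --r-max 8 --nbins 200 --device cuda --ordered-pairs --no-wrap
+ ```
+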
76
+ ## Docs / examples / tests
77
+ - Docs in `docs/` (index, quickstart, api).
78
+ - Examples in `examples/` for basic, ASE, and MDAnalysis workflows.
79
+ - Tests in `tests/` (install dev extras with `pip install -e .[dev]`, then run `pytest`).
80
+ - Build Sphinx docs with `pip install -e .[docs]`, then `sphinx-build -b html docs/source docs/build/html`.
81
+
82
+ ## Citation
83
+ See `CITATION.cff` for how to cite cuRDF in your work.
curdf-0.1.0/cuRDF.egg-info/PKG-INFO ADDED
@@ -0,0 +1,20 @@
1
+ Metadata-Version: 2.4
2
+ Name: cuRDF
3
+ Version: 0.1.0
4
+ Summary: GPU-accelerated radial distribution functions using Toolkit-Ops + PyTorch with MDAnalysis and ASE adapters.
5
+ Requires-Python: >=3.10
6
+ License-File: LICENSE
7
+ Requires-Dist: torch
8
+ Requires-Dist: numpy
9
+ Requires-Dist: nvalchemiops
10
+ Provides-Extra: analysis
11
+ Requires-Dist: MDAnalysis; extra == "analysis"
12
+ Requires-Dist: ase; extra == "analysis"
13
+ Requires-Dist: matplotlib; extra == "analysis"
14
+ Provides-Extra: dev
15
+ Requires-Dist: pytest; extra == "dev"
16
+ Provides-Extra: docs
17
+ Requires-Dist: sphinx; extra == "docs"
18
+ Requires-Dist: sphinx-rtd-theme; extra == "docs"
19
+ Requires-Dist: myst-parser; extra == "docs"
20
+ Dynamic: license-file
curdf-0.1.0/cuRDF.egg-info/SOURCES.txt ADDED
@@ -0,0 +1,16 @@
1
+ LICENSE
2
+ README.md
3
+ pyproject.toml
4
+ cuRDF.egg-info/PKG-INFO
5
+ cuRDF.egg-info/SOURCES.txt
6
+ cuRDF.egg-info/dependency_links.txt
7
+ cuRDF.egg-info/entry_points.txt
8
+ cuRDF.egg-info/requires.txt
9
+ cuRDF.egg-info/top_level.txt
10
+ curdf/__init__.py
11
+ curdf/adapters.py
12
+ curdf/cell.py
13
+ curdf/cli.py
14
+ curdf/neighbor.py
15
+ curdf/rdf.py
16
+ tests/test_rdf.py
curdf-0.1.0/cuRDF.egg-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ rdf-gpu = curdf.cli:main
curdf-0.1.0/cuRDF.egg-info/requires.txt ADDED
@@ -0,0 +1,16 @@
1
+ torch
2
+ numpy
3
+ nvalchemiops
4
+
5
+ [analysis]
6
+ MDAnalysis
7
+ ase
8
+ matplotlib
9
+
10
+ [dev]
11
+ pytest
12
+
13
+ [docs]
14
+ sphinx
15
+ sphinx-rtd-theme
16
+ myst-parser
curdf-0.1.0/cuRDF.egg-info/top_level.txt ADDED
@@ -0,0 +1 @@
1
+ curdf
curdf-0.1.0/curdf/__init__.py ADDED
@@ -0,0 +1,12 @@
1
+ """
2
+ cuRDF: GPU-accelerated radial distribution functions with MDAnalysis/ASE adapters.
3
+ """
4
+
5
+ from .rdf import compute_rdf
6
+ from .adapters import rdf_from_mdanalysis, rdf_from_ase
7
+
8
+ __all__ = [
9
+ "compute_rdf",
10
+ "rdf_from_mdanalysis",
11
+ "rdf_from_ase",
12
+ ]
curdf-0.1.0/curdf/adapters.py ADDED
@@ -0,0 +1,180 @@
1
+ from collections.abc import Iterable
2
+ from typing import Sequence
3
+
4
+ import numpy as np
5
+
6
+ try:
7
+ import MDAnalysis as mda
8
+ from MDAnalysis.lib.mdamath import triclinic_vectors
9
+ from MDAnalysis.transformations import wrap as mda_wrap
10
+ except ImportError:
11
+ mda = None
12
+ triclinic_vectors = None
13
+ mda_wrap = None
14
+
15
+ try:
16
+ from ase import Atoms
17
+ except ImportError:
18
+ Atoms = None
19
+
20
+ from .rdf import accumulate_rdf
21
+
22
+
23
+ def _mdanalysis_cell_matrix(dimensions):
24
+ """
25
+ MDAnalysis gives [a, b, c, alpha, beta, gamma]; convert to 3x3.
26
+ """
27
+ if triclinic_vectors is None:
28
+ raise ImportError("MDAnalysis not available")
29
+ return np.array(triclinic_vectors(dimensions), dtype=np.float32)
30
+
31
+
32
+ def rdf_from_mdanalysis(
33
+ universe,
34
+ selection: str = "all",
35
+ selection_b: str | None = None,
36
+ r_min: float = 1.0,
37
+ r_max: float = 6.0,
38
+ nbins: int = 100,
39
+ device="cuda",
40
+ torch_dtype=None,
41
+ half_fill: bool = True,
42
+ max_neighbors: int = 2048,
43
+ wrap_positions: bool = True,
44
+ ):
45
+ """
46
+ Compute g(r) from an MDAnalysis Universe across all trajectory frames.
47
+ selection_b: optional second selection for cross-species RDF (A in selection, B in selection_b).
48
+ """
49
+ if mda is None:
50
+ raise ImportError("MDAnalysis must be installed for rdf_from_mdanalysis")
51
+ if torch_dtype is None:
52
+ import torch
53
+ torch_dtype = torch.float32
54
+
55
+ ag_a = universe.select_atoms(selection)
56
+ ag_b = universe.select_atoms(selection_b) if selection_b is not None else ag_a
57
+ if wrap_positions and mda_wrap is not None:
58
+ ag_wrap = ag_a if selection_b is None else (ag_a | ag_b)
59
+ universe.trajectory.add_transformations(mda_wrap(ag_wrap, compound="atoms"))
60
+
61
+ def frames():
62
+ for ts in universe.trajectory:
63
+ cell = _mdanalysis_cell_matrix(ts.dimensions)
64
+ if selection_b is None:
65
+ yield {
66
+ "positions": ag_a.positions.astype(np.float32, copy=False),
67
+ "cell": cell,
68
+ "pbc": (True, True, True),
69
+ }
70
+ else:
71
+ pos_a = ag_a.positions.astype(np.float32, copy=False)
72
+ pos_b = ag_b.positions.astype(np.float32, copy=False)
73
+ pos = np.concatenate([pos_a, pos_b], axis=0)
74
+ group_a_mask = np.zeros(len(pos), dtype=bool)
75
+ group_b_mask = np.zeros(len(pos), dtype=bool)
76
+ group_a_mask[: len(pos_a)] = True
77
+ group_b_mask[len(pos_a) :] = True
78
+ yield {
79
+ "positions": pos,
80
+ "cell": cell,
81
+ "pbc": (True, True, True),
82
+ "group_a_mask": group_a_mask,
83
+ "group_b_mask": group_b_mask,
84
+ }
85
+
86
+ if selection_b is not None and half_fill:
87
+ half_fill = False # cross-species -> ordered pairs
88
+
89
+ return accumulate_rdf(
90
+ frames(),
91
+ r_min=r_min,
92
+ r_max=r_max,
93
+ nbins=nbins,
94
+ device=device,
95
+ torch_dtype=torch_dtype,
96
+ half_fill=half_fill,
97
+ max_neighbors=max_neighbors,
98
+ )
99
+
100
+
101
+ def _extract_selection_indices(selection: Sequence[int] | None, n_atoms: int):
102
+ if selection is None:
103
+ return np.arange(n_atoms)
104
+ idx = np.asarray(selection, dtype=int)
105
+ if idx.ndim != 1:
106
+ raise ValueError("selection indices must be 1D")
107
+ if idx.min(initial=0) < 0 or idx.max(initial=0) >= n_atoms:
108
+ raise ValueError("selection indices out of bounds")
109
+ return idx
110
+
111
+
112
+ def rdf_from_ase(
113
+ atoms_or_trajectory,
114
+ selection: Sequence[int] | None = None,
115
+ selection_b: Sequence[int] | None = None,
116
+ r_min: float = 1.0,
117
+ r_max: float = 6.0,
118
+ nbins: int = 100,
119
+ device="cuda",
120
+ torch_dtype=None,
121
+ half_fill: bool = True,
122
+ max_neighbors: int = 2048,
123
+ wrap_positions: bool = True,
124
+ ):
125
+ """
126
+ Compute g(r) from an ASE Atoms or iterable of Atoms (trajectory).
127
+ selection/selection_b: index lists for group A and group B (cross-species). With only selection provided, computes A–A.
128
+ """
129
+ if Atoms is None:
130
+ raise ImportError("ASE must be installed for rdf_from_ase")
131
+ if torch_dtype is None:
132
+ import torch
133
+ torch_dtype = torch.float32
134
+
135
+ def _frames_iter():
136
+ if hasattr(atoms_or_trajectory, "get_positions"):
137
+ iterable = (atoms_or_trajectory,)
138
+ elif isinstance(atoms_or_trajectory, Iterable):
139
+ iterable = atoms_or_trajectory
140
+ else:
141
+ raise TypeError("atoms_or_trajectory must be ASE Atoms or iterable of Atoms")
142
+
143
+ for frame in iterable:
144
+ if not hasattr(frame, "get_positions"):
145
+ raise TypeError("Each frame must be ASE Atoms")
146
+ n_atoms = len(frame)
147
+ idx_a = _extract_selection_indices(selection, n_atoms)
148
+ idx_b = _extract_selection_indices(selection_b, n_atoms) if selection_b is not None else idx_a
149
+ pos_all = frame.get_positions(wrap=wrap_positions)
150
+ pos_a = pos_all[idx_a]
151
+ pos_b = pos_all[idx_b]
152
+ pos = np.concatenate([pos_a, pos_b], axis=0)
153
+ cell = np.array(frame.get_cell().array, dtype=np.float32)
154
+ pbc = tuple(bool(x) for x in frame.get_pbc())
155
+ group_a_mask = np.zeros(len(pos), dtype=bool)
156
+ group_b_mask = np.zeros(len(pos), dtype=bool)
157
+ group_a_mask[: len(pos_a)] = True
158
+ group_b_mask[len(pos_a) :] = True
159
+
160
+ yield {
161
+ "positions": pos.astype(np.float32, copy=False),
162
+ "cell": cell,
163
+ "pbc": pbc,
164
+ "group_a_mask": group_a_mask,
165
+ "group_b_mask": group_b_mask,
166
+ }
167
+
168
+ if selection_b is not None and half_fill:
169
+ half_fill = False # cross-species -> ordered pairs
170
+
171
+ return accumulate_rdf(
172
+ _frames_iter(),
173
+ r_min=r_min,
174
+ r_max=r_max,
175
+ nbins=nbins,
176
+ device=device,
177
+ torch_dtype=torch_dtype,
178
+ half_fill=half_fill,
179
+ max_neighbors=max_neighbors,
180
+ )
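
For reference, a trajectory can be passed to `rdf_from_ase` as a list of Atoms; a minimal sketch (not part of the packaged files; `traj.extxyz` is a placeholder file name, and `device="cpu"` avoids needing a GPU):
```python
from ase.io import read
from curdf import rdf_from_ase

# index=":" makes ase.io.read return a list of Atoms; g(r) is averaged over all frames.
frames = read("traj.extxyz", index=":")  # placeholder trajectory file
bins, gr = rdf_from_ase(frames, selection=None, r_min=1.0, r_max=8.0, nbins=200, device="cpu")
```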
curdf-0.1.0/curdf/cell.py ADDED
@@ -0,0 +1,32 @@
1
+ import torch
2
+ from torch import Tensor
3
+
4
+
5
+ def cell_tensor(cell: Tensor | list | tuple, device, dtype) -> Tensor:
6
+ """
7
+ Convert an input (3,3) cell array into a torch tensor shaped (1,3,3).
8
+ Accepts triclinic cells. Does not assume orthorhombic.
9
+ """
10
+ cell_t = torch.as_tensor(cell, device=device, dtype=dtype)
11
+ if cell_t.shape != (3, 3):
12
+ raise ValueError(f"cell must be (3,3); got {tuple(cell_t.shape)}")
13
+ return cell_t.unsqueeze(0)
14
+
15
+
16
+ def pbc_tensor(pbc: Tensor | list | tuple, device) -> Tensor:
17
+ """
18
+ Convert PBC flags to shape (1,3) boolean tensor.
19
+ """
20
+ pbc_t = torch.as_tensor(pbc, device=device, dtype=torch.bool)
21
+ if pbc_t.shape != (3,):
22
+ raise ValueError(f"pbc must be (3,); got {tuple(pbc_t.shape)}")
23
+ return pbc_t.unsqueeze(0)
24
+
25
+
26
+ def cell_volume(cell: Tensor) -> float:
27
+ """
28
+ Compute volume from a (1,3,3) cell tensor (triclinic supported).
29
+ """
30
+ if cell.shape != (1, 3, 3):
31
+ raise ValueError(f"cell must be (1,3,3); got {tuple(cell.shape)}")
32
+ return torch.det(cell[0]).abs().item()
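
A quick usage sketch of these helpers (illustrative only, not part of the packaged files):
```python
import torch
from curdf.cell import cell_tensor, cell_volume, pbc_tensor

# 10 Angstrom cubic cell with full periodic boundary conditions.
cell = cell_tensor(torch.eye(3) * 10.0, device="cpu", dtype=torch.float32)
pbc = pbc_tensor((True, True, True), device="cpu")
print(cell.shape, pbc.shape, cell_volume(cell))  # torch.Size([1, 3, 3]) torch.Size([1, 3]) ~1000.0
```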
curdf-0.1.0/curdf/cli.py ADDED
@@ -0,0 +1,166 @@
1
+ import argparse
2
+ import sys
3
+ from pathlib import Path
4
+
5
+ import numpy as np
6
+
7
+
8
+ def _parse_args():
9
+ p = argparse.ArgumentParser(description="cuRDF: GPU RDF using Toolkit-Ops + PyTorch")
10
+ p.add_argument(
11
+ "--format",
12
+ choices=["mdanalysis", "ase", "lammps-dump"],
13
+ required=True,
14
+ help="Input backend",
15
+ )
16
+ p.add_argument("--topology", help="Topology file (MDAnalysis)")
17
+ p.add_argument("--trajectory", nargs="+", help="Trajectory file(s) (MDAnalysis)")
18
+ p.add_argument("--ase-file", help="Structure/trajectory file readable by ASE")
19
+ p.add_argument("--ase-index", default=":", help="ASE index (default all frames)")
20
+ p.add_argument("--selection", default=None, help="(Deprecated) alias for --selection-a")
21
+ p.add_argument("--selection-a", default=None, help="MDAnalysis selection or ASE comma-separated indices for group A")
22
+ p.add_argument("--selection-b", default=None, help="MDAnalysis selection or ASE comma-separated indices for group B")
23
+ p.add_argument("--r-min", type=float, default=1.0)
24
+ p.add_argument("--r-max", type=float, default=6.0)
25
+ p.add_argument("--nbins", type=int, default=100)
26
+ p.add_argument("--device", default="cuda")
27
+ p.add_argument("--dtype", choices=["float32", "float64"], default="float32")
28
+ p.add_argument("--half-fill", action="store_true", default=True, help="Use unique pairs (identical species)")
29
+ p.add_argument("--ordered-pairs", action="store_true", help="Disable half-fill to count ordered pairs")
30
+ p.add_argument("--max-neighbors", type=int, default=2048)
31
+ p.add_argument("--no-wrap", action="store_true", help="Skip wrapping positions into the cell")
32
+ p.add_argument("--plot", type=Path, help="Optional PNG plot output")
33
+ p.add_argument("--out", type=Path, default=Path("rdf.npz"), help="NPZ output path")
34
+ return p.parse_args()
35
+
36
+
37
+ def main():
38
+ args = _parse_args()
39
+ torch_dtype = {"float32": "float32", "float64": "float64"}[args.dtype]
40
+ half_fill = False if args.ordered_pairs else args.half_fill
41
+ # Cross-species (selection-b) should use ordered pairs
42
+ if args.selection_b and half_fill:
43
+ half_fill = False
44
+
45
+ if args.format == "mdanalysis":
46
+ if args.topology is None or args.trajectory is None:
47
+ sys.exit("For mdanalysis format, provide --topology and --trajectory")
48
+ try:
49
+ import MDAnalysis as mda
50
+ except ImportError:
51
+ sys.exit("MDAnalysis is required for --format mdanalysis")
52
+
53
+ u = mda.Universe(args.topology, *args.trajectory)
54
+ selection_a = args.selection_a or args.selection
55
+ selection_b = args.selection_b
56
+ if selection_a is None:
57
+ selection_a = "all"
58
+ from .adapters import rdf_from_mdanalysis
59
+
60
+ bins, gr = rdf_from_mdanalysis(
61
+ u,
62
+ selection=selection_a,
63
+ selection_b=selection_b,
64
+ r_min=args.r_min,
65
+ r_max=args.r_max,
66
+ nbins=args.nbins,
67
+ device=args.device,
68
+ torch_dtype=torch_dtype,
69
+ half_fill=half_fill,
70
+ max_neighbors=args.max_neighbors,
71
+ wrap_positions=not args.no_wrap,
72
+ )
73
+ elif args.format == "lammps-dump":
74
+ if args.trajectory is None:
75
+ sys.exit("For lammps-dump format, provide --trajectory (LAMMPS dump / lammpstrj)")
76
+ try:
77
+ import MDAnalysis as mda
78
+ except ImportError:
79
+ sys.exit("MDAnalysis is required for --format lammps-dump")
80
+
81
+ try:
82
+ u = mda.Universe(args.trajectory[0], format="LAMMPSDUMP")
83
+ except Exception as exc:
84
+ sys.exit(f"Failed to load LAMMPS dump: {exc}")
85
+
86
+ selection_a = args.selection_a or args.selection
87
+ selection_b = args.selection_b
88
+ if selection_a is None:
89
+ selection_a = "all"
90
+ from .adapters import rdf_from_mdanalysis
91
+
92
+ bins, gr = rdf_from_mdanalysis(
93
+ u,
94
+ selection=selection_a,
95
+ selection_b=selection_b,
96
+ r_min=args.r_min,
97
+ r_max=args.r_max,
98
+ nbins=args.nbins,
99
+ device=args.device,
100
+ torch_dtype=torch_dtype,
101
+ half_fill=half_fill,
102
+ max_neighbors=args.max_neighbors,
103
+ wrap_positions=not args.no_wrap,
104
+ )
105
+ else:
106
+ if args.ase_file is None:
107
+ sys.exit("For ase format, provide --ase-file")
108
+ try:
109
+ import ase.io
110
+ except ImportError:
111
+ sys.exit("ASE is required for --format ase")
112
+
113
+ allowed_ext = {".xyz", ".extxyz", ".traj"}
114
+ if Path(args.ase_file).suffix.lower() not in allowed_ext:
115
+ sys.exit(f"ASE mode supports {sorted(allowed_ext)}; got {args.ase_file}")
116
+
117
+ frames = ase.io.read(args.ase_file, index=args.ase_index)
118
+ # ase.io.read returns a single Atoms for one frame, or a list of Atoms for a slice;
+ # rdf_from_ase accepts either, so pass the result through unchanged.
+ atoms_or_traj = frames
122
+
123
+ sel_a = None
124
+ sel_b = None
125
+ selection_a = args.selection_a or args.selection
126
+ if selection_a:
127
+ sel_a = [int(x) for x in selection_a.split(",") if x.strip()]
128
+ if args.selection_b:
129
+ sel_b = [int(x) for x in args.selection_b.split(",") if x.strip()]
130
+
131
+ from .adapters import rdf_from_ase
132
+
133
+ bins, gr = rdf_from_ase(
134
+ atoms_or_traj,
135
+ selection=sel_a,
136
+ selection_b=sel_b,
137
+ r_min=args.r_min,
138
+ r_max=args.r_max,
139
+ nbins=args.nbins,
140
+ device=args.device,
141
+ torch_dtype=torch_dtype,
142
+ half_fill=half_fill,
143
+ max_neighbors=args.max_neighbors,
144
+ wrap_positions=not args.no_wrap,
145
+ )
146
+
147
+ args.out.parent.mkdir(parents=True, exist_ok=True)
148
+ np.savez(args.out, bins=bins, gr=gr)
149
+
150
+ if args.plot:
151
+ try:
152
+ import matplotlib
153
+ matplotlib.use("Agg")
154
+ import matplotlib.pyplot as plt
155
+ except ImportError:
156
+ sys.exit("matplotlib required for plotting")
157
+ plt.plot(bins, gr)
158
+ plt.xlabel("r (A)")
159
+ plt.ylabel("g(r)")
160
+ plt.hlines(1.0, xmin=args.r_min, xmax=args.r_max, colors="k", linestyles="dashed")
161
+ plt.savefig(args.plot, dpi=300)
162
+ return 0
163
+
164
+
165
+ if __name__ == "__main__":
166
+ raise SystemExit(main())
curdf-0.1.0/curdf/neighbor.py ADDED
@@ -0,0 +1,29 @@
1
+ from nvalchemiops.neighborlist import neighbor_list
2
+ import torch
3
+ from torch import Tensor
4
+
5
+
6
+ def build_neighbor_list(
7
+ positions: Tensor,
8
+ r_max: float,
9
+ cell: Tensor,
10
+ pbc: Tensor,
11
+ half_fill: bool = True,
12
+ max_neighbors: int = 2048,
13
+ method: str = "cell_list",
14
+ ):
15
+ """
16
+ Wrap Toolkit-Ops neighbor list to keep a single import site.
17
+ Returns nlist (2, num_pairs), shifts (num_pairs,3).
18
+ """
19
+ nlist, _, shifts = neighbor_list(
20
+ positions,
21
+ float(r_max),
22
+ cell=cell,
23
+ pbc=pbc,
24
+ return_neighbor_list=True,
25
+ half_fill=half_fill,
26
+ max_neighbors=max_neighbors,
27
+ method=method,
28
+ )
29
+ return nlist, shifts
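
A minimal call sketch for this wrapper (assumes nvalchemiops is installed; the tensor shapes mirror how `rdf.py` calls it, with a (1,3,3) cell and (1,3) pbc):
```python
import torch
from curdf.neighbor import build_neighbor_list

# Eight random atoms in a 10 Angstrom cubic box.
positions = torch.rand(8, 3) * 10.0
cell = (torch.eye(3) * 10.0).unsqueeze(0)   # (1, 3, 3)
pbc = torch.tensor([[True, True, True]])    # (1, 3)

nlist, shifts = build_neighbor_list(positions, 6.0, cell=cell, pbc=pbc, half_fill=True)
print(nlist.shape, shifts.shape)  # expected: (2, num_pairs) and (num_pairs, 3)
```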
curdf-0.1.0/curdf/rdf.py ADDED
@@ -0,0 +1,221 @@
1
+ import math
2
+ from typing import Iterable
3
+
4
+ import torch
5
+ from torch import Tensor
6
+
7
+ from .cell import cell_tensor, cell_volume, pbc_tensor
8
+ from .neighbor import build_neighbor_list
9
+
10
+
11
+ def _update_counts(
12
+ counts: Tensor,
13
+ positions: Tensor,
14
+ cell: Tensor,
15
+ pbc: Tensor,
16
+ edges: Tensor,
17
+ r_min: float,
18
+ r_max: float,
19
+ half_fill: bool,
20
+ max_neighbors: int,
21
+ group_a_mask: Tensor | None = None,
22
+ group_b_mask: Tensor | None = None,
23
+ ) -> float:
24
+ """
25
+ Accumulate pair counts for one frame.
26
+ Returns normalization factor (n_group_a * rho_group_b) so the caller can normalize after multiple frames.
27
+ """
28
+ dr = (r_max - r_min) / (len(edges) - 1)
29
+
30
+ nlist, shifts = build_neighbor_list(
31
+ positions,
32
+ r_max,
33
+ cell=cell,
34
+ pbc=pbc,
35
+ half_fill=half_fill,
36
+ max_neighbors=max_neighbors,
37
+ )
38
+
39
+ src = nlist[0].to(torch.int64)
40
+ tgt = nlist[1].to(torch.int64)
41
+
42
+ shift_cart = (shifts.to(positions.dtype) @ cell[0])
43
+ dr_vec = (positions[tgt] + shift_cart) - positions[src]
44
+ dist = torch.linalg.norm(dr_vec, dim=1)
45
+
46
+ valid = (dist >= r_min) & (dist < r_max)
47
+ if group_a_mask is not None and group_b_mask is not None:
48
+ src_mask = group_a_mask[src]
49
+ tgt_mask = group_b_mask[tgt]
50
+ valid = valid & src_mask & tgt_mask
51
+ dist = dist[valid]
52
+
53
+ bin_idx = torch.floor((dist - r_min) / dr).to(torch.int64)
54
+ bin_idx = torch.clamp(bin_idx, 0, counts.numel() - 1)
55
+ counts.scatter_add_(0, bin_idx, torch.ones_like(bin_idx, dtype=torch.int64))
56
+
57
+ volume = cell_volume(cell)
58
+ if group_a_mask is not None and group_b_mask is not None:
59
+ n_a = group_a_mask.sum().item()
60
+ n_b = group_b_mask.sum().item()
61
+ norm_factor = n_a * (n_b / volume)
62
+ else:
63
+ n_atoms = positions.shape[0]
64
+ norm_factor = n_atoms * (n_atoms / volume) # n_atoms * rho
65
+ return norm_factor
66
+
67
+
68
+ def _finalize_gr(
69
+ counts: Tensor,
70
+ edges: Tensor,
71
+ total_norm: float,
72
+ half_fill: bool,
73
+ cross_mode: bool,
74
+ ) -> tuple[Tensor, Tensor]:
75
+ r1 = edges[:-1]
76
+ r2 = edges[1:]
77
+ shell_vol = (4.0 / 3.0) * math.pi * (r2**3 - r1**3)
78
+ pair_factor = 1.0 if cross_mode else (2.0 if half_fill else 1.0)
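+ # Normalization: each frame added N_A * (N_B / V) (or N * N / V for identical species)
+ # to total_norm, so g(r) = pair_factor * counts / (shell_vol * total_norm).
+ # pair_factor = 2 converts unique (half_fill) pair counts back to ordered-pair counts;
+ # cross-species counts are already ordered A->B, hence pair_factor = 1.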
79
+
80
+ if total_norm == 0:
81
+ raise ValueError("Total normalization is zero; no frames processed?")
82
+ g_r = (pair_factor * counts.to(r1.dtype)) / (shell_vol * total_norm)
83
+ centers = (edges[:-1] + edges[1:]) * 0.5
84
+ return centers, g_r
85
+
86
+
87
+ @torch.no_grad()
88
+ def compute_rdf(
89
+ positions,
90
+ cell,
91
+ pbc=(True, True, True),
92
+ r_min: float = 1.0,
93
+ r_max: float = 6.0,
94
+ nbins: int = 100,
95
+ device: str | torch.device = "cuda",
96
+ torch_dtype: torch.dtype = torch.float32,
97
+ half_fill: bool = True,
98
+ max_neighbors: int = 2048,
99
+ group_a_indices=None,
100
+ group_b_indices=None,
101
+ ):
102
+ """
103
+ Compute g(r) for a single frame of positions.
104
+
105
+ Args:
106
+ positions: array-like (N,3)
107
+ cell: (3,3) cell matrix (triclinic allowed)
108
+ pbc: iterable of 3 booleans
109
+ r_min/r_max/nbins: histogram parameters
110
+ half_fill: True for identical species (unique pairs); False for ordered pairs
111
+ max_neighbors: passed to Toolkit-Ops neighbor list
112
+ group_a_indices/group_b_indices: optional index lists for cross-species RDF.
113
+ If provided, counts pairs with src in A and tgt in B. When both are None,
114
+ uses all atoms (identical-species mode).
115
+ """
116
+ device = torch.device(device)
117
+ pos_t = torch.as_tensor(positions, device=device, dtype=torch_dtype)
118
+ if pos_t.ndim != 2 or pos_t.shape[1] != 3:
119
+ raise ValueError(f"positions must be (N,3); got {tuple(pos_t.shape)}")
120
+
121
+ cell_t = cell_tensor(cell, device=device, dtype=torch_dtype)
122
+ pbc_t = pbc_tensor(pbc, device=device)
123
+
124
+ edges = torch.linspace(r_min, r_max, nbins + 1, device=device, dtype=torch_dtype)
125
+ counts = torch.zeros(nbins, device=device, dtype=torch.int64)
126
+
127
+ group_a_mask = group_b_mask = None
128
+ if group_a_indices is not None:
129
+ group_a_mask = torch.zeros(pos_t.shape[0], device=device, dtype=torch.bool)
130
+ group_a_mask[torch.as_tensor(group_a_indices, device=device, dtype=torch.long)] = True
131
+ if group_b_indices is not None:
132
+ group_b_mask = torch.zeros(pos_t.shape[0], device=device, dtype=torch.bool)
133
+ group_b_mask[torch.as_tensor(group_b_indices, device=device, dtype=torch.long)] = True
134
+ elif group_a_mask is not None:
135
+ group_b_mask = group_a_mask
136
+
137
+ total_norm = _update_counts(
138
+ counts,
139
+ pos_t,
140
+ cell=cell_t,
141
+ pbc=pbc_t,
142
+ edges=edges,
143
+ r_min=r_min,
144
+ r_max=r_max,
145
+ half_fill=half_fill,
146
+ max_neighbors=max_neighbors,
147
+ group_a_mask=group_a_mask,
148
+ group_b_mask=group_b_mask,
149
+ )
150
+
151
+ cross_mode = group_a_mask is not None and group_b_mask is not None and not torch.equal(
152
+ group_a_mask, group_b_mask
153
+ )
154
+ centers, g_r = _finalize_gr(
155
+ counts, edges, total_norm, half_fill=half_fill, cross_mode=cross_mode
156
+ )
157
+ return centers.cpu().numpy(), g_r.cpu().numpy()
158
+
159
+
160
+ @torch.no_grad()
161
+ def accumulate_rdf(
162
+ frames: Iterable[dict],
163
+ r_min: float,
164
+ r_max: float,
165
+ nbins: int,
166
+ device: str | torch.device,
167
+ torch_dtype: torch.dtype,
168
+ half_fill: bool,
169
+ max_neighbors: int,
170
+ ):
171
+ """
172
+ General accumulator for multiple frames.
173
+ frames: iterable yielding dicts with keys positions, cell, pbc, and optional group_a_mask/group_b_mask
174
+ """
175
+ device = torch.device(device)
176
+ edges = torch.linspace(r_min, r_max, nbins + 1, device=device, dtype=torch_dtype)
177
+ counts = torch.zeros(nbins, device=device, dtype=torch.int64)
178
+ total_norm = 0.0
179
+
180
+ cross_flag = False
181
+
182
+ for frame in frames:
183
+ pos_t = torch.as_tensor(frame["positions"], device=device, dtype=torch_dtype)
184
+ cell_t = cell_tensor(frame["cell"], device=device, dtype=torch_dtype)
185
+ pbc_t = pbc_tensor(frame["pbc"], device=device)
186
+ group_a_mask = frame.get("group_a_mask")
187
+ group_b_mask = frame.get("group_b_mask")
188
+ if group_a_mask is not None:
189
+ group_a_mask = torch.as_tensor(group_a_mask, device=device, dtype=torch.bool)
190
+ if group_b_mask is not None:
191
+ group_b_mask = torch.as_tensor(group_b_mask, device=device, dtype=torch.bool)
192
+ elif group_a_mask is not None:
193
+ group_b_mask = group_a_mask
194
+ if group_a_mask is not None and group_b_mask is not None and not torch.equal(
195
+ group_a_mask, group_b_mask
196
+ ):
197
+ cross_flag = True
198
+
199
+ norm = _update_counts(
200
+ counts,
201
+ pos_t,
202
+ cell=cell_t,
203
+ pbc=pbc_t,
204
+ edges=edges,
205
+ r_min=r_min,
206
+ r_max=r_max,
207
+ half_fill=half_fill,
208
+ max_neighbors=max_neighbors,
209
+ group_a_mask=group_a_mask,
210
+ group_b_mask=group_b_mask,
211
+ )
212
+ total_norm += norm
213
+
214
+ centers, g_r = _finalize_gr(
215
+ counts,
216
+ edges,
217
+ total_norm,
218
+ half_fill=half_fill,
219
+ cross_mode=cross_flag,
220
+ )
221
+ return centers.cpu().numpy(), g_r.cpu().numpy()
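
As a single-frame sanity check of `compute_rdf` (this mirrors `tests/test_rdf.py`; it assumes the package and its nvalchemiops dependency are installed):
```python
import numpy as np
import torch
from curdf import compute_rdf

# Two atoms 1.5 Angstrom apart in a 10 Angstrom cubic box.
positions = np.array([[0.0, 0.0, 0.0], [1.5, 0.0, 0.0]], dtype=np.float32)
cell = np.diag([10.0, 10.0, 10.0])

bins, gr = compute_rdf(
    positions, cell,
    pbc=(True, True, True),
    r_min=0.0, r_max=5.0, nbins=5,
    device="cpu",              # "cuda" also works if a GPU is available
    torch_dtype=torch.float32,
    half_fill=True,
)
print(bins)  # bin centers: [0.5, 1.5, 2.5, 3.5, 4.5]
print(gr)    # single nonzero value in the second bin
```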
curdf-0.1.0/pyproject.toml ADDED
@@ -0,0 +1,32 @@
1
+ [build-system]
2
+ requires = ["setuptools>=64", "wheel"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "cuRDF"
7
+ version = "0.1.0"
8
+ description = "GPU-accelerated radial distribution functions using Toolkit-Ops + PyTorch with MDAnalysis and ASE adapters."
9
+ requires-python = ">=3.10"
10
+ dependencies = [
11
+ "torch",
12
+ "numpy",
13
+ "nvalchemiops",
14
+ ]
15
+
16
+ [project.optional-dependencies]
17
+ analysis = [
18
+ "MDAnalysis",
19
+ "ase",
20
+ "matplotlib",
21
+ ]
22
+ dev = [
23
+ "pytest",
24
+ ]
25
+ docs = [
26
+ "sphinx",
27
+ "sphinx-rtd-theme",
28
+ "myst-parser",
29
+ ]
30
+
31
+ [project.scripts]
32
+ rdf-gpu = "curdf.cli:main"
curdf-0.1.0/setup.cfg ADDED
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
curdf-0.1.0/tests/test_rdf.py ADDED
@@ -0,0 +1,78 @@
1
+ import math
2
+ import numpy as np
3
+ import torch
4
+
5
+ from curdf.rdf import compute_rdf, accumulate_rdf
6
+
7
+
8
+ def test_compute_rdf_single_frame_two_atoms():
9
+ positions = np.array([[0.0, 0.0, 0.0], [1.5, 0.0, 0.0]], dtype=np.float32)
10
+ cell = np.diag([10.0, 10.0, 10.0])
11
+ bins, gr = compute_rdf(
12
+ positions,
13
+ cell,
14
+ pbc=(True, True, True),
15
+ r_min=0.0,
16
+ r_max=5.0,
17
+ nbins=5,
18
+ device="cpu",
19
+ torch_dtype=torch.float32,
20
+ half_fill=True,
21
+ )
22
+ # Distance 1.5 should fall into bin index 1 (edges 0,1,2,...)
23
+ assert math.isclose(bins[1], 1.5)
24
+ # Expected g(r) from analytic normalization
25
+ shell_vol = (4.0 / 3.0) * math.pi * (2.0**3 - 1.0**3)
26
+ volume = 10.0**3
27
+ n_atoms = 2
28
+ rho = n_atoms / volume
29
+ expected = (2.0 * 1.0) / (shell_vol * (n_atoms * rho))
30
+ assert math.isclose(gr[1], expected, rel_tol=1e-5)
31
+
32
+
33
+ def test_accumulate_rdf_multiple_frames():
34
+ cell = np.diag([8.0, 8.0, 8.0])
35
+ positions1 = np.array([[0.0, 0.0, 0.0], [2.0, 0.0, 0.0]], dtype=np.float32)
36
+ positions2 = np.array([[0.0, 0.0, 0.0], [3.0, 0.0, 0.0]], dtype=np.float32)
37
+ frames = [
38
+ {"positions": positions1, "cell": cell, "pbc": (True, True, True)},
39
+ {"positions": positions2, "cell": cell, "pbc": (True, True, True)},
40
+ ]
41
+ bins, gr = accumulate_rdf(
42
+ frames,
43
+ r_min=0.0,
44
+ r_max=5.0,
45
+ nbins=5,
46
+ device="cpu",
47
+ torch_dtype=torch.float32,
48
+ half_fill=True,
49
+ max_neighbors=1024,
50
+ )
51
+ # Distances 2.0 and 3.0 fall into bins 2 and 3 (bin width 1.0); check both nonzero
52
+ assert gr[2] > 0
53
+ assert gr[3] > 0
54
+
55
+
56
+ def test_compute_rdf_cross_species():
57
+ # A at origin, B at 1.5
58
+ positions = np.array([[0.0, 0.0, 0.0], [1.5, 0.0, 0.0]], dtype=np.float32)
59
+ cell = np.diag([10.0, 10.0, 10.0])
60
+ bins, gr = compute_rdf(
61
+ positions,
62
+ cell,
63
+ pbc=(True, True, True),
64
+ r_min=0.0,
65
+ r_max=5.0,
66
+ nbins=5,
67
+ device="cpu",
68
+ torch_dtype=torch.float32,
69
+ half_fill=False, # cross-species -> no pair doubling
70
+ group_a_indices=[0],
71
+ group_b_indices=[1],
72
+ )
73
+ shell_vol = (4.0 / 3.0) * math.pi * (2.0**3 - 1.0**3)
74
+ volume = 10.0**3
75
+ n_a = 1
76
+ n_b = 1
77
+ expected = 1.0 / (shell_vol * (n_a * (n_b / volume)))
78
+ assert math.isclose(gr[1], expected, rel_tol=1e-5)