chemparseplot 1.3.0__tar.gz → 1.4.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57) hide show
  1. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/.gitignore +8 -0
  2. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/PKG-INFO +1 -1
  3. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/_version.py +2 -2
  4. chemparseplot-1.4.0/chemparseplot/parse/__init__.py +9 -0
  5. chemparseplot-1.4.0/chemparseplot/parse/chemgp_hdf5.py +174 -0
  6. chemparseplot-1.4.0/chemparseplot/parse/chemgp_jsonl.py +305 -0
  7. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/chemparseplot/parse/eon/neb.py +1 -1
  8. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/chemparseplot/parse/neb_utils.py +2 -2
  9. chemparseplot-1.4.0/chemparseplot/parse/orca/neb/__init__.py +24 -0
  10. chemparseplot-1.4.0/chemparseplot/parse/orca/neb/opi_parser.py +249 -0
  11. chemparseplot-1.4.0/chemparseplot/parse/plumed.py +425 -0
  12. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/chemparseplot/parse/trajectory/hdf5.py +2 -2
  13. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/chemparseplot/parse/trajectory/neb.py +1 -1
  14. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/chemparseplot/plot/chemgp.py +76 -0
  15. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/chemparseplot/plot/geomscan.py +8 -1
  16. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/chemparseplot/plot/neb.py +350 -9
  17. chemparseplot-1.4.0/chemparseplot/plot/plumed.py +137 -0
  18. {chemparseplot-1.3.0/chemparseplot/parse → chemparseplot-1.4.0/chemparseplot/scripts}/__init__.py +0 -2
  19. chemparseplot-1.4.0/chemparseplot/scripts/plot_gp.py +732 -0
  20. chemparseplot-1.4.0/chemparseplot/scripts/plt_neb.py +423 -0
  21. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/pyproject.toml +40 -2
  22. chemparseplot-1.4.0/tests/parse/test_chemgp_hdf5.py +117 -0
  23. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/tests/parse/test_neb_utils.py +1 -1
  24. chemparseplot-1.4.0/tests/parse/test_plumed.py +167 -0
  25. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/tests/parse/test_trajectory_hdf5.py +2 -2
  26. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/tests/parse/test_trajectory_neb.py +3 -3
  27. chemparseplot-1.4.0/tests/plot/test_chemgp_utils.py +61 -0
  28. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/tests/plot/test_neb_renderers.py +3 -3
  29. chemparseplot-1.4.0/tests/scripts/__init__.py +3 -0
  30. chemparseplot-1.4.0/tests/scripts/test_plot_gp_cli.py +54 -0
  31. chemparseplot-1.4.0/tests/scripts/test_plt_neb_cli.py +54 -0
  32. chemparseplot-1.4.0/tests/tutorials/test_chemparseplot.py +151 -0
  33. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/LICENSE +0 -0
  34. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/chemparseplot/__init__.py +0 -0
  35. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/chemparseplot/parse/converter.py +0 -0
  36. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/chemparseplot/parse/eon/gprd.py +0 -0
  37. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/chemparseplot/parse/eon/minimization.py +0 -0
  38. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/chemparseplot/parse/eon/saddle_search.py +0 -0
  39. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/chemparseplot/parse/file_.py +0 -0
  40. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/chemparseplot/parse/orca/__init__.py +0 -0
  41. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/chemparseplot/parse/orca/geomscan.py +0 -0
  42. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/chemparseplot/parse/orca/neb/interp.py +0 -0
  43. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/chemparseplot/parse/patterns.py +0 -0
  44. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/chemparseplot/parse/sella/saddle_search.py +0 -0
  45. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/chemparseplot/parse/trajectory/__init__.py +0 -0
  46. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/chemparseplot/plot/__init__.py +0 -0
  47. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/chemparseplot/plot/structs.py +0 -0
  48. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/chemparseplot/plot/theme.py +0 -0
  49. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/chemparseplot/units.py +0 -0
  50. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/chemparseplot/util.py +0 -0
  51. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/readme.md +0 -0
  52. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/tests/conftest.py +0 -0
  53. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/tests/parse/orca/test_geomscan.py +0 -0
  54. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/tests/parse/orca/test_interp.py +0 -0
  55. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/tests/parse/test_converter.py +0 -0
  56. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/tests/parse/test_patterns.py +0 -0
  57. {chemparseplot-1.3.0 → chemparseplot-1.4.0}/tests/plot/__init__.py +0 -0
@@ -171,3 +171,11 @@ cython_debug/
171
171
  /_version.py
172
172
  /.pdm-python
173
173
  *.ipynb
174
+
175
+ # Dolt database files (added by bd init)
176
+ .dolt/
177
+ *.db
178
+
179
+ # Lychee link checker cache
180
+ .lycheecache
181
+ .beads/
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: chemparseplot
3
- Version: 1.3.0
3
+ Version: 1.4.0
4
4
  Summary: Parsers and plotting tools for computational chemistry
5
5
  Project-URL: Documentation, https://chemparseplot.rgoswami.me
6
6
  Project-URL: Issues, https://github.com/HaoZeke/chemparseplot/issues
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
28
28
  commit_id: COMMIT_ID
29
29
  __commit_id__: COMMIT_ID
30
30
 
31
- __version__ = version = '1.3.0'
32
- __version_tuple__ = version_tuple = (1, 3, 0)
31
+ __version__ = version = '1.4.0'
32
+ __version_tuple__ = version_tuple = (1, 4, 0)
33
33
 
34
34
  __commit_id__ = commit_id = None
@@ -0,0 +1,9 @@
1
+ # SPDX-FileCopyrightText: 2023-present Rohit Goswami <rog32@hi.is>
2
+ #
3
+ # SPDX-License-Identifier: MIT
4
+
5
+ from chemparseplot.parse import orca, patterns
6
+
7
+ # Lazy imports for modules with optional heavy deps (h5py, pandas)
8
+ # Import directly: from chemparseplot.parse.chemgp_hdf5 import read_h5_table
9
+ # Or: from chemparseplot.parse import plumed
@@ -0,0 +1,174 @@
1
+ # SPDX-FileCopyrightText: 2023-present Rohit Goswami <rog32@hi.is>
2
+ #
3
+ # SPDX-License-Identifier: MIT
4
+
5
+ """HDF5 file I/O utilities for ChemGP data.
6
+
7
+ This module provides functions for reading structured data from ChemGP HDF5
8
+ output files. The HDF5 layout mirrors the Julia common_plot.jl helpers.
9
+
10
+ HDF5 Layout
11
+ -----------
12
+ - ``grids/<name>``: 2D arrays with attrs x_range, y_range, x_length, y_length
13
+ - ``table/<name>``: group of same-length 1D arrays
14
+ - ``paths/<name>``: point sequences (x, y or rAB, rBC)
15
+ - ``points/<name>``: point sets (x, y or pc1, pc2)
16
+ - Root attrs: metadata scalars
17
+
18
+ .. versionadded:: 1.4.0
19
+ Extracted from chemgp.plt_gp to standalone module.
20
+ """
21
+
22
+ from typing import Any
23
+
24
+ import numpy as np
25
+
26
+
27
def read_h5_table(f: Any, name: str = "table") -> Any:
    """Read a group of equal-length 1D datasets into a pandas DataFrame.

    Parameters
    ----------
    f
        Open HDF5 file object
    name
        Name of the table group (default: "table")

    Returns
    -------
    DataFrame
        One column per dataset in the group; byte-string ("S") and
        object ("O") columns are converted to Python strings.
    """
    # Local import keeps pandas an optional dependency of this module.
    import pandas as pd

    group = f[name]
    columns = {}
    for key in group.keys():
        values = group[key][()]
        text_like = values.dtype.kind in ("S", "O")
        columns[key] = (values.astype(str) if text_like else values).tolist()
    return pd.DataFrame(columns)
53
+
54
+
55
+ def read_h5_grid(
56
+ f: Any, name: str
57
+ ) -> tuple[np.ndarray, np.ndarray | None, np.ndarray | None]:
58
+ """Read a 2D grid with optional axis ranges.
59
+
60
+ Parameters
61
+ ----------
62
+ f
63
+ Open HDF5 file object
64
+ name
65
+ Name of the grid dataset
66
+
67
+ Returns
68
+ -------
69
+ tuple
70
+ (data, x_coords, y_coords) where x_coords and y_coords may be None
71
+ if axis range attributes are not present
72
+ """
73
+ ds = f[f"grids/{name}"]
74
+ data = ds[()]
75
+ x_coords = None
76
+ y_coords = None
77
+
78
+ if "x_range" in ds.attrs and "x_length" in ds.attrs:
79
+ lo, hi = ds.attrs["x_range"]
80
+ n = int(ds.attrs["x_length"])
81
+ x_coords = np.linspace(lo, hi, n)
82
+
83
+ if "y_range" in ds.attrs and "y_length" in ds.attrs:
84
+ lo, hi = ds.attrs["y_range"]
85
+ n = int(ds.attrs["y_length"])
86
+ y_coords = np.linspace(lo, hi, n)
87
+
88
+ return data, x_coords, y_coords
89
+
90
+
91
def read_h5_path(f: Any, name: str) -> dict[str, np.ndarray]:
    """Read an ordered point sequence from ``paths/<name>``.

    Parameters
    ----------
    f
        Open HDF5 file object
    name
        Name of the path dataset

    Returns
    -------
    dict
        Coordinate name -> array of values
    """
    group = f[f"paths/{name}"]
    result = {}
    for key in group.keys():
        result[key] = group[key][()]
    return result
108
+
109
+
110
def read_h5_points(f: Any, name: str) -> dict[str, np.ndarray]:
    """Read an (unordered) point set from ``points/<name>``.

    Parameters
    ----------
    f
        Open HDF5 file object
    name
        Name of the points dataset

    Returns
    -------
    dict
        Coordinate name -> array of values
    """
    group = f[f"points/{name}"]
    # Iterating the group yields member names directly.
    return {key: group[key][()] for key in group}
127
+
128
+
129
def read_h5_metadata(f: Any) -> dict[str, Any]:
    """Return all root-level attributes as a plain dictionary.

    Parameters
    ----------
    f
        Open HDF5 file object

    Returns
    -------
    dict
        Copy of the root attribute mapping
    """
    # AttributeManager is a mapping, so dict() copies key/value pairs.
    return dict(f.attrs)
143
+
144
+
145
+ def validate_hdf5_structure(
146
+ f: Any, required_groups: list[str] | None = None
147
+ ) -> list[str]:
148
+ """Validate HDF5 file has expected structure.
149
+
150
+ Parameters
151
+ ----------
152
+ f
153
+ Open HDF5 file object
154
+ required_groups
155
+ List of required group names (default: ["grids", "table"])
156
+
157
+ Returns
158
+ -------
159
+ list[str]
160
+ List of missing groups (empty if all present)
161
+
162
+ Raises
163
+ ------
164
+ ValueError
165
+ If required groups are missing
166
+ """
167
+ if required_groups is None:
168
+ required_groups = ["grids", "table"]
169
+
170
+ missing = [g for g in required_groups if g not in f]
171
+ if missing:
172
+ msg = f"Invalid HDF5 structure. Missing groups: {missing}"
173
+ raise ValueError(msg)
174
+ return missing
@@ -0,0 +1,305 @@
1
+ # SPDX-FileCopyrightText: 2023-present Rohit Goswami <rog32@hi.is>
2
+ #
3
+ # SPDX-License-Identifier: MIT
4
+
5
+ """Parsers for ChemGP JSONL output formats.
6
+
7
+ ChemGP Rust examples produce JSONL files with method comparison data,
8
+ GP quality grids, and RFF approximation benchmarks. This module provides
9
+ structured parsing into typed containers for downstream plotting.
10
+
11
+ .. versionadded:: 1.4.0
12
+ """
13
+
14
+ from __future__ import annotations
15
+
16
+ import json
17
+ from collections import defaultdict
18
+ from dataclasses import dataclass, field
19
+ from pathlib import Path
20
+ from typing import Any
21
+
22
+
23
@dataclass
class OptimizerTrace:
    """Per-method optimization trace parsed from a comparison JSONL.

    Attributes
    ----------
    method : str
        Optimizer name (e.g. ``"gp_minimize"``, ``"neb"``, ``"otgpd"``).
    steps : list[int]
        Step indices, in file order.
    oracle_calls : list[int]
        Cumulative oracle call count at each step.
    energies : list[float] | None
        Per-step energy when the records report one (minimize, dimer);
        None when no energy records were seen.
    forces : list[float] | None
        Per-step force norm when reported (dimer ``force``,
        NEB ``max_force``); None when absent.
    """

    method: str
    steps: list[int] = field(default_factory=list)
    oracle_calls: list[int] = field(default_factory=list)
    energies: list[float] | None = None
    forces: list[float] | None = None
46
+
47
+
48
@dataclass
class ComparisonData:
    """Container for one parsed optimizer-comparison JSONL file.

    Attributes
    ----------
    traces : dict[str, OptimizerTrace]
        One trace per optimizer, keyed by method name.
    summary : dict | None
        The ``summary: true`` record, or None if the file had none.
    """

    traces: dict[str, OptimizerTrace] = field(default_factory=dict)
    summary: dict[str, Any] | None = None
62
+
63
+
64
def parse_comparison_jsonl(path: str | Path) -> ComparisonData:
    """Parse a ChemGP optimizer comparison JSONL file.

    Handles minimize, dimer, and NEB comparison formats. Each line is a
    JSON object with a ``method`` field (or ``summary: true``). Blank
    lines (e.g. a trailing newline) are skipped instead of raising
    ``json.JSONDecodeError``.

    Parameters
    ----------
    path
        Path to the JSONL file.

    Returns
    -------
    ComparisonData
        Parsed traces keyed by method name.
    """
    data = ComparisonData()
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                # Tolerate empty/trailing lines common in JSONL output.
                continue
            rec = json.loads(line)
            if rec.get("summary"):
                data.summary = rec
                continue
            method = rec["method"]
            if method not in data.traces:
                data.traces[method] = OptimizerTrace(method=method)
            trace = data.traces[method]
            # Fall back to a sequential index when "step" is absent.
            trace.steps.append(rec.get("step", len(trace.steps)))
            trace.oracle_calls.append(rec["oracle_calls"])
            if "energy" in rec:
                if trace.energies is None:
                    trace.energies = []
                trace.energies.append(rec["energy"])
            # Dimer records use "force"; NEB records use "max_force".
            force_key = "force" if "force" in rec else "max_force"
            if force_key in rec:
                if trace.forces is None:
                    trace.forces = []
                trace.forces.append(rec[force_key])
    return data
103
+
104
+
105
@dataclass
class RFFQualityData:
    """Parsed RFF approximation quality benchmark.

    Attributes
    ----------
    exact_energy_mae : float
        Energy MAE of the exact GP against the true surface.
    exact_gradient_mae : float
        Gradient MAE of the exact GP against the true surface.
    d_rff_values : list[int]
        Tested RFF feature counts, in file order.
    energy_mae_vs_true : list[float]
        RFF energy MAE against the true surface, one per feature count.
    gradient_mae_vs_true : list[float]
        RFF gradient MAE against the true surface.
    energy_mae_vs_gp : list[float]
        RFF energy MAE against the exact GP.
    gradient_mae_vs_gp : list[float]
        RFF gradient MAE against the exact GP.
    """

    exact_energy_mae: float = 0.0
    exact_gradient_mae: float = 0.0
    d_rff_values: list[int] = field(default_factory=list)
    energy_mae_vs_true: list[float] = field(default_factory=list)
    gradient_mae_vs_true: list[float] = field(default_factory=list)
    energy_mae_vs_gp: list[float] = field(default_factory=list)
    gradient_mae_vs_gp: list[float] = field(default_factory=list)
134
+
135
+
136
def parse_rff_quality_jsonl(path: str | Path) -> RFFQualityData:
    """Parse a ChemGP RFF quality JSONL file.

    Records with ``type == "exact_gp"`` fill the exact-GP baseline;
    ``type == "rff"`` records append one entry per feature count. Blank
    lines (e.g. a trailing newline) are skipped instead of raising
    ``json.JSONDecodeError``.

    Parameters
    ----------
    path
        Path to the JSONL file.

    Returns
    -------
    RFFQualityData
        Parsed exact GP and RFF metrics.
    """
    data = RFFQualityData()
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                # Tolerate empty/trailing lines common in JSONL output.
                continue
            rec = json.loads(line)
            if rec["type"] == "exact_gp":
                data.exact_energy_mae = rec["energy_mae"]
                data.exact_gradient_mae = rec["gradient_mae"]
            elif rec["type"] == "rff":
                data.d_rff_values.append(rec["d_rff"])
                data.energy_mae_vs_true.append(rec["energy_mae_vs_true"])
                data.gradient_mae_vs_true.append(rec["gradient_mae_vs_true"])
                data.energy_mae_vs_gp.append(rec["energy_mae_vs_gp"])
                data.gradient_mae_vs_gp.append(rec["gradient_mae_vs_gp"])
    return data
163
+
164
+
165
@dataclass
class GPQualityGrid:
    """GP quality surfaces for one training-set size.

    Attributes
    ----------
    n_train : int
        Number of training points this grid corresponds to.
    nx : int
        Grid resolution along x.
    ny : int
        Grid resolution along y.
    x : list[list[float]]
        Grid x coordinates, shape (ny, nx).
    y : list[list[float]]
        Grid y coordinates, shape (ny, nx).
    true_e : list[list[float]]
        True energy evaluated on the grid.
    gp_e : list[list[float]]
        GP-predicted energy on the grid.
    gp_var : list[list[float]]
        GP predictive variance on the grid.
    train_x : list[float]
        x coordinates of the training points.
    train_y : list[float]
        y coordinates of the training points.
    train_e : list[float]
        Energies of the training points.
    """

    n_train: int = 0
    nx: int = 0
    ny: int = 0
    x: list[list[float]] = field(default_factory=list)
    y: list[list[float]] = field(default_factory=list)
    true_e: list[list[float]] = field(default_factory=list)
    gp_e: list[list[float]] = field(default_factory=list)
    gp_var: list[list[float]] = field(default_factory=list)
    train_x: list[float] = field(default_factory=list)
    train_y: list[float] = field(default_factory=list)
    train_e: list[float] = field(default_factory=list)
206
+
207
+
208
@dataclass
class StationaryPoint:
    """One stationary point located on the potential energy surface.

    ``kind`` is either ``"minimum"`` or ``"saddle"``.
    """

    # Stationary-point category: "minimum" or "saddle".
    kind: str
    id: int
    x: float
    y: float
    energy: float
217
+
218
+
219
@dataclass
class GPQualityData:
    """Everything parsed from an ``mb_gp_quality.jsonl`` file.

    Attributes
    ----------
    meta : dict
        Grid metadata record (nx, ny, x_min, x_max, y_min, y_max).
    stationary : list[StationaryPoint]
        All minima and saddle points found in the file.
    grids : dict[int, GPQualityGrid]
        One quality grid per training-set size, keyed by n_train.
    """

    meta: dict[str, Any] = field(default_factory=dict)
    stationary: list[StationaryPoint] = field(default_factory=list)
    grids: dict[int, GPQualityGrid] = field(default_factory=dict)
236
+
237
+
238
def parse_gp_quality_jsonl(path: str | Path) -> GPQualityData:
    """Parse a ChemGP GP quality JSONL file.

    The stream mixes record types: ``grid_meta`` (grid shape),
    ``minimum``/``saddle`` (stationary points), ``train_point``
    (training data per ``n_train``), and ``grid`` (per-cell surface
    values). Blank lines (e.g. a trailing newline) are skipped instead
    of raising ``json.JSONDecodeError``.

    Parameters
    ----------
    path
        Path to the JSONL file (e.g. ``mb_gp_quality.jsonl``).

    Returns
    -------
    GPQualityData
        Structured grid data with metadata and stationary points.
    """
    data = GPQualityData()
    train_points = defaultdict(lambda: {"x": [], "y": [], "e": []})
    grid_records = defaultdict(list)

    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                # Tolerate empty/trailing lines common in JSONL output.
                continue
            rec = json.loads(line)
            t = rec["type"]
            if t == "grid_meta":
                data.meta = rec
            elif t in ("minimum", "saddle"):
                data.stationary.append(
                    StationaryPoint(
                        kind=t,
                        id=rec["id"],
                        x=rec["x"],
                        y=rec["y"],
                        energy=rec["energy"],
                    )
                )
            elif t == "train_point":
                n = rec["n_train"]
                train_points[n]["x"].append(rec["x"])
                train_points[n]["y"].append(rec["y"])
                train_points[n]["e"].append(rec["energy"])
            elif t == "grid":
                grid_records[rec["n_train"]].append(rec)

    # Grid shape comes from the grid_meta record; without it the grids
    # stay 0x0 and cell records cannot be placed.
    nx = data.meta.get("nx", 0)
    ny = data.meta.get("ny", 0)

    for n_train, records in grid_records.items():
        grid = GPQualityGrid(n_train=n_train, nx=nx, ny=ny)
        # Pre-size 2D arrays; cells are filled by (ix, iy) index below.
        grid.x = [[0.0] * nx for _ in range(ny)]
        grid.y = [[0.0] * nx for _ in range(ny)]
        grid.true_e = [[0.0] * nx for _ in range(ny)]
        grid.gp_e = [[0.0] * nx for _ in range(ny)]
        grid.gp_var = [[0.0] * nx for _ in range(ny)]

        for rec in records:
            ix, iy = rec["ix"], rec["iy"]
            grid.x[iy][ix] = rec["x"]
            grid.y[iy][ix] = rec["y"]
            grid.true_e[iy][ix] = rec["true_e"]
            grid.gp_e[iy][ix] = rec["gp_e"]
            grid.gp_var[iy][ix] = rec["gp_var"]

        tp = train_points.get(n_train, {"x": [], "y": [], "e": []})
        grid.train_x = tp["x"]
        grid.train_y = tp["y"]
        grid.train_e = tp["e"]
        data.grids[n_train] = grid

    return data
@@ -282,7 +282,7 @@ def load_augmenting_neb_data(
282
282
  ```{versionadded} 0.1.0
283
283
  ```
284
284
  """
285
- from chemparseplot.parse.file_ import find_file_paths # noqa: PLC0415
285
+ from chemparseplot.parse.file_ import find_file_paths
286
286
 
287
287
  dat_paths = find_file_paths(dat_pattern)
288
288
  con_paths = find_file_paths(con_pattern)
@@ -32,9 +32,9 @@ def calculate_landscape_coords(
32
32
  :param ira_kmax: kmax factor for IRA.
33
33
  :return: A tuple of (rmsd_r, rmsd_p) arrays.
34
34
  """
35
- from concurrent.futures import ThreadPoolExecutor # noqa: PLC0415
35
+ from concurrent.futures import ThreadPoolExecutor
36
36
 
37
- from rgpycrumbs.geom.api.alignment import calculate_rmsd_from_ref # noqa: PLC0415
37
+ from rgpycrumbs.geom.api.alignment import calculate_rmsd_from_ref
38
38
 
39
39
  log.info("Calculating landscape coordinates (RMSD-R, RMSD-P)...")
40
40
  with ThreadPoolExecutor(max_workers=2) as pool:
@@ -0,0 +1,24 @@
1
+ # SPDX-FileCopyrightText: 2023-present Rohit Goswami <rog32@hi.is>
2
+ #
3
+ # SPDX-License-Identifier: MIT
4
+
5
+ """ORCA NEB parsing utilities.
6
+
7
+ Supports both:
8
+ - OPI (ORCA Python Interface) for ORCA 6.1+ JSON output
9
+ - Legacy regex parsing for older ORCA versions
10
+ """
11
+
12
+ from chemparseplot.parse.orca.neb.interp import extract_interp_points
13
+ from chemparseplot.parse.orca.neb.opi_parser import (
14
+ HAS_OPI,
15
+ parse_orca_neb,
16
+ parse_orca_neb_fallback,
17
+ )
18
+
19
+ __all__ = [
20
+ "HAS_OPI",
21
+ "extract_interp_points",
22
+ "parse_orca_neb",
23
+ "parse_orca_neb_fallback",
24
+ ]