reaxkit 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- reaxkit/__init__.py +0 -0
- reaxkit/analysis/__init__.py +0 -0
- reaxkit/analysis/composed/RDF_analyzer.py +560 -0
- reaxkit/analysis/composed/__init__.py +0 -0
- reaxkit/analysis/composed/connectivity_analyzer.py +706 -0
- reaxkit/analysis/composed/coordination_analyzer.py +144 -0
- reaxkit/analysis/composed/electrostatics_analyzer.py +687 -0
- reaxkit/analysis/per_file/__init__.py +0 -0
- reaxkit/analysis/per_file/control_analyzer.py +165 -0
- reaxkit/analysis/per_file/eregime_analyzer.py +108 -0
- reaxkit/analysis/per_file/ffield_analyzer.py +305 -0
- reaxkit/analysis/per_file/fort13_analyzer.py +79 -0
- reaxkit/analysis/per_file/fort57_analyzer.py +106 -0
- reaxkit/analysis/per_file/fort73_analyzer.py +61 -0
- reaxkit/analysis/per_file/fort74_analyzer.py +65 -0
- reaxkit/analysis/per_file/fort76_analyzer.py +191 -0
- reaxkit/analysis/per_file/fort78_analyzer.py +154 -0
- reaxkit/analysis/per_file/fort79_analyzer.py +83 -0
- reaxkit/analysis/per_file/fort7_analyzer.py +393 -0
- reaxkit/analysis/per_file/fort99_analyzer.py +411 -0
- reaxkit/analysis/per_file/molfra_analyzer.py +359 -0
- reaxkit/analysis/per_file/params_analyzer.py +258 -0
- reaxkit/analysis/per_file/summary_analyzer.py +84 -0
- reaxkit/analysis/per_file/trainset_analyzer.py +84 -0
- reaxkit/analysis/per_file/vels_analyzer.py +95 -0
- reaxkit/analysis/per_file/xmolout_analyzer.py +528 -0
- reaxkit/cli.py +181 -0
- reaxkit/count_loc.py +276 -0
- reaxkit/data/alias.yaml +89 -0
- reaxkit/data/constants.yaml +27 -0
- reaxkit/data/reaxff_input_files_contents.yaml +186 -0
- reaxkit/data/reaxff_output_files_contents.yaml +301 -0
- reaxkit/data/units.yaml +38 -0
- reaxkit/help/__init__.py +0 -0
- reaxkit/help/help_index_loader.py +531 -0
- reaxkit/help/introspection_utils.py +131 -0
- reaxkit/io/__init__.py +0 -0
- reaxkit/io/base_handler.py +165 -0
- reaxkit/io/generators/__init__.py +0 -0
- reaxkit/io/generators/control_generator.py +123 -0
- reaxkit/io/generators/eregime_generator.py +341 -0
- reaxkit/io/generators/geo_generator.py +967 -0
- reaxkit/io/generators/trainset_generator.py +1758 -0
- reaxkit/io/generators/tregime_generator.py +113 -0
- reaxkit/io/generators/vregime_generator.py +164 -0
- reaxkit/io/generators/xmolout_generator.py +304 -0
- reaxkit/io/handlers/__init__.py +0 -0
- reaxkit/io/handlers/control_handler.py +209 -0
- reaxkit/io/handlers/eregime_handler.py +122 -0
- reaxkit/io/handlers/ffield_handler.py +812 -0
- reaxkit/io/handlers/fort13_handler.py +123 -0
- reaxkit/io/handlers/fort57_handler.py +143 -0
- reaxkit/io/handlers/fort73_handler.py +145 -0
- reaxkit/io/handlers/fort74_handler.py +155 -0
- reaxkit/io/handlers/fort76_handler.py +195 -0
- reaxkit/io/handlers/fort78_handler.py +142 -0
- reaxkit/io/handlers/fort79_handler.py +227 -0
- reaxkit/io/handlers/fort7_handler.py +264 -0
- reaxkit/io/handlers/fort99_handler.py +128 -0
- reaxkit/io/handlers/geo_handler.py +224 -0
- reaxkit/io/handlers/molfra_handler.py +184 -0
- reaxkit/io/handlers/params_handler.py +137 -0
- reaxkit/io/handlers/summary_handler.py +135 -0
- reaxkit/io/handlers/trainset_handler.py +658 -0
- reaxkit/io/handlers/vels_handler.py +293 -0
- reaxkit/io/handlers/xmolout_handler.py +174 -0
- reaxkit/utils/__init__.py +0 -0
- reaxkit/utils/alias.py +219 -0
- reaxkit/utils/cache.py +77 -0
- reaxkit/utils/constants.py +75 -0
- reaxkit/utils/equation_of_states.py +96 -0
- reaxkit/utils/exceptions.py +27 -0
- reaxkit/utils/frame_utils.py +175 -0
- reaxkit/utils/log.py +43 -0
- reaxkit/utils/media/__init__.py +0 -0
- reaxkit/utils/media/convert.py +90 -0
- reaxkit/utils/media/make_video.py +91 -0
- reaxkit/utils/media/plotter.py +812 -0
- reaxkit/utils/numerical/__init__.py +0 -0
- reaxkit/utils/numerical/extrema_finder.py +96 -0
- reaxkit/utils/numerical/moving_average.py +103 -0
- reaxkit/utils/numerical/numerical_calcs.py +75 -0
- reaxkit/utils/numerical/signal_ops.py +135 -0
- reaxkit/utils/path.py +55 -0
- reaxkit/utils/units.py +104 -0
- reaxkit/webui/__init__.py +0 -0
- reaxkit/webui/app.py +0 -0
- reaxkit/webui/components.py +0 -0
- reaxkit/webui/layouts.py +0 -0
- reaxkit/webui/utils.py +0 -0
- reaxkit/workflows/__init__.py +0 -0
- reaxkit/workflows/composed/__init__.py +0 -0
- reaxkit/workflows/composed/coordination_workflow.py +393 -0
- reaxkit/workflows/composed/electrostatics_workflow.py +587 -0
- reaxkit/workflows/composed/xmolout_fort7_workflow.py +343 -0
- reaxkit/workflows/meta/__init__.py +0 -0
- reaxkit/workflows/meta/help_workflow.py +136 -0
- reaxkit/workflows/meta/introspection_workflow.py +235 -0
- reaxkit/workflows/meta/make_video_workflow.py +61 -0
- reaxkit/workflows/meta/plotter_workflow.py +601 -0
- reaxkit/workflows/per_file/__init__.py +0 -0
- reaxkit/workflows/per_file/control_workflow.py +110 -0
- reaxkit/workflows/per_file/eregime_workflow.py +267 -0
- reaxkit/workflows/per_file/ffield_workflow.py +390 -0
- reaxkit/workflows/per_file/fort13_workflow.py +86 -0
- reaxkit/workflows/per_file/fort57_workflow.py +137 -0
- reaxkit/workflows/per_file/fort73_workflow.py +151 -0
- reaxkit/workflows/per_file/fort74_workflow.py +88 -0
- reaxkit/workflows/per_file/fort76_workflow.py +188 -0
- reaxkit/workflows/per_file/fort78_workflow.py +135 -0
- reaxkit/workflows/per_file/fort79_workflow.py +314 -0
- reaxkit/workflows/per_file/fort7_workflow.py +592 -0
- reaxkit/workflows/per_file/fort83_workflow.py +60 -0
- reaxkit/workflows/per_file/fort99_workflow.py +223 -0
- reaxkit/workflows/per_file/geo_workflow.py +554 -0
- reaxkit/workflows/per_file/molfra_workflow.py +577 -0
- reaxkit/workflows/per_file/params_workflow.py +135 -0
- reaxkit/workflows/per_file/summary_workflow.py +161 -0
- reaxkit/workflows/per_file/trainset_workflow.py +356 -0
- reaxkit/workflows/per_file/tregime_workflow.py +79 -0
- reaxkit/workflows/per_file/vels_workflow.py +309 -0
- reaxkit/workflows/per_file/vregime_workflow.py +75 -0
- reaxkit/workflows/per_file/xmolout_workflow.py +678 -0
- reaxkit-1.0.0.dist-info/METADATA +128 -0
- reaxkit-1.0.0.dist-info/RECORD +130 -0
- reaxkit-1.0.0.dist-info/WHEEL +5 -0
- reaxkit-1.0.0.dist-info/entry_points.txt +2 -0
- reaxkit-1.0.0.dist-info/licenses/AUTHORS.md +20 -0
- reaxkit-1.0.0.dist-info/licenses/LICENSE +21 -0
- reaxkit-1.0.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,390 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Force-field (ffield) workflow for ReaxKit.
|
|
3
|
+
|
|
4
|
+
This workflow provides tools for inspecting, filtering, interpreting, and
|
|
5
|
+
exporting data from ReaxFF `ffield` files, which define the full set of force-field
|
|
6
|
+
parameters (atoms, bonds, angles, torsions, off-diagonals, and hydrogen bonds).
|
|
7
|
+
|
|
8
|
+
It supports:
|
|
9
|
+
- Extracting individual ffield sections in either interpreted (symbol-based)
|
|
10
|
+
or index-based form.
|
|
11
|
+
- Filtering term-based parameters using flexible syntax (e.g. C-H, CCH, 1-2,
|
|
12
|
+
with ordered or any-order matching where appropriate).
|
|
13
|
+
- Exporting selected sections or the entire force field to CSV for analysis,
|
|
14
|
+
comparison, or parameterization workflows.
|
|
15
|
+
|
|
16
|
+
The workflow is designed to make force-field inspection transparent and
|
|
17
|
+
scriptable, supporting both exploratory analysis and large-scale ReaxFF
|
|
18
|
+
parameter studies.
|
|
19
|
+
"""
|
|
20
|
+
|
|
21
|
+
from __future__ import annotations
|
|
22
|
+
|
|
23
|
+
import argparse
|
|
24
|
+
from pathlib import Path
|
|
25
|
+
from typing import Dict, List, Sequence, Tuple
|
|
26
|
+
|
|
27
|
+
import pandas as pd
|
|
28
|
+
|
|
29
|
+
from reaxkit.io.handlers.ffield_handler import FFieldHandler
|
|
30
|
+
from reaxkit.analysis.per_file.ffield_analyzer import get_ffield_data
|
|
31
|
+
|
|
32
|
+
# You added these in ffield_analyzer in the previous step.
|
|
33
|
+
# If the names differ in your repo, update these imports accordingly.
|
|
34
|
+
from reaxkit.analysis.per_file.ffield_analyzer import interpret_one_section, interpret_ffield_terms
|
|
35
|
+
|
|
36
|
+
from reaxkit.utils.path import resolve_output_path
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
# ------------------------ helpers ------------------------
|
|
40
|
+
|
|
41
|
+
def _normalize_section(s: str) -> str:
|
|
42
|
+
return s.strip().lower().replace("-", "_").replace(" ", "_")
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
def _atom_maps(handler: FFieldHandler) -> Tuple[Dict[int, str], Dict[str, int]]:
    """Build index->symbol and symbol->index maps from the ffield atom section.

    Raises:
        KeyError: if the atom section has no 'symbol' column.
    """
    atom_df = handler.section_df(FFieldHandler.SECTION_ATOM)
    if "symbol" not in atom_df.columns:
        raise KeyError("Atom section missing 'symbol' column.")

    idx_to_sym: Dict[int, str] = {}
    sym_to_idx: Dict[str, int] = {}
    for raw_idx, raw_sym in zip(atom_df.index, atom_df["symbol"]):
        index = int(raw_idx)
        symbol = str(raw_sym).strip()
        idx_to_sym[index] = symbol
        # Symbols are assumed unique in a typical ffield; on repeats the
        # last occurrence wins.
        sym_to_idx[symbol] = index
    return idx_to_sym, sym_to_idx
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
def _split_term_string(term: str) -> List[str]:
|
|
62
|
+
"""
|
|
63
|
+
Accepts: "C-H", "C H", "CCH", "1-2-3", "1 2 3"
|
|
64
|
+
Returns list of tokens (symbols or indices as strings).
|
|
65
|
+
"""
|
|
66
|
+
t = term.strip()
|
|
67
|
+
if not t:
|
|
68
|
+
return []
|
|
69
|
+
# If it contains separators, split on them
|
|
70
|
+
for sep in ["-", ",", " ", "_"]:
|
|
71
|
+
if sep in t:
|
|
72
|
+
toks = [x for x in t.replace(",", " ").replace("-", " ").replace("_", " ").split() if x]
|
|
73
|
+
return toks
|
|
74
|
+
|
|
75
|
+
# No separators: could be "CCH" or "1123" (indices without separators).
|
|
76
|
+
# Heuristic: if all digits => treat as single token (ambiguous) -> user should use separators.
|
|
77
|
+
if t.isdigit():
|
|
78
|
+
return [t]
|
|
79
|
+
|
|
80
|
+
# For symbols like CCH, split into element-like tokens (handles 1-2 letter symbols).
|
|
81
|
+
# Example: "SiOH" -> ["Si","O","H"]
|
|
82
|
+
toks: List[str] = []
|
|
83
|
+
i = 0
|
|
84
|
+
while i < len(t):
|
|
85
|
+
ch = t[i]
|
|
86
|
+
if ch.isupper():
|
|
87
|
+
if i + 1 < len(t) and t[i + 1].islower():
|
|
88
|
+
toks.append(t[i : i + 2])
|
|
89
|
+
i += 2
|
|
90
|
+
else:
|
|
91
|
+
toks.append(ch)
|
|
92
|
+
i += 1
|
|
93
|
+
else:
|
|
94
|
+
# unexpected; fall back
|
|
95
|
+
toks.append(ch)
|
|
96
|
+
i += 1
|
|
97
|
+
return toks
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
def _term_cols_for_section(section: str) -> List[str]:
    """Return the atom-index column names a term-based section filters on.

    Raises:
        KeyError: for sections without term columns (e.g. general/atom).
    """
    two_body = (FFieldHandler.SECTION_BOND, FFieldHandler.SECTION_OFF_DIAGONAL)
    three_body = (FFieldHandler.SECTION_ANGLE, FFieldHandler.SECTION_HBOND)
    if section in two_body:
        return ["i", "j"]
    if section in three_body:
        return ["i", "j", "k"]
    if section == FFieldHandler.SECTION_TORSION:
        return ["i", "j", "k", "l"]
    raise KeyError(f"Unsupported term filtering for section: {section!r}")
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
def _make_term_series_indices(df: pd.DataFrame, cols: Sequence[str], *, unordered_2body: bool) -> pd.Series:
|
|
111
|
+
"""
|
|
112
|
+
Build a comparable "term key" series from numeric columns.
|
|
113
|
+
- For 2-body (bond/offdiag): optionally unordered, so (1,2)==(2,1).
|
|
114
|
+
- For 3/4-body: keep order (CCH != CHC).
|
|
115
|
+
"""
|
|
116
|
+
if len(cols) == 2 and unordered_2body:
|
|
117
|
+
a = df[cols[0]].astype("Int64")
|
|
118
|
+
b = df[cols[1]].astype("Int64")
|
|
119
|
+
lo = a.where(a <= b, b)
|
|
120
|
+
hi = b.where(a <= b, a)
|
|
121
|
+
return lo.astype(str) + "-" + hi.astype(str)
|
|
122
|
+
|
|
123
|
+
# ordered
|
|
124
|
+
parts = [df[c].astype("Int64").astype(str) for c in cols]
|
|
125
|
+
s = parts[0]
|
|
126
|
+
for p in parts[1:]:
|
|
127
|
+
s = s + "-" + p
|
|
128
|
+
return s
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
def _filter_df_by_term(
    handler: FFieldHandler,
    section: str,
    df: pd.DataFrame,
    *,
    term: str,
    out_format: str,  # kept for API compatibility (not needed for filtering)
    unordered_2body: bool = True,
    any_order: bool = False,
) -> pd.DataFrame:
    """
    Filter df by a user term, supporting both interpreted and indices requests.

    any_order:
        - If True (recommended for angle/torsion/hbond): match all permutations of the atoms.
          Example: angle --term CCH --any-order matches CCH, CHC, HCC.
        - For 2-body sections, any_order behaves like unordered matching (same as unordered_2body=True).

    Examples:
        bond  --term C-H
        bond  --term 1-2
        angle --term CCH
        angle --term C-C-H
        angle --term 1-1-2
        angle --term CCH --any-order

    Raises:
        KeyError: if a symbol in `term` is not in the atom section.
        ValueError: if `term` has the wrong number of atoms for `section`.

    Note:
        The stray, unreachable statements that previously trailed this
        function (a leftover `get_ffield_data(...)` fetch and a second
        `return sorted(...)`) have been removed — they could never execute.
    """
    cols = _term_cols_for_section(section)
    toks = _split_term_string(term)
    if not toks:
        # Empty term means "no filtering requested".
        return df

    # --- parse "wanted" into integer indices ---
    if all(x.isdigit() for x in toks):
        # indices provided directly
        idxs = [int(x) for x in toks]
    else:
        # symbols -> indices via the atom section
        _, sym_to_idx = _atom_maps(handler)
        idxs = []
        for s in toks:
            if s not in sym_to_idx:
                raise KeyError(f"Unknown atom symbol {s!r}. Available: {sorted(sym_to_idx.keys())}")
            idxs.append(int(sym_to_idx[s]))

    if len(idxs) != len(cols):
        raise ValueError(
            f"Term {term!r} implies {len(idxs)} atoms, but section '{section}' needs {len(cols)}."
        )

    # --- matching logic ---
    is_2body = len(cols) == 2

    # any_order for 2-body is equivalent to unordered matching
    if is_2body and (unordered_2body or any_order):
        a, b = sorted(idxs)
        wanted = f"{a}-{b}"
        key = _make_term_series_indices(df, cols, unordered_2body=True)
        return df.loc[key == wanted].copy()

    # any_order for 3/4-body: match permutations via sorted multiset equality
    if any_order and not is_2body:
        wanted_sorted = sorted(idxs)

        # Take the relevant columns, cast to int, sort per-row, and compare
        # against the sorted wanted indices.
        sub = df.loc[:, cols].astype("Int64")

        def _row_sorted_equals_wanted(r: pd.Series) -> bool:
            return sorted(int(x) for x in r.tolist()) == wanted_sorted

        mask = sub.apply(_row_sorted_equals_wanted, axis=1)
        return df.loc[mask].copy()

    # default ordered matching (CCH != CHC)
    wanted = "-".join(str(x) for x in idxs)
    key = _make_term_series_indices(df, cols, unordered_2body=False)
    return df.loc[key == wanted].copy()
|
|
221
|
+
|
|
222
|
+
|
|
223
|
+
def _export_df(df: pd.DataFrame, export_path: str | Path) -> None:
|
|
224
|
+
export_path = Path(export_path)
|
|
225
|
+
export_path.parent.mkdir(parents=True, exist_ok=True)
|
|
226
|
+
df.to_csv(export_path, index=True)
|
|
227
|
+
print(f"[Done] Saved the requested data in {export_path}")
|
|
228
|
+
|
|
229
|
+
|
|
230
|
+
# ------------------------ tasks ------------------------
|
|
231
|
+
|
|
232
|
+
|
|
233
|
+
def _task_get(args: argparse.Namespace) -> int:
    """
    Handle 'reaxkit ffield get ...'.

    Loads one ffield section (interpreted or index-based), optionally filters
    it by a user-supplied term, then exports it to CSV or prints a preview.

    Returns:
        0 on success (CLI exit code).
    """
    handler = FFieldHandler(args.file)
    section = _normalize_section(args.section)

    # True when the user wants symbol-based output (C-H) instead of indices.
    interpreted = args.format == "interpreted"

    # get df (interpreted or base)
    if interpreted:
        df = interpret_one_section(handler, section=section)
    else:
        df = get_ffield_data(handler, section=section)

    # optional filter
    if args.term:
        # Filtering matches on the numeric i/j/k/l columns, so when the
        # display frame is interpreted we re-fetch the base (index) frame
        # for the filter and keep only the surviving row labels.
        df = _filter_df_by_term(
            handler,
            section,
            df if not interpreted else get_ffield_data(handler, section=section),
            term=args.term,
            out_format=args.format,
            unordered_2body=not args.ordered_2body,
            any_order=args.any_order,
        )
        # if interpreted requested, re-interpret the filtered subset for display/export
        if interpreted and not df.empty:
            df = interpret_one_section(handler, section=section).loc[df.index].copy()

    # export or preview
    if args.export:
        out_path = resolve_output_path(args.export, workflow="ffield")
        _export_df(df, out_path)
    else:
        # Preview at most 30 rows; the option context keeps wide frames readable.
        with pd.option_context("display.max_rows", min(30, len(df)), "display.max_columns", 200):
            print(df.head(30))
        if len(df) > 30:
            print(f"... ({len(df)} rows total)")
    return 0
|
|
270
|
+
|
|
271
|
+
|
|
272
|
+
def _task_export(args: argparse.Namespace) -> int:
    """
    Handle 'reaxkit ffield export ...': write every ffield section to its
    own CSV file under the resolved output directory.

    Returns:
        0 on success (CLI exit code).
    """
    handler = FFieldHandler(args.file)
    interpreted = args.format == "interpreted"

    # where to put files
    out_dir = Path(resolve_output_path(args.outdir, workflow="ffield"))
    out_dir.mkdir(parents=True, exist_ok=True)

    sections = [
        FFieldHandler.SECTION_GENERAL,
        FFieldHandler.SECTION_ATOM,
        FFieldHandler.SECTION_BOND,
        FFieldHandler.SECTION_OFF_DIAGONAL,
        FFieldHandler.SECTION_ANGLE,
        FFieldHandler.SECTION_TORSION,
        FFieldHandler.SECTION_HBOND,
    ]

    # Interpret only the term-bearing sections; atom/general stay as-is.
    interpreted_map = interpret_ffield_terms(handler) if interpreted else {}

    # File-name suffix is the same for every section in this run.
    suffix = "interpreted" if interpreted else "indices"

    print('[Done] Exporting all sections data:')
    for sec in sections:
        if interpreted and sec in interpreted_map:
            df = interpreted_map[sec]
        else:
            df = handler.section_df(sec).copy()

        out_path = out_dir / f"{sec}_{suffix}.csv"
        df.to_csv(out_path, index=True)
        print(f" {sec:12s} -> {out_path}")

    return 0
|
|
310
|
+
|
|
311
|
+
|
|
312
|
+
# ------------------------ CLI registration ------------------------
|
|
313
|
+
def _add_common_ffield_args(p: argparse.ArgumentParser) -> None:
|
|
314
|
+
p.add_argument("--file", default="ffield", help="Path to ffield file.")
|
|
315
|
+
p.add_argument(
|
|
316
|
+
"--format",
|
|
317
|
+
default="interpreted",
|
|
318
|
+
choices=["interpreted", "indices"],
|
|
319
|
+
help="Output format: interpreted uses atom symbols (C-H), indices uses numeric (1-2).",
|
|
320
|
+
)
|
|
321
|
+
p.add_argument(
|
|
322
|
+
"--export",
|
|
323
|
+
default=None,
|
|
324
|
+
help="Path to export CSV. If omitted, prints a preview.",
|
|
325
|
+
)
|
|
326
|
+
|
|
327
|
+
def register_tasks(subparsers: argparse._SubParsersAction) -> None:
    """
    Register the ffield subcommands on the parent CLI:
      reaxkit ffield get ...
      reaxkit ffield export ...

    Each subparser stores its task callable under `_run` for the dispatcher.
    """
    # ---- get ----
    p = subparsers.add_parser(
        "get",
        help="Get a single ffield section (optionally interpreted + filtered).",
        description=(
            "Examples:\n"
            " # 1) Get all C-H bond rows (interpreted output)\n"
            " reaxkit ffield get --section bond --term C-H --format interpreted --export CH_bond.csv\n"
            "\n"
            " # 2) Same, but using indices\n"
            " reaxkit ffield get --section bond --term 1-2 --format indices --export 1_2_bond.csv\n"
            "\n"
            " # 3) Angles: get only C-C-H (ordered)\n"
            " reaxkit ffield get --section angle --term CCH --format interpreted --export CCH_angles.csv\n"
            "\n"
            " # 4) Angles: get all combinations of C-C-H in angle data\n"
            " reaxkit ffield get --section angle --term CCH --format interpreted --any-order --export all_CCH_angles.csv\n"
        ),
        formatter_class=argparse.RawTextHelpFormatter,
    )
    _add_common_ffield_args(p)
    p.add_argument("--section", required=True,
                   help="Section: general, atom, bond, off_diagonal, angle, torsion, hbond.")
    p.add_argument("--term", default=None,
                   help="Filter term, e.g. 'C-H', 'CCH', 'C-C-H', '1-2', '1-1-2'.")
    # Note: argparse exposes this flag as args.ordered_2body (dash -> underscore).
    p.add_argument("--ordered-2body", action="store_true",
                   help="For 2-body sections (bond/off_diagonal), treat (C-H) and (H-C) as different. Default is unordered.")
    p.add_argument(
        "--any-order",
        action="store_true",
        help="Match all permutations of the given term (e.g. CCH matches CCH, CHC, HCC).",
    )
    p.set_defaults(_run=_task_get)

    # ---- export ----
    p = subparsers.add_parser(
        "export",
        help="Export all ffield sections as separate CSV files.",
        description=(
            "Examples:\n"
            " # Export everything interpreted (C-H, C-C-H, ...)\n"
            " reaxkit ffield export --format interpreted --outdir reaxkit_outputs/ffield/all_ffield_csv\n"
            "\n"
            " # Export everything in indices (1-2, 1-1-2, ...)\n"
            " reaxkit ffield export --format indices --outdir reaxkit_outputs/ffield/all_ffield_csv\n"
        ),
        formatter_class=argparse.RawTextHelpFormatter,
    )
    # The export subcommand does not share _add_common_ffield_args because it
    # uses --outdir instead of a per-file --export path.
    p.add_argument("--file", default="ffield", help="Path to ffield file.")
    p.add_argument(
        "--format",
        default="interpreted",
        choices=["interpreted", "indices"],
        help="Export format: interpreted uses atom symbols, indices uses numeric atom indices.",
    )
    p.add_argument("--outdir", default="ffield_export",
                   help="Directory to write CSVs (will be placed under reaxkit_output/...).")
    p.set_defaults(_run=_task_export)
|
|
@@ -0,0 +1,86 @@
|
|
|
1
|
+
"""
|
|
2
|
+
fort.13 error-analysis workflow for ReaxKit.
|
|
3
|
+
|
|
4
|
+
This workflow provides utilities for reading and analyzing ReaxFF `fort.13` files,
|
|
5
|
+
which store force-field training and optimization error metrics as a function
|
|
6
|
+
of training epoch.
|
|
7
|
+
|
|
8
|
+
It supports:
|
|
9
|
+
- Extracting total force-field error values versus epoch.
|
|
10
|
+
- Visualizing error convergence during force-field optimization.
|
|
11
|
+
- Exporting error data to CSV for post-processing or comparison across runs.
|
|
12
|
+
|
|
13
|
+
The workflow is intended for monitoring ReaxFF parameter optimization and
|
|
14
|
+
assessing convergence behavior in training or fitting workflows.
|
|
15
|
+
"""
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
import argparse
|
|
19
|
+
from reaxkit.io.handlers.fort13_handler import Fort13Handler
|
|
20
|
+
from reaxkit.analysis.per_file.fort13_analyzer import get_fort13_data
|
|
21
|
+
from reaxkit.utils.media.plotter import single_plot
|
|
22
|
+
from reaxkit.utils.path import resolve_output_path
|
|
23
|
+
|
|
24
|
+
def _fort13_get_task(args: argparse.Namespace) -> int:
    """
    Handle 'reaxkit fort13 get ...'
    to plot, export, or save error data vs epoch.

    Returns:
        0 on success (CLI exit code).
    """
    handler = Fort13Handler(args.file)
    df = get_fort13_data(handler)

    # NOTE(review): assumes the top-level CLI dispatcher stores the subcommand
    # group name in args.kind — confirm against reaxkit.cli.
    workflow_name = args.kind

    # Export if requested
    if args.export:
        out = resolve_output_path(args.export, workflow_name)
        df.to_csv(out, index=False)
        print(f'Successfully exported data to {out}.')

    if args.plot or args.save:
        x = df["epoch"]
        y = df["total_ff_error"]

        # Fix: --plot and --save were previously joined with `elif`, so a
        # requested --save was silently ignored whenever --plot was also
        # given. Resolve the save path (if any) and make a single call.
        save_path = resolve_output_path(args.save, workflow_name) if args.save else None
        single_plot(
            x,
            y,
            title="Force Field Error vs Epoch",
            xlabel="Epoch",
            ylabel="Total Force Field Error",
            save=save_path,
        )

    if not (args.plot or args.save or args.export):
        print("ℹ️ Nothing to do. Use --plot, --save <path>, --export <csv>.")
    return 0
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
def register_tasks(subparsers: argparse._SubParsersAction) -> None:
    """
    CLI registration for:
      reaxkit fort13 get ...

    The task callable is stored under `_run` for the dispatcher.
    """
    p = subparsers.add_parser(
        "get",
        help="Plot, export, or save fort.13 error data vs epoch.\n",
        description=(
            "Examples:\n"
            " reaxkit fort13 get --save total_ff_error_vs_epoch.png\n"
        ),
        formatter_class=argparse.RawTextHelpFormatter,
    )
    p.add_argument("--file", default="fort.13", help="Path to fort.13 file")
    p.add_argument("--plot", action="store_true", help="Plot error vs epoch")
    p.add_argument("--save", default=None, help="Save plot image to path")
    p.add_argument("--export", default=None, help="Export data to CSV file")
    p.set_defaults(_run=_fort13_get_task)
|
|
@@ -0,0 +1,137 @@
|
|
|
1
|
+
"""
|
|
2
|
+
fort.57 thermodynamic-output workflow for ReaxKit.
|
|
3
|
+
|
|
4
|
+
This workflow provides tools for reading, visualizing, and exporting data from
|
|
5
|
+
ReaxFF `fort.57` files, which typically contain thermodynamic and simulation
|
|
6
|
+
monitoring quantities recorded during MD runs.
|
|
7
|
+
|
|
8
|
+
It supports:
|
|
9
|
+
- Extracting one or more scalar quantities (e.g. potential energy, temperature,
|
|
10
|
+
RMS force, number of force calls) as functions of iteration, frame, or time.
|
|
11
|
+
- Converting the x-axis between iteration, frame index, and physical time using
|
|
12
|
+
the associated control file.
|
|
13
|
+
- Plotting selected quantities or exporting them to CSV for downstream analysis.
|
|
14
|
+
|
|
15
|
+
The workflow is intended for quick inspection and post-processing of ReaxFF
|
|
16
|
+
simulation stability, convergence, and thermodynamic behavior.
|
|
17
|
+
"""
|
|
18
|
+
|
|
19
|
+
from __future__ import annotations
|
|
20
|
+
|
|
21
|
+
import argparse
|
|
22
|
+
from pathlib import Path
|
|
23
|
+
from typing import List
|
|
24
|
+
|
|
25
|
+
import pandas as pd
|
|
26
|
+
|
|
27
|
+
from reaxkit.io.handlers.fort57_handler import Fort57Handler
|
|
28
|
+
from reaxkit.analysis.per_file.fort57_analyzer import get_fort57_data
|
|
29
|
+
from reaxkit.utils.media.convert import convert_xaxis
|
|
30
|
+
from reaxkit.utils.media.plotter import single_plot
|
|
31
|
+
from reaxkit.utils.path import resolve_output_path
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def _split_cols(s: str | None) -> List[str]:
|
|
35
|
+
if not s:
|
|
36
|
+
return []
|
|
37
|
+
# support "a,b,c" or "a b c"
|
|
38
|
+
toks: List[str] = []
|
|
39
|
+
for part in s.replace(",", " ").split():
|
|
40
|
+
p = part.strip()
|
|
41
|
+
if p:
|
|
42
|
+
toks.append(p)
|
|
43
|
+
return toks
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def _get_task(args: argparse.Namespace) -> int:
    """
    Handle 'reaxkit fort57 get ...'.

    Selects the requested y column(s), converts the x-axis (iter/frame/time),
    then exports to CSV, plots/saves figures, and/or prints a preview.

    Returns:
        0 on success (CLI exit code).

    Raises:
        KeyError: if a requested y column is not present in the analyzer output.
    """
    h = Fort57Handler(args.file)

    # --- decide which y columns user wants ---
    yreq = _split_cols(args.yaxis)
    if not yreq:
        yreq = ["E_pot"]

    if len(yreq) == 1 and yreq[0].lower() == "all":
        # All canonical columns except iter (we add x separately).
        # Fix: this list used to be "filtered" with `if c in df_full.columns
        # or True`, a tautology that made the membership test a no-op and
        # left the `h.dataframe()` fetch dead — both removed. Missing columns
        # are still reported by the KeyError check below.
        ycols = ["E_pot", "T", "T_set", "RMSG", "nfc"]
    else:
        ycols = yreq

    # --- pull iter + requested y columns through analyzer (alias-aware) ---
    wanted = ["iter"] + ycols
    df = get_fort57_data(fort57_handler=h, cols=wanted, include_geo_descriptor=False)

    # --- convert x-axis using convert.py ---
    xvals, xlabel = convert_xaxis(df["iter"].to_numpy(), args.xaxis, control_file=args.control)

    # build output table: x + y
    xname = args.xaxis  # "iter" | "frame" | "time"
    out = pd.DataFrame({xname: xvals})
    for yc in ycols:
        if yc not in df.columns:
            raise KeyError(f"❌ Requested y column '{yc}' not found. Available: {', '.join(df.columns)}")
        out[yc] = df[yc].to_numpy()

    # --- export CSV (always x + y) ---
    if args.export:
        # NOTE(review): "fort57.md" as the workflow name looks odd, but it
        # matches the subcommand name used in this module's CLI examples.
        export_path = resolve_output_path(args.export, "fort57.md")
        out.to_csv(export_path, index=False)
        print(f"[Done] Exported CSV to {export_path}")

    # --- plot (one plot per y if multiple) ---
    if getattr(args, "plot", False) or args.save:
        base_save = args.save
        for yc in ycols:
            save_path = None
            if base_save:
                sp = Path(base_save)
                # suffix _<col> when multiple y columns
                if len(ycols) > 1:
                    sp = sp.with_name(f"{sp.stem}_{yc}{sp.suffix}")
                save_path = str(resolve_output_path(str(sp), "fort57.md"))

            single_plot(
                xvals,
                out[yc].to_numpy(),
                title=f"fort.57: {yc} vs {args.xaxis}",
                xlabel=xlabel,
                ylabel=yc,
                save=save_path,
            )

    # If user didn't plot or export, at least print a preview
    if not args.export and not getattr(args, "plot", False) and not args.save:
        print(out.head())

    return 0
|
|
109
|
+
|
|
110
|
+
###################################################################################
|
|
111
|
+
|
|
112
|
+
def _add_common_fort57_io_args(p: argparse.ArgumentParser, *, include_plot: bool = False) -> None:
|
|
113
|
+
p.add_argument("--file", default="fort.57", help="Path to fort.57 file.")
|
|
114
|
+
p.add_argument("--control", default="control", help="Path to control file (for --xaxis time).")
|
|
115
|
+
p.add_argument("--xaxis", default="iter", choices=["iter", "frame", "time"],
|
|
116
|
+
help="X-axis: iter, frame, or time (time uses control:tstep).")
|
|
117
|
+
p.add_argument("--yaxis", default="E_pot",
|
|
118
|
+
help="Y column(s): e.g. 'RMSG' or 'iter RMSG' or 'E_pot,T' or 'all'.")
|
|
119
|
+
if include_plot:
|
|
120
|
+
p.add_argument("--plot", action="store_true", help="Show plot interactively.")
|
|
121
|
+
p.add_argument("--save", default=None, help="Path to save plot image (suffix _<col> if multiple y).")
|
|
122
|
+
p.add_argument("--export", default=None, help="Path to export CSV (x + selected y columns).")
|
|
123
|
+
|
|
124
|
+
def register_tasks(subparsers: argparse._SubParsersAction) -> None:
    """
    CLI registration for the fort.57 'get' subcommand.

    The task callable is stored under `_run` for the dispatcher.
    """
    p = subparsers.add_parser(
        "get",
        help="Get/plot selected columns from fort.57 (with x-axis conversion).",
        description=(
            "Examples:\n"
            " reaxkit fort57.md get --yaxis RMSG --xaxis iter --save rmsg_vs_iter.png\n"
            " reaxkit fort57.md get --yaxis all --xaxis iter --export fort57_all.csv\n"
        ),
        formatter_class=argparse.RawTextHelpFormatter,
    )

    # Shared --file/--control/--xaxis/--yaxis/--save/--export plus --plot.
    _add_common_fort57_io_args(p, include_plot=True)
    p.set_defaults(_run=_get_task)
|