reaxkit-1.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (130)
  1. reaxkit/__init__.py +0 -0
  2. reaxkit/analysis/__init__.py +0 -0
  3. reaxkit/analysis/composed/RDF_analyzer.py +560 -0
  4. reaxkit/analysis/composed/__init__.py +0 -0
  5. reaxkit/analysis/composed/connectivity_analyzer.py +706 -0
  6. reaxkit/analysis/composed/coordination_analyzer.py +144 -0
  7. reaxkit/analysis/composed/electrostatics_analyzer.py +687 -0
  8. reaxkit/analysis/per_file/__init__.py +0 -0
  9. reaxkit/analysis/per_file/control_analyzer.py +165 -0
  10. reaxkit/analysis/per_file/eregime_analyzer.py +108 -0
  11. reaxkit/analysis/per_file/ffield_analyzer.py +305 -0
  12. reaxkit/analysis/per_file/fort13_analyzer.py +79 -0
  13. reaxkit/analysis/per_file/fort57_analyzer.py +106 -0
  14. reaxkit/analysis/per_file/fort73_analyzer.py +61 -0
  15. reaxkit/analysis/per_file/fort74_analyzer.py +65 -0
  16. reaxkit/analysis/per_file/fort76_analyzer.py +191 -0
  17. reaxkit/analysis/per_file/fort78_analyzer.py +154 -0
  18. reaxkit/analysis/per_file/fort79_analyzer.py +83 -0
  19. reaxkit/analysis/per_file/fort7_analyzer.py +393 -0
  20. reaxkit/analysis/per_file/fort99_analyzer.py +411 -0
  21. reaxkit/analysis/per_file/molfra_analyzer.py +359 -0
  22. reaxkit/analysis/per_file/params_analyzer.py +258 -0
  23. reaxkit/analysis/per_file/summary_analyzer.py +84 -0
  24. reaxkit/analysis/per_file/trainset_analyzer.py +84 -0
  25. reaxkit/analysis/per_file/vels_analyzer.py +95 -0
  26. reaxkit/analysis/per_file/xmolout_analyzer.py +528 -0
  27. reaxkit/cli.py +181 -0
  28. reaxkit/count_loc.py +276 -0
  29. reaxkit/data/alias.yaml +89 -0
  30. reaxkit/data/constants.yaml +27 -0
  31. reaxkit/data/reaxff_input_files_contents.yaml +186 -0
  32. reaxkit/data/reaxff_output_files_contents.yaml +301 -0
  33. reaxkit/data/units.yaml +38 -0
  34. reaxkit/help/__init__.py +0 -0
  35. reaxkit/help/help_index_loader.py +531 -0
  36. reaxkit/help/introspection_utils.py +131 -0
  37. reaxkit/io/__init__.py +0 -0
  38. reaxkit/io/base_handler.py +165 -0
  39. reaxkit/io/generators/__init__.py +0 -0
  40. reaxkit/io/generators/control_generator.py +123 -0
  41. reaxkit/io/generators/eregime_generator.py +341 -0
  42. reaxkit/io/generators/geo_generator.py +967 -0
  43. reaxkit/io/generators/trainset_generator.py +1758 -0
  44. reaxkit/io/generators/tregime_generator.py +113 -0
  45. reaxkit/io/generators/vregime_generator.py +164 -0
  46. reaxkit/io/generators/xmolout_generator.py +304 -0
  47. reaxkit/io/handlers/__init__.py +0 -0
  48. reaxkit/io/handlers/control_handler.py +209 -0
  49. reaxkit/io/handlers/eregime_handler.py +122 -0
  50. reaxkit/io/handlers/ffield_handler.py +812 -0
  51. reaxkit/io/handlers/fort13_handler.py +123 -0
  52. reaxkit/io/handlers/fort57_handler.py +143 -0
  53. reaxkit/io/handlers/fort73_handler.py +145 -0
  54. reaxkit/io/handlers/fort74_handler.py +155 -0
  55. reaxkit/io/handlers/fort76_handler.py +195 -0
  56. reaxkit/io/handlers/fort78_handler.py +142 -0
  57. reaxkit/io/handlers/fort79_handler.py +227 -0
  58. reaxkit/io/handlers/fort7_handler.py +264 -0
  59. reaxkit/io/handlers/fort99_handler.py +128 -0
  60. reaxkit/io/handlers/geo_handler.py +224 -0
  61. reaxkit/io/handlers/molfra_handler.py +184 -0
  62. reaxkit/io/handlers/params_handler.py +137 -0
  63. reaxkit/io/handlers/summary_handler.py +135 -0
  64. reaxkit/io/handlers/trainset_handler.py +658 -0
  65. reaxkit/io/handlers/vels_handler.py +293 -0
  66. reaxkit/io/handlers/xmolout_handler.py +174 -0
  67. reaxkit/utils/__init__.py +0 -0
  68. reaxkit/utils/alias.py +219 -0
  69. reaxkit/utils/cache.py +77 -0
  70. reaxkit/utils/constants.py +75 -0
  71. reaxkit/utils/equation_of_states.py +96 -0
  72. reaxkit/utils/exceptions.py +27 -0
  73. reaxkit/utils/frame_utils.py +175 -0
  74. reaxkit/utils/log.py +43 -0
  75. reaxkit/utils/media/__init__.py +0 -0
  76. reaxkit/utils/media/convert.py +90 -0
  77. reaxkit/utils/media/make_video.py +91 -0
  78. reaxkit/utils/media/plotter.py +812 -0
  79. reaxkit/utils/numerical/__init__.py +0 -0
  80. reaxkit/utils/numerical/extrema_finder.py +96 -0
  81. reaxkit/utils/numerical/moving_average.py +103 -0
  82. reaxkit/utils/numerical/numerical_calcs.py +75 -0
  83. reaxkit/utils/numerical/signal_ops.py +135 -0
  84. reaxkit/utils/path.py +55 -0
  85. reaxkit/utils/units.py +104 -0
  86. reaxkit/webui/__init__.py +0 -0
  87. reaxkit/webui/app.py +0 -0
  88. reaxkit/webui/components.py +0 -0
  89. reaxkit/webui/layouts.py +0 -0
  90. reaxkit/webui/utils.py +0 -0
  91. reaxkit/workflows/__init__.py +0 -0
  92. reaxkit/workflows/composed/__init__.py +0 -0
  93. reaxkit/workflows/composed/coordination_workflow.py +393 -0
  94. reaxkit/workflows/composed/electrostatics_workflow.py +587 -0
  95. reaxkit/workflows/composed/xmolout_fort7_workflow.py +343 -0
  96. reaxkit/workflows/meta/__init__.py +0 -0
  97. reaxkit/workflows/meta/help_workflow.py +136 -0
  98. reaxkit/workflows/meta/introspection_workflow.py +235 -0
  99. reaxkit/workflows/meta/make_video_workflow.py +61 -0
  100. reaxkit/workflows/meta/plotter_workflow.py +601 -0
  101. reaxkit/workflows/per_file/__init__.py +0 -0
  102. reaxkit/workflows/per_file/control_workflow.py +110 -0
  103. reaxkit/workflows/per_file/eregime_workflow.py +267 -0
  104. reaxkit/workflows/per_file/ffield_workflow.py +390 -0
  105. reaxkit/workflows/per_file/fort13_workflow.py +86 -0
  106. reaxkit/workflows/per_file/fort57_workflow.py +137 -0
  107. reaxkit/workflows/per_file/fort73_workflow.py +151 -0
  108. reaxkit/workflows/per_file/fort74_workflow.py +88 -0
  109. reaxkit/workflows/per_file/fort76_workflow.py +188 -0
  110. reaxkit/workflows/per_file/fort78_workflow.py +135 -0
  111. reaxkit/workflows/per_file/fort79_workflow.py +314 -0
  112. reaxkit/workflows/per_file/fort7_workflow.py +592 -0
  113. reaxkit/workflows/per_file/fort83_workflow.py +60 -0
  114. reaxkit/workflows/per_file/fort99_workflow.py +223 -0
  115. reaxkit/workflows/per_file/geo_workflow.py +554 -0
  116. reaxkit/workflows/per_file/molfra_workflow.py +577 -0
  117. reaxkit/workflows/per_file/params_workflow.py +135 -0
  118. reaxkit/workflows/per_file/summary_workflow.py +161 -0
  119. reaxkit/workflows/per_file/trainset_workflow.py +356 -0
  120. reaxkit/workflows/per_file/tregime_workflow.py +79 -0
  121. reaxkit/workflows/per_file/vels_workflow.py +309 -0
  122. reaxkit/workflows/per_file/vregime_workflow.py +75 -0
  123. reaxkit/workflows/per_file/xmolout_workflow.py +678 -0
  124. reaxkit-1.0.0.dist-info/METADATA +128 -0
  125. reaxkit-1.0.0.dist-info/RECORD +130 -0
  126. reaxkit-1.0.0.dist-info/WHEEL +5 -0
  127. reaxkit-1.0.0.dist-info/entry_points.txt +2 -0
  128. reaxkit-1.0.0.dist-info/licenses/AUTHORS.md +20 -0
  129. reaxkit-1.0.0.dist-info/licenses/LICENSE +21 -0
  130. reaxkit-1.0.0.dist-info/top_level.txt +1 -0
reaxkit/io/handlers/control_handler.py
@@ -0,0 +1,209 @@
+"""
+ReaxFF control file handler.
+
+This module provides a handler for parsing ReaxFF ``control`` input files
+into a normalized tabular representation, exposing simulation parameters
+and metadata in a structured and programmatic form.
+
+Typical use cases include:
+
+- reading time step and MD parameters
+- converting iteration indices to physical time
+- inspecting or validating simulation settings
+"""
+
+from __future__ import annotations
+
+import re
+from pathlib import Path
+from typing import Dict, Any
+
+import pandas as pd
+
+from reaxkit.io.base_handler import BaseHandler
+
+
+class ControlHandler(BaseHandler):
+    """
+    Parser for ReaxFF ``control`` input files.
+
+    This class parses ReaxFF control files and exposes simulation parameters
+    as structured tabular data, while also providing backward-compatible
+    per-section dictionaries for common access patterns.
+
+    Parsed Data
+    -----------
+    Summary table
+        One row per control parameter, returned by ``dataframe()``, with columns:
+        ["section", "key", "value", "inline_comment"]
+
+        Sections are normalized to:
+        ["general", "md", "mm", "ff", "outdated"]
+
+    Metadata
+        Returned by ``metadata()``, containing counts of parameters per section:
+        ["n_general", "n_md", "n_mm", "n_ff", "n_outdated"]
+
+    Attributes
+    ----------
+    general_parameters : dict
+        Parameters from the ``general`` section.
+    md_parameters : dict
+        Parameters from the ``md`` section.
+    mm_parameters : dict
+        Parameters from the ``mm`` section.
+    ff_parameters : dict
+        Parameters from the ``ff`` section.
+    outdated_parameters : dict
+        Parameters from the ``outdated`` section.
+
+    Notes
+    -----
+    - Numeric values are converted to ``int`` or ``float`` when possible.
+    - Parsing is performed eagerly on initialization to populate section dicts.
+    - Content before the first recognized section header is ignored.
+    """
+
+    def __init__(self, file_path: str | Path = "control"):
+        super().__init__(file_path)
+
+        # Backward-compatible per-section dicts
+        self.general_parameters: Dict[str, Any] = {}
+        self.md_parameters: Dict[str, Any] = {}
+        self.mm_parameters: Dict[str, Any] = {}
+        self.ff_parameters: Dict[str, Any] = {}
+        self.outdated_parameters: Dict[str, Any] = {}
+
+        # Keep old behavior: parse eagerly so dicts are populated
+        self.parse()
+
+    # ------------------------------------------------------------------
+    # Internal parsing logic
+    # ------------------------------------------------------------------
+    def _parse(self) -> tuple[pd.DataFrame, dict[str, Any]]:
+        """
+        Parse control file into a DataFrame and metadata.
+
+        DataFrame columns:
+        - section
+        - key
+        - value
+        - inline_comment
+        """
+        section: str | None = None
+        rows: list[dict[str, Any]] = []
+        counts: Dict[str, int] = {
+            "general": 0,
+            "md": 0,
+            "mm": 0,
+            "ff": 0,
+            "outdated": 0,
+        }
+
+        # Helper to map a header line to a normalized section name
+        def header_to_section(line: str) -> str | None:
+            # remove leading '#' and surrounding whitespace, then lower
+            hdr = re.sub(r"^\s*#\s*", "", line).strip().lower()
+            if hdr.startswith("general"):
+                return "general"
+            if hdr.startswith("md"):
+                return "md"
+            if hdr.startswith("mm"):
+                return "mm"
+            if hdr.startswith("ff"):
+                return "ff"
+            if hdr.startswith("outdated"):
+                return "outdated"
+            return None
+
+        try:
+            with open(self.path, "r") as f:
+                for raw in f:
+                    line = raw.strip()
+                    if not line:
+                        continue  # skip blank lines
+
+                    # Section headers start with '#'
+                    if line.startswith("#"):
+                        maybe = header_to_section(line)
+                        if maybe:
+                            section = maybe
+                        # otherwise it's just an unrecognized comment/header
+                        continue
+
+                    if not section:
+                        # ignore content before any recognized section header
+                        continue
+
+                    # Match lines like: "<number> <key>"
+                    # Examples: "0.25 timestep", "300.0 temperature", "5000 nsteps"
+                    m = re.match(r"^([\d\-.Ee+]+)\s+([A-Za-z_][\w]*)", line)
+                    if not m:
+                        # not a parameter line; skip
+                        continue
+
+                    value_str, key = m.groups()
+                    key = key.lower()
+
+                    # Extract inline comment: text after '#' in the ORIGINAL raw line
+                    hash_idx = raw.find("#")
+                    if hash_idx != -1:
+                        inline_comment = raw[hash_idx + 1 :].strip()
+                    else:
+                        inline_comment = ""
+
+                    # Convert numeric if possible
+                    try:
+                        value: Any = float(value_str)
+                        if isinstance(value, float) and value.is_integer():
+                            value = int(value)
+                    except ValueError:
+                        value = value_str  # keep raw if not numeric
+
+                    # Update backward-compatible dicts
+                    if section == "general":
+                        self.general_parameters[key] = value
+                    elif section == "md":
+                        self.md_parameters[key] = value
+                    elif section == "mm":
+                        self.mm_parameters[key] = value
+                    elif section == "ff":
+                        self.ff_parameters[key] = value
+                    elif section == "outdated":
+                        self.outdated_parameters[key] = value
+
+                    # Record row for DataFrame
+                    rows.append(
+                        {
+                            "section": section,
+                            "key": key,
+                            "value": value,
+                            "inline_comment": inline_comment,
+                        }
+                    )
+
+                    # Count per section
+                    if section in counts:
+                        counts[section] += 1
+                    else:
+                        counts[section] = 1
+
+        except FileNotFoundError:
+            raise FileNotFoundError(f"❌ Control file not found at {self.path}")
+
+        # Build DataFrame (even if empty)
+        df = pd.DataFrame(
+            rows,
+            columns=["section", "key", "value", "inline_comment"],
+        )
+
+        # Flattened metadata: only number of parameters per section
+        meta: dict[str, Any] = {
+            "n_general": counts.get("general", 0),
+            "n_md": counts.get("md", 0),
+            "n_mm": counts.get("mm", 0),
+            "n_ff": counts.get("ff", 0),
+            "n_outdated": counts.get("outdated", 0),
+        }
+
+        return df, meta
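
A minimal usage sketch for ControlHandler (not part of the package diff): it assumes the dataframe() and metadata() accessors that the docstring above attributes to the handler, and the sample control file content is invented for illustration, reusing the example parameter lines from the parser's own comments.

    from pathlib import Path
    from reaxkit.io.handlers.control_handler import ControlHandler

    # Hypothetical control file: "<value> <key>" lines under a '#' section header.
    Path("control").write_text(
        "#MD parameters\n"
        " 0.25 timestep    # MD time step\n"
        "300.0 temperature\n"
        " 5000 nsteps\n"
    )

    handler = ControlHandler("control")   # parses eagerly in __init__
    print(handler.md_parameters)          # {'timestep': 0.25, 'temperature': 300, 'nsteps': 5000}
    print(handler.dataframe())            # columns: section, key, value, inline_comment
    print(handler.metadata())             # {'n_general': 0, 'n_md': 3, 'n_mm': 0, 'n_ff': 0, 'n_outdated': 0}
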
reaxkit/io/handlers/eregime_handler.py
@@ -0,0 +1,122 @@
+"""
+ReaxFF electric-field regime (eregime.in) handler.
+
+This module provides a handler for parsing ReaxFF ``eregime.in`` files,
+which define time-dependent electric-field schedules used in MD runs.
+
+Typical use cases include:
+
+- reading electric-field magnitudes and directions
+- mapping field schedules to simulation iterations
+- converting iteration indices to physical time
+"""
+
+
+from __future__ import annotations
+from pathlib import Path
+from typing import Any, Dict, List, Tuple
+import pandas as pd
+
+from reaxkit.io.base_handler import BaseHandler
+
+
+class EregimeHandler(BaseHandler):
+    """
+    Parser for ReaxFF electric-field schedule files (``eregime.in``).
+
+    This class parses electric-field regime definitions and exposes them
+    as structured tabular data suitable for downstream analysis and
+    visualization.
+
+    Parsed Data
+    -----------
+    Summary table
+        One row per schedule entry, returned by ``dataframe()``, with columns:
+
+        - If the maximum number of field zones is ≤ 1:
+          ["iter", "field_zones", "field_dir", "field"]
+
+        - If multiple field zones are present:
+          ["iter", "field_zones",
+           "field_dir1", "field1",
+           "field_dir2", "field2", ...]
+
+    Metadata
+        Returned by ``metadata()``, containing:
+        ["columns", "max_field_zones", "n_records"]
+
+    Notes
+    -----
+    - Comment lines starting with ``#`` are ignored.
+    - Missing direction/field pairs are padded with ``NaN`` to ensure a
+      rectangular table.
+    - Field directions are stored as strings; field magnitudes are numeric.
+    """
+
+    def __init__(self, file_path: str | Path = "eregime.in"):
+        super().__init__(file_path)
+
+    def _parse(self) -> Tuple[pd.DataFrame, Dict[str, Any]]:
+        rows: List[Dict[str, Any]] = []
+        max_pairs = 0
+
+        with open(self.path, "r") as fh:
+            for raw in fh:
+                s = raw.strip()
+                if not s or s.startswith("#"):
+                    continue
+
+                parts = s.split()
+                if len(parts) < 4:
+                    raise ValueError(
+                        f"Malformed line (need at least iter, field_zones, direction, field): {raw!r}"
+                    )
+
+                # iter and number of zones
+                it = int(float(parts[0]))
+                zones = int(float(parts[1]))
+
+                # parse direction/field pairs
+                tail = parts[2:]
+                if len(tail) % 2 != 0:
+                    raise ValueError(f"Direction/field tokens must be pairs: {raw!r}")
+
+                n_pairs = len(tail) // 2
+                max_pairs = max(max_pairs, zones, n_pairs)
+
+                rec: Dict[str, Any] = {"iter": it, "field_zones": zones}
+
+                for i in range(n_pairs):
+                    d = tail[2 * i]
+                    e = float(tail[2 * i + 1])
+
+                    if max(zones, n_pairs) <= 1:
+                        rec["field_dir"] = d
+                        rec["field"] = e
+                    else:
+                        rec[f"field_dir{i+1}"] = d
+                        rec[f"field{i+1}"] = e
+
+                rows.append(rec)
+
+        if not rows:
+            raise ValueError("No data lines found in eregime file.")
+
+        # Build final column set
+        if max_pairs <= 1:
+            columns = ["iter", "field_zones", "field_dir", "field"]
+        else:
+            columns = ["iter", "field_zones"]
+            for i in range(1, max_pairs + 1):
+                columns += [f"field_dir{i}", f"field{i}"]
+
+        # Normalize each record so all columns exist
+        normed = [{col: r.get(col) for col in columns} for r in rows]
+        df = pd.DataFrame(normed, columns=columns)
+
+        meta = {
+            "columns": list(df.columns),
+            "max_field_zones": int(max_pairs),
+            "n_records": int(len(df)),
+        }
+        return df, meta
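
A similar usage sketch for EregimeHandler (again not from the package itself): dataframe()/metadata() are assumed from the docstring, with parsing presumably triggered lazily by BaseHandler since __init__ does not parse, and the eregime.in content is invented to match the "<iter> <zones> <direction> <field>" layout the parser expects.

    from pathlib import Path
    from reaxkit.io.handlers.eregime_handler import EregimeHandler

    # Hypothetical single-zone field schedule; '#' lines are ignored by the parser.
    Path("eregime.in").write_text(
        "# iter  zones  dir  field\n"
        "    0   1      z   0.00\n"
        " 1000   1      z   0.05\n"
        " 5000   1      z   0.10\n"
    )

    handler = EregimeHandler("eregime.in")
    df = handler.dataframe()      # columns: iter, field_zones, field_dir, field
    print(df)
    print(handler.metadata())     # {'columns': [...], 'max_field_zones': 1, 'n_records': 3}
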