asyncmd 0.3.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- asyncmd/__init__.py +18 -0
- asyncmd/_config.py +26 -0
- asyncmd/_version.py +75 -0
- asyncmd/config.py +203 -0
- asyncmd/gromacs/__init__.py +16 -0
- asyncmd/gromacs/mdconfig.py +351 -0
- asyncmd/gromacs/mdengine.py +1127 -0
- asyncmd/gromacs/utils.py +197 -0
- asyncmd/mdconfig.py +440 -0
- asyncmd/mdengine.py +100 -0
- asyncmd/slurm.py +1199 -0
- asyncmd/tools.py +86 -0
- asyncmd/trajectory/__init__.py +25 -0
- asyncmd/trajectory/convert.py +577 -0
- asyncmd/trajectory/functionwrapper.py +556 -0
- asyncmd/trajectory/propagate.py +937 -0
- asyncmd/trajectory/trajectory.py +1103 -0
- asyncmd/utils.py +148 -0
- asyncmd-0.3.2.dist-info/LICENSE +232 -0
- asyncmd-0.3.2.dist-info/METADATA +179 -0
- asyncmd-0.3.2.dist-info/RECORD +23 -0
- asyncmd-0.3.2.dist-info/WHEEL +5 -0
- asyncmd-0.3.2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1127 @@
|
|
1
|
+
# This file is part of asyncmd.
|
2
|
+
#
|
3
|
+
# asyncmd is free software: you can redistribute it and/or modify
|
4
|
+
# it under the terms of the GNU General Public License as published by
|
5
|
+
# the Free Software Foundation, either version 3 of the License, or
|
6
|
+
# (at your option) any later version.
|
7
|
+
#
|
8
|
+
# asyncmd is distributed in the hope that it will be useful,
|
9
|
+
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
10
|
+
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
11
|
+
# GNU General Public License for more details.
|
12
|
+
#
|
13
|
+
# You should have received a copy of the GNU General Public License
|
14
|
+
# along with asyncmd. If not, see <https://www.gnu.org/licenses/>.
|
15
|
+
import os
|
16
|
+
import copy
|
17
|
+
import shlex
|
18
|
+
import random
|
19
|
+
import string
|
20
|
+
import shutil
|
21
|
+
import typing
|
22
|
+
import asyncio
|
23
|
+
import logging
|
24
|
+
import aiofiles
|
25
|
+
import aiofiles.os
|
26
|
+
import aiofiles.ospath
|
27
|
+
|
28
|
+
from .._config import _SEMAPHORES
|
29
|
+
from ..mdengine import MDEngine, EngineError, EngineCrashedError
|
30
|
+
from ..trajectory.trajectory import Trajectory
|
31
|
+
from .. import slurm
|
32
|
+
from .mdconfig import MDP
|
33
|
+
from .utils import nstout_from_mdp, get_all_traj_parts
|
34
|
+
from ..tools import ensure_executable_available
|
35
|
+
|
36
|
+
|
37
|
+
logger = logging.getLogger(__name__)
|
38
|
+
|
39
|
+
|
40
|
+
class _descriptor_on_instance_and_class:
|
41
|
+
# a descriptor that makes the (default) value of the private attribute
|
42
|
+
# "_name" accessible as "name" on both the class and the instance level
|
43
|
+
# Accessing the default value works from the class-level, i.e. without
|
44
|
+
# instantiating the object, but note that setting on the class level
|
45
|
+
# overwrites the descriptor and does not call __set__
|
46
|
+
# Setting from an instance calls __set__ and therefore only sets
|
47
|
+
# the attribute for the given instance (and also runs our checks)
|
48
|
+
# also see the python docs:
|
49
|
+
# https://docs.python.org/3/howto/descriptor.html#customized-names
|
50
|
+
def __set_name__(self, owner, name):
|
51
|
+
self.public_name = name
|
52
|
+
self.private_name = "_" + name
|
53
|
+
|
54
|
+
def __get__(self, obj, objtype=None):
|
55
|
+
if obj is None:
|
56
|
+
# I (hejung) think if obj is None objtype will always be set
|
57
|
+
# to the class of the obj
|
58
|
+
obj = objtype
|
59
|
+
val = getattr(obj, self.private_name)
|
60
|
+
return val
|
61
|
+
|
62
|
+
def __set__(self, obj, val):
|
63
|
+
setattr(obj, self.private_name, val)
|
64
|
+
|
65
|
+
|
66
|
+
class _descriptor_output_traj_type(_descriptor_on_instance_and_class):
    """Descriptor validating output_traj_type values before storing them."""

    def __set__(self, obj, val):
        # normalize case first so both the check and the stored value
        # are lowercase
        supported = ["trr", "xtc"]
        val = val.lower()
        if val not in supported:
            raise ValueError(f"output_traj_type must be one of {supported}, but was {val}.")
        return super().__set__(obj, val)
|
76
|
+
|
77
|
+
|
78
|
+
class _descriptor_check_executable(_descriptor_on_instance_and_class):
    """
    Descriptor verifying that a value names an available executable.

    Used to make sure the configured gmx grompp and gmx mdrun commands
    can actually be found when they are set.
    """

    def __set__(self, obj, val):
        # only test the first word: mdrun and grompp can both be
        # subcommands of gmx ("gmx mdrun", "gmx grompp")
        executable, _, _ = val.partition(" ")
        ensure_executable_available(executable)
        return super().__set__(obj, val)
|
86
|
+
|
87
|
+
|
88
|
+
# NOTE: with tra we usually mean trr, i.e. a full precision trajectory with velocities
|
89
|
+
class GmxEngine(MDEngine):
|
90
|
+
"""
|
91
|
+
Steer gromacs molecular dynamics simulation from python.
|
92
|
+
|
93
|
+
An async/await enabled wrapper around gromacs grompp and gromacs mdrun.
|
94
|
+
Please use the power of concurrent execution of computationally bound
|
95
|
+
subprocesses responsibly...or crash your workstation ;)
|
96
|
+
The :class:`SlurmGmxEngine` alleviates this problem somewhat by submitting the
|
97
|
+
(computationally expensive) mdruns via SLURM...in that case please have in
|
98
|
+
mind that your colleagues might also want to use the cluster, and also that
|
99
|
+
someone might have set a job/submission limit :)
|
100
|
+
|
101
|
+
Attributes
|
102
|
+
----------
|
103
|
+
grompp_executable : str
|
104
|
+
Name or path to the grompp executable, by default "gmx grompp".
|
105
|
+
mdrun_executable : str
|
106
|
+
Name or path to the mdrun executable, by default "gmx mdrun".
|
107
|
+
grompp_extra_args : str
|
108
|
+
Can be used to pass extra command line arguments to grompp calls,
|
109
|
+
e.g. "-maxwarn 1".
|
110
|
+
mdrun_extra_args : str
|
111
|
+
Can be used to pass extra command line arguments to mdrun calls,
|
112
|
+
e.g. "-ntomp 8".
|
113
|
+
output_traj_type : str
|
114
|
+
Sets the trajectory type (ending) this engine returns/looks for.
|
115
|
+
Note that we simply ignore all other trajectories, i.e. depending on
|
116
|
+
the MDP settings we will still write xtc and trr, but return only the
|
117
|
+
trajectories with matching ending.
|
118
|
+
"""
|
119
|
+
|
120
|
+
# local prepare and option to run a local gmx (mainly for testing)
|
121
|
+
_grompp_executable = "gmx grompp"
|
122
|
+
grompp_executable = _descriptor_check_executable()
|
123
|
+
_mdrun_executable = "gmx mdrun"
|
124
|
+
mdrun_executable = _descriptor_check_executable()
|
125
|
+
# extra_args are expected to be str and will be appended to the end of the
|
126
|
+
# respective commands after a separating space,
|
127
|
+
# i.e. cmd = base_cmd + " " + extra_args
|
128
|
+
grompp_extra_args = ""
|
129
|
+
mdrun_extra_args = ""
|
130
|
+
# file ending of the returned output trajectories,
|
131
|
+
# exposed as property output_traj_type
|
132
|
+
# NOTE: this will be the traj we count frames for and check the mdp, etc.
|
133
|
+
# However this does not mean that no other trajs will/can be written,
|
134
|
+
# we simply ignore them
|
135
|
+
_output_traj_type = "xtc"
|
136
|
+
output_traj_type = _descriptor_output_traj_type()
|
137
|
+
# See the notes below for the SlurmGmxEngine on why this conversion factor
|
138
|
+
# is needed (there), here we have it only for consistency
|
139
|
+
_mdrun_time_conversion_factor = 1. # run mdrun for 1. * time-limit
|
140
|
+
|
141
|
+
|
142
|
+
def __init__(self,
             mdconfig: MDP,
             gro_file: str,
             top_file: str,
             ndx_file: str | None = None,
             **kwargs) -> None:
    """
    Initialize a :class:`GmxEngine`.

    Note that every attribute can be set at initialization by passing a
    keyword argument with its name, e.g. mdrun_extra_args="-ntomp 2" to
    instruct gromacs to use 2 openMP threads.

    Parameters
    ----------
    mdconfig: MDP
        The molecular dynamics parameters.
    gro_file: str
        Absolute or relative path to a gromacs structure file.
    top_file: str
        Absolute or relative path to a gromacs topolgy (.top) file.
    ndx_file: str or None
        Optional, absolute or relative path to a gromacs index file.
    """
    # allow setting any known attribute via kwargs, checking that the
    # value matches the type of the attribute's default
    sentinel = object()
    for key, value in kwargs.items():
        default = getattr(self, key, sentinel)
        if default is sentinel:
            # not a previously defined attribute, warn and ignore it
            logger.warning("Ignoring unknown keyword-argument %s.", key)
            continue
        if not isinstance(value, type(default)):
            raise TypeError(f"Setting attribute {key} with "
                            + f"mismatching type ({type(value)}). "
                            + f" Default type is {type(default)}."
                            )
        # value has the same type as the default, so set it
        setattr(self, key, value)
    # NOTE: set the files after the kwargs loop to be sure they are what
    #       we set/expect
    # TODO: store a hash/the file contents for gro, top, ndx to check
    #       against when loading from storage/restart? If so, do it in
    #       the properties (and write one hashfunc for all)!
    self.gro_file = gro_file    # property setter stores self._gro_file
    self.top_file = top_file    # property setter stores self._top_file
    self.ndx_file = ndx_file    # property setter stores self._ndx_file
    # dirty hack: re-assigning runs the descriptors' checks, so even the
    # class-level default executables are verified to be available
    self.mdrun_executable = self.mdrun_executable
    self.grompp_executable = self.grompp_executable
    # the mdp property setter performs basic checks, e.g. whether the
    # output_traj_type is actually written with the current mdp settings
    self.mdp = mdconfig
    # initialize internal state variables
    self._workdir = None
    self._prepared = False
    # NOTE: frames_done and steps_done do not have an easy relation!
    #       See the steps_done property docstring for more!
    self._frames_done = 0   # frames produced since last call to prepare
    self._steps_done = 0    # integration steps since last call to prepare
    self._time_done = 0.    # integration time since last prepare, in ps
    self._nstout = None     # read lazily from the mdp when first needed
    self._proc = None       # Popen handle for gmx mdrun, marks "running"
    # these are set by prepare() and used by run_XX()
    self._simulation_part = None
    self._deffnm = None
    # tpr for trajectory (part), will become the structure/topology file
    self._tpr = None
|
217
|
+
|
218
|
+
def __getstate__(self) -> dict:
    """
    Return a picklable copy of the engine's state.

    The subprocess handle is dropped (set to None): it can not be
    pickled, and the process is probably dead on unpickling anyway.
    """
    picklable_state = dict(self.__dict__)
    picklable_state["_proc"] = None
    return picklable_state
|
223
|
+
|
224
|
+
@property
def current_trajectory(self) -> Trajectory | None:
    """
    Return the last finished trajectory (part).

    Returns
    -------
    Trajectory
        Last complete trajectory produced by this engine.
    """
    # _simulation_part == 0 means no run was started since the last
    # prepare() (gmx always starts with part0001 when no checkpoint is
    # passed), so there can be no trajectory yet
    if self._simulation_part == 0:
        return None
    # _tpr and _deffnm are set in prepare(), so both being set means a
    # run was at least prepared; additionally require that no mdrun is
    # currently in flight, i.e. the part is most likely complete
    prepared = self._tpr is not None and self._deffnm is not None
    if not prepared or self.running:
        return None
    traj_fname = (f"{self._deffnm}"
                  + f"{self._num_suffix(self._simulation_part)}"
                  + f".{self.output_traj_type}")
    return Trajectory(
        trajectory_files=os.path.join(self.workdir, traj_fname),
        # NOTE: self._tpr already contains the path to workdir
        structure_file=self._tpr,
        nstout=self.nstout,
    )
|
266
|
+
|
267
|
+
@property
def ready_for_run(self) -> bool:
    """Whether this engine is ready to run, i.e. generate a trajectory."""
    # prepared and no mdrun currently in flight
    return self._prepared and not self.running

@property
def running(self) -> bool:
    """Whether this engine is currently running/generating a trajectory."""
    # no process handle means run() was never called; a set returncode
    # (whatever its value, also for crashes) means the process ended
    return self._proc is not None and self._proc.returncode is None
|
284
|
+
|
285
|
+
@property
def workdir(self) -> str:
    """The current working directory of the engine."""
    return self._workdir

@workdir.setter
def workdir(self, value: str) -> None:
    if not os.path.isdir(value):
        raise TypeError(f"Not a directory ({value}).")
    # stored as a relative path
    self._workdir = os.path.relpath(value)
|
296
|
+
|
297
|
+
@property
def gro_file(self) -> str:
    """The (path to the) gro file this engine uses/used to call grompp."""
    return self._gro_file

@gro_file.setter
def gro_file(self, val: str) -> None:
    # NOTE: fixed the return annotation (was "-> str" although the
    #       setter returns nothing), now consistent with the
    #       top_file/ndx_file setters
    if not os.path.isfile(val):
        raise FileNotFoundError(f"gro file not found: {val}")
    # stored as a relative path
    val = os.path.relpath(val)
    self._gro_file = val
|
308
|
+
|
309
|
+
@property
def top_file(self) -> str:
    """The (path to the) top file this engine uses/used to call grompp."""
    return self._top_file

@top_file.setter
def top_file(self, val: str) -> None:
    # must exist; stored as a relative path
    if not os.path.isfile(val):
        raise FileNotFoundError(f"top file not found: {val}")
    self._top_file = os.path.relpath(val)
|
320
|
+
|
321
|
+
@property
def ndx_file(self) -> str | None:
    """The (path to the) ndx file this engine uses/used to call grompp."""
    return self._ndx_file

@ndx_file.setter
def ndx_file(self, val: str | None) -> None:
    # gmx does not require an index file, so None is accepted and
    # stored as-is
    if val is not None:
        if not os.path.isfile(val):
            raise FileNotFoundError(f"ndx file not found: {val}")
        val = os.path.relpath(val)
    self._ndx_file = val
|
335
|
+
|
336
|
+
@property
def mdp(self) -> MDP:
    """The configuration of this engine as a :class:`MDP` object."""
    return self._mdp

@mdp.setter
def mdp(self, val: MDP) -> None:
    if not isinstance(val, MDP):
        raise TypeError(f"Value must be of type {MDP}.")
    # nsteps is always forced to -1 (infinite): the run length is
    # controlled via the arguments of the run method instead
    try:
        if val["nsteps"] != -1:
            # NOTE: fixed typo in log message ("infinte" -> "infinite")
            logger.info("Changing nsteps from %s to -1 (infinite), the run "
                        "length is controlled via arguments of the run "
                        "method.",
                        val["nsteps"])
            val["nsteps"] = -1
    except KeyError:
        # nsteps was not defined at all
        logger.info("Setting previously undefined nsteps to -1 (infinite).")
        val["nsteps"] = -1
    # check that the mdp actually produces a trajectory of our
    # output_traj_type; nstout_from_mdp raises a descriptive error if
    # the mdp generates no output for the given traj-format
    # TODO: ensure that x-out and v-out/f-out are the same (if applicable)?
    _ = nstout_from_mdp(mdp=val, traj_type=self.output_traj_type)
    self._mdp = val

# alias for mdp to mdconfig (since some users may expect mdconfig)
mdconfig = mdp
|
365
|
+
|
366
|
+
@property
def dt(self) -> float:
    """Integration timestep in ps."""
    return self._mdp["dt"]

@property
def time_done(self) -> float:
    """
    Integration time since last call to prepare in ps.

    Takes into account 'tinit' from the .mdp file if set.
    """
    try:
        start_time = self._mdp["tinit"]
    except KeyError:
        # tinit not set in the mdp
        start_time = 0.
    return self._time_done - start_time
|
383
|
+
|
384
|
+
# TODO/FIXME: we assume that all output frequencies are multiples of
#             the smallest one when determining the number of frames etc
# TODO: check that nstxout == nstvout?!
@property
def nstout(self) -> int:
    """Smallest output frequency for current output_traj_type."""
    # computed lazily from the mdp on first access, then cached
    if self._nstout is None:
        self._nstout = nstout_from_mdp(self._mdp,
                                       traj_type=self.output_traj_type)
    return self._nstout
|
395
|
+
|
396
|
+
@property
def steps_done(self) -> int:
    """
    Number of integration steps done since last call to :meth:`prepare`.

    NOTE: steps != frames * nstout.
    On the relation between frames_done and steps_done: when `nsteps`
    is passed to `run()`, frames_done usually equals
    steps_done/nstout + 1, because the initial/final configuration is
    written twice (the first/last step is then always an output step).
    When running for a walltime instead (no `nsteps`), gromacs can
    potentially stop at every neighbor-search step (where it also
    can/will write a checkpoint): if that step is not a trajectory
    output step, nothing is written to the traj and the "+1" rule for
    the doubly written initial/final configuration is off (it is then a
    'normal' configuration written just once). If, however, neighbor
    search and trajectory output fall on the same step, the
    configuration is again written twice (as with `nsteps` specified).
    """
    return self._steps_done

@property
def frames_done(self) -> int:
    """
    Number of frames produced since last call to :meth:`prepare`.

    NOTE: frames != steps / nstout.
    See the steps_done docstring for more.
    """
    return self._frames_done
|
428
|
+
|
429
|
+
async def apply_constraints(self, conf_in, conf_out_name, wdir="."):
    """
    Apply constraints to given configuration.

    Parameters
    ----------
    conf_in : asyncmd.Trajectory
        A (one-frame) trajectory, only the first frame will be used.
    conf_out_name : str
        Output path for the constrained configuration.
    wdir : str, optional
        Working directory for the constraint engine, by default ".",
        a subfolder with random name will be created.

    Returns
    -------
    Trajectory
        The constrained configuration.
    """
    # a zero-step MD run with constraints enabled and no new velocities
    # does exactly what we want
    return await self._0step_md(
        conf_in=conf_in,
        conf_out_name=conf_out_name,
        wdir=wdir,
        constraints=True,
        generate_velocities=False,
    )
|
454
|
+
|
455
|
+
async def generate_velocities(self, conf_in, conf_out_name, wdir=".",
                              constraints=True):
    """
    Generate random Maxwell-Boltzmann velocities for given configuration.

    Parameters
    ----------
    conf_in : asyncmd.Trajectory
        A (one-frame) trajectory, only the first frame will be used.
    conf_out_name : str
        Output path for the velocity randomized configuration.
    wdir : str, optional
        Working directory for the constraint engine, by default ".",
        a subfolder with random name will be created.
    constraints : bool, optional
        Whether to also apply constraints, by default True.

    Returns
    -------
    Trajectory
        The configuration with random velocities and optionally
        constraints enforced.
    """
    # a zero-step MD run with gen-vel enabled (and optionally
    # constraints) does exactly what we want
    return await self._0step_md(
        conf_in=conf_in,
        conf_out_name=conf_out_name,
        wdir=wdir,
        constraints=constraints,
        generate_velocities=True,
    )
|
484
|
+
|
485
|
+
async def _0step_md(self, conf_in, conf_out_name, wdir,
                    constraints: bool, generate_velocities: bool):
    """
    Run a zero-step gmx mdrun to apply constraints and/or generate velocities.

    Parameters
    ----------
    conf_in : asyncmd.Trajectory
        Input configuration, its first trajectory file is passed to grompp.
    conf_out_name : str
        Output path for the resulting configuration; a relative path is
        interpreted relative to wdir.
    wdir : str
        Working directory; a randomly named subfolder is created inside
        it (and removed again on success).
    constraints : bool
        Whether to apply constraints.
    generate_velocities : bool
        Whether to draw new Maxwell-Boltzmann velocities.

    Returns
    -------
    Trajectory
        The resulting (one-frame) configuration.

    Raises
    ------
    EngineCrashedError
        If mdrun returns a non-zero exit code.
    """
    if (self.workdir is not None) and (wdir == "."):
        # use our own working directory if known/set
        wdir = self.workdir
    if not os.path.isabs(conf_out_name):
        # assume conf_out is meant relative to wdir if not an abspath
        conf_out_name = os.path.join(wdir, conf_out_name)
    # work in a randomly named subdirectory of wdir: deleting it is easy
    # and multiple engines can do 0-step MD runs in the same wdir
    # FIX: string.ascii_letters already contains both ascii_lowercase
    # and ascii_uppercase; the previous concatenation only duplicated
    # every letter (the draw is uniform over letters either way)
    run_name = "".join(random.choices(string.ascii_letters, k=6))
    swdir = os.path.join(wdir, run_name)
    await aiofiles.os.mkdir(swdir)
    constraints_mdp = copy.deepcopy(self._mdp)
    constraints_mdp["continuation"] = "no" if constraints else "yes"
    constraints_mdp["gen-vel"] = "yes" if generate_velocities else "no"
    # write both trr and xtc so we can read the final configuration
    # whatever ending conf_out_name has
    constraints_mdp["nstxout"] = 1
    constraints_mdp["nstvout"] = 1
    constraints_mdp["nstfout"] = 1
    constraints_mdp["nstxout-compressed"] = 1
    if generate_velocities:
        # make sure we draw a new/different random number for gen-vel
        constraints_mdp["gen-seed"] = -1
    constraints_mdp["nsteps"] = 0
    await self._run_grompp(workdir=swdir, deffnm=run_name,
                           trr_in=conf_in.trajectory_files[0],
                           tpr_out=os.path.join(swdir, f"{run_name}.tpr"),
                           mdp_obj=constraints_mdp)
    # TODO: this is a bit hacky and should probably not be necessary:
    # keep a reference to the 'old' self._proc and restore it when done,
    # because _start_gmx_mdrun sets self._proc to the constraints engine
    # (and probably no engine can be running when/if we are able to call
    # apply_constraints anyway?)
    old_proc_val = self._proc
    cmd_str = self._mdrun_cmd(tpr=os.path.join(swdir, f"{run_name}.tpr"),
                              workdir=swdir,
                              deffnm=run_name)
    logger.debug("About to execute gmx mdrun command for constraints and"
                 "/or velocity generation: %s",
                 cmd_str)
    returncode = None
    stderr = bytes()
    stdout = bytes()
    await self._acquire_resources_gmx_mdrun()
    try:
        await self._start_gmx_mdrun(cmd_str=cmd_str, workdir=swdir,
                                    run_name=run_name,
                                    # TODO/FIXME: we hardcode that these
                                    # runs can not take longer than 15
                                    # minutes (which should be plenty
                                    # for randomizing velocities and/or
                                    # applying constraints?!)
                                    walltime=0.25,
                                    )
        # self._proc is set by _start_gmx_mdrun!
        stdout, stderr = await self._proc.communicate()
        returncode = self._proc.returncode
    except asyncio.CancelledError:
        self._proc.kill()
        raise  # reraise the error for encompassing coroutines
    else:
        if returncode != 0:
            raise EngineCrashedError(
                f"Non-zero (or no) exit code from mdrun (= {returncode}).\n"
                + "\n--------\n"
                + f"stderr: \n--------\n {stderr.decode()}"
                + "\n--------\n"
                + f"stdout: \n--------\n {stdout.decode()}"
                )
        # the output trajectory holds only one configuration: move it to
        # its final name/place
        shutil.move(os.path.join(swdir, (f"{run_name}{self._num_suffix(1)}"
                                         + f".{conf_out_name.split('.')[-1]}")
                                 ),
                    conf_out_name)
        shutil.rmtree(swdir)  # remove the whole directory we used as wdir
        return Trajectory(
            trajectory_files=conf_out_name,
            # structure file of conf_in, because we delete the other one
            # together with the folder
            structure_file=conf_in.structure_file,
            nstout=1,
        )
    finally:
        await self._cleanup_gmx_mdrun(workdir=swdir, run_name=run_name)
        self._proc = old_proc_val
|
579
|
+
|
580
|
+
async def prepare(self, starting_configuration, workdir, deffnm):
    """
    Prepare a fresh simulation (starting with part0001).

    Can also be used to continue a simulation from a checkpoint file with
    matching name ('deffnm.cpt'). In that case, the `simulation-part` mdp
    option must match the number of the next part to be generated, e.g. it
    must be 2 if the last part generated was part0001. The previously
    generated trajectory files do not need to exist.

    Parameters
    ----------
    starting_configuration : asyncmd.Trajectory or None
        A (trr) trajectory of which we take the first frame as starting
        configuration (including velocities) or None, then the initial
        configuration is the gro-file.
    workdir : str
        Absolute or relative path to an existing directory to use as
        working directory.
    deffnm : str
        The name (prefix) to use for all files.
    """
    # deffnm is the default name/prefix for all outfiles (as in gmx)
    self._deffnm = deffnm
    self.workdir = workdir  # setter checks that it is a directory
    # The 'simulation-part' mdp option decides at which .partXXXX the
    # gmx numbering starts, but gromacs ignores it when no -cpi
    # [CheckPointIn] is given. We do the same: warn if we detect it is
    # set, check for a checkpoint with the matching name [deffnm.cpt],
    # and if present set our internal part counter to the mdp value - 1
    # (we increase *before* each simulation part).
    cpt_fname = os.path.join(self.workdir, f"{deffnm}.cpt")
    try:
        sim_part = self._mdp["simulation-part"]
    except KeyError:
        # the gmx mdp default is 1, i.e. it starts at part0001; since we
        # add one at the start of each run the numberings match up and
        # we get tra=`...part0001.trr` from gmx and
        # confout=`...part0001.gro` from our naming
        self._simulation_part = 0
    else:
        if sim_part > 1:
            if not os.path.isfile(cpt_fname):
                raise ValueError("'simulation-part' > 1 is only possible "
                                 + "if starting from a checkpoint, but "
                                 + f"{cpt_fname} does not exist."
                                 )
            starting_configuration = cpt_fname
            logger.warning("Starting value for 'simulation-part' > 1 (=%s)"
                           " and existing checkpoint file found (%s). "
                           "Using the checkpoint file as "
                           "`starting_configuration`.",
                           sim_part, cpt_fname)
        # always subtract one from sim_part so we get 0 if it was 1
        self._simulation_part = sim_part - 1
    # check for previous runs with the same deffnm in workdir
    # NOTE: we only check for checkpoints and trajectory parts: gmx will
    #       move everything else out of the way, and only those two can
    #       trip us up and get the part numbering wrong
    trajs_with_same_deffnm = await get_all_traj_parts(
        folder=self.workdir,
        deffnm=deffnm,
        traj_type=self.output_traj_type,
    )
    # NOTE: comparing against the starting simulation_part is enough: if
    #       we found a checkpoint (above) and simulation_part > 0 we
    #       assume the checkpoint matches the correct part-number
    if len(trajs_with_same_deffnm) > self._simulation_part:
        raise ValueError(f"There are files in workdir ({self.workdir}) "
                         + f"with the same deffnm ({deffnm}). Use the "
                         + "`prepare_from_files()` method to continue an "
                         + "existing MD run or change the workdir and/or "
                         + "deffnm.")
    # actual preparation of the MD run: sort out starting configuration...
    if ((starting_configuration is None)
            # None enables start from the initial structure file ('-c')
            or isinstance(starting_configuration, str)
            # str enables passing the path to the full precision
            # trajectory directly, i.e. trr, cpt, or tng
            ):
        trr_in = starting_configuration
    elif isinstance(starting_configuration, Trajectory):
        # enable passing of asyncmd.Trajectories as starting_configuration
        trr_in = starting_configuration.trajectory_files[0]
    else:
        # NOTE: fixed typos in this message ("precission" -> "precision",
        #       "trajectrtory" -> "trajectory")
        raise TypeError("starting_configuration must be None, a wrapped "
                        "full precision trajectory, or the path to a "
                        "full precision trajectory (trr, cpt, or tng).")
    # ...and call grompp to get a tpr
    # remember the tpr path to use as structure file for out trajs
    self._tpr = os.path.join(self.workdir, deffnm + ".tpr")
    await self._run_grompp(workdir=self.workdir, deffnm=self._deffnm,
                           trr_in=trr_in, tpr_out=self._tpr,
                           mdp_obj=self._mdp)
    if not await aiofiles.ospath.isfile(self._tpr):
        # better be safe than sorry :)
        raise RuntimeError("Something went wrong generating the tpr. "
                           f"{self._tpr} does not seem to be a file.")
    # make sure we can not mistake a previous Popen for current mdrun
    self._proc = None
    self._frames_done = 0  # (re-)set progress counters
    self._steps_done = 0
    self._time_done = 0.
    self._prepared = True
|
686
|
+
|
687
|
+
async def _run_grompp(self, workdir, deffnm, trr_in, tpr_out, mdp_obj):
    """
    Write the mdp file and call gmx grompp to generate the tpr.

    Parameters
    ----------
    workdir : str
        Directory in which grompp is executed and the mdp files are written.
    deffnm : str
        Name prefix for the written mdp files.
    trr_in : str or None
        Input configuration handed to the grompp command, or None.
    tpr_out : str
        Path of the tpr file to generate.
    mdp_obj : MDP
        The MD parameters to write out and compile into the tpr.

    Raises
    ------
    RuntimeError
        If grompp returns a non-zero exit code.
    """
    # NOTE: file paths from workdir and deffnm
    mdp_in = os.path.join(workdir, deffnm + ".mdp")
    # always overwrite existing mdps: the prepare methods already check
    # for leftover files with the same deffnm, so a lone leftover mdp
    # can (and should) just be overwritten without raising an error
    async with _SEMAPHORES["MAX_FILES_OPEN"]:
        mdp_obj.write(mdp_in, overwrite=True)
    mdp_out = os.path.join(workdir, deffnm + "_mdout.mdp")
    cmd_str = self._grompp_cmd(mdp_in=mdp_in, tpr_out=tpr_out,
                               workdir=workdir,
                               trr_in=trr_in, mdp_out=mdp_out)
    logger.debug("About to execute gmx grompp command: %s", cmd_str)
    # 3 file descriptors: stdin, stdout, stderr
    # NOTE: the max-open-files semaphore counts for 3 open files, so we
    #       only need to acquire it once
    await _SEMAPHORES["MAX_FILES_OPEN"].acquire()
    # FIX: initialize before the try block so the CancelledError handler
    # does not raise UnboundLocalError when cancellation happens while
    # the subprocess is still being created
    grompp_proc = None
    try:
        grompp_proc = await asyncio.create_subprocess_exec(
            *shlex.split(cmd_str),
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            cwd=workdir,
        )
        stdout, stderr = await grompp_proc.communicate()
        return_code = grompp_proc.returncode
        logger.debug("gmx grompp command returned return code %s.",
                     return_code)
        #logger.debug("grompp stdout:\n%s", stdout.decode())
        #logger.debug("grompp stderr:\n%s", stderr.decode())
        if return_code != 0:
            # this assumes POSIX
            raise RuntimeError("grompp had non-zero return code "
                               + f"({return_code}).\n"
                               + "\n--------\n"
                               + f"stderr: \n--------\n {stderr.decode()}"
                               + "\n--------\n"
                               + f"stdout: \n--------\n {stdout.decode()}"
                               )
    except asyncio.CancelledError as e:
        if grompp_proc is not None:
            grompp_proc.kill()  # kill grompp
        raise e from None  # and reraise the cancellation
    finally:
        # release the semaphore
        _SEMAPHORES["MAX_FILES_OPEN"].release()
|
733
|
+
|
734
|
+
    async def prepare_from_files(self, workdir: str, deffnm: str):
        """
        Prepare continuation run starting from the last part found in workdir.

        Expects the checkpoint file and last trajectory part to exist, will
        (probably) fail otherwise. Restores the engine state (mdp, tpr path,
        part number, frames/steps/time done) from the files on disk.

        Parameters
        ----------
        workdir : str
            Absolute or relative path to an existing directory to use as
            working directory.
        deffnm : str
            The name (prefix) to use for all files.
        """
        self.workdir = workdir
        previous_trajs = await get_all_traj_parts(self.workdir, deffnm=deffnm,
                                                  traj_type=self.output_traj_type,
                                                  )
        # parts are named '{deffnm}.part{num:04d}.{ext}', so the four digit
        # part number starts 5 characters (".part") after deffnm ends
        last_trajname = os.path.split(previous_trajs[-1].trajectory_files[0])[-1]
        last_partnum = int(last_trajname[len(deffnm) + 5:len(deffnm) + 9])
        if last_partnum != len(previous_trajs):
            logger.warning("Not all previous trajectory parts seem to be "
                           + "present in the current workdir. Assuming the "
                           + "highest part number corresponds to the "
                           + "checkpoint file and continuing anyway."
                           )
        # load the 'old' mdp_in
        async with _SEMAPHORES["MAX_FILES_OPEN"]:
            self._mdp = MDP(os.path.join(self.workdir, f"{deffnm}.mdp"))
        self._deffnm = deffnm
        # Note that we dont need to explicitly check for the tpr existing,
        # if it does not exist we will err when getting the traj lengths
        self._tpr = os.path.join(self.workdir, deffnm + ".tpr")
        self._simulation_part = last_partnum
        # len(t), because for frames we do not care if first frame is in traj
        self._frames_done = sum(len(t) for t in previous_trajs)
        # steps done is the more reliable info if we want to know how many
        # integration steps we did
        self._steps_done = previous_trajs[-1].last_step
        self._time_done = previous_trajs[-1].last_time
        # make sure we can not mistake a previous Popen for the current mdrun
        self._proc = None
        self._prepared = True
|
777
|
+
|
778
|
+
# NOTE: this enables us to reuse run and prepare methods in SlurmGmxEngine,
|
779
|
+
# i.e. we only need to overwite the next 3 functions to write out the slurm
|
780
|
+
# submission script, submit the job and allocate/release different resources
|
781
|
+
async def _start_gmx_mdrun(self, cmd_str, workdir, **kwargs):
|
782
|
+
proc = await asyncio.create_subprocess_exec(
|
783
|
+
*shlex.split(cmd_str),
|
784
|
+
stdout=asyncio.subprocess.PIPE,
|
785
|
+
stderr=asyncio.subprocess.PIPE,
|
786
|
+
cwd=workdir,
|
787
|
+
)
|
788
|
+
self._proc = proc
|
789
|
+
|
790
|
+
async def _acquire_resources_gmx_mdrun(self, **kwargs):
|
791
|
+
# *always* called before any gmx_mdrun, used to reserve resources
|
792
|
+
# for local gmx we need 3 file descriptors: stdin, stdout, stderr
|
793
|
+
# (one max files semaphore counts for 3 open files)
|
794
|
+
await _SEMAPHORES["MAX_FILES_OPEN"].acquire()
|
795
|
+
|
796
|
+
async def _cleanup_gmx_mdrun(self, **kwargs):
|
797
|
+
# *always* called after any gmx_mdrun, use to release resources
|
798
|
+
# release the semaphore for the 3 file descriptors
|
799
|
+
_SEMAPHORES["MAX_FILES_OPEN"].release()
|
800
|
+
|
801
|
+
    async def run(self, nsteps=None, walltime=None, steps_per_part=False):
        """
        Run simulation for specified number of steps or/and a given walltime.

        Note that you can pass both nsteps and walltime and the simulation will
        stop on the condition that is reached first.

        Parameters
        ----------
        nsteps : int or None
            Integration steps to run for either in total [as measured since the
            last call to `self.prepare()`] or in the newly generated trajectory
            part, see also the steps_per_part argument.
        walltime : float or None
            (Maximum) walltime in hours, `None` means unlimited.
        steps_per_part : bool
            If True nsteps are the steps to do in the new trajectory part, else
            the total number of steps since the last call to `prepare()` are
            counted, default False.

        Returns
        -------
        Trajectory or None
            The newly generated trajectory part, or None if no (more) steps
            needed to be done.

        Raises
        ------
        RuntimeError
            If the engine is not ready for a run (no prepare call or still
            running).
        ValueError
            If neither nsteps nor walltime is given, if nsteps is not a
            multiple of nstout, or if fewer total steps than already done are
            requested.
        EngineCrashedError
            If mdrun returns a non-zero (or no) exit code.
        """
        # generic run method is actually easier to implement for gmx :D
        if not self.ready_for_run:
            raise RuntimeError("Engine not ready for run. Call self.prepare() "
                               + "and/or check if it is still running.")
        if all(kwarg is None for kwarg in [nsteps, walltime]):
            raise ValueError("Neither steps nor walltime given.")
        if nsteps is not None:
            nsteps = int(nsteps)
            # nsteps must be compatible with the output frequency so the last
            # frame of the part is written out
            if nsteps % self.nstout != 0:
                raise ValueError(f"nsteps ({nsteps}) must be a multiple of "
                                 + f"nstout ({self.nstout}).")
            if not steps_per_part:
                # convert total steps to the remaining steps for this part
                nsteps = nsteps - self.steps_done
            if nsteps == 0:
                # Return None instead of raising an error, this makes it nicer
                # to use the run method with walltime and total nsteps inside
                # while loops, i.e. we can just call traj = e.run(...) and then
                # while traj is not None: traj = e.run()
                # TODO: this will make it complicated to ever use the GmxEngine
                #       for zero-step simulations to only apply constraints,
                #       but we do have the _0_step_md methods for that...?!
                return None
            elif nsteps < 0:
                raise ValueError(f"nsteps is too small ({nsteps} steps for "
                                 + "this part). Can not travel backwards in "
                                 + "time...")

        self._simulation_part += 1
        cmd_str = self._mdrun_cmd(tpr=self._tpr, workdir=self.workdir,
                                  deffnm=self._deffnm,
                                  # TODO: use more/any other kwargs?
                                  maxh=walltime, nsteps=nsteps)
        logger.debug("About to execute gmx mdrun command: %s", cmd_str)
        returncode = None
        stderr = bytes()
        stdout = bytes()
        await self._acquire_resources_gmx_mdrun()
        try:
            await self._start_gmx_mdrun(cmd_str=cmd_str, workdir=self.workdir,
                                        walltime=walltime,)
            # self._proc is set by _start_gmx_mdrun!
            stdout, stderr = await self._proc.communicate()
            returncode = self._proc.returncode
        except asyncio.CancelledError as e:
            if self._proc is not None:
                # make sure _proc is set, it can still be None if we get
                # canceled while _start_gmx_mdrun is setting up the process
                self._proc.kill()
            raise e from None  # reraise the error for encompassing coroutines
        else:
            logger.debug("gmx mdrun command returned return code %s.",
                         returncode)
            #logger.debug("gmx mdrun stdout:\n%s", stdout.decode())
            #logger.debug("gmx mdrun stderr:\n%s", stderr.decode())
            if returncode == 0:
                self._frames_done += len(self.current_trajectory)
                # dont care if we did a little more and only the checkpoint knows
                # we will only find out with the next trajectory part anyways
                self._steps_done = self.current_trajectory.last_step
                self._time_done = self.current_trajectory.last_time
                return self.current_trajectory
            else:
                raise EngineCrashedError(
                    f"Non-zero (or no) exit code from mdrun (= {returncode}).\n"
                    + "\n--------\n"
                    + f"stderr: \n--------\n {stderr.decode()}"
                    + "\n--------\n"
                    + f"stdout: \n--------\n {stdout.decode()}"
                                         )
        finally:
            # always release the resources acquired for this mdrun
            await self._cleanup_gmx_mdrun(workdir=self.workdir)
|
892
|
+
|
893
|
+
async def run_steps(self, nsteps, steps_per_part=False):
|
894
|
+
"""
|
895
|
+
Run simulation for specified number of steps.
|
896
|
+
|
897
|
+
Parameters
|
898
|
+
----------
|
899
|
+
nsteps : int or None
|
900
|
+
Integration steps to run for either in total [as measured since the
|
901
|
+
last call to `self.prepare()`] or in the newly generated trajectory
|
902
|
+
part, see also the steps_per_part argument.
|
903
|
+
steps_per_part : bool
|
904
|
+
If True nsteps are the steps to do in the new trajectory part, else
|
905
|
+
the total number of steps since the last call to `prepare()` are
|
906
|
+
counted, default False.
|
907
|
+
"""
|
908
|
+
return await self.run(nsteps=nsteps, steps_per_part=steps_per_part)
|
909
|
+
|
910
|
+
async def run_walltime(self, walltime):
|
911
|
+
"""
|
912
|
+
Run simulation for a given walltime.
|
913
|
+
|
914
|
+
Parameters
|
915
|
+
----------
|
916
|
+
walltime : float or None
|
917
|
+
(Maximum) walltime in hours, `None` means unlimited.
|
918
|
+
"""
|
919
|
+
return await self.run(walltime=walltime)
|
920
|
+
|
921
|
+
def _num_suffix(self, sim_part: int) -> str:
|
922
|
+
# construct gromacs num part suffix from simulation_part
|
923
|
+
num_suffix = f".part{sim_part:04d}"
|
924
|
+
return num_suffix
|
925
|
+
|
926
|
+
def _grompp_cmd(self, mdp_in, tpr_out, workdir, trr_in=None, mdp_out=None):
|
927
|
+
# all args are expected to be file paths
|
928
|
+
# make sure we use the right ones, i.e. relative to workdir
|
929
|
+
if workdir is not None:
|
930
|
+
mdp_in = os.path.relpath(mdp_in, start=workdir)
|
931
|
+
tpr_out = os.path.relpath(tpr_out, start=workdir)
|
932
|
+
gro_file = os.path.relpath(self.gro_file, start=workdir)
|
933
|
+
top_file = os.path.relpath(self.top_file, start=workdir)
|
934
|
+
cmd = f"{self.grompp_executable} -f {mdp_in} -c {gro_file}"
|
935
|
+
cmd += f" -p {top_file}"
|
936
|
+
if self.ndx_file is not None:
|
937
|
+
if workdir is not None:
|
938
|
+
ndx_file = os.path.relpath(self.ndx_file, start=workdir)
|
939
|
+
else:
|
940
|
+
ndx_file = self.ndx_file
|
941
|
+
cmd += f" -n {ndx_file}"
|
942
|
+
if trr_in is not None:
|
943
|
+
# input trr is optional
|
944
|
+
# TODO/FIXME?!
|
945
|
+
# TODO/NOTE: currently we do not pass '-time', i.e. we just use the
|
946
|
+
# gmx default frame selection: last frame from trr
|
947
|
+
if workdir is not None:
|
948
|
+
trr_in = os.path.relpath(trr_in, start=workdir)
|
949
|
+
cmd += f" -t {trr_in}"
|
950
|
+
if mdp_out is None:
|
951
|
+
# find out the name and dir of the tpr to put the mdp next to it
|
952
|
+
head, tail = os.path.split(tpr_out)
|
953
|
+
name = tail.split(".")[0]
|
954
|
+
mdp_out = os.path.join(head, name + ".mdout.mdp")
|
955
|
+
if workdir is not None:
|
956
|
+
mdp_out = os.path.relpath(mdp_out, start=workdir)
|
957
|
+
cmd += f" -o {tpr_out} -po {mdp_out}"
|
958
|
+
if self.grompp_extra_args != "":
|
959
|
+
# add extra args string if it is not empty
|
960
|
+
cmd += f" {self.grompp_extra_args}"
|
961
|
+
return cmd
|
962
|
+
|
963
|
+
def _mdrun_cmd(self, tpr, workdir, deffnm=None, maxh=None, nsteps=None):
|
964
|
+
# use "-noappend" to avoid appending to the trajectories when starting
|
965
|
+
# from checkpoints, instead let gmx create new files with .partXXXX suffix
|
966
|
+
if workdir is not None:
|
967
|
+
tpr = os.path.relpath(tpr, start=workdir)
|
968
|
+
if deffnm is None:
|
969
|
+
# find out the name of the tpr and use that as deffnm
|
970
|
+
head, tail = os.path.split(tpr)
|
971
|
+
deffnm = tail.split(".")[0]
|
972
|
+
#cmd = f"{self.mdrun_executable} -noappend -deffnm {deffnm} -cpi"
|
973
|
+
# NOTE: the line above does the same as the four below before the if-clauses
|
974
|
+
# however gromacs -deffnm is deprecated (and buggy),
|
975
|
+
# so we just make our own 'deffnm', i.e. we name all files the same
|
976
|
+
# except for the ending but do so explicitly
|
977
|
+
cmd = f"{self.mdrun_executable} -noappend -s {tpr}"
|
978
|
+
# always add the -cpi option, this lets gmx figure out if it wants
|
979
|
+
# to start from a checkpoint (if there is one with deffnm)
|
980
|
+
# cpi (CheckPointIn) is ignored if not present,
|
981
|
+
# cpo (CheckPointOut) is the name to use for the (final) checkpoint
|
982
|
+
cmd += f" -cpi {deffnm}.cpt -cpo {deffnm}.cpt"
|
983
|
+
cmd += f" -o {deffnm}.trr -x {deffnm}.xtc -c {deffnm}.confout.gro"
|
984
|
+
cmd += f" -e {deffnm}.edr -g {deffnm}.log"
|
985
|
+
if maxh is not None:
|
986
|
+
maxh = self._mdrun_time_conversion_factor * maxh
|
987
|
+
cmd += f" -maxh {maxh}"
|
988
|
+
if nsteps is not None:
|
989
|
+
cmd += f" -nsteps {nsteps}"
|
990
|
+
if self.mdrun_extra_args != "":
|
991
|
+
cmd += f" {self.mdrun_extra_args}"
|
992
|
+
return cmd
|
993
|
+
|
994
|
+
|
995
|
+
class SlurmGmxEngine(GmxEngine):
    __doc__ = GmxEngine.__doc__  # inherit the user-facing docstring as-is
    # use local prepare (i.e. grompp) of GmxEngine then submit run to slurm
    # we reuse the `GmxEngine._proc` to keep a reference to a `SlurmProcess`
    # which emulates the API of `asyncio.subprocess.Process` and can (for our
    # purposes) be used as a drop-in replacement, therefore we only need to
    # reimplement `_start_gmx_mdrun()`, `_acquire_resources_gmx_mdrun()` and
    # `_cleanup_gmx_mdrun()` to have a working SlurmGmxEngine
    # take submit script as str/file, use pythons .format to insert stuff!
    # TODO: use SLURM also for grompp?! (would make stuff faster?)
    #       I (hejung) think probably not by much because we already use
    #       asyncios subprocess for grompp (i.e. do it asyncronous) and grompp
    #       will most likely not take much resources on the login (local) node

    # NOTE: these are possible options, but they result in added dependencies
    #        - jinja2 templates for slurm submission scripts?
    #          (does not look like we gain flexibility but we get more work,
    #           so probably not?!)
    #        - pyslurm for job status checks?!
    #          (it seems submission is frickly/impossible in pyslurm,
    #           so also probably not?!)

    _mdrun_executable = "gmx_mpi mdrun"  # MPI as default for clusters
    _mdrun_time_conversion_factor = 0.99  # run mdrun for 0.99 * time-limit
    # NOTE: The rationale behind the (slightly) reduced mdrun time compared to
    #       the slurm job time-limit is that sometimes setting up and finishing
    #       up the slurm job takes some time (e.g. activating modules, sourcing
    #       environments, etc.) and this can result in jobs that are cancelled
    #       due to reaching the maximum time limit in slurm. This in turn means
    #       that we would believe the job failed because it got cancelled
    #       although the mdrun was successfull.

    def __init__(self, mdconfig, gro_file, top_file, sbatch_script, ndx_file=None,
                 **kwargs):
        """
        Initialize a :class:`SlurmGmxEngine`.

        Parameters
        ----------
        mdconfig : MDP
            The molecular dynamics parameters.
        gro_file: str
            Absolute or relative path to a gromacs structure file.
        top_file: str
            Absolute or relative path to a gromacs topolgy (.top) file.
        sbatch_script : str
            Absolute or relative path to a slurm sbatch script or a string with
            the content of the sbatch script. Note that the submission script
            must contain the following placeholders (see also the examples
            folder):

             - {mdrun_cmd} : Replaced by the command to run mdrun

        ndx_file: str or None
            Optional, absolute or relative path to a gromacs index file.

        Note that all attributes can be set at intialization by passing keyword
        arguments with their name, e.g. mdrun_extra_args="-ntomp 2" to instruct
        gromacs to use 2 openMP threads.
        """
        super().__init__(mdconfig=mdconfig, gro_file=gro_file,
                         top_file=top_file, ndx_file=ndx_file, **kwargs)
        # we expect sbatch_script to be a str,
        # but it could be either the path to a submit script or the content of
        # the submission script directly
        # we decide what it is by checking for the shebang
        if not sbatch_script.startswith("#!"):
            # probably path to a file, lets try to read it
            with open(sbatch_script, 'r') as f:
                sbatch_script = f.read()
        self.sbatch_script = sbatch_script

    def _name_from_name_or_none(self, run_name: typing.Optional[str]) -> str:
        """Return run_name if given, else a name built from deffnm + part number."""
        if run_name is not None:
            name = run_name
        else:
            # create a name from deffnm and partnum
            name = self._deffnm + self._num_suffix(sim_part=self._simulation_part)
        return name

    async def _start_gmx_mdrun(self, cmd_str, workdir, walltime=None,
                               run_name=None, **kwargs):
        """
        Write the sbatch script for this mdrun and submit it to slurm.

        The resulting SlurmProcess is stored as self._proc (it emulates the
        asyncio.subprocess.Process API, see the class-level NOTE).
        """
        name = self._name_from_name_or_none(run_name=run_name)
        # substitute placeholders in submit script
        script = self.sbatch_script.format(mdrun_cmd=cmd_str)
        # write it out
        fname = os.path.join(workdir, name + ".slurm")
        if await aiofiles.ospath.exists(fname):
            # Note: we dont raise an error because we want to be able to rerun
            #       a canceled/crashed run in the same directory without the
            #       need to remove files
            logger.error("Overwriting existing submission file (%s).",
                         fname)
        async with _SEMAPHORES["MAX_FILES_OPEN"]:
            async with aiofiles.open(fname, 'w') as f:
                await f.write(script)
        self._proc = await slurm.create_slurmprocess_submit(
                                                jobname=name,
                                                sbatch_script=fname,
                                                workdir=workdir,
                                                time=walltime,
                                                stdfiles_removal="success",
                                                stdin=None,
                                                            )

    async def _acquire_resources_gmx_mdrun(self, **kwargs):
        """Acquire one unit of the SLURM_MAX_JOB semaphore (if it is set)."""
        if _SEMAPHORES["SLURM_MAX_JOB"] is not None:
            logger.debug("SLURM_MAX_JOB semaphore is %s before acquiring.",
                         _SEMAPHORES['SLURM_MAX_JOB'])
            await _SEMAPHORES["SLURM_MAX_JOB"].acquire()
        else:
            logger.debug("SLURM_MAX_JOB semaphore is None")

    async def _cleanup_gmx_mdrun(self, workdir, run_name=None, **kwargs):
        """Release the SLURM_MAX_JOB semaphore and remove the sbatch script."""
        if _SEMAPHORES["SLURM_MAX_JOB"] is not None:
            _SEMAPHORES["SLURM_MAX_JOB"].release()
        # remove the sbatch script
        name = self._name_from_name_or_none(run_name=run_name)
        fname = os.path.join(workdir, name + ".slurm")
        try:
            # Note: the 0step MD removes the whole folder in which it runs
            #       (including the sbatch script)
            await aiofiles.os.remove(fname)
        except FileNotFoundError:
            pass

    # TODO: do we even need/want this?
    @property
    def slurm_job_state(self) -> str | None:
        """The state of the slurm job as reported by slurm."""
        if self._proc is None:
            return None
        return self._proc.slurm_job_state
|