mdkits 0.1.30__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mdkits might be problematic.
- mdkits/cli/extract.py +27 -16
- mdkits/md_cli/hb_distribution.py +1 -1
- mdkits/md_cli/md_cli.py +2 -0
- mdkits/md_cli/rdf.py +17 -3
- mdkits/md_cli/vac.py +68 -0
- {mdkits-0.1.30.dist-info → mdkits-0.2.0.dist-info}/METADATA +1 -1
- {mdkits-0.1.30.dist-info → mdkits-0.2.0.dist-info}/RECORD +10 -14
- mdkits/cli/,hb_distribution_down.py +0 -114
- mdkits/cli/hartree_potential.py +0 -59
- mdkits/cli/hartree_potential_ave.py +0 -84
- mdkits/cli/hb.py +0 -101
- mdkits/cli/packmol_input.py +0 -76
- {mdkits-0.1.30.dist-info → mdkits-0.2.0.dist-info}/LICENSE +0 -0
- {mdkits-0.1.30.dist-info → mdkits-0.2.0.dist-info}/WHEEL +0 -0
- {mdkits-0.1.30.dist-info → mdkits-0.2.0.dist-info}/entry_points.txt +0 -0
mdkits/cli/extract.py
CHANGED
@@ -9,11 +9,12 @@ import MDAnalysis
 from MDAnalysis import Universe


-def write_to_xyz(u, frames, o, cut=None):
-
+def write_to_xyz(u, frames, o, select, cut=None):
+    ag = u.select_atoms(select)
+    with MDAnalysis.Writer(o, ag.atoms.n_atoms, format='XYZ') as w:
         for ts in u.trajectory:
             if ts.frame in frames:
-                w.write(
+                w.write(ag)
     if cut:
         with open(o, 'r') as fi, open(o+'t', 'w') as fo:
             for i, line in enumerate(fi):
@@ -22,13 +23,18 @@ def write_to_xyz(u, frames, o, cut=None):
         os.replace(o+'t', o)


-def write_to_xyz_s(u, frames, cut=None):
+def write_to_xyz_s(u, frames, select, cut=None):
     index = 0
+    ag = u.select_atoms(select)
+    if select:
+        dir = f'./coord/{"_".join(select.split())}'
+    else:
+        dir = './coord/all'
     for ts in u.trajectory:
         if ts.frame in frames:
-            o = f'
-            with MDAnalysis.Writer(o,
-                w.write(
+            o = f'{dir}/coord_{index:03d}'
+            with MDAnalysis.Writer(o, ag.atoms.n_atoms, format='XYZ') as w:
+                w.write(ag)
             index += 1
     if cut:
         with open(o, 'r') as fi, open(o+'t', 'w') as fo:
@@ -39,10 +45,10 @@ def write_to_xyz_s(u, frames, cut=None):

 @click.command(name='extract')
 @click.argument('input_file_name', type=click.Path(exists=True), default=os_operation.default_file_name('*-pos-1.xyz', last=True))
-@click.option('-o', type=str, help='output file name', default='extracted.xyz', show_default=True)
 @click.option('-r', type=arg_type.FrameRange, help='frame range to slice', default='-1', show_default=True)
 @click.option('-c', help='output a coord.xyz', is_flag=True)
-
+@click.option("--select", type=str, help="select atoms to extract")
+def main(input_file_name, r, c, select):
     """
     extract frames in trajectory file
     """
@@ -63,16 +69,21 @@ def main(input_file_name, o, r, c):
     cut = None

     if len(r) == 3 and r[-1] is not None:
-        if
-
+        if select:
+            dir = f'./coord/{"_".join(select.split())}'
+        else:
+            dir = './coord/all'
+        if not os.path.exists(dir):
+            os.makedirs(dir)
         else:
             import shutil
-            shutil.rmtree(
-            os.makedirs(
-        write_to_xyz_s(u, frames, cut=cut)
-        click.echo(os.path.abspath(
+            shutil.rmtree(dir)
+            os.makedirs(dir)
+        write_to_xyz_s(u, frames, select, cut=cut)
+        click.echo(os.path.abspath(dir))
     else:
-
+        o = f"{os.path.basename(u.filename).split('.')[0]}_{'_'.join([str(i) for i in r])}_{'_'.join(select.split()) if select else 'all'}.xyz"
+        write_to_xyz(u, frames, o, select, cut=cut)
         click.echo(os.path.abspath(o))

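In short, 0.2.0 drops the fixed -o/extracted.xyz output: the output name is now derived from the trajectory name, the frame range, and the new --select atom selection. A minimal sketch of the MDAnalysis pattern the rewritten helpers rely on (the file names and selection string here are illustrative, not mdkits defaults):

import MDAnalysis
from MDAnalysis import Universe

u = Universe("md-pos-1.xyz")            # example trajectory name
ag = u.select_atoms("name O H")          # value that would be passed via --select
frames = {0, 10, 20}                     # frames chosen from the -r range

with MDAnalysis.Writer("extracted_O_H.xyz", ag.atoms.n_atoms, format="XYZ") as w:
    for ts in u.trajectory:
        if ts.frame in frames:
            w.write(ag)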
mdkits/md_cli/hb_distribution.py
CHANGED
@@ -5,7 +5,7 @@ import click
 import MDAnalysis
 from MDAnalysis import Universe
 from MDAnalysis.analysis.base import AnalysisBase
-from mdkits.util import
+from mdkits.util import numpy_geo, encapsulated_mda
 import warnings, sys
 from .setting import common_setting
 warnings.filterwarnings("ignore")
mdkits/md_cli/md_cli.py
CHANGED
@@ -5,6 +5,7 @@ from mdkits.md_cli import (
     angle,
     density,
     hb_distribution,
+    vac,
     rdf,
     msd,
     monitor,
@@ -21,6 +22,7 @@ cli.add_command(density.main)
 cli.add_command(dipole.main)
 cli.add_command(angle.main)
 cli.add_command(hb_distribution.main)
+cli.add_command(vac.main)
 cli.add_command(rdf.main)
 cli.add_command(msd.main)
 cli.add_command(monitor.main)
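The md_cli.py change is pure plumbing: import the new vac module and register its command object on the click group. For reference, a minimal standalone sketch of that registration pattern (the names are illustrative, not mdkits' actual objects):

import click

@click.group()
def cli():
    """Toy command group standing in for mdkits' md_cli."""

@click.command(name="vac")
def vac_main():
    """Toy subcommand standing in for mdkits.md_cli.vac.main."""
    click.echo("vac subcommand invoked")

cli.add_command(vac_main)   # after this, `cli vac` dispatches to vac_main

if __name__ == "__main__":
    cli()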
mdkits/md_cli/rdf.py
CHANGED
@@ -2,9 +2,20 @@ import MDAnalysis as mda
 from MDAnalysis.analysis import rdf
 import numpy as np
 import click
+from scipy import integrate
 from mdkits.util import arg_type


+def calculate_Gab(r_values, gab_values):
+    dr = r_values[1] - r_values[0]
+
+    integrand = 4 * np.pi * r_values**2 * gab_values
+
+    G_ab = np.cumsum(integrand) * dr
+
+    return G_ab
+
+
 @click.command(name="rdf")
 @click.argument("filename", type=click.Path(exists=True))
 @click.option('--cell', type=arg_type.Cell, help='set cell, a list of lattice, --cell x,y,z or x,y,z,a,b,c')
@@ -18,10 +29,13 @@ def main(filename, cell, group, range, r):
     u.dimensions = cell
     o = f"rdf_{'_'.join(group).replace(' ', '_')}.dat"

+    rho = 32/(9.86**3)
+
     group1 = u.select_atoms(group[0])
     group2 = u.select_atoms(group[1])

-    crdf = rdf.InterRDF(group1, group2, verbose=True, range=(range[0], range[1]), norm='
+    crdf = rdf.InterRDF(group1, group2, verbose=True, range=(range[0], range[1]), norm='rdf')
+

     if r is not None:
         if len(r) == 2:
@@ -31,8 +45,8 @@ def main(filename, cell, group, range, r):
     else:
         crdf.run()

-    combin = np.column_stack((crdf.results.bins, crdf.results.rdf))
-    np.savetxt(o, combin, header="A\tgr", fmt="%.5f", delimiter='\t')
+    combin = np.column_stack((crdf.results.bins, crdf.results.rdf, calculate_Gab(crdf.results.bins, crdf.results.rdf)*rho))
+    np.savetxt(o, combin, header="A\tgr\tNr", fmt="%.5f", delimiter='\t')


 if __name__ == "__main__":
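The new calculate_Gab helper accumulates 4πr²g(r) with a simple rectangle rule; multiplied by the hard-coded number density rho = 32/(9.86³) Å⁻³ (32 water molecules in a 9.86 Å cube), the extra Nr column is the running coordination number N(r) = 4πρ∫₀ʳ g(r′)r′² dr′. A small self-contained check of that relation (values are illustrative; scipy's cumulative_trapezoid is one thing the newly imported scipy.integrate could be used for):

import numpy as np
from scipy import integrate

rho = 32 / (9.86 ** 3)                 # number density hard-coded in rdf.py (Angstrom^-3)
r = np.linspace(0.05, 6.0, 120)        # example bin centres (Angstrom)
g = np.ones_like(r)                    # pretend g(r) = 1 just to exercise the formula

# Rectangle rule, as calculate_Gab does:
n_rect = rho * np.cumsum(4 * np.pi * r**2 * g) * (r[1] - r[0])
# Trapezoid rule, a slightly better estimate of the same integral:
n_trap = rho * integrate.cumulative_trapezoid(4 * np.pi * r**2 * g, r, initial=0)

# For g(r) = 1 both should approach the analytic result 4/3*pi*r^3*rho.
print(n_rect[-1], n_trap[-1], 4 / 3 * np.pi * r[-1]**3 * rho)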
mdkits/md_cli/vac.py
ADDED
@@ -0,0 +1,68 @@
+import numpy as np
+import click
+import MDAnalysis
+from MDAnalysis import Universe
+from MDAnalysis.analysis.base import AnalysisBase
+from mdkits.util import os_operation, arg_type
+
+
+class Velocity_AutoCorrelation(AnalysisBase):
+    def __init__(self, filename, select, dt=0.001):
+        u = Universe(filename)
+        u.trajectory.ts.dt = dt
+        self.u = u
+        self.atomgroup = u.select_atoms(select)
+
+        super(Velocity_AutoCorrelation, self).__init__(self.atomgroup.universe.trajectory, verbose=True)
+
+    def _prepare(self):
+        self.cvv = []
+        self.v0 = self.atomgroup.positions
+        self.normalize = 1/np.sum(self.v0*self.v0)
+        self.cvv.append(np.sum(self.v0*self.v0)*self.normalize)
+
+
+    def _append(self, cvv):
+        self.cvv.append(cvv*self.normalize)
+
+    def _single_frame(self):
+        cvv = np.sum(self.atomgroup.positions*self.v0)
+        self._append(cvv)
+
+    def _conclude(self):
+        self.cvv = np.array(self.cvv)
+
+        sf = self.cvv.shape[0]
+        t = 1/sf
+        f = np.fft.fft(self.cvv).real
+
+        faxis = np.fft.fftfreq(sf, d=t)
+        pf = np.where(faxis>0)
+        faxis = faxis[pf]
+        f = f[pf]
+
+        combine = np.column_stack((np.arange(len(self.cvv)), self.cvv))
+
+        np.savetxt('vac.dat', combine, fmt='%.5f', header="frame\tvac")
+        np.savetxt('f.dat', np.column_stack((faxis, f)), fmt='%.5f')
+
+
+@click.command(name="vac")
+@click.argument("filename", type=click.Path(exists=True), default=os_operation.default_file_name('*-vel-1.xyz', last=True))
+@click.option("--select", type=str, default="all", help="atom selection", show_default=True)
+@click.option('-r', type=arg_type.FrameRange, help='range of frame to analysis')
+def main(filename, select, r):
+    """analysis velocity autocorrelation function"""
+    a = Velocity_AutoCorrelation(filename, select)
+
+    if r is not None:
+        if len(r) == 2:
+            a.run(start=r[0], stop=r[1])
+        elif len(r) == 3:
+            a.run(start=r[0], stop=r[1], step=r[2])
+    else:
+        a.run()
+
+
+if __name__ == '__main__':
+    main()
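The new vac command estimates a single-time-origin, normalised velocity autocorrelation C(t) = ⟨v(0)·v(t)⟩ / ⟨v(0)·v(0)⟩ (it reads atomgroup.positions, which for the default *-vel-1.xyz velocity dump presumably hold velocity components rather than positions) and Fourier-transforms it into the spectrum written to f.dat. A rough standalone sketch of the same quantities, with synthetic velocities standing in for a real trajectory:

import numpy as np

rng = np.random.default_rng(0)
n_frames, n_atoms = 2000, 64
dt = 0.001                                     # ps per frame, vac.py's default
v = rng.normal(size=(n_frames, n_atoms, 3))    # stand-in for velocities from *-vel-1.xyz

v0 = v[0]
norm = np.sum(v0 * v0)
cvv = np.array([np.sum(v[i] * v0) for i in range(n_frames)]) / norm   # single-origin VACF

spectrum = np.fft.rfft(cvv).real
freq = np.fft.rfftfreq(n_frames, d=dt)         # in 1/ps; averaging over many time origins would reduce noise

np.savetxt("vac_check.dat", np.column_stack((np.arange(n_frames) * dt, cvv)), header="t/ps\tC(t)")
np.savetxt("spectrum_check.dat", np.column_stack((freq, spectrum)))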
{mdkits-0.1.30.dist-info → mdkits-0.2.0.dist-info}/RECORD
CHANGED

@@ -9,14 +9,9 @@ mdkits/build_cli/build_surface.py,sha256=9GGpmQlCG6vxNevMyWcfI2EL_JiAAKIhxNokZyE
 mdkits/build_cli/cut_surface.py,sha256=_f0t2OyBKb8ZV04b3GezfSDUN4XFd5kQM-yWbSmOofs,2742
 mdkits/build_cli/supercell.py,sha256=3iTTt3DHaERWDFonhBRS0oqWhjFh6pbS5SpIR-O1gYg,1034
 mdkits/build_cli/water.xyz,sha256=ByLDz-rYhw_wLPBU78lIQHe4s4Xf5Ckjft-Dus3czIc,171
-"mdkits/cli/,hb_distribution_down.py",sha256=i3NguzGebqCgy4uuVBeFajZRZnXtjhsJBPDGDdumlWA,4733
 mdkits/cli/convert.py,sha256=OmQ-7hmw0imgfgCJaWFEy3ePixsU7VKf0mGuJ6jRpn0,1795
 mdkits/cli/data.py,sha256=FGA4S9Cfo6WUJBSPWKOJrrZXHo_Qza-jNG1P_Dw7yi4,3262
-mdkits/cli/extract.py,sha256=
-mdkits/cli/hartree_potential.py,sha256=XcJfsJ5Y2d5MQfD45p06_gV1fTJbDSrNhCnZ3Sz2Vb0,2233
-mdkits/cli/hartree_potential_ave.py,sha256=25oy3QsgIdxrTFpTqpnGvLAheb-d6poeLMN7iuGT3Xk,3335
-mdkits/cli/hb.py,sha256=lADr4tlctbtQ3_f_UpznkLnSI0MJlAT-pknEf_dwrnU,5330
-mdkits/cli/packmol_input.py,sha256=76MjjMMRDaW2q459B5mEpXDYSSn14W-JXudOOsx-8E4,2849
+mdkits/cli/extract.py,sha256=JUDqASPcI0PJy6h0tyOBA1vL1AIgFo5ldpoIsYNU2M8,2910
 mdkits/cli/plot.py,sha256=1yh5dq5jnQDuyWlxV_9g5ztsnuFHVu4ouYQ9VJYSrUU,8938
 mdkits/config/__init__.py,sha256=ZSwmnPK02LxJLMgcYmNb-tIOk8fEuHf5jpqD3SDHWLg,1039
 mdkits/config/settings.yml,sha256=PY7u0PbFLuxSnd54H5tI9oMjUf-mzyADqSZtm99BwG0,71
@@ -26,12 +21,13 @@ mdkits/dft_cli/pdos.py,sha256=ALAZ5uOaoT0UpCyKYleWxwmk569HMzKTTK-lMJeicM8,1411
 mdkits/md_cli/angle.py,sha256=cfhI6dsn_hIy-YXSTXemu1m1O_l2HuL_x6zx_3uL-Uw,5450
 mdkits/md_cli/density.py,sha256=_w6UunY6alTp0jLa8cyqR8sSYubN0XbM-PDF4SkzsJU,5058
 mdkits/md_cli/dipole.py,sha256=tXTO8CZAQTVY55GwuXWJNGo7EQ4Tb2611g5IHucdlec,4836
-mdkits/md_cli/hb_distribution.py,sha256=
-mdkits/md_cli/md_cli.py,sha256=
+mdkits/md_cli/hb_distribution.py,sha256=ForMmNjfJxpXHqo1Au0OXOmwgvHxIuVR8qnpu3iS7Eg,7897
+mdkits/md_cli/md_cli.py,sha256=2vH04o_3d5kCJsn3qEq-iUPhebKJOrS-e7HJtyiZTiQ,571
 mdkits/md_cli/monitor.py,sha256=JNEgz5RGbFn4x_E85pAiPUY1NVIyZ3b2vjpBk_d1dR8,4536
 mdkits/md_cli/msd.py,sha256=v-9TPKBGHz6ce2PUwexrekVq_9eiutIOQYaw582yN30,965
-mdkits/md_cli/rdf.py,sha256=
+mdkits/md_cli/rdf.py,sha256=p4HMMYZLfFRPnGx7YHQU6kZnMAfoL6vOyOVpZhfdBfM,1712
 mdkits/md_cli/setting.py,sha256=mxMTYpm6DUjMt9hOKsJbBSKwCqzMilOR0bo1azSdJP0,846
+mdkits/md_cli/vac.py,sha256=9YZ9QviJ9VPfF29_cFKUYPwxkszYZ7cHTB7AMgUV4go,2147
 mdkits/md_cli/wrap.py,sha256=YdUpvhRyKn7bYnIAVgP39qItPdrEoTeJl55TmbS7Qqk,1044
 mdkits/mdkits.py,sha256=EiAt7dxGTaHuuj7bCNxgAqZbX0i3sldO0mBxOG-aMnY,595
 mdkits/util/.fig_operation.py.swp,sha256=iZYqdYMj4UKS1rmbXv8Ve2FcVBcNljX7Y43-neMdPSk,12288
@@ -45,8 +41,8 @@ mdkits/util/numpy_geo.py,sha256=zkh3uNC3HGHIwtHOmiDXborab5_40PmaJF54jSQ-njU,3874
 mdkits/util/os_operation.py,sha256=ErN2ExjX9vZRfPe3ypsj4eyoQTEePqzlEX0Xm1N4lL4,980
 mdkits/util/out_err.py,sha256=7vGDI7wVoJWe1S0BDbcq-UC2KAhblCzg-NAYZKBZ4lo,900
 mdkits/util/structure_parsing.py,sha256=mRPMJeih3O-ST7HeETDvBEkfV-1psT-XgxyYgDadV0U,4152
-mdkits-0.
-mdkits-0.
-mdkits-0.
-mdkits-0.
-mdkits-0.
+mdkits-0.2.0.dist-info/entry_points.txt,sha256=xoWWZ_yL87S501AzCO2ZjpnVuYkElC6z-8J3tmuIGXQ,44
+mdkits-0.2.0.dist-info/LICENSE,sha256=VLaqyB0r_H7y3hUntfpPWcE3OATTedHWI983htLftcQ,1081
+mdkits-0.2.0.dist-info/METADATA,sha256=tDq50lJJD0-8TPS7UKxZiXVD7EaK7n4xf_RuZheZldY,10963
+mdkits-0.2.0.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
+mdkits-0.2.0.dist-info/RECORD,,
mdkits/cli/,hb_distribution_down.py
DELETED

@@ -1,114 +0,0 @@
-#!/usr/bin/env python3
-
-import numpy as np
-import argparse
-import MDAnalysis
-from MDAnalysis import Universe
-from MDAnalysis.analysis.base import AnalysisBase
-from util import cp2k_input_parsing
-import warnings
-warnings.filterwarnings("ignore")
-
-
-class Hb_distribution(AnalysisBase):
-    def __init__(self, filename, cell, surface, dt=0.001, hb_distance=3.5, hb_angle=35, bin_size=0.2):
-        u = Universe(filename)
-        u.trajectory.ts.dt = dt
-        u.dimensions = cell
-        self.u = u
-        self.atomgroup = u.select_atoms("all")
-        self.hb_distance = hb_distance
-        self.hb_angle = hb_angle
-        self.bin_size = bin_size
-        self.surface = surface
-        self.frame_count = 0
-        super(Hb_distribution, self).__init__(self.atomgroup.universe.trajectory, verbose=True)
-
-    def _prepare(self):
-        bin_num = int(self.u.dimensions[2] / self.bin_size) + 2
-        self.donor = np.zeros(bin_num, dtype=np.float64)
-
-    def _append(self, hb_d):
-        bins_d = np.floor(hb_d / self.bin_size).astype(int) + 1
-
-        bins_d = bins_d[bins_d < len(self.donor)]
-
-        np.add.at(self.donor, bins_d, 1)
-
-        self.frame_count += 1
-
-    def _single_frame(self):
-        o_group = self.atomgroup.select_atoms("name O")
-        o_pair = MDAnalysis.lib.distances.capped_distance(o_group.positions, o_group.positions, min_cutoff=0, max_cutoff=self.hb_distance, box=self.u.dimensions, return_distances=False)
-
-        o0 = o_group[o_pair[:, 0]]
-        o1 = o_group[o_pair[:, 1]]
-
-        o0h1 = self.atomgroup[o0.indices + 1]
-        o0h2 = self.atomgroup[o0.indices + 2]
-
-        angle_o0h1_o0_o1 = np.degrees(
-            MDAnalysis.lib.distances.calc_angles(o0h1.positions, o0.positions, o1.positions, box=self.u.dimensions)
-        )
-        angle_o0h2_o0_o1 = np.degrees(
-            MDAnalysis.lib.distances.calc_angles(o0h2.positions, o0.positions, o1.positions, box=self.u.dimensions)
-        )
-
-        mid_z = (self.surface[0] + self.surface[1]) / 2
-
-        condition_d = ((angle_o0h1_o0_o1 < self.hb_angle) | (angle_o0h2_o0_o1 < self.hb_angle)) & (o0.positions[:, 2] - o1.positions[:, 2] > 0)
-        #condition_d = ((angle_o0h1_o0_o1 < self.hb_angle) | (angle_o0h2_o0_o1 < self.hb_angle)) & (((o0.positions[:, 2] < mid_z) & (o0.positions[:, 2] - o1.positions[:, 2] > 0)) | ((o0.positions[:, 2] > mid_z) & (o0.positions[:, 2] - o1.positions[:, 2] < 0)))
-        #condition_a = ((angle_o1h1_o1_o0 < self.hb_angle) | (angle_o1h2_o1_o0 < self.hb_angle)) & (((o1.positions[:, 2] < mid_z) & (o1.positions[:, 2] - o0.positions[:, 2] > 1.5)) | ((o1.positions[:, 2] > mid_z) & (o1.positions[:, 2] - o0.positions[:, 2] < -1.5)))
-
-        hb_d = (o0.positions[:, 2][condition_d] + o1.positions[:, 2][condition_d]) / 2
-        #hb_a = (o0.positions[:, 2][condition_a] + o1.positions[:, 2][condition_a]) / 2
-
-        self._append(hb_d)
-
-    def _conclude(self):
-        if self.frame_count > 0:
-            average_donor = self.donor / self.frame_count
-
-            bins_z = np.arange(len(self.donor)) * self.bin_size
-
-            lower_z, upper_z = self.surface
-            mask = (bins_z >= lower_z) & (bins_z <= upper_z)
-            filtered_bins_z = bins_z[mask] - lower_z
-            filtered_average_donor = average_donor[mask]
-
-            combined_data = np.column_stack((filtered_bins_z, filtered_average_donor))
-
-            filename = 'hb_distribution_down.dat'
-            np.savetxt(filename, combined_data, header="Z\tDonor", fmt='%.5f', delimiter='\t')
-
-
-def parse_data(s):
-    return [float(x) for x in s.replace(',', ' ').split()]
-
-
-def parse_r(s):
-    return [int(x) for x in s.replace(':', ' ').split()]
-
-
-def parse_argument():
-    parser = argparse.ArgumentParser(description="analysis hb distribution")
-    parser.add_argument('filename', type=str, help='filename to analysis')
-    parser.add_argument('--cp2k_input_file', type=str, help='input file name of cp2k, default is "input.inp"', default='input.inp')
-    parser.add_argument('-r', type=parse_r, help='range of analysis', default=[0, -1, 1])
-    parser.add_argument('--cell', type=parse_data, help='set cell, a list of lattice, --cell x,y,z or x,y,z,a,b,c')
-    parser.add_argument('--surface', type=parse_data, help='[down_surface_z, up_surface_z]')
-    parser.add_argument('--hb_param', type=parse_data, help='[hb_distance, hb_angle], default is [3.5, 35]', default=[3.5, 35])
-
-    return parser.parse_args()
-
-
-def main():
-    args = parse_argument()
-    cell = cp2k_input_parsing.get_cell(args.cp2k_input_file, args.cell)
-
-    hb_dist = Hb_distribution(args.filename, cell, args.surface, hb_distance=args.hb_param[0], hb_angle=args.hb_param[1])
-    hb_dist.run(start=args.r[0], stop=args.r[1], step=args.r[2])
-
-
-if __name__ == '__main__':
-    main()
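The removed script classified donor hydrogen bonds with the usual geometric criterion: an O···O distance below 3.5 Å and an H-O···O angle below 35°, binned along z between the two surfaces. A compact sketch of that criterion for a single candidate pair, using plain numpy (the coordinates are made up; the MDAnalysis-based version above vectorises the same test over all pairs):

import numpy as np

hb_distance, hb_angle = 3.5, 35.0          # the defaults used by the removed script

o_donor = np.array([0.00, 0.00, 10.0])     # donor oxygen (made-up coordinates, Angstrom)
h_donor = np.array([0.95, 0.10, 10.1])     # one of its hydrogens
o_accept = np.array([2.80, 0.30, 10.4])    # candidate acceptor oxygen

d_oo = np.linalg.norm(o_accept - o_donor)

# angle at the donor oxygen between O-H and O...O
v_oh = h_donor - o_donor
v_oo = o_accept - o_donor
cos_theta = np.dot(v_oh, v_oo) / (np.linalg.norm(v_oh) * np.linalg.norm(v_oo))
theta = np.degrees(np.arccos(np.clip(cos_theta, -1.0, 1.0)))

is_hbond = (d_oo < hb_distance) and (theta < hb_angle)
print(d_oo, theta, is_hbond)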
mdkits/cli/hartree_potential.py
DELETED
@@ -1,59 +0,0 @@
-#!/usr/bin/env python3
-
-################################################
-# averange cp2k output(or some else file correspond to ase.io.read_cube_data) hartree.cube to z coordinate with python
-## file path is need to pay attention
-## cycle parameter is need to pay attention
-## buck range is need to pay attention
-################################################
-
-from numpy import empty, array, mean, append, concatenate
-from argparse import ArgumentParser
-from util import encapsulated_ase, os_operation
-
-
-def array_type(string):
-    number_list = string.split(',')
-    number_array = array(number_list, dtype=float)
-    return number_array
-
-
-def buck_potential(xaxe, potential, range):
-    mix = concatenate((xaxe.reshape(-1, 1), potential.reshape(-1, 1)), axis=1)
-    mask = (mix[:,0] >= range[0]) & (mix[:,0] <=range[1])
-    buck_potential = mix[mask]
-    ave_potential = mean(buck_potential[:,1])
-    return ave_potential
-
-
-# set argument
-parser = ArgumentParser(description='to handle cp2k output file hartree cube, name should be "hartree-*.cube"')
-parser.add_argument('file_name', type=str, nargs='?', help='hartree cube file', default=os_operation.default_file_name('*-v_hartree-1_*.cube', last=True))
-parser.add_argument('-b', '--buck_range', type=array_type, help='parameter to calculate mean value of buck', default=None)
-parser.add_argument('-o', type=str, help='output file name, default is "out.put"', default='hartree.out')
-
-args = parser.parse_args()
-
-
-## init output potential file's shape, and define a z axe
-init_array = encapsulated_ase.ave_potential(args.file_name)
-potential = empty((0, init_array[0].shape[0]))
-z_coordinates = array((init_array[1])).reshape(-1, 1)
-
-potential = encapsulated_ase.ave_potential(args.file_name)[0]
-
-aved = mean(potential, axis=0)
-total_potential = append(z_coordinates, potential.reshape(-1, 1), axis=1)
-
-## if buck range is exit, out put a difference of potential
-if args.buck_range is not None:
-    buck_potential = buck_potential(z_coordinates, potential, args.buck_range)
-    print(buck_potential)
-    with open('hartree_potential.dat', 'w') as f:
-        f.write(f"{buck_potential}" + '\n')
-
-## write output
-with open(args.o, 'w') as f:
-    for value in total_potential:
-        f.write(" ".join(map(str, value)) + '\n')
-
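Both removed hartree scripts reduce a 3-D Hartree-potential cube to a 1-D profile by averaging over the in-plane grid points, converting Hartree to eV with the 27.2114 factor, and optionally averaging a bulk ("buck") window to get a reference level. A minimal sketch of that planar average on a synthetic grid (for a real *.cube file, ase's read_cube_data would supply data and atoms):

import numpy as np

# Synthetic stand-in for the (nx, ny, nz) grid that read_cube_data returns.
nx, ny, nz, c_z = 20, 20, 100, 25.0                 # c_z: assumed cell length along z in Angstrom
data = np.random.default_rng(1).normal(size=(nx, ny, nz))

hartree_to_ev = 27.2114
z = np.arange(nz) * c_z / (nz - 1)                  # z coordinate of each plane
profile = hartree_to_ev * data.mean(axis=(0, 1))    # average over x and y -> V(z) in eV

# Bulk reference: mean of the profile inside a chosen z window.
lo, hi = 8.0, 12.0
bulk = profile[(z >= lo) & (z <= hi)].mean()
print(bulk, profile[0] - bulk)                      # offset of the first plane relative to bulk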
mdkits/cli/hartree_potential_ave.py
DELETED

@@ -1,84 +0,0 @@
-#!/usr/bin/env python3
-
-################################################
-# averange cp2k output(or some else file correspond to ase.io.read_cube_data) hartree.cube to z coordinate with python
-## file path is need to pay attention
-## cycle parameter is need to pay attention
-## buck range is need to pay attention
-################################################
-
-from ase.io.cube import read_cube_data
-import numpy as np
-import argparse
-
-def array_type(string):
-    number_list = string.split(',')
-    number_array = np.array(number_list, dtype=int)
-    return number_array
-
-
-def ave_potential(filepath):
-    # is to average hartree file in z_coordinate
-
-    ## read data from filepath
-    data, atoms = read_cube_data(filepath)
-
-    ## define need parameter
-    npoints = data.shape[2]
-    step_size = atoms.cell[2, 2] / ( npoints - 1 )
-
-    ## average hartree file, and calculate z_coordinates
-    z_coordinates = [i * step_size for i in range(npoints)]
-    z_potential = 27.2114 * data[:, :, :].sum(axis=(0, 1)) / ( data.shape[0] * data.shape[1] )
-    return z_potential, z_coordinates
-
-
-def buck_potential(xaxe, potential, range):
-    mix = np.concatenate((xaxe.reshape(-1, 1), potential.reshape(-1, 1)), axis=1)
-    mask = (mix[:,0] >= range[0]) & (mix[:,0] <=range[1])
-    buck_potential = mix[mask]
-    ave_potential = np.mean(buck_potential[:,1])
-    return ave_potential
-
-
-# set argument
-parser = argparse.ArgumentParser(description='to handle cp2k output file hartree cube, name should be "hartree-*.cube"')
-parser.add_argument('folder_path', type=str, help='folder that contain all hartree cube file')
-parser.add_argument('cyc_range', type=array_type, help='cycle parameter, need to seperate with ",", similar with range() -- 1,201 1,201,10')
-parser.add_argument('-b', '--buck_range', type=array_type, help='parameter to calculate mean value of buck', default=None)
-parser.add_argument('-o', type=str, help='output file name, default is "out.put"', default='hartree.out')
-
-args = parser.parse_args()
-
-
-## init output potential file's shape, and define a z axe
-init_array = ave_potential('{}/hartree-{}.cube'.format(args.folder_path, args.cyc_range[0]))
-potential = np.empty((0, init_array[0].shape[0]))
-z_coordinates = np.array((init_array[1])).reshape(-1, 1)
-
-## average one hartree file
-if len(args.cyc_range) == 3:
-    for i in range(args.cyc_range[0], args.cyc_range[1], args.cyc_range[2]):
-        file_path = '{}/hartree-{}.cube'.format(args.folder_path, i)
-        potential = np.append(potential, [ave_potential(file_path)[0]], axis=0)
-else:
-    for i in range(args.cyc_range[0], args.cyc_range[1]):
-        file_path = '{}/hartree-{}.cube'.format(args.folder_path, i)
-        potential = np.append(potential, [ave_potential(file_path)[0]], axis=0)
-
-## average every averaged harterr file, and append to z_coordinates
-#aved_potential = potential[:, :].sum(axis=0) / len(range(1, 201))
-aved = np.mean(potential, axis=0)
-total_potential = np.append(z_coordinates, aved.reshape(-1, 1), axis=1)
-
-## if buck range is exit, out put a difference of potential
-if args.buck_range is not None:
-    buck_potential = buck_potential(z_coordinates, aved, args.buck_range)
-    with open(args.o + 'diff', 'w') as f:
-        f.write("{}\t{}\t{}".format(aved[0], buck_potential, aved[0]-buck_potential))
-
-## write output
-with open(args.o, 'w') as f:
-    for value in total_potential:
-        f.write(" ".join(map(str, value)) + '\n')
-
mdkits/cli/hb.py
DELETED
@@ -1,101 +0,0 @@
-#!/usr/bin/env python3
-
-import argparse, multiprocessing, os
-import numpy as np
-from util import (
-    structure_parsing,
-    numpy_geo,
-    os_operation,
-    cp2k_input_parsing,
-)
-
-
-def hb_count(chunk, index, cell, filename, hb_distance=3.5, hb_angle=35):
-    groups = structure_parsing.chunk_to_groups(chunk)
-    groups_hb_list = []
-    coefficients = numpy_geo.cell_to_wrap_coefficients(cell)
-    for group in groups:
-        group_hb_array = np.zeros((3, 1))
-        present_index = index
-        o_present = group[present_index].split()
-        if o_present[0] == 'O':
-            o_present = np.array(o_present[1:], dtype=np.float64)
-            group_hb_array[2, 0] += 1
-            for other_index in range(2, len(group)):
-                o_other = group[other_index].split()
-                if o_other[0] == 'O':
-                    o_other = np.array(o_other[1:], dtype=np.float64)
-                    oo_distance, o_other = numpy_geo.unwrap(o_present, o_other, coefficients, max=0)
-                    if oo_distance < hb_distance and oo_distance > 1:
-                        _, o_present_h1 = numpy_geo.unwrap(o_present, np.array(group[present_index+1].split()[1:], dtype=np.float64), coefficients)
-                        _, o_present_h2 = numpy_geo.unwrap(o_present, np.array(group[present_index+2].split()[1:], dtype=np.float64), coefficients)
-                        _, o_other_h1 = numpy_geo.unwrap(o_other, np.array(group[other_index+1].split()[1:], dtype=np.float64), coefficients)
-                        _, o_other_h2 = numpy_geo.unwrap(o_other, np.array(group[other_index+2].split()[1:], dtype=np.float64), coefficients)
-
-                        o_present_o_other_h1_angle = numpy_geo.vector_vector_angle(o_present-o_other, o_other_h1-o_other)
-                        o_present_o_other_h2_angle = numpy_geo.vector_vector_angle(o_present-o_other, o_other_h2-o_other)
-                        if o_present_o_other_h1_angle < hb_angle or o_present_o_other_h2_angle < hb_angle:
-                            group_hb_array[0, 0] += 1
-                        o_other_o_present_h1_angle = numpy_geo.vector_vector_angle(o_other-o_present, o_present_h1-o_present)
-                        o_other_o_present_h2_angle = numpy_geo.vector_vector_angle(o_other-o_present, o_present_h2-o_present)
-                        if o_other_o_present_h1_angle < hb_angle or o_other_o_present_h2_angle < hb_angle:
-                            group_hb_array[1, 0] += 1
-        groups_hb_list.append(group_hb_array)
-    groups_hb_array = np.vstack(groups_hb_list)
-    group_hb_acc_array = np.sum(groups_hb_array[0::3], axis=0).reshape(1, -1)
-    group_hb_don_array = np.sum(groups_hb_array[1::3], axis=0).reshape(1, -1)
-    group_hb_num_array = np.sum(groups_hb_array[2::3], axis=0).reshape(1, -1)
-    group_hb_array = np.vstack([group_hb_acc_array, group_hb_don_array, group_hb_num_array])
-    np.save(filename, group_hb_array)
-
-
-def parse_data(s):
-    return [float(x) for x in s.replace(',', ' ').split()]
-
-def parse_argument():
-    parser = argparse.ArgumentParser(description="analysis an O atom's hydrogen bond in water")
-    parser.add_argument('index', type=int, help='index of target atom in coord.xyz, or all of hb distribution on z')
-    parser.add_argument('input_file_name', type=str, nargs='?', help='input file name', default=os_operation.default_file_name('wraped.xyz', last=True))
-    parser.add_argument('--cp2k_input_file', type=str, help='input file name of cp2k, default is "input.inp"', default='input.inp')
-    parser.add_argument('--cell', type=parse_data, help='set cell, a list of lattice, --cell x,y,z or x,y,z,a,b,c')
-    parser.add_argument('--hb_param', type=parse_data, help='[hb_distance, hb_angle], default is [3.5, 35]', default=[3.5, 35])
-    parser.add_argument('--process', type=int, help='paralle process number default is 28', default=28)
-    parser.add_argument('--temp', help='keep temp file', action='store_false')
-
-    return parser.parse_args()
-
-def main():
-    args = parse_argument()
-    output = f'./hb_{args.index}.dat'
-    cell = cp2k_input_parsing.get_cell(args.cp2k_input_file, args.cell)
-    chunks = structure_parsing.xyz_to_chunks(args.input_file_name, args.process)
-    temp_dir = f'{os.environ.get("TEMP_DIR")}/{os.getpid()}'
-    os_operation.make_temp_dir(temp_dir, delete=args.temp)
-
-    for index, chunk in enumerate(chunks):
-        t = multiprocessing.Process(target=hb_count, args=[chunk, args.index, cell, f'{temp_dir}/chunk_{index}.temp'])
-        t.start()
-
-    for t in multiprocessing.active_children():
-        t.join()
-
-    chunks_array_list = []
-    for i in range(len(chunks)):
-        chunk_array = np.load(f'{temp_dir}/chunk_{i}.temp.npy')
-        chunks_array_list.append(chunk_array)
-    chunks_array = np.vstack(chunks_array_list)
-    chunks_array = np.mean(chunks_array, axis=1)
-
-    with open(output, 'w') as f:
-        f.write(f"# {args.index}\n")
-        f.write(f"accepter : {chunks_array[0]:.2f}\n")
-        f.write(f"donor : {chunks_array[1]:.2f}\n")
-        f.write(f"total : {chunks_array[0]+chunks_array[1]:.2f}\n")
-    print(f"# {args.index}")
-    print(f"accepter : {chunks_array[0]:.2f}")
-    print(f"donor : {chunks_array[1]:.2f}")
-    print(f"total : {chunks_array[0]+chunks_array[1]:.2f}")
-
-
-if __name__ == '__main__':
-    main()
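Structurally, the removed hb.py splits the wrapped trajectory into chunks, counts donor and acceptor bonds for one oxygen per chunk in separate processes, saves each partial result with np.save, and averages the reloaded arrays. A stripped-down sketch of that fan-out/fan-in pattern (the worker below is a placeholder, not mdkits' hb_count):

import multiprocessing, tempfile
import numpy as np

def count_in_chunk(chunk, out_file):
    # Placeholder worker: a real one would count H-bonds in `chunk`.
    np.save(out_file, np.array([float(len(chunk)), float(sum(chunk))]))

def main():
    chunks = [list(range(i, i + 5)) for i in range(0, 20, 5)]   # toy "trajectory chunks"
    tmp = tempfile.mkdtemp()

    procs = []
    for i, chunk in enumerate(chunks):
        p = multiprocessing.Process(target=count_in_chunk, args=(chunk, f"{tmp}/chunk_{i}.npy"))
        p.start()
        procs.append(p)
    for p in procs:
        p.join()

    results = np.vstack([np.load(f"{tmp}/chunk_{i}.npy") for i in range(len(chunks))])
    print(results.mean(axis=0))    # combine the per-chunk partial results

if __name__ == "__main__":
    main()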
mdkits/cli/packmol_input.py
DELETED
@@ -1,76 +0,0 @@
-import argparse
-
-
-def parse_cell(s):
-    return [float(x) for x in s.replace(',', ' ').split()]
-
-
-def parse_argument():
-    parser = argparse.ArgumentParser(description='generate packmol input file with give parameter')
-    parser.add_argument('--size', type=int, help='water size default is 30', default=30)
-    parser.add_argument('--cell', type=parse_cell, help='input box size(a,b,c)')
-    parser.add_argument('--addwat', type=int, help='add some additional water, default is 0', default=0)
-    parser.add_argument('--ioncon', type=float, help='concentration of sol box, default is 0.0', default=0.0)
-    parser.add_argument('--tolerance', type=float, help='tolerance of packmol, default is 2.5', default=2.5)
-    parser.add_argument('--watpath', type=str, help='water xyz file path', default='C:\\home\\.can\\temp\\packmol\\default\\water.xyz')
-    parser.add_argument('--ionpath', type=str, help='ion xyz file path')
-    parser.add_argument('-o', type=str, help='output file name, default is "input.pm"', default='input.pm')
-    parser.add_argument('--output', type=str, help='output file name of packmol, default is "solbox.xyz"', default='solbox.xyz')
-
-    return parser.parse_args()
-
-
-def get_water_number():
-    water_number = water_volume / water_size
-
-    return int(round(water_number, 0))
-
-
-def get_ion_number(concentration):
-    ion_number = ( (concentration * avogadro) / 1e+27 ) * water_volume
-
-    return int(round(ion_number, 0))
-
-
-def main():
-    global water_volume, water_size, avogadro
-    args = parse_argument()
-    water_volume = args.cell[0] * args.cell[1] * args.cell[2]
-    water_size = args.size
-    avogadro = 6.02214179e+23
-    water_number = get_water_number() + args.addwat
-    ion_number = get_ion_number(args.ioncon)
-
-    if ion_number == 0:
-        packmol_input_str = f"""
-tolerance {args.tolerance}
-filetype xyz
-output {args.output}
-pbc {args.cell[3]} {args.cell[4]} {args.cell[5]}
-structure {args.watpath}
-number {water_number}
-inside box 2. 2. 2. {args.cell[0]-2} {args.cell[1]-2} {args.cell[2]-2}
-end structure
-"""
-    else:
-        packmol_input_str = f"""
-tolerance {args.tolerance}
-filetype xyz
-output {args.output}
-pbc {args.cell[3]} {args.cell[4]} {args.cell[5]}
-structure {args.watpath}
-number {water_number}
-inside box 2. 2. 2. {args.cell[0]-2} {args.cell[1]-2} {args.cell[2]-2}
-end structure
-structure {args.ionpath}
-number {ion_number}
-inside box 2. 2. 2. {args.cell[0]-2} {args.cell[1]-2} {args.cell[2]-2}
-end structure
-"""
-
-    with open(args.o, 'w') as f:
-        f.write(packmol_input_str)
-
-
-if __name__ == '__main__':
-    main()
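The removed packmol_input.py sized the box contents from two small formulas: number of waters ≈ box volume / "size" (Å³ per water, default 30), and number of ions = c·N_A/10²⁷ · volume for a concentration c in mol/L and a volume in Å³. A quick worked check of those numbers for an illustrative 12.4 × 12.4 × 40 Å box at 1 mol/L:

avogadro = 6.02214179e+23
cell = (12.4, 12.4, 40.0)                   # example box edges in Angstrom
volume = cell[0] * cell[1] * cell[2]        # about 6150 A^3

water_size = 30.0                           # A^3 per water molecule, the script's default
water_number = round(volume / water_size)   # -> about 205 molecules

concentration = 1.0                         # mol/L
ion_number = round(concentration * avogadro / 1e27 * volume)   # -> about 4 ions

print(water_number, ion_number)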
{mdkits-0.1.30.dist-info → mdkits-0.2.0.dist-info}/LICENSE
File without changes

{mdkits-0.1.30.dist-info → mdkits-0.2.0.dist-info}/WHEEL
File without changes

{mdkits-0.1.30.dist-info → mdkits-0.2.0.dist-info}/entry_points.txt
File without changes