mdkits 0.1a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

mdkits/cli/data.py ADDED
@@ -0,0 +1,80 @@
+ #!/usr/bin/env python3
+
+ import numpy as np
+ import click
+ import os, glob
+
+
+ def match_file(pattern):
+     """Return files in the current directory matching a glob pattern."""
+     pwd = os.getcwd()
+     matched = glob.glob(os.path.join(pwd, pattern))
+
+     return matched
+
+
+ @click.command(name='data')
+ @click.argument('filename', nargs=-1, type=click.Path(exists=True))
+ @click.option('--nor', help='normalize data', is_flag=True)
+ @click.option('--gaus', type=int, help='gaussian filter 1d data with the given sigma', default=0)
+ @click.option('--fold', help='fold and average', is_flag=True)
+ @click.option('--err', help='error bar of data', is_flag=True)
+ @click.option('--int_cn', type=float, nargs=2, help='integrate g(r) data to a coordination number', default=None)
+ def main(filename, nor, gaus, fold, err, int_cn):
+     """
+     process data files with different methods
+     """
+
+     for data_file in filename:
+         data = np.loadtxt(data_file)
+         data_name = data_file.split(os.sep)[-1]
+         if len(data.shape) == 1:
+             # promote a single column to (index, value) pairs
+             data = np.hstack((np.arange(data.shape[0]).reshape(-1, 1), data.reshape(-1, 1)))
+         data_range = data.shape[1]
+         if nor:
+             # scale every data column by its maximum
+             nor_data = data.copy()
+             for i in range(1, data_range):
+                 nor_data[:, i] = data[:, i] / np.max(data[:, i])
+             np.savetxt(f'./nor_{data_name}', nor_data, fmt='%.2e', delimiter='\t')
+         elif gaus > 0:
+             from scipy.ndimage import gaussian_filter1d
+             gaus_data = data.copy()
+             for i in range(1, data_range):
+                 gaus_data[:, i] = gaussian_filter1d(data[:, i], gaus)
+             np.savetxt(f"./gaus_{data_name}", gaus_data, fmt='%.2e', delimiter='\t')
+         elif fold:
+             # fold the profile about its midpoint and average the two halves
+             data_shape = data.shape
+             mid = int(data_shape[0] // 2)
+             left_data = data[:mid, 1:]
+             right_data = data[mid:, 1:][::-1]
+             if data_shape[0] % 2 != 0:
+                 # odd length: the centre row pairs with itself
+                 left_data = np.vstack((left_data, right_data[-1]))
+                 mid += 1
+             fold_data = (left_data + right_data) / 2
+             fold_data = np.hstack((data[:mid, 0].reshape(-1, 1), fold_data))
+             np.savetxt(f"./fold_{data_name}", fold_data, fmt='%.2e', delimiter='\t')
+         elif err:
+             # block averaging: split each column into 5 blocks and take the std of the block means
+             err_data = data.copy()
+             std_dev_list = []
+             for i in range(1, data_range):
+                 split_array = np.array_split(err_data[:, i], 5)
+                 averages = np.array([np.mean(part) for part in split_array])
+                 std_dev = averages.std(axis=0)
+                 std_dev_list.append(std_dev)
+             np.savetxt(f"./error_{data_name}", np.vstack(std_dev_list), fmt='%.5f', delimiter='\t')
+         elif int_cn is not None:
+             from scipy import integrate
+             # number density of the reference system (here: 32 waters in a 9.86 A cubic box)
+             rho = 32 / (9.86**3)
+             x = data[:, 0]
+             mask = (x >= int_cn[0]) & (x <= int_cn[1])
+             filtered_x = x[mask]
+             filtered_y = data[:, 1][mask]
+             # running coordination number N(r) = 4 * pi * rho * integral of g(r) r^2 dr
+             integrate_y = 4 * np.pi * rho * np.insert(integrate.cumulative_trapezoid(filtered_y*filtered_x**2, filtered_x), 0, 0)
+             np.savetxt(f"int_cn_{data_name}", np.hstack((filtered_x.reshape(-1, 1), integrate_y.reshape(-1, 1))), fmt='%.5f', delimiter='\t')
+
+         print(f"================ processing of {data_name} is done ===================")
+
+
+ if __name__ == '__main__':
+     main()
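The `--int_cn` branch computes the standard running coordination number N(r) = 4πρ∫g(r)r²dr. A minimal sketch of the same integral on synthetic data, reusing the `rho` assumed in the script; for g(r) = 1 the result should match the analytic sphere count (4/3)πρr³:

    import numpy as np
    from scipy import integrate

    rho = 32 / (9.86**3)            # assumed number density, as in data.py
    r = np.linspace(0.0, 5.0, 200)  # synthetic g(r) grid
    g = np.ones_like(r)             # ideal gas: g(r) = 1 everywhere

    # running coordination number N(r), prepending 0 for the first grid point
    n_r = 4 * np.pi * rho * np.insert(
        integrate.cumulative_trapezoid(g * r**2, r), 0, 0)

    # for g(r) = 1 this should match (4/3) pi rho r^3 at the last point
    print(n_r[-1], 4 / 3 * np.pi * rho * r[-1]**3)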
mdkits/cli/density.py ADDED
@@ -0,0 +1,89 @@
+ #!/usr/bin/env python3
+
+ import numpy as np
+ import click
+ import MDAnalysis
+ from MDAnalysis import Universe
+ from MDAnalysis.analysis.base import AnalysisBase
+ from mdtool.util import cp2k_input_parsing, numpy_geo, encapsulated_mda, os_operation, arg_type
+ import warnings
+ warnings.filterwarnings("ignore")
+
+
+ class Density_distribution(AnalysisBase):
+     def __init__(self, filename, cell, o, distance_judg=None, angle_judg=None, dt=0.001, bin_size=0.2, return_index=False, water=True, element=None):
+         u = Universe(filename)
+         u.trajectory.ts.dt = dt
+         u.dimensions = cell
+
+         self.u = u
+         self.o = o
+         self.distance_judg = distance_judg
+         self.angle_judg = angle_judg
+         self.atomgroup = u.select_atoms("all")
+         self.mid_z = u.dimensions[2] / 2
+         self.bin_size = bin_size
+         self.frame_count = 0
+         self.return_index = return_index
+         self.water = water      # profile water (via O positions) rather than a single element
+         self.element = element  # element name used when water is False
+
+         super().__init__(self.atomgroup.universe.trajectory, verbose=True)
+
+     def _prepare(self):
+         self.bin_num = int(self.u.dimensions[2] / self.bin_size) + 2
+         self.density_distribution = np.zeros(self.bin_num, dtype=np.float64)
+
+     def _append(self, z):
+         bins = np.floor(z / self.bin_size).astype(int) + 1
+         np.add.at(self.density_distribution, bins, 1)
+
+     def _single_frame(self):
+         if self.water:
+             o_group = self.atomgroup.select_atoms("name O")
+             h_group = self.atomgroup.select_atoms("name H")
+
+             o, oh1, oh2 = encapsulated_mda.update_water(self, o_group, h_group, distance_judg=self.distance_judg, angle_judg=self.angle_judg, return_index=self.return_index)
+
+             self._append(o.positions[:, 2])
+         else:
+             group = self.atomgroup.select_atoms(f"name {self.element}")
+             self._append(group.positions[:, 2])
+
+         self.frame_count += 1
+
+     def find_surface(self, group):
+         # minimal placeholder: treat the lowest and highest z of the slab atoms as the two surfaces
+         z = group.positions[:, 2]
+         return z.min(), z.max()
+
+     def _conclude(self):
+         if self.frame_count > 0:
+             V = self.u.dimensions[0] * self.u.dimensions[1] * self.bin_size
+
+             if self.water:
+                 # counts -> g/cm^3: water mass in amu, 1.660539 converts amu/A^3 to g/cm^3
+                 density_distribution = (self.density_distribution * (15.999 + 1.008 * 2) * 1.660539 / V) / self.frame_count
+             else:
+                 # counts -> mol/L: 10000/6.02 converts atoms/A^3 to mol/L
+                 density_distribution = (self.density_distribution * (10000 / 6.02) / V) / self.frame_count
+
+             bins_z = np.arange(len(self.density_distribution)) * self.bin_size
+
+             # keep only the bins between the two slab surfaces, re-zeroed at the lower one
+             lower_z, upper_z = self.find_surface(self.atomgroup.select_atoms("name Pt"))
+             mask = (bins_z >= lower_z) & (bins_z <= upper_z)
+             filtered_bins_z = bins_z[mask] - lower_z
+             filtered_density_distribution = density_distribution[mask]
+             combined_data = np.column_stack((filtered_bins_z, filtered_density_distribution))
+
+             np.savetxt(self.o, combined_data, header="Z\tdensity", fmt='%.5f', delimiter='\t')
+
+
+ @click.command(name='density')
+ @click.argument('filename', type=click.Path(exists=True), default=os_operation.default_file_name('*-pos-1.xyz', last=True))
+ @click.option('--cell', type=arg_type.Cell, help='set cell from a cp2k input file or a list of lattice parameters: --cell x,y,z or x,y,z,a,b,c', default='input.inp', show_default=True)
+ @click.option('-o', type=str, help='output file name', default='density.dat', show_default=True)
+ def main(filename, cell, o):
+     density_dist = Density_distribution(filename, cell, o=o)
+     density_dist.run()
+
+
+ if __name__ == '__main__':
+     main()
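The unit conversion in `_conclude` is just a count divided by the slab-of-bin volume. A toy check of the water branch, assuming a 10 Å × 10 Å cell, a 0.2 Å bin, and an average of 1.2 water oxygens per bin (all assumed numbers):

    # average O count per bin per frame -> mass density in g/cm^3
    count = 1.2                       # assumed average waters per bin
    V = 10.0 * 10.0 * 0.2             # bin volume in A^3
    mass_h2o = 15.999 + 1.008 * 2     # amu
    density = count * mass_h2o * 1.660539 / V   # 1 amu/A^3 = 1.660539 g/cm^3
    print(f"{density:.3f} g/cm^3")    # ~1.795 for these assumed numbers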
mdkits/cli/density2.py ADDED
@@ -0,0 +1,91 @@
+ #!/usr/bin/env python3
+
+ import os, argparse
+ import numpy as np
+ import multiprocessing
+ from util import (
+     structure_parsing,
+     cp2k_input_parsing,
+     os_operation,
+     atom_count,
+ )
+
+
+ def array_type(string):
+     number_list = string.split(',')
+     number_array = np.array(number_list, dtype=float)
+     return number_array
+
+
+ def get_cell(cp2k_input_file, cell=None):
+     if cell is None:
+         cell = cp2k_input_parsing.parse_cell(cp2k_input_file)
+     if len(cell) == 3:
+         cell.extend([90.0, 90.0, 90.0])
+
+     return cell
+
+
+ def parse_cell(s):
+     if s is None:
+         return None
+     return [float(x) for x in s.replace(',', ' ').split()]
+
+
+ # set arguments
+ def parse_argument():
+     parser = argparse.ArgumentParser(description='calculate the water density distribution along the z axis')
+
+     parser.add_argument('input_file_name', type=str, nargs='?', help='input file name', default=os_operation.default_file_name('*-pos-1.xyz', last=True))
+     parser.add_argument('-o', type=str, help='output file name, default is "density.dat"', default='density.dat')
+     parser.add_argument('-r', type=array_type, help='bulk range')
+     parser.add_argument('--cp2k_input_file', type=str, help='input file name of cp2k, default is "input.inp"', default='input.inp')
+     parser.add_argument('--cell', type=parse_cell, help='set cell, a list of lattice parameters, [x,y,z] or [x,y,z,a,b,c]')
+     parser.add_argument('--process', type=int, help='parallel process number, default is 28', default=28)
+     parser.add_argument('--temp', help='keep temp files', action='store_false')
+
+     return parser.parse_args()
+
+
+ def main():
+     args = parse_argument()
+     bin_size = 0.2
+     cell = get_cell(args.cp2k_input_file, args.cell)
+     temp_dir = f'{os.environ.get("TEMP_DIR")}/{os.getpid()}'
+     os_operation.make_temp_dir(temp_dir, delete=args.temp)
+     chunks = structure_parsing.xyz_to_chunks(args.input_file_name, args.process)
+     group = structure_parsing.chunk_to_groups(chunks[0])[0]
+     atom_names = structure_parsing.atom_name_parse(group)
+     for atom_name in atom_names:
+         # one worker per chunk; each worker writes its histogram to a temp .npy file
+         for index, chunk in enumerate(chunks):
+             t = multiprocessing.Process(target=atom_count.atom_number_count, args=[chunk, bin_size, cell[2], atom_name, f'{temp_dir}/chunk_{index}.temp'])
+             t.start()
+
+         for t in multiprocessing.active_children():
+             t.join()
+
+         chunks_array_list = []
+         for i in range(len(chunks)):
+             chunk_array = np.load(f'{temp_dir}/chunk_{i}.temp.npy')
+             chunks_array_list.append(chunk_array)
+         chunks_array = np.vstack(chunks_array_list)
+         chunks_array = np.mean(chunks_array, axis=0)
+         if atom_name == 'O':
+             # counts -> g/cm^3 for water; keep a separate array so the
+             # number-density output below still starts from raw counts
+             water_density = chunks_array * (15.999 + 1.0080 * 2) * 1.660539 / (cell[0] * cell[1] * bin_size)
+             with open('density_water.dat', 'w') as f:
+                 for i in range(len(water_density)):
+                     f.write(str((i + 1) * bin_size) + '\t' + str(water_density[i]) + '\n')
+
+         # counts -> mol/L
+         number_density = chunks_array * (10000 / 6.02) / (cell[0] * cell[1] * bin_size)
+         with open(f'density_{atom_name}.dat', 'w') as f:
+             for i in range(len(number_density)):
+                 f.write(str((i + 1) * bin_size) + '\t' + str(number_density[i]) + '\n')
+
+         print(f"density analysis of {atom_name} is done")
+
+
+ if __name__ == '__main__':
+     main()
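density2.py and hb.py share the same fan-out pattern: split the trajectory into chunks, let each process save a partial result as .npy, then stack and reduce in the parent. A self-contained sketch of that pattern with a toy worker (all names here are illustrative, not the package's API):

    import multiprocessing, tempfile
    import numpy as np

    def worker(chunk, out_file):
        # stand-in for a per-chunk reduction; np.save appends the .npy suffix
        np.save(out_file, np.array([chunk.sum()]))

    if __name__ == '__main__':
        tmp = tempfile.mkdtemp()
        chunks = np.array_split(np.arange(100), 4)
        for i, chunk in enumerate(chunks):
            multiprocessing.Process(target=worker, args=(chunk, f'{tmp}/chunk_{i}.temp')).start()
        for p in multiprocessing.active_children():
            p.join()
        parts = [np.load(f'{tmp}/chunk_{i}.temp.npy') for i in range(len(chunks))]
        print(np.vstack(parts).sum())   # 4950, same as np.arange(100).sum()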
mdkits/cli/extract.py ADDED
@@ -0,0 +1,80 @@
+ #!/usr/bin/env python3
+
+ # extract the final structure from a pos.xyz file
+
+ import os
+ import click
+ from mdtool.util import os_operation, arg_type
+ import MDAnalysis
+ from MDAnalysis import Universe
+
+
+ def write_to_xyz(u, frames, o, cut=None):
+     with MDAnalysis.Writer(o, u.atoms.n_atoms, format='XYZ') as w:
+         for ts in u.trajectory:
+             if ts.frame in frames:
+                 w.write(u)
+     if cut:
+         # drop the first `cut` lines (atom count and comment) to get a bare coord file
+         with open(o, 'r') as fi, open(o + 't', 'w') as fo:
+             for i, line in enumerate(fi):
+                 if i >= cut:
+                     fo.write(line)
+         os.replace(o + 't', o)
+
+
+ def write_to_xyz_s(u, frames, cut=None):
+     # write each selected frame to its own file under ./coord
+     index = 0
+     for ts in u.trajectory:
+         if ts.frame in frames:
+             o = f'./coord/coord_{index:03d}'
+             with MDAnalysis.Writer(o, u.atoms.n_atoms, format='XYZ') as w:
+                 w.write(u)
+             index += 1
+             if cut:
+                 with open(o, 'r') as fi, open(o + 't', 'w') as fo:
+                     for i, line in enumerate(fi):
+                         if i >= cut:
+                             fo.write(line)
+                 os.replace(o + 't', o)
+
+
+ @click.command(name='extract')
+ @click.argument('input_file_name', type=click.Path(exists=True), default=os_operation.default_file_name('*-pos-1.xyz', last=True))
+ @click.option('-o', type=str, help='output file name', default='extracted.xyz', show_default=True)
+ @click.option('-r', type=arg_type.FrameRange, help='frame range to slice', default='-1', show_default=True)
+ @click.option('-c', help='output a coord.xyz', is_flag=True)
+ def main(input_file_name, o, r, c):
+     """
+     extract frames from a trajectory file
+     """
+
+     u = Universe(input_file_name)
+     if len(r) == 1:
+         print(f"frame range slice is {r}")
+         group = u.trajectory[r]
+     else:
+         print(f"frame range slice is {slice(*r)}")
+         group = u.trajectory[slice(*r)]
+     click.echo(f"total frames is {len(u.trajectory)}")
+     frames = [ts.frame for ts in group]
+
+     if c:
+         cut = 2
+     else:
+         cut = None
+
+     if len(r) == 3 and r[-1] is not None:
+         # a stepped range writes one file per frame into a fresh ./coord directory
+         if not os.path.exists('./coord'):
+             os.makedirs('./coord')
+         else:
+             import shutil
+             shutil.rmtree('./coord')
+             os.makedirs('./coord')
+         write_to_xyz_s(u, frames, cut=cut)
+         click.echo(os.path.abspath('./coord'))
+     else:
+         write_to_xyz(u, frames, o, cut=cut)
+         click.echo(os.path.abspath(o))
+
+
+ if __name__ == '__main__':
+     main()
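The -r option maps directly onto MDAnalysis trajectory slicing. A minimal illustration of the two code paths, assuming a hypothetical trajectory file `traj.xyz`:

    import MDAnalysis as mda

    u = mda.Universe('traj.xyz')   # hypothetical trajectory file
    stepped = [ts.frame for ts in u.trajectory[slice(0, 100, 10)]]   # like -r 0,100,10
    last = [ts.frame for ts in u.trajectory[[-1]]]                   # like the default -r -1
    print(stepped, last)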
@@ -0,0 +1,59 @@
+ #!/usr/bin/env python3
+
+ ################################################
+ # average a cp2k hartree.cube output (or any file readable by ase.io.read_cube_data) along the z coordinate
+ ## pay attention to the file path
+ ## pay attention to the cycle parameter
+ ## pay attention to the bulk range
+ ################################################
+
+ from numpy import array, mean, append, concatenate
+ from argparse import ArgumentParser
+ from util import encapsulated_ase, os_operation
+
+
+ def array_type(string):
+     number_list = string.split(',')
+     number_array = array(number_list, dtype=float)
+     return number_array
+
+
+ def buck_potential(xaxe, potential, range):
+     # mean potential inside the bulk window [range[0], range[1]]
+     mix = concatenate((xaxe.reshape(-1, 1), potential.reshape(-1, 1)), axis=1)
+     mask = (mix[:, 0] >= range[0]) & (mix[:, 0] <= range[1])
+     buck_potential = mix[mask]
+     ave_potential = mean(buck_potential[:, 1])
+     return ave_potential
+
+
+ # set arguments
+ parser = ArgumentParser(description='handle a cp2k hartree cube output file; the name should match "hartree-*.cube"')
+ parser.add_argument('file_name', type=str, nargs='?', help='hartree cube file', default=os_operation.default_file_name('*-v_hartree-1_*.cube', last=True))
+ parser.add_argument('-b', '--buck_range', type=array_type, help='range used to calculate the mean value of the bulk region', default=None)
+ parser.add_argument('-o', type=str, help='output file name, default is "hartree.out"', default='hartree.out')
+
+ args = parser.parse_args()
+
+
+ ## planar-average the cube file and define the z axis
+ potential, z_coordinates = encapsulated_ase.ave_potential(args.file_name)
+ z_coordinates = array(z_coordinates).reshape(-1, 1)
+
+ total_potential = append(z_coordinates, potential.reshape(-1, 1), axis=1)
+
+ ## if a bulk range is given, also output the bulk-averaged potential
+ if args.buck_range is not None:
+     bulk_value = buck_potential(z_coordinates, potential, args.buck_range)
+     print(bulk_value)
+     with open('hartree_potential.dat', 'w') as f:
+         f.write(f"{bulk_value}" + '\n')
+
+ ## write output
+ with open(args.o, 'w') as f:
+     for value in total_potential:
+         f.write(" ".join(map(str, value)) + '\n')
@@ -0,0 +1,84 @@
+ #!/usr/bin/env python3
+
+ ################################################
+ # average cp2k hartree.cube outputs (or any files readable by ase.io.read_cube_data) along the z coordinate
+ ## pay attention to the file path
+ ## pay attention to the cycle parameter
+ ## pay attention to the bulk range
+ ################################################
+
+ from ase.io.cube import read_cube_data
+ import numpy as np
+ import argparse
+
+
+ def array_type(string):
+     number_list = string.split(',')
+     number_array = np.array(number_list, dtype=int)
+     return number_array
+
+
+ def ave_potential(filepath):
+     # average a hartree cube file over the xy plane for each z coordinate
+
+     ## read data from filepath
+     data, atoms = read_cube_data(filepath)
+
+     ## define the needed parameters
+     npoints = data.shape[2]
+     step_size = atoms.cell[2, 2] / (npoints - 1)
+
+     ## average the hartree data over x and y; 27.2114 converts hartree to eV
+     z_coordinates = [i * step_size for i in range(npoints)]
+     z_potential = 27.2114 * data[:, :, :].sum(axis=(0, 1)) / (data.shape[0] * data.shape[1])
+     return z_potential, z_coordinates
+
+
+ def buck_potential(xaxe, potential, range):
+     # mean potential inside the bulk window [range[0], range[1]]
+     mix = np.concatenate((xaxe.reshape(-1, 1), potential.reshape(-1, 1)), axis=1)
+     mask = (mix[:, 0] >= range[0]) & (mix[:, 0] <= range[1])
+     buck_potential = mix[mask]
+     ave_potential = np.mean(buck_potential[:, 1])
+     return ave_potential
+
+
+ # set arguments
+ parser = argparse.ArgumentParser(description='handle cp2k hartree cube output files; names should match "hartree-*.cube"')
+ parser.add_argument('folder_path', type=str, help='folder that contains all hartree cube files')
+ parser.add_argument('cyc_range', type=array_type, help='cycle parameter, separated with ",", similar to range() -- 1,201 or 1,201,10')
+ parser.add_argument('-b', '--buck_range', type=array_type, help='range used to calculate the mean value of the bulk region', default=None)
+ parser.add_argument('-o', type=str, help='output file name, default is "hartree.out"', default='hartree.out')
+
+ args = parser.parse_args()
+
+
+ ## init the output potential array's shape, and define the z axis
+ init_array = ave_potential('{}/hartree-{}.cube'.format(args.folder_path, args.cyc_range[0]))
+ potential = np.empty((0, init_array[0].shape[0]))
+ z_coordinates = np.array(init_array[1]).reshape(-1, 1)
+
+ ## average each hartree file in the cycle
+ if len(args.cyc_range) == 3:
+     for i in range(args.cyc_range[0], args.cyc_range[1], args.cyc_range[2]):
+         file_path = '{}/hartree-{}.cube'.format(args.folder_path, i)
+         potential = np.append(potential, [ave_potential(file_path)[0]], axis=0)
+ else:
+     for i in range(args.cyc_range[0], args.cyc_range[1]):
+         file_path = '{}/hartree-{}.cube'.format(args.folder_path, i)
+         potential = np.append(potential, [ave_potential(file_path)[0]], axis=0)
+
+ ## average over all the planar-averaged hartree files, and append to z_coordinates
+ aved = np.mean(potential, axis=0)
+ total_potential = np.append(z_coordinates, aved.reshape(-1, 1), axis=1)
+
+ ## if a bulk range is given, output the potential difference as well
+ if args.buck_range is not None:
+     bulk_value = buck_potential(z_coordinates, aved, args.buck_range)
+     with open(args.o + 'diff', 'w') as f:
+         f.write("{}\t{}\t{}".format(aved[0], bulk_value, aved[0] - bulk_value))
+
+ ## write output
+ with open(args.o, 'w') as f:
+     for value in total_potential:
+         f.write(" ".join(map(str, value)) + '\n')
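The planar average in `ave_potential` is just a sum over the x and y grid axes. A quick check on a synthetic "cube" array in which every xy plane is constant, so the z profile must recover those constants exactly (the hartree-to-eV factor is left out here):

    import numpy as np

    nx, ny, nz = 4, 4, 10
    z_values = np.linspace(0.0, 1.0, nz)             # the profile a planar average should recover
    data = np.broadcast_to(z_values, (nx, ny, nz))   # each xy plane is constant

    z_potential = data.sum(axis=(0, 1)) / (nx * ny)  # same reduction as ave_potential
    assert np.allclose(z_potential, z_values)
    print(z_potential)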
mdkits/cli/hb.py ADDED
@@ -0,0 +1,101 @@
+ #!/usr/bin/env python3
+
+ import argparse, multiprocessing, os
+ import numpy as np
+ from util import (
+     structure_parsing,
+     numpy_geo,
+     os_operation,
+     cp2k_input_parsing,
+ )
+
+
+ def hb_count(chunk, index, cell, filename, hb_distance=3.5, hb_angle=35):
+     # count hydrogen bonds accepted and donated by the O atom at `index` in every frame of the chunk
+     groups = structure_parsing.chunk_to_groups(chunk)
+     groups_hb_list = []
+     coefficients = numpy_geo.cell_to_wrap_coefficients(cell)
+     for group in groups:
+         group_hb_array = np.zeros((3, 1))   # rows: accepted, donated, frame counted
+         present_index = index
+         o_present = group[present_index].split()
+         if o_present[0] == 'O':
+             o_present = np.array(o_present[1:], dtype=np.float64)
+             group_hb_array[2, 0] += 1
+             for other_index in range(2, len(group)):
+                 o_other = group[other_index].split()
+                 if o_other[0] == 'O':
+                     o_other = np.array(o_other[1:], dtype=np.float64)
+                     oo_distance, o_other = numpy_geo.unwrap(o_present, o_other, coefficients, max=0)
+                     if oo_distance < hb_distance and oo_distance > 1:
+                         # unwrap the two H atoms that follow each O in the xyz file
+                         _, o_present_h1 = numpy_geo.unwrap(o_present, np.array(group[present_index+1].split()[1:], dtype=np.float64), coefficients)
+                         _, o_present_h2 = numpy_geo.unwrap(o_present, np.array(group[present_index+2].split()[1:], dtype=np.float64), coefficients)
+                         _, o_other_h1 = numpy_geo.unwrap(o_other, np.array(group[other_index+1].split()[1:], dtype=np.float64), coefficients)
+                         _, o_other_h2 = numpy_geo.unwrap(o_other, np.array(group[other_index+2].split()[1:], dtype=np.float64), coefficients)
+
+                         # accepted: the other water donates an H pointing at the present O
+                         o_present_o_other_h1_angle = numpy_geo.vector_vector_angle(o_present-o_other, o_other_h1-o_other)
+                         o_present_o_other_h2_angle = numpy_geo.vector_vector_angle(o_present-o_other, o_other_h2-o_other)
+                         if o_present_o_other_h1_angle < hb_angle or o_present_o_other_h2_angle < hb_angle:
+                             group_hb_array[0, 0] += 1
+                         # donated: one of the present water's H atoms points at the other O
+                         o_other_o_present_h1_angle = numpy_geo.vector_vector_angle(o_other-o_present, o_present_h1-o_present)
+                         o_other_o_present_h2_angle = numpy_geo.vector_vector_angle(o_other-o_present, o_present_h2-o_present)
+                         if o_other_o_present_h1_angle < hb_angle or o_other_o_present_h2_angle < hb_angle:
+                             group_hb_array[1, 0] += 1
+         groups_hb_list.append(group_hb_array)
+     groups_hb_array = np.vstack(groups_hb_list)
+     group_hb_acc_array = np.sum(groups_hb_array[0::3], axis=0).reshape(1, -1)
+     group_hb_don_array = np.sum(groups_hb_array[1::3], axis=0).reshape(1, -1)
+     group_hb_num_array = np.sum(groups_hb_array[2::3], axis=0).reshape(1, -1)
+     group_hb_array = np.vstack([group_hb_acc_array, group_hb_don_array, group_hb_num_array])
+     np.save(filename, group_hb_array)
+
+
+ def parse_data(s):
+     return [float(x) for x in s.replace(',', ' ').split()]
+
+
+ def parse_argument():
+     parser = argparse.ArgumentParser(description="analyze an O atom's hydrogen bonds in water")
+     parser.add_argument('index', type=int, help='index of the target atom in coord.xyz')
+     parser.add_argument('input_file_name', type=str, nargs='?', help='input file name', default=os_operation.default_file_name('wraped.xyz', last=True))
+     parser.add_argument('--cp2k_input_file', type=str, help='input file name of cp2k, default is "input.inp"', default='input.inp')
+     parser.add_argument('--cell', type=parse_data, help='set cell, a list of lattice parameters, --cell x,y,z or x,y,z,a,b,c')
+     parser.add_argument('--hb_param', type=parse_data, help='[hb_distance, hb_angle], default is [3.5, 35]', default=[3.5, 35])
+     parser.add_argument('--process', type=int, help='parallel process number, default is 28', default=28)
+     parser.add_argument('--temp', help='keep temp files', action='store_false')
+
+     return parser.parse_args()
+
+
+ def main():
+     args = parse_argument()
+     output = f'./hb_{args.index}.dat'
+     cell = cp2k_input_parsing.get_cell(args.cp2k_input_file, args.cell)
+     chunks = structure_parsing.xyz_to_chunks(args.input_file_name, args.process)
+     temp_dir = f'{os.environ.get("TEMP_DIR")}/{os.getpid()}'
+     os_operation.make_temp_dir(temp_dir, delete=args.temp)
+
+     for index, chunk in enumerate(chunks):
+         t = multiprocessing.Process(target=hb_count, args=[chunk, args.index, cell, f'{temp_dir}/chunk_{index}.temp', args.hb_param[0], args.hb_param[1]])
+         t.start()
+
+     for t in multiprocessing.active_children():
+         t.join()
+
+     chunks_array_list = []
+     for i in range(len(chunks)):
+         chunk_array = np.load(f'{temp_dir}/chunk_{i}.temp.npy')
+         chunks_array_list.append(chunk_array)
+     chunks_array = np.vstack(chunks_array_list)
+     # per-frame averages: divide the accepted/donated totals by the number of counted frames
+     n_frames = chunks_array[2::3].sum()
+     acc = chunks_array[0::3].sum() / n_frames
+     don = chunks_array[1::3].sum() / n_frames
+
+     with open(output, 'w') as f:
+         f.write(f"# {args.index}\n")
+         f.write(f"acceptor : {acc:.2f}\n")
+         f.write(f"donor : {don:.2f}\n")
+         f.write(f"total : {acc+don:.2f}\n")
+     print(f"# {args.index}")
+     print(f"acceptor : {acc:.2f}")
+     print(f"donor : {don:.2f}")
+     print(f"total : {acc+don:.2f}")
+
+
+ if __name__ == '__main__':
+     main()
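The test in hb_count is the common geometric criterion: O···O distance below 3.5 Å plus a donor angle below 35°. A self-contained sketch of the same kind of test in plain numpy, without periodic wrapping and with hypothetical coordinates:

    import numpy as np

    def angle_deg(v1, v2):
        # angle between two vectors in degrees
        cosang = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
        return np.degrees(np.arccos(np.clip(cosang, -1.0, 1.0)))

    o_donor = np.array([0.0, 0.0, 0.0])
    h_donor = np.array([0.96, 0.0, 0.0])     # O-H bond, ~0.96 A
    o_acceptor = np.array([2.8, 0.3, 0.0])   # O...O within 3.5 A

    oo = np.linalg.norm(o_acceptor - o_donor)
    theta = angle_deg(o_acceptor - o_donor, h_donor - o_donor)
    print(oo < 3.5 and theta < 35)   # True: counts as a hydrogen bond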
mdkits/cli/log.py ADDED
@@ -0,0 +1,64 @@
+ """Log"""
+ import logging
+ import os
+ from logging.config import dictConfig
+
+ from mdtool.config import settings
+
+ os.makedirs(settings.LOGPATH, exist_ok=True)
+
+
+ def verbose_formatter(verbose: bool) -> str:
+     """formatter factory"""
+     if verbose is True:
+         return 'verbose'
+     return 'simple'
+
+
+ def update_log_level(debug: bool, level: str) -> str:
+     """update log level"""
+     if debug is True:
+         level_num = logging.DEBUG
+     else:
+         level_num = logging.getLevelName(level)
+     settings.set('LOGLEVEL', logging.getLevelName(level_num))
+     return settings.LOGLEVEL
+
+
+ def init_log() -> None:
+     """Init log config."""
+     log_level = update_log_level(settings.DEBUG, str(settings.LOGLEVEL).upper())
+
+     log_config = {
+         "version": 1,
+         "disable_existing_loggers": False,
+         "formatters": {
+             'verbose': {
+                 'format': '%(asctime)s %(levelname)s %(name)s %(process)d %(thread)d %(message)s',
+             },
+             'simple': {
+                 'format': '%(asctime)s %(levelname)s %(name)s %(message)s',
+             },
+         },
+         "handlers": {
+             "console": {
+                 "formatter": verbose_formatter(settings.VERBOSE),
+                 'level': 'DEBUG',
+                 "class": "logging.StreamHandler",
+             },
+             'file': {
+                 'class': 'logging.handlers.RotatingFileHandler',
+                 'level': 'DEBUG',
+                 'formatter': verbose_formatter(settings.VERBOSE),
+                 'filename': os.path.join(settings.LOGPATH, 'all.log'),
+                 'maxBytes': 1024 * 1024 * 200,  # 200 MB
+                 'backupCount': 5,
+                 'encoding': 'utf-8'
+             },
+         },
+         "loggers": {
+             '': {'level': log_level, 'handlers': ['console']},
+         }
+     }
+
+     dictConfig(log_config)
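A minimal usage sketch, assuming the settings object provides DEBUG, VERBOSE, LOGLEVEL and LOGPATH as the config above expects (the import path is an assumption based on the file location):

    import logging
    from mdkits.cli import log   # hypothetical import path for this module

    log.init_log()
    logger = logging.getLogger(__name__)
    logger.info("density analysis started")   # routed to the console handler at the configured level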