mdkits 0.1.13__py3-none-any.whl → 1.2.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. mdkits/build_cli/adsorbate.py +2 -1
  2. mdkits/build_cli/build_bulk.py +2 -1
  3. mdkits/build_cli/build_interface.py +5 -0
  4. mdkits/build_cli/build_solution.py +48 -4
  5. mdkits/build_cli/build_surface.py +2 -5
  6. mdkits/build_cli/cut_surface.py +1 -1
  7. mdkits/build_cli/supercell.py +1 -1
  8. mdkits/cli/convert.py +1 -1
  9. mdkits/cli/extract.py +29 -18
  10. mdkits/{cli → dft_cli/.back}/pdos.py +1 -0
  11. mdkits/dft_cli/check_neb.py +0 -0
  12. mdkits/{cli → dft_cli}/cube.py +3 -2
  13. mdkits/dft_cli/dft_cli.py +23 -0
  14. mdkits/dft_cli/fix.py +54 -0
  15. mdkits/dft_cli/pdos.py +119 -0
  16. mdkits/md_cli/angle.py +122 -0
  17. mdkits/{cli → md_cli}/density.py +24 -19
  18. mdkits/md_cli/dipole.py +124 -0
  19. mdkits/md_cli/hb_distribution.py +185 -0
  20. mdkits/md_cli/md_cli.py +32 -0
  21. mdkits/md_cli/monitor.py +104 -0
  22. mdkits/md_cli/msd.py +44 -0
  23. mdkits/md_cli/rdf.py +53 -0
  24. mdkits/md_cli/setting.py +14 -0
  25. mdkits/md_cli/vac.py +65 -0
  26. mdkits/{cli → md_cli}/wrap.py +4 -3
  27. mdkits/mdkits.py +5 -9
  28. mdkits/util/.fig_operation.py.swp +0 -0
  29. mdkits/util/arg_type.py +18 -8
  30. mdkits/util/cp2k_input_parsing.py +5 -1
  31. mdkits/util/encapsulated_ase.py +28 -7
  32. mdkits/util/encapsulated_mda.py +4 -1
  33. mdkits/util/numpy_geo.py +10 -5
  34. mdkits/util/os_operation.py +3 -1
  35. mdkits/util/out_err.py +18 -6
  36. mdkits-1.2.3.dist-info/METADATA +370 -0
  37. mdkits-1.2.3.dist-info/RECORD +51 -0
  38. mdkits/cli/,hb_distribution_down.py +0 -114
  39. mdkits/cli/hartree_potential.py +0 -59
  40. mdkits/cli/hartree_potential_ave.py +0 -84
  41. mdkits/cli/hb.py +0 -101
  42. mdkits/cli/hb_distribution.py +0 -126
  43. mdkits/cli/packmol_input.py +0 -76
  44. mdkits-0.1.13.dist-info/METADATA +0 -226
  45. mdkits-0.1.13.dist-info/RECORD +0 -43
  46. {mdkits-0.1.13.dist-info → mdkits-1.2.3.dist-info}/LICENSE +0 -0
  47. {mdkits-0.1.13.dist-info → mdkits-1.2.3.dist-info}/WHEEL +0 -0
  48. {mdkits-0.1.13.dist-info → mdkits-1.2.3.dist-info}/entry_points.txt +0 -0
mdkits/md_cli/vac.py ADDED
@@ -0,0 +1,65 @@
+import numpy as np
+import click
+import MDAnalysis
+from MDAnalysis import Universe
+from MDAnalysis.analysis.base import AnalysisBase
+from mdkits.util import os_operation, arg_type
+
+
+class Velocity_AutoCorrelation(AnalysisBase):
+    def __init__(self, filename, select, dt=0.001):
+        u = Universe(filename)
+        u.trajectory.ts.dt = dt
+        self.u = u
+        self.atomgroup = u.select_atoms(select)
+
+        super(Velocity_AutoCorrelation, self).__init__(self.atomgroup.universe.trajectory, verbose=True)
+
+    def _prepare(self):
+        self.cvv = []
+        self.v0 = self.atomgroup.positions
+        self.normalize = 1/np.sum(self.v0*self.v0)
+        self.cvv.append(np.sum(self.v0*self.v0)*self.normalize)
+
+
+    def _append(self, cvv):
+        self.cvv.append(cvv*self.normalize)
+
+    def _single_frame(self):
+        cvv = np.sum(self.atomgroup.positions*self.v0)
+        self._append(cvv)
+
+    def _conclude(self):
+        self.cvv = np.array(self.cvv)
+
+        sf = self.cvv.shape[0]
+        fftraj = np.fft.rfft(self.cvv)
+        fdos = np.abs(fftraj)
+
+        faxis = np.fft.rfftfreq(sf, d=1/sf)
+
+        combine = np.column_stack((np.arange(len(self.cvv)), self.cvv))
+
+        np.savetxt('vac.dat', combine, fmt='%.5f', header="frame\tvac")
+        np.savetxt('freq.dat', np.column_stack((faxis, fdos)), fmt='%.5f', header="freq\tabundance")
+
+
+@click.command(name="vac")
+@click.argument("filename", type=click.Path(exists=True), default=os_operation.default_file_name('*-vel-1.xyz', last=True))
+@click.option("--select", type=str, default="all", help="atom selection", show_default=True)
+@click.option('-r', type=arg_type.FrameRange, help='range of frame to analysis')
+def main(filename, select, r):
+    """analysis velocity autocorrelation function and frequency"""
+    a = Velocity_AutoCorrelation(filename, select)
+
+    if r is not None:
+        if len(r) == 2:
+            a.run(start=r[0], stop=r[1])
+        elif len(r) == 3:
+            a.run(start=r[0], stop=r[1], step=r[2])
+    else:
+        a.run()
+
+
+if __name__ == '__main__':
+    main()
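For orientation, a minimal sketch of driving the new Velocity_AutoCorrelation analysis directly from Python; the trajectory filename and atom selection below are illustrative placeholders, and the run() keywords come from MDAnalysis' AnalysisBase:

# hypothetical input file; any MDAnalysis-readable velocity trajectory works
from mdkits.md_cli.vac import Velocity_AutoCorrelation

vac = Velocity_AutoCorrelation("water-vel-1.xyz", select="name O", dt=0.0005)
vac.run(start=0, stop=2000)  # same start/stop/step keywords the click wrapper forwards
# _conclude() writes vac.dat (normalized autocorrelation per frame)
# and freq.dat (rFFT magnitude per frequency bin)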
mdkits/{cli → md_cli}/wrap.py RENAMED
@@ -6,7 +6,8 @@ import MDAnalysis, click
 from mdkits.util import (
     arg_type,
     os_operation,
-    cp2k_input_parsing
+    cp2k_input_parsing,
+    out_err
 )


@@ -28,8 +29,8 @@ def main(filename, o, cell):
     ag.wrap()
     W.write(ag)

-    click.echo("\nwrap is done, output file {o} is:")
-    click.echo(os.path.abspath(o))
+    click.echo(f"\nwrap is done, output file {o} is:")
+    out_err.path_output(o)


 if __name__ == '__main__':
mdkits/mdkits.py CHANGED
@@ -1,14 +1,12 @@
 import click
 from mdkits.build_cli import build_cli
+from mdkits.dft_cli import dft_cli
+from mdkits.md_cli import md_cli
 from mdkits.cli import (
     convert,
-    wrap,
     extract,
     data,
     plot,
-    density,
-    cube,
-    pdos,
 )


@@ -20,15 +18,13 @@ def cli(ctx):
     pass


+cli.add_command(md_cli.cli)
+cli.add_command(build_cli.cli_build)
+cli.add_command(dft_cli.main)
 cli.add_command(convert.main)
-cli.add_command(wrap.main)
 cli.add_command(extract.main)
 cli.add_command(data.main)
 cli.add_command(plot.main)
-cli.add_command(density.main)
-cli.add_command(cube.main)
-cli.add_command(pdos.main)
-cli.add_command(build_cli.cli_build)


 if __name__ == '__main__':
mdkits/util/.fig_operation.py.swp (binary file, no diff shown)
mdkits/util/arg_type.py CHANGED
@@ -6,7 +6,7 @@ from mdkits.util import os_operation, cp2k_input_parsing, out_err


 class CellType(click.ParamType):
-    name = "pbc cell type"
+    name = "cell type"

     def convert(self, value, param, ctx):
         if isinstance(value, str):
@@ -14,10 +14,8 @@ class CellType(click.ParamType):

             if len(cell) == 3:
                 cell += [90, 90, 90]
-                out_err.cell_output(cell)
                 return cell
             elif len(cell) == 6:
-                out_err.cell_output(cell)
                 return cell
             else:
                 self.fail(f"{value} is not a valid cell parameter", param, ctx)
@@ -38,7 +36,7 @@ class FrameRangeType(click.ParamType):


 class StructureType(click.ParamType):
-    name = "structure file type"
+    name = "structure type"
     def convert(self, value, param, ctx):
         no_cell=np.array([0., 0., 0., 90., 90., 90.])
         if isinstance(value, str):
@@ -58,19 +56,30 @@ class StructureType(click.ParamType):
            self.fail(f"{value} is not exists", param, ctx)


-
 class MoleculeType(click.Choice):
     name = "mocular type"
     def __init__(self):
         g2.names.append(click.Path(exists=True))
         super().__init__(choices=tuple(g2.names))

+
 class AdsSiteType(click.Choice):
     name = "adsorption site"
     def __init__(self):
-        super().__init__(self)
         site = ['ontop', 'hollow','fcc', 'hcp', 'bridge', 'shortbridge', 'longbridge']
-        self.choices = tuple(site)
+        super().__init__(choices=tuple(site))
+
+
+class FileListType(click.ParamType):
+    name = "file list"
+    def convert(self, value, param, ctx):
+        if isinstance(value, str):
+            import glob
+            file_list = glob.glob(value)
+            if file_list:
+                return file_list
+            else:
+                self.fail(f"No files match {value}", param, ctx)



@@ -78,4 +87,5 @@ Cell = CellType()
 FrameRange = FrameRangeType()
 Molecule = MoleculeType()
 AdsSite = AdsSiteType()
-Structure = StructureType()
+Structure = StructureType()
+FileList = FileListType()
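A hedged sketch of how the new arg_type.FileList parameter type might be wired into a click command; the command and option names here are hypothetical, not taken from mdkits:

import click
from mdkits.util import arg_type

@click.command()
@click.option("--cube", type=arg_type.FileList, help="glob pattern, e.g. '*.cube'")
def list_cubes(cube):
    # FileListType.convert() globs the pattern and returns the matching paths as a list
    for path in cube:
        click.echo(path)

if __name__ == "__main__":
    list_cubes()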
mdkits/util/cp2k_input_parsing.py CHANGED
@@ -30,7 +30,11 @@ def parse_cell():
         if len(cell) == 3:
             cell.extend([90.0, 90.0, 90.0])

-        out_err.cell_output(cell)
+        if len(cell) == 0:
+            print("parse failed")
+            return [0., 0., 0., 90., 90., 90.]
+        else:
+            print(f"parsed cell: x = {cell[0]}, y = {cell[1]}, z = {cell[2]}, a = {cell[3]}\u00B0, b = {cell[4]}\u00B0, c = {cell[5]}\u00B0")
         return cell
     except FileNotFoundError:
         return [0., 0., 0., 90., 90., 90.]
mdkits/util/encapsulated_ase.py CHANGED
@@ -66,24 +66,45 @@ def rdf(chunk, cell, bin_size, name, parallel=True):
    ana.clear_cache()


-def ave_cube(filepath):
+def ave_cube(filepath, axis):
     """
     function: average hartree file in z_coordinate
     parameter:
         filepath: hartree cube file path
+        axis: average axis, can be 'x','y','z' or 0,1,2
     return:
         z_cube_data: a list of cube data alone z axes
         z_coordinates: a list of coordinates of z axes
     """
+    if isinstance(axis, str):
+        mapping = {'x': 0, 'y': 1, 'z': 2}
+        key = axis.lower()
+        if key not in mapping:
+            raise ValueError(f"axis string must be one of 'x','y','z', got {axis}")
+        axis_idx = mapping[key]
+    else:
+        axis_idx = int(axis)
+
     # read data from filepath
     data, atoms = read_cube_data(filepath)
-    # define need parameter
-    npoints = data.shape[2]
-    step_size = atoms.cell[2, 2] / ( npoints - 1 )
-    # average hartree file, and calculate z_coordinates
+
+    ndim = data.ndim
+    if axis_idx < 0:
+        axis_idx = ndim + axis_idx
+    if not (0 <= axis_idx < ndim):
+        raise ValueError(f"axis must be between 0 and {ndim-1}, got {axis_idx}")
+
+    npoints = data.shape[axis_idx]
+    step_size = atoms.cell.cellpar()[axis_idx] / ( npoints - 1 )
+
     z_coordinates = [i * step_size for i in range(npoints)]
-    z_cube_data = data[:, :, :].sum(axis=(0, 1)) / ( data.shape[0] * data.shape[1] )
-    return np.column_stack((z_coordinates, z_cube_data))
+
+    other_axes = tuple(i for i in range(ndim) if i != axis_idx)
+    if other_axes:
+        averaged = data.mean(axis=other_axes)
+    else:
+        averaged = data.copy()
+    return np.column_stack((z_coordinates, averaged))


 def atoms_read_with_cell(file_name, cell=None, coord_mode=False, default_cell=np.array([0., 0., 0., 90., 90., 90.])):
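A hedged usage example of the reworked ave_cube signature, assuming the function is exposed at module level of mdkits.util.encapsulated_ase; the cube filename is a placeholder, and axis may be given as 'x'/'y'/'z' or 0/1/2:

import numpy as np
from mdkits.util.encapsulated_ase import ave_cube

# planar average of a Hartree potential cube along z (filename is illustrative)
profile = ave_cube("cp2k-v_hartree-1_0.cube", axis="z")
coords, values = profile[:, 0], profile[:, 1]
np.savetxt("hartree_z.dat", profile, header="z\tpotential")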
mdkits/util/encapsulated_mda.py CHANGED
@@ -3,7 +3,7 @@ import numpy as np
 from . import numpy_geo


-def update_water(self, o_group, h_group, distance_judg=1.2, angle_judg:tuple[float, float]=None, return_index=False):
+def update_water(self, o_group, h_group, distance_judg=1.2, angle_judg:tuple[float, float]=(None, None), return_index=False):
     """
     input: o and h atom
     output: o and two h in this frame
@@ -53,6 +53,9 @@ def update_water(self, o_group, h_group, distance_judg=1.2, angle_judg:tuple[flo
     if return_index:
         return o_index, oh1_index, oh2_index
     else:
+        if len(o_index) == 0:
+            raise ValueError("No water found in this atom group")
+
         o = o_group[o_index]
         oh1 = h_group[oh1_index]
         oh2 = h_group[oh2_index]
mdkits/util/numpy_geo.py CHANGED
@@ -45,7 +45,10 @@ def vector_between_two_vector(vector1, vector2):


 def vector_vector_angle(vector, surface_vector):
-    cos = np.dot(vector, surface_vector) / (np.linalg.norm(vector) * np.linalg.norm(surface_vector))
+    if len(vector.shape) == 1:
+        cos = np.dot(vector, surface_vector) / (np.linalg.norm(vector) * np.linalg.norm(surface_vector))
+    else:
+        cos = np.dot(vector, surface_vector) / (np.linalg.norm(vector, axis=1) * np.linalg.norm(surface_vector))
     vector_vector_angle = np.arccos(np.clip(cos, -1.0, 1.0))
     vector_vector_angle = np.degrees(vector_vector_angle)
     return vector_vector_angle
@@ -96,7 +99,7 @@ def unwrap(atom1, atom2, coefficients, max=0, total=False):
     return min_dist, closest_point


-def find_surface(surface_group:np.ndarray, layer_tolerance=1, surface_tolerance=5):
+def find_surface(surface_group:np.ndarray, layer_tolerance=0.05, surface_tolerance=5):
     sort_group = np.sort(surface_group)
     layer_mean = []
     current_layer = [sort_group[0]]
@@ -108,12 +111,14 @@ def find_surface(surface_group:np.ndarray, layer_tolerance=1, surface_tolerance=
             current_layer = [sort_group[i]]
     layer_mean.append(np.mean(current_layer))

-    if len(current_layer) == 1:
-        return layer_mean[0]
+    if len(layer_mean) == 1:
+        return [layer_mean[0], 0]

     diff = np.diff(layer_mean)
     if np.any(diff > surface_tolerance):
-        index = np.argmax(diff > 5)
+        index = np.argmax(diff > surface_tolerance)
         return (layer_mean[index], layer_mean[index + 1])
     else:
+        if layer_mean[-1] > layer_mean[0]:
+            return [layer_mean[-1], 0]
         return (layer_mean[-1], layer_mean[0])
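To illustrate the new broadcast branch in vector_vector_angle, a small self-contained check (values chosen by hand, not from the package tests):

import numpy as np
from mdkits.util import numpy_geo

normal = np.array([0.0, 0.0, 1.0])
vectors = np.array([[0.0, 0.0, 1.0],   # parallel to the normal
                    [1.0, 0.0, 0.0]])  # perpendicular to the normal
print(numpy_geo.vector_vector_angle(vectors, normal))  # expected roughly [0., 90.]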
mdkits/util/os_operation.py CHANGED
@@ -13,7 +13,7 @@ def remove_temp_dir(temp_dir):
     shutil.rmtree(temp_dir)


-def default_file_name(match, last=False):
+def default_file_name(match, last=False, space_split=False):
     import glob
     file_list = glob.glob(match)
     if file_list:
@@ -21,6 +21,8 @@ def default_file_name(match, last=False):
            default_file_name = sort_word_and_number(file_list)[-1]
         else:
            default_file_name = list(file_list)
+            if space_split:
+                default_file_name = ' '.join(default_file_name)
     else:
        default_file_name = None

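A hedged example of the new space_split flag on default_file_name, assuming the joined string is built from the glob matches (the pattern below is illustrative):

from mdkits.util.os_operation import default_file_name

files = default_file_name("*.cube")                     # list of matching paths, or None
joined = default_file_name("*.cube", space_split=True)  # matches joined as "a.cube b.cube ..."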
mdkits/util/out_err.py CHANGED
@@ -6,17 +6,29 @@ import numpy as np
 import sys, os


-def cell_output(cell):
-    print(f"system cell: x = {cell[0]}, y = {cell[1]}, z = {cell[2]}, a = {cell[3]}\u00B0, b = {cell[4]}\u00B0, c = {cell[5]}\u00B0")
+def cell_output(atoms):
+    cell = atoms.cell.cellpar()
+    if not hasattr(atoms, "name"):
+        atoms.name = "present"
+    print(f"{atoms.name} cell: x = {cell[0]}, y = {cell[1]}, z = {cell[2]}, a = {cell[3]}\u00B0, b = {cell[4]}\u00B0, c = {cell[5]}\u00B0")


 def path_output(file: str):
-    print(os.path.abspath(file))
+    env_var_name = 'ssh_name'
+    file_path = os.path.abspath(file)
+    if os.environ.get(env_var_name):
+        ssh_name = os.environ.get(env_var_name)
+        file_path = f"{ssh_name}:{file_path}"
+    print(file_path)

 def check_cell(atoms, cell=None):
-    if not np.array_equal(atoms.cell.cellpar(), np.array([0., 0., 0., 90., 90., 90.])):
-        cell_output(atoms.cell.cellpar())
+    if cell is not None:
+        atoms.set_cell(cell)
+        cell_output(atoms)
+    elif not np.array_equal(atoms.cell.cellpar(), np.array([0., 0., 0., 90., 90., 90.])):
+        cell_output(atoms)
     elif np.array_equal(atoms.cell.cellpar(), np.array([0., 0., 0., 90., 90., 90.])) and cell is not None:
         atoms.set_cell(cell)
+        cell_output(atoms)
     else:
-        raise ValueError("can't parse cell please use --cell set cell")
+        raise ValueError("can't parse cell please use --cell set cell")
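A short sketch of the new path_output behaviour: when the ssh_name environment variable is set, the printed path gains a host prefix (the environment value below is illustrative):

import os
from mdkits.util import out_err

os.environ["ssh_name"] = "cluster"
out_err.path_output("wrap.xyz")   # prints e.g. cluster:/abs/path/to/wrap.xyz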