mdkits 0.1.13__py3-none-any.whl → 1.2.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48) hide show
  1. mdkits/build_cli/adsorbate.py +2 -1
  2. mdkits/build_cli/build_bulk.py +2 -1
  3. mdkits/build_cli/build_interface.py +5 -0
  4. mdkits/build_cli/build_solution.py +48 -4
  5. mdkits/build_cli/build_surface.py +2 -5
  6. mdkits/build_cli/cut_surface.py +1 -1
  7. mdkits/build_cli/supercell.py +1 -1
  8. mdkits/cli/convert.py +1 -1
  9. mdkits/cli/extract.py +29 -18
  10. mdkits/{cli → dft_cli/.back}/pdos.py +1 -0
  11. mdkits/dft_cli/check_neb.py +0 -0
  12. mdkits/{cli → dft_cli}/cube.py +3 -2
  13. mdkits/dft_cli/dft_cli.py +23 -0
  14. mdkits/dft_cli/fix.py +54 -0
  15. mdkits/dft_cli/pdos.py +119 -0
  16. mdkits/md_cli/angle.py +122 -0
  17. mdkits/{cli → md_cli}/density.py +24 -19
  18. mdkits/md_cli/dipole.py +124 -0
  19. mdkits/md_cli/hb_distribution.py +185 -0
  20. mdkits/md_cli/md_cli.py +32 -0
  21. mdkits/md_cli/monitor.py +104 -0
  22. mdkits/md_cli/msd.py +44 -0
  23. mdkits/md_cli/rdf.py +53 -0
  24. mdkits/md_cli/setting.py +14 -0
  25. mdkits/md_cli/vac.py +65 -0
  26. mdkits/{cli → md_cli}/wrap.py +4 -3
  27. mdkits/mdkits.py +5 -9
  28. mdkits/util/.fig_operation.py.swp +0 -0
  29. mdkits/util/arg_type.py +18 -8
  30. mdkits/util/cp2k_input_parsing.py +5 -1
  31. mdkits/util/encapsulated_ase.py +28 -7
  32. mdkits/util/encapsulated_mda.py +4 -1
  33. mdkits/util/numpy_geo.py +10 -5
  34. mdkits/util/os_operation.py +3 -1
  35. mdkits/util/out_err.py +18 -6
  36. mdkits-1.2.3.dist-info/METADATA +370 -0
  37. mdkits-1.2.3.dist-info/RECORD +51 -0
  38. mdkits/cli/,hb_distribution_down.py +0 -114
  39. mdkits/cli/hartree_potential.py +0 -59
  40. mdkits/cli/hartree_potential_ave.py +0 -84
  41. mdkits/cli/hb.py +0 -101
  42. mdkits/cli/hb_distribution.py +0 -126
  43. mdkits/cli/packmol_input.py +0 -76
  44. mdkits-0.1.13.dist-info/METADATA +0 -226
  45. mdkits-0.1.13.dist-info/RECORD +0 -43
  46. {mdkits-0.1.13.dist-info → mdkits-1.2.3.dist-info}/LICENSE +0 -0
  47. {mdkits-0.1.13.dist-info → mdkits-1.2.3.dist-info}/WHEEL +0 -0
  48. {mdkits-0.1.13.dist-info → mdkits-1.2.3.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,370 @@
1
+ Metadata-Version: 2.3
2
+ Name: mdkits
3
+ Version: 1.2.3
4
+ Summary: kits for md or dft
5
+ License: MIT
6
+ Keywords: molecular dynamics,density functional theory
7
+ Author: jxxcr
8
+ Author-email: jixxcr@qq.com
9
+ Requires-Python: >=3.11,<4.0
10
+ Classifier: License :: OSI Approved :: MIT License
11
+ Classifier: Operating System :: OS Independent
12
+ Classifier: Programming Language :: Python :: 3
13
+ Classifier: Programming Language :: Python :: 3.11
14
+ Classifier: Programming Language :: Python :: 3.12
15
+ Classifier: Programming Language :: Python :: 3.13
16
+ Requires-Dist: Cp2kData (>=0.7.2,<0.8.0)
17
+ Requires-Dist: MDAnalysis (>=2.8.0,<3.0.0)
18
+ Requires-Dist: ase (>=3.22.1,<4.0.0)
19
+ Requires-Dist: click (>=8.1.3,<9.0.0)
20
+ Requires-Dist: julia (>=0.6.2,<0.7.0)
21
+ Requires-Dist: matplotlib (>=3.9.0,<4.0.0)
22
+ Requires-Dist: numpy (>=1.26.4,<2.0.0)
23
+ Requires-Dist: pandas (>=2.3.2,<3.0.0)
24
+ Requires-Dist: pyyaml (>=6.0.1,<7.0.0)
25
+ Requires-Dist: tidynamics (>=1.1.2,<2.0.0)
26
+ Project-URL: Repository, https://github.com/jxxcr/mdkits
27
+ Description-Content-Type: text/markdown
28
+
29
+ # mdkits
30
+ [中文文档](README_zh.md)
31
+
32
+ `mdkits` provides a variety of tools. Installation script:
33
+ ```bash
34
+ pip install mdkits --upgrade
35
+ ```
36
+
37
+ ## General Option Parameter Types
38
+
39
+ 1. `CELL TYPE`: Specifies lattice cell parameters, e.g., `10,10,10`, `10,10,10,90,90,90`, etc.
40
+ 2. `FRAME RANGE`: Specifies the frame range, e.g., `1`, `1:10:2`, etc.
41
+ 3. `--group` and `--surface`: Select analysis objects using [selection language](https://userguide.mdanalysis.org/stable/selections.html).
42
+ 4. `--update_water`, `--distance`, and `--angle`: Enable dynamic update of water molecule positions during trajectory analysis.
43
+ 5. By specifying the `Host` in the corresponding `SSH config-file` on the server as the `ssh_name` environment variable, you can prepend `ssh_name` to the `path` in the script's output, which facilitates subsequent `scp` operations:
44
+ ```bash
45
+ ssh_name:path/to/file
46
+ ```
47
+
48
+ ## Trajectory File Processing Scripts
49
+
50
+ `md` is the trajectory file processing tool, which includes several processing utilities.
51
+
52
+ ### Density Distribution
53
+
54
+ `density` is used to analyze the density distribution of a specific element along the z-axis in a system. For example, to analyze the density distribution of the `H` element along the z-axis:
55
+ ```bash
56
+ mdkits md density [FILENAME] --group="name H" --cell [FILENAME]
57
+ ```
58
+ This will output a file named `density_name_H.dat`, where the first column is the z-axis coordinate and the second column is the density distribution in units of mol/L. To output the density distribution in units of $g/cm^3$, you can specify the `--atomic_mass` option, for example:
59
+ ```bash
60
+ mdkits md density [FILENAME] --group="name H" --cell [FILENAME] --atomic_mass=1.00784
61
+ ```
62
+ This will output the density distribution in units of $g/cm^3$. You can specify surface atoms to normalize the density distribution to the surface, for example:
63
+ ```bash
64
+ mdkits md density [FILENAME] --group="name O" --cell 10,10,10 --atomic_mass=18.01528 --surface="name Pt or name Ru"
65
+ ```
66
+ This will normalize the density distribution to the surface and analyze the density distribution of water molecules using the positions of O atoms as the reference for water molecules. For systems with $OH^-$ ions, you can use the `--update_water` option to update the positions of water molecules in each frame, without needing to specify an element explicitly, for example:
67
+ ```bash
68
+ mdkits md density [FILENAME] --update_water --cell 10,10,10 --atomic_mass=18.01528 --surface="name Pt or name Ru"
69
+ ```
70
+ The output file will be named `density_water.dat`.
71
+
72
+ ### Hydrogen Bonds
73
+
74
+ `hb` is used to analyze hydrogen bonds in a system, for example, to analyze the distribution of hydrogen bonds along the z-axis:
75
+ ```bash
76
+ mdkits md hb [FILENAME] --cell 10,10,40 --surface "prop z < 10" --update_water
77
+ ```
78
+ Or to analyze hydrogen bonds of a single water molecule:
79
+ ```bash
80
+ mdkits md hb [FILENAME] --cell 10,10,40 --index 15
81
+ ```
82
+
83
+ ### Angle
84
+
85
+ `angle` is used to analyze the abundance distribution of the angle between the bisector vector of a water molecule's OH bond and the surface normal. For example, to analyze the angle abundance distribution of water molecules within 5 Å of the surface:
86
+ ```bash
87
+ mdkits md angle [FILENAME] --cell 10,10,40 --surface "name Pt" --water_height 5
88
+ ```
89
+
90
+ ### Dipole Distribution
91
+
92
+ `dipole` is used to analyze the dipole ($\cos \phi \rho_{H_2 O}$) distribution in a system. For example, to analyze the $\cos \phi \rho_{H_2 O}$ distribution in the system:
93
+ ```bash
94
+ mdkits md dipole [FILENAME] --cell 10,10,40 --surface "name Pt"
95
+ ```
96
+
97
+ ### Radial Distribution Function (RDF)
98
+
99
+ `rdf` is used to analyze the radial distribution function between two `group`s. For example, to analyze the radial distribution function between `O` and `H` elements in the system:
100
+ ```bash
101
+ mdkits md rdf [FILENAME] --group "name O" "name H" --cell 10,10,40 --range 0.1 5
102
+ ```
103
+
104
+ ### Mean Squared Displacement (MSD)
105
+
106
+ `msd` is used to analyze the mean squared displacement of certain atoms in a system. For example, to analyze the MSD of `Li` atoms along the z-axis:
107
+ ```bash
108
+ mdkits md msd [FILENAME] z "name Li"
109
+ ```
110
+
111
+ ### Monitor
112
+
113
+ `monitor` is used to monitor changes in atom height, bond length, and bond angle in a system. For example, to monitor the height of the atom with `index` 0:
114
+ ```bash
115
+ mdkits md monitor [FILENAME] --cell 10,10,40 --surface "name Pt" -i 0
116
+ ```
117
+ This will output the height from the surface as a function of each frame. To monitor the bond length between atoms 0 and 1:
118
+ ```bash
119
+ mdkits md monitor [FILENAME] --cell 10,10,40 --surface "name Pt" -i 0 -i 1
120
+ ```
121
+ This will output the heights from the surface for atoms 0 and 1, and the bond length between 0 and 1 as a function of each frame. To monitor the bond angle of 1-0-2:
122
+ ```bash
123
+ mdkits md monitor [FILENAME] --cell 10,10,40 --surface "name Pt" -i 1 -i 0 -i 2
124
+ ```
125
+ This will output the heights from the surface for atoms 1, 0, and 2, the bond lengths between 1-0 and 0-2, and the bond angle 1-0-2 as a function of each frame. Note that atoms at the vertex of an angle should be placed in the middle.
126
+
127
+ ### Position Normalization
128
+
129
+ `wrap` is used to normalize the atomic positions in a trajectory file. For example, to normalize the atomic positions in `[FILENAME]` within the unit cell and output it as `wrapped.xyz`. By default, it reads `ABC` and `ALPHA_BETA_GAMMA` information from the `cp2k` input file `input.inp` as lattice cell parameters:
130
+ ```bash
131
+ mdkits md wrap [FILENAME]
132
+ ```
133
+ Or specify the `cp2k` input file:
134
+ ```bash
135
+ mdkits md wrap [FILENAME] --cp2k_input_file setting.inp
136
+ ```
137
+ Or specify the lattice cell parameters:
138
+ ```bash
139
+ mdkits md wrap [FILENAME] --cell 10,10,10
140
+ ```
141
+ The default `[FILENAME]` is `*-pos-1.xyz`.
142
+
143
+ ### Vibrational Density of States (VDOS)
144
+
145
+ `vac` is used to analyze the velocity autocorrelation function of a trajectory and compute the Fourier transform of the velocity autocorrelation function, which is the vibrational density of states (VDOS). For example, to analyze the VDOS of the system:
146
+ ```bash
147
+ mdkits md vac h2o-vel-1.xyz
148
+ ```
149
+ The default `[FILENAME]` is `*-vel-1.xyz`.
150
+
151
+ ## DFT Property Analysis Scripts
152
+
153
+ `dft` is the DFT property analysis tool, which includes several analysis utilities.
154
+
155
+ ### PDOS
156
+
157
+ `pdos` is used to analyze the PDOS of a system. To analyze the d-orbital DOS of `[FILENAME]`:
158
+ ```bash
159
+ mdkits dft pdos [FILENAME] -t d
160
+ ```
161
+
162
+ ### CUBE Files
163
+
164
+ `cube` is used to process files in [`cube` format](https://paulbourke.net/dataformats/cube/), averaging them along the z-axis:
165
+ ```bash
166
+ mdkits dft cube [FILENAME]
167
+ ```
168
+ The processed data will be output to `cube.out`. You can also calculate the average value within a specific region:
169
+ ```bash
170
+ mdkits dft cube [FILENAME] -b 1 2
171
+ ```
172
+ This will print the average value to the screen and also record it in the comment line of `cube.out`.
173
+
174
+ ## Modeling
175
+
176
+ `build` is the modeling tool, which includes several modeling utilities.
177
+
178
+ ### Building Bulk Models
179
+
180
+ `bulk` is used to build bulk models. For example, to build an `fcc` bulk model of `Pt`:
181
+ ```bash
182
+ mdkits build bulk Pt fcc
183
+ ```
184
+ To build as a conventional cell model:
185
+ ```bash
186
+ mdkits build bulk Pt fcc --cubic
187
+ ```
188
+ To build a `Caesium chloride` structure model:
189
+ ```bash
190
+ mdkits build bulk CsCl cesiumchloride -a 4.123
191
+ ```
192
+ To build a `fluorite` structure model:
193
+ ```bash
194
+ mdkits build bulk BaF2 fluorite -a 6.196
195
+ ```
196
+
197
+ ### Building Surface Models
198
+
199
+ `surface` is used to build common surface models. Usage:
200
+ ```bash
201
+ mdkits build surface [ELEMENT] [SURFACE_TYPE] [SIZE]
202
+ ```
203
+ For example, to build an `fcc111` surface model of `Pt`:
204
+ ```bash
205
+ mdkits build surface Pt fcc111 2 2 3 --vacuum 15
206
+ ```
207
+ To build a graphene surface:
208
+ ```bash
209
+ mdkits build surface C2 graphene 3 3 1 --vacuum 15
210
+ ```
211
+
212
+ ### Building Surface Models from Existing Structures
213
+
214
+ `cut` is used to build surface models from existing structures (the structure must be in a conventional cell). For example, to build an `fcc331` surface model from `Pt_fcc.cif`:
215
+ ```bash
216
+ mdkits build cut Pt_fcc.cif --face 3 3 1 --size 3 3 5 --vacuum 15
217
+ ```
218
+
219
+ ### Adding Adsorbates to Surface Structures
220
+
221
+ `adsorbate` is used to add adsorbates to surface structures. For example, to add an `H` atom to `surface.cif`:
222
+ ```bash
223
+ mdkits build adsorbate surface.cif H --select "index 0" --height 1
224
+ ```
225
+ Or to add an `H` atom with a coverage of 5 to `Pt_fcc111_335.cif`:
226
+ ```bash
227
+ mdkits build adsorbate Pt_fcc111_335.cif H --select "prop z > 16" --height 2 --cover 5
228
+ ```
229
+
230
+ ### Building Solution Phase Models
231
+
232
+ `solution` is used to build solution phase models. When using for the first time, you should install `juliaup`:
233
+ ```bash
234
+ mdkits build solution --install_julia
235
+ ```
236
+ Then install `Packmol`:
237
+ ```bash
238
+ mdkits build solution --install_packmol
239
+ ```
240
+ After successful installation, you can use the `solution` functionality. For example, to build a water box with 32 water molecules:
241
+ ```bash
242
+ mdkits build solution --water_number 32 --cell 9.86,9.86,9.86
243
+ ```
244
+ Or to build a solution containing ions:
245
+ ```bash
246
+ mdkits build solution li.xyz k.xyz --water_number 64 --tolerance 2.5 -n 25 -n 45 --cell 15,15,15
247
+ ```
248
+ Here, the number of `-n` arguments must match the number of specified solvent molecule types, used to specify the number of solvents to add, respectively. Alternatively, build a solution phase model from `packmol` input files:
249
+ ```bash
250
+ mdkits build solution input.pm input2.pm --infile
251
+ ```
252
+
253
+ ### Building Interface Models
254
+
255
+ `interface` is used to build interface models. For example, to build an interface model without vacuum:
256
+ ```bash
257
+ mdkits build interface --slab Pt_fcc100_555.cif --sol water_160.cif
258
+ ```
259
+ Or to build an interface with a gas phase model:
260
+ ```bash
261
+ mdkits build interface --slab Pt_fcc100_555.cif --sol water_160.cif --cap ne --vacuum 20
262
+ ```
263
+
264
+ ### Building Supercell Models
265
+
266
+ `supercell` is used to build supercell models:
267
+ ```bash
268
+ mdkits build supercell Li3PO4.cif 2 2 2
269
+ ```
270
+
271
+ ## Others
272
+
273
+ ### Trajectory Extraction
274
+
275
+ `extract` is used to extract specific frames from a trajectory file. For example, to extract frames from the 1000th to the 2000th frame from `frames.xyz` and output them to `1000-2000.xyz`. The parameters for the `-r` option are consistent with Python's slicing syntax:
276
+ ```bash
277
+ mdkits extract frames.xyz -r 1000:2000 -o 1000-2000.xyz
278
+ ```
279
+ Or to extract the last frame from the default trajectory file `*-pos-1.xyz` generated by `cp2k` and output it as `frames_-1.xyz` (which is the default behavior of `extract`):
280
+ ```bash
281
+ mdkits extract
282
+ ```
283
+ Or to output a structure every 50 frames into the `./coord` directory, while adjusting the output format to `cp2k`'s `@INCLUDE coord.xyz` format:
284
+ ```bash
285
+ mdkits extract -cr ::50
286
+ ```
287
+ To extract the positions of specific elements, for example, to extract the positions of `O` and `H` elements:
288
+ ```bash
289
+ mdkits extract --select "name O or name H"
290
+ ```
291
+
292
+ ### Structure File Conversion
293
+
294
+ `convert` is used to convert structure files from one format to another. For example, to convert `structure.xyz` to `out.cif` (default filename is `out`). For files that do not store periodic boundary conditions, you can use the `--cell` option to specify `PBC`:
295
+ ```bash
296
+ mdkits convert -c structure.xyz --cell 10,10,10
297
+ ```
298
+ To convert `structure.cif` to `POSCAR`:
299
+ ```bash
300
+ mdkits convert -v structure.cif
301
+ ```
302
+ To convert `structure.cif` to `structure_xyz.xyz`:
303
+ ```bash
304
+ mdkits convert -c structure.cif -o structure_xyz
305
+ ```
306
+
307
+ ### Data Processing
308
+
309
+ `data` is used for data processing, such as:
310
+ 1. `--nor`: Normalizes the data.
311
+ 2. `--gaus`: Applies Gaussian filtering to the data.
312
+ 3. `--fold`: Folds and averages stacked data.
313
+ 4. `--err`: Calculates error bars for the data.
314
+ And so on.
315
+
316
+ ### Plotting Tool
317
+
318
+ `plot` is used for plotting data. `plot` requires reading a YAML format configuration file for plotting. The YAML file format is as follows:
319
+ ```yaml
320
+ # plot mode 1
321
+ figure1:
322
+ data:
323
+ legend1: ./data1.dat
324
+ legend2: ./data2.dat
325
+ x:
326
+ 0: x-axis
327
+ y:
328
+ 1: y-axis
329
+ x_range:
330
+ - 5
331
+ - 15
332
+
333
+ # plot mode 2
334
+ figure2:
335
+ data:
336
+ y-xais: ./data.dat
337
+ x:
338
+ 0: x-axis
339
+ y:
340
+ 1: legend1
341
+ 2: legend2
342
+ 3: legend3
343
+ 4: legend4
344
+ 5: legend5
345
+ y_range:
346
+ - 0.5
347
+ - 6
348
+ legend_fontsize: 12
349
+
350
+ # plot mode error
351
+ 12_dp_e_error:
352
+ data:
353
+ legend: ./error.dat
354
+ x:
355
+ 0: x-axis
356
+ y:
357
+ 1: y-axis
358
+ fold: dp
359
+ legend_fontsize: 12
360
+ ```
361
+ The above illustrates three plotting modes supported by `plot`: `mode 1`, `mode 2`, and `mode error`. `mode 1` is used for comparing the same column data from multiple data files, `mode 2` is used for comparing different column data from the same data file, and `mode error` is used for plotting mean squared error plots.
362
+
363
+ `plot` can process multiple YAML files simultaneously. Each YAML file can contain multiple plotting configurations. Plotting configurations for `mode 1` and `mode 2` can be automatically recognized, but the `error` mode requires explicit specification, for example:
364
+ ```bash
365
+ mdkits plot *.yaml
366
+ ```
367
+ and:
368
+ ```bash
369
+ mdkits plot *.yaml --error
370
+ ```
@@ -0,0 +1,51 @@
1
+ mdkits/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
+ mdkits/build_cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
3
+ mdkits/build_cli/adsorbate.py,sha256=_Re942ZrHowcsOnBCjRAzkS-eYPWOZEZ5w5PmMztQ7Q,1944
4
+ mdkits/build_cli/build_bulk.py,sha256=5sYnae68bFmo3IAcXFBTi7nJ1hTJKigxuWymRMkNKr8,1622
5
+ mdkits/build_cli/build_cli.py,sha256=sqjnq5aHWLYLbNzN5SORkEYeYaewLagFuSvspJxyh7E,725
6
+ mdkits/build_cli/build_interface.py,sha256=Lknbn2-xs7fTkr2ks-v1fGUFj-2S0DeFNvVkJknBAoc,2743
7
+ mdkits/build_cli/build_solution.py,sha256=7bwaDH-vLBNRzGoYXT72bzLVXdQAZ4HXNuUDuR7AI78,5377
8
+ mdkits/build_cli/build_surface.py,sha256=KBQn8esurXcLFswRVddksot5Jii-jEm-egxq4p1w-DQ,2736
9
+ mdkits/build_cli/cut_surface.py,sha256=_f0t2OyBKb8ZV04b3GezfSDUN4XFd5kQM-yWbSmOofs,2742
10
+ mdkits/build_cli/supercell.py,sha256=ajB0ZbGtq0BFiOqD5ZqqLR7XE4jiINsfIHhshk_EDpo,1031
11
+ mdkits/build_cli/water.xyz,sha256=ByLDz-rYhw_wLPBU78lIQHe4s4Xf5Ckjft-Dus3czIc,171
12
+ mdkits/cli/convert.py,sha256=f0GQQ-hPbJKhMwpITST7sj_LlrCkLdGB4UZY7lpbQsc,1792
13
+ mdkits/cli/data.py,sha256=FGA4S9Cfo6WUJBSPWKOJrrZXHo_Qza-jNG1P_Dw7yi4,3262
14
+ mdkits/cli/extract.py,sha256=33P9DXOUK38ORNZ3f-7Sx-1MCK9J0MIi1mqXHC9Pou0,2945
15
+ mdkits/cli/plot.py,sha256=1yh5dq5jnQDuyWlxV_9g5ztsnuFHVu4ouYQ9VJYSrUU,8938
16
+ mdkits/config/__init__.py,sha256=ZSwmnPK02LxJLMgcYmNb-tIOk8fEuHf5jpqD3SDHWLg,1039
17
+ mdkits/config/settings.yml,sha256=PY7u0PbFLuxSnd54H5tI9oMjUf-mzyADqSZtm99BwG0,71
18
+ mdkits/dft_cli/.back/pdos.py,sha256=EjZ1oWJHHUocZAZe8dgJUu6bkLPVwgrKkggidF3yduE,1445
19
+ mdkits/dft_cli/check_neb.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
20
+ mdkits/dft_cli/cube.py,sha256=d8S4jdooEul1giK8DAx5yvAFSnSKoV83qd43kw-iSL0,1272
21
+ mdkits/dft_cli/dft_cli.py,sha256=FoibPrWWAw2lKcNBmGa1KgN-kEO7UBa8VmA7cVuYRt8,381
22
+ mdkits/dft_cli/fix.py,sha256=qncb6rsCRdIJtlnIAzR7s3nSG_qv_4X511QV8MGsL0c,1843
23
+ mdkits/dft_cli/pdos.py,sha256=1m1ZJLpOnrxl-oHQLAp0pI26ogxP7W4tgSu8Jh5jGvg,4848
24
+ mdkits/md_cli/angle.py,sha256=EvsW4lmNDM2iyFj8YZcrikGVvYeyP6WIzu269-ZI4iw,5468
25
+ mdkits/md_cli/density.py,sha256=Y2rJU1qJQq3OBb64_-pPhywm7eMdd0ywc8zuuw5XYYs,5392
26
+ mdkits/md_cli/dipole.py,sha256=u_22-vMlzSEqGSpC6c5PG5t3uyjxaRKUvybgytovMk0,5027
27
+ mdkits/md_cli/hb_distribution.py,sha256=lADOBiJYHCHPutXimXVDQpPdRKasBpldhiIl9SKf39o,7950
28
+ mdkits/md_cli/md_cli.py,sha256=2vH04o_3d5kCJsn3qEq-iUPhebKJOrS-e7HJtyiZTiQ,571
29
+ mdkits/md_cli/monitor.py,sha256=JNEgz5RGbFn4x_E85pAiPUY1NVIyZ3b2vjpBk_d1dR8,4536
30
+ mdkits/md_cli/msd.py,sha256=xg_c2sxNDOlxwK8jsNsQ4nbdqQAUvPGhOqFA6TXBTfs,1515
31
+ mdkits/md_cli/rdf.py,sha256=p4HMMYZLfFRPnGx7YHQU6kZnMAfoL6vOyOVpZhfdBfM,1712
32
+ mdkits/md_cli/setting.py,sha256=mxMTYpm6DUjMt9hOKsJbBSKwCqzMilOR0bo1azSdJP0,846
33
+ mdkits/md_cli/vac.py,sha256=MJi_uftl0UUqhWvqDtcr14qtcRgYRd6X0VSg4J5MFN8,2133
34
+ mdkits/md_cli/wrap.py,sha256=dwlLTfSnR5v99QP1klbbzWLXa71uQqprl7nSoVAm9dA,1049
35
+ mdkits/mdkits.py,sha256=EiAt7dxGTaHuuj7bCNxgAqZbX0i3sldO0mBxOG-aMnY,595
36
+ mdkits/util/.fig_operation.py.swp,sha256=iZYqdYMj4UKS1rmbXv8Ve2FcVBcNljX7Y43-neMdPSk,12288
37
+ mdkits/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
38
+ mdkits/util/arg_type.py,sha256=Gknsv2B9mG-g_q2Qfyh7vytt1wM4UQhFOlicpL4o5TI,2813
39
+ mdkits/util/cp2k_input_parsing.py,sha256=IAZ2fF-s_atiqdrePRkKG1_EQYB195TFdxHUuB3uYtE,1485
40
+ mdkits/util/encapsulated_ase.py,sha256=Be5H08-L3_Uhw4s03Y3CC0ocVH3swcFE8nyCN5yrUgY,4908
41
+ mdkits/util/encapsulated_mda.py,sha256=m3i-XrcscMcM5V7JzLnor3JtAOfuDx3LLMl0tZt0n-w,2325
42
+ mdkits/util/fig_operation.py,sha256=FwffNUtXorMl6qE04FipgzcVljEQii7wrNJUCJMyY3E,1045
43
+ mdkits/util/numpy_geo.py,sha256=B2QVADl1M8iixZrIOre2UUhHqvAeghvM8Q2woEQsH3Q,3880
44
+ mdkits/util/os_operation.py,sha256=MimTPsUMWGuggjxUBYjB86-73K2RdpiJ3EM0z5DQnqg,1091
45
+ mdkits/util/out_err.py,sha256=GvHeXO5xHmTd5DK1QN6zvCSBBOUokM9DFdjsiDifxnc,1094
46
+ mdkits/util/structure_parsing.py,sha256=mRPMJeih3O-ST7HeETDvBEkfV-1psT-XgxyYgDadV0U,4152
47
+ mdkits-1.2.3.dist-info/entry_points.txt,sha256=xoWWZ_yL87S501AzCO2ZjpnVuYkElC6z-8J3tmuIGXQ,44
48
+ mdkits-1.2.3.dist-info/LICENSE,sha256=VLaqyB0r_H7y3hUntfpPWcE3OATTedHWI983htLftcQ,1081
49
+ mdkits-1.2.3.dist-info/METADATA,sha256=ZXYPxpsUV4JvMNC6xfOZmShf-FUZAAuYnep1eLN-Jcw,13858
50
+ mdkits-1.2.3.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
51
+ mdkits-1.2.3.dist-info/RECORD,,
@@ -1,114 +0,0 @@
1
- #!/usr/bin/env python3
2
-
3
- import numpy as np
4
- import argparse
5
- import MDAnalysis
6
- from MDAnalysis import Universe
7
- from MDAnalysis.analysis.base import AnalysisBase
8
- from util import cp2k_input_parsing
9
- import warnings
10
- warnings.filterwarnings("ignore")
11
-
12
-
13
- class Hb_distribution(AnalysisBase):
14
- def __init__(self, filename, cell, surface, dt=0.001, hb_distance=3.5, hb_angle=35, bin_size=0.2):
15
- u = Universe(filename)
16
- u.trajectory.ts.dt = dt
17
- u.dimensions = cell
18
- self.u = u
19
- self.atomgroup = u.select_atoms("all")
20
- self.hb_distance = hb_distance
21
- self.hb_angle = hb_angle
22
- self.bin_size = bin_size
23
- self.surface = surface
24
- self.frame_count = 0
25
- super(Hb_distribution, self).__init__(self.atomgroup.universe.trajectory, verbose=True)
26
-
27
- def _prepare(self):
28
- bin_num = int(self.u.dimensions[2] / self.bin_size) + 2
29
- self.donor = np.zeros(bin_num, dtype=np.float64)
30
-
31
- def _append(self, hb_d):
32
- bins_d = np.floor(hb_d / self.bin_size).astype(int) + 1
33
-
34
- bins_d = bins_d[bins_d < len(self.donor)]
35
-
36
- np.add.at(self.donor, bins_d, 1)
37
-
38
- self.frame_count += 1
39
-
40
- def _single_frame(self):
41
- o_group = self.atomgroup.select_atoms("name O")
42
- o_pair = MDAnalysis.lib.distances.capped_distance(o_group.positions, o_group.positions, min_cutoff=0, max_cutoff=self.hb_distance, box=self.u.dimensions, return_distances=False)
43
-
44
- o0 = o_group[o_pair[:, 0]]
45
- o1 = o_group[o_pair[:, 1]]
46
-
47
- o0h1 = self.atomgroup[o0.indices + 1]
48
- o0h2 = self.atomgroup[o0.indices + 2]
49
-
50
- angle_o0h1_o0_o1 = np.degrees(
51
- MDAnalysis.lib.distances.calc_angles(o0h1.positions, o0.positions, o1.positions, box=self.u.dimensions)
52
- )
53
- angle_o0h2_o0_o1 = np.degrees(
54
- MDAnalysis.lib.distances.calc_angles(o0h2.positions, o0.positions, o1.positions, box=self.u.dimensions)
55
- )
56
-
57
- mid_z = (self.surface[0] + self.surface[1]) / 2
58
-
59
- condition_d = ((angle_o0h1_o0_o1 < self.hb_angle) | (angle_o0h2_o0_o1 < self.hb_angle)) & (o0.positions[:, 2] - o1.positions[:, 2] > 0)
60
- #condition_d = ((angle_o0h1_o0_o1 < self.hb_angle) | (angle_o0h2_o0_o1 < self.hb_angle)) & (((o0.positions[:, 2] < mid_z) & (o0.positions[:, 2] - o1.positions[:, 2] > 0)) | ((o0.positions[:, 2] > mid_z) & (o0.positions[:, 2] - o1.positions[:, 2] < 0)))
61
- #condition_a = ((angle_o1h1_o1_o0 < self.hb_angle) | (angle_o1h2_o1_o0 < self.hb_angle)) & (((o1.positions[:, 2] < mid_z) & (o1.positions[:, 2] - o0.positions[:, 2] > 1.5)) | ((o1.positions[:, 2] > mid_z) & (o1.positions[:, 2] - o0.positions[:, 2] < -1.5)))
62
-
63
- hb_d = (o0.positions[:, 2][condition_d] + o1.positions[:, 2][condition_d]) / 2
64
- #hb_a = (o0.positions[:, 2][condition_a] + o1.positions[:, 2][condition_a]) / 2
65
-
66
- self._append(hb_d)
67
-
68
- def _conclude(self):
69
- if self.frame_count > 0:
70
- average_donor = self.donor / self.frame_count
71
-
72
- bins_z = np.arange(len(self.donor)) * self.bin_size
73
-
74
- lower_z, upper_z = self.surface
75
- mask = (bins_z >= lower_z) & (bins_z <= upper_z)
76
- filtered_bins_z = bins_z[mask] - lower_z
77
- filtered_average_donor = average_donor[mask]
78
-
79
- combined_data = np.column_stack((filtered_bins_z, filtered_average_donor))
80
-
81
- filename = 'hb_distribution_down.dat'
82
- np.savetxt(filename, combined_data, header="Z\tDonor", fmt='%.5f', delimiter='\t')
83
-
84
-
85
- def parse_data(s):
86
- return [float(x) for x in s.replace(',', ' ').split()]
87
-
88
-
89
- def parse_r(s):
90
- return [int(x) for x in s.replace(':', ' ').split()]
91
-
92
-
93
- def parse_argument():
94
- parser = argparse.ArgumentParser(description="analysis hb distribution")
95
- parser.add_argument('filename', type=str, help='filename to analysis')
96
- parser.add_argument('--cp2k_input_file', type=str, help='input file name of cp2k, default is "input.inp"', default='input.inp')
97
- parser.add_argument('-r', type=parse_r, help='range of analysis', default=[0, -1, 1])
98
- parser.add_argument('--cell', type=parse_data, help='set cell, a list of lattice, --cell x,y,z or x,y,z,a,b,c')
99
- parser.add_argument('--surface', type=parse_data, help='[down_surface_z, up_surface_z]')
100
- parser.add_argument('--hb_param', type=parse_data, help='[hb_distance, hb_angle], default is [3.5, 35]', default=[3.5, 35])
101
-
102
- return parser.parse_args()
103
-
104
-
105
- def main():
106
- args = parse_argument()
107
- cell = cp2k_input_parsing.get_cell(args.cp2k_input_file, args.cell)
108
-
109
- hb_dist = Hb_distribution(args.filename, cell, args.surface, hb_distance=args.hb_param[0], hb_angle=args.hb_param[1])
110
- hb_dist.run(start=args.r[0], stop=args.r[1], step=args.r[2])
111
-
112
-
113
- if __name__ == '__main__':
114
- main()
@@ -1,59 +0,0 @@
1
- #!/usr/bin/env python3
2
-
3
- ################################################
4
- # averange cp2k output(or some else file correspond to ase.io.read_cube_data) hartree.cube to z coordinate with python
5
- ## file path is need to pay attention
6
- ## cycle parameter is need to pay attention
7
- ## buck range is need to pay attention
8
- ################################################
9
-
10
- from numpy import empty, array, mean, append, concatenate
11
- from argparse import ArgumentParser
12
- from util import encapsulated_ase, os_operation
13
-
14
-
15
- def array_type(string):
16
- number_list = string.split(',')
17
- number_array = array(number_list, dtype=float)
18
- return number_array
19
-
20
-
21
- def buck_potential(xaxe, potential, range):
22
- mix = concatenate((xaxe.reshape(-1, 1), potential.reshape(-1, 1)), axis=1)
23
- mask = (mix[:,0] >= range[0]) & (mix[:,0] <=range[1])
24
- buck_potential = mix[mask]
25
- ave_potential = mean(buck_potential[:,1])
26
- return ave_potential
27
-
28
-
29
- # set argument
30
- parser = ArgumentParser(description='to handle cp2k output file hartree cube, name should be "hartree-*.cube"')
31
- parser.add_argument('file_name', type=str, nargs='?', help='hartree cube file', default=os_operation.default_file_name('*-v_hartree-1_*.cube', last=True))
32
- parser.add_argument('-b', '--buck_range', type=array_type, help='parameter to calculate mean value of buck', default=None)
33
- parser.add_argument('-o', type=str, help='output file name, default is "out.put"', default='hartree.out')
34
-
35
- args = parser.parse_args()
36
-
37
-
38
- ## init output potential file's shape, and define a z axe
39
- init_array = encapsulated_ase.ave_potential(args.file_name)
40
- potential = empty((0, init_array[0].shape[0]))
41
- z_coordinates = array((init_array[1])).reshape(-1, 1)
42
-
43
- potential = encapsulated_ase.ave_potential(args.file_name)[0]
44
-
45
- aved = mean(potential, axis=0)
46
- total_potential = append(z_coordinates, potential.reshape(-1, 1), axis=1)
47
-
48
- ## if buck range is exit, out put a difference of potential
49
- if args.buck_range is not None:
50
- buck_potential = buck_potential(z_coordinates, potential, args.buck_range)
51
- print(buck_potential)
52
- with open('hartree_potential.dat', 'w') as f:
53
- f.write(f"{buck_potential}" + '\n')
54
-
55
- ## write output
56
- with open(args.o, 'w') as f:
57
- for value in total_potential:
58
- f.write(" ".join(map(str, value)) + '\n')
59
-
@@ -1,84 +0,0 @@
1
- #!/usr/bin/env python3
2
-
3
- ################################################
4
- # averange cp2k output(or some else file correspond to ase.io.read_cube_data) hartree.cube to z coordinate with python
5
- ## file path is need to pay attention
6
- ## cycle parameter is need to pay attention
7
- ## buck range is need to pay attention
8
- ################################################
9
-
10
- from ase.io.cube import read_cube_data
11
- import numpy as np
12
- import argparse
13
-
14
- def array_type(string):
15
- number_list = string.split(',')
16
- number_array = np.array(number_list, dtype=int)
17
- return number_array
18
-
19
-
20
- def ave_potential(filepath):
21
- # is to average hartree file in z_coordinate
22
-
23
- ## read data from filepath
24
- data, atoms = read_cube_data(filepath)
25
-
26
- ## define need parameter
27
- npoints = data.shape[2]
28
- step_size = atoms.cell[2, 2] / ( npoints - 1 )
29
-
30
- ## average hartree file, and calculate z_coordinates
31
- z_coordinates = [i * step_size for i in range(npoints)]
32
- z_potential = 27.2114 * data[:, :, :].sum(axis=(0, 1)) / ( data.shape[0] * data.shape[1] )
33
- return z_potential, z_coordinates
34
-
35
-
36
- def buck_potential(xaxe, potential, range):
37
- mix = np.concatenate((xaxe.reshape(-1, 1), potential.reshape(-1, 1)), axis=1)
38
- mask = (mix[:,0] >= range[0]) & (mix[:,0] <=range[1])
39
- buck_potential = mix[mask]
40
- ave_potential = np.mean(buck_potential[:,1])
41
- return ave_potential
42
-
43
-
44
- # set argument
45
- parser = argparse.ArgumentParser(description='to handle cp2k output file hartree cube, name should be "hartree-*.cube"')
46
- parser.add_argument('folder_path', type=str, help='folder that contain all hartree cube file')
47
- parser.add_argument('cyc_range', type=array_type, help='cycle parameter, need to seperate with ",", similar with range() -- 1,201 1,201,10')
48
- parser.add_argument('-b', '--buck_range', type=array_type, help='parameter to calculate mean value of buck', default=None)
49
- parser.add_argument('-o', type=str, help='output file name, default is "out.put"', default='hartree.out')
50
-
51
- args = parser.parse_args()
52
-
53
-
54
- ## init output potential file's shape, and define a z axe
55
- init_array = ave_potential('{}/hartree-{}.cube'.format(args.folder_path, args.cyc_range[0]))
56
- potential = np.empty((0, init_array[0].shape[0]))
57
- z_coordinates = np.array((init_array[1])).reshape(-1, 1)
58
-
59
- ## average one hartree file
60
- if len(args.cyc_range) == 3:
61
- for i in range(args.cyc_range[0], args.cyc_range[1], args.cyc_range[2]):
62
- file_path = '{}/hartree-{}.cube'.format(args.folder_path, i)
63
- potential = np.append(potential, [ave_potential(file_path)[0]], axis=0)
64
- else:
65
- for i in range(args.cyc_range[0], args.cyc_range[1]):
66
- file_path = '{}/hartree-{}.cube'.format(args.folder_path, i)
67
- potential = np.append(potential, [ave_potential(file_path)[0]], axis=0)
68
-
69
- ## average every averaged harterr file, and append to z_coordinates
70
- #aved_potential = potential[:, :].sum(axis=0) / len(range(1, 201))
71
- aved = np.mean(potential, axis=0)
72
- total_potential = np.append(z_coordinates, aved.reshape(-1, 1), axis=1)
73
-
74
- ## if buck range is exit, out put a difference of potential
75
- if args.buck_range is not None:
76
- buck_potential = buck_potential(z_coordinates, aved, args.buck_range)
77
- with open(args.o + 'diff', 'w') as f:
78
- f.write("{}\t{}\t{}".format(aved[0], buck_potential, aved[0]-buck_potential))
79
-
80
- ## write output
81
- with open(args.o, 'w') as f:
82
- for value in total_potential:
83
- f.write(" ".join(map(str, value)) + '\n')
84
-