mimicpy 0.2.0__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- mimicpy/__init__.py +1 -1
- mimicpy/__main__.py +726 -2
- mimicpy/_authors.py +2 -2
- mimicpy/_version.py +2 -2
- mimicpy/coords/__init__.py +1 -1
- mimicpy/coords/base.py +1 -1
- mimicpy/coords/cpmdgeo.py +1 -1
- mimicpy/coords/gro.py +1 -1
- mimicpy/coords/pdb.py +1 -1
- mimicpy/core/__init__.py +1 -1
- mimicpy/core/prepare.py +3 -3
- mimicpy/core/selector.py +1 -1
- mimicpy/force_matching/__init__.py +34 -0
- mimicpy/force_matching/bonded_forces.py +628 -0
- mimicpy/force_matching/compare_top.py +809 -0
- mimicpy/force_matching/dresp.py +435 -0
- mimicpy/force_matching/nonbonded_forces.py +32 -0
- mimicpy/force_matching/opt_ff.py +2114 -0
- mimicpy/force_matching/qm_region.py +1960 -0
- mimicpy/plugins/__main_installer__.py +76 -0
- mimicpy/{__main_vmd__.py → plugins/__main_vmd__.py} +2 -2
- mimicpy/plugins/pymol.py +56 -0
- mimicpy/plugins/vmd.tcl +78 -0
- mimicpy/scripts/__init__.py +1 -1
- mimicpy/scripts/cpmd.py +1 -1
- mimicpy/scripts/fm_input.py +265 -0
- mimicpy/scripts/fmdata.py +120 -0
- mimicpy/scripts/mdp.py +1 -1
- mimicpy/scripts/ndx.py +1 -1
- mimicpy/scripts/script.py +1 -1
- mimicpy/topology/__init__.py +1 -1
- mimicpy/topology/itp.py +603 -35
- mimicpy/topology/mpt.py +1 -1
- mimicpy/topology/top.py +254 -15
- mimicpy/topology/topol_dict.py +233 -4
- mimicpy/utils/__init__.py +1 -1
- mimicpy/utils/atomic_numbers.py +1 -1
- mimicpy/utils/constants.py +17 -3
- mimicpy/utils/elements.py +1 -1
- mimicpy/utils/errors.py +1 -1
- mimicpy/utils/file_handler.py +1 -1
- mimicpy/utils/strings.py +1 -1
- mimicpy-0.3.0.dist-info/METADATA +156 -0
- mimicpy-0.3.0.dist-info/RECORD +50 -0
- {mimicpy-0.2.0.dist-info → mimicpy-0.3.0.dist-info}/WHEEL +1 -1
- mimicpy-0.3.0.dist-info/entry_points.txt +4 -0
- mimicpy-0.2.0.dist-info/METADATA +0 -86
- mimicpy-0.2.0.dist-info/RECORD +0 -38
- mimicpy-0.2.0.dist-info/entry_points.txt +0 -3
- {mimicpy-0.2.0.dist-info → mimicpy-0.3.0.dist-info/licenses}/COPYING +0 -0
- {mimicpy-0.2.0.dist-info → mimicpy-0.3.0.dist-info/licenses}/COPYING.LESSER +0 -0
- {mimicpy-0.2.0.dist-info → mimicpy-0.3.0.dist-info}/top_level.txt +0 -0
- {mimicpy-0.2.0.dist-info → mimicpy-0.3.0.dist-info}/zip-safe +0 -0
mimicpy/force_matching/dresp.py (new file)

@@ -0,0 +1,435 @@
```python
import numpy as np
import multiprocessing as mp
from ..scripts.fmdata import FMDataset
from .qm_region import QMRegion


def get_configurations(fmdata: FMDataset, begin: int, end: int, step: int, qm_region: 'QMRegion'):
    """Get a range of configurations for DRESP, with QM data reordered to GROMACS topology order.

    Args:
        fmdata (FMDataset): The force matching dataset.
        begin (int): Starting frame index.
        end (int): Ending frame index (exclusive).
        step (int): Step size for frame iteration.
        qm_region (QMRegion): The QMRegion object providing GROMACS/CPMD mappings and QM atom definitions.

    Returns:
        list: A list of configuration dictionaries. Each dictionary contains:
            'qm_coordinates': QM atom coordinates, ordered by GROMACS QM atom topology.
            'mm_coordinates': MM atom coordinates, as fetched from FMDataset (GROMACS ID ordered).
            'electric_potential': Electric potential at MM atom/point locations, as fetched.
            'electric_field': Electric field at MM atom/point locations, as fetched.
            'reference_charge': QM atom Hirshfeld charges, ordered by GROMACS QM atom topology.
            'qm_cpmd_ids_fmdata_order': Original CPMD IDs for QM atoms from FMDataset for this frame.
            'mm_gmx_ids_fmdata_order': Original GROMACS IDs for MM atoms/points from FMDataset for this frame.
    """
    configurations = []

    # Target order for QM properties: GROMACS QM atom indices (1-based)
    target_gmx_qm_indices = qm_region.qm_atoms.index  # These are 1-based GROMACS indices
    gmx_to_cpmd_map = qm_region.gmx_to_cpmd_map  # Maps 1-based GMX to 1-based CPMD

    # Build the reordering map once, assuming atom IDs are consistent across frames in fmdata.
    # Get the CPMD IDs for QM atoms from the first frame in fmdata to establish the source order.
    cpmd_ids_fmdata_order_qm = fmdata.get_configuration_properties(begin, 'id', 'qm')

    qm_reorder_indices = np.zeros(len(target_gmx_qm_indices), dtype=int)
    for i, gmx_idx_1_based in enumerate(target_gmx_qm_indices):
        target_cpmd_id_1_based = gmx_to_cpmd_map.get(gmx_idx_1_based)
        if target_cpmd_id_1_based is None:
            raise ValueError(f"GROMACS QM atom {gmx_idx_1_based} not found in gmx_to_cpmd_map.")

        fm_idx_arr = np.where(cpmd_ids_fmdata_order_qm == target_cpmd_id_1_based)[0]
        if len(fm_idx_arr) > 0:
            qm_reorder_indices[i] = fm_idx_arr[0]
        else:
            raise ValueError(
                f"CPMD ID {target_cpmd_id_1_based} (for GMX QM atom {gmx_idx_1_based}) "
                f"not found in FMDataset's list of QM CPMD IDs: {cpmd_ids_fmdata_order_qm}"
            )

    for idx in range(begin, end, step):
        config = dict()

        # QM data (ordered by CPMD IDs in fmdata)
        raw_qm_coords = fmdata.get_configuration_properties(idx, 'coordinate', 'qm')
        raw_qm_ref_charges = fmdata.get_configuration_properties(idx, 'hirshfeld_charge', 'qm')
        # Store original QM CPMD IDs from fmdata for this frame if needed for debugging/verification
        config['qm_cpmd_ids_fmdata_order'] = fmdata.get_configuration_properties(idx, 'id', 'qm')

        # Reorder QM data to match GROMACS QM atom topology order
        config['qm_coordinates'] = raw_qm_coords[qm_reorder_indices]
        config['reference_charge'] = raw_qm_ref_charges[qm_reorder_indices]

        # MM data (ordered by GROMACS IDs in fmdata for the MM region)
        # We assume this order is what DRESP expects or can work with directly.
        config['mm_coordinates'] = fmdata.get_configuration_properties(idx, 'coordinate', 'mm')
        config['electric_potential'] = fmdata.get_configuration_properties(idx, 'electric_potential', 'mm').squeeze()
        config['electric_field'] = fmdata.get_configuration_properties(idx, 'electric_field', 'mm')
        # Store original MM GROMACS IDs from fmdata for this frame if needed
        config['mm_gmx_ids_fmdata_order'] = fmdata.get_configuration_properties(idx, 'id', 'mm')

        configurations.append(config)
    return configurations


def compute_potential_set_charges(charges, charge_positions, x2):
    # Coulomb potential at point x2 due to the set of point charges (atomic units)
    R_ij = x2 - charge_positions
    r_ij = np.linalg.norm(R_ij, axis=1)
    potential = charges * (1 / r_ij)
    return np.sum(potential)


def compute_electric_field_set_charges(charges, charge_positions, x2):
    # Electric field at point x2 due to the set of point charges (atomic units)
    R_ij = x2 - charge_positions
    r_ij = np.linalg.norm(R_ij, axis=1)
    dem = np.reshape(r_ij**3, (-1, 1))
    charges = charges.reshape(-1, 1)
    electric_field = R_ij * charges * (1 / dem)
    return np.sum(electric_field, axis=0)


def compute_diff_electric_field(charges, charge_positions,
                                sr_positions, sr_electric_field):
    # Squared norm of the reference field (Eroh) and of the deviation of the
    # point-charge field from it (E_diff), summed over all MM positions
    Emm = np.array(sr_positions)
    for i, sr_position in enumerate(sr_positions):
        Emm[i] = compute_electric_field_set_charges(charges, charge_positions, sr_position)

    Eroh = np.sum(sr_electric_field**2)
    E_diff = np.sum((Emm - sr_electric_field)**2)
    return Eroh, E_diff


def compute_diff_potential(charges, charge_positions,
                           sr_positions, sr_potential):
    # Squared norm of the reference potential (Vroh) and of the deviation of
    # the point-charge potential from it (V_diff), summed over all MM positions
    Vmm = np.zeros(sr_positions.shape[0])
    for i, sr_position in enumerate(sr_positions):
        Vmm[i] = compute_potential_set_charges(charges, charge_positions, sr_position)

    V_diff = np.sum((Vmm - sr_potential)**2)
    Vroh = np.sum(sr_potential**2)

    return Vroh, V_diff


def compute_sd(charges, configurations, n_processes=None):
    """
    Compute standard deviations for potential and electric field with optional parallelization.

    Args:
        charges (numpy.ndarray): Charge values
        configurations (list): List of configuration dictionaries
        n_processes (int, optional): Number of processes for parallel computation

    Returns:
        tuple: (vsd, esd) Standard deviations for potential and electric field
    """
    # Use parallel processing if requested and beneficial
    if n_processes is not None and n_processes > 1 and len(configurations) > 1:
        return _compute_sd_parallel(charges, configurations, n_processes)
    else:
        return _compute_sd_serial(charges, configurations)


def _compute_sd_serial(charges, configurations):
    """Serial version of compute_sd (original implementation)."""
    eroh = 0.0
    e_diff = 0.0

    vroh = 0.0
    v_diff = 0.0

    for config in configurations:
        ch_positions = config.get('qm_coordinates')
        sr_positions = config.get('mm_coordinates')
        sr_potential = config.get('electric_potential')
        sr_electric_field = config.get('electric_field')

        ediff = compute_diff_electric_field(charges, ch_positions,
                                            sr_positions, sr_electric_field)
        vdiff = compute_diff_potential(charges, ch_positions,
                                       sr_positions, sr_potential)

        eroh += ediff[0]
        e_diff += ediff[1]
        vroh += vdiff[0]
        v_diff += vdiff[1]

    vsd = np.sqrt(v_diff / vroh)
    esd = np.sqrt(e_diff / eroh)

    return vsd, esd


def _process_single_config_sd(args):
    """
    Process a single configuration for parallel SD computation.

    Args:
        args: Tuple containing (charges, config)

    Returns:
        tuple: (eroh, e_diff, vroh, v_diff)
    """
    charges, config = args

    ch_positions = config.get('qm_coordinates')
    sr_positions = config.get('mm_coordinates')
    sr_potential = config.get('electric_potential')
    sr_electric_field = config.get('electric_field')

    ediff = compute_diff_electric_field(charges, ch_positions,
                                        sr_positions, sr_electric_field)
    vdiff = compute_diff_potential(charges, ch_positions,
                                   sr_positions, sr_potential)

    return ediff[0], ediff[1], vdiff[0], vdiff[1]


def _compute_sd_parallel(charges, configurations, n_processes):
    """Parallel version of compute_sd using multiprocessing."""
    # Prepare arguments for parallel processing
    args_list = [(charges, config) for config in configurations]

    # Use a multiprocessing pool
    with mp.Pool(processes=n_processes) as pool:
        results = pool.map(_process_single_config_sd, args_list)

    # Sum up results from all processes
    eroh = 0.0
    e_diff = 0.0
    vroh = 0.0
    v_diff = 0.0

    for result in results:
        eroh += result[0]
        e_diff += result[1]
        vroh += result[2]
        v_diff += result[3]

    vsd = np.sqrt(v_diff / vroh)
    esd = np.sqrt(e_diff / eroh)

    return vsd, esd


def compute_infulence_mat(configurations, wv, we,
                          eq_map, optimize_charges, n_processes=None):
    """
    Compute influence matrix with optional parallelization.

    Args:
        configurations (list): List of configuration dictionaries
        wv (float): Potential weight
        we (float): Electric field weight
        eq_map (dict): Equivalent atom mapping
        optimize_charges (list): List of charges to optimize
        n_processes (int, optional): Number of processes for parallel computation

    Returns:
        numpy.ndarray: Influence matrix
    """
    # Use parallel processing if requested and beneficial
    if n_processes is not None and n_processes > 1 and len(configurations) > 1:
        return _compute_infulence_mat_parallel(configurations, wv, we, eq_map, optimize_charges, n_processes)
    else:
        return _compute_infulence_mat_serial(configurations, wv, we, eq_map, optimize_charges)


def _compute_infulence_mat_serial(configurations, wv, we, eq_map, optimize_charges):
    """Serial version of compute_infulence_mat (original implementation)."""
    infulence_mat = []
    for config in configurations:
        single_inful = infulence_mat_single(config, wv, we, eq_map, optimize_charges)
        infulence_mat.append(single_inful.T)
    infulence_mat = np.vstack(infulence_mat)

    return infulence_mat


def _process_single_influence_mat(args):
    """
    Process a single configuration for parallel influence matrix computation.

    Args:
        args: Tuple containing (config, wv, we, eq_map, optimize_charges)

    Returns:
        numpy.ndarray: Single influence matrix
    """
    config, wv, we, eq_map, optimize_charges = args
    single_inful = infulence_mat_single(config, wv, we, eq_map, optimize_charges)
    return single_inful.T


def _compute_infulence_mat_parallel(configurations, wv, we, eq_map, optimize_charges, n_processes):
    """Parallel version of compute_infulence_mat using multiprocessing."""
    # Prepare arguments for parallel processing
    args_list = [(config, wv, we, eq_map, optimize_charges) for config in configurations]

    # Use a multiprocessing pool
    with mp.Pool(processes=n_processes) as pool:
        results = pool.map(_process_single_influence_mat, args_list)

    # Stack results
    infulence_mat = np.vstack(results)

    return infulence_mat


def infulence_mat_single(config, wv, we, eq_map, optimize_charges):
    # One column of 1/r (potential) and R/r^3 (field) terms per unique charge;
    # columns of equivalent atoms are accumulated into the same entry.
    columns = []
    qm_coordinates = config['qm_coordinates']
    mm_coordinates = config['mm_coordinates']
    for i in range(qm_coordinates.shape[0]):
        col = []
        R_ij = mm_coordinates - qm_coordinates[i]
        r_ij = np.linalg.norm(R_ij, axis=1)
        dem = np.reshape(r_ij**3, (-1, 1))
        poten_term = wv * 1 / r_ij
        elec_term = we * R_ij * 1 / dem
        col.append(poten_term)
        col.append(elec_term.flatten())
        index = eq_map.get(i)
        index = optimize_charges.index(index)
        if (len(columns) - 1) < index:
            columns.append(np.hstack(col))
        else:
            columns[index] += np.hstack(col)
    return np.vstack(columns)


def compute_target_mat(configurations, wv, we, wh, wq, q_total,
                       q_restrain):
    # Reference vector matching the influence-matrix rows: weighted potentials
    # and fields per configuration, then restraint and total-charge targets.
    target_mat = []
    for config in configurations:
        potential = config['electric_potential']
        elec_field = config['electric_field']
        target_mat = np.hstack([target_mat, potential * wv, elec_field.flatten() * we])
    target_mat = np.hstack([target_mat, q_restrain * wh, wq * q_total])
    return target_mat


def opt_dresp(configurations, wv, we, wh, wq, q_total,
              eq_map, n_processes=None, fixed_charge_indices=None,
              charge_group_constraints=None, weights_to_fix_charges=100000):
    """
    Optimize DRESP charges with optional parallelization and the ability to fix certain charges.

    Args:
        configurations (list): List of configuration dictionaries
        wv (float): Potential weight
        we (float): Electric field weight
        wh (float): Restraint weight
        wq (float): Total charge weight
        q_total (float): Total charge constraint
        eq_map (dict): Equivalent atom mapping
        n_processes (int, optional): Number of processes for parallel computation
        fixed_charge_indices (set, optional): Set of atom indices whose charges should be kept at original values
        charge_group_constraints (list, optional): List of tuples (atom_indices, target_charge) where atom_indices
            is a set of atom indices and target_charge is the desired sum of charges
        weights_to_fix_charges (float, optional): Restraint weight used to pin fixed charges (default 100000)

    Returns:
        numpy.ndarray: Optimized charges
    """

    # Initialize fixed_charge_indices if None
    if fixed_charge_indices is None:
        fixed_charge_indices = set()

    # Initialize charge_group_constraints if None
    if charge_group_constraints is None:
        charge_group_constraints = []

    reference_charge = np.zeros(configurations[0]['qm_coordinates'].shape[0])
    for config in configurations:
        reference_charge += config['reference_charge']

    reference_charge = reference_charge / len(configurations)

    # Include all atoms in the optimization, but use strong restraints for fixed charges
    optimize_charges = []
    for i in range(configurations[0]['qm_coordinates'].shape[0]):
        idx = eq_map.get(i)
        if idx not in optimize_charges:
            optimize_charges.append(idx)

    unique_charges = len(optimize_charges)
    q_restrain = np.zeros(unique_charges)
    nq_unique = np.zeros(unique_charges)
    tot_charge_constraint = np.zeros(unique_charges)

    for i in range(reference_charge.shape[0]):
        idx = eq_map.get(i)
        idx = optimize_charges.index(idx)
        q_restrain[idx] += reference_charge[i]
        nq_unique[idx] += 1
        tot_charge_constraint[idx] += wq

    # Avoid division by zero: only divide where nq_unique > 0
    mask = nq_unique > 0
    q_restrain[mask] = q_restrain[mask] / nq_unique[mask]
    q_restrain = q_restrain * nq_unique

    target_mat = compute_target_mat(configurations, wv, we, wh, wq, q_total,
                                    q_restrain)

    restrain_mat = np.diagflat(nq_unique) * wh

    infulence_mat = compute_infulence_mat(configurations, wv, we,
                                          eq_map, optimize_charges, n_processes)

    # Add charge group constraints
    charge_group_constraint_rows = []
    charge_group_targets = []

    for atom_indices, target_charge in charge_group_constraints:
        constraint_row = np.zeros(unique_charges)
        for atom_idx in atom_indices:
            if atom_idx < len(reference_charge):
                eq_idx = eq_map.get(atom_idx)
                if eq_idx in optimize_charges:
                    constraint_idx = optimize_charges.index(eq_idx)
                    constraint_row[constraint_idx] += wq

        charge_group_constraint_rows.append(constraint_row)
        charge_group_targets.append(target_charge * wq)

    # Add very strong restraints for fixed charges to force them to reference values
    fixed_charge_restraints = np.zeros((len(fixed_charge_indices), unique_charges))
    fixed_charge_targets = []

    for i, atom_idx in enumerate(fixed_charge_indices):
        if atom_idx < len(reference_charge):
            constraint_row = np.zeros(unique_charges)
            eq_idx = eq_map.get(atom_idx)
            if eq_idx in optimize_charges:
                constraint_idx = optimize_charges.index(eq_idx)
                # Use the very strong restraint weight (weights_to_fix_charges,
                # default 100000) to effectively fix the charge
                constraint_row[constraint_idx] = weights_to_fix_charges
            fixed_charge_restraints[i] = constraint_row
            fixed_charge_targets.append(reference_charge[atom_idx] * weights_to_fix_charges)

    # Stack all constraints
    all_constraints = [restrain_mat, tot_charge_constraint]
    if charge_group_constraint_rows:
        all_constraints.extend(charge_group_constraint_rows)
    if len(fixed_charge_restraints) > 0:
        all_constraints.append(fixed_charge_restraints)

    infulence_mat = np.vstack([infulence_mat] + all_constraints)

    # Add targets to the target matrix
    additional_targets = charge_group_targets + fixed_charge_targets
    if additional_targets:
        target_mat = np.hstack([target_mat] + additional_targets)

    res = np.linalg.lstsq(infulence_mat, target_mat, rcond=None)

    full_optimize_charges = np.zeros(configurations[0]['qm_coordinates'].shape[0])
    for i in range(configurations[0]['qm_coordinates'].shape[0]):
        idx = eq_map.get(i)
        idx = optimize_charges.index(idx)
        full_optimize_charges[i] = res[0][idx]

    return full_optimize_charges
```
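Taken together, `opt_dresp` assembles one stacked linear least-squares system A·q ≈ b: per configuration, `infulence_mat_single` contributes a potential block scaled by `wv` and a field block scaled by `we` for each unique charge, and `compute_target_mat` supplies the matching reference values; rows for the Hirshfeld restraint (`wh`), the total-charge constraint (`wq`), optional charge-group constraints, and heavily weighted rows pinning fixed charges are appended before `np.linalg.lstsq` solves for the charges. `compute_sd` then reports relative RMS deviations, e.g. vsd = sqrt(sum((Vmm − Vref)²) / sum(Vref²)). Below is a minimal usage sketch, not shipped documentation: it assumes `fmdata` (an `FMDataset`) and `qm_region` (a `QMRegion`) have already been constructed elsewhere, and the frame range, weights, and identity equivalence map are illustrative placeholders.

```python
# Hedged usage sketch for the DRESP optimizer; all numbers are placeholders.
from mimicpy.force_matching import dresp

# Frames begin..end (exclusive) in steps of 10; fmdata and qm_region are
# assumed to exist, built from a force-matching run.
configs = dresp.get_configurations(fmdata, begin=0, end=100, step=10,
                                   qm_region=qm_region)

n_qm = configs[0]['qm_coordinates'].shape[0]
eq_map = {i: i for i in range(n_qm)}  # no equivalent atoms: each charge is independent

charges = dresp.opt_dresp(configs, wv=1.0, we=1.0, wh=0.1, wq=10.0,
                          q_total=0.0, eq_map=eq_map, n_processes=4)

# Relative RMS errors of the fitted point-charge potential and field
vsd, esd = dresp.compute_sd(charges, configs, n_processes=4)
print(f"potential SD: {vsd:.4f}, field SD: {esd:.4f}")
```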
mimicpy/force_matching/nonbonded_forces.py (new file)

@@ -0,0 +1,32 @@
```python
import numpy as np
from ..utils.constants import kjmolnm_to_au, nm_to_au

def get_qm_gmx_forces(mda, idx_frame, qm_atoms):
    """
    Get GROMACS forces and positions for QM atoms

    Args:
        mda: MDAnalysis Universe object
        idx_frame (int): Frame index
        qm_atoms: Set or list of GROMACS atom indices (1-based) including QM atoms

    Returns:
        tuple: (forces, positions) where forces and positions are numpy arrays
    """
    # mda.trajectory[0]
    # if mda.trajectory[0].time == 0.0:
    #     idx_frame += 1
    mda.trajectory[idx_frame]
    forces = []
    positions = []

    # Convert qm_atoms to set for efficient lookup
    if not isinstance(qm_atoms, set):
        qm_atoms = set(qm_atoms)

    for atom in mda.atoms:
        if atom.id + 1 in qm_atoms:
            forces.append(atom.force * 10 * kjmolnm_to_au)
            positions.append(atom.position * 0.1 * nm_to_au)

    return np.array(forces), np.array(positions)
```
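A note on units: MDAnalysis reports positions in Å and forces in kJ/(mol·Å), so the factors 0.1 and 10 convert those to nm and kJ/(mol·nm) before `nm_to_au` and `kjmolnm_to_au` (from `mimicpy.utils.constants`) take them to atomic units. A minimal usage sketch follows; the file names and QM index set are hypothetical, and a force-containing trajectory (e.g. a GROMACS .trr) is required because `atom.force` is read per frame.

```python
# Hedged usage sketch; 'topol.tpr', 'traj.trr', and the index set are placeholders.
import MDAnalysis
from mimicpy.force_matching.nonbonded_forces import get_qm_gmx_forces

u = MDAnalysis.Universe('topol.tpr', 'traj.trr')  # .trr carries forces per frame

qm_atoms = {1, 2, 3, 4}  # 1-based GROMACS indices of the QM region
forces_au, positions_au = get_qm_gmx_forces(u, idx_frame=0, qm_atoms=qm_atoms)

print(forces_au.shape)     # (n_qm_atoms, 3), in atomic units
print(positions_au.shape)  # (n_qm_atoms, 3), in Bohr
```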