MultiOptPy 1.20.2 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- multioptpy/Calculator/__init__.py +0 -0
- multioptpy/Calculator/ase_calculation_tools.py +424 -0
- multioptpy/Calculator/ase_tools/__init__.py +0 -0
- multioptpy/Calculator/ase_tools/fairchem.py +28 -0
- multioptpy/Calculator/ase_tools/gamess.py +19 -0
- multioptpy/Calculator/ase_tools/gaussian.py +165 -0
- multioptpy/Calculator/ase_tools/mace.py +28 -0
- multioptpy/Calculator/ase_tools/mopac.py +19 -0
- multioptpy/Calculator/ase_tools/nwchem.py +31 -0
- multioptpy/Calculator/ase_tools/orca.py +22 -0
- multioptpy/Calculator/ase_tools/pygfn0.py +37 -0
- multioptpy/Calculator/dxtb_calculation_tools.py +344 -0
- multioptpy/Calculator/emt_calculation_tools.py +458 -0
- multioptpy/Calculator/gpaw_calculation_tools.py +183 -0
- multioptpy/Calculator/lj_calculation_tools.py +314 -0
- multioptpy/Calculator/psi4_calculation_tools.py +334 -0
- multioptpy/Calculator/pwscf_calculation_tools.py +189 -0
- multioptpy/Calculator/pyscf_calculation_tools.py +327 -0
- multioptpy/Calculator/sqm1_calculation_tools.py +611 -0
- multioptpy/Calculator/sqm2_calculation_tools.py +376 -0
- multioptpy/Calculator/tblite_calculation_tools.py +352 -0
- multioptpy/Calculator/tersoff_calculation_tools.py +818 -0
- multioptpy/Constraint/__init__.py +0 -0
- multioptpy/Constraint/constraint_condition.py +834 -0
- multioptpy/Coordinate/__init__.py +0 -0
- multioptpy/Coordinate/polar_coordinate.py +199 -0
- multioptpy/Coordinate/redundant_coordinate.py +638 -0
- multioptpy/IRC/__init__.py +0 -0
- multioptpy/IRC/converge_criteria.py +28 -0
- multioptpy/IRC/dvv.py +544 -0
- multioptpy/IRC/euler.py +439 -0
- multioptpy/IRC/hpc.py +564 -0
- multioptpy/IRC/lqa.py +540 -0
- multioptpy/IRC/modekill.py +662 -0
- multioptpy/IRC/rk4.py +579 -0
- multioptpy/Interpolation/__init__.py +0 -0
- multioptpy/Interpolation/adaptive_interpolation.py +283 -0
- multioptpy/Interpolation/binomial_interpolation.py +179 -0
- multioptpy/Interpolation/geodesic_interpolation.py +785 -0
- multioptpy/Interpolation/interpolation.py +156 -0
- multioptpy/Interpolation/linear_interpolation.py +473 -0
- multioptpy/Interpolation/savitzky_golay_interpolation.py +252 -0
- multioptpy/Interpolation/spline_interpolation.py +353 -0
- multioptpy/MD/__init__.py +0 -0
- multioptpy/MD/thermostat.py +185 -0
- multioptpy/MEP/__init__.py +0 -0
- multioptpy/MEP/pathopt_bneb_force.py +443 -0
- multioptpy/MEP/pathopt_dmf_force.py +448 -0
- multioptpy/MEP/pathopt_dneb_force.py +130 -0
- multioptpy/MEP/pathopt_ewbneb_force.py +207 -0
- multioptpy/MEP/pathopt_gpneb_force.py +512 -0
- multioptpy/MEP/pathopt_lup_force.py +113 -0
- multioptpy/MEP/pathopt_neb_force.py +225 -0
- multioptpy/MEP/pathopt_nesb_force.py +205 -0
- multioptpy/MEP/pathopt_om_force.py +153 -0
- multioptpy/MEP/pathopt_qsm_force.py +174 -0
- multioptpy/MEP/pathopt_qsmv2_force.py +304 -0
- multioptpy/ModelFunction/__init__.py +7 -0
- multioptpy/ModelFunction/avoiding_model_function.py +29 -0
- multioptpy/ModelFunction/binary_image_ts_search_model_function.py +47 -0
- multioptpy/ModelFunction/conical_model_function.py +26 -0
- multioptpy/ModelFunction/opt_meci.py +50 -0
- multioptpy/ModelFunction/opt_mesx.py +47 -0
- multioptpy/ModelFunction/opt_mesx_2.py +49 -0
- multioptpy/ModelFunction/seam_model_function.py +27 -0
- multioptpy/ModelHessian/__init__.py +0 -0
- multioptpy/ModelHessian/approx_hessian.py +147 -0
- multioptpy/ModelHessian/calc_params.py +227 -0
- multioptpy/ModelHessian/fischer.py +236 -0
- multioptpy/ModelHessian/fischerd3.py +360 -0
- multioptpy/ModelHessian/fischerd4.py +398 -0
- multioptpy/ModelHessian/gfn0xtb.py +633 -0
- multioptpy/ModelHessian/gfnff.py +709 -0
- multioptpy/ModelHessian/lindh.py +165 -0
- multioptpy/ModelHessian/lindh2007d2.py +707 -0
- multioptpy/ModelHessian/lindh2007d3.py +822 -0
- multioptpy/ModelHessian/lindh2007d4.py +1030 -0
- multioptpy/ModelHessian/morse.py +106 -0
- multioptpy/ModelHessian/schlegel.py +144 -0
- multioptpy/ModelHessian/schlegeld3.py +322 -0
- multioptpy/ModelHessian/schlegeld4.py +559 -0
- multioptpy/ModelHessian/shortrange.py +346 -0
- multioptpy/ModelHessian/swartd2.py +496 -0
- multioptpy/ModelHessian/swartd3.py +706 -0
- multioptpy/ModelHessian/swartd4.py +918 -0
- multioptpy/ModelHessian/tshess.py +40 -0
- multioptpy/Optimizer/QHAdam.py +61 -0
- multioptpy/Optimizer/__init__.py +0 -0
- multioptpy/Optimizer/abc_fire.py +83 -0
- multioptpy/Optimizer/adabelief.py +58 -0
- multioptpy/Optimizer/adabound.py +68 -0
- multioptpy/Optimizer/adadelta.py +65 -0
- multioptpy/Optimizer/adaderivative.py +56 -0
- multioptpy/Optimizer/adadiff.py +68 -0
- multioptpy/Optimizer/adafactor.py +70 -0
- multioptpy/Optimizer/adam.py +65 -0
- multioptpy/Optimizer/adamax.py +62 -0
- multioptpy/Optimizer/adamod.py +83 -0
- multioptpy/Optimizer/adamw.py +65 -0
- multioptpy/Optimizer/adiis.py +523 -0
- multioptpy/Optimizer/afire_neb.py +282 -0
- multioptpy/Optimizer/block_hessian_update.py +709 -0
- multioptpy/Optimizer/c2diis.py +491 -0
- multioptpy/Optimizer/component_wise_scaling.py +405 -0
- multioptpy/Optimizer/conjugate_gradient.py +82 -0
- multioptpy/Optimizer/conjugate_gradient_neb.py +345 -0
- multioptpy/Optimizer/coordinate_locking.py +405 -0
- multioptpy/Optimizer/dic_rsirfo.py +1015 -0
- multioptpy/Optimizer/ediis.py +417 -0
- multioptpy/Optimizer/eve.py +76 -0
- multioptpy/Optimizer/fastadabelief.py +61 -0
- multioptpy/Optimizer/fire.py +77 -0
- multioptpy/Optimizer/fire2.py +249 -0
- multioptpy/Optimizer/fire_neb.py +92 -0
- multioptpy/Optimizer/gan_step.py +486 -0
- multioptpy/Optimizer/gdiis.py +609 -0
- multioptpy/Optimizer/gediis.py +203 -0
- multioptpy/Optimizer/geodesic_step.py +433 -0
- multioptpy/Optimizer/gpmin.py +633 -0
- multioptpy/Optimizer/gpr_step.py +364 -0
- multioptpy/Optimizer/gradientdescent.py +78 -0
- multioptpy/Optimizer/gradientdescent_neb.py +52 -0
- multioptpy/Optimizer/hessian_update.py +433 -0
- multioptpy/Optimizer/hybrid_rfo.py +998 -0
- multioptpy/Optimizer/kdiis.py +625 -0
- multioptpy/Optimizer/lars.py +21 -0
- multioptpy/Optimizer/lbfgs.py +253 -0
- multioptpy/Optimizer/lbfgs_neb.py +355 -0
- multioptpy/Optimizer/linesearch.py +236 -0
- multioptpy/Optimizer/lookahead.py +40 -0
- multioptpy/Optimizer/nadam.py +64 -0
- multioptpy/Optimizer/newton.py +200 -0
- multioptpy/Optimizer/prodigy.py +70 -0
- multioptpy/Optimizer/purtubation.py +16 -0
- multioptpy/Optimizer/quickmin_neb.py +245 -0
- multioptpy/Optimizer/radam.py +75 -0
- multioptpy/Optimizer/rfo_neb.py +302 -0
- multioptpy/Optimizer/ric_rfo.py +842 -0
- multioptpy/Optimizer/rl_step.py +627 -0
- multioptpy/Optimizer/rmspropgrave.py +65 -0
- multioptpy/Optimizer/rsirfo.py +1647 -0
- multioptpy/Optimizer/rsprfo.py +1056 -0
- multioptpy/Optimizer/sadam.py +60 -0
- multioptpy/Optimizer/samsgrad.py +63 -0
- multioptpy/Optimizer/tr_lbfgs.py +678 -0
- multioptpy/Optimizer/trim.py +273 -0
- multioptpy/Optimizer/trust_radius.py +207 -0
- multioptpy/Optimizer/trust_radius_neb.py +121 -0
- multioptpy/Optimizer/yogi.py +60 -0
- multioptpy/OtherMethod/__init__.py +0 -0
- multioptpy/OtherMethod/addf.py +1150 -0
- multioptpy/OtherMethod/dimer.py +895 -0
- multioptpy/OtherMethod/elastic_image_pair.py +629 -0
- multioptpy/OtherMethod/modelfunction.py +456 -0
- multioptpy/OtherMethod/newton_traj.py +454 -0
- multioptpy/OtherMethod/twopshs.py +1095 -0
- multioptpy/PESAnalyzer/__init__.py +0 -0
- multioptpy/PESAnalyzer/calc_irc_curvature.py +125 -0
- multioptpy/PESAnalyzer/cmds_analysis.py +152 -0
- multioptpy/PESAnalyzer/koopman_analysis.py +268 -0
- multioptpy/PESAnalyzer/pca_analysis.py +314 -0
- multioptpy/Parameters/__init__.py +0 -0
- multioptpy/Parameters/atomic_mass.py +20 -0
- multioptpy/Parameters/atomic_number.py +22 -0
- multioptpy/Parameters/covalent_radii.py +44 -0
- multioptpy/Parameters/d2.py +61 -0
- multioptpy/Parameters/d3.py +63 -0
- multioptpy/Parameters/d4.py +103 -0
- multioptpy/Parameters/dreiding.py +34 -0
- multioptpy/Parameters/gfn0xtb_param.py +137 -0
- multioptpy/Parameters/gfnff_param.py +315 -0
- multioptpy/Parameters/gnb.py +104 -0
- multioptpy/Parameters/parameter.py +22 -0
- multioptpy/Parameters/uff.py +72 -0
- multioptpy/Parameters/unit_values.py +20 -0
- multioptpy/Potential/AFIR_potential.py +55 -0
- multioptpy/Potential/LJ_repulsive_potential.py +345 -0
- multioptpy/Potential/__init__.py +0 -0
- multioptpy/Potential/anharmonic_keep_potential.py +28 -0
- multioptpy/Potential/asym_elllipsoidal_potential.py +718 -0
- multioptpy/Potential/electrostatic_potential.py +69 -0
- multioptpy/Potential/flux_potential.py +30 -0
- multioptpy/Potential/gaussian_potential.py +101 -0
- multioptpy/Potential/idpp.py +516 -0
- multioptpy/Potential/keep_angle_potential.py +146 -0
- multioptpy/Potential/keep_dihedral_angle_potential.py +105 -0
- multioptpy/Potential/keep_outofplain_angle_potential.py +70 -0
- multioptpy/Potential/keep_potential.py +99 -0
- multioptpy/Potential/mechano_force_potential.py +74 -0
- multioptpy/Potential/nanoreactor_potential.py +52 -0
- multioptpy/Potential/potential.py +896 -0
- multioptpy/Potential/spacer_model_potential.py +221 -0
- multioptpy/Potential/switching_potential.py +258 -0
- multioptpy/Potential/universal_potential.py +34 -0
- multioptpy/Potential/value_range_potential.py +36 -0
- multioptpy/Potential/void_point_potential.py +25 -0
- multioptpy/SQM/__init__.py +0 -0
- multioptpy/SQM/sqm1/__init__.py +0 -0
- multioptpy/SQM/sqm1/sqm1_core.py +1792 -0
- multioptpy/SQM/sqm2/__init__.py +0 -0
- multioptpy/SQM/sqm2/calc_tools.py +95 -0
- multioptpy/SQM/sqm2/sqm2_basis.py +850 -0
- multioptpy/SQM/sqm2/sqm2_bond.py +119 -0
- multioptpy/SQM/sqm2/sqm2_core.py +303 -0
- multioptpy/SQM/sqm2/sqm2_data.py +1229 -0
- multioptpy/SQM/sqm2/sqm2_disp.py +65 -0
- multioptpy/SQM/sqm2/sqm2_eeq.py +243 -0
- multioptpy/SQM/sqm2/sqm2_overlapint.py +704 -0
- multioptpy/SQM/sqm2/sqm2_qm.py +578 -0
- multioptpy/SQM/sqm2/sqm2_rep.py +66 -0
- multioptpy/SQM/sqm2/sqm2_srb.py +70 -0
- multioptpy/Thermo/__init__.py +0 -0
- multioptpy/Thermo/normal_mode_analyzer.py +865 -0
- multioptpy/Utils/__init__.py +0 -0
- multioptpy/Utils/bond_connectivity.py +264 -0
- multioptpy/Utils/calc_tools.py +884 -0
- multioptpy/Utils/oniom.py +96 -0
- multioptpy/Utils/pbc.py +48 -0
- multioptpy/Utils/riemann_curvature.py +208 -0
- multioptpy/Utils/symmetry_analyzer.py +482 -0
- multioptpy/Visualization/__init__.py +0 -0
- multioptpy/Visualization/visualization.py +156 -0
- multioptpy/WFAnalyzer/MO_analysis.py +104 -0
- multioptpy/WFAnalyzer/__init__.py +0 -0
- multioptpy/Wrapper/__init__.py +0 -0
- multioptpy/Wrapper/autots.py +1239 -0
- multioptpy/Wrapper/ieip_wrapper.py +93 -0
- multioptpy/Wrapper/md_wrapper.py +92 -0
- multioptpy/Wrapper/neb_wrapper.py +94 -0
- multioptpy/Wrapper/optimize_wrapper.py +76 -0
- multioptpy/__init__.py +5 -0
- multioptpy/entrypoints.py +916 -0
- multioptpy/fileio.py +660 -0
- multioptpy/ieip.py +340 -0
- multioptpy/interface.py +1086 -0
- multioptpy/irc.py +529 -0
- multioptpy/moleculardynamics.py +432 -0
- multioptpy/neb.py +1267 -0
- multioptpy/optimization.py +1553 -0
- multioptpy/optimizer.py +709 -0
- multioptpy-1.20.2.dist-info/METADATA +438 -0
- multioptpy-1.20.2.dist-info/RECORD +246 -0
- multioptpy-1.20.2.dist-info/WHEEL +5 -0
- multioptpy-1.20.2.dist-info/entry_points.txt +9 -0
- multioptpy-1.20.2.dist-info/licenses/LICENSE +674 -0
- multioptpy-1.20.2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,283 @@
import numpy as np
from scipy.special import comb
from multioptpy.Utils.calc_tools import calc_path_length_list

def _get_bernstein_coords_at_t(structures, t_values):
    """
    Generate Bernstein interpolated coordinates at specific t (0.0 to 1.0) values.

    Bernstein polynomials provide a global smoothing effect, useful for
    removing noise from the path in flat energy regions.
    """
    structures = np.array(structures)
    N = len(structures)
    path = []

    # Calculate Bernstein polynomial for each t
    for t in t_values:
        B_t = np.zeros_like(structures[0])
        for k in range(N):
            # nCr * (1-t)^(n-k) * t^k
            coef = comb(N-1, k) * (1-t)**(N-1-k) * t**k
            B_t += coef * structures[k]
        path.append(B_t)

    return np.array(path)

def _get_linear_coords_at_s(geometry_list, path_length_list, target_s):
    """
    Generate Linear interpolated coordinates at specific physical distances (s).

    Linear interpolation preserves local geometric features (kinks/curves)
    strictly, preventing corner-cutting in curved valleys.
    """
    geometry_list = np.array(geometry_list)
    current_s = path_length_list

    flat_geom = geometry_list.reshape(len(geometry_list), -1)
    new_flat = np.zeros((len(target_s), flat_geom.shape[1]))

    # Interpolate each coordinate dimension independently
    for dim in range(flat_geom.shape[1]):
        new_flat[:, dim] = np.interp(target_s, current_s, flat_geom[:, dim])

    return new_flat.reshape(len(target_s), geometry_list.shape[1], 3)

def predict_hidden_ts_weights(geometry_list, energy_list, gradient_list, boost_factor=2.0):
    """
    Predict if a Transition State (TS) is hidden strictly between two nodes
    using Cubic Hermite Interpolation.

    f(x) = a3*x^3 + a2*x^2 + a1*x + a0

    If a local maximum (f'(x)=0, f''(x)<0) is detected within a segment,
    the weight of that segment is boosted to attract more nodes.

    Args:
        geometry_list: Coordinates
        energy_list: Energies
        gradient_list: Gradients (dE/dx). NOT Forces. If Forces, flip sign before passing.
        boost_factor: Weight multiplier if a hidden TS is found.
    """
    n_nodes = len(geometry_list)
    geometry_arr = np.array(geometry_list)
    energies = np.array(energy_list)
    gradients = np.array(gradient_list)

    # Initialize with zeros (additive weight)
    ts_weights = np.zeros(n_nodes)

    for i in range(n_nodes - 1):
        # 1. Segment properties
        vec = geometry_arr[i+1] - geometry_arr[i]
        L = np.linalg.norm(vec)

        if L < 1e-8: continue

        tangent = vec / L

        # 2. Boundary conditions
        y0 = energies[i]
        y1 = energies[i+1]

        # Project gradient onto tangent (slope)
        m0 = np.sum(gradients[i] * tangent)
        m1 = np.sum(gradients[i+1] * tangent)

        # 3. Cubic Hermite Coefficients
        # f(x) = a3*x^3 + a2*x^2 + a1*x + a0
        # Derived from: f(0)=y0, f(L)=y1, f'(0)=m0, f'(L)=m1
        a0 = y0
        a1 = m0
        a2 = (3 * (y1 - y0) / (L**2)) - ((2 * m0 + m1) / L)
        a3 = ((m0 + m1) / (L**2)) - (2 * (y1 - y0) / (L**3))

        # 4. Find Roots of Derivative (Stationary Points)
        # f'(x) = 3*a3*x^2 + 2*a2*x + a1 = 0
        roots = []
        if abs(a3) > 1e-10:
            discriminant = (2 * a2)**2 - 4 * (3 * a3) * a1
            if discriminant >= 0:
                sqrt_d = np.sqrt(discriminant)
                roots.append((-2 * a2 + sqrt_d) / (6 * a3))
                roots.append((-2 * a2 - sqrt_d) / (6 * a3))
        elif abs(a2) > 1e-10:
            # Quadratic case
            roots.append(-a1 / (2 * a2))

        # 5. Check if roots indicate a valid hidden TS
        found_ts = False
        for x in roots:
            # Check if strictly inside the segment (with small buffer)
            if 0.05 * L < x < 0.95 * L:
                # Check convexity (2nd derivative)
                # f''(x) = 6*a3*x + 2*a2
                curvature = 6 * a3 * x + 2 * a2
                if curvature < 0:  # Concave down -> Maximum
                    found_ts = True
                    break

        if found_ts:
            # Boost both nodes connected to this segment
            ts_weights[i] += boost_factor
            ts_weights[i+1] += boost_factor

    return ts_weights
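For reference, the coefficients a0..a3 above are the unique solution of the four Hermite boundary conditions quoted in the comment, and the "hidden TS" test then looks for a root of f'(x) inside the segment at which f''(x) is negative:

\[
f(0)=y_0,\quad f'(0)=m_0,\quad f(L)=y_1,\quad f'(L)=m_1
\;\Rightarrow\;
a_0=y_0,\quad a_1=m_0,\quad
a_2=\frac{3(y_1-y_0)}{L^2}-\frac{2m_0+m_1}{L},\quad
a_3=\frac{m_0+m_1}{L^2}-\frac{2(y_1-y_0)}{L^3}
\]
\[
f'(x)=3a_3x^2+2a_2x+a_1=0,\qquad f''(x)=6a_3x+2a_2<0 .
\]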

def adaptive_geometry_energy_interpolation(geometry_list, energy_list, gradient_list=None,
                                           n_points=None, smoothing=None, angle_threshold_deg=15.0):
    """
    Advanced Adaptive Interpolation for Reaction Paths.

    This function performs two major tasks:
    1. Node Distribution Control (Density):
       Concentrates nodes in high-energy regions and regions with high curvature
       (using gradients and cubic prediction).

    2. Coordinate Mixing (Shape):
       Blends 'Bernstein' (Smooth) and 'Linear' (Accurate) interpolation.
       - Uses Linear when geometric curvature is high AND energy is steep (prevents corner-cutting).
       - Uses Bernstein when the path is flat or noisy (smoothes optimization).

    Args:
        geometry_list: List of atomic coordinates.
        energy_list: List of energies.
        gradient_list: List of energy gradients (dE/dx).
                       NOTE: Pass Gradients, not Forces. (Force = -Gradient)
        n_points: Number of output nodes (default: same as input).
        smoothing: Base weight factor for density (None = Auto-calculated).
        angle_threshold_deg: Angle threshold to trigger Linear interpolation.
    """
    geometry_arr = np.array(geometry_list)
    energies = np.array(energy_list)

    if n_points is None:
        n_points = len(geometry_arr)

    n_nodes = len(geometry_arr)
    path_length_list = calc_path_length_list(geometry_arr)
    total_length = path_length_list[-1]

    if total_length < 1e-8:
        return geometry_arr

    # =========================================================================
    # STEP 1: Weight Calculation (Node Density Control)
    # =========================================================================

    if smoothing is None:
        # Heuristic: ensure at least ~1 node's worth of weight in valleys
        smoothing = 1.5 / n_nodes if n_nodes > 0 else 0.1

    # --- A. Global Energy Height ---
    E_min, E_max = np.min(energies), np.max(energies)
    if E_max - E_min < 1e-6:
        w_global = np.zeros_like(energies)
    else:
        w_global = (energies - E_min) / (E_max - E_min)

    # --- B. Local Peak Convexity (via Gradient) ---
    w_local = np.zeros_like(energies)
    if gradient_list is not None and n_nodes > 2:
        grad_arr = np.array(gradient_list)

        # Calculate Tangents
        vecs = geometry_arr[1:] - geometry_arr[:-1]
        vec_norms = np.linalg.norm(vecs, axis=(1,2))
        valid = vec_norms > 1e-10
        tangents = np.zeros_like(geometry_arr)
        tangents[:-1][valid] = vecs[valid] / vec_norms[valid][:, np.newaxis, np.newaxis]
        tangents[-1] = tangents[-2]

        # Project Gradient -> Slope
        slopes = np.sum(grad_arr * tangents, axis=(1,2))

        # Change in Slope ~ Curvature (2nd derivative)
        slope_change = np.zeros_like(slopes)
        slope_change[1:-1] = slopes[2:] - slopes[:-2]

        # Identify Hills (Center is higher than neighbors)
        E_neighbors = (energies[:-2] + energies[2:]) / 2.0
        is_hill = energies[1:-1] > E_neighbors

        # We value high negative slope change (convex cap) in hill regions
        peak_metric = np.abs(slope_change[1:-1])
        w_local[1:-1][is_hill] = peak_metric[is_hill]

        # Normalize
        if np.max(w_local) > 1e-6:
            w_local /= np.max(w_local)
        w_local[0], w_local[-1] = w_local[1], w_local[-2]

    # --- C. Hidden TS Prediction (Cubic Hermite) ---
    w_hidden_ts = np.zeros_like(energies)
    if gradient_list is not None:
        w_hidden_ts = predict_hidden_ts_weights(
            geometry_arr, energies, gradient_list, boost_factor=2.0
        )

    # Combine Weights
    # 30% Global Height, 40% Local Convexity, + Hidden TS Boost + Smoothing
    weights = 0.3 * w_global + 0.4 * w_local + w_hidden_ts + smoothing

    # Calculate New Node Positions (s)
    segment_dists = np.diff(path_length_list)
    segment_weights = (weights[:-1] + weights[1:]) / 2.0
    weighted_segments = segment_dists * segment_weights

    cum_weighted_dist = np.concatenate(([0.0], np.cumsum(weighted_segments)))
    target_weighted_grid = np.linspace(0, cum_weighted_dist[-1], n_points)
    target_s = np.interp(target_weighted_grid, cum_weighted_dist, path_length_list)

    # =========================================================================
    # STEP 2: Determine Mixing Ratio (Bernstein vs Linear)
    # =========================================================================

    # --- Geometric Curvature (Angle) ---
    vecs = geometry_arr[1:] - geometry_arr[:-1]
    norms = np.linalg.norm(vecs, axis=(1,2))
    valid_mask = norms > 1e-10
    tangents = np.zeros_like(vecs)
    tangents[valid_mask] = vecs[valid_mask] / norms[valid_mask][:, np.newaxis, np.newaxis]

    angle_scores = np.zeros(n_nodes)
    for i in range(1, n_nodes - 1):
        v_prev = tangents[i-1]
        v_next = tangents[i]
        dot_val = np.clip(np.sum(v_prev * v_next), -1.0, 1.0)
        angle_deg = np.degrees(np.arccos(dot_val))
        angle_scores[i] = np.clip(angle_deg / (2.0 * angle_threshold_deg), 0.0, 1.0)
    angle_scores[0], angle_scores[-1] = angle_scores[1], angle_scores[-2]

    # --- Energy Steepness ---
    # Only use Linear if energy is changing rapidly (Wall/Slope)
    steepness_scores = np.zeros(n_nodes)
    if (E_max - E_min) > 1e-6:
        dE = np.abs(energies[2:] - energies[:-2])
        # Sensitivity: 20% of barrier height counts as steep
        steepness_scores[1:-1] = np.clip((dE / (E_max - E_min)) * 5.0, 0.0, 1.0)
        steepness_scores[0], steepness_scores[-1] = steepness_scores[1], steepness_scores[-2]

    # Mixing Alpha (0=Bernstein, 1=Linear)
    # Logic: AND condition. Must be Curved AND Steep to enforce Linear.
    # Otherwise, prefer Bernstein for smoothing.
    alpha_original = angle_scores * steepness_scores

    # =========================================================================
    # STEP 3: Generate & Blend
    # =========================================================================

    # Generate candidates at NEW positions
    coords_linear = _get_linear_coords_at_s(geometry_arr, path_length_list, target_s)

    target_t = target_s / total_length
    coords_bernstein = _get_bernstein_coords_at_t(geometry_arr, target_t)

    # Interpolate alpha to new positions
    alpha_resampled = np.interp(target_s, path_length_list, alpha_original)
    alpha_resampled = alpha_resampled[:, np.newaxis, np.newaxis]

    # Final Blend
    new_geometry = alpha_resampled * coords_linear + (1.0 - alpha_resampled) * coords_bernstein

    return new_geometry
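A minimal usage sketch for the adaptive redistribution above, assuming the function is importable from multioptpy.Interpolation.adaptive_interpolation (the +283-line file in the listing); the three-image path, energies, and gradients below are placeholder toy values, not calculator output:

import numpy as np
from multioptpy.Interpolation.adaptive_interpolation import adaptive_geometry_energy_interpolation

# hypothetical 3-image path of a diatomic, shape (n_images, n_atoms, 3)
geoms = [np.array([[0.0, 0.0, 0.0], [0.74 + 0.30 * i, 0.0, 0.0]]) for i in range(3)]
energies = [0.00, 0.05, 0.01]                 # placeholder energies; middle image is highest
grads = [np.zeros((2, 3)) for _ in range(3)]  # placeholder gradients (dE/dx), same shape as each geometry

# redistribute to 7 images; nodes concentrate near the high-energy region
new_path = adaptive_geometry_energy_interpolation(geoms, energies, gradient_list=grads, n_points=7)
print(new_path.shape)  # (7, 2, 3)

Passing gradient_list is optional; without it only the global energy weighting and the geometric angle/steepness mixing criterion are active.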
@@ -0,0 +1,179 @@
import numpy as np
from scipy.special import comb
from scipy.interpolate import interp1d

from multioptpy.Utils.calc_tools import calc_path_length_list

def bernstein_interpolation(structures, n_points=20):
    """
    Interpolate between arbitrary number of structures using Bernstein polynomials.
    """
    print("Using Bernstein polynomial interpolation.")
    N = len(structures)
    t_values = np.linspace(0, 1, n_points)
    path = []
    structures = np.array(structures)
    for t in t_values:
        B_t = np.zeros_like(structures[0])
        for k in range(N):
            coef = comb(N-1, k) * (1-t)**(N-1-k) * t**k
            B_t += coef * structures[k]
        path.append(B_t)
    return np.array(path)
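The double loop above evaluates the standard degree-(N-1) Bernstein (Bézier) combination of the N input structures P_k; the resulting curve passes exactly through the first and last structures and globally smooths the interior ones:

\[
\mathbf{B}(t)=\sum_{k=0}^{N-1}\binom{N-1}{k}(1-t)^{\,N-1-k}\,t^{k}\,\mathbf{P}_k,\qquad t\in[0,1].
\]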

def distribute_geometry_by_length_bernstein(geometry_list, angstrom_spacing):
    """
    Distribute geometries by specified distance spacing using Bernstein polynomial interpolation.
    """
    print("Distributing geometries using Bernstein polynomial interpolation.")
    path_length_list = calc_path_length_list(geometry_list)

    # Avoid error if total length is 0
    total_length = path_length_list[-1]
    if total_length < 1e-8:
        return np.array(geometry_list)

    interpolate_dist_list = np.arange(0, total_length, angstrom_spacing)
    # Add the last point if it's missing, taking care not to duplicate
    if interpolate_dist_list[-1] < total_length:
        interpolate_dist_list = np.append(interpolate_dist_list, total_length)

    t_values = interpolate_dist_list / total_length
    N = len(geometry_list)
    new_geometry_list = []

    # Cast to array for speed
    geometry_arr = np.array(geometry_list)

    for t in t_values:
        B_t = np.zeros_like(geometry_arr[0])
        for k in range(N):
            coef = comb(N-1, k) * (1-t)**(N-1-k) * t**k
            B_t += coef * geometry_arr[k]
        new_geometry_list.append(B_t)

    return np.array(new_geometry_list)

def distribute_geometry_by_energy_bernstein(geometry_list, energy_list, gradient_list=None, n_points=None, smoothing=0.1):
    """
    Distribute geometries concentrating on ALL high-energy regions (Multiple Peaks)
    using Bernstein polynomial interpolation.

    Improvements:
    - Uses 'gradient_list' (if provided) for high-precision Peak Detection.
    - Concentrates nodes on secondary transition states as well as the global maximum.

    Args:
        geometry_list: list of np.ndarray
        energy_list: list of float
        gradient_list: list of np.ndarray (dE/dx). Optional.
        n_points: int, number of output nodes
        smoothing: float, prevents node density from becoming zero

    Returns:
        new_geometry_list: np.ndarray
    """
    print("Distributing geometries with Energy-Weighting (Multi-Peak) using Bernstein polynomial interpolation.")

    if len(geometry_list) != len(energy_list):
        raise ValueError("Length of geometry_list and energy_list must be the same.")

    if n_points is None:
        n_points = len(geometry_list)

    geometry_arr = np.array(geometry_list)
    energies = np.array(energy_list)
    n_nodes = len(energies)

    # 1. Calculate the current physical path length
    path_length_list = calc_path_length_list(geometry_list)
    total_physical_length = path_length_list[-1]

    if total_physical_length < 1e-8:
        return geometry_arr

    current_s = path_length_list / total_physical_length

    # 2. Calculate Weights (Global + Local)

    # --- A. Global Energy Weights ---
    E_min = np.min(energies)
    E_max = np.max(energies)

    if E_max - E_min < 1e-6:
        w_global = np.zeros_like(energies)
    else:
        w_global = (energies - E_min) / (E_max - E_min)

    # --- B. Local Peak Weights ---
    w_local = np.zeros_like(energies)

    if n_nodes > 2:
        E_center = energies[1:-1]
        E_neighbors = (energies[:-2] + energies[2:]) / 2.0
        is_hill = E_center > E_neighbors

        if gradient_list is not None:
            # Gradient-based Curvature
            grad_arr = np.array(gradient_list)

            vecs = geometry_arr[1:] - geometry_arr[:-1]
            vec_norms = np.linalg.norm(vecs, axis=(1,2))
            valid = vec_norms > 1e-10
            tangents = np.zeros_like(geometry_arr)
            tangents[:-1][valid] = vecs[valid] / vec_norms[valid][:, np.newaxis, np.newaxis]
            tangents[-1] = tangents[-2]

            slopes = np.sum(grad_arr * tangents, axis=(1,2))
            slope_change = np.zeros_like(slopes)
            slope_change[1:-1] = slopes[2:] - slopes[:-2]

            peak_metric = np.abs(slope_change[1:-1])
            w_local[1:-1][is_hill] = peak_metric[is_hill]

        else:
            # Energy-based Convexity
            convexity = E_center - E_neighbors
            peak_score = np.maximum(convexity, 0.0)
            w_local[1:-1] = peak_score

        # Normalize
        p_max = np.max(w_local)
        if p_max > 1e-6:
            w_local /= p_max

        w_local[0] = w_local[1]
        w_local[-1] = w_local[-2]

    # --- C. Combine Weights ---
    weights = 0.5 * w_global + 0.5 * w_local + smoothing

    # 3. Integration of Weighted Arc Length
    segment_dists = np.diff(path_length_list)
    segment_weights = (weights[:-1] + weights[1:]) / 2.0
    weighted_segments = segment_dists * segment_weights

    cum_weighted_dist = np.concatenate(([0.0], np.cumsum(weighted_segments)))
    total_weighted_length = cum_weighted_dist[-1]

    # 4. Create new grid in Weighted Space
    target_weighted_grid = np.linspace(0, total_weighted_length, n_points)

    # 5. Inverse mapping: Weighted grid -> Physical distance (s)
    target_physical_s = np.interp(target_weighted_grid, cum_weighted_dist, current_s)

    t_values = target_physical_s

    # 6. Coordinate generation using Bernstein polynomials
    N = len(geometry_arr)
    new_geometry_list = []

    for t in t_values:
        B_t = np.zeros_like(geometry_arr[0])
        for k in range(N):
            coef = comb(N-1, k) * (1-t)**(N-1-k) * t**k
            B_t += coef * geometry_arr[k]
        new_geometry_list.append(B_t)

    return np.array(new_geometry_list)
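A minimal usage sketch for the two redistribution helpers above, assuming the module is importable as multioptpy.Interpolation.binomial_interpolation (the +179-line file in the listing); the toy path and energies are placeholder values:

import numpy as np
from multioptpy.Interpolation.binomial_interpolation import (
    distribute_geometry_by_length_bernstein,
    distribute_geometry_by_energy_bernstein,
)

# hypothetical 4-image path of a diatomic, bond stretched from 0.7 to 1.0 Angstrom
geoms = [np.array([[0.0, 0.0, 0.0], [0.7 + 0.1 * i, 0.0, 0.0]]) for i in range(4)]

# resample to roughly one image every 0.05 Angstrom of path length
dense_path = distribute_geometry_by_length_bernstein(geoms, angstrom_spacing=0.05)

# resample to 8 images, packing them around the (placeholder) energy maximum
energies = [0.00, 0.03, 0.06, 0.02]
repacked = distribute_geometry_by_energy_bernstein(geoms, energies, n_points=8)
print(dense_path.shape, repacked.shape)  # e.g. (7, 2, 3) (8, 2, 3)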