MultiOptPy 1.20.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- multioptpy/Calculator/__init__.py +0 -0
- multioptpy/Calculator/ase_calculation_tools.py +424 -0
- multioptpy/Calculator/ase_tools/__init__.py +0 -0
- multioptpy/Calculator/ase_tools/fairchem.py +28 -0
- multioptpy/Calculator/ase_tools/gamess.py +19 -0
- multioptpy/Calculator/ase_tools/gaussian.py +165 -0
- multioptpy/Calculator/ase_tools/mace.py +28 -0
- multioptpy/Calculator/ase_tools/mopac.py +19 -0
- multioptpy/Calculator/ase_tools/nwchem.py +31 -0
- multioptpy/Calculator/ase_tools/orca.py +22 -0
- multioptpy/Calculator/ase_tools/pygfn0.py +37 -0
- multioptpy/Calculator/dxtb_calculation_tools.py +344 -0
- multioptpy/Calculator/emt_calculation_tools.py +458 -0
- multioptpy/Calculator/gpaw_calculation_tools.py +183 -0
- multioptpy/Calculator/lj_calculation_tools.py +314 -0
- multioptpy/Calculator/psi4_calculation_tools.py +334 -0
- multioptpy/Calculator/pwscf_calculation_tools.py +189 -0
- multioptpy/Calculator/pyscf_calculation_tools.py +327 -0
- multioptpy/Calculator/sqm1_calculation_tools.py +611 -0
- multioptpy/Calculator/sqm2_calculation_tools.py +376 -0
- multioptpy/Calculator/tblite_calculation_tools.py +352 -0
- multioptpy/Calculator/tersoff_calculation_tools.py +818 -0
- multioptpy/Constraint/__init__.py +0 -0
- multioptpy/Constraint/constraint_condition.py +834 -0
- multioptpy/Coordinate/__init__.py +0 -0
- multioptpy/Coordinate/polar_coordinate.py +199 -0
- multioptpy/Coordinate/redundant_coordinate.py +638 -0
- multioptpy/IRC/__init__.py +0 -0
- multioptpy/IRC/converge_criteria.py +28 -0
- multioptpy/IRC/dvv.py +544 -0
- multioptpy/IRC/euler.py +439 -0
- multioptpy/IRC/hpc.py +564 -0
- multioptpy/IRC/lqa.py +540 -0
- multioptpy/IRC/modekill.py +662 -0
- multioptpy/IRC/rk4.py +579 -0
- multioptpy/Interpolation/__init__.py +0 -0
- multioptpy/Interpolation/adaptive_interpolation.py +283 -0
- multioptpy/Interpolation/binomial_interpolation.py +179 -0
- multioptpy/Interpolation/geodesic_interpolation.py +785 -0
- multioptpy/Interpolation/interpolation.py +156 -0
- multioptpy/Interpolation/linear_interpolation.py +473 -0
- multioptpy/Interpolation/savitzky_golay_interpolation.py +252 -0
- multioptpy/Interpolation/spline_interpolation.py +353 -0
- multioptpy/MD/__init__.py +0 -0
- multioptpy/MD/thermostat.py +185 -0
- multioptpy/MEP/__init__.py +0 -0
- multioptpy/MEP/pathopt_bneb_force.py +443 -0
- multioptpy/MEP/pathopt_dmf_force.py +448 -0
- multioptpy/MEP/pathopt_dneb_force.py +130 -0
- multioptpy/MEP/pathopt_ewbneb_force.py +207 -0
- multioptpy/MEP/pathopt_gpneb_force.py +512 -0
- multioptpy/MEP/pathopt_lup_force.py +113 -0
- multioptpy/MEP/pathopt_neb_force.py +225 -0
- multioptpy/MEP/pathopt_nesb_force.py +205 -0
- multioptpy/MEP/pathopt_om_force.py +153 -0
- multioptpy/MEP/pathopt_qsm_force.py +174 -0
- multioptpy/MEP/pathopt_qsmv2_force.py +304 -0
- multioptpy/ModelFunction/__init__.py +7 -0
- multioptpy/ModelFunction/avoiding_model_function.py +29 -0
- multioptpy/ModelFunction/binary_image_ts_search_model_function.py +47 -0
- multioptpy/ModelFunction/conical_model_function.py +26 -0
- multioptpy/ModelFunction/opt_meci.py +50 -0
- multioptpy/ModelFunction/opt_mesx.py +47 -0
- multioptpy/ModelFunction/opt_mesx_2.py +49 -0
- multioptpy/ModelFunction/seam_model_function.py +27 -0
- multioptpy/ModelHessian/__init__.py +0 -0
- multioptpy/ModelHessian/approx_hessian.py +147 -0
- multioptpy/ModelHessian/calc_params.py +227 -0
- multioptpy/ModelHessian/fischer.py +236 -0
- multioptpy/ModelHessian/fischerd3.py +360 -0
- multioptpy/ModelHessian/fischerd4.py +398 -0
- multioptpy/ModelHessian/gfn0xtb.py +633 -0
- multioptpy/ModelHessian/gfnff.py +709 -0
- multioptpy/ModelHessian/lindh.py +165 -0
- multioptpy/ModelHessian/lindh2007d2.py +707 -0
- multioptpy/ModelHessian/lindh2007d3.py +822 -0
- multioptpy/ModelHessian/lindh2007d4.py +1030 -0
- multioptpy/ModelHessian/morse.py +106 -0
- multioptpy/ModelHessian/schlegel.py +144 -0
- multioptpy/ModelHessian/schlegeld3.py +322 -0
- multioptpy/ModelHessian/schlegeld4.py +559 -0
- multioptpy/ModelHessian/shortrange.py +346 -0
- multioptpy/ModelHessian/swartd2.py +496 -0
- multioptpy/ModelHessian/swartd3.py +706 -0
- multioptpy/ModelHessian/swartd4.py +918 -0
- multioptpy/ModelHessian/tshess.py +40 -0
- multioptpy/Optimizer/QHAdam.py +61 -0
- multioptpy/Optimizer/__init__.py +0 -0
- multioptpy/Optimizer/abc_fire.py +83 -0
- multioptpy/Optimizer/adabelief.py +58 -0
- multioptpy/Optimizer/adabound.py +68 -0
- multioptpy/Optimizer/adadelta.py +65 -0
- multioptpy/Optimizer/adaderivative.py +56 -0
- multioptpy/Optimizer/adadiff.py +68 -0
- multioptpy/Optimizer/adafactor.py +70 -0
- multioptpy/Optimizer/adam.py +65 -0
- multioptpy/Optimizer/adamax.py +62 -0
- multioptpy/Optimizer/adamod.py +83 -0
- multioptpy/Optimizer/adamw.py +65 -0
- multioptpy/Optimizer/adiis.py +523 -0
- multioptpy/Optimizer/afire_neb.py +282 -0
- multioptpy/Optimizer/block_hessian_update.py +709 -0
- multioptpy/Optimizer/c2diis.py +491 -0
- multioptpy/Optimizer/component_wise_scaling.py +405 -0
- multioptpy/Optimizer/conjugate_gradient.py +82 -0
- multioptpy/Optimizer/conjugate_gradient_neb.py +345 -0
- multioptpy/Optimizer/coordinate_locking.py +405 -0
- multioptpy/Optimizer/dic_rsirfo.py +1015 -0
- multioptpy/Optimizer/ediis.py +417 -0
- multioptpy/Optimizer/eve.py +76 -0
- multioptpy/Optimizer/fastadabelief.py +61 -0
- multioptpy/Optimizer/fire.py +77 -0
- multioptpy/Optimizer/fire2.py +249 -0
- multioptpy/Optimizer/fire_neb.py +92 -0
- multioptpy/Optimizer/gan_step.py +486 -0
- multioptpy/Optimizer/gdiis.py +609 -0
- multioptpy/Optimizer/gediis.py +203 -0
- multioptpy/Optimizer/geodesic_step.py +433 -0
- multioptpy/Optimizer/gpmin.py +633 -0
- multioptpy/Optimizer/gpr_step.py +364 -0
- multioptpy/Optimizer/gradientdescent.py +78 -0
- multioptpy/Optimizer/gradientdescent_neb.py +52 -0
- multioptpy/Optimizer/hessian_update.py +433 -0
- multioptpy/Optimizer/hybrid_rfo.py +998 -0
- multioptpy/Optimizer/kdiis.py +625 -0
- multioptpy/Optimizer/lars.py +21 -0
- multioptpy/Optimizer/lbfgs.py +253 -0
- multioptpy/Optimizer/lbfgs_neb.py +355 -0
- multioptpy/Optimizer/linesearch.py +236 -0
- multioptpy/Optimizer/lookahead.py +40 -0
- multioptpy/Optimizer/nadam.py +64 -0
- multioptpy/Optimizer/newton.py +200 -0
- multioptpy/Optimizer/prodigy.py +70 -0
- multioptpy/Optimizer/purtubation.py +16 -0
- multioptpy/Optimizer/quickmin_neb.py +245 -0
- multioptpy/Optimizer/radam.py +75 -0
- multioptpy/Optimizer/rfo_neb.py +302 -0
- multioptpy/Optimizer/ric_rfo.py +842 -0
- multioptpy/Optimizer/rl_step.py +627 -0
- multioptpy/Optimizer/rmspropgrave.py +65 -0
- multioptpy/Optimizer/rsirfo.py +1647 -0
- multioptpy/Optimizer/rsprfo.py +1056 -0
- multioptpy/Optimizer/sadam.py +60 -0
- multioptpy/Optimizer/samsgrad.py +63 -0
- multioptpy/Optimizer/tr_lbfgs.py +678 -0
- multioptpy/Optimizer/trim.py +273 -0
- multioptpy/Optimizer/trust_radius.py +207 -0
- multioptpy/Optimizer/trust_radius_neb.py +121 -0
- multioptpy/Optimizer/yogi.py +60 -0
- multioptpy/OtherMethod/__init__.py +0 -0
- multioptpy/OtherMethod/addf.py +1150 -0
- multioptpy/OtherMethod/dimer.py +895 -0
- multioptpy/OtherMethod/elastic_image_pair.py +629 -0
- multioptpy/OtherMethod/modelfunction.py +456 -0
- multioptpy/OtherMethod/newton_traj.py +454 -0
- multioptpy/OtherMethod/twopshs.py +1095 -0
- multioptpy/PESAnalyzer/__init__.py +0 -0
- multioptpy/PESAnalyzer/calc_irc_curvature.py +125 -0
- multioptpy/PESAnalyzer/cmds_analysis.py +152 -0
- multioptpy/PESAnalyzer/koopman_analysis.py +268 -0
- multioptpy/PESAnalyzer/pca_analysis.py +314 -0
- multioptpy/Parameters/__init__.py +0 -0
- multioptpy/Parameters/atomic_mass.py +20 -0
- multioptpy/Parameters/atomic_number.py +22 -0
- multioptpy/Parameters/covalent_radii.py +44 -0
- multioptpy/Parameters/d2.py +61 -0
- multioptpy/Parameters/d3.py +63 -0
- multioptpy/Parameters/d4.py +103 -0
- multioptpy/Parameters/dreiding.py +34 -0
- multioptpy/Parameters/gfn0xtb_param.py +137 -0
- multioptpy/Parameters/gfnff_param.py +315 -0
- multioptpy/Parameters/gnb.py +104 -0
- multioptpy/Parameters/parameter.py +22 -0
- multioptpy/Parameters/uff.py +72 -0
- multioptpy/Parameters/unit_values.py +20 -0
- multioptpy/Potential/AFIR_potential.py +55 -0
- multioptpy/Potential/LJ_repulsive_potential.py +345 -0
- multioptpy/Potential/__init__.py +0 -0
- multioptpy/Potential/anharmonic_keep_potential.py +28 -0
- multioptpy/Potential/asym_elllipsoidal_potential.py +718 -0
- multioptpy/Potential/electrostatic_potential.py +69 -0
- multioptpy/Potential/flux_potential.py +30 -0
- multioptpy/Potential/gaussian_potential.py +101 -0
- multioptpy/Potential/idpp.py +516 -0
- multioptpy/Potential/keep_angle_potential.py +146 -0
- multioptpy/Potential/keep_dihedral_angle_potential.py +105 -0
- multioptpy/Potential/keep_outofplain_angle_potential.py +70 -0
- multioptpy/Potential/keep_potential.py +99 -0
- multioptpy/Potential/mechano_force_potential.py +74 -0
- multioptpy/Potential/nanoreactor_potential.py +52 -0
- multioptpy/Potential/potential.py +896 -0
- multioptpy/Potential/spacer_model_potential.py +221 -0
- multioptpy/Potential/switching_potential.py +258 -0
- multioptpy/Potential/universal_potential.py +34 -0
- multioptpy/Potential/value_range_potential.py +36 -0
- multioptpy/Potential/void_point_potential.py +25 -0
- multioptpy/SQM/__init__.py +0 -0
- multioptpy/SQM/sqm1/__init__.py +0 -0
- multioptpy/SQM/sqm1/sqm1_core.py +1792 -0
- multioptpy/SQM/sqm2/__init__.py +0 -0
- multioptpy/SQM/sqm2/calc_tools.py +95 -0
- multioptpy/SQM/sqm2/sqm2_basis.py +850 -0
- multioptpy/SQM/sqm2/sqm2_bond.py +119 -0
- multioptpy/SQM/sqm2/sqm2_core.py +303 -0
- multioptpy/SQM/sqm2/sqm2_data.py +1229 -0
- multioptpy/SQM/sqm2/sqm2_disp.py +65 -0
- multioptpy/SQM/sqm2/sqm2_eeq.py +243 -0
- multioptpy/SQM/sqm2/sqm2_overlapint.py +704 -0
- multioptpy/SQM/sqm2/sqm2_qm.py +578 -0
- multioptpy/SQM/sqm2/sqm2_rep.py +66 -0
- multioptpy/SQM/sqm2/sqm2_srb.py +70 -0
- multioptpy/Thermo/__init__.py +0 -0
- multioptpy/Thermo/normal_mode_analyzer.py +865 -0
- multioptpy/Utils/__init__.py +0 -0
- multioptpy/Utils/bond_connectivity.py +264 -0
- multioptpy/Utils/calc_tools.py +884 -0
- multioptpy/Utils/oniom.py +96 -0
- multioptpy/Utils/pbc.py +48 -0
- multioptpy/Utils/riemann_curvature.py +208 -0
- multioptpy/Utils/symmetry_analyzer.py +482 -0
- multioptpy/Visualization/__init__.py +0 -0
- multioptpy/Visualization/visualization.py +156 -0
- multioptpy/WFAnalyzer/MO_analysis.py +104 -0
- multioptpy/WFAnalyzer/__init__.py +0 -0
- multioptpy/Wrapper/__init__.py +0 -0
- multioptpy/Wrapper/autots.py +1239 -0
- multioptpy/Wrapper/ieip_wrapper.py +93 -0
- multioptpy/Wrapper/md_wrapper.py +92 -0
- multioptpy/Wrapper/neb_wrapper.py +94 -0
- multioptpy/Wrapper/optimize_wrapper.py +76 -0
- multioptpy/__init__.py +5 -0
- multioptpy/entrypoints.py +916 -0
- multioptpy/fileio.py +660 -0
- multioptpy/ieip.py +340 -0
- multioptpy/interface.py +1086 -0
- multioptpy/irc.py +529 -0
- multioptpy/moleculardynamics.py +432 -0
- multioptpy/neb.py +1267 -0
- multioptpy/optimization.py +1553 -0
- multioptpy/optimizer.py +709 -0
- multioptpy-1.20.2.dist-info/METADATA +438 -0
- multioptpy-1.20.2.dist-info/RECORD +246 -0
- multioptpy-1.20.2.dist-info/WHEEL +5 -0
- multioptpy-1.20.2.dist-info/entry_points.txt +9 -0
- multioptpy-1.20.2.dist-info/licenses/LICENSE +674 -0
- multioptpy-1.20.2.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,609 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class GDIIS:
|
|
5
|
+
def __init__(self):
|
|
6
|
+
"""
|
|
7
|
+
ref.: Chemical Physics Letters, 1980, 73(2), 393-398.
|
|
8
|
+
Journal of Molecular Structure, 1984, 114, 31-34.
|
|
9
|
+
Physical Chemistry Chemical Physics, 2002, 4(1), 11-15.
|
|
10
|
+
"""
|
|
11
|
+
# GDIIS parameters with enhanced defaults
|
|
12
|
+
self.gdiis_history_size = 5 # Reduced history size for better stability
|
|
13
|
+
self.gdiis_min_points = 3 # Require more points before starting GDIIS
|
|
14
|
+
self.gdiis_error_threshold = 0.5 # More conservative error threshold
|
|
15
|
+
self.gdiis_weight_initial = 0.1 # Start with lower GDIIS contribution
|
|
16
|
+
self.gdiis_weight_max = 0.7 # Maximum GDIIS weight
|
|
17
|
+
|
|
18
|
+
# Robust coefficient handling
|
|
19
|
+
self.gdiis_coeff_min = -0.7 # Stricter minimum coefficient value
|
|
20
|
+
self.gdiis_coeff_max = 0.7 # Stricter maximum coefficient value
|
|
21
|
+
self.gdiis_regularization = 1e-8 # Increased regularization parameter
|
|
22
|
+
|
|
23
|
+
# Enhanced error recovery
|
|
24
|
+
self.gdiis_failure_count = 0 # Counter for consecutive GDIIS failures
|
|
25
|
+
self.gdiis_max_failures = 2 # Reset history after fewer failures
|
|
26
|
+
self.gdiis_recovery_steps = 3 # Number of steps in recovery mode
|
|
27
|
+
self.gdiis_current_recovery = 0 # Current recovery step counter
|
|
28
|
+
|
|
29
|
+
# Aggressive outlier detection
|
|
30
|
+
self.gdiis_step_ratio_max = 2.0 # Maximum allowed ratio between GDIIS and Original steps
|
|
31
|
+
self.gdiis_outlier_threshold = 3.0 # Standard deviations for outlier detection
|
|
32
|
+
|
|
33
|
+
# Dynamic weight adjustment
|
|
34
|
+
self.gdiis_weight_current = self.gdiis_weight_initial # Current weight
|
|
35
|
+
self.gdiis_weight_increment = 0.05 # Increment for successful iterations
|
|
36
|
+
self.gdiis_weight_decrement = 0.15 # Larger decrement for failures
|
|
37
|
+
|
|
38
|
+
# GDIIS history storage with quality metrics
|
|
39
|
+
self.geom_history = []
|
|
40
|
+
self.grad_history = []
|
|
41
|
+
self.quality_history = [] # Track quality of each point
|
|
42
|
+
|
|
43
|
+
# Convergence monitoring
|
|
44
|
+
self.prev_grad_rms = float('inf')
|
|
45
|
+
self.non_improving_count = 0
|
|
46
|
+
self.iter = 0
|
|
47
|
+
return
|
|
48
|
+
|
|
49
|
+
def _update_gdiis_history(self, geometry, gradient, step_quality=1.0):
|
|
50
|
+
"""
|
|
51
|
+
Update the GDIIS history with quality-based filtering
|
|
52
|
+
|
|
53
|
+
Parameters:
|
|
54
|
+
-----------
|
|
55
|
+
geometry : numpy.ndarray
|
|
56
|
+
Current geometry
|
|
57
|
+
gradient : numpy.ndarray
|
|
58
|
+
Current gradient
|
|
59
|
+
step_quality : float
|
|
60
|
+
Quality metric for this point (1.0 = good, <1.0 = lower quality)
|
|
61
|
+
"""
|
|
62
|
+
# Add current point to history with quality metric
|
|
63
|
+
self.geom_history.append(geometry.copy())
|
|
64
|
+
self.grad_history.append(gradient.copy())
|
|
65
|
+
self.quality_history.append(step_quality)
|
|
66
|
+
|
|
67
|
+
# If in recovery mode, only keep the most recent points
|
|
68
|
+
if self.gdiis_current_recovery > 0:
|
|
69
|
+
self.gdiis_current_recovery -= 1
|
|
70
|
+
if len(self.geom_history) > 2:
|
|
71
|
+
self.geom_history = self.geom_history[-2:]
|
|
72
|
+
self.grad_history = self.grad_history[-2:]
|
|
73
|
+
self.quality_history = self.quality_history[-2:]
|
|
74
|
+
return
|
|
75
|
+
|
|
76
|
+
# Limit history size
|
|
77
|
+
if len(self.geom_history) > self.gdiis_history_size:
|
|
78
|
+
# Remove lowest quality point (except for the newest point)
|
|
79
|
+
if len(self.geom_history) > 2:
|
|
80
|
+
# Don't consider the most recent point for removal
|
|
81
|
+
oldest_qualities = self.quality_history[:-1]
|
|
82
|
+
worst_idx = np.argmin(oldest_qualities)
|
|
83
|
+
|
|
84
|
+
# Remove the lowest quality point
|
|
85
|
+
self.geom_history.pop(worst_idx)
|
|
86
|
+
self.grad_history.pop(worst_idx)
|
|
87
|
+
self.quality_history.pop(worst_idx)
|
|
88
|
+
else:
|
|
89
|
+
# Default to removing oldest point if we only have 2 points
|
|
90
|
+
self.geom_history.pop(0)
|
|
91
|
+
self.grad_history.pop(0)
|
|
92
|
+
self.quality_history.pop(0)
|
|
93
|
+
|
|
94
|
+
def _condition_b_matrix(self, B, n_points):
|
|
95
|
+
"""
|
|
96
|
+
Apply advanced conditioning to improve B matrix stability
|
|
97
|
+
|
|
98
|
+
Parameters:
|
|
99
|
+
-----------
|
|
100
|
+
B : numpy.ndarray
|
|
101
|
+
The B matrix to condition
|
|
102
|
+
n_points : int
|
|
103
|
+
Number of actual data points
|
|
104
|
+
|
|
105
|
+
Returns:
|
|
106
|
+
--------
|
|
107
|
+
numpy.ndarray
|
|
108
|
+
Conditioned B matrix
|
|
109
|
+
"""
|
|
110
|
+
# 1. Add regularization to diagonal for numerical stability
|
|
111
|
+
np.fill_diagonal(B[:n_points, :n_points],
|
|
112
|
+
np.diag(B[:n_points, :n_points]) + self.gdiis_regularization)
|
|
113
|
+
|
|
114
|
+
# 2. Apply weighted regularization based on point quality
|
|
115
|
+
if hasattr(self, 'quality_history') and len(self.quality_history) == n_points:
|
|
116
|
+
for i in range(n_points):
|
|
117
|
+
# Lower quality points get more regularization
|
|
118
|
+
quality_factor = self.quality_history[i]
|
|
119
|
+
B[i, i] += self.gdiis_regularization * (2.0 - quality_factor) / quality_factor
|
|
120
|
+
|
|
121
|
+
# 3. Improve conditioning with SVD-based truncation
|
|
122
|
+
try:
|
|
123
|
+
# Apply SVD to the main block
|
|
124
|
+
u, s, vh = np.linalg.svd(B[:n_points, :n_points])
|
|
125
|
+
|
|
126
|
+
# Truncate small singular values (improves condition number)
|
|
127
|
+
s_max = np.max(s)
|
|
128
|
+
s_cutoff = s_max * 1e-10
|
|
129
|
+
s_fixed = np.array([max(sv, s_cutoff) for sv in s])
|
|
130
|
+
|
|
131
|
+
# Reconstruct with improved conditioning
|
|
132
|
+
B_improved = np.dot(u * s_fixed, vh)
|
|
133
|
+
|
|
134
|
+
# Put the improved block back
|
|
135
|
+
B[:n_points, :n_points] = B_improved
|
|
136
|
+
except:
|
|
137
|
+
# If SVD fails, use simpler Tikhonov regularization
|
|
138
|
+
identity = np.eye(n_points)
|
|
139
|
+
B[:n_points, :n_points] += 1e-7 * identity
|
|
140
|
+
|
|
141
|
+
return B
|
|
142
|
+
|
|
143
|
+
    def _solve_gdiis_equations(self, error_vectors, qualities=None):
        """
        Solve GDIIS equations with multiple robustness techniques.

        Builds the DIIS B matrix from (quality-weighted) dot products of the
        error vectors, appends the Lagrange-multiplier constraint row/column
        enforcing sum(c) = 1, and tries a cascade of progressively more
        forgiving linear solvers.

        Parameters:
        -----------
        error_vectors : sequence of numpy.ndarray
            Error vectors (here: stored gradients). NOTE(review): the dot
            product `error_vectors[i].T @ error_vectors[j]` only yields a
            scalar for flattened (1-D or column) vectors — confirm callers
            pass flattened gradients.
        qualities : sequence of float or None
            Per-point quality weights; defaults to all ones.

        Returns:
        --------
        numpy.ndarray
            The n_points extrapolation coefficients (Lagrange multiplier
            stripped). Falls back to weighting only the most recent point
            if every solver fails.
        """
        n_points = len(error_vectors)

        # Handle case of too few points
        if n_points < 2:
            return np.array([1.0])

        # Use quality weighting if available
        if qualities is None:
            qualities = np.ones(n_points)

        # Construct the B matrix with dot products of error vectors
        B = np.zeros((n_points + 1, n_points + 1))

        # Fill B matrix with weighted error vector dot products
        for i in range(n_points):
            for j in range(n_points):
                # Weight error dot products by quality
                weight_factor = np.sqrt(qualities[i] * qualities[j])
                B[i, j] = weight_factor * np.dot(error_vectors[i].T, error_vectors[j])

        # Apply advanced conditioning to the B matrix
        B = self._condition_b_matrix(B, n_points)

        # Add Lagrange multiplier constraints
        B[n_points, :n_points] = 1.0
        B[:n_points, n_points] = 1.0
        B[n_points, n_points] = 0.0

        # Right-hand side vector with constraint
        rhs = np.zeros(n_points + 1)
        rhs[n_points] = 1.0

        # Multi-stage solver with progressive fallbacks; each entry is
        # (label, zero-argument callable producing a candidate solution).
        methods = [
            ("Standard solve", lambda: np.linalg.solve(B, rhs)),
            ("SVD solve", lambda: self._svd_solve(B, rhs, 1e-12)),
            ("Regularized solve", lambda: np.linalg.solve(B + np.diag([1e-6]*(n_points+1)), rhs)),
            ("Least squares", lambda: np.linalg.lstsq(B, rhs, rcond=1e-8)[0]),
            ("Minimal solution", lambda: self._minimal_solution(n_points))
        ]

        coefficients = None
        for method_name, solver in methods:
            try:
                coefficients = solver()
                # Check if solution is reasonable: finite and satisfying the
                # sum(c) = 1 constraint to within 1%.
                if not np.any(np.isnan(coefficients)) and np.abs(np.sum(coefficients[:n_points]) - 1.0) < 0.01:
                    print(f"GDIIS using {method_name}")
                    break
            except Exception as e:
                print(f"{method_name} failed: {str(e)}")

        # If all methods failed, default to using the most recent point
        if coefficients is None or np.any(np.isnan(coefficients)):
            print("All GDIIS solvers failed, using last point only")
            coefficients = np.zeros(n_points + 1)
            coefficients[n_points-1] = 1.0 # Use the most recent point
            coefficients[n_points] = 0.0 # Zero Lagrange multiplier

        # Extract actual coefficients (without Lagrange multiplier)
        return coefficients[:n_points]
|
|
208
|
+
|
|
209
|
+
def _svd_solve(self, A, b, rcond=1e-15):
|
|
210
|
+
"""
|
|
211
|
+
Solve linear system using SVD with improved handling of small singular values
|
|
212
|
+
"""
|
|
213
|
+
u, s, vh = np.linalg.svd(A, full_matrices=False)
|
|
214
|
+
|
|
215
|
+
# More sophisticated singular value filtering
|
|
216
|
+
s_max = np.max(s)
|
|
217
|
+
mask = s > rcond * s_max
|
|
218
|
+
|
|
219
|
+
# Create pseudo-inverse with smooth cutoff for small singular values
|
|
220
|
+
s_inv = np.zeros_like(s)
|
|
221
|
+
for i, (val, use) in enumerate(zip(s, mask)):
|
|
222
|
+
if use:
|
|
223
|
+
s_inv[i] = 1.0/val
|
|
224
|
+
else:
|
|
225
|
+
# Smooth transition to zero for small values
|
|
226
|
+
ratio = val/(rcond * s_max)
|
|
227
|
+
s_inv[i] = ratio/(val * (1.0 + (1.0 - ratio)**2))
|
|
228
|
+
|
|
229
|
+
# Calculate solution using pseudo-inverse
|
|
230
|
+
return np.dot(np.dot(np.dot(vh.T, np.diag(s_inv)), u.T), b)
|
|
231
|
+
|
|
232
|
+
def _minimal_solution(self, n_points):
|
|
233
|
+
"""
|
|
234
|
+
Fallback solution when all numerical methods fail
|
|
235
|
+
"""
|
|
236
|
+
# Create a solution that gives higher weight to more recent points
|
|
237
|
+
result = np.zeros(n_points + 1)
|
|
238
|
+
|
|
239
|
+
# Linear ramp with highest weight to most recent point
|
|
240
|
+
total_weight = 0
|
|
241
|
+
for i in range(n_points):
|
|
242
|
+
# Linear weighting: i+1 gives more weight to later points
|
|
243
|
+
result[i] = i + 1
|
|
244
|
+
total_weight += result[i]
|
|
245
|
+
|
|
246
|
+
# Normalize to sum=1 and add zero Lagrange multiplier
|
|
247
|
+
result[:n_points] /= total_weight
|
|
248
|
+
return result
|
|
249
|
+
|
|
250
|
+
    def _filter_gdiis_coefficients(self, coeffs, strict=False):
        """
        Advanced filtering of extreme coefficient values.

        Side effect: increments ``self.gdiis_failure_count`` when filtering
        was required and decrements it (floored at 0) when the coefficients
        were already acceptable.

        Parameters:
        -----------
        coeffs : numpy.ndarray
            DIIS coefficients
        strict : bool
            Whether to use stricter filtering limits

        Returns:
        --------
        tuple
            (filtered_coeffs, was_filtered, quality_metric)
        """
        # Adjust bounds based on strictness. NOTE: gdiis_coeff_min is
        # negative, so multiplying by 1.5 makes the lower bound *looser*
        # while 0.9 tightens the upper bound.
        coeff_min = self.gdiis_coeff_min * (1.5 if strict else 1.0)
        coeff_max = self.gdiis_coeff_max * (0.9 if strict else 1.0)

        # Check for extreme values
        extreme_values = np.logical_or(coeffs < coeff_min, coeffs > coeff_max)
        has_extreme_values = np.any(extreme_values)

        # Calculate quality metric (1.0 = perfect, lower values indicate problems)
        quality = 1.0
        if has_extreme_values:
            # Reduce quality based on how extreme the coefficients are
            # (fraction of total absolute weight carried by extreme entries).
            extreme_ratio = np.sum(np.abs(coeffs[extreme_values])) / np.sum(np.abs(coeffs))
            quality = max(0.1, 1.0 - extreme_ratio)

            print(f"Warning: Extreme GDIIS coefficients detected: {[f'{c:.3f}' for c in coeffs]}")

            # Apply multi-stage filtering

            # 1. First attempt: Simple clipping and renormalization
            clipped_coeffs = np.clip(coeffs, coeff_min, coeff_max)
            sum_clipped = np.sum(clipped_coeffs)

            if abs(sum_clipped - 1.0) > 1e-10 and sum_clipped > 1e-10:
                normalized_coeffs = clipped_coeffs / sum_clipped
            else:
                # 2. If simple clipping failed, try redistribution approach
                print("Warning: Simple coefficient normalization failed, using redistribution")

                # Start with minimum values
                adjusted_coeffs = np.full_like(coeffs, coeff_min)

                # Distribute available weight (1.0 - sum(mins)) proportionally to valid coefficients
                valid_indices = ~extreme_values
                if np.any(valid_indices):
                    # Use only valid coefficients for distribution
                    valid_sum = np.sum(coeffs[valid_indices])
                    if abs(valid_sum) > 1e-10:
                        remaining = 1.0 - len(coeffs) * coeff_min
                        adjusted_coeffs[valid_indices] += remaining * (coeffs[valid_indices] / valid_sum)
                    else:
                        # If all valid coefficients sum to near zero, use uniform distribution
                        adjusted_coeffs = np.ones_like(coeffs) / len(coeffs)
                else:
                    # If all coefficients are extreme, use exponentially weighted recent points
                    n = len(coeffs)
                    for i in range(n):
                        adjusted_coeffs[i] = 0.5**min(n-i-1, 3) # Exponentially weighted recent points
                    adjusted_coeffs /= np.sum(adjusted_coeffs)

                normalized_coeffs = adjusted_coeffs

            # 3. Check if coefficients still have issues
            if np.any(np.isnan(normalized_coeffs)) or abs(np.sum(normalized_coeffs) - 1.0) > 1e-8:
                # Final fallback: use most recent point with small contributions from others
                print("Warning: Advanced filtering failed, falling back to recent-point dominated solution")
                n = len(coeffs)
                last_dominated = np.zeros_like(coeffs)
                last_dominated[-1] = 0.7 # 70% weight to most recent point

                # Distribute remaining 30% to other points
                remaining_weight = 0.3
                if n > 1:
                    for i in range(n-1):
                        last_dominated[i] = remaining_weight / (n-1)

                normalized_coeffs = last_dominated

            self.gdiis_failure_count += 1
            return normalized_coeffs, True, quality
        else:
            # Calculate quality based on coefficient distribution
            # Prefer solutions where coefficients are more evenly distributed
            n = len(coeffs)
            if n > 1:
                # Shannon entropy as a measure of coefficient distribution;
                # negative coefficients are simply skipped here.
                entropy = 0
                for c in coeffs:
                    if c > 0:
                        entropy -= c * np.log(c)

                # Normalize to [0,1] range
                max_entropy = np.log(n)
                if max_entropy > 0:
                    distribution_quality = min(1.0, entropy / max_entropy)
                    quality = 0.5 + 0.5 * distribution_quality

            self.gdiis_failure_count = max(0, self.gdiis_failure_count - 1) # Reduce failure count on success
            return coeffs, False, quality
|
|
355
|
+
|
|
356
|
+
    def _calculate_gdiis_geometry(self):
        """
        Calculate a new geometry using GDIIS with comprehensive robustness
        measures.

        Returns:
        --------
        tuple
            (extrapolated_geometry, coefficients, success, quality);
            on failure or when GDIIS is not applicable this is
            (None, None, False, 0.0).
        """
        n_points = len(self.geom_history)

        # Not enough accumulated history yet.
        if n_points < self.gdiis_min_points:
            return None, None, False, 0.0

        # Reset history if we've had too many failures
        if self.gdiis_failure_count >= self.gdiis_max_failures:
            print(f"Warning: {self.gdiis_failure_count} consecutive GDIIS failures, resetting history")
            # Keep only the most recent point
            if len(self.geom_history) > 0:
                self.geom_history = [self.geom_history[-1]]
                self.grad_history = [self.grad_history[-1]]
                self.quality_history = [1.0] if hasattr(self, 'quality_history') else []

            self.gdiis_failure_count = 0
            self.gdiis_current_recovery = self.gdiis_recovery_steps
            self.gdiis_weight_current = max(0.2, self.gdiis_weight_current / 2) # Reduce weight

            return None, None, False, 0.0

        try:
            # Calculate GDIIS coefficients with comprehensive robustness measures
            if hasattr(self, 'quality_history') and len(self.quality_history) == n_points:
                qualities = self.quality_history
            else:
                qualities = np.ones(n_points)

            # First pass with standard filtering
            coeffs = self._solve_gdiis_equations(self.grad_history, qualities)
            coeffs, was_filtered, quality = self._filter_gdiis_coefficients(coeffs, strict=False)

            # If first pass needed filtering, try again with stricter limits.
            # NOTE(review): the solver is deterministic, so this second solve
            # recomputes the same raw coefficients; only the stricter
            # filtering differs.
            if was_filtered:
                strict_coeffs = self._solve_gdiis_equations(self.grad_history, qualities)
                strict_coeffs, strict_filtered, strict_quality = self._filter_gdiis_coefficients(strict_coeffs, strict=True)

                # Use the better quality result
                if strict_quality > quality:
                    coeffs = strict_coeffs
                    quality = strict_quality
                    print("Using stricter coefficient filtering (better quality)")

            # Calculate the new geometry as a linear combination
            extrapolated_geometry = np.zeros_like(self.geom_history[0])
            for i in range(n_points):
                extrapolated_geometry += coeffs[i] * self.geom_history[i]

            # Check for NaN values in the result
            if np.any(np.isnan(extrapolated_geometry)):
                print("Warning: NaN values in extrapolated geometry, GDIIS calculation failed")
                self.gdiis_failure_count += 1
                return None, None, False, 0.0

            # Print coefficients (only if they're reasonable)
            print("GDIIS coefficients:", ", ".join(f"{c:.4f}" for c in coeffs))
            print(f"GDIIS quality metric: {quality:.4f}")

            return extrapolated_geometry, coeffs, True, quality

        except Exception as e:
            # Deliberate best-effort catch-all: any numerical failure simply
            # disables GDIIS for this step and bumps the failure counter.
            print(f"GDIIS extrapolation failed: {str(e)}")
            self.gdiis_failure_count += 1
            return None, None, False, 0.0
|
|
423
|
+
|
|
424
|
+
def _validate_gdiis_step(self, original_step, gdiis_step, B_g, quality):
|
|
425
|
+
"""
|
|
426
|
+
Comprehensive validation of the GDIIS step
|
|
427
|
+
|
|
428
|
+
Parameters:
|
|
429
|
+
-----------
|
|
430
|
+
original_step : numpy.ndarray
|
|
431
|
+
Step calculated by the Original method
|
|
432
|
+
gdiis_step : numpy.ndarray
|
|
433
|
+
Step calculated by the GDIIS method
|
|
434
|
+
B_g : numpy.ndarray
|
|
435
|
+
Current gradient
|
|
436
|
+
quality : float
|
|
437
|
+
Quality metric from coefficient calculation
|
|
438
|
+
|
|
439
|
+
Returns:
|
|
440
|
+
--------
|
|
441
|
+
tuple
|
|
442
|
+
(is_valid, validation_quality)
|
|
443
|
+
"""
|
|
444
|
+
# 1. Check gradient alignment
|
|
445
|
+
grad_norm = np.linalg.norm(B_g)
|
|
446
|
+
if grad_norm > 1e-10:
|
|
447
|
+
# Calculate normalized dot products with negative gradient
|
|
448
|
+
neg_grad = -B_g / grad_norm
|
|
449
|
+
original_alignment = np.dot(original_step.flatten(), neg_grad.flatten()) / np.linalg.norm(original_step)
|
|
450
|
+
gdiis_alignment = np.dot(gdiis_step.flatten(), neg_grad.flatten()) / np.linalg.norm(gdiis_step)
|
|
451
|
+
|
|
452
|
+
# GDIIS should point in a reasonable direction compared to Original
|
|
453
|
+
if original_alignment > 0.3 and gdiis_alignment < 0:
|
|
454
|
+
print(f"GDIIS step rejected: opposing gradient direction (Original: {original_alignment:.4f}, GDIIS: {gdiis_alignment:.4f})")
|
|
455
|
+
return False, 0.0
|
|
456
|
+
|
|
457
|
+
# 2. Check step size ratio
|
|
458
|
+
original_norm = np.linalg.norm(original_step)
|
|
459
|
+
gdiis_norm = np.linalg.norm(gdiis_step)
|
|
460
|
+
|
|
461
|
+
if original_norm > 1e-10:
|
|
462
|
+
step_ratio = gdiis_norm / original_norm
|
|
463
|
+
if step_ratio > self.gdiis_step_ratio_max:
|
|
464
|
+
print(f"GDIIS step too large: {step_ratio:.2f} times Original step")
|
|
465
|
+
return False, 0.0
|
|
466
|
+
|
|
467
|
+
# Calculate quality based on step ratio (closer to 1.0 is better)
|
|
468
|
+
ratio_quality = 1.0 - min(1.0, abs(np.log10(step_ratio)))
|
|
469
|
+
else:
|
|
470
|
+
ratio_quality = 0.5 # Neutral if original step is near zero
|
|
471
|
+
|
|
472
|
+
# 3. Check for outliers in the step components
|
|
473
|
+
step_diff = gdiis_step - original_step
|
|
474
|
+
mean_diff = np.mean(step_diff)
|
|
475
|
+
std_diff = np.std(step_diff)
|
|
476
|
+
|
|
477
|
+
if std_diff > 1e-10:
|
|
478
|
+
# Check for components that are far from the mean difference
|
|
479
|
+
outliers = np.abs(step_diff - mean_diff) > self.gdiis_outlier_threshold * std_diff
|
|
480
|
+
outlier_fraction = np.sum(outliers) / len(step_diff)
|
|
481
|
+
|
|
482
|
+
if outlier_fraction > 0.1: # More than 10% of components are outliers
|
|
483
|
+
print(f"GDIIS step rejected: {outlier_fraction*100:.1f}% of components are outliers")
|
|
484
|
+
return False, 0.0
|
|
485
|
+
|
|
486
|
+
# 4. Overall validation quality (combine multiple factors)
|
|
487
|
+
validation_quality = (ratio_quality + quality) / 2.0
|
|
488
|
+
|
|
489
|
+
return True, validation_quality
|
|
490
|
+
|
|
491
|
+
def run(self, geom_num_list, B_g, pre_B_g, original_move_vector):
    """Compute one optimization step, blending a GDIIS step with the original step.

    Updates the GDIIS history, adaptively chooses a GDIIS mixing weight based on
    recent success/failure and convergence progress, validates the GDIIS step via
    self._validate_gdiis_step, and falls back to the original step (or a scaled
    steepest-descent step) whenever GDIIS is unavailable or unreliable.

    Parameters
    ----------
    geom_num_list : np.ndarray
        Current geometry. NOTE(review): reshape(n_coords, 1) below implies this is
        a flat length-n_coords coordinate vector, not an (n_atoms, 3) array —
        TODO confirm against callers.
    B_g : np.ndarray
        Current gradient (same length as geom_num_list).
    pre_B_g : np.ndarray
        Gradient from the previous iteration; used only to estimate step quality.
    original_move_vector : np.ndarray
        Step proposed by the underlying optimizer, shape (n_coords, 1).

    Returns
    -------
    np.ndarray
        The move vector to apply, shape (n_coords, 1).

    Side effects: mutates self.non_improving_count, self.gdiis_weight_current,
    self.prev_grad_rms, self.gdiis_current_recovery, self.gdiis_failure_count,
    the history lists, and self.iter; prints progress diagnostics.
    """
    print("GDIIS method")
    grad_rms = np.sqrt(np.mean(B_g ** 2))
    n_coords = len(geom_num_list)

    print(f"Gradient RMS: {grad_rms:.8f}")

    # Check convergence progress: require at least a 5% RMS-gradient reduction
    # to count this iteration as "improving".
    improving = grad_rms < self.prev_grad_rms * 0.95
    if improving:
        self.non_improving_count = 0
    else:
        self.non_improving_count += 1
        if self.non_improving_count > 2:
            # Reduce GDIIS weight if optimization is stalling
            # (floor of 0.1 keeps some GDIIS contribution), then reset the
            # stall counter so the weight is not repeatedly cut every step.
            self.gdiis_weight_current = max(0.1, self.gdiis_weight_current - 0.1)
            print(f"Optimization stalling, reducing GDIIS weight to {self.gdiis_weight_current:.2f}")
            self.non_improving_count = 0

    self.prev_grad_rms = grad_rms

    # Update GDIIS history with quality information
    step_quality = 1.0  # Default quality
    if self.iter > 0 and np.linalg.norm(pre_B_g) > 1e-10:
        # Estimate quality based on gradient reduction
        grad_change_ratio = np.linalg.norm(B_g) / np.linalg.norm(pre_B_g)
        if grad_change_ratio < 1.0:
            # Gradient decreased, good quality
            step_quality = 1.0
        else:
            # Gradient increased, lower quality — decays logarithmically with
            # the gradient growth, floored at 0.3.
            step_quality = max(0.3, 1.0 / (1.0 + 2*np.log(grad_change_ratio)))

    self._update_gdiis_history(geom_num_list, B_g, step_quality)

    # Skip GDIIS if in recovery mode (countdown set elsewhere after failures).
    if self.gdiis_current_recovery > 0:
        self.gdiis_current_recovery -= 1
        print(f"In GDIIS recovery mode ({self.gdiis_current_recovery} steps remaining), skipping GDIIS")
        move_vector = original_move_vector
    # Apply GDIIS if enough history has been accumulated
    elif len(self.geom_history) >= self.gdiis_min_points:
        # Calculate GDIIS geometry with robust coefficient handling
        gdiis_geom, gdiis_coeffs, success, quality = self._calculate_gdiis_geometry()

        if success and gdiis_geom is not None:
            # Calculate GDIIS step (displacement from the current geometry
            # to the extrapolated GDIIS geometry, as a column vector).
            gdiis_step = (gdiis_geom - geom_num_list).reshape(n_coords, 1)

            # Validate GDIIS step (direction vs. gradient, step-size ratio,
            # component outliers); also yields a 0–1 quality factor.
            is_valid, validation_quality = self._validate_gdiis_step(original_move_vector, gdiis_step, B_g, quality)

            if is_valid:
                # Calculate adaptive weight based on quality metrics.
                # Priority order: past failures > near convergence > stalling.
                if self.gdiis_failure_count > 0:
                    # Reduce GDIIS weight if we've had failures
                    gdiis_weight = max(0.1, self.gdiis_weight_current - self.gdiis_failure_count * self.gdiis_weight_decrement)
                elif grad_rms < 0.01:
                    # Increase GDIIS weight as we converge
                    gdiis_weight = min(self.gdiis_weight_max, self.gdiis_weight_current + self.gdiis_weight_increment)
                elif self.non_improving_count > 0:
                    # Reduce weight if progress is stalling
                    gdiis_weight = max(0.1, self.gdiis_weight_current - 0.05 * self.non_improving_count)
                else:
                    gdiis_weight = self.gdiis_weight_current

                # Scale weight by validation quality
                gdiis_weight *= validation_quality

                original_weight = 1.0 - gdiis_weight

                # Calculate blended step: convex combination of the original
                # optimizer step and the GDIIS step.
                move_vector = original_weight * original_move_vector + gdiis_weight * gdiis_step
                print(f"Using blended step: {original_weight:.4f}*Original + {gdiis_weight:.4f}*GDIIS")

                # Safety check: verify step size is reasonable
                original_norm = np.linalg.norm(original_move_vector)
                blended_norm = np.linalg.norm(move_vector)

                # Only cap if the blend both doubled the original step AND is
                # absolutely large (> 0.3 — presumably Bohr; TODO confirm units).
                if blended_norm > 2.0 * original_norm and blended_norm > 0.3:
                    # Cap step size to avoid large jumps: shrink only the GDIIS
                    # correction (move_vector - original_move_vector), keeping
                    # the original step as the base.
                    print("Warning: GDIIS step too large, scaling down")
                    scale_factor = 2.0 * original_norm / blended_norm
                    move_vector = original_move_vector + scale_factor * (move_vector - original_move_vector)
                    print(f"Step scaled by {scale_factor:.3f}")

                # Update current weight for next iteration (with moderate memory):
                # exponential moving average, 70% old weight / 30% this step's.
                self.gdiis_weight_current = 0.7 * self.gdiis_weight_current + 0.3 * gdiis_weight
            else:
                print("GDIIS step validation failed, using Original step only")
                move_vector = original_move_vector
                self.gdiis_failure_count += 1
        else:
            # GDIIS failed
            move_vector = original_move_vector
            if not success:  # Only increment failure count for actual failures, not insufficient history
                self.gdiis_failure_count += 1
    else:
        # Not enough history points yet, use standard Original
        print(f"Building GDIIS history ({len(self.geom_history)}/{self.gdiis_min_points} points), using Original step")
        move_vector = original_move_vector

    # Final safety check for step size and numerical issues
    move_norm = np.linalg.norm(move_vector)
    if move_norm < 1e-10:
        # Degenerate (near-zero) step: fall back to a small steepest-descent step.
        print("Warning: Step size too small, using scaled gradient instead")
        move_vector = -0.1 * B_g.reshape(n_coords, 1)
    elif np.any(np.isnan(move_vector)) or np.any(np.isinf(move_vector)):
        # NaN/Inf contamination: fall back to steepest descent and wipe the
        # GDIIS state so corrupted history cannot poison later extrapolations.
        print("Warning: Numerical issues detected in step, using scaled gradient instead")
        move_vector = -0.1 * B_g.reshape(n_coords, 1)
        # Reset GDIIS history on numerical failure
        self.geom_history = []
        self.grad_history = []
        self.quality_history = []
        self.gdiis_failure_count = 0

    self.iter += 1

    return move_vector
|