hillclimber 0.1.6__cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hillclimber/__init__.py +41 -0
- hillclimber/actions.py +53 -0
- hillclimber/analysis.py +590 -0
- hillclimber/biases.py +293 -0
- hillclimber/calc.py +22 -0
- hillclimber/cvs.py +1070 -0
- hillclimber/interfaces.py +133 -0
- hillclimber/metadynamics.py +416 -0
- hillclimber/nodes.py +6 -0
- hillclimber/opes.py +359 -0
- hillclimber/pycv.py +362 -0
- hillclimber/selectors.py +230 -0
- hillclimber/virtual_atoms.py +341 -0
- hillclimber-0.1.6.dist-info/METADATA +325 -0
- hillclimber-0.1.6.dist-info/RECORD +475 -0
- hillclimber-0.1.6.dist-info/WHEEL +6 -0
- hillclimber-0.1.6.dist-info/entry_points.txt +8 -0
- hillclimber-0.1.6.dist-info/licenses/LICENSE +165 -0
- hillclimber-0.1.6.dist-info/sboms/auditwheel.cdx.json +1 -0
- hillclimber.libs/libgomp-a49a47f9.so.1.0.0 +0 -0
- plumed/__init__.py +104 -0
- plumed/_lib/bin/plumed +0 -0
- plumed/_lib/bin/plumed-config +9 -0
- plumed/_lib/bin/plumed-patch +9 -0
- plumed/_lib/include/plumed/adjmat/AdjacencyMatrixBase.h +659 -0
- plumed/_lib/include/plumed/adjmat/ContactMatrix.h +59 -0
- plumed/_lib/include/plumed/asmjit/arch.h +228 -0
- plumed/_lib/include/plumed/asmjit/arm.h +43 -0
- plumed/_lib/include/plumed/asmjit/asmjit.h +69 -0
- plumed/_lib/include/plumed/asmjit/asmjit_apibegin.h +143 -0
- plumed/_lib/include/plumed/asmjit/asmjit_apiend.h +93 -0
- plumed/_lib/include/plumed/asmjit/asmjit_build.h +971 -0
- plumed/_lib/include/plumed/asmjit/assembler.h +183 -0
- plumed/_lib/include/plumed/asmjit/base.h +56 -0
- plumed/_lib/include/plumed/asmjit/codebuilder.h +944 -0
- plumed/_lib/include/plumed/asmjit/codecompiler.h +767 -0
- plumed/_lib/include/plumed/asmjit/codeemitter.h +528 -0
- plumed/_lib/include/plumed/asmjit/codeholder.h +777 -0
- plumed/_lib/include/plumed/asmjit/constpool.h +286 -0
- plumed/_lib/include/plumed/asmjit/cpuinfo.h +402 -0
- plumed/_lib/include/plumed/asmjit/func.h +1327 -0
- plumed/_lib/include/plumed/asmjit/globals.h +370 -0
- plumed/_lib/include/plumed/asmjit/inst.h +137 -0
- plumed/_lib/include/plumed/asmjit/logging.h +317 -0
- plumed/_lib/include/plumed/asmjit/misc_p.h +103 -0
- plumed/_lib/include/plumed/asmjit/moved_string.h +318 -0
- plumed/_lib/include/plumed/asmjit/operand.h +1599 -0
- plumed/_lib/include/plumed/asmjit/osutils.h +207 -0
- plumed/_lib/include/plumed/asmjit/regalloc_p.h +597 -0
- plumed/_lib/include/plumed/asmjit/runtime.h +227 -0
- plumed/_lib/include/plumed/asmjit/simdtypes.h +1104 -0
- plumed/_lib/include/plumed/asmjit/utils.h +1387 -0
- plumed/_lib/include/plumed/asmjit/vmem.h +183 -0
- plumed/_lib/include/plumed/asmjit/x86.h +45 -0
- plumed/_lib/include/plumed/asmjit/x86assembler.h +125 -0
- plumed/_lib/include/plumed/asmjit/x86builder.h +117 -0
- plumed/_lib/include/plumed/asmjit/x86compiler.h +322 -0
- plumed/_lib/include/plumed/asmjit/x86emitter.h +5149 -0
- plumed/_lib/include/plumed/asmjit/x86globals.h +535 -0
- plumed/_lib/include/plumed/asmjit/x86inst.h +2547 -0
- plumed/_lib/include/plumed/asmjit/x86instimpl_p.h +74 -0
- plumed/_lib/include/plumed/asmjit/x86internal_p.h +108 -0
- plumed/_lib/include/plumed/asmjit/x86logging_p.h +92 -0
- plumed/_lib/include/plumed/asmjit/x86misc.h +417 -0
- plumed/_lib/include/plumed/asmjit/x86operand.h +1133 -0
- plumed/_lib/include/plumed/asmjit/x86regalloc_p.h +734 -0
- plumed/_lib/include/plumed/asmjit/zone.h +1157 -0
- plumed/_lib/include/plumed/bias/Bias.h +82 -0
- plumed/_lib/include/plumed/bias/ReweightBase.h +58 -0
- plumed/_lib/include/plumed/blas/blas.h +253 -0
- plumed/_lib/include/plumed/blas/def_external.h +61 -0
- plumed/_lib/include/plumed/blas/def_internal.h +97 -0
- plumed/_lib/include/plumed/blas/real.h +49 -0
- plumed/_lib/include/plumed/cltools/CLTool.h +32 -0
- plumed/_lib/include/plumed/clusters/ClusteringBase.h +70 -0
- plumed/_lib/include/plumed/colvar/Colvar.h +32 -0
- plumed/_lib/include/plumed/colvar/ColvarInput.h +68 -0
- plumed/_lib/include/plumed/colvar/ColvarShortcut.h +81 -0
- plumed/_lib/include/plumed/colvar/CoordinationBase.h +52 -0
- plumed/_lib/include/plumed/colvar/MultiColvarTemplate.h +333 -0
- plumed/_lib/include/plumed/colvar/PathMSDBase.h +101 -0
- plumed/_lib/include/plumed/colvar/RMSDVector.h +78 -0
- plumed/_lib/include/plumed/config/Config.h +118 -0
- plumed/_lib/include/plumed/config/version.h +9 -0
- plumed/_lib/include/plumed/contour/ContourFindingObject.h +87 -0
- plumed/_lib/include/plumed/contour/DistanceFromContourBase.h +82 -0
- plumed/_lib/include/plumed/contour/FindContour.h +67 -0
- plumed/_lib/include/plumed/core/Action.h +540 -0
- plumed/_lib/include/plumed/core/ActionAnyorder.h +48 -0
- plumed/_lib/include/plumed/core/ActionAtomistic.h +343 -0
- plumed/_lib/include/plumed/core/ActionForInterface.h +99 -0
- plumed/_lib/include/plumed/core/ActionPilot.h +57 -0
- plumed/_lib/include/plumed/core/ActionRegister.h +124 -0
- plumed/_lib/include/plumed/core/ActionSet.h +163 -0
- plumed/_lib/include/plumed/core/ActionSetup.h +48 -0
- plumed/_lib/include/plumed/core/ActionShortcut.h +73 -0
- plumed/_lib/include/plumed/core/ActionToGetData.h +59 -0
- plumed/_lib/include/plumed/core/ActionToPutData.h +101 -0
- plumed/_lib/include/plumed/core/ActionWithArguments.h +140 -0
- plumed/_lib/include/plumed/core/ActionWithMatrix.h +87 -0
- plumed/_lib/include/plumed/core/ActionWithValue.h +258 -0
- plumed/_lib/include/plumed/core/ActionWithVector.h +94 -0
- plumed/_lib/include/plumed/core/ActionWithVirtualAtom.h +123 -0
- plumed/_lib/include/plumed/core/CLTool.h +177 -0
- plumed/_lib/include/plumed/core/CLToolMain.h +102 -0
- plumed/_lib/include/plumed/core/CLToolRegister.h +108 -0
- plumed/_lib/include/plumed/core/Colvar.h +115 -0
- plumed/_lib/include/plumed/core/DataPassingObject.h +94 -0
- plumed/_lib/include/plumed/core/DataPassingTools.h +54 -0
- plumed/_lib/include/plumed/core/DomainDecomposition.h +120 -0
- plumed/_lib/include/plumed/core/ExchangePatterns.h +47 -0
- plumed/_lib/include/plumed/core/FlexibleBin.h +63 -0
- plumed/_lib/include/plumed/core/GREX.h +61 -0
- plumed/_lib/include/plumed/core/GenericMolInfo.h +89 -0
- plumed/_lib/include/plumed/core/Group.h +41 -0
- plumed/_lib/include/plumed/core/ModuleMap.h +30 -0
- plumed/_lib/include/plumed/core/ParallelTaskManager.h +1023 -0
- plumed/_lib/include/plumed/core/PbcAction.h +61 -0
- plumed/_lib/include/plumed/core/PlumedMain.h +632 -0
- plumed/_lib/include/plumed/core/PlumedMainInitializer.h +118 -0
- plumed/_lib/include/plumed/core/RegisterBase.h +340 -0
- plumed/_lib/include/plumed/core/TargetDist.h +48 -0
- plumed/_lib/include/plumed/core/Value.h +547 -0
- plumed/_lib/include/plumed/core/WithCmd.h +93 -0
- plumed/_lib/include/plumed/dimred/SMACOF.h +55 -0
- plumed/_lib/include/plumed/drr/DRR.h +383 -0
- plumed/_lib/include/plumed/drr/colvar_UIestimator.h +777 -0
- plumed/_lib/include/plumed/fisst/legendre_rule_fast.h +44 -0
- plumed/_lib/include/plumed/function/Custom.h +54 -0
- plumed/_lib/include/plumed/function/Function.h +85 -0
- plumed/_lib/include/plumed/function/FunctionOfMatrix.h +368 -0
- plumed/_lib/include/plumed/function/FunctionOfScalar.h +135 -0
- plumed/_lib/include/plumed/function/FunctionOfVector.h +296 -0
- plumed/_lib/include/plumed/function/FunctionSetup.h +180 -0
- plumed/_lib/include/plumed/function/FunctionShortcut.h +130 -0
- plumed/_lib/include/plumed/function/FunctionWithSingleArgument.h +165 -0
- plumed/_lib/include/plumed/gridtools/ActionWithGrid.h +43 -0
- plumed/_lib/include/plumed/gridtools/EvaluateGridFunction.h +99 -0
- plumed/_lib/include/plumed/gridtools/FunctionOfGrid.h +295 -0
- plumed/_lib/include/plumed/gridtools/GridCoordinatesObject.h +179 -0
- plumed/_lib/include/plumed/gridtools/GridSearch.h +135 -0
- plumed/_lib/include/plumed/gridtools/Interpolator.h +45 -0
- plumed/_lib/include/plumed/gridtools/KDE.h +455 -0
- plumed/_lib/include/plumed/gridtools/RDF.h +40 -0
- plumed/_lib/include/plumed/gridtools/SumOfKernels.h +219 -0
- plumed/_lib/include/plumed/isdb/MetainferenceBase.h +398 -0
- plumed/_lib/include/plumed/lapack/def_external.h +207 -0
- plumed/_lib/include/plumed/lapack/def_internal.h +388 -0
- plumed/_lib/include/plumed/lapack/lapack.h +899 -0
- plumed/_lib/include/plumed/lapack/lapack_limits.h +79 -0
- plumed/_lib/include/plumed/lapack/real.h +50 -0
- plumed/_lib/include/plumed/lepton/CompiledExpression.h +164 -0
- plumed/_lib/include/plumed/lepton/CustomFunction.h +143 -0
- plumed/_lib/include/plumed/lepton/Exception.h +93 -0
- plumed/_lib/include/plumed/lepton/ExpressionProgram.h +137 -0
- plumed/_lib/include/plumed/lepton/ExpressionTreeNode.h +145 -0
- plumed/_lib/include/plumed/lepton/Lepton.h +85 -0
- plumed/_lib/include/plumed/lepton/MSVC_erfc.h +123 -0
- plumed/_lib/include/plumed/lepton/Operation.h +1302 -0
- plumed/_lib/include/plumed/lepton/ParsedExpression.h +165 -0
- plumed/_lib/include/plumed/lepton/Parser.h +111 -0
- plumed/_lib/include/plumed/lepton/windowsIncludes.h +73 -0
- plumed/_lib/include/plumed/mapping/Path.h +44 -0
- plumed/_lib/include/plumed/mapping/PathProjectionCalculator.h +57 -0
- plumed/_lib/include/plumed/matrixtools/MatrixOperationBase.h +54 -0
- plumed/_lib/include/plumed/matrixtools/MatrixTimesMatrix.h +309 -0
- plumed/_lib/include/plumed/matrixtools/MatrixTimesVectorBase.h +365 -0
- plumed/_lib/include/plumed/matrixtools/OuterProduct.h +238 -0
- plumed/_lib/include/plumed/maze/Core.h +65 -0
- plumed/_lib/include/plumed/maze/Loss.h +86 -0
- plumed/_lib/include/plumed/maze/Member.h +66 -0
- plumed/_lib/include/plumed/maze/Memetic.h +799 -0
- plumed/_lib/include/plumed/maze/Optimizer.h +357 -0
- plumed/_lib/include/plumed/maze/Random_MT.h +156 -0
- plumed/_lib/include/plumed/maze/Tools.h +183 -0
- plumed/_lib/include/plumed/metatomic/vesin.h +188 -0
- plumed/_lib/include/plumed/molfile/Gromacs.h +2013 -0
- plumed/_lib/include/plumed/molfile/endianswap.h +217 -0
- plumed/_lib/include/plumed/molfile/fastio.h +683 -0
- plumed/_lib/include/plumed/molfile/largefiles.h +78 -0
- plumed/_lib/include/plumed/molfile/libmolfile_plugin.h +77 -0
- plumed/_lib/include/plumed/molfile/molfile_plugin.h +1034 -0
- plumed/_lib/include/plumed/molfile/periodic_table.h +248 -0
- plumed/_lib/include/plumed/molfile/readpdb.h +447 -0
- plumed/_lib/include/plumed/molfile/vmdplugin.h +236 -0
- plumed/_lib/include/plumed/multicolvar/MultiColvarShortcuts.h +45 -0
- plumed/_lib/include/plumed/opes/ExpansionCVs.h +79 -0
- plumed/_lib/include/plumed/sasa/Sasa.h +32 -0
- plumed/_lib/include/plumed/secondarystructure/SecondaryStructureBase.h +372 -0
- plumed/_lib/include/plumed/setup/ActionSetup.h +25 -0
- plumed/_lib/include/plumed/small_vector/small_vector.h +6114 -0
- plumed/_lib/include/plumed/symfunc/CoordinationNumbers.h +41 -0
- plumed/_lib/include/plumed/tools/Angle.h +52 -0
- plumed/_lib/include/plumed/tools/AtomDistribution.h +138 -0
- plumed/_lib/include/plumed/tools/AtomNumber.h +152 -0
- plumed/_lib/include/plumed/tools/BiasRepresentation.h +106 -0
- plumed/_lib/include/plumed/tools/BitmaskEnum.h +167 -0
- plumed/_lib/include/plumed/tools/Brent1DRootSearch.h +159 -0
- plumed/_lib/include/plumed/tools/CheckInRange.h +44 -0
- plumed/_lib/include/plumed/tools/Citations.h +74 -0
- plumed/_lib/include/plumed/tools/ColvarOutput.h +118 -0
- plumed/_lib/include/plumed/tools/Communicator.h +316 -0
- plumed/_lib/include/plumed/tools/ConjugateGradient.h +80 -0
- plumed/_lib/include/plumed/tools/DLLoader.h +79 -0
- plumed/_lib/include/plumed/tools/ERMSD.h +73 -0
- plumed/_lib/include/plumed/tools/Exception.h +406 -0
- plumed/_lib/include/plumed/tools/File.h +28 -0
- plumed/_lib/include/plumed/tools/FileBase.h +153 -0
- plumed/_lib/include/plumed/tools/FileTools.h +37 -0
- plumed/_lib/include/plumed/tools/ForwardDecl.h +54 -0
- plumed/_lib/include/plumed/tools/Grid.h +638 -0
- plumed/_lib/include/plumed/tools/HistogramBead.h +136 -0
- plumed/_lib/include/plumed/tools/IFile.h +117 -0
- plumed/_lib/include/plumed/tools/KernelFunctions.h +113 -0
- plumed/_lib/include/plumed/tools/Keywords.h +380 -0
- plumed/_lib/include/plumed/tools/LatticeReduction.h +66 -0
- plumed/_lib/include/plumed/tools/LeptonCall.h +64 -0
- plumed/_lib/include/plumed/tools/LinkCells.h +126 -0
- plumed/_lib/include/plumed/tools/Log.h +41 -0
- plumed/_lib/include/plumed/tools/LoopUnroller.h +163 -0
- plumed/_lib/include/plumed/tools/Matrix.h +721 -0
- plumed/_lib/include/plumed/tools/MatrixSquareBracketsAccess.h +138 -0
- plumed/_lib/include/plumed/tools/MergeVectorTools.h +153 -0
- plumed/_lib/include/plumed/tools/Minimise1DBrent.h +244 -0
- plumed/_lib/include/plumed/tools/MinimiseBase.h +120 -0
- plumed/_lib/include/plumed/tools/MolDataClass.h +51 -0
- plumed/_lib/include/plumed/tools/NeighborList.h +112 -0
- plumed/_lib/include/plumed/tools/OFile.h +286 -0
- plumed/_lib/include/plumed/tools/OpenACC.h +180 -0
- plumed/_lib/include/plumed/tools/OpenMP.h +75 -0
- plumed/_lib/include/plumed/tools/PDB.h +154 -0
- plumed/_lib/include/plumed/tools/Pbc.h +139 -0
- plumed/_lib/include/plumed/tools/PlumedHandle.h +105 -0
- plumed/_lib/include/plumed/tools/RMSD.h +493 -0
- plumed/_lib/include/plumed/tools/Random.h +80 -0
- plumed/_lib/include/plumed/tools/RootFindingBase.h +79 -0
- plumed/_lib/include/plumed/tools/Stopwatch.h +475 -0
- plumed/_lib/include/plumed/tools/Subprocess.h +142 -0
- plumed/_lib/include/plumed/tools/SwitchingFunction.h +208 -0
- plumed/_lib/include/plumed/tools/Tensor.h +724 -0
- plumed/_lib/include/plumed/tools/TokenizedLine.h +123 -0
- plumed/_lib/include/plumed/tools/Tools.h +638 -0
- plumed/_lib/include/plumed/tools/Torsion.h +55 -0
- plumed/_lib/include/plumed/tools/TrajectoryParser.h +118 -0
- plumed/_lib/include/plumed/tools/Tree.h +61 -0
- plumed/_lib/include/plumed/tools/TypesafePtr.h +463 -0
- plumed/_lib/include/plumed/tools/Units.h +167 -0
- plumed/_lib/include/plumed/tools/Vector.h +433 -0
- plumed/_lib/include/plumed/tools/View.h +296 -0
- plumed/_lib/include/plumed/tools/View2D.h +100 -0
- plumed/_lib/include/plumed/tools/h36.h +39 -0
- plumed/_lib/include/plumed/vatom/ActionWithVirtualAtom.h +32 -0
- plumed/_lib/include/plumed/ves/BasisFunctions.h +380 -0
- plumed/_lib/include/plumed/ves/CoeffsBase.h +310 -0
- plumed/_lib/include/plumed/ves/CoeffsMatrix.h +220 -0
- plumed/_lib/include/plumed/ves/CoeffsVector.h +251 -0
- plumed/_lib/include/plumed/ves/FermiSwitchingFunction.h +74 -0
- plumed/_lib/include/plumed/ves/GridIntegrationWeights.h +50 -0
- plumed/_lib/include/plumed/ves/GridLinearInterpolation.h +81 -0
- plumed/_lib/include/plumed/ves/GridProjWeights.h +61 -0
- plumed/_lib/include/plumed/ves/LinearBasisSetExpansion.h +303 -0
- plumed/_lib/include/plumed/ves/Optimizer.h +444 -0
- plumed/_lib/include/plumed/ves/TargetDistModifer.h +53 -0
- plumed/_lib/include/plumed/ves/TargetDistribution.h +266 -0
- plumed/_lib/include/plumed/ves/VesBias.h +545 -0
- plumed/_lib/include/plumed/ves/VesTools.h +142 -0
- plumed/_lib/include/plumed/ves/WaveletGrid.h +75 -0
- plumed/_lib/include/plumed/volumes/ActionVolume.h +268 -0
- plumed/_lib/include/plumed/volumes/VolumeShortcut.h +147 -0
- plumed/_lib/include/plumed/wrapper/Plumed.h +5025 -0
- plumed/_lib/include/plumed/xdrfile/xdrfile.h +663 -0
- plumed/_lib/include/plumed/xdrfile/xdrfile_trr.h +89 -0
- plumed/_lib/include/plumed/xdrfile/xdrfile_xtc.h +90 -0
- plumed/_lib/lib/PythonCVInterface.so +0 -0
- plumed/_lib/lib/libplumed.a +0 -0
- plumed/_lib/lib/libplumed.so +0 -0
- plumed/_lib/lib/libplumedKernel.so +0 -0
- plumed/_lib/lib/libplumedWrapper.a +0 -0
- plumed/_lib/lib/pkgconfig/plumed.pc +13 -0
- plumed/_lib/lib/pkgconfig/plumedInternals.pc +13 -0
- plumed/_lib/lib/pkgconfig/plumedWrapper.pc +13 -0
- plumed/_lib/lib/plumed/fortran/plumed.f90 +879 -0
- plumed/_lib/lib/plumed/fortran/plumed_f08.f90 +2625 -0
- plumed/_lib/lib/plumed/modulefile +69 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.config +43 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/CMakeLists.txt +543 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/CMakeLists.txt.preplumed +540 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdlib/expanded.cpp +1628 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdlib/expanded.cpp.preplumed +1590 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdlib/expanded.h +103 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdlib/expanded.h.preplumed +99 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdlib/sim_util.cpp +2527 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdlib/sim_util.cpp.preplumed +2513 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdrun/legacymdrunoptions.cpp +208 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdrun/legacymdrunoptions.cpp.preplumed +175 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdrun/legacymdrunoptions.h +408 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdrun/legacymdrunoptions.h.preplumed +394 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdrun/md.cpp +2348 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdrun/md.cpp.preplumed +2091 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdrun/minimize.cpp +3573 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdrun/minimize.cpp.preplumed +3495 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdrun/replicaexchange.cpp +1506 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdrun/replicaexchange.cpp.preplumed +1402 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdrun/replicaexchange.h +114 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdrun/replicaexchange.h.preplumed +106 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdrun/rerun.cpp +997 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdrun/rerun.cpp.preplumed +906 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdrun/runner.cpp +2780 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdrun/runner.cpp.preplumed +2738 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/modularsimulator/expandedensembleelement.cpp +224 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/modularsimulator/expandedensembleelement.cpp.preplumed +222 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.config +43 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/CMakeLists.txt +549 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/CMakeLists.txt.preplumed +546 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdlib/expanded.cpp +1632 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdlib/expanded.cpp.preplumed +1594 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdlib/expanded.h +104 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdlib/expanded.h.preplumed +100 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdlib/sim_util.cpp +2624 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdlib/sim_util.cpp.preplumed +2610 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdrun/legacymdrunoptions.cpp +208 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdrun/legacymdrunoptions.cpp.preplumed +175 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdrun/legacymdrunoptions.h +409 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdrun/legacymdrunoptions.h.preplumed +395 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdrun/md.cpp +2419 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdrun/md.cpp.preplumed +2164 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdrun/minimize.cpp +3546 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdrun/minimize.cpp.preplumed +3468 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdrun/replicaexchange.cpp +1513 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdrun/replicaexchange.cpp.preplumed +1409 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdrun/replicaexchange.h +114 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdrun/replicaexchange.h.preplumed +106 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdrun/rerun.cpp +991 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdrun/rerun.cpp.preplumed +900 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdrun/runner.cpp +2895 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdrun/runner.cpp.preplumed +2849 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/modularsimulator/expandedensembleelement.cpp +224 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/modularsimulator/expandedensembleelement.cpp.preplumed +222 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/taskassignment/decidegpuusage.cpp +886 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/taskassignment/decidegpuusage.cpp.preplumed +880 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/taskassignment/include/gromacs/taskassignment/decidegpuusage.h +347 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/taskassignment/include/gromacs/taskassignment/decidegpuusage.h.preplumed +345 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.config +43 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/CMakeLists.txt +575 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/CMakeLists.txt.preplumed +572 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdlib/expanded.cpp +1632 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdlib/expanded.cpp.preplumed +1594 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdlib/expanded.h +104 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdlib/expanded.h.preplumed +100 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdlib/sim_util.cpp +2564 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdlib/sim_util.cpp.preplumed +2550 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdrun/legacymdrunoptions.cpp +208 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdrun/legacymdrunoptions.cpp.preplumed +175 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdrun/legacymdrunoptions.h +410 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdrun/legacymdrunoptions.h.preplumed +396 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdrun/md.cpp +2435 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdrun/md.cpp.preplumed +2187 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdrun/minimize.cpp +3592 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdrun/minimize.cpp.preplumed +3514 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdrun/replicaexchange.cpp +1513 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdrun/replicaexchange.cpp.preplumed +1409 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdrun/replicaexchange.h +114 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdrun/replicaexchange.h.preplumed +106 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdrun/rerun.cpp +958 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdrun/rerun.cpp.preplumed +929 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdrun/runner.cpp +2987 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdrun/runner.cpp.preplumed +2941 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/modularsimulator/expandedensembleelement.cpp +224 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/modularsimulator/expandedensembleelement.cpp.preplumed +222 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/taskassignment/decidegpuusage.cpp +904 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/taskassignment/decidegpuusage.cpp.preplumed +898 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/taskassignment/include/gromacs/taskassignment/decidegpuusage.h +353 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/taskassignment/include/gromacs/taskassignment/decidegpuusage.h.preplumed +351 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.config +39 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.diff/cmake/gmxManagePlumed.cmake +82 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.diff/cmake/gmxManagePlumed.cmake.preplumed +82 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.diff/src/gromacs/applied_forces/plumed/plumedMDModule.cpp +162 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.diff/src/gromacs/applied_forces/plumed/plumedMDModule.cpp.preplumed +154 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.diff/src/gromacs/applied_forces/plumed/plumedOptions.cpp +107 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.diff/src/gromacs/applied_forces/plumed/plumedOptions.cpp.preplumed +99 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.diff/src/gromacs/applied_forces/plumed/plumedOptions.h +120 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.diff/src/gromacs/applied_forces/plumed/plumedOptions.h.preplumed +111 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.diff/src/gromacs/applied_forces/plumed/plumedforceprovider.cpp +215 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.diff/src/gromacs/applied_forces/plumed/plumedforceprovider.cpp.preplumed +197 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.diff/src/gromacs/applied_forces/plumed/plumedforceprovider.h +87 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.diff/src/gromacs/applied_forces/plumed/plumedforceprovider.h.preplumed +86 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.diff/src/gromacs/mdrun/runner.cpp +2971 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.diff/src/gromacs/mdrun/runner.cpp.preplumed +2970 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.diff/src/gromacs/mdrunutility/mdmodulesnotifiers.h +430 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.diff/src/gromacs/mdrunutility/mdmodulesnotifiers.h.preplumed +429 -0
- plumed/_lib/lib/plumed/patches/namd-2.12.config +30 -0
- plumed/_lib/lib/plumed/patches/namd-2.12.diff +267 -0
- plumed/_lib/lib/plumed/patches/namd-2.13.config +30 -0
- plumed/_lib/lib/plumed/patches/namd-2.13.diff +267 -0
- plumed/_lib/lib/plumed/patches/namd-2.14.config +30 -0
- plumed/_lib/lib/plumed/patches/namd-2.14.diff +268 -0
- plumed/_lib/lib/plumed/patches/patch.sh +500 -0
- plumed/_lib/lib/plumed/patches/qespresso-5.0.2.config +25 -0
- plumed/_lib/lib/plumed/patches/qespresso-5.0.2.diff/PW/src/forces.f90 +368 -0
- plumed/_lib/lib/plumed/patches/qespresso-5.0.2.diff/PW/src/forces.f90.preplumed +366 -0
- plumed/_lib/lib/plumed/patches/qespresso-5.0.2.diff/PW/src/plugin_forces.f90 +71 -0
- plumed/_lib/lib/plumed/patches/qespresso-5.0.2.diff/PW/src/plugin_forces.f90.preplumed +24 -0
- plumed/_lib/lib/plumed/patches/qespresso-5.0.2.diff/PW/src/plugin_initialization.f90 +62 -0
- plumed/_lib/lib/plumed/patches/qespresso-5.0.2.diff/PW/src/plugin_initialization.f90.preplumed +21 -0
- plumed/_lib/lib/plumed/patches/qespresso-5.0.2.diff/PW/src/pwscf.f90 +189 -0
- plumed/_lib/lib/plumed/patches/qespresso-5.0.2.diff/PW/src/pwscf.f90.preplumed +185 -0
- plumed/_lib/lib/plumed/patches/qespresso-6.2.config +26 -0
- plumed/_lib/lib/plumed/patches/qespresso-6.2.diff/PW/src/forces.f90 +422 -0
- plumed/_lib/lib/plumed/patches/qespresso-6.2.diff/PW/src/forces.f90.preplumed +420 -0
- plumed/_lib/lib/plumed/patches/qespresso-6.2.diff/PW/src/plugin_ext_forces.f90 +70 -0
- plumed/_lib/lib/plumed/patches/qespresso-6.2.diff/PW/src/plugin_ext_forces.f90.preplumed +23 -0
- plumed/_lib/lib/plumed/patches/qespresso-6.2.diff/PW/src/plugin_initialization.f90 +62 -0
- plumed/_lib/lib/plumed/patches/qespresso-6.2.diff/PW/src/plugin_initialization.f90.preplumed +21 -0
- plumed/_lib/lib/plumed/patches/qespresso-6.2.diff/PW/src/run_pwscf.f90 +233 -0
- plumed/_lib/lib/plumed/patches/qespresso-6.2.diff/PW/src/run_pwscf.f90.preplumed +230 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.0.config +28 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.0.diff/Modules/Makefile +175 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.0.diff/Modules/Makefile.preplumed +171 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.0.diff/PW/src/forces.f90 +486 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.0.diff/PW/src/forces.f90.preplumed +484 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.0.diff/PW/src/plugin_ext_forces.f90 +74 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.0.diff/PW/src/plugin_ext_forces.f90.preplumed +23 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.0.diff/PW/src/plugin_initialization.f90 +64 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.0.diff/PW/src/plugin_initialization.f90.preplumed +21 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.0.diff/PW/src/run_pwscf.f90 +532 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.0.diff/PW/src/run_pwscf.f90.preplumed +518 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.2.config +28 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.2.diff/Modules/Makefile +249 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.2.diff/Modules/Makefile.preplumed +244 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.2.diff/PW/src/forces.f90 +532 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.2.diff/PW/src/forces.f90.preplumed +535 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.2.diff/PW/src/plugin_ext_forces.f90 +74 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.2.diff/PW/src/plugin_ext_forces.f90.preplumed +23 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.2.diff/PW/src/plugin_initialization.f90 +64 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.2.diff/PW/src/plugin_initialization.f90.preplumed +21 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.2.diff/PW/src/run_pwscf.f90 +569 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.2.diff/PW/src/run_pwscf.f90.preplumed +560 -0
- plumed/_lib/lib/plumed/plumed-config +9 -0
- plumed/_lib/lib/plumed/plumed-mklib +9 -0
- plumed/_lib/lib/plumed/plumed-newcv +9 -0
- plumed/_lib/lib/plumed/plumed-partial_tempering +9 -0
- plumed/_lib/lib/plumed/plumed-patch +9 -0
- plumed/_lib/lib/plumed/plumed-runtime +0 -0
- plumed/_lib/lib/plumed/plumed-selector +9 -0
- plumed/_lib/lib/plumed/plumed-vim2html +9 -0
- plumed/_lib/lib/plumed/scripts/config.sh +126 -0
- plumed/_lib/lib/plumed/scripts/mklib.sh +175 -0
- plumed/_lib/lib/plumed/scripts/newcv.sh +26 -0
- plumed/_lib/lib/plumed/scripts/partial_tempering.sh +319 -0
- plumed/_lib/lib/plumed/scripts/patch.sh +4 -0
- plumed/_lib/lib/plumed/scripts/selector.sh +234 -0
- plumed/_lib/lib/plumed/scripts/vim2html.sh +190 -0
- plumed/_lib/lib/plumed/src/colvar/Template.cpp +116 -0
- plumed/_lib/lib/plumed/src/config/compile_options.sh +3 -0
- plumed/_lib/lib/plumed/src/config/config.txt +179 -0
- plumed/_lib/lib/plumed/src/lib/Plumed.cmake +8 -0
- plumed/_lib/lib/plumed/src/lib/Plumed.cmake.runtime +5 -0
- plumed/_lib/lib/plumed/src/lib/Plumed.cmake.shared +5 -0
- plumed/_lib/lib/plumed/src/lib/Plumed.cmake.static +5 -0
- plumed/_lib/lib/plumed/src/lib/Plumed.inc +8 -0
- plumed/_lib/lib/plumed/src/lib/Plumed.inc.runtime +5 -0
- plumed/_lib/lib/plumed/src/lib/Plumed.inc.shared +5 -0
- plumed/_lib/lib/plumed/src/lib/Plumed.inc.static +5 -0
- plumed/_lib/lib/plumed/vim/scripts.vim +6 -0
- plumed/_plumed_core.cpython-311-aarch64-linux-gnu.so +0 -0
- plumed/_plumed_core.cpython-312-aarch64-linux-gnu.so +0 -0
- plumed/_plumed_core.cpython-313-aarch64-linux-gnu.so +0 -0
- plumed/_plumed_core.cpython-314-aarch64-linux-gnu.so +0 -0
- plumed/_plumed_core.cpython-314t-aarch64-linux-gnu.so +0 -0
- plumedCommunications.cpython-311-aarch64-linux-gnu.so +0 -0
- plumedCommunications.cpython-312-aarch64-linux-gnu.so +0 -0
- plumedCommunications.cpython-313-aarch64-linux-gnu.so +0 -0
- plumedCommunications.cpython-314-aarch64-linux-gnu.so +0 -0
- plumedCommunications.cpython-314t-aarch64-linux-gnu.so +0 -0
- plumedCommunications.pyi +431 -0
|
@@ -0,0 +1,3592 @@
|
|
|
1
|
+
/*
|
|
2
|
+
* This file is part of the GROMACS molecular simulation package.
|
|
3
|
+
*
|
|
4
|
+
* Copyright 1991- The GROMACS Authors
|
|
5
|
+
* and the project initiators Erik Lindahl, Berk Hess and David van der Spoel.
|
|
6
|
+
* Consult the AUTHORS/COPYING files and https://www.gromacs.org for details.
|
|
7
|
+
*
|
|
8
|
+
* GROMACS is free software; you can redistribute it and/or
|
|
9
|
+
* modify it under the terms of the GNU Lesser General Public License
|
|
10
|
+
* as published by the Free Software Foundation; either version 2.1
|
|
11
|
+
* of the License, or (at your option) any later version.
|
|
12
|
+
*
|
|
13
|
+
* GROMACS is distributed in the hope that it will be useful,
|
|
14
|
+
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
15
|
+
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
16
|
+
* Lesser General Public License for more details.
|
|
17
|
+
*
|
|
18
|
+
* You should have received a copy of the GNU Lesser General Public
|
|
19
|
+
* License along with GROMACS; if not, see
|
|
20
|
+
* https://www.gnu.org/licenses, or write to the Free Software Foundation,
|
|
21
|
+
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
22
|
+
*
|
|
23
|
+
* If you want to redistribute modifications to GROMACS, please
|
|
24
|
+
* consider that scientific software is very special. Version
|
|
25
|
+
* control is crucial - bugs must be traceable. We will be happy to
|
|
26
|
+
* consider code for inclusion in the official distribution, but
|
|
27
|
+
* derived work must not be called official GROMACS. Details are found
|
|
28
|
+
* in the README & COPYING files - if they are missing, get the
|
|
29
|
+
* official version at https://www.gromacs.org.
|
|
30
|
+
*
|
|
31
|
+
* To help us fund GROMACS development, we humbly ask that you cite
|
|
32
|
+
* the research papers on the package. Check out https://www.gromacs.org.
|
|
33
|
+
*/
|
|
34
|
+
/*! \internal \file
|
|
35
|
+
*
|
|
36
|
+
* \brief This file defines integrators for energy minimization
|
|
37
|
+
*
|
|
38
|
+
* \author Berk Hess <hess@kth.se>
|
|
39
|
+
* \author Erik Lindahl <erik@kth.se>
|
|
40
|
+
* \ingroup module_mdrun
|
|
41
|
+
*/
|
|
42
|
+
#include "gmxpre.h"
|
|
43
|
+
|
|
44
|
+
#include "config.h"
|
|
45
|
+
|
|
46
|
+
#include <cmath>
|
|
47
|
+
#include <cstring>
|
|
48
|
+
#include <ctime>
|
|
49
|
+
|
|
50
|
+
#include <algorithm>
|
|
51
|
+
#include <limits>
|
|
52
|
+
#include <vector>
|
|
53
|
+
|
|
54
|
+
#include "gromacs/commandline/filenm.h"
|
|
55
|
+
#include "gromacs/domdec/collect.h"
|
|
56
|
+
#include "gromacs/domdec/dlbtiming.h"
|
|
57
|
+
#include "gromacs/domdec/domdec.h"
|
|
58
|
+
#include "gromacs/domdec/domdec_struct.h"
|
|
59
|
+
#include "gromacs/domdec/mdsetup.h"
|
|
60
|
+
#include "gromacs/domdec/partition.h"
|
|
61
|
+
#include "gromacs/ewald/pme_pp.h"
|
|
62
|
+
#include "gromacs/fileio/confio.h"
|
|
63
|
+
#include "gromacs/fileio/mtxio.h"
|
|
64
|
+
#include "gromacs/gmxlib/network.h"
|
|
65
|
+
#include "gromacs/gmxlib/nrnb.h"
|
|
66
|
+
#include "gromacs/imd/imd.h"
|
|
67
|
+
#include "gromacs/linearalgebra/sparsematrix.h"
|
|
68
|
+
#include "gromacs/listed_forces/listed_forces.h"
|
|
69
|
+
#include "gromacs/listed_forces/listed_forces_gpu.h"
|
|
70
|
+
#include "gromacs/math/functions.h"
|
|
71
|
+
#include "gromacs/math/vec.h"
|
|
72
|
+
#include "gromacs/mdlib/constr.h"
|
|
73
|
+
#include "gromacs/mdlib/coupling.h"
|
|
74
|
+
#include "gromacs/mdlib/ebin.h"
|
|
75
|
+
#include "gromacs/mdlib/enerdata_utils.h"
|
|
76
|
+
#include "gromacs/mdlib/energyoutput.h"
|
|
77
|
+
#include "gromacs/mdlib/force.h"
|
|
78
|
+
#include "gromacs/mdlib/force_flags.h"
|
|
79
|
+
#include "gromacs/mdlib/forcerec.h"
|
|
80
|
+
#include "gromacs/mdlib/gmx_omp_nthreads.h"
|
|
81
|
+
#include "gromacs/mdlib/md_support.h"
|
|
82
|
+
#include "gromacs/mdlib/mdatoms.h"
|
|
83
|
+
#include "gromacs/mdlib/stat.h"
|
|
84
|
+
#include "gromacs/mdlib/tgroup.h"
|
|
85
|
+
#include "gromacs/mdlib/trajectory_writing.h"
|
|
86
|
+
#include "gromacs/mdlib/update.h"
|
|
87
|
+
#include "gromacs/mdlib/vsite.h"
|
|
88
|
+
#include "gromacs/mdrunutility/handlerestart.h"
|
|
89
|
+
#include "gromacs/mdrunutility/multisim.h" /*PLUMED*/
|
|
90
|
+
#include "gromacs/mdrunutility/printtime.h"
|
|
91
|
+
#include "gromacs/mdtypes/checkpointdata.h"
|
|
92
|
+
#include "gromacs/mdtypes/commrec.h"
|
|
93
|
+
#include "gromacs/mdtypes/forcebuffers.h"
|
|
94
|
+
#include "gromacs/mdtypes/forcerec.h"
|
|
95
|
+
#include "gromacs/mdtypes/inputrec.h"
|
|
96
|
+
#include "gromacs/mdtypes/interaction_const.h"
|
|
97
|
+
#include "gromacs/mdtypes/md_enums.h"
|
|
98
|
+
#include "gromacs/mdtypes/mdatom.h"
|
|
99
|
+
#include "gromacs/mdtypes/mdrunoptions.h"
|
|
100
|
+
#include "gromacs/mdtypes/multipletimestepping.h"
|
|
101
|
+
#include "gromacs/mdtypes/observablesreducer.h"
|
|
102
|
+
#include "gromacs/mdtypes/state.h"
|
|
103
|
+
#include "gromacs/pbcutil/pbc.h"
|
|
104
|
+
#include "gromacs/taskassignment/include/gromacs/taskassignment/decidesimulationworkload.h"
|
|
105
|
+
#include "gromacs/timing/wallcycle.h"
|
|
106
|
+
#include "gromacs/timing/walltime_accounting.h"
|
|
107
|
+
#include "gromacs/topology/mtop_util.h"
|
|
108
|
+
#include "gromacs/topology/topology.h"
|
|
109
|
+
#include "gromacs/utility/cstringutil.h"
|
|
110
|
+
#include "gromacs/utility/exceptions.h"
|
|
111
|
+
#include "gromacs/utility/fatalerror.h"
|
|
112
|
+
#include "gromacs/utility/logger.h"
|
|
113
|
+
#include "gromacs/utility/smalloc.h"
|
|
114
|
+
|
|
115
|
+
#include "legacysimulator.h"
|
|
116
|
+
#include "shellfc.h"
|
|
117
|
+
|
|
118
|
+
using gmx::ArrayRef;
|
|
119
|
+
using gmx::MDModulesNotifiers;
|
|
120
|
+
using gmx::MdrunScheduleWorkload;
|
|
121
|
+
using gmx::RVec;
|
|
122
|
+
using gmx::VirtualSitesHandler;
|
|
123
|
+
|
|
124
|
+
/* PLUMED */
|
|
125
|
+
#include "../../../Plumed.h"
|
|
126
|
+
extern int plumedswitch;
|
|
127
|
+
extern plumed plumedmain;
|
|
128
|
+
/* END PLUMED */
|
|
129
|
+
|
|
130
|
+
//! Utility structure for manipulating states during EM
|
|
131
|
+
typedef struct em_state
|
|
132
|
+
{
|
|
133
|
+
//! Copy of the global state
|
|
134
|
+
t_state s;
|
|
135
|
+
//! Force array
|
|
136
|
+
gmx::ForceBuffers f;
|
|
137
|
+
//! Potential energy
|
|
138
|
+
real epot;
|
|
139
|
+
//! Norm of the force
|
|
140
|
+
real fnorm;
|
|
141
|
+
//! Maximum force
|
|
142
|
+
real fmax;
|
|
143
|
+
//! Direction
|
|
144
|
+
int a_fmax;
|
|
145
|
+
} em_state_t;
|
|
146
|
+
|
|
147
|
+
//! Print the EM starting conditions
|
|
148
|
+
static void print_em_start(FILE*                     fplog,
                           const t_commrec*          cr,
                           gmx_walltime_accounting_t walltime_accounting,
                           gmx_wallcycle*            wcycle,
                           const char*               name)
{
    // Start both timing facilities before announcing the run, so the
    // reported wall time covers the entire minimization including the
    // start-up banner.
    walltime_accounting_start_time(walltime_accounting);
    wallcycle_start(wcycle, WallCycleCounter::Run);
    // Writes the standard "Started <name> on rank ..." banner to the log.
    print_start(fplog, cr, walltime_accounting, name);
}
|
|
158
|
+
|
|
159
|
+
//! Stop counting time for EM
|
|
160
|
+
static void em_time_end(gmx_walltime_accounting_t walltime_accounting, gmx_wallcycle* wcycle)
{
    // Mirror of print_em_start: stop the cycle counter first, then close
    // the wall-time accounting for the run.
    wallcycle_stop(wcycle, WallCycleCounter::Run);

    walltime_accounting_end_time(walltime_accounting);
}
|
|
166
|
+
|
|
167
|
+
//! Printing a log file and console header
|
|
168
|
+
static void sp_header(FILE* out, const char* minimizer, real ftol, int nsteps)
{
    // One-time header for a minimization run: algorithm name, the
    // force-convergence tolerance (Fmax) and the maximum step count.
    fprintf(out, "\n");
    fprintf(out, "%s:\n", minimizer);
    fprintf(out, " Tolerance (Fmax) = %12.5e\n", ftol);
    fprintf(out, " Number of steps = %12d\n", nsteps);
}
|
|
175
|
+
|
|
176
|
+
//! Print warning message
|
|
177
|
+
static void warn_step(FILE* fp, real ftol, real fmax, gmx_bool bLastStep, gmx_bool bConstrain)
|
|
178
|
+
{
|
|
179
|
+
constexpr bool realIsDouble = GMX_DOUBLE;
|
|
180
|
+
char buffer[2048];
|
|
181
|
+
|
|
182
|
+
if (!std::isfinite(fmax))
|
|
183
|
+
{
|
|
184
|
+
sprintf(buffer,
|
|
185
|
+
"\nEnergy minimization has stopped because the force "
|
|
186
|
+
"on at least one atom is not finite. This usually means "
|
|
187
|
+
"atoms are overlapping. Modify the input coordinates to "
|
|
188
|
+
"remove atom overlap or use soft-core potentials with "
|
|
189
|
+
"the free energy code to avoid infinite forces.\n%s",
|
|
190
|
+
!realIsDouble ? "You could also be lucky that switching to double precision "
|
|
191
|
+
"is sufficient to obtain finite forces.\n"
|
|
192
|
+
: "");
|
|
193
|
+
}
|
|
194
|
+
else if (bLastStep)
|
|
195
|
+
{
|
|
196
|
+
sprintf(buffer,
|
|
197
|
+
"\nEnergy minimization reached the maximum number "
|
|
198
|
+
"of steps before the forces reached the requested "
|
|
199
|
+
"precision Fmax < %g.\n",
|
|
200
|
+
ftol);
|
|
201
|
+
}
|
|
202
|
+
else
|
|
203
|
+
{
|
|
204
|
+
sprintf(buffer,
|
|
205
|
+
"\nEnergy minimization has stopped, but the forces have "
|
|
206
|
+
"not converged to the requested precision Fmax < %g (which "
|
|
207
|
+
"may not be possible for your system). It stopped "
|
|
208
|
+
"because the algorithm tried to make a new step whose size "
|
|
209
|
+
"was too small, or there was no change in the energy since "
|
|
210
|
+
"last step. Either way, we regard the minimization as "
|
|
211
|
+
"converged to within the available machine precision, "
|
|
212
|
+
"given your starting configuration and EM parameters.\n%s%s",
|
|
213
|
+
ftol,
|
|
214
|
+
!realIsDouble ? "\nDouble precision normally gives you higher accuracy, but "
|
|
215
|
+
"this is often not needed for preparing to run molecular "
|
|
216
|
+
"dynamics.\n"
|
|
217
|
+
: "",
|
|
218
|
+
bConstrain ? "You might need to increase your constraint accuracy, or turn\n"
|
|
219
|
+
"off constraints altogether (set constraints = none in mdp file)\n"
|
|
220
|
+
: "");
|
|
221
|
+
}
|
|
222
|
+
|
|
223
|
+
fputs(wrap_lines(buffer, 78, 0, FALSE), stderr);
|
|
224
|
+
fputs(wrap_lines(buffer, 78, 0, FALSE), fp);
|
|
225
|
+
}
|
|
226
|
+
|
|
227
|
+
//! Print message about convergence of the EM
|
|
228
|
+
static void print_converged(FILE*             fp,
                            const char*       alg,
                            real              ftol,
                            int64_t           count,
                            gmx_bool          bDone,
                            int64_t           nsteps,
                            const em_state_t* ems,
                            double            sqrtNumAtoms)
{
    // Final summary for a minimization: whether the algorithm converged,
    // hit machine precision, or exhausted its step budget, followed by the
    // final energy/force statistics from \p ems.
    char buf[STEPSTRSIZE];

    if (bDone)
    {
        // Reached the requested tolerance.
        fprintf(fp, "\n%s converged to Fmax < %g in %s steps\n", alg, ftol, gmx_step_str(count, buf));
    }
    else if (count < nsteps)
    {
        // Stopped early: no further progress possible at this precision.
        fprintf(fp,
                "\n%s converged to machine precision in %s steps,\n"
                "but did not reach the requested Fmax < %g.\n",
                alg,
                gmx_step_str(count, buf),
                ftol);
    }
    else
    {
        // Used all allowed steps without converging.
        fprintf(fp, "\n%s did not converge to Fmax < %g in %s steps.\n", alg, ftol, gmx_step_str(count, buf));
    }

// More digits are printed in double precision; the force norm is reported
// per sqrt(N) so it is roughly size-independent.
#if GMX_DOUBLE
    fprintf(fp, "Potential Energy = %21.14e\n", ems->epot);
    fprintf(fp, "Maximum force = %21.14e on atom %d\n", ems->fmax, ems->a_fmax + 1);
    fprintf(fp, "Norm of force = %21.14e\n", ems->fnorm / sqrtNumAtoms);
#else
    fprintf(fp, "Potential Energy = %14.7e\n", ems->epot);
    fprintf(fp, "Maximum force = %14.7e on atom %d\n", ems->fmax, ems->a_fmax + 1);
    fprintf(fp, "Norm of force = %14.7e\n", ems->fnorm / sqrtNumAtoms);
#endif
}
|
|
267
|
+
|
|
268
|
+
//! Compute the norm and max of the force array in parallel
|
|
269
|
+
static void get_f_norm_max(const t_commrec*               cr,
                           const t_grpopts*               opts,
                           t_mdatoms*                     mdatoms,
                           gmx::ArrayRef<const gmx::RVec> f,
                           real*                          fnorm,
                           real*                          fmax,
                           int*                           a_fmax)
{
    double fnorm2, *sum;
    real   fmax2, fam;
    int    la_max, a_max, start, end, i, m, gf;

    /* This routine finds the largest force and returns it.
     * On parallel machines the global max is taken.
     */
    // Any of fnorm/fmax/a_fmax may be nullptr; only non-null outputs are set.
    fnorm2 = 0;
    fmax2  = 0;
    la_max = -1; // local index of the atom with the largest force, -1 if none
    start  = 0;
    end    = mdatoms->homenr; // only home atoms are scanned on this rank
    if (!mdatoms->cFREEZE.empty())
    {
        // With freeze groups, frozen dimensions must not contribute to the
        // force norm/max, so accumulate per-dimension.
        for (i = start; i < end; i++)
        {
            gf  = mdatoms->cFREEZE[i];
            fam = 0;
            for (m = 0; m < DIM; m++)
            {
                if (!opts->nFreeze[gf][m])
                {
                    fam += gmx::square(f[i][m]);
                }
            }
            fnorm2 += fam;
            if (fam > fmax2)
            {
                fmax2  = fam;
                la_max = i;
            }
        }
    }
    else
    {
        // No freeze groups: use the full squared norm of each force vector.
        for (i = start; i < end; i++)
        {
            fam = norm2(f[i]);
            fnorm2 += fam;
            if (fam > fmax2)
            {
                fmax2  = fam;
                la_max = i;
            }
        }
    }

    // Translate the local index to a global atom index under domain
    // decomposition; without DD local and global indices coincide.
    if (la_max >= 0 && haveDDAtomOrdering(*cr))
    {
        a_max = cr->dd->globalAtomIndices[la_max];
    }
    else
    {
        a_max = la_max;
    }
    if (PAR(cr))
    {
        // Pack per-rank (fmax2, a_max) pairs plus a shared fnorm2 slot into
        // one buffer so a single gmx_sumd reduction suffices.  Layout:
        // sum[2*rank] = fmax2, sum[2*rank+1] = a_max, sum[2*nnodes] = fnorm2.
        snew(sum, 2 * cr->nnodes + 1);
        sum[2 * cr->nodeid]     = fmax2;
        sum[2 * cr->nodeid + 1] = a_max;
        sum[2 * cr->nnodes]     = fnorm2;
        gmx_sumd(2 * cr->nnodes + 1, sum, cr);
        fnorm2 = sum[2 * cr->nnodes];
        /* Determine the global maximum */
        for (i = 0; i < cr->nnodes; i++)
        {
            if (sum[2 * i] > fmax2)
            {
                fmax2 = sum[2 * i];
                // The atom index travelled through the reduction as a double;
                // round it back to an int.
                a_max = gmx::roundToInt(sum[2 * i + 1]);
            }
        }
        sfree(sum);
    }

    if (fnorm)
    {
        *fnorm = sqrt(fnorm2);
    }
    if (fmax)
    {
        *fmax = sqrt(fmax2);
    }
    if (a_fmax)
    {
        *a_fmax = a_max;
    }
}
|
|
365
|
+
|
|
366
|
+
//! Compute the norm of the force
|
|
367
|
+
// Convenience wrapper: compute force norm/max for an EM state and store the
// results (fnorm, fmax, a_fmax) directly in that state.
static void get_state_f_norm_max(const t_commrec* cr, const t_grpopts* opts, t_mdatoms* mdatoms, em_state_t* ems)
{
    get_f_norm_max(cr, opts, mdatoms, ems->f.view().force(), &ems->fnorm, &ems->fmax, &ems->a_fmax);
}
|
|
371
|
+
|
|
372
|
+
//! Initialize the energy minimization
|
|
373
|
+
static void init_em(FILE*                     fplog,
                    const gmx::MDLogger&      mdlog,
                    const char*               title,
                    const t_commrec*          cr,
                    const gmx_multisim_t*     ms, /* PLUMED */
                    const t_inputrec*         ir,
                    const MDModulesNotifiers& mdModulesNotifiers,
                    gmx::ImdSession*          imdSession,
                    pull_t*                   pull_work,
                    t_state*                  state_global,
                    const gmx_mtop_t&         top_global,
                    em_state_t*               ems,
                    gmx_localtop_t*           top,
                    t_nrnb*                   nrnb,
                    t_forcerec*               fr,
                    gmx::MDAtoms*             mdAtoms,
                    gmx_global_stat_t*        gstat,
                    VirtualSitesHandler*      vsite,
                    gmx::Constraints*         constr,
                    gmx_shellfc_t**           shellfc)
{
    real dvdl_constr;

    if (fplog)
    {
        fprintf(fplog, "Initiating %s\n", title);
    }

    if (MAIN(cr))
    {
        // Minimization has no temperature coupling groups.
        state_global->ngtc = 0;
    }
    // Only the main rank owns the global state; other ranks pass null/empty
    // views into initialize_lambdas.
    int*                fep_state = MAIN(cr) ? &state_global->fep_state : nullptr;
    gmx::ArrayRef<real> lambda    = MAIN(cr) ? state_global->lambda : gmx::ArrayRef<real>();
    initialize_lambdas(
            fplog, ir->efep, ir->bSimTemp, *ir->fepvals, ir->simtempvals->temperatures, nullptr, MAIN(cr), fep_state, lambda);

    if (ir->eI == IntegrationAlgorithm::NM)
    {
        // Normal-mode analysis needs explicit shell/flexible-constraint
        // relaxation, so set up the shell force machinery.
        GMX_ASSERT(shellfc != nullptr, "With NM we always support shells");

        *shellfc = init_shell_flexcon(stdout,
                                      top_global,
                                      constr ? constr->numFlexibleConstraints() : 0,
                                      ir->nstcalcenergy,
                                      haveDDAtomOrdering(*cr),
                                      thisRankHasDuty(cr, DUTY_PME));
    }
    else
    {
        GMX_ASSERT(EI_ENERGY_MINIMIZATION(ir->eI),
                   "This else currently only handles energy minimizers, consider if your algorithm "
                   "needs shell/flexible-constraint support");

        /* With energy minimization, shells and flexible constraints are
         * automatically minimized when treated like normal DOFS.
         */
        if (shellfc != nullptr)
        {
            *shellfc = nullptr;
        }
    }

    if (haveDDAtomOrdering(*cr))
    {
        // Local state only becomes valid now.
        dd_init_local_state(*cr->dd, state_global, &ems->s);

        /* Distribute the charge groups over the nodes from the main node */
        dd_partition_system(fplog,
                            mdlog,
                            ir->init_step,
                            cr,
                            TRUE,
                            state_global,
                            top_global,
                            *ir,
                            mdModulesNotifiers,
                            imdSession,
                            pull_work,
                            &ems->s,
                            &ems->f,
                            mdAtoms,
                            top,
                            fr,
                            vsite,
                            constr,
                            nrnb,
                            nullptr,
                            FALSE);
        dd_store_state(*cr->dd, &ems->s);
    }
    else
    {
        /* Just copy the state */
        ems->s = *state_global;

        mdAlgorithmsSetupAtomData(
                cr, *ir, top_global, top, fr, &ems->f, mdAtoms, constr, vsite, shellfc ? *shellfc : nullptr);
    }

    update_mdatoms(mdAtoms->mdatoms(), ems->s.lambda[FreeEnergyPerturbationCouplingType::Mass]);

    if (constr)
    {
        // TODO how should this cross-module support dependency be managed?
        if (ir->eConstrAlg == ConstraintAlgorithm::Shake && gmx_mtop_ftype_count(top_global, F_CONSTR) > 0)
        {
            // SHAKE is iterative and not suitable for minimization steps.
            gmx_fatal(FARGS,
                      "Can not do energy minimization with %s, use %s\n",
                      enumValueToString(ConstraintAlgorithm::Shake),
                      enumValueToString(ConstraintAlgorithm::Lincs));
        }

        if (!ir->bContinuation)
        {
            /* Constrain the starting coordinates */
            bool needsLogging  = true;
            bool computeEnergy = true;
            bool computeVirial = false;
            dvdl_constr        = 0;
            constr->apply(needsLogging,
                          computeEnergy,
                          -1,
                          0,
                          1.0,
                          ems->s.x.arrayRefWithPadding(),
                          ems->s.x.arrayRefWithPadding(),
                          ArrayRef<RVec>(),
                          ems->s.box,
                          ems->s.lambda[FreeEnergyPerturbationCouplingType::Fep],
                          &dvdl_constr,
                          gmx::ArrayRefWithPadding<RVec>(),
                          computeVirial,
                          nullptr,
                          gmx::ConstraintVariable::Positions);
        }
    }

    if (PAR(cr))
    {
        *gstat = global_stat_init(ir);
    }
    else
    {
        *gstat = nullptr;
    }

    calc_shifts(ems->s.box, fr->shift_vec);

    /* PLUMED */
    // PLUMED patch: wire up communicators, basic run parameters and (under
    // domain decomposition) the local-atom layout, then initialize PLUMED.
    if(plumedswitch){
      if(isMultiSim(ms)) {
        // Multi-simulation (replica-exchange style) setup: inter- and
        // intra-simulation communicators for GREX.
        if(MAIN(cr)) plumed_cmd(plumedmain,"GREX setMPIIntercomm",&ms->mainRanksComm_);
        if(PAR(cr)){
          if(haveDDAtomOrdering(*cr)) {
            plumed_cmd(plumedmain,"GREX setMPIIntracomm",&cr->dd->mpi_comm_all);
          }else{
            plumed_cmd(plumedmain,"GREX setMPIIntracomm",&cr->mpi_comm_mysim);
          }
        }
        plumed_cmd(plumedmain,"GREX init",nullptr);
      }
      if(PAR(cr)){
        if(haveDDAtomOrdering(*cr)) {
          plumed_cmd(plumedmain,"setMPIComm",&cr->dd->mpi_comm_all);
        }else{
          plumed_cmd(plumedmain,"setMPIComm",&cr->mpi_comm_mysim);
        }
      }
      plumed_cmd(plumedmain,"setNatoms",top_global.natoms);
      plumed_cmd(plumedmain,"setMDEngine","gromacs");
      plumed_cmd(plumedmain,"setLog",fplog);
      // Pass the timestep through a 'real' temporary so the pointer type
      // matches what PLUMED expects for this build's precision.
      real real_delta_t;
      real_delta_t=ir->delta_t;
      plumed_cmd(plumedmain,"setTimestep",&real_delta_t);
      plumed_cmd(plumedmain,"init",nullptr);

      if(haveDDAtomOrdering(*cr)) {
        // Tell PLUMED which atoms are local and their global indices.
        int nat_home = dd_numHomeAtoms(*cr->dd);
        plumed_cmd(plumedmain,"setAtomsNlocal",&nat_home);
        plumed_cmd(plumedmain,"setAtomsGatindex",cr->dd->globalAtomIndices.data());
      }
    }
    /* END PLUMED */
}
|
|
559
|
+
|
|
560
|
+
//! Finalize the minimization
|
|
561
|
+
static void finish_em(const t_commrec*          cr,
                      gmx_mdoutf_t              outf,
                      gmx_walltime_accounting_t walltime_accounting,
                      gmx_wallcycle*            wcycle)
{
    if (!thisRankHasDuty(cr, DUTY_PME))
    {
        /* Tell the PME only node to finish */
        gmx_pme_send_finish(cr);
    }

    // Close/flush the trajectory and energy output files.
    done_mdoutf(outf);

    // Stop the run timers started in print_em_start.
    em_time_end(walltime_accounting, wcycle);
}
|
|
576
|
+
|
|
577
|
+
//! Swap two different EM states during minimization
|
|
578
|
+
// Exchange which EM state each caller-held pointer refers to.  Only the
// pointers are swapped; the underlying state objects are left untouched,
// which makes this a cheap way to promote a trial state to "current".
static void swap_em_state(em_state_t** ems1, em_state_t** ems2)
{
    em_state_t* const held = *ems1;

    *ems1 = *ems2;
    *ems2 = held;
}
|
|
586
|
+
|
|
587
|
+
//! Save the EM trajectory
|
|
588
|
+
static void write_em_traj(FILE*               fplog,
                          const t_commrec*    cr,
                          gmx_mdoutf_t        outf,
                          gmx_bool            bX,
                          gmx_bool            bF,
                          const char*         confout,
                          const gmx_mtop_t&   top_global,
                          const t_inputrec*   ir,
                          int64_t             step,
                          em_state_t*         state,
                          t_state*            state_global,
                          ObservablesHistory* observablesHistory)
{
    // Writes the requested frame data (coordinates and/or forces) to the
    // trajectory, and optionally the final configuration to \p confout.
    int mdof_flags = 0;

    if (bX)
    {
        mdof_flags |= MDOF_X;
    }
    if (bF)
    {
        mdof_flags |= MDOF_F;
    }

    /* If we want IMD output, set appropriate MDOF flag */
    if (ir->bIMD)
    {
        mdof_flags |= MDOF_IMD;
    }

    // EM writes no checkpoints; an empty holder satisfies the interface.
    gmx::WriteCheckpointDataHolder checkpointDataHolder;
    mdoutf_write_to_trajectory_files(fplog,
                                     cr,
                                     outf,
                                     mdof_flags,
                                     top_global.natoms,
                                     step,
                                     static_cast<double>(step),
                                     &state->s,
                                     state_global,
                                     observablesHistory,
                                     state->f.view().force(),
                                     &checkpointDataHolder);

    if (confout != nullptr)
    {
        if (haveDDAtomOrdering(*cr))
        {
            /* If bX=true, x was collected to state_global in the call above */
            if (!bX)
            {
                // Coordinates were not collected yet; gather them to the
                // main rank now so the full configuration can be written.
                auto globalXRef = MAIN(cr) ? state_global->x : gmx::ArrayRef<gmx::RVec>();
                dd_collect_vec(
                        cr->dd, state->s.ddp_count, state->s.ddp_count_cg_gl, state->s.cg_gl, state->s.x, globalXRef);
            }
        }
        else
        {
            /* Copy the local state pointer */
            // Without DD the local state already holds all atoms; alias it
            // so the writing code below has a single path.
            state_global = &state->s;
        }

        if (MAIN(cr))
        {
            if (ir->pbcType != PbcType::No && !ir->bPeriodicMols && haveDDAtomOrdering(*cr))
            {
                /* Make molecules whole only for confout writing */
                do_pbc_mtop(ir->pbcType, state->s.box, &top_global, state_global->x.rvec_array());
            }

            write_sto_conf_mtop(confout,
                                *top_global.name,
                                top_global,
                                state_global->x.rvec_array(),
                                nullptr,
                                ir->pbcType,
                                state->s.box);
        }
    }
}
|
|
668
|
+
|
|
669
|
+
//! \brief Do one minimization step
|
|
670
|
+
//
|
|
671
|
+
// \returns true when the step succeeded, false when a constraint error occurred
|
|
672
|
+
static bool do_em_step(const t_commrec* cr,
|
|
673
|
+
const t_inputrec* ir,
|
|
674
|
+
t_mdatoms* md,
|
|
675
|
+
em_state_t* ems1,
|
|
676
|
+
real a,
|
|
677
|
+
gmx::ArrayRefWithPadding<const gmx::RVec> force,
|
|
678
|
+
em_state_t* ems2,
|
|
679
|
+
gmx::Constraints* constr,
|
|
680
|
+
int64_t count)
|
|
681
|
+
|
|
682
|
+
{
|
|
683
|
+
t_state * s1, *s2;
|
|
684
|
+
int start, end;
|
|
685
|
+
real dvdl_constr;
|
|
686
|
+
int nthreads gmx_unused;
|
|
687
|
+
|
|
688
|
+
bool validStep = true;
|
|
689
|
+
|
|
690
|
+
s1 = &ems1->s;
|
|
691
|
+
s2 = &ems2->s;
|
|
692
|
+
|
|
693
|
+
if (haveDDAtomOrdering(*cr) && s1->ddp_count != cr->dd->ddp_count)
|
|
694
|
+
{
|
|
695
|
+
gmx_incons("state mismatch in do_em_step");
|
|
696
|
+
}
|
|
697
|
+
|
|
698
|
+
s2->setFlags(s1->flags());
|
|
699
|
+
|
|
700
|
+
if (s2->numAtoms() != s1->numAtoms())
|
|
701
|
+
{
|
|
702
|
+
s2->changeNumAtoms(s1->numAtoms());
|
|
703
|
+
ems2->f.resize(s2->numAtoms());
|
|
704
|
+
}
|
|
705
|
+
if (haveDDAtomOrdering(*cr) && s2->cg_gl.size() != s1->cg_gl.size())
|
|
706
|
+
{
|
|
707
|
+
s2->cg_gl.resize(s1->cg_gl.size());
|
|
708
|
+
}
|
|
709
|
+
|
|
710
|
+
copy_mat(s1->box, s2->box);
|
|
711
|
+
/* Copy free energy state */
|
|
712
|
+
s2->lambda = s1->lambda;
|
|
713
|
+
copy_mat(s1->box, s2->box);
|
|
714
|
+
|
|
715
|
+
start = 0;
|
|
716
|
+
end = md->homenr;
|
|
717
|
+
|
|
718
|
+
nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Update);
|
|
719
|
+
#pragma omp parallel num_threads(nthreads)
|
|
720
|
+
{
|
|
721
|
+
const rvec* x1 = s1->x.rvec_array();
|
|
722
|
+
rvec* x2 = s2->x.rvec_array();
|
|
723
|
+
const rvec* f = as_rvec_array(force.unpaddedArrayRef().data());
|
|
724
|
+
|
|
725
|
+
int gf = 0;
|
|
726
|
+
#pragma omp for schedule(static) nowait
|
|
727
|
+
for (int i = start; i < end; i++)
|
|
728
|
+
{
|
|
729
|
+
try
|
|
730
|
+
{
|
|
731
|
+
if (!md->cFREEZE.empty())
|
|
732
|
+
{
|
|
733
|
+
gf = md->cFREEZE[i];
|
|
734
|
+
}
|
|
735
|
+
for (int m = 0; m < DIM; m++)
|
|
736
|
+
{
|
|
737
|
+
if (ir->opts.nFreeze[gf][m])
|
|
738
|
+
{
|
|
739
|
+
x2[i][m] = x1[i][m];
|
|
740
|
+
}
|
|
741
|
+
else
|
|
742
|
+
{
|
|
743
|
+
x2[i][m] = x1[i][m] + a * f[i][m];
|
|
744
|
+
}
|
|
745
|
+
}
|
|
746
|
+
}
|
|
747
|
+
GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
|
|
748
|
+
}
|
|
749
|
+
|
|
750
|
+
if (s2->hasEntry(StateEntry::Cgp))
|
|
751
|
+
{
|
|
752
|
+
/* Copy the CG p vector */
|
|
753
|
+
const rvec* p1 = s1->cg_p.rvec_array();
|
|
754
|
+
rvec* p2 = s2->cg_p.rvec_array();
|
|
755
|
+
#pragma omp for schedule(static) nowait
|
|
756
|
+
for (int i = start; i < end; i++)
|
|
757
|
+
{
|
|
758
|
+
// Trivial OpenMP block that does not throw
|
|
759
|
+
copy_rvec(p1[i], p2[i]);
|
|
760
|
+
}
|
|
761
|
+
}
|
|
762
|
+
|
|
763
|
+
if (haveDDAtomOrdering(*cr))
|
|
764
|
+
{
|
|
765
|
+
/* OpenMP does not supported unsigned loop variables */
|
|
766
|
+
#pragma omp for schedule(static) nowait
|
|
767
|
+
for (gmx::Index i = 0; i < gmx::ssize(s2->cg_gl); i++)
|
|
768
|
+
{
|
|
769
|
+
s2->cg_gl[i] = s1->cg_gl[i];
|
|
770
|
+
}
|
|
771
|
+
}
|
|
772
|
+
}
|
|
773
|
+
|
|
774
|
+
// Copy the DD or pair search counters
|
|
775
|
+
s2->ddp_count = s1->ddp_count;
|
|
776
|
+
s2->ddp_count_cg_gl = s1->ddp_count_cg_gl;
|
|
777
|
+
|
|
778
|
+
if (constr)
|
|
779
|
+
{
|
|
780
|
+
dvdl_constr = 0;
|
|
781
|
+
validStep = constr->apply(TRUE,
|
|
782
|
+
TRUE,
|
|
783
|
+
count,
|
|
784
|
+
0,
|
|
785
|
+
1.0,
|
|
786
|
+
s1->x.arrayRefWithPadding(),
|
|
787
|
+
s2->x.arrayRefWithPadding(),
|
|
788
|
+
ArrayRef<RVec>(),
|
|
789
|
+
s2->box,
|
|
790
|
+
s2->lambda[FreeEnergyPerturbationCouplingType::Bonded],
|
|
791
|
+
&dvdl_constr,
|
|
792
|
+
gmx::ArrayRefWithPadding<RVec>(),
|
|
793
|
+
false,
|
|
794
|
+
nullptr,
|
|
795
|
+
gmx::ConstraintVariable::Positions);
|
|
796
|
+
|
|
797
|
+
if (cr->nnodes > 1)
|
|
798
|
+
{
|
|
799
|
+
/* This global reduction will affect performance at high
|
|
800
|
+
* parallelization, but we can not really avoid it.
|
|
801
|
+
* But usually EM is not run at high parallelization.
|
|
802
|
+
*/
|
|
803
|
+
int reductionBuffer = static_cast<int>(!validStep);
|
|
804
|
+
gmx_sumi(1, &reductionBuffer, cr);
|
|
805
|
+
validStep = (reductionBuffer == 0);
|
|
806
|
+
}
|
|
807
|
+
|
|
808
|
+
// We should move this check to the different minimizers
|
|
809
|
+
if (!validStep && ir->eI != IntegrationAlgorithm::Steep)
|
|
810
|
+
{
|
|
811
|
+
gmx_fatal(FARGS,
|
|
812
|
+
"The coordinates could not be constrained. Minimizer '%s' can not handle "
|
|
813
|
+
"constraint failures, use minimizer '%s' before using '%s'.",
|
|
814
|
+
enumValueToString(ir->eI),
|
|
815
|
+
enumValueToString(IntegrationAlgorithm::Steep),
|
|
816
|
+
enumValueToString(ir->eI));
|
|
817
|
+
}
|
|
818
|
+
}
|
|
819
|
+
|
|
820
|
+
return validStep;
|
|
821
|
+
}
|
|
822
|
+
|
|
823
|
+
//! Prepare EM for using domain decomposition parallellization
|
|
824
|
+
static void em_dd_partition_system(FILE*                     fplog,
                                   const gmx::MDLogger&      mdlog,
                                   int                       step,
                                   const t_commrec*          cr,
                                   const gmx_mtop_t&         top_global,
                                   const t_inputrec*         ir,
                                   const MDModulesNotifiers& mdModulesNotifiers,

                                   gmx::ImdSession*     imdSession,
                                   pull_t*              pull_work,
                                   em_state_t*          ems,
                                   gmx_localtop_t*      top,
                                   gmx::MDAtoms*        mdAtoms,
                                   t_forcerec*          fr,
                                   VirtualSitesHandler* vsite,
                                   gmx::Constraints*    constr,
                                   t_nrnb*              nrnb,
                                   gmx_wallcycle*       wcycle)
{
    /* Repartition the domain decomposition */
    // Unlike init_em, this is a re-partition of the existing local state
    // (no global state is passed; bMainState = FALSE).
    dd_partition_system(fplog,
                        mdlog,
                        step,
                        cr,
                        FALSE,
                        nullptr,
                        top_global,
                        *ir,
                        mdModulesNotifiers,
                        imdSession,
                        pull_work,
                        &ems->s,
                        &ems->f,
                        mdAtoms,
                        top,
                        fr,
                        vsite,
                        constr,
                        nrnb,
                        wcycle,
                        FALSE);
    // Record the partitioned state so later consistency checks can compare
    // against the current decomposition.
    dd_store_state(*cr->dd, &ems->s);
}
|
|
867
|
+
|
|
868
|
+
namespace
|
|
869
|
+
{
|
|
870
|
+
|
|
871
|
+
//! Copy coordinates, OpenMP parallelized, from \p refCoords to coords
|
|
872
|
+
void setCoordinates(std::vector<RVec>* coords, ArrayRef<const RVec> refCoords)
|
|
873
|
+
{
|
|
874
|
+
coords->resize(refCoords.size());
|
|
875
|
+
|
|
876
|
+
const int gmx_unused nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Update);
|
|
877
|
+
#pragma omp parallel for num_threads(nthreads) schedule(static)
|
|
878
|
+
for (int i = 0; i < ssize(refCoords); i++)
|
|
879
|
+
{
|
|
880
|
+
(*coords)[i] = refCoords[i];
|
|
881
|
+
}
|
|
882
|
+
}
|
|
883
|
+
|
|
884
|
+
//! Returns the maximum difference an atom moved between two coordinate sets, over all ranks
|
|
885
|
+
real maxCoordinateDifference(ArrayRef<const RVec> coords1, ArrayRef<const RVec> coords2, MPI_Comm mpiCommMyGroup)
{
    GMX_RELEASE_ASSERT(coords1.size() == coords2.size(), "Coordinate counts should match");

    // Work with squared distances and take one sqrt at the end.
    real maxDiffSquared = 0;

#ifndef _MSC_VER // Visual Studio has no support for reduction(max)
    const int gmx_unused nthreads = gmx_omp_nthreads_get(ModuleMultiThread::Update);
#    pragma omp parallel for reduction(max : maxDiffSquared) num_threads(nthreads) schedule(static)
#endif
    for (int i = 0; i < ssize(coords1); i++)
    {
        maxDiffSquared = std::max(maxDiffSquared, gmx::norm2(coords1[i] - coords2[i]));
    }

#if GMX_MPI
    // Reduce the per-rank maximum over all ranks in the group, if there is
    // more than one.
    int numRanks = 1;
    if (mpiCommMyGroup != MPI_COMM_NULL)
    {
        MPI_Comm_size(mpiCommMyGroup, &numRanks);
    }
    if (numRanks > 1)
    {
        real maxDiffSquaredReduced;
        MPI_Allreduce(
                &maxDiffSquared, &maxDiffSquaredReduced, 1, GMX_DOUBLE ? MPI_DOUBLE : MPI_FLOAT, MPI_MAX, mpiCommMyGroup);
        maxDiffSquared = maxDiffSquaredReduced;
    }
#else
    GMX_UNUSED_VALUE(mpiCommMyGroup);
#endif

    return std::sqrt(maxDiffSquared);
}
|
|
919
|
+
|
|
920
|
+
/*! \brief Class to handle the work of setting and doing an energy evaluation.
 *
 * This class is a mere aggregate of parameters to pass to evaluate an
 * energy, so that future changes to names and types of them consume
 * less time when refactoring other code.
 *
 * Aggregate initialization is used, for which the chief risk is that
 * if a member is added at the end and not all initializer lists are
 * updated, then the member will be value initialized, which will
 * typically mean initialization to zero.
 *
 * Note that because aggregate initialization is positional, the
 * declaration order of the members below is part of the interface:
 * do not reorder them without updating every initializer list.
 *
 * Use a braced initializer list to construct one of these. */
class EnergyEvaluator
{
public:
    /*! \brief Evaluates an energy on the state in \c ems.
     *
     * \todo In practice, the same objects mu_tot, vir, and pres
     * are always passed to this function, so we would rather have
     * them as data members. However, their C-array types are
     * unsuited for aggregate initialization. When the types
     * improve, the call signature of this method can be reduced.
     */
    void run(em_state_t* ems, rvec mu_tot, tensor vir, tensor pres, int64_t count, gmx_bool bFirst, int64_t step);
    //! Handles logging (deprecated).
    FILE* fplog;
    //! Handles logging.
    const gmx::MDLogger& mdlog;
    //! Handles communication.
    const t_commrec* cr;
    //! Coordinates multi-simulations.
    const gmx_multisim_t* ms;
    //! Holds the simulation topology.
    const gmx_mtop_t& top_global;
    //! Holds the domain topology.
    gmx_localtop_t* top;
    //! User input options.
    const t_inputrec* inputrec;
    //! Handles notifications for MDModules.
    const MDModulesNotifiers& mdModulesNotifiers;
    //! The Interactive Molecular Dynamics session.
    gmx::ImdSession* imdSession;
    //! The pull work object.
    pull_t* pull_work;
    //! Data for rotational pulling.
    gmx_enfrot* enforcedRotation;
    //! Manages flop accounting.
    t_nrnb* nrnb;
    //! Manages wall cycle accounting.
    gmx_wallcycle* wcycle;
    //! Legacy coordinator of global reduction.
    gmx_global_stat_t gstat;
    //! Coordinates reduction for observables.
    gmx::ObservablesReducer* observablesReducer;
    //! Handles virtual sites.
    VirtualSitesHandler* vsite;
    //! Handles constraints.
    gmx::Constraints* constr;
    //! Per-atom data for this domain.
    gmx::MDAtoms* mdAtoms;
    //! Handles how to calculate the forces.
    t_forcerec* fr;
    //! Schedule of force-calculation work each step for this task.
    MdrunScheduleWorkload* runScheduleWork;
    //! Stores the computed energies.
    gmx_enerdata_t* enerd;
    //! The DD partitioning count at which the pair list was generated.
    int ddpCountPairSearch;
    //! The local coordinates that were used for pair searching, stored for computing displacements.
    std::vector<RVec> pairSearchCoordinates;
};
|
|
991
|
+
|
|
992
|
+
/* Evaluates forces and the potential energy for the state in \c ems,
 * re-running pair search and (with DD) repartitioning when needed.
 * Results are stored back into \c ems (epot, forces, fnorm/fmax) and into
 * the caller-provided mu_tot/vir/pres. */
void EnergyEvaluator::run(em_state_t* ems, rvec mu_tot, tensor vir, tensor pres, int64_t count, gmx_bool bFirst, int64_t step)
{
    real     t;
    gmx_bool bNS;
    tensor   force_vir, shake_vir, ekin;
    real     dvdl_constr;
    real     terminate = 0;

    /* Set the time to the initial time, the time does not change during EM */
    t = inputrec->init_t;

    // Rebuild virtual-site positions from the (possibly updated) real atoms.
    if (vsite)
    {
        vsite->construct(ems->s.x, {}, ems->s.box, gmx::VSiteOperation::Positions);
    }

    // Compute the buffer size of the pair list: the margin between the
    // list cut-off and the largest interaction cut-off.
    const real bufferSize = inputrec->rlist - std::max(inputrec->rcoulomb, inputrec->rvdw);

    if (bFirst || bufferSize <= 0 || ems->s.ddp_count != ddpCountPairSearch)
    {
        /* This is the first state or an old state used before the last ns */
        bNS = TRUE;
    }
    else
    {
        // We need to generate a new pairlist when one atom moved more than half the buffer size
        ArrayRef<const RVec> localCoordinates =
                ArrayRef<const RVec>(ems->s.x).subArray(0, mdAtoms->mdatoms()->homenr);
        bNS = 2 * maxCoordinateDifference(pairSearchCoordinates, localCoordinates, cr->mpi_comm_mygroup)
              > bufferSize;
    }

    if (bNS)
    {
        if (haveDDAtomOrdering(*cr))
        {
            /* Repartition the domain decomposition */
            em_dd_partition_system(fplog,
                                   mdlog,
                                   count,
                                   cr,
                                   top_global,
                                   inputrec,
                                   mdModulesNotifiers,
                                   imdSession,
                                   pull_work,
                                   ems,
                                   top,
                                   mdAtoms,
                                   fr,
                                   vsite,
                                   constr,
                                   nrnb,
                                   wcycle);
            ddpCountPairSearch = cr->dd->ddp_count;
        }
        else
        {
            // Without DD we increase the search counter here
            ddpCountPairSearch++;
            // Store the count in the state, so we check whether we later need
            // to do pair search after resetting to this, by then, old state
            ems->s.ddp_count = ddpCountPairSearch;
        }
    }

    /* Store the local coordinates that will be used in the pair search, after we re-partitioned */
    if (bufferSize > 0 && bNS)
    {
        ArrayRef<const RVec> localCoordinates =
                constArrayRefFromArray(ems->s.x.data(), mdAtoms->mdatoms()->homenr);
        setCoordinates(&pairSearchCoordinates, localCoordinates);
    }

    fr->longRangeNonbondeds->updateAfterPartition(*mdAtoms->mdatoms());

    // Essential dynamics is not supported in energy minimization.
    gmx_edsam* const ed = nullptr;

    if (bNS)
    {
        if (fr->listedForcesGpu)
        {
            fr->listedForcesGpu->updateHaveInteractions(top->idef);
        }
        // Domain-lifetime work only needs recomputing after a (re)partition.
        runScheduleWork->domainWork = setupDomainLifetimeWorkload(
                *inputrec, *fr, pull_work, ed, *mdAtoms->mdatoms(), runScheduleWork->simulationWork);
    }


    // EM always needs all forces, energies and the virial; neighbor
    // searching is requested only on search steps.
    const int legacyForceFlags = GMX_FORCE_STATECHANGED | GMX_FORCE_ALLFORCES | GMX_FORCE_VIRIAL
                                 | GMX_FORCE_ENERGY | (bNS ? GMX_FORCE_NS : 0);
    runScheduleWork->stepWork = setupStepWorkload(legacyForceFlags,
                                                  inputrec->mtsLevels,
                                                  step,
                                                  runScheduleWork->domainWork,
                                                  runScheduleWork->simulationWork);

    /* Calc force & energy on new trial position */
    /* do_force always puts the charge groups in the box and shifts again
     * We do not unshift, so molecules are always whole in congrad.c
     */
    /* PLUMED: register this step's data pointers with PLUMED before the
     * force calculation; the force/virial buffers set here are accumulated
     * into by PLUMED. */
    int plumedNeedsEnergy = 0;
    matrix plumed_vir;
    if (plumedswitch)
    {
        long int lstep = count;
        plumed_cmd(plumedmain, "setStepLong", &lstep);
        plumed_cmd(plumedmain, "setPositions", &ems->s.x[0][0]);
        plumed_cmd(plumedmain, "setMasses", &mdAtoms->mdatoms()->massT[0]);
        plumed_cmd(plumedmain, "setCharges", &mdAtoms->mdatoms()->chargeA[0]);
        plumed_cmd(plumedmain, "setBox", &ems->s.box[0][0]);
        plumed_cmd(plumedmain, "prepareCalc", nullptr);
        plumed_cmd(plumedmain, "setForces", &ems->f.view().force()[0][0]);
        // Tells us whether PLUMED needs the potential energy this step.
        plumed_cmd(plumedmain, "isEnergyNeeded", &plumedNeedsEnergy);
        clear_mat(plumed_vir);
        plumed_cmd(plumedmain, "setVirial", &plumed_vir[0][0]);
    }
    /* END PLUMED */
    do_force(fplog,
             cr,
             ms,
             *inputrec,
             mdModulesNotifiers,
             nullptr,
             enforcedRotation,
             imdSession,
             pull_work,
             count,
             nrnb,
             wcycle,
             top,
             ems->s.box,
             ems->s.x.arrayRefWithPadding(),
             {},
             &ems->s.hist,
             &ems->f.view(),
             force_vir,
             mdAtoms->mdatoms(),
             enerd,
             ems->s.lambda,
             fr,
             *runScheduleWork,
             vsite,
             mu_tot,
             t,
             ed,
             fr->longRangeNonbondeds.get(),
             DDBalanceRegionHandler(cr));

    /* PLUMED: finish the bias calculation and fold PLUMED's virial
     * contribution into force_vir. The 2.0/0.5 scalings convert between
     * GROMACS' and PLUMED's virial conventions. */
    if (plumedswitch)
    {
        if (plumedNeedsEnergy)
        {
            msmul(force_vir, 2.0, plumed_vir);
            plumed_cmd(plumedmain, "setEnergy", &enerd->term[F_EPOT]);
            plumed_cmd(plumedmain, "performCalc", nullptr);
            msmul(plumed_vir, 0.5, force_vir);
        }
        else
        {
            // performCalc already ran inside prepareCalc; just add the
            // rescaled PLUMED virial to the force virial.
            msmul(plumed_vir, 0.5, plumed_vir);
            m_add(force_vir, plumed_vir, force_vir);
        }
    }
    /* END PLUMED */

    /* Clear the unused shake virial and pressure */
    clear_mat(shake_vir);
    clear_mat(pres);

    /* Communicate stuff when parallel */
    if (PAR(cr) && inputrec->eI != IntegrationAlgorithm::NM)
    {
        wallcycle_start(wcycle, WallCycleCounter::MoveE);

        global_stat(*gstat,
                    cr,
                    enerd,
                    force_vir,
                    shake_vir,
                    *inputrec,
                    nullptr,
                    nullptr,
                    std::vector<real>(1, terminate),
                    FALSE,
                    CGLO_ENERGY | CGLO_PRESSURE | CGLO_CONSTRAINT,
                    step,
                    observablesReducer);

        wallcycle_stop(wcycle, WallCycleCounter::MoveE);
    }

    ems->epot = enerd->term[F_EPOT];

    if (constr)
    {
        /* Project out the constraint components of the force */
        bool needsLogging  = false;
        bool computeEnergy = false;
        bool computeVirial = true;
        dvdl_constr        = 0;
        auto f             = ems->f.view().forceWithPadding();
        constr->apply(needsLogging,
                      computeEnergy,
                      count,
                      0,
                      1.0,
                      ems->s.x.arrayRefWithPadding(),
                      f,
                      f.unpaddedArrayRef(),
                      ems->s.box,
                      ems->s.lambda[FreeEnergyPerturbationCouplingType::Bonded],
                      &dvdl_constr,
                      gmx::ArrayRefWithPadding<RVec>(),
                      computeVirial,
                      shake_vir,
                      gmx::ConstraintVariable::ForceDispl);
        enerd->term[F_DVDL_CONSTR] += dvdl_constr;
        m_add(force_vir, shake_vir, vir);
    }
    else
    {
        copy_mat(force_vir, vir);
    }

    // EM has no kinetic energy; pass a zero ekin tensor to the pressure calculation.
    clear_mat(ekin);
    enerd->term[F_PRES] = calc_pres(fr->pbcType, inputrec->nwall, ems->s.box, ekin, vir, pres);

    if (inputrec->efep != FreeEnergyPerturbationType::No)
    {
        accumulateKineticLambdaComponents(enerd, ems->s.lambda, *inputrec->fepvals);
    }

    // Update fnorm/fmax in ems for the minimizer's convergence checks.
    if (EI_ENERGY_MINIMIZATION(inputrec->eI))
    {
        get_state_f_norm_max(cr, &(inputrec->opts), mdAtoms->mdatoms(), ems);
    }
}
|
|
1227
|
+
|
|
1228
|
+
} // namespace
|
|
1229
|
+
|
|
1230
|
+
//! Parallel utility summing energies and forces
static double reorder_partsum(const t_commrec* cr,
                              const t_grpopts* opts,
                              const gmx_mtop_t& top_global,
                              const em_state_t* s_min,
                              const em_state_t* s_b)
{
    if (debug)
    {
        fprintf(debug, "Doing reorder_partsum\n");
    }

    auto fm = s_min->f.view().force();
    auto fb = s_b->f.view().force();

    /* Collect fm in a global vector fmg.
     * This conflicts with the spirit of domain decomposition,
     * but to fully optimize this a much more complicated algorithm is required.
     */
    const int natoms = top_global.natoms;
    rvec*     fmg;
    snew(fmg, natoms);

    // Scatter the local s_min forces into the global array using the
    // local-to-global index map, then sum over ranks so every rank has
    // the full global force array.
    gmx::ArrayRef<const int> indicesMin = s_min->s.cg_gl;
    int                      i          = 0;
    for (int a : indicesMin)
    {
        copy_rvec(fm[i], fmg[a]);
        i++;
    }
    gmx_sum(top_global.natoms * 3, fmg[0], cr);

    /* Now we will determine the part of the sum for the cgs in state s_b */
    gmx::ArrayRef<const int> indicesB = s_b->s.cg_gl;

    double partsum = 0;
    i              = 0;
    int gf         = 0;
    gmx::ArrayRef<const unsigned char> grpnrFREEZE =
            top_global.groups.groupNumbers[SimulationAtomGroupType::Freeze];
    for (int a : indicesB)
    {
        // NOTE(review): the freeze group is looked up with the local loop
        // counter i, while fmg is indexed with the global atom index a.
        // If groupNumbers is indexed by global atom index, this should
        // presumably be grpnrFREEZE[a] — confirm against the definition of
        // groupNumbers before changing.
        if (!grpnrFREEZE.empty())
        {
            gf = grpnrFREEZE[i];
        }
        // Accumulate the Polak-Ribiere numerator (f_b - f_min) . f_b,
        // skipping frozen dimensions.
        for (int m = 0; m < DIM; m++)
        {
            if (!opts->nFreeze[gf][m])
            {
                partsum += (fb[i][m] - fmg[a][m]) * fb[i][m];
            }
        }
        i++;
    }

    sfree(fmg);

    return partsum;
}
|
|
1290
|
+
|
|
1291
|
+
//! Print some stuff, like beta, whatever that means.
|
|
1292
|
+
static real pr_beta(const t_commrec* cr,
|
|
1293
|
+
const t_grpopts* opts,
|
|
1294
|
+
t_mdatoms* mdatoms,
|
|
1295
|
+
const gmx_mtop_t& top_global,
|
|
1296
|
+
const em_state_t* s_min,
|
|
1297
|
+
const em_state_t* s_b)
|
|
1298
|
+
{
|
|
1299
|
+
double sum;
|
|
1300
|
+
|
|
1301
|
+
/* This is just the classical Polak-Ribiere calculation of beta;
|
|
1302
|
+
* it looks a bit complicated since we take freeze groups into account,
|
|
1303
|
+
* and might have to sum it in parallel runs.
|
|
1304
|
+
*/
|
|
1305
|
+
|
|
1306
|
+
if (!haveDDAtomOrdering(*cr)
|
|
1307
|
+
|| (s_min->s.ddp_count == cr->dd->ddp_count && s_b->s.ddp_count == cr->dd->ddp_count))
|
|
1308
|
+
{
|
|
1309
|
+
auto fm = s_min->f.view().force();
|
|
1310
|
+
auto fb = s_b->f.view().force();
|
|
1311
|
+
sum = 0;
|
|
1312
|
+
int gf = 0;
|
|
1313
|
+
/* This part of code can be incorrect with DD,
|
|
1314
|
+
* since the atom ordering in s_b and s_min might differ.
|
|
1315
|
+
*/
|
|
1316
|
+
for (int i = 0; i < mdatoms->homenr; i++)
|
|
1317
|
+
{
|
|
1318
|
+
if (!mdatoms->cFREEZE.empty())
|
|
1319
|
+
{
|
|
1320
|
+
gf = mdatoms->cFREEZE[i];
|
|
1321
|
+
}
|
|
1322
|
+
for (int m = 0; m < DIM; m++)
|
|
1323
|
+
{
|
|
1324
|
+
if (!opts->nFreeze[gf][m])
|
|
1325
|
+
{
|
|
1326
|
+
sum += (fb[i][m] - fm[i][m]) * fb[i][m];
|
|
1327
|
+
}
|
|
1328
|
+
}
|
|
1329
|
+
}
|
|
1330
|
+
}
|
|
1331
|
+
else
|
|
1332
|
+
{
|
|
1333
|
+
/* We need to reorder cgs while summing */
|
|
1334
|
+
sum = reorder_partsum(cr, opts, top_global, s_min, s_b);
|
|
1335
|
+
}
|
|
1336
|
+
if (PAR(cr))
|
|
1337
|
+
{
|
|
1338
|
+
gmx_sumd(1, &sum, cr);
|
|
1339
|
+
}
|
|
1340
|
+
|
|
1341
|
+
return sum / gmx::square(s_min->fnorm);
|
|
1342
|
+
}
|
|
1343
|
+
|
|
1344
|
+
namespace gmx
|
|
1345
|
+
{
|
|
1346
|
+
|
|
1347
|
+
void LegacySimulator::do_cg()
|
|
1348
|
+
{
|
|
1349
|
+
const char* CG = "Polak-Ribiere Conjugate Gradients";
|
|
1350
|
+
|
|
1351
|
+
gmx_global_stat_t gstat;
|
|
1352
|
+
double tmp, minstep;
|
|
1353
|
+
real stepsize;
|
|
1354
|
+
real a, b, c, beta = 0.0;
|
|
1355
|
+
real epot_repl = 0;
|
|
1356
|
+
real pnorm;
|
|
1357
|
+
gmx_bool converged, foundlower;
|
|
1358
|
+
rvec mu_tot = { 0 };
|
|
1359
|
+
gmx_bool do_log = FALSE, do_ene = FALSE, do_x, do_f;
|
|
1360
|
+
tensor vir, pres;
|
|
1361
|
+
int number_steps, neval = 0, nstcg = inputRec_->nstcgsteep;
|
|
1362
|
+
int m, step, nminstep;
|
|
1363
|
+
auto* mdatoms = mdAtoms_->mdatoms();
|
|
1364
|
+
|
|
1365
|
+
GMX_LOG(mdLog_.info)
|
|
1366
|
+
.asParagraph()
|
|
1367
|
+
.appendText(
|
|
1368
|
+
"Note that activating conjugate gradient energy minimization via the "
|
|
1369
|
+
"integrator .mdp option and the command gmx mdrun may "
|
|
1370
|
+
"be available in a different form in a future version of GROMACS, "
|
|
1371
|
+
"e.g. gmx minimize and an .mdp option.");
|
|
1372
|
+
|
|
1373
|
+
step = 0;
|
|
1374
|
+
|
|
1375
|
+
if (MAIN(cr_))
|
|
1376
|
+
{
|
|
1377
|
+
// In CG, the state is extended with a search direction
|
|
1378
|
+
stateGlobal_->addEntry(StateEntry::Cgp);
|
|
1379
|
+
|
|
1380
|
+
// Initialize the search direction to zero
|
|
1381
|
+
for (RVec& cg_p : stateGlobal_->cg_p)
|
|
1382
|
+
{
|
|
1383
|
+
cg_p = { 0, 0, 0 };
|
|
1384
|
+
}
|
|
1385
|
+
}
|
|
1386
|
+
|
|
1387
|
+
/* Create 4 states on the stack and extract pointers that we will swap */
|
|
1388
|
+
em_state_t s0{}, s1{}, s2{}, s3{};
|
|
1389
|
+
em_state_t* s_min = &s0;
|
|
1390
|
+
em_state_t* s_a = &s1;
|
|
1391
|
+
em_state_t* s_b = &s2;
|
|
1392
|
+
em_state_t* s_c = &s3;
|
|
1393
|
+
|
|
1394
|
+
ObservablesReducer observablesReducer = observablesReducerBuilder_->build();
|
|
1395
|
+
|
|
1396
|
+
/* Init em and store the local state in s_min */
|
|
1397
|
+
init_em(fpLog_,
|
|
1398
|
+
mdLog_,
|
|
1399
|
+
CG,
|
|
1400
|
+
cr_,
|
|
1401
|
+
ms_, /* PLUMED */
|
|
1402
|
+
inputRec_,
|
|
1403
|
+
mdModulesNotifiers_,
|
|
1404
|
+
imdSession_,
|
|
1405
|
+
pullWork_,
|
|
1406
|
+
stateGlobal_,
|
|
1407
|
+
topGlobal_,
|
|
1408
|
+
s_min,
|
|
1409
|
+
top_,
|
|
1410
|
+
nrnb_,
|
|
1411
|
+
fr_,
|
|
1412
|
+
mdAtoms_,
|
|
1413
|
+
&gstat,
|
|
1414
|
+
virtualSites_,
|
|
1415
|
+
constr_,
|
|
1416
|
+
nullptr);
|
|
1417
|
+
const bool simulationsShareState = false;
|
|
1418
|
+
gmx_mdoutf* outf = init_mdoutf(fpLog_,
|
|
1419
|
+
nFile_,
|
|
1420
|
+
fnm_,
|
|
1421
|
+
mdrunOptions_,
|
|
1422
|
+
cr_,
|
|
1423
|
+
outputProvider_,
|
|
1424
|
+
mdModulesNotifiers_,
|
|
1425
|
+
inputRec_,
|
|
1426
|
+
topGlobal_,
|
|
1427
|
+
nullptr,
|
|
1428
|
+
wallCycleCounters_,
|
|
1429
|
+
StartingBehavior::NewSimulation,
|
|
1430
|
+
simulationsShareState,
|
|
1431
|
+
ms_);
|
|
1432
|
+
gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf),
|
|
1433
|
+
topGlobal_,
|
|
1434
|
+
*inputRec_,
|
|
1435
|
+
pullWork_,
|
|
1436
|
+
nullptr,
|
|
1437
|
+
false,
|
|
1438
|
+
StartingBehavior::NewSimulation,
|
|
1439
|
+
simulationsShareState,
|
|
1440
|
+
mdModulesNotifiers_);
|
|
1441
|
+
|
|
1442
|
+
/* Print to log file */
|
|
1443
|
+
print_em_start(fpLog_, cr_, wallTimeAccounting_, wallCycleCounters_, CG);
|
|
1444
|
+
|
|
1445
|
+
/* Max number of steps */
|
|
1446
|
+
number_steps = inputRec_->nsteps;
|
|
1447
|
+
|
|
1448
|
+
if (MAIN(cr_))
|
|
1449
|
+
{
|
|
1450
|
+
sp_header(stderr, CG, inputRec_->em_tol, number_steps);
|
|
1451
|
+
}
|
|
1452
|
+
if (fpLog_)
|
|
1453
|
+
{
|
|
1454
|
+
sp_header(fpLog_, CG, inputRec_->em_tol, number_steps);
|
|
1455
|
+
}
|
|
1456
|
+
|
|
1457
|
+
EnergyEvaluator energyEvaluator{ fpLog_,
|
|
1458
|
+
mdLog_,
|
|
1459
|
+
cr_,
|
|
1460
|
+
ms_,
|
|
1461
|
+
topGlobal_,
|
|
1462
|
+
top_,
|
|
1463
|
+
inputRec_,
|
|
1464
|
+
mdModulesNotifiers_,
|
|
1465
|
+
imdSession_,
|
|
1466
|
+
pullWork_,
|
|
1467
|
+
enforcedRotation_,
|
|
1468
|
+
nrnb_,
|
|
1469
|
+
wallCycleCounters_,
|
|
1470
|
+
gstat,
|
|
1471
|
+
&observablesReducer,
|
|
1472
|
+
virtualSites_,
|
|
1473
|
+
constr_,
|
|
1474
|
+
mdAtoms_,
|
|
1475
|
+
fr_,
|
|
1476
|
+
runScheduleWork_,
|
|
1477
|
+
enerd_,
|
|
1478
|
+
-1,
|
|
1479
|
+
{} };
|
|
1480
|
+
/* Call the force routine and some auxiliary (neighboursearching etc.) */
|
|
1481
|
+
/* do_force always puts the charge groups in the box and shifts again
|
|
1482
|
+
* We do not unshift, so molecules are always whole in congrad.c
|
|
1483
|
+
*/
|
|
1484
|
+
energyEvaluator.run(s_min, mu_tot, vir, pres, -1, TRUE, step);
|
|
1485
|
+
observablesReducer.markAsReadyToReduce();
|
|
1486
|
+
|
|
1487
|
+
if (MAIN(cr_))
|
|
1488
|
+
{
|
|
1489
|
+
/* Copy stuff to the energy bin for easy printing etc. */
|
|
1490
|
+
matrix nullBox = {};
|
|
1491
|
+
energyOutput.addDataAtEnergyStep(false,
|
|
1492
|
+
false,
|
|
1493
|
+
static_cast<double>(step),
|
|
1494
|
+
mdatoms->tmass,
|
|
1495
|
+
enerd_,
|
|
1496
|
+
nullptr,
|
|
1497
|
+
nullBox,
|
|
1498
|
+
PTCouplingArrays(),
|
|
1499
|
+
0,
|
|
1500
|
+
vir,
|
|
1501
|
+
pres,
|
|
1502
|
+
nullptr,
|
|
1503
|
+
mu_tot,
|
|
1504
|
+
constr_);
|
|
1505
|
+
|
|
1506
|
+
EnergyOutput::printHeader(fpLog_, step, step);
|
|
1507
|
+
energyOutput.printStepToEnergyFile(
|
|
1508
|
+
mdoutf_get_fp_ene(outf), TRUE, FALSE, FALSE, fpLog_, step, step, fr_->fcdata.get(), nullptr);
|
|
1509
|
+
}
|
|
1510
|
+
|
|
1511
|
+
/* Estimate/guess the initial stepsize */
|
|
1512
|
+
stepsize = inputRec_->em_stepsize / s_min->fnorm;
|
|
1513
|
+
|
|
1514
|
+
if (MAIN(cr_))
|
|
1515
|
+
{
|
|
1516
|
+
double sqrtNumAtoms = sqrt(static_cast<double>(stateGlobal_->numAtoms()));
|
|
1517
|
+
fprintf(stderr, " F-max = %12.5e on atom %d\n", s_min->fmax, s_min->a_fmax + 1);
|
|
1518
|
+
fprintf(stderr, " F-Norm = %12.5e\n", s_min->fnorm / sqrtNumAtoms);
|
|
1519
|
+
fprintf(stderr, "\n");
|
|
1520
|
+
/* and copy to the log file too... */
|
|
1521
|
+
fprintf(fpLog_, " F-max = %12.5e on atom %d\n", s_min->fmax, s_min->a_fmax + 1);
|
|
1522
|
+
fprintf(fpLog_, " F-Norm = %12.5e\n", s_min->fnorm / sqrtNumAtoms);
|
|
1523
|
+
fprintf(fpLog_, "\n");
|
|
1524
|
+
}
|
|
1525
|
+
/* Start the loop over CG steps.
|
|
1526
|
+
* Each successful step is counted, and we continue until
|
|
1527
|
+
* we either converge or reach the max number of steps.
|
|
1528
|
+
*/
|
|
1529
|
+
converged = FALSE;
|
|
1530
|
+
for (step = 0; (number_steps < 0 || step <= number_steps) && !converged; step++)
|
|
1531
|
+
{
|
|
1532
|
+
|
|
1533
|
+
/* start taking steps in a new direction
|
|
1534
|
+
* First time we enter the routine, beta=0, and the direction is
|
|
1535
|
+
* simply the negative gradient.
|
|
1536
|
+
*/
|
|
1537
|
+
|
|
1538
|
+
/* Calculate the new direction in p, and the gradient in this direction, gpa */
|
|
1539
|
+
gmx::ArrayRef<gmx::RVec> pm = s_min->s.cg_p;
|
|
1540
|
+
gmx::ArrayRef<const gmx::RVec> sfm = s_min->f.view().force();
|
|
1541
|
+
double gpa = 0;
|
|
1542
|
+
int gf = 0;
|
|
1543
|
+
for (int i = 0; i < mdatoms->homenr; i++)
|
|
1544
|
+
{
|
|
1545
|
+
if (!mdatoms->cFREEZE.empty())
|
|
1546
|
+
{
|
|
1547
|
+
gf = mdatoms->cFREEZE[i];
|
|
1548
|
+
}
|
|
1549
|
+
for (m = 0; m < DIM; m++)
|
|
1550
|
+
{
|
|
1551
|
+
if (!inputRec_->opts.nFreeze[gf][m])
|
|
1552
|
+
{
|
|
1553
|
+
pm[i][m] = sfm[i][m] + beta * pm[i][m];
|
|
1554
|
+
gpa -= pm[i][m] * sfm[i][m];
|
|
1555
|
+
/* f is negative gradient, thus the sign */
|
|
1556
|
+
}
|
|
1557
|
+
else
|
|
1558
|
+
{
|
|
1559
|
+
pm[i][m] = 0;
|
|
1560
|
+
}
|
|
1561
|
+
}
|
|
1562
|
+
}
|
|
1563
|
+
|
|
1564
|
+
/* Sum the gradient along the line across CPUs */
|
|
1565
|
+
if (PAR(cr_))
|
|
1566
|
+
{
|
|
1567
|
+
gmx_sumd(1, &gpa, cr_);
|
|
1568
|
+
}
|
|
1569
|
+
|
|
1570
|
+
/* Calculate the norm of the search vector */
|
|
1571
|
+
get_f_norm_max(cr_, &(inputRec_->opts), mdatoms, pm, &pnorm, nullptr, nullptr);
|
|
1572
|
+
|
|
1573
|
+
/* Just in case stepsize reaches zero due to numerical precision... */
|
|
1574
|
+
if (stepsize <= 0)
|
|
1575
|
+
{
|
|
1576
|
+
stepsize = inputRec_->em_stepsize / pnorm;
|
|
1577
|
+
}
|
|
1578
|
+
|
|
1579
|
+
/*
|
|
1580
|
+
* Double check the value of the derivative in the search direction.
|
|
1581
|
+
* If it is positive it must be due to the old information in the
|
|
1582
|
+
* CG formula, so just remove that and start over with beta=0.
|
|
1583
|
+
* This corresponds to a steepest descent step.
|
|
1584
|
+
*/
|
|
1585
|
+
if (gpa > 0)
|
|
1586
|
+
{
|
|
1587
|
+
beta = 0;
|
|
1588
|
+
step--; /* Don't count this step since we are restarting */
|
|
1589
|
+
continue; /* Go back to the beginning of the big for-loop */
|
|
1590
|
+
}
|
|
1591
|
+
|
|
1592
|
+
/* Calculate minimum allowed stepsize, before the average (norm)
|
|
1593
|
+
* relative change in coordinate is smaller than precision
|
|
1594
|
+
*/
|
|
1595
|
+
minstep = 0;
|
|
1596
|
+
auto s_min_x = makeArrayRef(s_min->s.x);
|
|
1597
|
+
for (int i = 0; i < mdatoms->homenr; i++)
|
|
1598
|
+
{
|
|
1599
|
+
for (m = 0; m < DIM; m++)
|
|
1600
|
+
{
|
|
1601
|
+
tmp = fabs(s_min_x[i][m]);
|
|
1602
|
+
if (tmp < 1.0)
|
|
1603
|
+
{
|
|
1604
|
+
tmp = 1.0;
|
|
1605
|
+
}
|
|
1606
|
+
tmp = pm[i][m] / tmp;
|
|
1607
|
+
minstep += tmp * tmp;
|
|
1608
|
+
}
|
|
1609
|
+
}
|
|
1610
|
+
/* Add up from all CPUs */
|
|
1611
|
+
if (PAR(cr_))
|
|
1612
|
+
{
|
|
1613
|
+
gmx_sumd(1, &minstep, cr_);
|
|
1614
|
+
}
|
|
1615
|
+
|
|
1616
|
+
minstep = GMX_REAL_EPS / sqrt(minstep / (3 * topGlobal_.natoms));
|
|
1617
|
+
|
|
1618
|
+
if (stepsize < minstep)
|
|
1619
|
+
{
|
|
1620
|
+
converged = TRUE;
|
|
1621
|
+
break;
|
|
1622
|
+
}
|
|
1623
|
+
|
|
1624
|
+
/* Write coordinates if necessary */
|
|
1625
|
+
do_x = do_per_step(step, inputRec_->nstxout);
|
|
1626
|
+
do_f = do_per_step(step, inputRec_->nstfout);
|
|
1627
|
+
|
|
1628
|
+
write_em_traj(
|
|
1629
|
+
fpLog_, cr_, outf, do_x, do_f, nullptr, topGlobal_, inputRec_, step, s_min, stateGlobal_, observablesHistory_);
|
|
1630
|
+
|
|
1631
|
+
/* Take a step downhill.
|
|
1632
|
+
* In theory, we should minimize the function along this direction.
|
|
1633
|
+
* That is quite possible, but it turns out to take 5-10 function evaluations
|
|
1634
|
+
* for each line. However, we dont really need to find the exact minimum -
|
|
1635
|
+
* it is much better to start a new CG step in a modified direction as soon
|
|
1636
|
+
* as we are close to it. This will save a lot of energy evaluations.
|
|
1637
|
+
*
|
|
1638
|
+
* In practice, we just try to take a single step.
|
|
1639
|
+
* If it worked (i.e. lowered the energy), we increase the stepsize but
|
|
1640
|
+
* the continue straight to the next CG step without trying to find any minimum.
|
|
1641
|
+
* If it didn't work (higher energy), there must be a minimum somewhere between
|
|
1642
|
+
* the old position and the new one.
|
|
1643
|
+
*
|
|
1644
|
+
* Due to the finite numerical accuracy, it turns out that it is a good idea
|
|
1645
|
+
* to even accept a SMALL increase in energy, if the derivative is still downhill.
|
|
1646
|
+
* This leads to lower final energies in the tests I've done. / Erik
|
|
1647
|
+
*/
|
|
1648
|
+
s_a->epot = s_min->epot;
|
|
1649
|
+
a = 0.0;
|
|
1650
|
+
c = a + stepsize; /* reference position along line is zero */
|
|
1651
|
+
|
|
1652
|
+
if (haveDDAtomOrdering(*cr_) && s_min->s.ddp_count < cr_->dd->ddp_count)
|
|
1653
|
+
{
|
|
1654
|
+
em_dd_partition_system(fpLog_,
|
|
1655
|
+
mdLog_,
|
|
1656
|
+
step,
|
|
1657
|
+
cr_,
|
|
1658
|
+
topGlobal_,
|
|
1659
|
+
inputRec_,
|
|
1660
|
+
mdModulesNotifiers_,
|
|
1661
|
+
imdSession_,
|
|
1662
|
+
pullWork_,
|
|
1663
|
+
s_min,
|
|
1664
|
+
top_,
|
|
1665
|
+
mdAtoms_,
|
|
1666
|
+
fr_,
|
|
1667
|
+
virtualSites_,
|
|
1668
|
+
constr_,
|
|
1669
|
+
nrnb_,
|
|
1670
|
+
wallCycleCounters_);
|
|
1671
|
+
}
|
|
1672
|
+
|
|
1673
|
+
/* Take a trial step (new coords in s_c) */
|
|
1674
|
+
do_em_step(cr_, inputRec_, mdatoms, s_min, c, s_min->s.cg_p.constArrayRefWithPadding(), s_c, constr_, -1);
|
|
1675
|
+
|
|
1676
|
+
neval++;
|
|
1677
|
+
/* Calculate energy for the trial step */
|
|
1678
|
+
energyEvaluator.run(s_c, mu_tot, vir, pres, -1, FALSE, step);
|
|
1679
|
+
observablesReducer.markAsReadyToReduce();
|
|
1680
|
+
|
|
1681
|
+
/* Calc derivative along line */
|
|
1682
|
+
const rvec* pc = s_c->s.cg_p.rvec_array();
|
|
1683
|
+
gmx::ArrayRef<const gmx::RVec> sfc = s_c->f.view().force();
|
|
1684
|
+
double gpc = 0;
|
|
1685
|
+
for (int i = 0; i < mdatoms->homenr; i++)
|
|
1686
|
+
{
|
|
1687
|
+
for (m = 0; m < DIM; m++)
|
|
1688
|
+
{
|
|
1689
|
+
gpc -= pc[i][m] * sfc[i][m]; /* f is negative gradient, thus the sign */
|
|
1690
|
+
}
|
|
1691
|
+
}
|
|
1692
|
+
/* Sum the gradient along the line across CPUs */
|
|
1693
|
+
if (PAR(cr_))
|
|
1694
|
+
{
|
|
1695
|
+
gmx_sumd(1, &gpc, cr_);
|
|
1696
|
+
}
|
|
1697
|
+
|
|
1698
|
+
/* This is the max amount of increase in energy we tolerate */
|
|
1699
|
+
tmp = std::sqrt(GMX_REAL_EPS) * fabs(s_a->epot);
|
|
1700
|
+
|
|
1701
|
+
/* Accept the step if the energy is lower, or if it is not significantly higher
|
|
1702
|
+
* and the line derivative is still negative.
|
|
1703
|
+
*/
|
|
1704
|
+
if (s_c->epot < s_a->epot || (gpc < 0 && s_c->epot < (s_a->epot + tmp)))
|
|
1705
|
+
{
|
|
1706
|
+
foundlower = TRUE;
|
|
1707
|
+
/* Great, we found a better energy. Increase step for next iteration
|
|
1708
|
+
* if we are still going down, decrease it otherwise
|
|
1709
|
+
*/
|
|
1710
|
+
if (gpc < 0)
|
|
1711
|
+
{
|
|
1712
|
+
stepsize *= 1.618034; /* The golden section */
|
|
1713
|
+
}
|
|
1714
|
+
else
|
|
1715
|
+
{
|
|
1716
|
+
stepsize *= 0.618034; /* 1/golden section */
|
|
1717
|
+
}
|
|
1718
|
+
}
|
|
1719
|
+
else
|
|
1720
|
+
{
|
|
1721
|
+
/* New energy is the same or higher. We will have to do some work
|
|
1722
|
+
* to find a smaller value in the interval. Take smaller step next time!
|
|
1723
|
+
*/
|
|
1724
|
+
foundlower = FALSE;
|
|
1725
|
+
stepsize *= 0.618034;
|
|
1726
|
+
}
|
|
1727
|
+
|
|
1728
|
+
|
|
1729
|
+
/* OK, if we didn't find a lower value we will have to locate one now - there must
|
|
1730
|
+
* be one in the interval [a=0,c].
|
|
1731
|
+
* The same thing is valid here, though: Don't spend dozens of iterations to find
|
|
1732
|
+
* the line minimum. We try to interpolate based on the derivative at the endpoints,
|
|
1733
|
+
* and only continue until we find a lower value. In most cases this means 1-2 iterations.
|
|
1734
|
+
*
|
|
1735
|
+
* I also have a safeguard for potentially really pathological functions so we never
|
|
1736
|
+
* take more than 20 steps before we give up ...
|
|
1737
|
+
*
|
|
1738
|
+
* If we already found a lower value we just skip this step and continue to the update.
|
|
1739
|
+
*/
|
|
1740
|
+
double gpb;
|
|
1741
|
+
if (!foundlower)
|
|
1742
|
+
{
|
|
1743
|
+
nminstep = 0;
|
|
1744
|
+
|
|
1745
|
+
do
|
|
1746
|
+
{
|
|
1747
|
+
/* Select a new trial point.
|
|
1748
|
+
* If the derivatives at points a & c have different sign we interpolate to zero,
|
|
1749
|
+
* otherwise just do a bisection.
|
|
1750
|
+
*/
|
|
1751
|
+
if (gpa < 0 && gpc > 0)
|
|
1752
|
+
{
|
|
1753
|
+
b = a + gpa * (a - c) / (gpc - gpa);
|
|
1754
|
+
}
|
|
1755
|
+
else
|
|
1756
|
+
{
|
|
1757
|
+
b = 0.5 * (a + c);
|
|
1758
|
+
}
|
|
1759
|
+
|
|
1760
|
+
/* safeguard if interpolation close to machine accuracy causes errors:
|
|
1761
|
+
* never go outside the interval
|
|
1762
|
+
*/
|
|
1763
|
+
if (b <= a || b >= c)
|
|
1764
|
+
{
|
|
1765
|
+
b = 0.5 * (a + c);
|
|
1766
|
+
}
|
|
1767
|
+
|
|
1768
|
+
if (haveDDAtomOrdering(*cr_) && s_min->s.ddp_count != cr_->dd->ddp_count)
|
|
1769
|
+
{
|
|
1770
|
+
/* Reload the old state */
|
|
1771
|
+
em_dd_partition_system(fpLog_,
|
|
1772
|
+
mdLog_,
|
|
1773
|
+
-1,
|
|
1774
|
+
cr_,
|
|
1775
|
+
topGlobal_,
|
|
1776
|
+
inputRec_,
|
|
1777
|
+
mdModulesNotifiers_,
|
|
1778
|
+
imdSession_,
|
|
1779
|
+
pullWork_,
|
|
1780
|
+
s_min,
|
|
1781
|
+
top_,
|
|
1782
|
+
mdAtoms_,
|
|
1783
|
+
fr_,
|
|
1784
|
+
virtualSites_,
|
|
1785
|
+
constr_,
|
|
1786
|
+
nrnb_,
|
|
1787
|
+
wallCycleCounters_);
|
|
1788
|
+
}
|
|
1789
|
+
|
|
1790
|
+
/* Take a trial step to this new point - new coords in s_b */
|
|
1791
|
+
do_em_step(
|
|
1792
|
+
cr_, inputRec_, mdatoms, s_min, b, s_min->s.cg_p.constArrayRefWithPadding(), s_b, constr_, -1);
|
|
1793
|
+
|
|
1794
|
+
neval++;
|
|
1795
|
+
/* Calculate energy for the trial step */
|
|
1796
|
+
energyEvaluator.run(s_b, mu_tot, vir, pres, -1, FALSE, step);
|
|
1797
|
+
observablesReducer.markAsReadyToReduce();
|
|
1798
|
+
|
|
1799
|
+
/* p does not change within a step, but since the domain decomposition
|
|
1800
|
+
* might change, we have to use cg_p of s_b here.
|
|
1801
|
+
*/
|
|
1802
|
+
const rvec* pb = s_b->s.cg_p.rvec_array();
|
|
1803
|
+
gmx::ArrayRef<const gmx::RVec> sfb = s_b->f.view().force();
|
|
1804
|
+
gpb = 0;
|
|
1805
|
+
for (int i = 0; i < mdatoms->homenr; i++)
|
|
1806
|
+
{
|
|
1807
|
+
for (m = 0; m < DIM; m++)
|
|
1808
|
+
{
|
|
1809
|
+
gpb -= pb[i][m] * sfb[i][m]; /* f is negative gradient, thus the sign */
|
|
1810
|
+
}
|
|
1811
|
+
}
|
|
1812
|
+
/* Sum the gradient along the line across CPUs */
|
|
1813
|
+
if (PAR(cr_))
|
|
1814
|
+
{
|
|
1815
|
+
gmx_sumd(1, &gpb, cr_);
|
|
1816
|
+
}
|
|
1817
|
+
|
|
1818
|
+
if (debug)
|
|
1819
|
+
{
|
|
1820
|
+
fprintf(debug, "CGE: EpotA %f EpotB %f EpotC %f gpb %f\n", s_a->epot, s_b->epot, s_c->epot, gpb);
|
|
1821
|
+
}
|
|
1822
|
+
|
|
1823
|
+
epot_repl = s_b->epot;
|
|
1824
|
+
|
|
1825
|
+
/* Keep one of the intervals based on the value of the derivative at the new point */
|
|
1826
|
+
if (gpb > 0)
|
|
1827
|
+
{
|
|
1828
|
+
/* Replace c endpoint with b */
|
|
1829
|
+
swap_em_state(&s_b, &s_c);
|
|
1830
|
+
c = b;
|
|
1831
|
+
gpc = gpb;
|
|
1832
|
+
}
|
|
1833
|
+
else
|
|
1834
|
+
{
|
|
1835
|
+
/* Replace a endpoint with b */
|
|
1836
|
+
swap_em_state(&s_b, &s_a);
|
|
1837
|
+
a = b;
|
|
1838
|
+
gpa = gpb;
|
|
1839
|
+
}
|
|
1840
|
+
|
|
1841
|
+
/*
|
|
1842
|
+
* Stop search as soon as we find a value smaller than the endpoints.
|
|
1843
|
+
* Never run more than 20 steps, no matter what.
|
|
1844
|
+
*/
|
|
1845
|
+
nminstep++;
|
|
1846
|
+
} while ((epot_repl > s_a->epot || epot_repl > s_c->epot) && (nminstep < 20));
|
|
1847
|
+
|
|
1848
|
+
if (std::fabs(epot_repl - s_min->epot) < fabs(s_min->epot) * GMX_REAL_EPS || nminstep >= 20)
|
|
1849
|
+
{
|
|
1850
|
+
/* OK. We couldn't find a significantly lower energy.
|
|
1851
|
+
* If beta==0 this was steepest descent, and then we give up.
|
|
1852
|
+
* If not, set beta=0 and restart with steepest descent before quitting.
|
|
1853
|
+
*/
|
|
1854
|
+
if (beta == 0.0)
|
|
1855
|
+
{
|
|
1856
|
+
/* Converged */
|
|
1857
|
+
converged = TRUE;
|
|
1858
|
+
break;
|
|
1859
|
+
}
|
|
1860
|
+
else
|
|
1861
|
+
{
|
|
1862
|
+
/* Reset memory before giving up */
|
|
1863
|
+
beta = 0.0;
|
|
1864
|
+
continue;
|
|
1865
|
+
}
|
|
1866
|
+
}
|
|
1867
|
+
|
|
1868
|
+
/* Select min energy state of A & C, put the best in B.
|
|
1869
|
+
*/
|
|
1870
|
+
if (s_c->epot < s_a->epot)
|
|
1871
|
+
{
|
|
1872
|
+
if (debug)
|
|
1873
|
+
{
|
|
1874
|
+
fprintf(debug, "CGE: C (%f) is lower than A (%f), moving C to B\n", s_c->epot, s_a->epot);
|
|
1875
|
+
}
|
|
1876
|
+
swap_em_state(&s_b, &s_c);
|
|
1877
|
+
gpb = gpc;
|
|
1878
|
+
}
|
|
1879
|
+
else
|
|
1880
|
+
{
|
|
1881
|
+
if (debug)
|
|
1882
|
+
{
|
|
1883
|
+
fprintf(debug, "CGE: A (%f) is lower than C (%f), moving A to B\n", s_a->epot, s_c->epot);
|
|
1884
|
+
}
|
|
1885
|
+
swap_em_state(&s_b, &s_a);
|
|
1886
|
+
gpb = gpa;
|
|
1887
|
+
}
|
|
1888
|
+
}
|
|
1889
|
+
else
|
|
1890
|
+
{
|
|
1891
|
+
if (debug)
|
|
1892
|
+
{
|
|
1893
|
+
fprintf(debug, "CGE: Found a lower energy %f, moving C to B\n", s_c->epot);
|
|
1894
|
+
}
|
|
1895
|
+
swap_em_state(&s_b, &s_c);
|
|
1896
|
+
gpb = gpc;
|
|
1897
|
+
}
|
|
1898
|
+
|
|
1899
|
+
/* new search direction */
|
|
1900
|
+
/* beta = 0 means forget all memory and restart with steepest descents. */
|
|
1901
|
+
if (nstcg && ((step % nstcg) == 0))
|
|
1902
|
+
{
|
|
1903
|
+
beta = 0.0;
|
|
1904
|
+
}
|
|
1905
|
+
else
|
|
1906
|
+
{
|
|
1907
|
+
/* s_min->fnorm cannot be zero, because then we would have converged
|
|
1908
|
+
* and broken out.
|
|
1909
|
+
*/
|
|
1910
|
+
|
|
1911
|
+
/* Polak-Ribiere update.
|
|
1912
|
+
* Change to fnorm2/fnorm2_old for Fletcher-Reeves
|
|
1913
|
+
*/
|
|
1914
|
+
beta = pr_beta(cr_, &inputRec_->opts, mdatoms, topGlobal_, s_min, s_b);
|
|
1915
|
+
}
|
|
1916
|
+
/* Limit beta to prevent oscillations */
|
|
1917
|
+
if (fabs(beta) > 5.0)
|
|
1918
|
+
{
|
|
1919
|
+
beta = 0.0;
|
|
1920
|
+
}
|
|
1921
|
+
|
|
1922
|
+
|
|
1923
|
+
/* update positions */
|
|
1924
|
+
swap_em_state(&s_min, &s_b);
|
|
1925
|
+
gpa = gpb;
|
|
1926
|
+
|
|
1927
|
+
/* Print it if necessary */
|
|
1928
|
+
if (MAIN(cr_))
|
|
1929
|
+
{
|
|
1930
|
+
if (mdrunOptions_.verbose)
|
|
1931
|
+
{
|
|
1932
|
+
double sqrtNumAtoms = sqrt(static_cast<double>(stateGlobal_->numAtoms()));
|
|
1933
|
+
fprintf(stderr,
|
|
1934
|
+
"\rStep %d, Epot=%12.6e, Fnorm=%9.3e, Fmax=%9.3e (atom %d)\n",
|
|
1935
|
+
step,
|
|
1936
|
+
s_min->epot,
|
|
1937
|
+
s_min->fnorm / sqrtNumAtoms,
|
|
1938
|
+
s_min->fmax,
|
|
1939
|
+
s_min->a_fmax + 1);
|
|
1940
|
+
fflush(stderr);
|
|
1941
|
+
}
|
|
1942
|
+
/* Store the new (lower) energies */
|
|
1943
|
+
matrix nullBox = {};
|
|
1944
|
+
energyOutput.addDataAtEnergyStep(false,
|
|
1945
|
+
false,
|
|
1946
|
+
static_cast<double>(step),
|
|
1947
|
+
mdatoms->tmass,
|
|
1948
|
+
enerd_,
|
|
1949
|
+
nullptr,
|
|
1950
|
+
nullBox,
|
|
1951
|
+
PTCouplingArrays(),
|
|
1952
|
+
0,
|
|
1953
|
+
vir,
|
|
1954
|
+
pres,
|
|
1955
|
+
nullptr,
|
|
1956
|
+
mu_tot,
|
|
1957
|
+
constr_);
|
|
1958
|
+
|
|
1959
|
+
do_log = do_per_step(step, inputRec_->nstlog);
|
|
1960
|
+
do_ene = do_per_step(step, inputRec_->nstenergy);
|
|
1961
|
+
|
|
1962
|
+
imdSession_->fillEnergyRecord(step, TRUE);
|
|
1963
|
+
|
|
1964
|
+
if (do_log)
|
|
1965
|
+
{
|
|
1966
|
+
EnergyOutput::printHeader(fpLog_, step, step);
|
|
1967
|
+
}
|
|
1968
|
+
energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf),
|
|
1969
|
+
do_ene,
|
|
1970
|
+
FALSE,
|
|
1971
|
+
FALSE,
|
|
1972
|
+
do_log ? fpLog_ : nullptr,
|
|
1973
|
+
step,
|
|
1974
|
+
step,
|
|
1975
|
+
fr_->fcdata.get(),
|
|
1976
|
+
nullptr);
|
|
1977
|
+
}
|
|
1978
|
+
|
|
1979
|
+
/* Send energies and positions to the IMD client if bIMD is TRUE. */
|
|
1980
|
+
if (MAIN(cr_) && imdSession_->run(step, TRUE, stateGlobal_->box, stateGlobal_->x, 0))
|
|
1981
|
+
{
|
|
1982
|
+
imdSession_->sendPositionsAndEnergies();
|
|
1983
|
+
}
|
|
1984
|
+
|
|
1985
|
+
/* Stop when the maximum force lies below tolerance.
|
|
1986
|
+
* If we have reached machine precision, converged is already set to true.
|
|
1987
|
+
*/
|
|
1988
|
+
converged = converged || (s_min->fmax < inputRec_->em_tol);
|
|
1989
|
+
observablesReducer.markAsReadyToReduce();
|
|
1990
|
+
} /* End of the loop */
|
|
1991
|
+
|
|
1992
|
+
if (converged)
|
|
1993
|
+
{
|
|
1994
|
+
step--; /* we never took that last step in this case */
|
|
1995
|
+
}
|
|
1996
|
+
if (s_min->fmax > inputRec_->em_tol)
|
|
1997
|
+
{
|
|
1998
|
+
if (MAIN(cr_))
|
|
1999
|
+
{
|
|
2000
|
+
warn_step(fpLog_, inputRec_->em_tol, s_min->fmax, step - 1 == number_steps, FALSE);
|
|
2001
|
+
}
|
|
2002
|
+
converged = FALSE;
|
|
2003
|
+
}
|
|
2004
|
+
|
|
2005
|
+
if (MAIN(cr_))
|
|
2006
|
+
{
|
|
2007
|
+
/* If we printed energy and/or logfile last step (which was the last step)
|
|
2008
|
+
* we don't have to do it again, but otherwise print the final values.
|
|
2009
|
+
*/
|
|
2010
|
+
if (!do_log)
|
|
2011
|
+
{
|
|
2012
|
+
/* Write final value to log since we didn't do anything the last step */
|
|
2013
|
+
EnergyOutput::printHeader(fpLog_, step, step);
|
|
2014
|
+
}
|
|
2015
|
+
if (!do_ene || !do_log)
|
|
2016
|
+
{
|
|
2017
|
+
/* Write final energy file entries */
|
|
2018
|
+
energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf),
|
|
2019
|
+
!do_ene,
|
|
2020
|
+
FALSE,
|
|
2021
|
+
FALSE,
|
|
2022
|
+
!do_log ? fpLog_ : nullptr,
|
|
2023
|
+
step,
|
|
2024
|
+
step,
|
|
2025
|
+
fr_->fcdata.get(),
|
|
2026
|
+
nullptr);
|
|
2027
|
+
}
|
|
2028
|
+
}
|
|
2029
|
+
|
|
2030
|
+
/* Print some stuff... */
|
|
2031
|
+
if (MAIN(cr_))
|
|
2032
|
+
{
|
|
2033
|
+
fprintf(stderr, "\nwriting lowest energy coordinates.\n");
|
|
2034
|
+
}
|
|
2035
|
+
|
|
2036
|
+
/* IMPORTANT!
|
|
2037
|
+
* For accurate normal mode calculation it is imperative that we
|
|
2038
|
+
* store the last conformation into the full precision binary trajectory.
|
|
2039
|
+
*
|
|
2040
|
+
* However, we should only do it if we did NOT already write this step
|
|
2041
|
+
* above (which we did if do_x or do_f was true).
|
|
2042
|
+
*/
|
|
2043
|
+
/* Note that with 0 < nstfout != nstxout we can end up with two frames
|
|
2044
|
+
* in the trajectory with the same step number.
|
|
2045
|
+
*/
|
|
2046
|
+
do_x = !do_per_step(step, inputRec_->nstxout);
|
|
2047
|
+
do_f = (inputRec_->nstfout > 0 && !do_per_step(step, inputRec_->nstfout));
|
|
2048
|
+
|
|
2049
|
+
write_em_traj(
|
|
2050
|
+
fpLog_, cr_, outf, do_x, do_f, ftp2fn(efSTO, nFile_, fnm_), topGlobal_, inputRec_, step, s_min, stateGlobal_, observablesHistory_);
|
|
2051
|
+
|
|
2052
|
+
|
|
2053
|
+
if (MAIN(cr_))
|
|
2054
|
+
{
|
|
2055
|
+
double sqrtNumAtoms = sqrt(static_cast<double>(stateGlobal_->numAtoms()));
|
|
2056
|
+
print_converged(stderr, CG, inputRec_->em_tol, step, converged, number_steps, s_min, sqrtNumAtoms);
|
|
2057
|
+
print_converged(fpLog_, CG, inputRec_->em_tol, step, converged, number_steps, s_min, sqrtNumAtoms);
|
|
2058
|
+
|
|
2059
|
+
fprintf(fpLog_, "\nPerformed %d energy evaluations in total.\n", neval);
|
|
2060
|
+
}
|
|
2061
|
+
|
|
2062
|
+
finish_em(cr_, outf, wallTimeAccounting_, wallCycleCounters_);
|
|
2063
|
+
|
|
2064
|
+
/* To print the actual number of steps we needed somewhere */
|
|
2065
|
+
walltime_accounting_set_nsteps_done(wallTimeAccounting_, step);
|
|
2066
|
+
}
|
|
2067
|
+
|
|
2068
|
+
|
|
2069
|
+
void LegacySimulator::do_lbfgs()
|
|
2070
|
+
{
|
|
2071
|
+
static const char* LBFGS = "Low-Memory BFGS Minimizer";
|
|
2072
|
+
em_state_t ems;
|
|
2073
|
+
gmx_global_stat_t gstat;
|
|
2074
|
+
auto* mdatoms = mdAtoms_->mdatoms();
|
|
2075
|
+
|
|
2076
|
+
GMX_LOG(mdLog_.info)
|
|
2077
|
+
.asParagraph()
|
|
2078
|
+
.appendText(
|
|
2079
|
+
"Note that activating L-BFGS energy minimization via the "
|
|
2080
|
+
"integrator .mdp option and the command gmx mdrun may "
|
|
2081
|
+
"be available in a different form in a future version of GROMACS, "
|
|
2082
|
+
"e.g. gmx minimize and an .mdp option.");
|
|
2083
|
+
|
|
2084
|
+
if (haveDDAtomOrdering(*cr_))
|
|
2085
|
+
{
|
|
2086
|
+
gmx_fatal(FARGS, "L_BFGS is currently not supported");
|
|
2087
|
+
}
|
|
2088
|
+
if (PAR(cr_))
|
|
2089
|
+
{
|
|
2090
|
+
gmx_fatal(FARGS, "L-BFGS minimization only supports a single rank");
|
|
2091
|
+
}
|
|
2092
|
+
|
|
2093
|
+
if (nullptr != constr_)
|
|
2094
|
+
{
|
|
2095
|
+
gmx_fatal(
|
|
2096
|
+
FARGS,
|
|
2097
|
+
"The combination of constraints and L-BFGS minimization is not implemented. Either "
|
|
2098
|
+
"do not use constraints, or use another minimizer (e.g. steepest descent).");
|
|
2099
|
+
}
|
|
2100
|
+
|
|
2101
|
+
const int n = 3 * stateGlobal_->numAtoms();
|
|
2102
|
+
const int nmaxcorr = inputRec_->nbfgscorr;
|
|
2103
|
+
|
|
2104
|
+
std::vector<real> p(n);
|
|
2105
|
+
std::vector<real> rho(nmaxcorr);
|
|
2106
|
+
std::vector<real> alpha(nmaxcorr);
|
|
2107
|
+
|
|
2108
|
+
std::vector<std::vector<real>> dx(nmaxcorr);
|
|
2109
|
+
for (auto& dxCorr : dx)
|
|
2110
|
+
{
|
|
2111
|
+
dxCorr.resize(n);
|
|
2112
|
+
}
|
|
2113
|
+
|
|
2114
|
+
std::vector<std::vector<real>> dg(nmaxcorr);
|
|
2115
|
+
for (auto& dgCorr : dg)
|
|
2116
|
+
{
|
|
2117
|
+
dgCorr.resize(n);
|
|
2118
|
+
}
|
|
2119
|
+
|
|
2120
|
+
int step = 0;
|
|
2121
|
+
int neval = 0;
|
|
2122
|
+
|
|
2123
|
+
ObservablesReducer observablesReducer = observablesReducerBuilder_->build();
|
|
2124
|
+
|
|
2125
|
+
/* Init em */
|
|
2126
|
+
init_em(fpLog_,
|
|
2127
|
+
mdLog_,
|
|
2128
|
+
LBFGS,
|
|
2129
|
+
cr_,
|
|
2130
|
+
ms_, /* PLUMED */
|
|
2131
|
+
inputRec_,
|
|
2132
|
+
mdModulesNotifiers_,
|
|
2133
|
+
imdSession_,
|
|
2134
|
+
pullWork_,
|
|
2135
|
+
stateGlobal_,
|
|
2136
|
+
topGlobal_,
|
|
2137
|
+
&ems,
|
|
2138
|
+
top_,
|
|
2139
|
+
nrnb_,
|
|
2140
|
+
fr_,
|
|
2141
|
+
mdAtoms_,
|
|
2142
|
+
&gstat,
|
|
2143
|
+
virtualSites_,
|
|
2144
|
+
constr_,
|
|
2145
|
+
nullptr);
|
|
2146
|
+
const bool simulationsShareState = false;
|
|
2147
|
+
gmx_mdoutf* outf = init_mdoutf(fpLog_,
|
|
2148
|
+
nFile_,
|
|
2149
|
+
fnm_,
|
|
2150
|
+
mdrunOptions_,
|
|
2151
|
+
cr_,
|
|
2152
|
+
outputProvider_,
|
|
2153
|
+
mdModulesNotifiers_,
|
|
2154
|
+
inputRec_,
|
|
2155
|
+
topGlobal_,
|
|
2156
|
+
nullptr,
|
|
2157
|
+
wallCycleCounters_,
|
|
2158
|
+
StartingBehavior::NewSimulation,
|
|
2159
|
+
simulationsShareState,
|
|
2160
|
+
ms_);
|
|
2161
|
+
gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf),
|
|
2162
|
+
topGlobal_,
|
|
2163
|
+
*inputRec_,
|
|
2164
|
+
pullWork_,
|
|
2165
|
+
nullptr,
|
|
2166
|
+
false,
|
|
2167
|
+
StartingBehavior::NewSimulation,
|
|
2168
|
+
simulationsShareState,
|
|
2169
|
+
mdModulesNotifiers_);
|
|
2170
|
+
|
|
2171
|
+
const int start = 0;
|
|
2172
|
+
const int end = mdatoms->homenr;
|
|
2173
|
+
|
|
2174
|
+
/* We need 4 working states */
|
|
2175
|
+
em_state_t s0{}, s1{}, s2{}, s3{};
|
|
2176
|
+
em_state_t* sa = &s0;
|
|
2177
|
+
em_state_t* sb = &s1;
|
|
2178
|
+
em_state_t* sc = &s2;
|
|
2179
|
+
em_state_t* last = &s3;
|
|
2180
|
+
/* Initialize by copying the state from ems (we could skip x and f here) */
|
|
2181
|
+
*sa = ems;
|
|
2182
|
+
*sb = ems;
|
|
2183
|
+
*sc = ems;
|
|
2184
|
+
|
|
2185
|
+
/* Print to log file */
|
|
2186
|
+
print_em_start(fpLog_, cr_, wallTimeAccounting_, wallCycleCounters_, LBFGS);
|
|
2187
|
+
|
|
2188
|
+
/* Max number of steps */
|
|
2189
|
+
const int number_steps = inputRec_->nsteps;
|
|
2190
|
+
|
|
2191
|
+
/* Create a 3*natoms index to tell whether each degree of freedom is frozen */
|
|
2192
|
+
std::vector<bool> frozen(n);
|
|
2193
|
+
int gf = 0;
|
|
2194
|
+
for (int i = start; i < end; i++)
|
|
2195
|
+
{
|
|
2196
|
+
if (!mdatoms->cFREEZE.empty())
|
|
2197
|
+
{
|
|
2198
|
+
gf = mdatoms->cFREEZE[i];
|
|
2199
|
+
}
|
|
2200
|
+
for (int m = 0; m < DIM; m++)
|
|
2201
|
+
{
|
|
2202
|
+
frozen[3 * i + m] = (inputRec_->opts.nFreeze[gf][m] != 0);
|
|
2203
|
+
}
|
|
2204
|
+
}
|
|
2205
|
+
if (MAIN(cr_))
|
|
2206
|
+
{
|
|
2207
|
+
sp_header(stderr, LBFGS, inputRec_->em_tol, number_steps);
|
|
2208
|
+
}
|
|
2209
|
+
if (fpLog_)
|
|
2210
|
+
{
|
|
2211
|
+
sp_header(fpLog_, LBFGS, inputRec_->em_tol, number_steps);
|
|
2212
|
+
}
|
|
2213
|
+
|
|
2214
|
+
if (virtualSites_)
|
|
2215
|
+
{
|
|
2216
|
+
virtualSites_->construct(stateGlobal_->x, {}, stateGlobal_->box, VSiteOperation::Positions);
|
|
2217
|
+
}
|
|
2218
|
+
|
|
2219
|
+
/* Call the force routine and some auxiliary (neighboursearching etc.) */
|
|
2220
|
+
/* do_force always puts the charge groups in the box and shifts again
|
|
2221
|
+
* We do not unshift, so molecules are always whole
|
|
2222
|
+
*/
|
|
2223
|
+
neval++;
|
|
2224
|
+
EnergyEvaluator energyEvaluator{ fpLog_,
|
|
2225
|
+
mdLog_,
|
|
2226
|
+
cr_,
|
|
2227
|
+
ms_,
|
|
2228
|
+
topGlobal_,
|
|
2229
|
+
top_,
|
|
2230
|
+
inputRec_,
|
|
2231
|
+
mdModulesNotifiers_,
|
|
2232
|
+
imdSession_,
|
|
2233
|
+
pullWork_,
|
|
2234
|
+
enforcedRotation_,
|
|
2235
|
+
nrnb_,
|
|
2236
|
+
wallCycleCounters_,
|
|
2237
|
+
gstat,
|
|
2238
|
+
&observablesReducer,
|
|
2239
|
+
virtualSites_,
|
|
2240
|
+
constr_,
|
|
2241
|
+
mdAtoms_,
|
|
2242
|
+
fr_,
|
|
2243
|
+
runScheduleWork_,
|
|
2244
|
+
enerd_,
|
|
2245
|
+
-1,
|
|
2246
|
+
{} };
|
|
2247
|
+
rvec mu_tot;
|
|
2248
|
+
tensor vir;
|
|
2249
|
+
tensor pres;
|
|
2250
|
+
energyEvaluator.run(&ems, mu_tot, vir, pres, -1, TRUE, step);
|
|
2251
|
+
|
|
2252
|
+
if (MAIN(cr_))
|
|
2253
|
+
{
|
|
2254
|
+
/* Copy stuff to the energy bin for easy printing etc. */
|
|
2255
|
+
matrix nullBox = {};
|
|
2256
|
+
energyOutput.addDataAtEnergyStep(false,
|
|
2257
|
+
false,
|
|
2258
|
+
static_cast<double>(step),
|
|
2259
|
+
mdatoms->tmass,
|
|
2260
|
+
enerd_,
|
|
2261
|
+
nullptr,
|
|
2262
|
+
nullBox,
|
|
2263
|
+
PTCouplingArrays(),
|
|
2264
|
+
0,
|
|
2265
|
+
vir,
|
|
2266
|
+
pres,
|
|
2267
|
+
nullptr,
|
|
2268
|
+
mu_tot,
|
|
2269
|
+
constr_);
|
|
2270
|
+
|
|
2271
|
+
EnergyOutput::printHeader(fpLog_, step, step);
|
|
2272
|
+
energyOutput.printStepToEnergyFile(
|
|
2273
|
+
mdoutf_get_fp_ene(outf), TRUE, FALSE, FALSE, fpLog_, step, step, fr_->fcdata.get(), nullptr);
|
|
2274
|
+
}
|
|
2275
|
+
|
|
2276
|
+
/* Set the initial step.
|
|
2277
|
+
* since it will be multiplied by the non-normalized search direction
|
|
2278
|
+
* vector (force vector the first time), we scale it by the
|
|
2279
|
+
* norm of the force.
|
|
2280
|
+
*/
|
|
2281
|
+
|
|
2282
|
+
if (MAIN(cr_))
|
|
2283
|
+
{
|
|
2284
|
+
double sqrtNumAtoms = sqrt(static_cast<double>(stateGlobal_->numAtoms()));
|
|
2285
|
+
fprintf(stderr, "Using %d BFGS correction steps.\n\n", nmaxcorr);
|
|
2286
|
+
fprintf(stderr, " F-max = %12.5e on atom %d\n", ems.fmax, ems.a_fmax + 1);
|
|
2287
|
+
fprintf(stderr, " F-Norm = %12.5e\n", ems.fnorm / sqrtNumAtoms);
|
|
2288
|
+
fprintf(stderr, "\n");
|
|
2289
|
+
/* and copy to the log file too... */
|
|
2290
|
+
fprintf(fpLog_, "Using %d BFGS correction steps.\n\n", nmaxcorr);
|
|
2291
|
+
fprintf(fpLog_, " F-max = %12.5e on atom %d\n", ems.fmax, ems.a_fmax + 1);
|
|
2292
|
+
fprintf(fpLog_, " F-Norm = %12.5e\n", ems.fnorm / sqrtNumAtoms);
|
|
2293
|
+
fprintf(fpLog_, "\n");
|
|
2294
|
+
}
|
|
2295
|
+
|
|
2296
|
+
// Point is an index to the memory of search directions, where 0 is the first one.
|
|
2297
|
+
int point = 0;
|
|
2298
|
+
|
|
2299
|
+
// Set initial search direction to the force (-gradient), or 0 for frozen particles.
|
|
2300
|
+
real* fInit = static_cast<real*>(ems.f.view().force().data()[0]);
|
|
2301
|
+
for (int i = 0; i < n; i++)
|
|
2302
|
+
{
|
|
2303
|
+
if (!frozen[i])
|
|
2304
|
+
{
|
|
2305
|
+
dx[point][i] = fInit[i]; /* Initial search direction */
|
|
2306
|
+
}
|
|
2307
|
+
else
|
|
2308
|
+
{
|
|
2309
|
+
dx[point][i] = 0;
|
|
2310
|
+
}
|
|
2311
|
+
}
|
|
2312
|
+
|
|
2313
|
+
// Stepsize will be modified during the search, and actually it is not critical
|
|
2314
|
+
// (the main efficiency in the algorithm comes from changing directions), but
|
|
2315
|
+
// we still need an initial value, so estimate it as the inverse of the norm
|
|
2316
|
+
// so we take small steps where the potential fluctuates a lot.
|
|
2317
|
+
double stepsize = 1.0 / ems.fnorm;
|
|
2318
|
+
|
|
2319
|
+
/* Start the loop over BFGS steps.
|
|
2320
|
+
* Each successful step is counted, and we continue until
|
|
2321
|
+
* we either converge or reach the max number of steps.
|
|
2322
|
+
*/
|
|
2323
|
+
|
|
2324
|
+
bool do_log = true;
|
|
2325
|
+
bool do_ene = true;
|
|
2326
|
+
|
|
2327
|
+
int ncorr = 0;
|
|
2328
|
+
|
|
2329
|
+
/* Set the gradient from the force */
|
|
2330
|
+
bool converged = false;
|
|
2331
|
+
for (int step = 0; (number_steps < 0 || step <= number_steps) && !converged; step++)
|
|
2332
|
+
{
|
|
2333
|
+
|
|
2334
|
+
/* Write coordinates if necessary */
|
|
2335
|
+
const bool do_x = do_per_step(step, inputRec_->nstxout);
|
|
2336
|
+
const bool do_f = do_per_step(step, inputRec_->nstfout);
|
|
2337
|
+
|
|
2338
|
+
int mdof_flags = 0;
|
|
2339
|
+
if (do_x)
|
|
2340
|
+
{
|
|
2341
|
+
mdof_flags |= MDOF_X;
|
|
2342
|
+
}
|
|
2343
|
+
|
|
2344
|
+
if (do_f)
|
|
2345
|
+
{
|
|
2346
|
+
mdof_flags |= MDOF_F;
|
|
2347
|
+
}
|
|
2348
|
+
|
|
2349
|
+
if (inputRec_->bIMD)
|
|
2350
|
+
{
|
|
2351
|
+
mdof_flags |= MDOF_IMD;
|
|
2352
|
+
}
|
|
2353
|
+
|
|
2354
|
+
gmx::WriteCheckpointDataHolder checkpointDataHolder;
|
|
2355
|
+
mdoutf_write_to_trajectory_files(fpLog_,
|
|
2356
|
+
cr_,
|
|
2357
|
+
outf,
|
|
2358
|
+
mdof_flags,
|
|
2359
|
+
topGlobal_.natoms,
|
|
2360
|
+
step,
|
|
2361
|
+
static_cast<real>(step),
|
|
2362
|
+
&ems.s,
|
|
2363
|
+
stateGlobal_,
|
|
2364
|
+
observablesHistory_,
|
|
2365
|
+
ems.f.view().force(),
|
|
2366
|
+
&checkpointDataHolder);
|
|
2367
|
+
|
|
2368
|
+
/* Do the linesearching in the direction dx[point][0..(n-1)] */
|
|
2369
|
+
|
|
2370
|
+
/* make s a pointer to current search direction - point=0 first time we get here */
|
|
2371
|
+
gmx::ArrayRef<const real> s = dx[point];
|
|
2372
|
+
|
|
2373
|
+
const real* xx = static_cast<real*>(ems.s.x.rvec_array()[0]);
|
|
2374
|
+
const real* ff = static_cast<real*>(ems.f.view().force().data()[0]);
|
|
2375
|
+
|
|
2376
|
+
// calculate line gradient in position A
|
|
2377
|
+
double gpa = 0;
|
|
2378
|
+
for (int i = 0; i < n; i++)
|
|
2379
|
+
{
|
|
2380
|
+
gpa -= s[i] * ff[i];
|
|
2381
|
+
}
|
|
2382
|
+
|
|
2383
|
+
/* Calculate minimum allowed stepsize along the line, before the average (norm)
|
|
2384
|
+
* relative change in coordinate is smaller than precision
|
|
2385
|
+
*/
|
|
2386
|
+
double minstep = 0;
|
|
2387
|
+
for (int i = 0; i < n; i++)
|
|
2388
|
+
{
|
|
2389
|
+
double tmp = fabs(xx[i]);
|
|
2390
|
+
if (tmp < 1.0)
|
|
2391
|
+
{
|
|
2392
|
+
tmp = 1.0;
|
|
2393
|
+
}
|
|
2394
|
+
tmp = s[i] / tmp;
|
|
2395
|
+
minstep += tmp * tmp;
|
|
2396
|
+
}
|
|
2397
|
+
minstep = GMX_REAL_EPS / sqrt(minstep / n);
|
|
2398
|
+
|
|
2399
|
+
if (stepsize < minstep)
|
|
2400
|
+
{
|
|
2401
|
+
converged = true;
|
|
2402
|
+
break;
|
|
2403
|
+
}
|
|
2404
|
+
|
|
2405
|
+
// Before taking any steps along the line, store the old position
|
|
2406
|
+
*last = ems;
|
|
2407
|
+
real* lastx = static_cast<real*>(last->s.x.data()[0]);
|
|
2408
|
+
real* lastf = static_cast<real*>(last->f.view().force().data()[0]);
|
|
2409
|
+
const real Epot0 = ems.epot;
|
|
2410
|
+
|
|
2411
|
+
*sa = ems;
|
|
2412
|
+
|
|
2413
|
+
/* Take a step downhill.
|
|
2414
|
+
* In theory, we should find the actual minimum of the function in this
|
|
2415
|
+
* direction, somewhere along the line.
|
|
2416
|
+
* That is quite possible, but it turns out to take 5-10 function evaluations
|
|
2417
|
+
* for each line. However, we dont really need to find the exact minimum -
|
|
2418
|
+
* it is much better to start a new BFGS step in a modified direction as soon
|
|
2419
|
+
* as we are close to it. This will save a lot of energy evaluations.
|
|
2420
|
+
*
|
|
2421
|
+
* In practice, we just try to take a single step.
|
|
2422
|
+
* If it worked (i.e. lowered the energy), we increase the stepsize but
|
|
2423
|
+
* continue straight to the next BFGS step without trying to find any minimum,
|
|
2424
|
+
* i.e. we change the search direction too. If the line was smooth, it is
|
|
2425
|
+
* likely we are in a smooth region, and then it makes sense to take longer
|
|
2426
|
+
* steps in the modified search direction too.
|
|
2427
|
+
*
|
|
2428
|
+
* If it didn't work (higher energy), there must be a minimum somewhere between
|
|
2429
|
+
* the old position and the new one. Then we need to start by finding a lower
|
|
2430
|
+
* value before we change search direction. Since the energy was apparently
|
|
2431
|
+
* quite rough, we need to decrease the step size.
|
|
2432
|
+
*
|
|
2433
|
+
* Due to the finite numerical accuracy, it turns out that it is a good idea
|
|
2434
|
+
* to accept a SMALL increase in energy, if the derivative is still downhill.
|
|
2435
|
+
* This leads to lower final energies in the tests I've done. / Erik
|
|
2436
|
+
*/
|
|
2437
|
+
|
|
2438
|
+
// State "A" is the first position along the line.
|
|
2439
|
+
// reference position along line is initially zero
|
|
2440
|
+
real a = 0;
|
|
2441
|
+
|
|
2442
|
+
// Check stepsize first. We do not allow displacements
|
|
2443
|
+
// larger than emstep.
|
|
2444
|
+
//
|
|
2445
|
+
real c;
|
|
2446
|
+
real maxdelta;
|
|
2447
|
+
do
|
|
2448
|
+
{
|
|
2449
|
+
// Pick a new position C by adding stepsize to A.
|
|
2450
|
+
c = a + stepsize;
|
|
2451
|
+
|
|
2452
|
+
// Calculate what the largest change in any individual coordinate
|
|
2453
|
+
// would be (translation along line * gradient along line)
|
|
2454
|
+
maxdelta = 0;
|
|
2455
|
+
for (int i = 0; i < n; i++)
|
|
2456
|
+
{
|
|
2457
|
+
real delta = c * s[i];
|
|
2458
|
+
if (delta > maxdelta)
|
|
2459
|
+
{
|
|
2460
|
+
maxdelta = delta;
|
|
2461
|
+
}
|
|
2462
|
+
}
|
|
2463
|
+
// If any displacement is larger than the stepsize limit, reduce the step
|
|
2464
|
+
if (maxdelta > inputRec_->em_stepsize)
|
|
2465
|
+
{
|
|
2466
|
+
stepsize *= 0.1;
|
|
2467
|
+
}
|
|
2468
|
+
} while (maxdelta > inputRec_->em_stepsize);
|
|
2469
|
+
|
|
2470
|
+
// Take a trial step and move the coordinate array xc[] to position C
|
|
2471
|
+
real* xc = static_cast<real*>(sc->s.x.rvec_array()[0]);
|
|
2472
|
+
for (int i = 0; i < n; i++)
|
|
2473
|
+
{
|
|
2474
|
+
xc[i] = lastx[i] + c * s[i];
|
|
2475
|
+
}
|
|
2476
|
+
|
|
2477
|
+
neval++;
|
|
2478
|
+
// Calculate energy for the trial step in position C
|
|
2479
|
+
energyEvaluator.run(sc, mu_tot, vir, pres, step, FALSE, step);
|
|
2480
|
+
|
|
2481
|
+
// Calc line gradient in position C
|
|
2482
|
+
real* fc = static_cast<real*>(sc->f.view().force()[0]);
|
|
2483
|
+
double gpc = 0;
|
|
2484
|
+
for (int i = 0; i < n; i++)
|
|
2485
|
+
{
|
|
2486
|
+
gpc -= s[i] * fc[i]; /* f is negative gradient, thus the sign */
|
|
2487
|
+
}
|
|
2488
|
+
/* Sum the gradient along the line across CPUs */
|
|
2489
|
+
if (PAR(cr_))
|
|
2490
|
+
{
|
|
2491
|
+
gmx_sumd(1, &gpc, cr_);
|
|
2492
|
+
}
|
|
2493
|
+
|
|
2494
|
+
// This is the max amount of increase in energy we tolerate.
|
|
2495
|
+
// By allowing VERY small changes (close to numerical precision) we
|
|
2496
|
+
// frequently find even better (lower) final energies.
|
|
2497
|
+
double tmp = std::sqrt(GMX_REAL_EPS) * fabs(sa->epot);
|
|
2498
|
+
|
|
2499
|
+
// Accept the step if the energy is lower in the new position C (compared to A),
|
|
2500
|
+
// or if it is not significantly higher and the line derivative is still negative.
|
|
2501
|
+
bool foundlower = sc->epot < sa->epot || (gpc < 0 && sc->epot < (sa->epot + tmp));
|
|
2502
|
+
// If true, great, we found a better energy. We no longer try to alter the
|
|
2503
|
+
// stepsize, but simply accept this new better position. The we select a new
|
|
2504
|
+
// search direction instead, which will be much more efficient than continuing
|
|
2505
|
+
// to take smaller steps along a line. Set fnorm based on the new C position,
|
|
2506
|
+
// which will be used to update the stepsize to 1/fnorm further down.
|
|
2507
|
+
|
|
2508
|
+
// If false, the energy is NOT lower in point C, i.e. it will be the same
|
|
2509
|
+
// or higher than in point A. In this case it is pointless to move to point C,
|
|
2510
|
+
// so we will have to do more iterations along the same line to find a smaller
|
|
2511
|
+
// value in the interval [A=0.0,C].
|
|
2512
|
+
// Here, A is still 0.0, but that will change when we do a search in the interval
|
|
2513
|
+
// [0.0,C] below. That search we will do by interpolation or bisection rather
|
|
2514
|
+
// than with the stepsize, so no need to modify it. For the next search direction
|
|
2515
|
+
// it will be reset to 1/fnorm anyway.
|
|
2516
|
+
|
|
2517
|
+
double step_taken;
|
|
2518
|
+
if (!foundlower)
|
|
2519
|
+
{
|
|
2520
|
+
// OK, if we didn't find a lower value we will have to locate one now - there must
|
|
2521
|
+
// be one in the interval [a,c].
|
|
2522
|
+
// The same thing is valid here, though: Don't spend dozens of iterations to find
|
|
2523
|
+
// the line minimum. We try to interpolate based on the derivative at the endpoints,
|
|
2524
|
+
// and only continue until we find a lower value. In most cases this means 1-2 iterations.
|
|
2525
|
+
// I also have a safeguard for potentially really pathological functions so we never
|
|
2526
|
+
// take more than 20 steps before we give up.
|
|
2527
|
+
// If we already found a lower value we just skip this step and continue to the update.
|
|
2528
|
+
real fnorm = 0;
|
|
2529
|
+
int nminstep = 0;
|
|
2530
|
+
do
|
|
2531
|
+
{
|
|
2532
|
+
// Select a new trial point B in the interval [A,C].
|
|
2533
|
+
// If the derivatives at points a & c have different sign we interpolate to zero,
|
|
2534
|
+
// otherwise just do a bisection since there might be multiple minima/maxima
|
|
2535
|
+
// inside the interval.
|
|
2536
|
+
real b;
|
|
2537
|
+
if (gpa < 0 && gpc > 0)
|
|
2538
|
+
{
|
|
2539
|
+
b = a + gpa * (a - c) / (gpc - gpa);
|
|
2540
|
+
}
|
|
2541
|
+
else
|
|
2542
|
+
{
|
|
2543
|
+
b = 0.5 * (a + c);
|
|
2544
|
+
}
|
|
2545
|
+
|
|
2546
|
+
/* safeguard if interpolation close to machine accuracy causes errors:
|
|
2547
|
+
* never go outside the interval
|
|
2548
|
+
*/
|
|
2549
|
+
if (b <= a || b >= c)
|
|
2550
|
+
{
|
|
2551
|
+
b = 0.5 * (a + c);
|
|
2552
|
+
}
|
|
2553
|
+
|
|
2554
|
+
// Take a trial step to point B
|
|
2555
|
+
real* xb = static_cast<real*>(sb->s.x.rvec_array()[0]);
|
|
2556
|
+
for (int i = 0; i < n; i++)
|
|
2557
|
+
{
|
|
2558
|
+
xb[i] = lastx[i] + b * s[i];
|
|
2559
|
+
}
|
|
2560
|
+
|
|
2561
|
+
neval++;
|
|
2562
|
+
// Calculate energy for the trial step in point B
|
|
2563
|
+
energyEvaluator.run(sb, mu_tot, vir, pres, step, FALSE, step);
|
|
2564
|
+
fnorm = sb->fnorm;
|
|
2565
|
+
|
|
2566
|
+
// Calculate gradient in point B
|
|
2567
|
+
real* fb = static_cast<real*>(sb->f.view().force()[0]);
|
|
2568
|
+
double gpb = 0;
|
|
2569
|
+
for (int i = 0; i < n; i++)
|
|
2570
|
+
{
|
|
2571
|
+
gpb -= s[i] * fb[i]; /* f is negative gradient, thus the sign */
|
|
2572
|
+
}
|
|
2573
|
+
/* Sum the gradient along the line across CPUs */
|
|
2574
|
+
if (PAR(cr_))
|
|
2575
|
+
{
|
|
2576
|
+
gmx_sumd(1, &gpb, cr_);
|
|
2577
|
+
}
|
|
2578
|
+
|
|
2579
|
+
// Keep one of the intervals [A,B] or [B,C] based on the value of the derivative
|
|
2580
|
+
// at the new point B, and rename the endpoints of this new interval A and C.
|
|
2581
|
+
if (gpb > 0)
|
|
2582
|
+
{
|
|
2583
|
+
/* Replace c endpoint with b */
|
|
2584
|
+
c = b;
|
|
2585
|
+
/* copy state b to c */
|
|
2586
|
+
*sc = *sb;
|
|
2587
|
+
}
|
|
2588
|
+
else
|
|
2589
|
+
{
|
|
2590
|
+
/* Replace a endpoint with b */
|
|
2591
|
+
a = b;
|
|
2592
|
+
/* copy state b to a */
|
|
2593
|
+
*sa = *sb;
|
|
2594
|
+
}
|
|
2595
|
+
|
|
2596
|
+
/*
|
|
2597
|
+
* Stop search as soon as we find a value smaller than the endpoints,
|
|
2598
|
+
* or if the tolerance is below machine precision.
|
|
2599
|
+
* Never run more than 20 steps, no matter what.
|
|
2600
|
+
*/
|
|
2601
|
+
nminstep++;
|
|
2602
|
+
} while ((sb->epot > sa->epot || sb->epot > sc->epot) && (nminstep < 20));
|
|
2603
|
+
|
|
2604
|
+
if (std::fabs(sb->epot - Epot0) < GMX_REAL_EPS || nminstep >= 20)
|
|
2605
|
+
{
|
|
2606
|
+
/* OK. We couldn't find a significantly lower energy.
|
|
2607
|
+
* If ncorr==0 this was steepest descent, and then we give up.
|
|
2608
|
+
* If not, reset memory to restart as steepest descent before quitting.
|
|
2609
|
+
*/
|
|
2610
|
+
if (ncorr == 0)
|
|
2611
|
+
{
|
|
2612
|
+
/* Converged */
|
|
2613
|
+
converged = true;
|
|
2614
|
+
break;
|
|
2615
|
+
}
|
|
2616
|
+
else
|
|
2617
|
+
{
|
|
2618
|
+
/* Reset memory */
|
|
2619
|
+
ncorr = 0;
|
|
2620
|
+
/* Search in gradient direction */
|
|
2621
|
+
for (int i = 0; i < n; i++)
|
|
2622
|
+
{
|
|
2623
|
+
dx[point][i] = ff[i];
|
|
2624
|
+
}
|
|
2625
|
+
/* Reset stepsize */
|
|
2626
|
+
stepsize = 1.0 / fnorm;
|
|
2627
|
+
continue;
|
|
2628
|
+
}
|
|
2629
|
+
}
|
|
2630
|
+
|
|
2631
|
+
/* Select min energy state of A & C, put the best in xx/ff/Epot
|
|
2632
|
+
*/
|
|
2633
|
+
if (sc->epot < sa->epot)
|
|
2634
|
+
{
|
|
2635
|
+
/* Use state C */
|
|
2636
|
+
ems = *sc;
|
|
2637
|
+
step_taken = c;
|
|
2638
|
+
}
|
|
2639
|
+
else
|
|
2640
|
+
{
|
|
2641
|
+
/* Use state A */
|
|
2642
|
+
ems = *sa;
|
|
2643
|
+
step_taken = a;
|
|
2644
|
+
}
|
|
2645
|
+
}
|
|
2646
|
+
else
|
|
2647
|
+
{
|
|
2648
|
+
/* found lower */
|
|
2649
|
+
/* Use state C */
|
|
2650
|
+
ems = *sc;
|
|
2651
|
+
step_taken = c;
|
|
2652
|
+
}
|
|
2653
|
+
|
|
2654
|
+
/* Update the memory information, and calculate a new
|
|
2655
|
+
* approximation of the inverse hessian
|
|
2656
|
+
*/
|
|
2657
|
+
|
|
2658
|
+
/* Have new data in Epot, xx, ff */
|
|
2659
|
+
if (ncorr < nmaxcorr)
|
|
2660
|
+
{
|
|
2661
|
+
ncorr++;
|
|
2662
|
+
}
|
|
2663
|
+
|
|
2664
|
+
for (int i = 0; i < n; i++)
|
|
2665
|
+
{
|
|
2666
|
+
dg[point][i] = lastf[i] - ff[i];
|
|
2667
|
+
dx[point][i] *= step_taken;
|
|
2668
|
+
}
|
|
2669
|
+
|
|
2670
|
+
real dgdg = 0;
|
|
2671
|
+
real dgdx = 0;
|
|
2672
|
+
for (int i = 0; i < n; i++)
|
|
2673
|
+
{
|
|
2674
|
+
dgdg += dg[point][i] * dg[point][i];
|
|
2675
|
+
dgdx += dg[point][i] * dx[point][i];
|
|
2676
|
+
}
|
|
2677
|
+
|
|
2678
|
+
const real diag = dgdx / dgdg;
|
|
2679
|
+
|
|
2680
|
+
rho[point] = 1.0 / dgdx;
|
|
2681
|
+
point++;
|
|
2682
|
+
|
|
2683
|
+
if (point >= nmaxcorr)
|
|
2684
|
+
{
|
|
2685
|
+
point = 0;
|
|
2686
|
+
}
|
|
2687
|
+
|
|
2688
|
+
/* Update */
|
|
2689
|
+
for (int i = 0; i < n; i++)
|
|
2690
|
+
{
|
|
2691
|
+
p[i] = ff[i];
|
|
2692
|
+
}
|
|
2693
|
+
|
|
2694
|
+
int cp = point;
|
|
2695
|
+
|
|
2696
|
+
/* Recursive update. First go back over the memory points */
|
|
2697
|
+
for (int k = 0; k < ncorr; k++)
|
|
2698
|
+
{
|
|
2699
|
+
cp--;
|
|
2700
|
+
if (cp < 0)
|
|
2701
|
+
{
|
|
2702
|
+
cp = ncorr - 1;
|
|
2703
|
+
}
|
|
2704
|
+
|
|
2705
|
+
real sq = 0;
|
|
2706
|
+
for (int i = 0; i < n; i++)
|
|
2707
|
+
{
|
|
2708
|
+
sq += dx[cp][i] * p[i];
|
|
2709
|
+
}
|
|
2710
|
+
|
|
2711
|
+
alpha[cp] = rho[cp] * sq;
|
|
2712
|
+
|
|
2713
|
+
for (int i = 0; i < n; i++)
|
|
2714
|
+
{
|
|
2715
|
+
p[i] -= alpha[cp] * dg[cp][i];
|
|
2716
|
+
}
|
|
2717
|
+
}
|
|
2718
|
+
|
|
2719
|
+
for (int i = 0; i < n; i++)
|
|
2720
|
+
{
|
|
2721
|
+
p[i] *= diag;
|
|
2722
|
+
}
|
|
2723
|
+
|
|
2724
|
+
/* And then go forward again */
|
|
2725
|
+
for (int k = 0; k < ncorr; k++)
|
|
2726
|
+
{
|
|
2727
|
+
real yr = 0;
|
|
2728
|
+
for (int i = 0; i < n; i++)
|
|
2729
|
+
{
|
|
2730
|
+
yr += p[i] * dg[cp][i];
|
|
2731
|
+
}
|
|
2732
|
+
|
|
2733
|
+
real beta = rho[cp] * yr;
|
|
2734
|
+
beta = alpha[cp] - beta;
|
|
2735
|
+
|
|
2736
|
+
for (int i = 0; i < n; i++)
|
|
2737
|
+
{
|
|
2738
|
+
p[i] += beta * dx[cp][i];
|
|
2739
|
+
}
|
|
2740
|
+
|
|
2741
|
+
cp++;
|
|
2742
|
+
if (cp >= ncorr)
|
|
2743
|
+
{
|
|
2744
|
+
cp = 0;
|
|
2745
|
+
}
|
|
2746
|
+
}
|
|
2747
|
+
|
|
2748
|
+
for (int i = 0; i < n; i++)
|
|
2749
|
+
{
|
|
2750
|
+
if (!frozen[i])
|
|
2751
|
+
{
|
|
2752
|
+
dx[point][i] = p[i];
|
|
2753
|
+
}
|
|
2754
|
+
else
|
|
2755
|
+
{
|
|
2756
|
+
dx[point][i] = 0;
|
|
2757
|
+
}
|
|
2758
|
+
}
|
|
2759
|
+
|
|
2760
|
+
/* Print it if necessary */
|
|
2761
|
+
if (MAIN(cr_))
|
|
2762
|
+
{
|
|
2763
|
+
if (mdrunOptions_.verbose)
|
|
2764
|
+
{
|
|
2765
|
+
double sqrtNumAtoms = sqrt(static_cast<double>(stateGlobal_->numAtoms()));
|
|
2766
|
+
fprintf(stderr,
|
|
2767
|
+
"\rStep %d, Epot=%12.6e, Fnorm=%9.3e, Fmax=%9.3e (atom %d)\n",
|
|
2768
|
+
step,
|
|
2769
|
+
ems.epot,
|
|
2770
|
+
ems.fnorm / sqrtNumAtoms,
|
|
2771
|
+
ems.fmax,
|
|
2772
|
+
ems.a_fmax + 1);
|
|
2773
|
+
fflush(stderr);
|
|
2774
|
+
}
|
|
2775
|
+
/* Store the new (lower) energies */
|
|
2776
|
+
matrix nullBox = {};
|
|
2777
|
+
energyOutput.addDataAtEnergyStep(false,
|
|
2778
|
+
false,
|
|
2779
|
+
static_cast<double>(step),
|
|
2780
|
+
mdatoms->tmass,
|
|
2781
|
+
enerd_,
|
|
2782
|
+
nullptr,
|
|
2783
|
+
nullBox,
|
|
2784
|
+
PTCouplingArrays(),
|
|
2785
|
+
0,
|
|
2786
|
+
vir,
|
|
2787
|
+
pres,
|
|
2788
|
+
nullptr,
|
|
2789
|
+
mu_tot,
|
|
2790
|
+
constr_);
|
|
2791
|
+
|
|
2792
|
+
do_log = do_per_step(step, inputRec_->nstlog);
|
|
2793
|
+
do_ene = do_per_step(step, inputRec_->nstenergy);
|
|
2794
|
+
|
|
2795
|
+
imdSession_->fillEnergyRecord(step, TRUE);
|
|
2796
|
+
|
|
2797
|
+
if (do_log)
|
|
2798
|
+
{
|
|
2799
|
+
EnergyOutput::printHeader(fpLog_, step, step);
|
|
2800
|
+
}
|
|
2801
|
+
energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf),
|
|
2802
|
+
do_ene,
|
|
2803
|
+
FALSE,
|
|
2804
|
+
FALSE,
|
|
2805
|
+
do_log ? fpLog_ : nullptr,
|
|
2806
|
+
step,
|
|
2807
|
+
step,
|
|
2808
|
+
fr_->fcdata.get(),
|
|
2809
|
+
nullptr);
|
|
2810
|
+
}
|
|
2811
|
+
|
|
2812
|
+
/* Send x and E to IMD client, if bIMD is TRUE. */
|
|
2813
|
+
if (imdSession_->run(step, TRUE, stateGlobal_->box, stateGlobal_->x, 0) && MAIN(cr_))
|
|
2814
|
+
{
|
|
2815
|
+
imdSession_->sendPositionsAndEnergies();
|
|
2816
|
+
}
|
|
2817
|
+
|
|
2818
|
+
// Reset stepsize in we are doing more iterations
|
|
2819
|
+
stepsize = 1.0;
|
|
2820
|
+
|
|
2821
|
+
/* Stop when the maximum force lies below tolerance.
|
|
2822
|
+
* If we have reached machine precision, converged is already set to true.
|
|
2823
|
+
*/
|
|
2824
|
+
converged = converged || (ems.fmax < inputRec_->em_tol);
|
|
2825
|
+
observablesReducer.markAsReadyToReduce();
|
|
2826
|
+
} /* End of the loop */
|
|
2827
|
+
|
|
2828
|
+
if (converged)
|
|
2829
|
+
{
|
|
2830
|
+
step--; /* we never took that last step in this case */
|
|
2831
|
+
}
|
|
2832
|
+
if (ems.fmax > inputRec_->em_tol)
|
|
2833
|
+
{
|
|
2834
|
+
if (MAIN(cr_))
|
|
2835
|
+
{
|
|
2836
|
+
warn_step(fpLog_, inputRec_->em_tol, ems.fmax, step - 1 == number_steps, FALSE);
|
|
2837
|
+
}
|
|
2838
|
+
converged = FALSE;
|
|
2839
|
+
}
|
|
2840
|
+
|
|
2841
|
+
/* If we printed energy and/or logfile last step (which was the last step)
|
|
2842
|
+
* we don't have to do it again, but otherwise print the final values.
|
|
2843
|
+
*/
|
|
2844
|
+
if (!do_log) /* Write final value to log since we didn't do anythin last step */
|
|
2845
|
+
{
|
|
2846
|
+
EnergyOutput::printHeader(fpLog_, step, step);
|
|
2847
|
+
}
|
|
2848
|
+
if (!do_ene || !do_log) /* Write final energy file entries */
|
|
2849
|
+
{
|
|
2850
|
+
energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf),
|
|
2851
|
+
!do_ene,
|
|
2852
|
+
FALSE,
|
|
2853
|
+
FALSE,
|
|
2854
|
+
!do_log ? fpLog_ : nullptr,
|
|
2855
|
+
step,
|
|
2856
|
+
step,
|
|
2857
|
+
fr_->fcdata.get(),
|
|
2858
|
+
nullptr);
|
|
2859
|
+
}
|
|
2860
|
+
|
|
2861
|
+
/* Print some stuff... */
|
|
2862
|
+
if (MAIN(cr_))
|
|
2863
|
+
{
|
|
2864
|
+
fprintf(stderr, "\nwriting lowest energy coordinates.\n");
|
|
2865
|
+
}
|
|
2866
|
+
|
|
2867
|
+
/* IMPORTANT!
|
|
2868
|
+
* For accurate normal mode calculation it is imperative that we
|
|
2869
|
+
* store the last conformation into the full precision binary trajectory.
|
|
2870
|
+
*
|
|
2871
|
+
* However, we should only do it if we did NOT already write this step
|
|
2872
|
+
* above (which we did if do_x or do_f was true).
|
|
2873
|
+
*/
|
|
2874
|
+
const bool do_x = !do_per_step(step, inputRec_->nstxout);
|
|
2875
|
+
const bool do_f = !do_per_step(step, inputRec_->nstfout);
|
|
2876
|
+
write_em_traj(
|
|
2877
|
+
fpLog_, cr_, outf, do_x, do_f, ftp2fn(efSTO, nFile_, fnm_), topGlobal_, inputRec_, step, &ems, stateGlobal_, observablesHistory_);
|
|
2878
|
+
|
|
2879
|
+
if (MAIN(cr_))
|
|
2880
|
+
{
|
|
2881
|
+
double sqrtNumAtoms = sqrt(static_cast<double>(stateGlobal_->numAtoms()));
|
|
2882
|
+
print_converged(stderr, LBFGS, inputRec_->em_tol, step, converged, number_steps, &ems, sqrtNumAtoms);
|
|
2883
|
+
print_converged(fpLog_, LBFGS, inputRec_->em_tol, step, converged, number_steps, &ems, sqrtNumAtoms);
|
|
2884
|
+
|
|
2885
|
+
fprintf(fpLog_, "\nPerformed %d energy evaluations in total.\n", neval);
|
|
2886
|
+
}
|
|
2887
|
+
|
|
2888
|
+
finish_em(cr_, outf, wallTimeAccounting_, wallCycleCounters_);
|
|
2889
|
+
|
|
2890
|
+
/* To print the actual number of steps we needed somewhere */
|
|
2891
|
+
walltime_accounting_set_nsteps_done(wallTimeAccounting_, step);
|
|
2892
|
+
}
|
|
2893
|
+
|
|
2894
|
+
/*! \brief Steepest-descent energy minimizer.
 *
 * Repeatedly takes a trial step of size \c stepsize along the current force
 * direction. If the trial energy is lower (or it is the first step) the trial
 * state is accepted and the maximum displacement \c ustep grows by 20%;
 * otherwise \c ustep is halved and the step is retried from the previous
 * minimum. Terminates when fmax drops below inputRec_->em_tol (converged),
 * when nsteps steps have been done, or when ustep shrinks below machine
 * precision. Writes energies/trajectory frames on accepted steps and the
 * lowest-energy coordinates at the end.
 */
void LegacySimulator::do_steep()
{
    const char*       SD = "Steepest Descents";
    gmx_global_stat_t gstat;
    real              stepsize;  // step length in nm per unit force (ustep / fmax)
    real              ustep;     // largest displacement (nm) allowed this step
    gmx_bool          bDone, bAbort, do_x, do_f;
    tensor            vir, pres;
    rvec              mu_tot = { 0 };
    int               nsteps;
    int               count          = 0;  // step counter
    int               steps_accepted = 0;  // counts only energy-lowering steps; drives output intervals
    auto*             mdatoms        = mdAtoms_->mdatoms();

    GMX_LOG(mdLog_.info)
            .asParagraph()
            .appendText(
                    "Note that activating steepest-descent energy minimization via the "
                    "integrator .mdp option and the command gmx mdrun may "
                    "be available in a different form in a future version of GROMACS, "
                    "e.g. gmx minimize and an .mdp option.");

    /* Create 2 states on the stack and extract pointers that we will swap.
     * s_min always holds the best (lowest-energy) state seen so far,
     * s_try holds the trial state; accepted steps swap the two pointers. */
    em_state_t  s0{}, s1{};
    em_state_t* s_min = &s0;
    em_state_t* s_try = &s1;

    ObservablesReducer observablesReducer = observablesReducerBuilder_->build();

    /* Init em and store the local state in s_try */
    init_em(fpLog_,
            mdLog_,
            SD,
            cr_,
            ms_, /* PLUMED */
            inputRec_,
            mdModulesNotifiers_,
            imdSession_,
            pullWork_,
            stateGlobal_,
            topGlobal_,
            s_try,
            top_,
            nrnb_,
            fr_,
            mdAtoms_,
            &gstat,
            virtualSites_,
            constr_,
            nullptr);
    const bool  simulationsShareState = false;
    gmx_mdoutf* outf                  = init_mdoutf(fpLog_,
                                   nFile_,
                                   fnm_,
                                   mdrunOptions_,
                                   cr_,
                                   outputProvider_,
                                   mdModulesNotifiers_,
                                   inputRec_,
                                   topGlobal_,
                                   nullptr,
                                   wallCycleCounters_,
                                   StartingBehavior::NewSimulation,
                                   simulationsShareState,
                                   ms_);
    gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf),
                                   topGlobal_,
                                   *inputRec_,
                                   pullWork_,
                                   nullptr,
                                   false,
                                   StartingBehavior::NewSimulation,
                                   simulationsShareState,
                                   mdModulesNotifiers_);

    /* Print to log file */
    print_em_start(fpLog_, cr_, wallTimeAccounting_, wallCycleCounters_, SD);

    /* Set variables for stepsize (in nm). This is the largest
     * step that we are going to make in any direction.
     */
    ustep    = inputRec_->em_stepsize;
    stepsize = 0;

    /* Max number of steps */
    nsteps = inputRec_->nsteps;

    if (MAIN(cr_))
    {
        /* Print to the screen */
        sp_header(stderr, SD, inputRec_->em_tol, nsteps);
    }
    if (fpLog_)
    {
        sp_header(fpLog_, SD, inputRec_->em_tol, nsteps);
    }
    EnergyEvaluator energyEvaluator{ fpLog_,
                                     mdLog_,
                                     cr_,
                                     ms_,
                                     topGlobal_,
                                     top_,
                                     inputRec_,
                                     mdModulesNotifiers_,
                                     imdSession_,
                                     pullWork_,
                                     enforcedRotation_,
                                     nrnb_,
                                     wallCycleCounters_,
                                     gstat,
                                     &observablesReducer,
                                     virtualSites_,
                                     constr_,
                                     mdAtoms_,
                                     fr_,
                                     runScheduleWork_,
                                     enerd_,
                                     -1,
                                     {} };

    /**** HERE STARTS THE LOOP ****
     * count is the counter for the number of steps
     * bDone will be TRUE when the minimization has converged
     * bAbort will be TRUE when nsteps steps have been performed or when
     * the stepsize becomes smaller than is reasonable for machine precision
     */
    count  = 0;
    bDone  = FALSE;
    bAbort = FALSE;
    while (!bDone && !bAbort)
    {
        bAbort = (nsteps >= 0) && (count == nsteps);

        /* set new coordinates, except for first step */
        bool validStep = true;
        if (count > 0)
        {
            validStep = do_em_step(
                    cr_, inputRec_, mdatoms, s_min, stepsize, s_min->f.view().forceWithPadding(), s_try, constr_, count);
        }

        if (validStep)
        {
            // count == 0 triggers neighbor searching on the very first evaluation
            energyEvaluator.run(s_try, mu_tot, vir, pres, count, count == 0, count);
        }
        else
        {
            // Signal constraint error during stepping with energy=inf
            s_try->epot = std::numeric_limits<real>::infinity();
        }

        if (MAIN(cr_))
        {
            EnergyOutput::printHeader(fpLog_, count, count);
        }

        if (count == 0)
        {
            // Seed the reference energy so the first comparison below is well-defined
            s_min->epot = s_try->epot;
        }

        /* Print it if necessary */
        if (MAIN(cr_))
        {
            if (mdrunOptions_.verbose)
            {
                // '\n' keeps accepted steps visible; '\r' overwrites rejected ones
                fprintf(stderr,
                        "Step=%5d, Dmax= %6.1e nm, Epot= %12.5e Fmax= %11.5e, atom= %d%c",
                        count,
                        ustep,
                        s_try->epot,
                        s_try->fmax,
                        s_try->a_fmax + 1,
                        ((count == 0) || (s_try->epot < s_min->epot)) ? '\n' : '\r');
                fflush(stderr);
            }

            if ((count == 0) || (s_try->epot < s_min->epot))
            {
                /* Store the new (lower) energies */
                matrix nullBox = {};
                energyOutput.addDataAtEnergyStep(false,
                                                 false,
                                                 static_cast<double>(count),
                                                 mdatoms->tmass,
                                                 enerd_,
                                                 nullptr,
                                                 nullBox,
                                                 PTCouplingArrays(),
                                                 0,
                                                 vir,
                                                 pres,
                                                 nullptr,
                                                 mu_tot,
                                                 constr_);

                imdSession_->fillEnergyRecord(count, TRUE);

                // Output intervals are based on accepted steps, not attempted ones
                const bool do_dr = do_per_step(steps_accepted, inputRec_->nstdisreout);
                const bool do_or = do_per_step(steps_accepted, inputRec_->nstorireout);
                energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf),
                                                   TRUE,
                                                   do_dr,
                                                   do_or,
                                                   fpLog_,
                                                   count,
                                                   count,
                                                   fr_->fcdata.get(),
                                                   nullptr);
                fflush(fpLog_);
            }
        }

        /* Now if the new energy is smaller than the previous...
         * or if this is the first step!
         * or if we did random steps!
         */

        if ((count == 0) || (s_try->epot < s_min->epot))
        {
            steps_accepted++;

            /* Test whether the convergence criterion is met... */
            bDone = (s_try->fmax < inputRec_->em_tol);

            /* Copy the arrays for force, positions and energy */
            /* The 'Min' array always holds the coords and forces of the minimal
               sampled energy */
            swap_em_state(&s_min, &s_try);
            if (count > 0)
            {
                // Grow the step on success (skip on step 0 — nothing was accepted yet)
                ustep *= 1.2;
            }

            /* Write to trn, if necessary */
            do_x = do_per_step(steps_accepted, inputRec_->nstxout);
            do_f = do_per_step(steps_accepted, inputRec_->nstfout);
            write_em_traj(
                    fpLog_, cr_, outf, do_x, do_f, nullptr, topGlobal_, inputRec_, count, s_min, stateGlobal_, observablesHistory_);
        }
        else
        {
            /* If energy is not smaller make the step smaller... */
            ustep *= 0.5;

            if (haveDDAtomOrdering(*cr_) && s_min->s.ddp_count != cr_->dd->ddp_count)
            {
                /* Reload the old state */
                em_dd_partition_system(fpLog_,
                                       mdLog_,
                                       count,
                                       cr_,
                                       topGlobal_,
                                       inputRec_,
                                       mdModulesNotifiers_,
                                       imdSession_,
                                       pullWork_,
                                       s_min,
                                       top_,
                                       mdAtoms_,
                                       fr_,
                                       virtualSites_,
                                       constr_,
                                       nrnb_,
                                       wallCycleCounters_);
            }
        }

        // If the force is very small after finishing minimization,
        // we risk dividing by zero when calculating the step size.
        // So we check first if the minimization has stopped before
        // trying to obtain a new step size.
        if (!bDone)
        {
            /* Determine new step */
            stepsize = ustep / s_min->fmax;
        }

        /* Check if stepsize is too small, with 1 nm as a characteristic length */
#if GMX_DOUBLE
        if (count == nsteps || ustep < 1e-12)
#else
        if (count == nsteps || ustep < 1e-6)
#endif
        {
            if (MAIN(cr_))
            {
                warn_step(fpLog_, inputRec_->em_tol, s_min->fmax, count == nsteps, constr_ != nullptr);
            }
            bAbort = TRUE;
        }

        /* Send IMD energies and positions, if bIMD is TRUE. */
        if (imdSession_->run(count,
                             TRUE,
                             MAIN(cr_) ? stateGlobal_->box : nullptr,
                             MAIN(cr_) ? stateGlobal_->x : gmx::ArrayRef<gmx::RVec>(),
                             0)
            && MAIN(cr_))
        {
            imdSession_->sendPositionsAndEnergies();
        }

        count++;
        observablesReducer.markAsReadyToReduce();
    } /* End of the loop */

    /* Print some data... */
    if (MAIN(cr_))
    {
        fprintf(stderr, "\nwriting lowest energy coordinates.\n");
    }
    // Final frame: always write x; write f only if force output was requested at all
    write_em_traj(fpLog_,
                  cr_,
                  outf,
                  TRUE,
                  inputRec_->nstfout != 0,
                  ftp2fn(efSTO, nFile_, fnm_),
                  topGlobal_,
                  inputRec_,
                  count,
                  s_min,
                  stateGlobal_,
                  observablesHistory_);

    if (MAIN(cr_))
    {
        double sqrtNumAtoms = sqrt(static_cast<double>(stateGlobal_->numAtoms()));

        print_converged(stderr, SD, inputRec_->em_tol, count, bDone, nsteps, s_min, sqrtNumAtoms);
        print_converged(fpLog_, SD, inputRec_->em_tol, count, bDone, nsteps, s_min, sqrtNumAtoms);
    }

    finish_em(cr_, outf, wallTimeAccounting_, wallCycleCounters_);

    walltime_accounting_set_nsteps_done(wallTimeAccounting_, count);
}
|
|
3231
|
+
|
|
3232
|
+
/*! \brief Normal-mode analysis: build the Hessian by finite differences.
 *
 * For every Cartesian coordinate of every real atom (vsites/shells excluded),
 * the forces are evaluated at +/- der_range displacement and the Hessian row
 * is formed as the central difference -(f(+h) - f(-h)) / (2h). Work is
 * distributed round-robin over MPI ranks; non-main ranks send their dfdx
 * rows to the main rank, which assembles a sparse (compressed symmetric,
 * upper triangle only) or full matrix and writes it to the .mtx file.
 * Requires a constraint-free system; shells are relaxed at each displacement
 * when present.
 */
void LegacySimulator::do_nm()
{
    const char*         NM = "Normal Mode Analysis";
    int                 nnodes;
    gmx_global_stat_t   gstat;
    tensor              vir, pres;
    rvec                mu_tot = { 0 };
    rvec*               dfdx;    // one Hessian row: d(force_j)/d(x_atom,d) for all j
    gmx_bool            bSparse; /* use sparse matrix storage format */
    size_t              sz;
    gmx_sparsematrix_t* sparse_matrix = nullptr;
    real*               full_matrix   = nullptr;

    /* added with respect to mdrun */
    int   row, col;
    real  der_range = 10.0 * std::sqrt(GMX_REAL_EPS);  // finite-difference displacement (nm)
    real  x_min;
    bool  bIsMain = MAIN(cr_);
    auto* mdatoms = mdAtoms_->mdatoms();

    GMX_LOG(mdLog_.info)
            .asParagraph()
            .appendText(
                    "Note that activating normal-mode analysis via the integrator "
                    ".mdp option and the command gmx mdrun may "
                    "be available in a different form in a future version of GROMACS, "
                    "e.g. gmx normal-modes.");

    if (constr_ != nullptr)
    {
        gmx_fatal(
                FARGS,
                "Constraints present with Normal Mode Analysis, this combination is not supported");
    }

    gmx_shellfc_t* shellfc;

    em_state_t state_work{};

    fr_->longRangeNonbondeds->updateAfterPartition(*mdAtoms_->mdatoms());
    ObservablesReducer observablesReducer = observablesReducerBuilder_->build();

    /* Init em and store the local state in state_minimum */
    init_em(fpLog_,
            mdLog_,
            NM,
            cr_,
            ms_, /* PLUMED */
            inputRec_,
            mdModulesNotifiers_,
            imdSession_,
            pullWork_,
            stateGlobal_,
            topGlobal_,
            &state_work,
            top_,
            nrnb_,
            fr_,
            mdAtoms_,
            &gstat,
            virtualSites_,
            constr_,
            &shellfc);
    const bool  simulationsShareState = false;
    gmx_mdoutf* outf                  = init_mdoutf(fpLog_,
                                   nFile_,
                                   fnm_,
                                   mdrunOptions_,
                                   cr_,
                                   outputProvider_,
                                   mdModulesNotifiers_,
                                   inputRec_,
                                   topGlobal_,
                                   nullptr,
                                   wallCycleCounters_,
                                   StartingBehavior::NewSimulation,
                                   simulationsShareState,
                                   ms_);

    std::vector<int>       atom_index = get_atom_index(topGlobal_);
    std::vector<gmx::RVec> fneg(atom_index.size(), { 0, 0, 0 });  // forces at the -h displacement
    snew(dfdx, atom_index.size());

#if !GMX_DOUBLE
    if (bIsMain)
    {
        fprintf(stderr,
                "NOTE: This version of GROMACS has been compiled in single precision,\n"
                "      which MIGHT not be accurate enough for normal mode analysis.\n"
                "      GROMACS now uses sparse matrix storage, so the memory requirements\n"
                "      are fairly modest even if you recompile in double precision.\n\n");
    }
#endif

    /* Check if we can/should use sparse storage format.
     *
     * Sparse format is only useful when the Hessian itself is sparse, which it
     * will be when we use a cutoff.
     * For small systems (n<1000) it is easier to always use full matrix format, though.
     */
    if (usingFullElectrostatics(fr_->ic->eeltype) || fr_->rlist == 0.0)
    {
        GMX_LOG(mdLog_.warning)
                .appendText("Non-cutoff electrostatics used, forcing full Hessian format.");
        bSparse = FALSE;
    }
    else if (atom_index.size() < 1000)
    {
        GMX_LOG(mdLog_.warning)
                .appendTextFormatted("Small system size (N=%zu), using full Hessian format.",
                                     atom_index.size());
        bSparse = FALSE;
    }
    else
    {
        GMX_LOG(mdLog_.warning).appendText("Using compressed symmetric sparse Hessian format.");
        bSparse = TRUE;
    }

    /* Number of dimensions, based on real atoms, that is not vsites or shell */
    sz = DIM * atom_index.size();

    fprintf(stderr, "Allocating Hessian memory...\n\n");

    if (bSparse)
    {
        sparse_matrix                       = gmx_sparsematrix_init(sz);
        sparse_matrix->compressed_symmetric = TRUE;
    }
    else
    {
        snew(full_matrix, sz * sz);
    }

    /* Write start time and temperature */
    print_em_start(fpLog_, cr_, wallTimeAccounting_, wallCycleCounters_, NM);

    // Two force evaluations (+h and -h) per real atom coordinate set
    const int64_t numSteps = atom_index.size() * 2;
    if (bIsMain)
    {
        fprintf(stderr,
                "starting normal mode calculation '%s'\n%" PRId64 " steps.\n\n",
                *(topGlobal_.name),
                numSteps);
    }

    nnodes = cr_->nnodes;

    /* Make evaluate_energy do a single node force calculation */
    cr_->nnodes = 1;
    EnergyEvaluator energyEvaluator{ fpLog_,
                                     mdLog_,
                                     cr_,
                                     ms_,
                                     topGlobal_,
                                     top_,
                                     inputRec_,
                                     mdModulesNotifiers_,
                                     imdSession_,
                                     pullWork_,
                                     enforcedRotation_,
                                     nrnb_,
                                     wallCycleCounters_,
                                     gstat,
                                     &observablesReducer,
                                     virtualSites_,
                                     constr_,
                                     mdAtoms_,
                                     fr_,
                                     runScheduleWork_,
                                     enerd_,
                                     -1,
                                     {} };
    energyEvaluator.run(&state_work, mu_tot, vir, pres, -1, TRUE, 0);
    cr_->nnodes = nnodes;  // restore the real rank count after the single-node evaluation

    /* if forces are not small, warn user */
    get_state_f_norm_max(cr_, &(inputRec_->opts), mdatoms, &state_work);

    GMX_LOG(mdLog_.warning).appendTextFormatted("Maximum force:%12.5e", state_work.fmax);
    if (state_work.fmax > 1.0e-3)
    {
        GMX_LOG(mdLog_.warning)
                .appendText(
                        "The force is probably not small enough to "
                        "ensure that you are at a minimum.\n"
                        "Be aware that negative eigenvalues may occur\n"
                        "when the resulting matrix is diagonalized.");
    }

    /***********************************************************
     *
     *      Loop over all pairs in matrix
     *
     *      do_force called twice. Once with positive and
     *      once with negative displacement
     *
     ************************************************************/

    /* Steps are divided one by one over the nodes */
    bool bNS          = true;
    auto state_work_x = makeArrayRef(state_work.s.x);
    auto state_work_f = state_work.f.view().force();
    for (Index aid = cr_->nodeid; aid < ssize(atom_index); aid += nnodes)
    {
        size_t atom = atom_index[aid];
        for (size_t d = 0; d < DIM; d++)
        {
            int64_t step = 0;
            double  t    = 0;

            x_min = state_work_x[atom][d];

            // dx == 0: displace by -der_range; dx == 1: displace by +der_range
            for (unsigned int dx = 0; (dx < 2); dx++)
            {
                if (dx == 0)
                {
                    state_work_x[atom][d] = x_min - der_range;
                }
                else
                {
                    state_work_x[atom][d] = x_min + der_range;
                }

                /* Make evaluate_energy do a single node force calculation */
                cr_->nnodes = 1;
                if (shellfc)
                {
                    /* Now is the time to relax the shells */
                    relax_shell_flexcon(fpLog_,
                                        cr_,
                                        ms_,
                                        mdrunOptions_.verbose,
                                        nullptr,
                                        step,
                                        inputRec_,
                                        mdModulesNotifiers_,
                                        imdSession_,
                                        pullWork_,
                                        bNS,
                                        top_,
                                        constr_,
                                        enerd_,
                                        state_work.s.numAtoms(),
                                        state_work.s.x.arrayRefWithPadding(),
                                        state_work.s.v.arrayRefWithPadding(),
                                        state_work.s.box,
                                        state_work.s.lambda,
                                        &state_work.s.hist,
                                        &state_work.f.view(),
                                        vir,
                                        *mdatoms,
                                        fr_->longRangeNonbondeds.get(),
                                        nrnb_,
                                        wallCycleCounters_,
                                        shellfc,
                                        fr_,
                                        *runScheduleWork_,
                                        t,
                                        mu_tot,
                                        virtualSites_,
                                        DDBalanceRegionHandler(nullptr));
                    bNS = false;  // neighbor search only on the very first shell relaxation
                    step++;
                }
                else
                {
                    energyEvaluator.run(&state_work, mu_tot, vir, pres, aid * 2 + dx, FALSE, step);
                }

                cr_->nnodes = nnodes;

                if (dx == 0)
                {
                    // Save the -h forces; the +h forces stay in state_work_f
                    std::copy(state_work_f.begin(), state_work_f.begin() + atom_index.size(), fneg.begin());
                }
            }

            /* x is restored to original */
            state_work_x[atom][d] = x_min;

            // Central difference; minus sign because the Hessian is -dF/dx
            for (size_t j = 0; j < atom_index.size(); j++)
            {
                for (size_t k = 0; (k < DIM); k++)
                {
                    dfdx[j][k] = -(state_work_f[atom_index[j]][k] - fneg[j][k]) / (2 * der_range);
                }
            }

            if (!bIsMain)
            {
#if GMX_MPI
#    define mpi_type GMX_MPI_REAL
                MPI_Send(dfdx[0], atom_index.size() * DIM, mpi_type, MAIN(cr_), cr_->nodeid, cr_->mpi_comm_mygroup);
#endif
            }
            else
            {
                // Main rank collects this round's rows from the other ranks
                // (node 0 is itself, so no receive needed there)
                for (Index node = 0; (node < nnodes && aid + node < ssize(atom_index)); node++)
                {
                    if (node > 0)
                    {
#if GMX_MPI
                        MPI_Status stat;
                        MPI_Recv(dfdx[0], atom_index.size() * DIM, mpi_type, node, node, cr_->mpi_comm_mygroup, &stat);
#    undef mpi_type
#endif
                    }

                    row = (aid + node) * DIM + d;

                    for (size_t j = 0; j < atom_index.size(); j++)
                    {
                        for (size_t k = 0; k < DIM; k++)
                        {
                            col = j * DIM + k;

                            if (bSparse)
                            {
                                // Compressed-symmetric storage keeps only the
                                // upper triangle and skips exact zeros
                                if (col >= row && dfdx[j][k] != 0.0)
                                {
                                    gmx_sparsematrix_increment_value(sparse_matrix, row, col, dfdx[j][k]);
                                }
                            }
                            else
                            {
                                full_matrix[row * sz + col] = dfdx[j][k];
                            }
                        }
                    }
                }
            }

            if (mdrunOptions_.verbose && fpLog_)
            {
                fflush(fpLog_);
            }
        }
        /* write progress */
        if (bIsMain && mdrunOptions_.verbose)
        {
            fprintf(stderr,
                    "\rFinished step %d out of %td",
                    std::min<int>(atom + nnodes, atom_index.size()),
                    ssize(atom_index));
            fflush(stderr);
        }
    }

    if (bIsMain)
    {
        fprintf(stderr, "\n\nWriting Hessian...\n");
        // Exactly one of full_matrix/sparse_matrix is non-null here
        gmx_mtxio_write(ftp2fn(efMTX, nFile_, fnm_), sz, sz, full_matrix, sparse_matrix);
    }

    finish_em(cr_, outf, wallTimeAccounting_, wallCycleCounters_);

    walltime_accounting_set_nsteps_done(wallTimeAccounting_, numSteps);
}
|
|
3591
|
+
|
|
3592
|
+
} // namespace gmx
|