hillclimber-0.1.5a8-cp314-cp314-macosx_15_0_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hillclimber/__init__.py +39 -0
- hillclimber/actions.py +53 -0
- hillclimber/analysis.py +590 -0
- hillclimber/biases.py +293 -0
- hillclimber/calc.py +22 -0
- hillclimber/cvs.py +1065 -0
- hillclimber/interfaces.py +133 -0
- hillclimber/metadynamics.py +325 -0
- hillclimber/nodes.py +6 -0
- hillclimber/opes.py +359 -0
- hillclimber/selectors.py +230 -0
- hillclimber/virtual_atoms.py +341 -0
- hillclimber-0.1.5a8.dist-info/METADATA +209 -0
- hillclimber-0.1.5a8.dist-info/RECORD +469 -0
- hillclimber-0.1.5a8.dist-info/WHEEL +6 -0
- hillclimber-0.1.5a8.dist-info/entry_points.txt +8 -0
- hillclimber-0.1.5a8.dist-info/licenses/LICENSE +165 -0
- plumed/__init__.py +104 -0
- plumed/_lib/bin/plumed +0 -0
- plumed/_lib/bin/plumed-config +9 -0
- plumed/_lib/bin/plumed-patch +9 -0
- plumed/_lib/include/plumed/adjmat/AdjacencyMatrixBase.h +659 -0
- plumed/_lib/include/plumed/adjmat/ContactMatrix.h +59 -0
- plumed/_lib/include/plumed/asmjit/arch.h +228 -0
- plumed/_lib/include/plumed/asmjit/arm.h +43 -0
- plumed/_lib/include/plumed/asmjit/asmjit.h +69 -0
- plumed/_lib/include/plumed/asmjit/asmjit_apibegin.h +143 -0
- plumed/_lib/include/plumed/asmjit/asmjit_apiend.h +93 -0
- plumed/_lib/include/plumed/asmjit/asmjit_build.h +971 -0
- plumed/_lib/include/plumed/asmjit/assembler.h +183 -0
- plumed/_lib/include/plumed/asmjit/base.h +56 -0
- plumed/_lib/include/plumed/asmjit/codebuilder.h +944 -0
- plumed/_lib/include/plumed/asmjit/codecompiler.h +767 -0
- plumed/_lib/include/plumed/asmjit/codeemitter.h +528 -0
- plumed/_lib/include/plumed/asmjit/codeholder.h +777 -0
- plumed/_lib/include/plumed/asmjit/constpool.h +286 -0
- plumed/_lib/include/plumed/asmjit/cpuinfo.h +402 -0
- plumed/_lib/include/plumed/asmjit/func.h +1327 -0
- plumed/_lib/include/plumed/asmjit/globals.h +370 -0
- plumed/_lib/include/plumed/asmjit/inst.h +137 -0
- plumed/_lib/include/plumed/asmjit/logging.h +317 -0
- plumed/_lib/include/plumed/asmjit/misc_p.h +103 -0
- plumed/_lib/include/plumed/asmjit/moved_string.h +318 -0
- plumed/_lib/include/plumed/asmjit/operand.h +1599 -0
- plumed/_lib/include/plumed/asmjit/osutils.h +207 -0
- plumed/_lib/include/plumed/asmjit/regalloc_p.h +597 -0
- plumed/_lib/include/plumed/asmjit/runtime.h +227 -0
- plumed/_lib/include/plumed/asmjit/simdtypes.h +1104 -0
- plumed/_lib/include/plumed/asmjit/utils.h +1387 -0
- plumed/_lib/include/plumed/asmjit/vmem.h +183 -0
- plumed/_lib/include/plumed/asmjit/x86.h +45 -0
- plumed/_lib/include/plumed/asmjit/x86assembler.h +125 -0
- plumed/_lib/include/plumed/asmjit/x86builder.h +117 -0
- plumed/_lib/include/plumed/asmjit/x86compiler.h +322 -0
- plumed/_lib/include/plumed/asmjit/x86emitter.h +5149 -0
- plumed/_lib/include/plumed/asmjit/x86globals.h +535 -0
- plumed/_lib/include/plumed/asmjit/x86inst.h +2547 -0
- plumed/_lib/include/plumed/asmjit/x86instimpl_p.h +74 -0
- plumed/_lib/include/plumed/asmjit/x86internal_p.h +108 -0
- plumed/_lib/include/plumed/asmjit/x86logging_p.h +92 -0
- plumed/_lib/include/plumed/asmjit/x86misc.h +417 -0
- plumed/_lib/include/plumed/asmjit/x86operand.h +1133 -0
- plumed/_lib/include/plumed/asmjit/x86regalloc_p.h +734 -0
- plumed/_lib/include/plumed/asmjit/zone.h +1157 -0
- plumed/_lib/include/plumed/bias/Bias.h +82 -0
- plumed/_lib/include/plumed/bias/ReweightBase.h +58 -0
- plumed/_lib/include/plumed/blas/blas.h +253 -0
- plumed/_lib/include/plumed/blas/def_external.h +61 -0
- plumed/_lib/include/plumed/blas/def_internal.h +97 -0
- plumed/_lib/include/plumed/blas/real.h +49 -0
- plumed/_lib/include/plumed/cltools/CLTool.h +32 -0
- plumed/_lib/include/plumed/clusters/ClusteringBase.h +70 -0
- plumed/_lib/include/plumed/colvar/Colvar.h +32 -0
- plumed/_lib/include/plumed/colvar/ColvarInput.h +68 -0
- plumed/_lib/include/plumed/colvar/ColvarShortcut.h +81 -0
- plumed/_lib/include/plumed/colvar/CoordinationBase.h +52 -0
- plumed/_lib/include/plumed/colvar/MultiColvarTemplate.h +333 -0
- plumed/_lib/include/plumed/colvar/PathMSDBase.h +101 -0
- plumed/_lib/include/plumed/colvar/RMSDVector.h +78 -0
- plumed/_lib/include/plumed/config/Config.h +118 -0
- plumed/_lib/include/plumed/config/version.h +9 -0
- plumed/_lib/include/plumed/contour/ContourFindingObject.h +87 -0
- plumed/_lib/include/plumed/contour/DistanceFromContourBase.h +82 -0
- plumed/_lib/include/plumed/contour/FindContour.h +67 -0
- plumed/_lib/include/plumed/core/Action.h +540 -0
- plumed/_lib/include/plumed/core/ActionAnyorder.h +48 -0
- plumed/_lib/include/plumed/core/ActionAtomistic.h +343 -0
- plumed/_lib/include/plumed/core/ActionForInterface.h +99 -0
- plumed/_lib/include/plumed/core/ActionPilot.h +57 -0
- plumed/_lib/include/plumed/core/ActionRegister.h +124 -0
- plumed/_lib/include/plumed/core/ActionSet.h +163 -0
- plumed/_lib/include/plumed/core/ActionSetup.h +48 -0
- plumed/_lib/include/plumed/core/ActionShortcut.h +73 -0
- plumed/_lib/include/plumed/core/ActionToGetData.h +59 -0
- plumed/_lib/include/plumed/core/ActionToPutData.h +101 -0
- plumed/_lib/include/plumed/core/ActionWithArguments.h +140 -0
- plumed/_lib/include/plumed/core/ActionWithMatrix.h +87 -0
- plumed/_lib/include/plumed/core/ActionWithValue.h +258 -0
- plumed/_lib/include/plumed/core/ActionWithVector.h +94 -0
- plumed/_lib/include/plumed/core/ActionWithVirtualAtom.h +123 -0
- plumed/_lib/include/plumed/core/CLTool.h +177 -0
- plumed/_lib/include/plumed/core/CLToolMain.h +102 -0
- plumed/_lib/include/plumed/core/CLToolRegister.h +108 -0
- plumed/_lib/include/plumed/core/Colvar.h +115 -0
- plumed/_lib/include/plumed/core/DataPassingObject.h +94 -0
- plumed/_lib/include/plumed/core/DataPassingTools.h +54 -0
- plumed/_lib/include/plumed/core/DomainDecomposition.h +120 -0
- plumed/_lib/include/plumed/core/ExchangePatterns.h +47 -0
- plumed/_lib/include/plumed/core/FlexibleBin.h +63 -0
- plumed/_lib/include/plumed/core/GREX.h +61 -0
- plumed/_lib/include/plumed/core/GenericMolInfo.h +89 -0
- plumed/_lib/include/plumed/core/Group.h +41 -0
- plumed/_lib/include/plumed/core/ModuleMap.h +30 -0
- plumed/_lib/include/plumed/core/ParallelTaskManager.h +1023 -0
- plumed/_lib/include/plumed/core/PbcAction.h +61 -0
- plumed/_lib/include/plumed/core/PlumedMain.h +632 -0
- plumed/_lib/include/plumed/core/PlumedMainInitializer.h +118 -0
- plumed/_lib/include/plumed/core/RegisterBase.h +340 -0
- plumed/_lib/include/plumed/core/TargetDist.h +48 -0
- plumed/_lib/include/plumed/core/Value.h +547 -0
- plumed/_lib/include/plumed/core/WithCmd.h +93 -0
- plumed/_lib/include/plumed/dimred/SMACOF.h +55 -0
- plumed/_lib/include/plumed/drr/DRR.h +383 -0
- plumed/_lib/include/plumed/drr/colvar_UIestimator.h +777 -0
- plumed/_lib/include/plumed/fisst/legendre_rule_fast.h +44 -0
- plumed/_lib/include/plumed/function/Custom.h +54 -0
- plumed/_lib/include/plumed/function/Function.h +85 -0
- plumed/_lib/include/plumed/function/FunctionOfMatrix.h +368 -0
- plumed/_lib/include/plumed/function/FunctionOfScalar.h +135 -0
- plumed/_lib/include/plumed/function/FunctionOfVector.h +296 -0
- plumed/_lib/include/plumed/function/FunctionSetup.h +180 -0
- plumed/_lib/include/plumed/function/FunctionShortcut.h +130 -0
- plumed/_lib/include/plumed/function/FunctionWithSingleArgument.h +165 -0
- plumed/_lib/include/plumed/gridtools/ActionWithGrid.h +43 -0
- plumed/_lib/include/plumed/gridtools/EvaluateGridFunction.h +99 -0
- plumed/_lib/include/plumed/gridtools/FunctionOfGrid.h +295 -0
- plumed/_lib/include/plumed/gridtools/GridCoordinatesObject.h +179 -0
- plumed/_lib/include/plumed/gridtools/GridSearch.h +135 -0
- plumed/_lib/include/plumed/gridtools/Interpolator.h +45 -0
- plumed/_lib/include/plumed/gridtools/KDE.h +455 -0
- plumed/_lib/include/plumed/gridtools/RDF.h +40 -0
- plumed/_lib/include/plumed/gridtools/SumOfKernels.h +219 -0
- plumed/_lib/include/plumed/isdb/MetainferenceBase.h +398 -0
- plumed/_lib/include/plumed/lapack/def_external.h +207 -0
- plumed/_lib/include/plumed/lapack/def_internal.h +388 -0
- plumed/_lib/include/plumed/lapack/lapack.h +899 -0
- plumed/_lib/include/plumed/lapack/lapack_limits.h +79 -0
- plumed/_lib/include/plumed/lapack/real.h +50 -0
- plumed/_lib/include/plumed/lepton/CompiledExpression.h +164 -0
- plumed/_lib/include/plumed/lepton/CustomFunction.h +143 -0
- plumed/_lib/include/plumed/lepton/Exception.h +93 -0
- plumed/_lib/include/plumed/lepton/ExpressionProgram.h +137 -0
- plumed/_lib/include/plumed/lepton/ExpressionTreeNode.h +145 -0
- plumed/_lib/include/plumed/lepton/Lepton.h +85 -0
- plumed/_lib/include/plumed/lepton/MSVC_erfc.h +123 -0
- plumed/_lib/include/plumed/lepton/Operation.h +1302 -0
- plumed/_lib/include/plumed/lepton/ParsedExpression.h +165 -0
- plumed/_lib/include/plumed/lepton/Parser.h +111 -0
- plumed/_lib/include/plumed/lepton/windowsIncludes.h +73 -0
- plumed/_lib/include/plumed/mapping/Path.h +44 -0
- plumed/_lib/include/plumed/mapping/PathProjectionCalculator.h +57 -0
- plumed/_lib/include/plumed/matrixtools/MatrixOperationBase.h +54 -0
- plumed/_lib/include/plumed/matrixtools/MatrixTimesMatrix.h +309 -0
- plumed/_lib/include/plumed/matrixtools/MatrixTimesVectorBase.h +365 -0
- plumed/_lib/include/plumed/matrixtools/OuterProduct.h +238 -0
- plumed/_lib/include/plumed/maze/Core.h +65 -0
- plumed/_lib/include/plumed/maze/Loss.h +86 -0
- plumed/_lib/include/plumed/maze/Member.h +66 -0
- plumed/_lib/include/plumed/maze/Memetic.h +799 -0
- plumed/_lib/include/plumed/maze/Optimizer.h +357 -0
- plumed/_lib/include/plumed/maze/Random_MT.h +156 -0
- plumed/_lib/include/plumed/maze/Tools.h +183 -0
- plumed/_lib/include/plumed/metatomic/vesin.h +188 -0
- plumed/_lib/include/plumed/molfile/Gromacs.h +2013 -0
- plumed/_lib/include/plumed/molfile/endianswap.h +217 -0
- plumed/_lib/include/plumed/molfile/fastio.h +683 -0
- plumed/_lib/include/plumed/molfile/largefiles.h +78 -0
- plumed/_lib/include/plumed/molfile/libmolfile_plugin.h +77 -0
- plumed/_lib/include/plumed/molfile/molfile_plugin.h +1034 -0
- plumed/_lib/include/plumed/molfile/periodic_table.h +248 -0
- plumed/_lib/include/plumed/molfile/readpdb.h +447 -0
- plumed/_lib/include/plumed/molfile/vmdplugin.h +236 -0
- plumed/_lib/include/plumed/multicolvar/MultiColvarShortcuts.h +45 -0
- plumed/_lib/include/plumed/opes/ExpansionCVs.h +79 -0
- plumed/_lib/include/plumed/sasa/Sasa.h +32 -0
- plumed/_lib/include/plumed/secondarystructure/SecondaryStructureBase.h +372 -0
- plumed/_lib/include/plumed/setup/ActionSetup.h +25 -0
- plumed/_lib/include/plumed/small_vector/small_vector.h +6114 -0
- plumed/_lib/include/plumed/symfunc/CoordinationNumbers.h +41 -0
- plumed/_lib/include/plumed/tools/Angle.h +52 -0
- plumed/_lib/include/plumed/tools/AtomDistribution.h +138 -0
- plumed/_lib/include/plumed/tools/AtomNumber.h +152 -0
- plumed/_lib/include/plumed/tools/BiasRepresentation.h +106 -0
- plumed/_lib/include/plumed/tools/BitmaskEnum.h +167 -0
- plumed/_lib/include/plumed/tools/Brent1DRootSearch.h +159 -0
- plumed/_lib/include/plumed/tools/CheckInRange.h +44 -0
- plumed/_lib/include/plumed/tools/Citations.h +74 -0
- plumed/_lib/include/plumed/tools/ColvarOutput.h +118 -0
- plumed/_lib/include/plumed/tools/Communicator.h +316 -0
- plumed/_lib/include/plumed/tools/ConjugateGradient.h +80 -0
- plumed/_lib/include/plumed/tools/DLLoader.h +79 -0
- plumed/_lib/include/plumed/tools/ERMSD.h +73 -0
- plumed/_lib/include/plumed/tools/Exception.h +406 -0
- plumed/_lib/include/plumed/tools/File.h +28 -0
- plumed/_lib/include/plumed/tools/FileBase.h +153 -0
- plumed/_lib/include/plumed/tools/FileTools.h +37 -0
- plumed/_lib/include/plumed/tools/ForwardDecl.h +54 -0
- plumed/_lib/include/plumed/tools/Grid.h +638 -0
- plumed/_lib/include/plumed/tools/HistogramBead.h +136 -0
- plumed/_lib/include/plumed/tools/IFile.h +117 -0
- plumed/_lib/include/plumed/tools/KernelFunctions.h +113 -0
- plumed/_lib/include/plumed/tools/Keywords.h +380 -0
- plumed/_lib/include/plumed/tools/LatticeReduction.h +66 -0
- plumed/_lib/include/plumed/tools/LeptonCall.h +64 -0
- plumed/_lib/include/plumed/tools/LinkCells.h +126 -0
- plumed/_lib/include/plumed/tools/Log.h +41 -0
- plumed/_lib/include/plumed/tools/LoopUnroller.h +163 -0
- plumed/_lib/include/plumed/tools/Matrix.h +721 -0
- plumed/_lib/include/plumed/tools/MatrixSquareBracketsAccess.h +138 -0
- plumed/_lib/include/plumed/tools/MergeVectorTools.h +153 -0
- plumed/_lib/include/plumed/tools/Minimise1DBrent.h +244 -0
- plumed/_lib/include/plumed/tools/MinimiseBase.h +120 -0
- plumed/_lib/include/plumed/tools/MolDataClass.h +51 -0
- plumed/_lib/include/plumed/tools/NeighborList.h +112 -0
- plumed/_lib/include/plumed/tools/OFile.h +286 -0
- plumed/_lib/include/plumed/tools/OpenACC.h +180 -0
- plumed/_lib/include/plumed/tools/OpenMP.h +75 -0
- plumed/_lib/include/plumed/tools/PDB.h +154 -0
- plumed/_lib/include/plumed/tools/Pbc.h +139 -0
- plumed/_lib/include/plumed/tools/PlumedHandle.h +105 -0
- plumed/_lib/include/plumed/tools/RMSD.h +493 -0
- plumed/_lib/include/plumed/tools/Random.h +80 -0
- plumed/_lib/include/plumed/tools/RootFindingBase.h +79 -0
- plumed/_lib/include/plumed/tools/Stopwatch.h +475 -0
- plumed/_lib/include/plumed/tools/Subprocess.h +142 -0
- plumed/_lib/include/plumed/tools/SwitchingFunction.h +208 -0
- plumed/_lib/include/plumed/tools/Tensor.h +724 -0
- plumed/_lib/include/plumed/tools/TokenizedLine.h +123 -0
- plumed/_lib/include/plumed/tools/Tools.h +638 -0
- plumed/_lib/include/plumed/tools/Torsion.h +55 -0
- plumed/_lib/include/plumed/tools/TrajectoryParser.h +118 -0
- plumed/_lib/include/plumed/tools/Tree.h +61 -0
- plumed/_lib/include/plumed/tools/TypesafePtr.h +463 -0
- plumed/_lib/include/plumed/tools/Units.h +167 -0
- plumed/_lib/include/plumed/tools/Vector.h +433 -0
- plumed/_lib/include/plumed/tools/View.h +296 -0
- plumed/_lib/include/plumed/tools/View2D.h +100 -0
- plumed/_lib/include/plumed/tools/h36.h +39 -0
- plumed/_lib/include/plumed/vatom/ActionWithVirtualAtom.h +32 -0
- plumed/_lib/include/plumed/ves/BasisFunctions.h +380 -0
- plumed/_lib/include/plumed/ves/CoeffsBase.h +310 -0
- plumed/_lib/include/plumed/ves/CoeffsMatrix.h +220 -0
- plumed/_lib/include/plumed/ves/CoeffsVector.h +251 -0
- plumed/_lib/include/plumed/ves/FermiSwitchingFunction.h +74 -0
- plumed/_lib/include/plumed/ves/GridIntegrationWeights.h +50 -0
- plumed/_lib/include/plumed/ves/GridLinearInterpolation.h +81 -0
- plumed/_lib/include/plumed/ves/GridProjWeights.h +61 -0
- plumed/_lib/include/plumed/ves/LinearBasisSetExpansion.h +303 -0
- plumed/_lib/include/plumed/ves/Optimizer.h +444 -0
- plumed/_lib/include/plumed/ves/TargetDistModifer.h +53 -0
- plumed/_lib/include/plumed/ves/TargetDistribution.h +266 -0
- plumed/_lib/include/plumed/ves/VesBias.h +545 -0
- plumed/_lib/include/plumed/ves/VesTools.h +142 -0
- plumed/_lib/include/plumed/ves/WaveletGrid.h +75 -0
- plumed/_lib/include/plumed/volumes/ActionVolume.h +268 -0
- plumed/_lib/include/plumed/volumes/VolumeShortcut.h +147 -0
- plumed/_lib/include/plumed/wrapper/Plumed.h +5025 -0
- plumed/_lib/include/plumed/xdrfile/xdrfile.h +663 -0
- plumed/_lib/include/plumed/xdrfile/xdrfile_trr.h +89 -0
- plumed/_lib/include/plumed/xdrfile/xdrfile_xtc.h +90 -0
- plumed/_lib/lib/PythonCVInterface.dylib +0 -0
- plumed/_lib/lib/libplumed.dylib +0 -0
- plumed/_lib/lib/libplumedKernel.dylib +0 -0
- plumed/_lib/lib/libplumedWrapper.a +0 -0
- plumed/_lib/lib/pkgconfig/plumed.pc +13 -0
- plumed/_lib/lib/pkgconfig/plumedInternals.pc +13 -0
- plumed/_lib/lib/pkgconfig/plumedWrapper.pc +13 -0
- plumed/_lib/lib/plumed/fortran/plumed.f90 +879 -0
- plumed/_lib/lib/plumed/fortran/plumed_f08.f90 +2625 -0
- plumed/_lib/lib/plumed/modulefile +69 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.config +43 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/CMakeLists.txt +543 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/CMakeLists.txt.preplumed +540 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdlib/expanded.cpp +1628 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdlib/expanded.cpp.preplumed +1590 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdlib/expanded.h +103 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdlib/expanded.h.preplumed +99 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdlib/sim_util.cpp +2527 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdlib/sim_util.cpp.preplumed +2513 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdrun/legacymdrunoptions.cpp +208 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdrun/legacymdrunoptions.cpp.preplumed +175 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdrun/legacymdrunoptions.h +408 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdrun/legacymdrunoptions.h.preplumed +394 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdrun/md.cpp +2348 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdrun/md.cpp.preplumed +2091 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdrun/minimize.cpp +3573 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdrun/minimize.cpp.preplumed +3495 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdrun/replicaexchange.cpp +1506 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdrun/replicaexchange.cpp.preplumed +1402 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdrun/replicaexchange.h +114 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdrun/replicaexchange.h.preplumed +106 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdrun/rerun.cpp +997 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdrun/rerun.cpp.preplumed +906 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdrun/runner.cpp +2780 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/mdrun/runner.cpp.preplumed +2738 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/modularsimulator/expandedensembleelement.cpp +224 -0
- plumed/_lib/lib/plumed/patches/gromacs-2022.5.diff/src/gromacs/modularsimulator/expandedensembleelement.cpp.preplumed +222 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.config +43 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/CMakeLists.txt +549 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/CMakeLists.txt.preplumed +546 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdlib/expanded.cpp +1632 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdlib/expanded.cpp.preplumed +1594 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdlib/expanded.h +104 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdlib/expanded.h.preplumed +100 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdlib/sim_util.cpp +2624 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdlib/sim_util.cpp.preplumed +2610 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdrun/legacymdrunoptions.cpp +208 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdrun/legacymdrunoptions.cpp.preplumed +175 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdrun/legacymdrunoptions.h +409 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdrun/legacymdrunoptions.h.preplumed +395 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdrun/md.cpp +2419 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdrun/md.cpp.preplumed +2164 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdrun/minimize.cpp +3546 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdrun/minimize.cpp.preplumed +3468 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdrun/replicaexchange.cpp +1513 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdrun/replicaexchange.cpp.preplumed +1409 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdrun/replicaexchange.h +114 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdrun/replicaexchange.h.preplumed +106 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdrun/rerun.cpp +991 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdrun/rerun.cpp.preplumed +900 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdrun/runner.cpp +2895 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/mdrun/runner.cpp.preplumed +2849 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/modularsimulator/expandedensembleelement.cpp +224 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/modularsimulator/expandedensembleelement.cpp.preplumed +222 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/taskassignment/decidegpuusage.cpp +886 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/taskassignment/decidegpuusage.cpp.preplumed +880 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/taskassignment/include/gromacs/taskassignment/decidegpuusage.h +347 -0
- plumed/_lib/lib/plumed/patches/gromacs-2023.5.diff/src/gromacs/taskassignment/include/gromacs/taskassignment/decidegpuusage.h.preplumed +345 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.config +43 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/CMakeLists.txt +575 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/CMakeLists.txt.preplumed +572 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdlib/expanded.cpp +1632 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdlib/expanded.cpp.preplumed +1594 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdlib/expanded.h +104 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdlib/expanded.h.preplumed +100 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdlib/sim_util.cpp +2564 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdlib/sim_util.cpp.preplumed +2550 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdrun/legacymdrunoptions.cpp +208 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdrun/legacymdrunoptions.cpp.preplumed +175 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdrun/legacymdrunoptions.h +410 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdrun/legacymdrunoptions.h.preplumed +396 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdrun/md.cpp +2435 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdrun/md.cpp.preplumed +2187 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdrun/minimize.cpp +3592 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdrun/minimize.cpp.preplumed +3514 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdrun/replicaexchange.cpp +1513 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdrun/replicaexchange.cpp.preplumed +1409 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdrun/replicaexchange.h +114 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdrun/replicaexchange.h.preplumed +106 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdrun/rerun.cpp +958 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdrun/rerun.cpp.preplumed +929 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdrun/runner.cpp +2987 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/mdrun/runner.cpp.preplumed +2941 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/modularsimulator/expandedensembleelement.cpp +224 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/modularsimulator/expandedensembleelement.cpp.preplumed +222 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/taskassignment/decidegpuusage.cpp +904 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/taskassignment/decidegpuusage.cpp.preplumed +898 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/taskassignment/include/gromacs/taskassignment/decidegpuusage.h +353 -0
- plumed/_lib/lib/plumed/patches/gromacs-2024.3.diff/src/gromacs/taskassignment/include/gromacs/taskassignment/decidegpuusage.h.preplumed +351 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.config +39 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.diff/cmake/gmxManagePlumed.cmake +82 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.diff/cmake/gmxManagePlumed.cmake.preplumed +82 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.diff/src/gromacs/applied_forces/plumed/plumedMDModule.cpp +162 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.diff/src/gromacs/applied_forces/plumed/plumedMDModule.cpp.preplumed +154 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.diff/src/gromacs/applied_forces/plumed/plumedOptions.cpp +107 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.diff/src/gromacs/applied_forces/plumed/plumedOptions.cpp.preplumed +99 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.diff/src/gromacs/applied_forces/plumed/plumedOptions.h +120 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.diff/src/gromacs/applied_forces/plumed/plumedOptions.h.preplumed +111 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.diff/src/gromacs/applied_forces/plumed/plumedforceprovider.cpp +215 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.diff/src/gromacs/applied_forces/plumed/plumedforceprovider.cpp.preplumed +197 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.diff/src/gromacs/applied_forces/plumed/plumedforceprovider.h +87 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.diff/src/gromacs/applied_forces/plumed/plumedforceprovider.h.preplumed +86 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.diff/src/gromacs/mdrun/runner.cpp +2971 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.diff/src/gromacs/mdrun/runner.cpp.preplumed +2970 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.diff/src/gromacs/mdrunutility/mdmodulesnotifiers.h +430 -0
- plumed/_lib/lib/plumed/patches/gromacs-2025.0.diff/src/gromacs/mdrunutility/mdmodulesnotifiers.h.preplumed +429 -0
- plumed/_lib/lib/plumed/patches/namd-2.12.config +30 -0
- plumed/_lib/lib/plumed/patches/namd-2.12.diff +267 -0
- plumed/_lib/lib/plumed/patches/namd-2.13.config +30 -0
- plumed/_lib/lib/plumed/patches/namd-2.13.diff +267 -0
- plumed/_lib/lib/plumed/patches/namd-2.14.config +30 -0
- plumed/_lib/lib/plumed/patches/namd-2.14.diff +268 -0
- plumed/_lib/lib/plumed/patches/patch.sh +500 -0
- plumed/_lib/lib/plumed/patches/qespresso-5.0.2.config +25 -0
- plumed/_lib/lib/plumed/patches/qespresso-5.0.2.diff/PW/src/forces.f90 +368 -0
- plumed/_lib/lib/plumed/patches/qespresso-5.0.2.diff/PW/src/forces.f90.preplumed +366 -0
- plumed/_lib/lib/plumed/patches/qespresso-5.0.2.diff/PW/src/plugin_forces.f90 +71 -0
- plumed/_lib/lib/plumed/patches/qespresso-5.0.2.diff/PW/src/plugin_forces.f90.preplumed +24 -0
- plumed/_lib/lib/plumed/patches/qespresso-5.0.2.diff/PW/src/plugin_initialization.f90 +62 -0
- plumed/_lib/lib/plumed/patches/qespresso-5.0.2.diff/PW/src/plugin_initialization.f90.preplumed +21 -0
- plumed/_lib/lib/plumed/patches/qespresso-5.0.2.diff/PW/src/pwscf.f90 +189 -0
- plumed/_lib/lib/plumed/patches/qespresso-5.0.2.diff/PW/src/pwscf.f90.preplumed +185 -0
- plumed/_lib/lib/plumed/patches/qespresso-6.2.config +26 -0
- plumed/_lib/lib/plumed/patches/qespresso-6.2.diff/PW/src/forces.f90 +422 -0
- plumed/_lib/lib/plumed/patches/qespresso-6.2.diff/PW/src/forces.f90.preplumed +420 -0
- plumed/_lib/lib/plumed/patches/qespresso-6.2.diff/PW/src/plugin_ext_forces.f90 +70 -0
- plumed/_lib/lib/plumed/patches/qespresso-6.2.diff/PW/src/plugin_ext_forces.f90.preplumed +23 -0
- plumed/_lib/lib/plumed/patches/qespresso-6.2.diff/PW/src/plugin_initialization.f90 +62 -0
- plumed/_lib/lib/plumed/patches/qespresso-6.2.diff/PW/src/plugin_initialization.f90.preplumed +21 -0
- plumed/_lib/lib/plumed/patches/qespresso-6.2.diff/PW/src/run_pwscf.f90 +233 -0
- plumed/_lib/lib/plumed/patches/qespresso-6.2.diff/PW/src/run_pwscf.f90.preplumed +230 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.0.config +28 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.0.diff/Modules/Makefile +175 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.0.diff/Modules/Makefile.preplumed +171 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.0.diff/PW/src/forces.f90 +486 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.0.diff/PW/src/forces.f90.preplumed +484 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.0.diff/PW/src/plugin_ext_forces.f90 +74 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.0.diff/PW/src/plugin_ext_forces.f90.preplumed +23 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.0.diff/PW/src/plugin_initialization.f90 +64 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.0.diff/PW/src/plugin_initialization.f90.preplumed +21 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.0.diff/PW/src/run_pwscf.f90 +532 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.0.diff/PW/src/run_pwscf.f90.preplumed +518 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.2.config +28 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.2.diff/Modules/Makefile +249 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.2.diff/Modules/Makefile.preplumed +244 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.2.diff/PW/src/forces.f90 +532 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.2.diff/PW/src/forces.f90.preplumed +535 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.2.diff/PW/src/plugin_ext_forces.f90 +74 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.2.diff/PW/src/plugin_ext_forces.f90.preplumed +23 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.2.diff/PW/src/plugin_initialization.f90 +64 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.2.diff/PW/src/plugin_initialization.f90.preplumed +21 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.2.diff/PW/src/run_pwscf.f90 +569 -0
- plumed/_lib/lib/plumed/patches/qespresso-7.2.diff/PW/src/run_pwscf.f90.preplumed +560 -0
- plumed/_lib/lib/plumed/plumed-config +9 -0
- plumed/_lib/lib/plumed/plumed-mklib +9 -0
- plumed/_lib/lib/plumed/plumed-newcv +9 -0
- plumed/_lib/lib/plumed/plumed-partial_tempering +9 -0
- plumed/_lib/lib/plumed/plumed-patch +9 -0
- plumed/_lib/lib/plumed/plumed-runtime +0 -0
- plumed/_lib/lib/plumed/plumed-selector +9 -0
- plumed/_lib/lib/plumed/plumed-vim2html +9 -0
- plumed/_lib/lib/plumed/scripts/config.sh +126 -0
- plumed/_lib/lib/plumed/scripts/mklib.sh +175 -0
- plumed/_lib/lib/plumed/scripts/newcv.sh +26 -0
- plumed/_lib/lib/plumed/scripts/partial_tempering.sh +319 -0
- plumed/_lib/lib/plumed/scripts/patch.sh +4 -0
- plumed/_lib/lib/plumed/scripts/selector.sh +234 -0
- plumed/_lib/lib/plumed/scripts/vim2html.sh +190 -0
- plumed/_lib/lib/plumed/src/colvar/Template.cpp +116 -0
- plumed/_lib/lib/plumed/src/config/compile_options.sh +3 -0
- plumed/_lib/lib/plumed/src/config/config.txt +181 -0
- plumed/_lib/lib/plumed/src/lib/Plumed.cmake +6 -0
- plumed/_lib/lib/plumed/src/lib/Plumed.cmake.runtime +5 -0
- plumed/_lib/lib/plumed/src/lib/Plumed.cmake.shared +5 -0
- plumed/_lib/lib/plumed/src/lib/Plumed.cmake.static +3 -0
- plumed/_lib/lib/plumed/src/lib/Plumed.inc +6 -0
- plumed/_lib/lib/plumed/src/lib/Plumed.inc.runtime +5 -0
- plumed/_lib/lib/plumed/src/lib/Plumed.inc.shared +5 -0
- plumed/_lib/lib/plumed/src/lib/Plumed.inc.static +3 -0
- plumed/_lib/lib/plumed/vim/scripts.vim +6 -0
- plumed/_plumed_core.cpython-311-darwin.so +0 -0
- plumed/_plumed_core.cpython-312-darwin.so +0 -0
- plumed/_plumed_core.cpython-313-darwin.so +0 -0
- plumed/_plumed_core.cpython-314-darwin.so +0 -0
- plumedCommunications.cpython-311-darwin.so +0 -0
- plumedCommunications.cpython-312-darwin.so +0 -0
- plumedCommunications.cpython-313-darwin.so +0 -0
- plumedCommunications.cpython-314-darwin.so +0 -0
- plumedCommunications.pyi +431 -0
|
@@ -0,0 +1,2187 @@
|
|
|
1
|
+
/*
|
|
2
|
+
* This file is part of the GROMACS molecular simulation package.
|
|
3
|
+
*
|
|
4
|
+
* Copyright 1991- The GROMACS Authors
|
|
5
|
+
* and the project initiators Erik Lindahl, Berk Hess and David van der Spoel.
|
|
6
|
+
* Consult the AUTHORS/COPYING files and https://www.gromacs.org for details.
|
|
7
|
+
*
|
|
8
|
+
* GROMACS is free software; you can redistribute it and/or
|
|
9
|
+
* modify it under the terms of the GNU Lesser General Public License
|
|
10
|
+
* as published by the Free Software Foundation; either version 2.1
|
|
11
|
+
* of the License, or (at your option) any later version.
|
|
12
|
+
*
|
|
13
|
+
* GROMACS is distributed in the hope that it will be useful,
|
|
14
|
+
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
15
|
+
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
16
|
+
* Lesser General Public License for more details.
|
|
17
|
+
*
|
|
18
|
+
* You should have received a copy of the GNU Lesser General Public
|
|
19
|
+
* License along with GROMACS; if not, see
|
|
20
|
+
* https://www.gnu.org/licenses, or write to the Free Software Foundation,
|
|
21
|
+
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
22
|
+
*
|
|
23
|
+
* If you want to redistribute modifications to GROMACS, please
|
|
24
|
+
* consider that scientific software is very special. Version
|
|
25
|
+
* control is crucial - bugs must be traceable. We will be happy to
|
|
26
|
+
* consider code for inclusion in the official distribution, but
|
|
27
|
+
* derived work must not be called official GROMACS. Details are found
|
|
28
|
+
* in the README & COPYING files - if they are missing, get the
|
|
29
|
+
* official version at https://www.gromacs.org.
|
|
30
|
+
*
|
|
31
|
+
* To help us fund GROMACS development, we humbly ask that you cite
|
|
32
|
+
* the research papers on the package. Check out https://www.gromacs.org.
|
|
33
|
+
*/
|
|
34
|
+
/*! \internal \file
|
|
35
|
+
*
|
|
36
|
+
* \brief Implements the integrator for normal molecular dynamics simulations
|
|
37
|
+
*
|
|
38
|
+
* \author David van der Spoel <david.vanderspoel@icm.uu.se>
|
|
39
|
+
* \ingroup module_mdrun
|
|
40
|
+
*/
|
|
41
|
+
#include "gmxpre.h"
|
|
42
|
+
|
|
43
|
+
#include <cinttypes>
|
|
44
|
+
#include <cmath>
|
|
45
|
+
#include <cstdio>
|
|
46
|
+
#include <cstdlib>
|
|
47
|
+
|
|
48
|
+
#include <algorithm>
|
|
49
|
+
#include <memory>
|
|
50
|
+
#include <numeric>
|
|
51
|
+
|
|
52
|
+
#include "gromacs/applied_forces/awh/awh.h"
|
|
53
|
+
#include "gromacs/applied_forces/awh/read_params.h"
|
|
54
|
+
#include "gromacs/commandline/filenm.h"
|
|
55
|
+
#include "gromacs/domdec/collect.h"
|
|
56
|
+
#include "gromacs/domdec/dlbtiming.h"
|
|
57
|
+
#include "gromacs/domdec/domdec.h"
|
|
58
|
+
#include "gromacs/domdec/domdec_network.h"
|
|
59
|
+
#include "gromacs/domdec/domdec_struct.h"
|
|
60
|
+
#include "gromacs/domdec/gpuhaloexchange.h"
|
|
61
|
+
#include "gromacs/domdec/localtopologychecker.h"
|
|
62
|
+
#include "gromacs/domdec/mdsetup.h"
|
|
63
|
+
#include "gromacs/domdec/partition.h"
|
|
64
|
+
#include "gromacs/essentialdynamics/edsam.h"
|
|
65
|
+
#include "gromacs/ewald/pme_load_balancing.h"
|
|
66
|
+
#include "gromacs/ewald/pme_pp.h"
|
|
67
|
+
#include "gromacs/fileio/trxio.h"
|
|
68
|
+
#include "gromacs/gmxlib/network.h"
|
|
69
|
+
#include "gromacs/gmxlib/nrnb.h"
|
|
70
|
+
#include "gromacs/gpu_utils/device_stream_manager.h"
|
|
71
|
+
#include "gromacs/gpu_utils/gpu_utils.h"
|
|
72
|
+
#include "gromacs/imd/imd.h"
|
|
73
|
+
#include "gromacs/listed_forces/listed_forces.h"
|
|
74
|
+
#include "gromacs/listed_forces/listed_forces_gpu.h"
|
|
75
|
+
#include "gromacs/math/boxmatrix.h"
|
|
76
|
+
#include "gromacs/math/functions.h"
|
|
77
|
+
#include "gromacs/math/vec.h"
|
|
78
|
+
#include "gromacs/math/vectypes.h"
|
|
79
|
+
#include "gromacs/mdlib/checkpointhandler.h"
|
|
80
|
+
#include "gromacs/mdlib/compute_io.h"
|
|
81
|
+
#include "gromacs/mdlib/constr.h"
|
|
82
|
+
#include "gromacs/mdlib/coupling.h"
|
|
83
|
+
#include "gromacs/mdlib/ebin.h"
|
|
84
|
+
#include "gromacs/mdlib/enerdata_utils.h"
|
|
85
|
+
#include "gromacs/mdlib/energyoutput.h"
|
|
86
|
+
#include "gromacs/mdlib/expanded.h"
|
|
87
|
+
#include "gromacs/mdlib/force.h"
|
|
88
|
+
#include "gromacs/mdlib/force_flags.h"
|
|
89
|
+
#include "gromacs/mdlib/forcerec.h"
|
|
90
|
+
#include "gromacs/mdlib/freeenergyparameters.h"
|
|
91
|
+
#include "gromacs/mdlib/md_support.h"
|
|
92
|
+
#include "gromacs/mdlib/mdatoms.h"
|
|
93
|
+
#include "gromacs/mdlib/mdgraph_gpu.h"
|
|
94
|
+
#include "gromacs/mdlib/mdoutf.h"
|
|
95
|
+
#include "gromacs/mdlib/membed.h"
|
|
96
|
+
#include "gromacs/mdlib/resethandler.h"
|
|
97
|
+
#include "gromacs/mdlib/sighandler.h"
|
|
98
|
+
#include "gromacs/mdlib/simulationsignal.h"
|
|
99
|
+
#include "gromacs/mdlib/stat.h"
|
|
100
|
+
#include "gromacs/mdlib/stophandler.h"
|
|
101
|
+
#include "gromacs/mdlib/tgroup.h"
|
|
102
|
+
#include "gromacs/mdlib/trajectory_writing.h"
|
|
103
|
+
#include "gromacs/mdlib/update.h"
|
|
104
|
+
#include "gromacs/mdlib/update_constrain_gpu.h"
|
|
105
|
+
#include "gromacs/mdlib/update_vv.h"
|
|
106
|
+
#include "gromacs/mdlib/vcm.h"
|
|
107
|
+
#include "gromacs/mdlib/vsite.h"
|
|
108
|
+
#include "gromacs/mdrunutility/freeenergy.h"
|
|
109
|
+
#include "gromacs/mdrunutility/handlerestart.h"
|
|
110
|
+
#include "gromacs/mdrunutility/multisim.h"
|
|
111
|
+
#include "gromacs/mdrunutility/printtime.h"
|
|
112
|
+
#include "gromacs/mdtypes/awh_history.h"
|
|
113
|
+
#include "gromacs/mdtypes/awh_params.h"
|
|
114
|
+
#include "gromacs/mdtypes/commrec.h"
|
|
115
|
+
#include "gromacs/mdtypes/df_history.h"
|
|
116
|
+
#include "gromacs/mdtypes/energyhistory.h"
|
|
117
|
+
#include "gromacs/mdtypes/fcdata.h"
|
|
118
|
+
#include "gromacs/mdtypes/forcebuffers.h"
|
|
119
|
+
#include "gromacs/mdtypes/forcerec.h"
|
|
120
|
+
#include "gromacs/mdtypes/group.h"
|
|
121
|
+
#include "gromacs/mdtypes/iforceprovider.h"
|
|
122
|
+
#include "gromacs/mdtypes/inputrec.h"
|
|
123
|
+
#include "gromacs/mdtypes/interaction_const.h"
|
|
124
|
+
#include "gromacs/mdtypes/md_enums.h"
|
|
125
|
+
#include "gromacs/mdtypes/mdatom.h"
|
|
126
|
+
#include "gromacs/mdtypes/mdrunoptions.h"
|
|
127
|
+
#include "gromacs/mdtypes/multipletimestepping.h"
|
|
128
|
+
#include "gromacs/mdtypes/observableshistory.h"
|
|
129
|
+
#include "gromacs/mdtypes/observablesreducer.h"
|
|
130
|
+
#include "gromacs/mdtypes/pullhistory.h"
|
|
131
|
+
#include "gromacs/mdtypes/simulation_workload.h"
|
|
132
|
+
#include "gromacs/mdtypes/state.h"
|
|
133
|
+
#include "gromacs/mdtypes/state_propagator_data_gpu.h"
|
|
134
|
+
#include "gromacs/modularsimulator/energydata.h"
|
|
135
|
+
#include "gromacs/nbnxm/gpu_data_mgmt.h"
|
|
136
|
+
#include "gromacs/nbnxm/nbnxm.h"
|
|
137
|
+
#include "gromacs/pbcutil/pbc.h"
|
|
138
|
+
#include "gromacs/pulling/output.h"
|
|
139
|
+
#include "gromacs/pulling/pull.h"
|
|
140
|
+
#include "gromacs/swap/swapcoords.h"
|
|
141
|
+
#include "gromacs/taskassignment/include/gromacs/taskassignment/decidesimulationworkload.h"
|
|
142
|
+
#include "gromacs/timing/wallcycle.h"
|
|
143
|
+
#include "gromacs/timing/walltime_accounting.h"
|
|
144
|
+
#include "gromacs/topology/atoms.h"
|
|
145
|
+
#include "gromacs/topology/idef.h"
|
|
146
|
+
#include "gromacs/topology/mtop_util.h"
|
|
147
|
+
#include "gromacs/topology/topology.h"
|
|
148
|
+
#include "gromacs/trajectory/trajectoryframe.h"
|
|
149
|
+
#include "gromacs/utility/basedefinitions.h"
|
|
150
|
+
#include "gromacs/utility/cstringutil.h"
|
|
151
|
+
#include "gromacs/utility/fatalerror.h"
|
|
152
|
+
#include "gromacs/utility/logger.h"
|
|
153
|
+
#include "gromacs/utility/real.h"
|
|
154
|
+
#include "gromacs/utility/smalloc.h"
|
|
155
|
+
|
|
156
|
+
#include "legacysimulator.h"
|
|
157
|
+
#include "replicaexchange.h"
|
|
158
|
+
#include "shellfc.h"
|
|
159
|
+
|
|
160
|
+
using gmx::SimulationSignaller;
|
|
161
|
+
|
|
162
|
+
void gmx::LegacySimulator::do_md()
|
|
163
|
+
{
|
|
164
|
+
// TODO Historically, the EM and MD "integrators" used different
|
|
165
|
+
// names for the t_inputrec *parameter, but these must have the
|
|
166
|
+
// same name, now that it's a member of a struct. We use this ir
|
|
167
|
+
// alias to avoid a large ripple of nearly useless changes.
|
|
168
|
+
// t_inputrec is being replaced by IMdpOptionsProvider, so this
|
|
169
|
+
// will go away eventually.
|
|
170
|
+
const t_inputrec* ir = inputRec_;
|
|
171
|
+
|
|
172
|
+
double t, t0 = ir->init_t;
|
|
173
|
+
gmx_bool bGStatEveryStep, bGStat, bCalcVir, bCalcEnerStep, bCalcEner;
|
|
174
|
+
gmx_bool bNS = FALSE, bNStList, bStopCM, bFirstStep, bInitStep, bLastStep = FALSE;
|
|
175
|
+
gmx_bool bDoExpanded = FALSE;
|
|
176
|
+
gmx_bool do_ene, do_log, do_verbose;
|
|
177
|
+
gmx_bool bMainState;
|
|
178
|
+
unsigned int force_flags;
|
|
179
|
+
tensor force_vir = { { 0 } }, shake_vir = { { 0 } }, total_vir = { { 0 } }, pres = { { 0 } };
|
|
180
|
+
int i, m;
|
|
181
|
+
rvec mu_tot;
|
|
182
|
+
Matrix3x3 pressureCouplingMu{ { 0. } }, parrinelloRahmanM{ { 0. } };
|
|
183
|
+
gmx_repl_ex_t repl_ex = nullptr;
|
|
184
|
+
gmx_global_stat_t gstat;
|
|
185
|
+
gmx_shellfc_t* shellfc;
|
|
186
|
+
gmx_bool bSumEkinhOld, bDoReplEx, bExchanged, bNeedRepartition;
|
|
187
|
+
gmx_bool bTrotter;
|
|
188
|
+
real dvdl_constr;
|
|
189
|
+
std::vector<RVec> cbuf;
|
|
190
|
+
matrix lastbox;
|
|
191
|
+
int lamnew = 0;
|
|
192
|
+
/* for FEP */
|
|
193
|
+
double cycles;
|
|
194
|
+
real saved_conserved_quantity = 0;
|
|
195
|
+
real last_ekin = 0;
|
|
196
|
+
t_extmass MassQ;
|
|
197
|
+
char sbuf[STEPSTRSIZE], sbuf2[STEPSTRSIZE];
|
|
198
|
+
|
|
199
|
+
/* PME load balancing data for GPU kernels */
|
|
200
|
+
gmx_bool bPMETune = FALSE;
|
|
201
|
+
gmx_bool bPMETunePrinting = FALSE;
|
|
202
|
+
|
|
203
|
+
bool bInteractiveMDstep = false;
|
|
204
|
+
|
|
205
|
+
SimulationSignals signals;
|
|
206
|
+
// Most global communnication stages don't propagate mdrun
|
|
207
|
+
// signals, and will use this object to achieve that.
|
|
208
|
+
SimulationSignaller nullSignaller(nullptr, nullptr, nullptr, false, false);
|
|
209
|
+
|
|
210
|
+
if (!mdrunOptions_.writeConfout)
|
|
211
|
+
{
|
|
212
|
+
// This is on by default, and the main known use case for
|
|
213
|
+
// turning it off is for convenience in benchmarking, which is
|
|
214
|
+
// something that should not show up in the general user
|
|
215
|
+
// interface.
|
|
216
|
+
GMX_LOG(mdLog_.info)
|
|
217
|
+
.asParagraph()
|
|
218
|
+
.appendText(
|
|
219
|
+
"The -noconfout functionality is deprecated, and may be removed in a "
|
|
220
|
+
"future version.");
|
|
221
|
+
}
|
|
222
|
+
|
|
223
|
+
/* md-vv uses averaged full step velocities for T-control
|
|
224
|
+
md-vv-avek uses averaged half step velocities for T-control (but full step ekin for P control)
|
|
225
|
+
md uses averaged half step kinetic energies to determine temperature unless defined otherwise by GMX_EKIN_AVE_VEL; */
|
|
226
|
+
bTrotter = (EI_VV(ir->eI)
|
|
227
|
+
&& (inputrecNptTrotter(ir) || inputrecNphTrotter(ir) || inputrecNvtTrotter(ir)));
|
|
228
|
+
|
|
229
|
+
const bool bRerunMD = false;
|
|
230
|
+
|
|
231
|
+
int nstglobalcomm = computeGlobalCommunicationPeriod(mdLog_, ir, cr_);
|
|
232
|
+
bGStatEveryStep = (nstglobalcomm == 1);
|
|
233
|
+
|
|
234
|
+
const SimulationGroups* groups = &topGlobal_.groups;
|
|
235
|
+
|
|
236
|
+
std::unique_ptr<EssentialDynamics> ed = nullptr;
|
|
237
|
+
if (opt2bSet("-ei", nFile_, fnm_))
|
|
238
|
+
{
|
|
239
|
+
/* Initialize essential dynamics sampling */
|
|
240
|
+
ed = init_edsam(mdLog_,
|
|
241
|
+
opt2fn_null("-ei", nFile_, fnm_),
|
|
242
|
+
opt2fn("-eo", nFile_, fnm_),
|
|
243
|
+
topGlobal_,
|
|
244
|
+
*ir,
|
|
245
|
+
cr_,
|
|
246
|
+
constr_,
|
|
247
|
+
stateGlobal_,
|
|
248
|
+
observablesHistory_,
|
|
249
|
+
oenv_,
|
|
250
|
+
startingBehavior_);
|
|
251
|
+
}
|
|
252
|
+
else if (observablesHistory_->edsamHistory)
|
|
253
|
+
{
|
|
254
|
+
gmx_fatal(FARGS,
|
|
255
|
+
"The checkpoint is from a run with essential dynamics sampling, "
|
|
256
|
+
"but the current run did not specify the -ei option. "
|
|
257
|
+
"Either specify the -ei option to mdrun, or do not use this checkpoint file.");
|
|
258
|
+
}
|
|
259
|
+
|
|
260
|
+
int* fep_state = MAIN(cr_) ? &stateGlobal_->fep_state : nullptr;
|
|
261
|
+
gmx::ArrayRef<real> lambda = MAIN(cr_) ? stateGlobal_->lambda : gmx::ArrayRef<real>();
|
|
262
|
+
initialize_lambdas(
|
|
263
|
+
fpLog_, ir->efep, ir->bSimTemp, *ir->fepvals, ir->simtempvals->temperatures, ekind_, MAIN(cr_), fep_state, lambda);
|
|
264
|
+
Update upd(*ir, *ekind_, deform_);
|
|
265
|
+
|
|
266
|
+
// Simulated annealing updates the reference temperature.
|
|
267
|
+
const bool doSimulatedAnnealing = initSimulatedAnnealing(*ir, ekind_, &upd);
|
|
268
|
+
|
|
269
|
+
const bool useReplicaExchange = (replExParams_.exchangeInterval > 0);
|
|
270
|
+
|
|
271
|
+
t_fcdata& fcdata = *fr_->fcdata;
|
|
272
|
+
|
|
273
|
+
bool simulationsShareState = false;
|
|
274
|
+
bool simulationsShareHamiltonian = false;
|
|
275
|
+
int nstSignalComm = nstglobalcomm;
|
|
276
|
+
{
|
|
277
|
+
// TODO This implementation of ensemble orientation restraints is nasty because
|
|
278
|
+
// a user can't just do multi-sim with single-sim orientation restraints.
|
|
279
|
+
bool usingEnsembleRestraints =
|
|
280
|
+
(fcdata.disres->nsystems > 1) || ((ms_ != nullptr) && fcdata.orires);
|
|
281
|
+
bool awhUsesMultiSim = (ir->bDoAwh && ir->awhParams->shareBiasMultisim() && (ms_ != nullptr));
|
|
282
|
+
|
|
283
|
+
// Replica exchange, ensemble restraints and AWH need all
|
|
284
|
+
// simulations to remain synchronized, so they need
|
|
285
|
+
// checkpoints and stop conditions to act on the same step, so
|
|
286
|
+
// the propagation of such signals must take place between
|
|
287
|
+
// simulations, not just within simulations.
|
|
288
|
+
// TODO: Make algorithm initializers set these flags.
|
|
289
|
+
simulationsShareState = useReplicaExchange || usingEnsembleRestraints || awhUsesMultiSim;
|
|
290
|
+
|
|
291
|
+
// With AWH with bias sharing each simulation uses an non-shared, but identical, Hamiltonian
|
|
292
|
+
simulationsShareHamiltonian = useReplicaExchange || usingEnsembleRestraints;
|
|
293
|
+
|
|
294
|
+
if (simulationsShareState)
|
|
295
|
+
{
|
|
296
|
+
// Inter-simulation signal communication does not need to happen
|
|
297
|
+
// often, so we use a minimum of 200 steps to reduce overhead.
|
|
298
|
+
const int c_minimumInterSimulationSignallingInterval = 200;
|
|
299
|
+
nstSignalComm = ((c_minimumInterSimulationSignallingInterval + nstglobalcomm - 1) / nstglobalcomm)
|
|
300
|
+
* nstglobalcomm;
|
|
301
|
+
}
|
|
302
|
+
}
|
|
303
|
+
|
|
304
|
+
if (startingBehavior_ != StartingBehavior::RestartWithAppending)
|
|
305
|
+
{
|
|
306
|
+
pleaseCiteCouplingAlgorithms(fpLog_, *ir);
|
|
307
|
+
}
|
|
308
|
+
gmx_mdoutf* outf = init_mdoutf(fpLog_,
|
|
309
|
+
nFile_,
|
|
310
|
+
fnm_,
|
|
311
|
+
mdrunOptions_,
|
|
312
|
+
cr_,
|
|
313
|
+
outputProvider_,
|
|
314
|
+
mdModulesNotifiers_,
|
|
315
|
+
ir,
|
|
316
|
+
topGlobal_,
|
|
317
|
+
oenv_,
|
|
318
|
+
wallCycleCounters_,
|
|
319
|
+
startingBehavior_,
|
|
320
|
+
simulationsShareState,
|
|
321
|
+
ms_);
|
|
322
|
+
gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf),
|
|
323
|
+
topGlobal_,
|
|
324
|
+
*ir,
|
|
325
|
+
pullWork_,
|
|
326
|
+
mdoutf_get_fp_dhdl(outf),
|
|
327
|
+
false,
|
|
328
|
+
startingBehavior_,
|
|
329
|
+
simulationsShareHamiltonian,
|
|
330
|
+
mdModulesNotifiers_);
|
|
331
|
+
|
|
332
|
+
gstat = global_stat_init(ir);
|
|
333
|
+
|
|
334
|
+
const auto& simulationWork = runScheduleWork_->simulationWork;
|
|
335
|
+
const bool useGpuForPme = simulationWork.useGpuPme;
|
|
336
|
+
const bool useGpuForNonbonded = simulationWork.useGpuNonbonded;
|
|
337
|
+
const bool useGpuForUpdate = simulationWork.useGpuUpdate;
|
|
338
|
+
|
|
339
|
+
/* Check for polarizable models and flexible constraints */
|
|
340
|
+
shellfc = init_shell_flexcon(fpLog_,
|
|
341
|
+
topGlobal_,
|
|
342
|
+
constr_ ? constr_->numFlexibleConstraints() : 0,
|
|
343
|
+
ir->nstcalcenergy,
|
|
344
|
+
haveDDAtomOrdering(*cr_),
|
|
345
|
+
useGpuForPme);
|
|
346
|
+
|
|
347
|
+
{
|
|
348
|
+
double io = compute_io(ir, topGlobal_.natoms, *groups, energyOutput.numEnergyTerms(), 1);
|
|
349
|
+
if ((io > 2000) && MAIN(cr_))
|
|
350
|
+
{
|
|
351
|
+
fprintf(stderr, "\nWARNING: This run will generate roughly %.0f Mb of data\n\n", io);
|
|
352
|
+
}
|
|
353
|
+
}
|
|
354
|
+
|
|
355
|
+
ObservablesReducer observablesReducer = observablesReducerBuilder_->build();
|
|
356
|
+
|
|
357
|
+
ForceBuffers f(simulationWork.useMts,
|
|
358
|
+
(simulationWork.useGpuFBufferOpsWhenAllowed || useGpuForUpdate)
|
|
359
|
+
? PinningPolicy::PinnedIfSupported
|
|
360
|
+
: PinningPolicy::CannotBePinned);
|
|
361
|
+
const t_mdatoms* md = mdAtoms_->mdatoms();
|
|
362
|
+
if (haveDDAtomOrdering(*cr_))
|
|
363
|
+
{
|
|
364
|
+
// Local state only becomes valid now.
|
|
365
|
+
dd_init_local_state(*cr_->dd, stateGlobal_, state_);
|
|
366
|
+
|
|
367
|
+
/* Distribute the charge groups over the nodes from the main node */
|
|
368
|
+
dd_partition_system(fpLog_,
|
|
369
|
+
mdLog_,
|
|
370
|
+
ir->init_step,
|
|
371
|
+
cr_,
|
|
372
|
+
TRUE,
|
|
373
|
+
stateGlobal_,
|
|
374
|
+
topGlobal_,
|
|
375
|
+
*ir,
|
|
376
|
+
mdModulesNotifiers_,
|
|
377
|
+
imdSession_,
|
|
378
|
+
pullWork_,
|
|
379
|
+
state_,
|
|
380
|
+
&f,
|
|
381
|
+
mdAtoms_,
|
|
382
|
+
top_,
|
|
383
|
+
fr_,
|
|
384
|
+
virtualSites_,
|
|
385
|
+
constr_,
|
|
386
|
+
nrnb_,
|
|
387
|
+
nullptr,
|
|
388
|
+
FALSE);
|
|
389
|
+
upd.updateAfterPartition(state_->numAtoms(), md->cFREEZE, md->cTC, md->cACC);
|
|
390
|
+
fr_->longRangeNonbondeds->updateAfterPartition(*md);
|
|
391
|
+
}
|
|
392
|
+
else
|
|
393
|
+
{
|
|
394
|
+
/* Generate and initialize new topology */
|
|
395
|
+
mdAlgorithmsSetupAtomData(
|
|
396
|
+
cr_, *ir, topGlobal_, top_, fr_, &f, mdAtoms_, constr_, virtualSites_, shellfc);
|
|
397
|
+
|
|
398
|
+
upd.updateAfterPartition(state_->numAtoms(), md->cFREEZE, md->cTC, md->cACC);
|
|
399
|
+
fr_->longRangeNonbondeds->updateAfterPartition(*md);
|
|
400
|
+
}
|
|
401
|
+
|
|
402
|
+
// Now that the state is valid we can set up Parrinello-Rahman
|
|
403
|
+
init_parrinellorahman(ir->pressureCouplingOptions,
|
|
404
|
+
ir->deform,
|
|
405
|
+
ir->delta_t * ir->pressureCouplingOptions.nstpcouple,
|
|
406
|
+
state_->box,
|
|
407
|
+
state_->box_rel,
|
|
408
|
+
state_->boxv,
|
|
409
|
+
&parrinelloRahmanM,
|
|
410
|
+
&pressureCouplingMu);
|
|
411
|
+
|
|
412
|
+
std::unique_ptr<UpdateConstrainGpu> integrator;
|
|
413
|
+
|
|
414
|
+
StatePropagatorDataGpu* stateGpu = fr_->stateGpu;
|
|
415
|
+
|
|
416
|
+
// TODO: the assertions below should be handled by UpdateConstraintsBuilder.
|
|
417
|
+
if (useGpuForUpdate)
|
|
418
|
+
{
|
|
419
|
+
GMX_RELEASE_ASSERT(!haveDDAtomOrdering(*cr_) || ddUsesUpdateGroups(*cr_->dd)
|
|
420
|
+
|| constr_ == nullptr || constr_->numConstraintsTotal() == 0,
|
|
421
|
+
"Constraints in domain decomposition are only supported with update "
|
|
422
|
+
"groups if using GPU update.\n");
|
|
423
|
+
GMX_RELEASE_ASSERT(ir->eConstrAlg != ConstraintAlgorithm::Shake || constr_ == nullptr
|
|
424
|
+
|| constr_->numConstraintsTotal() == 0,
|
|
425
|
+
"SHAKE is not supported with GPU update.");
|
|
426
|
+
GMX_RELEASE_ASSERT(useGpuForPme || (useGpuForNonbonded && simulationWork.useGpuXBufferOpsWhenAllowed),
|
|
427
|
+
"Either PME or short-ranged non-bonded interaction tasks must run on "
|
|
428
|
+
"the GPU to use GPU update.\n");
|
|
429
|
+
GMX_RELEASE_ASSERT(ir->eI == IntegrationAlgorithm::MD,
|
|
430
|
+
"Only the md integrator is supported with the GPU update.\n");
|
|
431
|
+
GMX_RELEASE_ASSERT(
|
|
432
|
+
ir->etc != TemperatureCoupling::NoseHoover,
|
|
433
|
+
"Nose-Hoover temperature coupling is not supported with the GPU update.\n");
|
|
434
|
+
GMX_RELEASE_ASSERT(
|
|
435
|
+
ir->pressureCouplingOptions.epc == PressureCoupling::No
|
|
436
|
+
|| ir->pressureCouplingOptions.epc == PressureCoupling::ParrinelloRahman
|
|
437
|
+
|| ir->pressureCouplingOptions.epc == PressureCoupling::Berendsen
|
|
438
|
+
|| ir->pressureCouplingOptions.epc == PressureCoupling::CRescale,
|
|
439
|
+
"Only Parrinello-Rahman, Berendsen, and C-rescale pressure coupling are supported "
|
|
440
|
+
"with the GPU update.\n");
|
|
441
|
+
GMX_RELEASE_ASSERT(!md->haveVsites,
|
|
442
|
+
"Virtual sites are not supported with the GPU update.\n");
|
|
443
|
+
GMX_RELEASE_ASSERT(ed == nullptr,
|
|
444
|
+
"Essential dynamics is not supported with the GPU update.\n");
|
|
445
|
+
        GMX_RELEASE_ASSERT(!ir->bPull || !pull_have_constraint(*ir->pull),
                           "Constraints pulling is not supported with the GPU update.\n");
        GMX_RELEASE_ASSERT(fcdata.orires == nullptr,
                           "Orientation restraints are not supported with the GPU update.\n");
        GMX_RELEASE_ASSERT(
                ir->efep == FreeEnergyPerturbationType::No
                        || (!haveFepPerturbedMasses(topGlobal_) && !havePerturbedConstraints(topGlobal_)),
                "Free energy perturbation of masses and constraints are not supported with the GPU "
                "update.");

        if (constr_ != nullptr && constr_->numConstraintsTotal() > 0)
        {
            GMX_LOG(mdLog_.info)
                    .asParagraph()
                    .appendText("Updating coordinates and applying constraints on the GPU.");
        }
        else
        {
            GMX_LOG(mdLog_.info).asParagraph().appendText("Updating coordinates on the GPU.");
        }
        GMX_RELEASE_ASSERT(fr_->deviceStreamManager != nullptr,
                           "Device stream manager should be initialized in order to use GPU "
                           "update-constraints.");
        GMX_RELEASE_ASSERT(
                fr_->deviceStreamManager->streamIsValid(gmx::DeviceStreamType::UpdateAndConstraints),
                "Update stream should be initialized in order to use GPU "
                "update-constraints.");
        integrator = std::make_unique<UpdateConstrainGpu>(
                *ir,
                topGlobal_,
                ekind_->numTemperatureCouplingGroups(),
                fr_->deviceStreamManager->context(),
                fr_->deviceStreamManager->stream(gmx::DeviceStreamType::UpdateAndConstraints),
                wallCycleCounters_);

        stateGpu->setXUpdatedOnDeviceEvent(integrator->xUpdatedOnDeviceEvent());

        integrator->setPbc(PbcType::Xyz, state_->box);
    }

    if (useGpuForPme || simulationWork.useGpuXBufferOpsWhenAllowed || useGpuForUpdate)
    {
        changePinningPolicy(&state_->x, PinningPolicy::PinnedIfSupported);
    }
    if (useGpuForUpdate)
    {
        changePinningPolicy(&state_->v, PinningPolicy::PinnedIfSupported);
    }

    // NOTE: The global state is no longer used at this point.
    // But state_global is still used as temporary storage space for writing
    // the global state to file and potentially for replica exchange.
    // (Global topology should persist.)

    update_mdatoms(mdAtoms_->mdatoms(), state_->lambda[FreeEnergyPerturbationCouplingType::Mass]);

    if (ir->bExpanded)
    {
        /* Check nstexpanded here, because the grompp check was broken */
        if (ir->expandedvals->nstexpanded % ir->nstcalcenergy != 0)
        {
            gmx_fatal(FARGS,
                      "With expanded ensemble, nstexpanded should be a multiple of nstcalcenergy");
        }
        init_expanded_ensemble(startingBehavior_ != StartingBehavior::NewSimulation, ir, state_->dfhist);
    }

    if (MAIN(cr_))
    {
        EnergyData::initializeEnergyHistory(startingBehavior_, observablesHistory_, &energyOutput);
    }

    preparePrevStepPullCom(
            ir, pullWork_, md->massT, state_, stateGlobal_, cr_, startingBehavior_ != StartingBehavior::NewSimulation);

    // TODO: Remove this by converting AWH into a ForceProvider
    auto awh = prepareAwhModule(fpLog_,
                                *ir,
                                stateGlobal_,
                                cr_,
                                ms_,
                                startingBehavior_ != StartingBehavior::NewSimulation,
                                shellfc != nullptr,
                                opt2fn("-awh", nFile_, fnm_),
                                pullWork_);

    if (useReplicaExchange && MAIN(cr_))
    {
        repl_ex = init_replica_exchange(fpLog_, ms_, topGlobal_.natoms, ir, replExParams_);
    }
    /* PME tuning is only supported in the Verlet scheme, with PME for
     * Coulomb. It is not supported with only LJ PME.
     * Disable PME tuning with GPU PME decomposition */
    bPMETune = (mdrunOptions_.tunePme && usingPme(fr_->ic->eeltype) && !mdrunOptions_.reproducible
                && ir->cutoff_scheme != CutoffScheme::Group && !simulationWork.useGpuPmeDecomposition);

    pme_load_balancing_t* pme_loadbal = nullptr;
    if (bPMETune)
    {
        pme_loadbal_init(
                &pme_loadbal, cr_, mdLog_, *ir, state_->box, *fr_->ic, *fr_->nbv, fr_->pmedata, fr_->nbv->useGpu());
    }

    if (!ir->bContinuation)
    {
        if (state_->hasEntry(StateEntry::V))
        {
            auto v = makeArrayRef(state_->v);
            /* Set the velocities of vsites, shells and frozen atoms to zero */
            for (i = 0; i < md->homenr; i++)
            {
                if (md->ptype[i] == ParticleType::Shell)
                {
                    clear_rvec(v[i]);
                }
                else if (!md->cFREEZE.empty())
                {
                    for (m = 0; m < DIM; m++)
                    {
                        if (ir->opts.nFreeze[md->cFREEZE[i]][m])
                        {
                            v[i][m] = 0;
                        }
                    }
                }
            }
        }

        if (constr_)
        {
            /* Constrain the initial coordinates and velocities */
            do_constrain_first(fpLog_,
                               constr_,
                               ir,
                               md->nr,
                               md->homenr,
                               state_->x.arrayRefWithPadding(),
                               state_->v.arrayRefWithPadding(),
                               state_->box,
                               state_->lambda[FreeEnergyPerturbationCouplingType::Bonded]);
        }
    }
    const int nstfep = computeFepPeriod(*ir, replExParams_);

    /* Be REALLY careful about what flags you set here. You CANNOT assume
     * this is the first step, since we might be restarting from a checkpoint,
     * and in that case we should not do any modifications to the state.
     */
    bStopCM = (ir->comm_mode != ComRemovalAlgorithm::No && !ir->bContinuation);

    // When restarting from a checkpoint, it can be appropriate to
    // initialize ekind from quantities in the checkpoint. Otherwise,
    // compute_globals must initialize ekind before the simulation
    // starts/restarts. However, only the main rank knows what was
    // found in the checkpoint file, so we have to communicate in
    // order to coordinate the restart.
    //
    // TODO Consider removing this communication if/when checkpoint
    // reading directly follows .tpr reading, because all ranks can
    // agree on hasReadEkinState at that time.
    bool hasReadEkinState = MAIN(cr_) ? stateGlobal_->ekinstate.hasReadEkinState : false;
    if (PAR(cr_))
    {
        gmx_bcast(sizeof(hasReadEkinState), &hasReadEkinState, cr_->mpi_comm_mygroup);
    }
    if (hasReadEkinState)
    {
        restore_ekinstate_from_state(cr_, ekind_, MAIN(cr_) ? &stateGlobal_->ekinstate : nullptr);
    }

    unsigned int cglo_flags =
            (CGLO_TEMPERATURE | CGLO_GSTAT | (EI_VV(ir->eI) ? CGLO_PRESSURE : 0)
             | (EI_VV(ir->eI) ? CGLO_CONSTRAINT : 0) | (hasReadEkinState ? CGLO_READEKIN : 0));

    bSumEkinhOld = FALSE;

    t_vcm vcm(topGlobal_.groups, *ir);
    reportComRemovalInfo(fpLog_, vcm);

    int64_t step = ir->init_step;
    int64_t step_rel = 0;

    /* To minimize communication, compute_globals computes the COM velocity
     * and the kinetic energy for the velocities without COM motion removed.
     * Thus to get the kinetic energy without the COM contribution, we need
     * to call compute_globals twice.
     */
    for (int cgloIteration = 0; cgloIteration < (bStopCM ? 2 : 1); cgloIteration++)
    {
        unsigned int cglo_flags_iteration = cglo_flags;
        if (bStopCM && cgloIteration == 0)
        {
            cglo_flags_iteration |= CGLO_STOPCM;
            cglo_flags_iteration &= ~CGLO_TEMPERATURE;
        }
        compute_globals(gstat,
                        cr_,
                        ir,
                        fr_,
                        ekind_,
                        makeConstArrayRef(state_->x),
                        makeConstArrayRef(state_->v),
                        state_->box,
                        md,
                        nrnb_,
                        &vcm,
                        nullptr,
                        enerd_,
                        force_vir,
                        shake_vir,
                        total_vir,
                        pres,
                        &nullSignaller,
                        state_->box,
                        &bSumEkinhOld,
                        cglo_flags_iteration,
                        step,
                        &observablesReducer);
        // Clean up after pre-step use of compute_globals()
        observablesReducer.markAsReadyToReduce();

        if (cglo_flags_iteration & CGLO_STOPCM)
        {
            /* At initialization, do not pass x with acceleration-correction mode
             * to avoid (incorrect) correction of the initial coordinates.
             */
            auto x = (vcm.mode == ComRemovalAlgorithm::LinearAccelerationCorrection)
                             ? ArrayRef<RVec>()
                             : makeArrayRef(state_->x);
            process_and_stopcm_grp(fpLog_, &vcm, *md, x, makeArrayRef(state_->v));
            inc_nrnb(nrnb_, eNR_STOPCM, md->homenr);
        }
    }
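    /* A minimal sketch of why the loop above may run twice (an illustrative aside,
     * assuming N_df degrees of freedom; it is not taken from the original file):
     * compute_globals() reports the kinetic energy of exactly the velocities it is
     * handed, so the centre-of-mass motion has to be removed before the temperature
     * can be computed without the COM contribution:
     *
     *   pass 1:  v_i <- v_i - v_com                      (remove COM motion)
     *   pass 2:  Ekin = 1/2 * sum_i m_i |v_i|^2;  T = 2 * Ekin / (N_df * kB)
     */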
if (ir->eI == IntegrationAlgorithm::VVAK)
|
|
680
|
+
{
|
|
681
|
+
        /* a second call to get the half step temperature initialized as well */
        /* we do the same call as above, but turn the pressure off -- internally to
           compute_globals, this is recognized as a velocity verlet half-step
           kinetic energy calculation. This minimizes excess variables, but
           perhaps loses some logic? */

compute_globals(gstat,
|
|
688
|
+
cr_,
|
|
689
|
+
ir,
|
|
690
|
+
fr_,
|
|
691
|
+
ekind_,
|
|
692
|
+
makeConstArrayRef(state_->x),
|
|
693
|
+
makeConstArrayRef(state_->v),
|
|
694
|
+
state_->box,
|
|
695
|
+
md,
|
|
696
|
+
nrnb_,
|
|
697
|
+
&vcm,
|
|
698
|
+
nullptr,
|
|
699
|
+
enerd_,
|
|
700
|
+
force_vir,
|
|
701
|
+
shake_vir,
|
|
702
|
+
total_vir,
|
|
703
|
+
pres,
|
|
704
|
+
&nullSignaller,
|
|
705
|
+
state_->box,
|
|
706
|
+
&bSumEkinhOld,
|
|
707
|
+
cglo_flags & ~CGLO_PRESSURE,
|
|
708
|
+
step,
|
|
709
|
+
&observablesReducer);
|
|
710
|
+
// Clean up after pre-step use of compute_globals()
|
|
711
|
+
observablesReducer.markAsReadyToReduce();
|
|
712
|
+
}
|
|
713
|
+
|
|
714
|
+
/* Calculate the initial half step temperature, and save the ekinh_old */
|
|
715
|
+
if (startingBehavior_ == StartingBehavior::NewSimulation)
|
|
716
|
+
{
|
|
717
|
+
for (i = 0; (i < ir->opts.ngtc); i++)
|
|
718
|
+
{
|
|
719
|
+
copy_mat(ekind_->tcstat[i].ekinh, ekind_->tcstat[i].ekinh_old);
|
|
720
|
+
}
|
|
721
|
+
}
|
|
722
|
+
|
|
723
|
+
/* need to make an initiation call to get the Trotter variables set, as well as other constants
|
|
724
|
+
for non-trotter temperature control */
|
|
725
|
+
auto trotter_seq = init_npt_vars(ir, *ekind_, state_, &MassQ, bTrotter);
|
|
726
|
+
|
|
727
|
+
if (MAIN(cr_))
|
|
728
|
+
{
|
|
729
|
+
if (!ir->bContinuation)
|
|
730
|
+
{
|
|
731
|
+
if (constr_ && ir->eConstrAlg == ConstraintAlgorithm::Lincs)
|
|
732
|
+
{
|
|
733
|
+
fprintf(fpLog_,
|
|
734
|
+
"RMS relative constraint deviation after constraining: %.2e\n",
|
|
735
|
+
constr_->rmsd());
|
|
736
|
+
}
|
|
737
|
+
if (EI_STATE_VELOCITY(ir->eI))
|
|
738
|
+
{
|
|
739
|
+
real temp = enerd_->term[F_TEMP];
|
|
740
|
+
if (ir->eI != IntegrationAlgorithm::VV)
|
|
741
|
+
{
|
|
742
|
+
/* Result of Ekin averaged over velocities of -half
|
|
743
|
+
* and +half step, while we only have -half step here.
|
|
744
|
+
*/
|
|
745
|
+
temp *= 2;
|
|
746
|
+
}
|
|
747
|
+
fprintf(fpLog_, "Initial temperature: %g K\n", temp);
|
|
748
|
+
}
|
|
749
|
+
}
|
|
750
|
+
|
|
751
|
+
char tbuf[20];
|
|
752
|
+
fprintf(stderr, "starting mdrun '%s'\n", *(topGlobal_.name));
|
|
753
|
+
if (ir->nsteps >= 0)
|
|
754
|
+
{
|
|
755
|
+
sprintf(tbuf, "%8.1f", (ir->init_step + ir->nsteps) * ir->delta_t);
|
|
756
|
+
}
|
|
757
|
+
else
|
|
758
|
+
{
|
|
759
|
+
sprintf(tbuf, "%s", "infinite");
|
|
760
|
+
}
|
|
761
|
+
if (ir->init_step > 0)
|
|
762
|
+
{
|
|
763
|
+
fprintf(stderr,
|
|
764
|
+
"%s steps, %s ps (continuing from step %s, %8.1f ps).\n",
|
|
765
|
+
gmx_step_str(ir->init_step + ir->nsteps, sbuf),
|
|
766
|
+
tbuf,
|
|
767
|
+
gmx_step_str(ir->init_step, sbuf2),
|
|
768
|
+
ir->init_step * ir->delta_t);
|
|
769
|
+
}
|
|
770
|
+
else
|
|
771
|
+
{
|
|
772
|
+
fprintf(stderr, "%s steps, %s ps.\n", gmx_step_str(ir->nsteps, sbuf), tbuf);
|
|
773
|
+
}
|
|
774
|
+
fprintf(fpLog_, "\n");
|
|
775
|
+
}
|
|
776
|
+
|
|
777
|
+
walltime_accounting_start_time(wallTimeAccounting_);
|
|
778
|
+
wallcycle_start(wallCycleCounters_, WallCycleCounter::Run);
|
|
779
|
+
print_start(fpLog_, cr_, wallTimeAccounting_, "mdrun");
|
|
780
|
+
|
|
781
|
+
/***********************************************************
|
|
782
|
+
*
|
|
783
|
+
* Loop over MD steps
|
|
784
|
+
*
|
|
785
|
+
************************************************************/
|
|
786
|
+
|
|
787
|
+
bFirstStep = TRUE;
|
|
788
|
+
/* Skip the first Nose-Hoover integration when we get the state from tpx */
|
|
789
|
+
bInitStep = startingBehavior_ == StartingBehavior::NewSimulation || EI_VV(ir->eI);
|
|
790
|
+
bSumEkinhOld = FALSE;
|
|
791
|
+
bExchanged = FALSE;
|
|
792
|
+
bNeedRepartition = FALSE;
|
|
793
|
+
|
|
794
|
+
auto stopHandler = stopHandlerBuilder_->getStopHandlerMD(
|
|
795
|
+
compat::not_null<SimulationSignal*>(&signals[eglsSTOPCOND]),
|
|
796
|
+
simulationsShareState,
|
|
797
|
+
MAIN(cr_),
|
|
798
|
+
ir->nstlist,
|
|
799
|
+
mdrunOptions_.reproducible,
|
|
800
|
+
nstSignalComm,
|
|
801
|
+
mdrunOptions_.maximumHoursToRun,
|
|
802
|
+
ir->nstlist == 0,
|
|
803
|
+
fpLog_,
|
|
804
|
+
step,
|
|
805
|
+
bNS,
|
|
806
|
+
wallTimeAccounting_);
|
|
807
|
+
|
|
808
|
+
real checkpointPeriod = mdrunOptions_.checkpointOptions.period;
|
|
809
|
+
if (ir->bExpanded)
|
|
810
|
+
{
|
|
811
|
+
GMX_LOG(mdLog_.info)
|
|
812
|
+
.asParagraph()
|
|
813
|
+
.appendText(
|
|
814
|
+
"Expanded ensemble with the legacy simulator does not always "
|
|
815
|
+
"checkpoint correctly, so checkpointing is disabled. You will "
|
|
816
|
+
"not be able to do a checkpoint restart of this simulation. "
|
|
817
|
+
"If you use the modular simulator (e.g. by choosing md-vv integrator) "
|
|
818
|
+
"then checkpointing is enabled. See "
|
|
819
|
+
"https://gitlab.com/gromacs/gromacs/-/issues/4629 for details.");
|
|
820
|
+
// Use a negative period to disable checkpointing.
|
|
821
|
+
checkpointPeriod = -1;
|
|
822
|
+
}
|
|
823
|
+
auto checkpointHandler = std::make_unique<CheckpointHandler>(
|
|
824
|
+
compat::make_not_null<SimulationSignal*>(&signals[eglsCHKPT]),
|
|
825
|
+
simulationsShareState,
|
|
826
|
+
ir->nstlist == 0,
|
|
827
|
+
MAIN(cr_),
|
|
828
|
+
mdrunOptions_.writeConfout,
|
|
829
|
+
checkpointPeriod);
|
|
830
|
+
|
|
831
|
+
const bool resetCountersIsLocal = true;
|
|
832
|
+
auto resetHandler = std::make_unique<ResetHandler>(
|
|
833
|
+
compat::make_not_null<SimulationSignal*>(&signals[eglsRESETCOUNTERS]),
|
|
834
|
+
!resetCountersIsLocal,
|
|
835
|
+
ir->nsteps,
|
|
836
|
+
MAIN(cr_),
|
|
837
|
+
mdrunOptions_.timingOptions.resetHalfway,
|
|
838
|
+
mdrunOptions_.maximumHoursToRun,
|
|
839
|
+
mdLog_,
|
|
840
|
+
wallCycleCounters_,
|
|
841
|
+
wallTimeAccounting_);
|
|
842
|
+
|
|
843
|
+
const DDBalanceRegionHandler ddBalanceRegionHandler(cr_);
|
|
844
|
+
|
|
845
|
+
if (MAIN(cr_) && isMultiSim(ms_) && !useReplicaExchange)
|
|
846
|
+
{
|
|
847
|
+
logInitialMultisimStatus(ms_, cr_, mdLog_, simulationsShareState, ir->nsteps, ir->init_step);
|
|
848
|
+
}
|
|
849
|
+
|
|
850
|
+
bool usedMdGpuGraphLastStep = false;
|
|
851
|
+
/* and stop now if we should */
|
|
852
|
+
bLastStep = (bLastStep || (ir->nsteps >= 0 && step_rel > ir->nsteps));
|
|
853
|
+
while (!bLastStep)
|
|
854
|
+
{
|
|
855
|
+
/* Determine if this is a neighbor search step */
|
|
856
|
+
bNStList = (ir->nstlist > 0 && step % ir->nstlist == 0);
|
|
857
|
+
|
|
858
|
+
if (bPMETune && bNStList)
|
|
859
|
+
{
|
|
860
|
+
// This has to be here because PME load balancing is called so early.
|
|
861
|
+
// TODO: Move to after all booleans are defined.
|
|
862
|
+
if (useGpuForUpdate && !bFirstStep)
|
|
863
|
+
{
|
|
864
|
+
stateGpu->copyCoordinatesFromGpu(state_->x, AtomLocality::Local);
|
|
865
|
+
stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
|
|
866
|
+
}
|
|
867
|
+
/* PME grid + cut-off optimization with GPUs or PME nodes */
|
|
868
|
+
pme_loadbal_do(pme_loadbal,
|
|
869
|
+
cr_,
|
|
870
|
+
(mdrunOptions_.verbose && MAIN(cr_)) ? stderr : nullptr,
|
|
871
|
+
fpLog_,
|
|
872
|
+
mdLog_,
|
|
873
|
+
*ir,
|
|
874
|
+
fr_,
|
|
875
|
+
state_->box,
|
|
876
|
+
state_->x,
|
|
877
|
+
wallCycleCounters_,
|
|
878
|
+
step,
|
|
879
|
+
step_rel,
|
|
880
|
+
&bPMETunePrinting,
|
|
881
|
+
simulationWork.useGpuPmePpCommunication);
|
|
882
|
+
}
|
|
883
|
+
|
|
884
|
+
wallcycle_start(wallCycleCounters_, WallCycleCounter::Step);
|
|
885
|
+
|
|
886
|
+
bLastStep = (step_rel == ir->nsteps);
|
|
887
|
+
t = t0 + step * ir->delta_t;
|
|
888
|
+
|
|
889
|
+
// TODO Refactor this, so that nstfep does not need a default value of zero
|
|
890
|
+
if (ir->efep != FreeEnergyPerturbationType::No || ir->bSimTemp)
|
|
891
|
+
{
|
|
892
|
+
/* find and set the current lambdas */
|
|
893
|
+
state_->lambda = currentLambdas(step, *(ir->fepvals), state_->fep_state);
|
|
894
|
+
|
|
895
|
+
bDoExpanded = (do_per_step(step, ir->expandedvals->nstexpanded) && (ir->bExpanded)
|
|
896
|
+
&& (!bFirstStep));
|
|
897
|
+
}
|
|
898
|
+
|
|
899
|
+
bDoReplEx = (useReplicaExchange && (step > 0) && !bLastStep
|
|
900
|
+
&& do_per_step(step, replExParams_.exchangeInterval));
|
|
901
|
+
|
|
902
|
+
if (doSimulatedAnnealing)
|
|
903
|
+
{
|
|
904
|
+
// Simulated annealing updates the reference temperature.
|
|
905
|
+
update_annealing_target_temp(*ir, t, ekind_, &upd);
|
|
906
|
+
}
|
|
907
|
+
|
|
908
|
+
/* Stop Center of Mass motion */
|
|
909
|
+
bStopCM = (ir->comm_mode != ComRemovalAlgorithm::No && do_per_step(step, ir->nstcomm));
|
|
910
|
+
|
|
911
|
+
/* Determine whether or not to do Neighbour Searching */
|
|
912
|
+
bNS = (bFirstStep || bNStList || bExchanged || bNeedRepartition);
|
|
913
|
+
|
|
914
|
+
        /* Note that the stopHandler will cause termination at nstglobalcomm
         * steps. Since this coincides with nstcalcenergy, nsttcouple and/or
         * nstpcouple steps, we have computed the half-step kinetic energy
         * of the previous step and can always output energies at the last step.
         */
bLastStep = bLastStep || stopHandler->stoppingAfterCurrentStep(bNS);
|
|
920
|
+
|
|
921
|
+
/* do_log triggers energy and virial calculation. Because this leads
|
|
922
|
+
* to different code paths, forces can be different. Thus for exact
|
|
923
|
+
* continuation we should avoid extra log output.
|
|
924
|
+
* Note that the || bLastStep can result in non-exact continuation
|
|
925
|
+
* beyond the last step. But we don't consider that to be an issue.
|
|
926
|
+
*/
|
|
927
|
+
do_log = (do_per_step(step, ir->nstlog)
|
|
928
|
+
|| (bFirstStep && startingBehavior_ == StartingBehavior::NewSimulation) || bLastStep);
|
|
929
|
+
do_verbose = mdrunOptions_.verbose
|
|
930
|
+
&& (step % mdrunOptions_.verboseStepPrintInterval == 0 || bFirstStep || bLastStep);
|
|
931
|
+
|
|
932
|
+
// On search steps, when doing the update on the GPU, copy
|
|
933
|
+
// the coordinates and velocities to the host unless they are
|
|
934
|
+
// already there (ie on the first step and after replica
|
|
935
|
+
// exchange).
|
|
936
|
+
if (useGpuForUpdate && bNS && !bFirstStep && !bExchanged)
|
|
937
|
+
{
|
|
938
|
+
if (usedMdGpuGraphLastStep)
|
|
939
|
+
{
|
|
940
|
+
// Wait on coordinates produced from GPU graph
|
|
941
|
+
stateGpu->waitCoordinatesUpdatedOnDevice();
|
|
942
|
+
}
|
|
943
|
+
stateGpu->copyVelocitiesFromGpu(state_->v, AtomLocality::Local);
|
|
944
|
+
stateGpu->copyCoordinatesFromGpu(state_->x, AtomLocality::Local);
|
|
945
|
+
stateGpu->waitVelocitiesReadyOnHost(AtomLocality::Local);
|
|
946
|
+
stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
|
|
947
|
+
}
|
|
948
|
+
|
|
949
|
+
// We need to calculate virtual velocities if we are writing them in the current step.
|
|
950
|
+
// They also need to be periodically updated. Every 1000 steps is arbitrary, but a reasonable number.
|
|
951
|
+
// The reason why the velocities need to be updated regularly is that the virtual site coordinates
|
|
952
|
+
// are updated using these velocities during integration. Those coordinates are used for, e.g., domain
|
|
953
|
+
// decomposition. Before computing any forces the positions of the virtual sites are recalculated.
|
|
954
|
+
// This fixes a bug, #4879, which was introduced in MR !979.
|
|
955
|
+
const int c_virtualSiteVelocityUpdateInterval = 1000;
|
|
956
|
+
const bool needVirtualVelocitiesThisStep =
|
|
957
|
+
(virtualSites_ != nullptr)
|
|
958
|
+
&& (do_per_step(step, ir->nstvout) || checkpointHandler->isCheckpointingStep()
|
|
959
|
+
|| do_per_step(step, c_virtualSiteVelocityUpdateInterval));
|
|
960
|
+
|
|
961
|
+
if (virtualSites_ != nullptr)
|
|
962
|
+
{
|
|
963
|
+
// Virtual sites need to be updated before domain decomposition and forces are calculated
|
|
964
|
+
wallcycle_start(wallCycleCounters_, WallCycleCounter::VsiteConstr);
|
|
965
|
+
// md-vv calculates virtual velocities once it has full-step real velocities
|
|
966
|
+
virtualSites_->construct(state_->x,
|
|
967
|
+
state_->v,
|
|
968
|
+
state_->box,
|
|
969
|
+
(!EI_VV(inputRec_->eI) && needVirtualVelocitiesThisStep)
|
|
970
|
+
? VSiteOperation::PositionsAndVelocities
|
|
971
|
+
: VSiteOperation::Positions);
|
|
972
|
+
wallcycle_stop(wallCycleCounters_, WallCycleCounter::VsiteConstr);
|
|
973
|
+
}
|
|
974
|
+
|
|
975
|
+
if (bNS && !(bFirstStep && ir->bContinuation))
|
|
976
|
+
{
|
|
977
|
+
bMainState = FALSE;
|
|
978
|
+
/* Correct the new box if it is too skewed */
|
|
979
|
+
if (inputrecDynamicBox(ir))
|
|
980
|
+
{
|
|
981
|
+
if (correct_box(fpLog_, step, state_->box))
|
|
982
|
+
{
|
|
983
|
+
bMainState = TRUE;
|
|
984
|
+
}
|
|
985
|
+
}
|
|
986
|
+
// If update is offloaded, and the box was changed either
|
|
987
|
+
// above or in a replica exchange on the previous step,
|
|
988
|
+
// the GPU Update object should be informed
|
|
989
|
+
if (useGpuForUpdate && (bMainState || bExchanged))
|
|
990
|
+
{
|
|
991
|
+
integrator->setPbc(PbcType::Xyz, state_->box);
|
|
992
|
+
}
|
|
993
|
+
if (haveDDAtomOrdering(*cr_) && bMainState)
|
|
994
|
+
{
|
|
995
|
+
dd_collect_state(cr_->dd, state_, stateGlobal_);
|
|
996
|
+
}
|
|
997
|
+
|
|
998
|
+
if (haveDDAtomOrdering(*cr_))
|
|
999
|
+
{
|
|
1000
|
+
/* Repartition the domain decomposition */
|
|
1001
|
+
dd_partition_system(fpLog_,
|
|
1002
|
+
mdLog_,
|
|
1003
|
+
step,
|
|
1004
|
+
cr_,
|
|
1005
|
+
bMainState,
|
|
1006
|
+
stateGlobal_,
|
|
1007
|
+
topGlobal_,
|
|
1008
|
+
*ir,
|
|
1009
|
+
mdModulesNotifiers_,
|
|
1010
|
+
imdSession_,
|
|
1011
|
+
pullWork_,
|
|
1012
|
+
state_,
|
|
1013
|
+
&f,
|
|
1014
|
+
mdAtoms_,
|
|
1015
|
+
top_,
|
|
1016
|
+
fr_,
|
|
1017
|
+
virtualSites_,
|
|
1018
|
+
constr_,
|
|
1019
|
+
nrnb_,
|
|
1020
|
+
wallCycleCounters_,
|
|
1021
|
+
do_verbose && !bPMETunePrinting);
|
|
1022
|
+
upd.updateAfterPartition(state_->numAtoms(), md->cFREEZE, md->cTC, md->cACC);
|
|
1023
|
+
fr_->longRangeNonbondeds->updateAfterPartition(*md);
|
|
1024
|
+
}
|
|
1025
|
+
}
|
|
1026
|
+
|
|
1027
|
+
// Allocate or re-size GPU halo exchange object, if necessary
|
|
1028
|
+
if (bNS && simulationWork.havePpDomainDecomposition && simulationWork.useGpuHaloExchange)
|
|
1029
|
+
{
|
|
1030
|
+
GMX_RELEASE_ASSERT(fr_->deviceStreamManager != nullptr,
|
|
1031
|
+
"GPU device manager has to be initialized to use GPU "
|
|
1032
|
+
"version of halo exchange.");
|
|
1033
|
+
constructGpuHaloExchange(*cr_, *fr_->deviceStreamManager, wallCycleCounters_);
|
|
1034
|
+
}
|
|
1035
|
+
|
|
1036
|
+
if (MAIN(cr_) && do_log)
|
|
1037
|
+
{
|
|
1038
|
+
gmx::EnergyOutput::printHeader(
|
|
1039
|
+
fpLog_, step, t); /* can we improve the information printed here? */
|
|
1040
|
+
}
|
|
1041
|
+
|
|
1042
|
+
if (ir->efep != FreeEnergyPerturbationType::No)
|
|
1043
|
+
{
|
|
1044
|
+
update_mdatoms(mdAtoms_->mdatoms(), state_->lambda[FreeEnergyPerturbationCouplingType::Mass]);
|
|
1045
|
+
}
|
|
1046
|
+
|
|
1047
|
+
if (bExchanged)
|
|
1048
|
+
{
|
|
1049
|
+
/* We need the kinetic energy at minus the half step for determining
|
|
1050
|
+
* the full step kinetic energy and possibly for T-coupling.*/
|
|
1051
|
+
/* This may not be quite working correctly yet . . . . */
|
|
1052
|
+
int cglo_flags = CGLO_GSTAT | CGLO_TEMPERATURE;
|
|
1053
|
+
compute_globals(gstat,
|
|
1054
|
+
cr_,
|
|
1055
|
+
ir,
|
|
1056
|
+
fr_,
|
|
1057
|
+
ekind_,
|
|
1058
|
+
makeConstArrayRef(state_->x),
|
|
1059
|
+
makeConstArrayRef(state_->v),
|
|
1060
|
+
state_->box,
|
|
1061
|
+
md,
|
|
1062
|
+
nrnb_,
|
|
1063
|
+
&vcm,
|
|
1064
|
+
wallCycleCounters_,
|
|
1065
|
+
enerd_,
|
|
1066
|
+
nullptr,
|
|
1067
|
+
nullptr,
|
|
1068
|
+
nullptr,
|
|
1069
|
+
nullptr,
|
|
1070
|
+
&nullSignaller,
|
|
1071
|
+
state_->box,
|
|
1072
|
+
&bSumEkinhOld,
|
|
1073
|
+
cglo_flags,
|
|
1074
|
+
step,
|
|
1075
|
+
&observablesReducer);
|
|
1076
|
+
}
|
|
1077
|
+
clear_mat(force_vir);
|
|
1078
|
+
|
|
1079
|
+
checkpointHandler->decideIfCheckpointingThisStep(bNS, bFirstStep, bLastStep);
|
|
1080
|
+
|
|
1081
|
+
/* Determine the energy and pressure:
|
|
1082
|
+
* at nstcalcenergy steps and at energy output steps (set below).
|
|
1083
|
+
*/
|
|
1084
|
+
if (EI_VV(ir->eI) && (!bInitStep))
|
|
1085
|
+
{
|
|
1086
|
+
bCalcEnerStep = do_per_step(step, ir->nstcalcenergy);
|
|
1087
|
+
bCalcVir = bCalcEnerStep
|
|
1088
|
+
|| (ir->pressureCouplingOptions.epc != PressureCoupling::No
|
|
1089
|
+
&& (do_per_step(step, ir->pressureCouplingOptions.nstpcouple)
|
|
1090
|
+
|| do_per_step(step - 1, ir->pressureCouplingOptions.nstpcouple)));
|
|
1091
|
+
}
|
|
1092
|
+
else
|
|
1093
|
+
{
|
|
1094
|
+
bCalcEnerStep = do_per_step(step, ir->nstcalcenergy);
|
|
1095
|
+
bCalcVir = bCalcEnerStep
|
|
1096
|
+
|| (ir->pressureCouplingOptions.epc != PressureCoupling::No
|
|
1097
|
+
&& do_per_step(step, ir->pressureCouplingOptions.nstpcouple));
|
|
1098
|
+
}
|
|
1099
|
+
bCalcEner = bCalcEnerStep;
|
|
1100
|
+
|
|
1101
|
+
do_ene = (do_per_step(step, ir->nstenergy) || bLastStep);
|
|
1102
|
+
|
|
1103
|
+
if (do_ene || do_log || bDoReplEx)
|
|
1104
|
+
{
|
|
1105
|
+
bCalcVir = TRUE;
|
|
1106
|
+
bCalcEner = TRUE;
|
|
1107
|
+
}
|
|
1108
|
+
|
|
1109
|
+
        // bCalcEner is only here for when the last step is not a multiple of nstfep
const bool computeDHDL = ((ir->efep != FreeEnergyPerturbationType::No || ir->bSimTemp)
|
|
1111
|
+
&& (do_per_step(step, nstfep) || bCalcEner));
|
|
1112
|
+
|
|
1113
|
+
/* Do we need global communication ? */
|
|
1114
|
+
bGStat = (bCalcVir || bCalcEner || bStopCM || do_per_step(step, nstglobalcomm)
|
|
1115
|
+
|| (EI_VV(ir->eI) && inputrecNvtTrotter(ir) && do_per_step(step - 1, nstglobalcomm)));
|
|
1116
|
+
|
|
1117
|
+
force_flags = (GMX_FORCE_STATECHANGED | ((inputrecDynamicBox(ir)) ? GMX_FORCE_DYNAMICBOX : 0)
|
|
1118
|
+
| GMX_FORCE_ALLFORCES | (bCalcVir ? GMX_FORCE_VIRIAL : 0)
|
|
1119
|
+
| (bCalcEner ? GMX_FORCE_ENERGY : 0) | (computeDHDL ? GMX_FORCE_DHDL : 0));
|
|
1120
|
+
if (simulationWork.useMts && !do_per_step(step, ir->nstfout))
|
|
1121
|
+
{
|
|
1122
|
+
// TODO: merge this with stepWork.useOnlyMtsCombinedForceBuffer
|
|
1123
|
+
force_flags |= GMX_FORCE_DO_NOT_NEED_NORMAL_FORCE;
|
|
1124
|
+
}
|
|
1125
|
+
|
|
1126
|
+
if (bNS)
|
|
1127
|
+
{
|
|
1128
|
+
if (fr_->listedForcesGpu)
|
|
1129
|
+
{
|
|
1130
|
+
fr_->listedForcesGpu->updateHaveInteractions(top_->idef);
|
|
1131
|
+
}
|
|
1132
|
+
runScheduleWork_->domainWork = setupDomainLifetimeWorkload(
|
|
1133
|
+
*ir, *fr_, pullWork_, ed ? ed->getLegacyED() : nullptr, *md, simulationWork);
|
|
1134
|
+
}
|
|
1135
|
+
|
|
1136
|
+
const int shellfcFlags = force_flags | (mdrunOptions_.verbose ? GMX_FORCE_ENERGY : 0);
|
|
1137
|
+
const int legacyForceFlags = ((shellfc) ? shellfcFlags : force_flags) | (bNS ? GMX_FORCE_NS : 0);
|
|
1138
|
+
|
|
1139
|
+
runScheduleWork_->stepWork = setupStepWorkload(
|
|
1140
|
+
legacyForceFlags, ir->mtsLevels, step, runScheduleWork_->domainWork, simulationWork);
|
|
1141
|
+
|
|
1142
|
+
const bool doTemperatureScaling = (ir->etc != TemperatureCoupling::No
|
|
1143
|
+
&& do_per_step(step + ir->nsttcouple - 1, ir->nsttcouple));
|
|
1144
|
+
|
|
1145
|
+
        /* With leap-frog type integrators we compute the kinetic energy
         * at a whole time step as the average of the half-time step kinetic
         * energies of two subsequent steps. Therefore we need to compute the
         * half step kinetic energy also if we need energies at the next step.
         */
        const bool needHalfStepKineticEnergy =
                (!EI_VV(ir->eI) && (do_per_step(step + 1, nstglobalcomm) || step_rel + 1 == ir->nsteps));

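        /* A short illustrative note (not taken from the original file): for leap-frog the
         * whole-step kinetic energy is the average of the two surrounding half-step values,
         *
         *   Ekin(t) = [ Ekin(t - dt/2) + Ekin(t + dt/2) ] / 2
         *
         * which is why the half-step kinetic energy must already be evaluated one step
         * before energies are needed.
         */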
        // Parrinello-Rahman requires the pressure to be available before the update to compute
        // the velocity scaling matrix. Hence, it runs one step after the nstpcouple step.
        const bool doParrinelloRahman =
                (ir->pressureCouplingOptions.epc == PressureCoupling::ParrinelloRahman
                 && do_per_step(step + ir->pressureCouplingOptions.nstpcouple - 1,
                                ir->pressureCouplingOptions.nstpcouple));

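        /* Illustrative note, assuming do_per_step(s, n) is true when n > 0 and s % n == 0
         * (as the predicate is used throughout this file): shifting the step by
         * nstpcouple - 1 makes the test fire one step after the coupling step, e.g. with
         * nstpcouple = 10,
         *
         *   do_per_step(step, 10)      -> steps 0, 10, 20, ...
         *   do_per_step(step + 9, 10)  -> steps 1, 11, 21, ...
         *
         * so the Parrinello-Rahman scaling uses the pressure computed on the previous step.
         */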
        MdGpuGraph* mdGraph = simulationWork.useMdGpuGraph ? fr_->mdGraph[step % 2].get() : nullptr;

        if (simulationWork.useMdGpuGraph)
        {
            // Reset graph on search step (due to changing neighbour list etc)
            // or virial step (due to changing shifts and box).
            if (bNS || bCalcVir)
            {
                fr_->mdGraph[MdGraphEvenOrOddStep::EvenStep]->reset();
                fr_->mdGraph[MdGraphEvenOrOddStep::OddStep]->reset();
            }
            else
            {
                mdGraph->setUsedGraphLastStep(usedMdGpuGraphLastStep);
                bool canUseMdGpuGraphThisStep =
                        !bNS && !bCalcVir && !doTemperatureScaling && !doParrinelloRahman && !bGStat
                        && !needHalfStepKineticEnergy && !do_per_step(step, ir->nstxout)
                        && !do_per_step(step, ir->nstxout_compressed)
                        && !do_per_step(step, ir->nstvout) && !do_per_step(step, ir->nstfout)
                        && !checkpointHandler->isCheckpointingStep();
                if (mdGraph->captureThisStep(canUseMdGpuGraphThisStep))
                {
                    mdGraph->startRecord(stateGpu->getCoordinatesReadyOnDeviceEvent(
                            AtomLocality::Local, simulationWork, runScheduleWork_->stepWork));
                }
            }
        }
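        /* A minimal sketch of the generic stream-capture pattern such MD GPU graphs build
         * on (illustrative only; the actual MdGpuGraph class wraps this with its own
         * bookkeeping, and enqueueOneMdStep() is a hypothetical placeholder).  With the
         * CUDA 12 graph API a capture/instantiate/replay cycle looks like:
         *
         *   cudaGraph_t     graph;
         *   cudaGraphExec_t graphExec;
         *   cudaStreamBeginCapture(stream, cudaStreamCaptureModeGlobal);
         *   enqueueOneMdStep(stream);                  // enqueue one step's kernels into the stream
         *   cudaStreamEndCapture(stream, &graph);
         *   cudaGraphInstantiate(&graphExec, graph, 0);
         *   cudaGraphLaunch(graphExec, stream);        // replay on later steps with identical work
         *
         * Replay is only valid on steps whose work matches the recorded step, which is what
         * the canUseMdGpuGraphThisStep conditions above encode.
         */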
if (!simulationWork.useMdGpuGraph || mdGraph->graphIsCapturingThisStep()
|
|
1188
|
+
|| !mdGraph->useGraphThisStep())
|
|
1189
|
+
{
|
|
1190
|
+
|
|
1191
|
+
if (shellfc)
|
|
1192
|
+
{
|
|
1193
|
+
/* Now is the time to relax the shells */
|
|
1194
|
+
relax_shell_flexcon(fpLog_,
|
|
1195
|
+
cr_,
|
|
1196
|
+
ms_,
|
|
1197
|
+
mdrunOptions_.verbose,
|
|
1198
|
+
enforcedRotation_,
|
|
1199
|
+
step,
|
|
1200
|
+
ir,
|
|
1201
|
+
mdModulesNotifiers_,
|
|
1202
|
+
imdSession_,
|
|
1203
|
+
pullWork_,
|
|
1204
|
+
bNS,
|
|
1205
|
+
top_,
|
|
1206
|
+
constr_,
|
|
1207
|
+
enerd_,
|
|
1208
|
+
state_->numAtoms(),
|
|
1209
|
+
state_->x.arrayRefWithPadding(),
|
|
1210
|
+
state_->v.arrayRefWithPadding(),
|
|
1211
|
+
state_->box,
|
|
1212
|
+
state_->lambda,
|
|
1213
|
+
&state_->hist,
|
|
1214
|
+
&f.view(),
|
|
1215
|
+
force_vir,
|
|
1216
|
+
*md,
|
|
1217
|
+
fr_->longRangeNonbondeds.get(),
|
|
1218
|
+
nrnb_,
|
|
1219
|
+
wallCycleCounters_,
|
|
1220
|
+
shellfc,
|
|
1221
|
+
fr_,
|
|
1222
|
+
*runScheduleWork_,
|
|
1223
|
+
t,
|
|
1224
|
+
mu_tot,
|
|
1225
|
+
virtualSites_,
|
|
1226
|
+
ddBalanceRegionHandler);
|
|
1227
|
+
}
|
|
1228
|
+
else
|
|
1229
|
+
{
|
|
1230
|
+
/* The AWH history need to be saved _before_ doing force calculations where the AWH bias
|
|
1231
|
+
is updated (or the AWH update will be performed twice for one step when continuing).
|
|
1232
|
+
It would be best to call this update function from do_md_trajectory_writing but that
|
|
1233
|
+
would occur after do_force. One would have to divide the update_awh function into one
|
|
1234
|
+
function applying the AWH force and one doing the AWH bias update. The update AWH
|
|
1235
|
+
bias function could then be called after do_md_trajectory_writing (then containing
|
|
1236
|
+
update_awh_history). The checkpointing will in the future probably moved to the start
|
|
1237
|
+
of the md loop which will rid of this issue. */
|
|
1238
|
+
if (awh && checkpointHandler->isCheckpointingStep() && MAIN(cr_))
|
|
1239
|
+
{
|
|
1240
|
+
awh->updateHistory(stateGlobal_->awhHistory.get());
|
|
1241
|
+
}
|
|
1242
|
+
|
|
1243
|
+
/* The coordinates (x) are shifted (to get whole molecules)
|
|
1244
|
+
* in do_force.
|
|
1245
|
+
* This is parallellized as well, and does communication too.
|
|
1246
|
+
* Check comments in sim_util.c
|
|
1247
|
+
*/
|
|
1248
|
+
do_force(fpLog_,
|
|
1249
|
+
cr_,
|
|
1250
|
+
ms_,
|
|
1251
|
+
*ir,
|
|
1252
|
+
mdModulesNotifiers_,
|
|
1253
|
+
awh.get(),
|
|
1254
|
+
enforcedRotation_,
|
|
1255
|
+
imdSession_,
|
|
1256
|
+
pullWork_,
|
|
1257
|
+
step,
|
|
1258
|
+
nrnb_,
|
|
1259
|
+
wallCycleCounters_,
|
|
1260
|
+
top_,
|
|
1261
|
+
state_->box,
|
|
1262
|
+
state_->x.arrayRefWithPadding(),
|
|
1263
|
+
state_->v.arrayRefWithPadding().unpaddedArrayRef(),
|
|
1264
|
+
&state_->hist,
|
|
1265
|
+
&f.view(),
|
|
1266
|
+
force_vir,
|
|
1267
|
+
md,
|
|
1268
|
+
enerd_,
|
|
1269
|
+
state_->lambda,
|
|
1270
|
+
fr_,
|
|
1271
|
+
*runScheduleWork_,
|
|
1272
|
+
virtualSites_,
|
|
1273
|
+
mu_tot,
|
|
1274
|
+
t,
|
|
1275
|
+
ed ? ed->getLegacyED() : nullptr,
|
|
1276
|
+
fr_->longRangeNonbondeds.get(),
|
|
1277
|
+
ddBalanceRegionHandler);
|
|
1278
|
+
}
|
|
1279
|
+
|
|
1280
|
+
// VV integrators do not need the following velocity half step
|
|
1281
|
+
// if it is the first step after starting from a checkpoint.
|
|
1282
|
+
// That is, the half step is needed on all other steps, and
|
|
1283
|
+
// also the first step when starting from a .tpr file.
|
|
1284
|
+
if (EI_VV(ir->eI))
|
|
1285
|
+
{
|
|
1286
|
+
integrateVVFirstStep(step,
|
|
1287
|
+
bFirstStep,
|
|
1288
|
+
bInitStep,
|
|
1289
|
+
startingBehavior_,
|
|
1290
|
+
nstglobalcomm,
|
|
1291
|
+
ir,
|
|
1292
|
+
fr_,
|
|
1293
|
+
cr_,
|
|
1294
|
+
state_,
|
|
1295
|
+
mdAtoms_->mdatoms(),
|
|
1296
|
+
&fcdata,
|
|
1297
|
+
&MassQ,
|
|
1298
|
+
&vcm,
|
|
1299
|
+
enerd_,
|
|
1300
|
+
&observablesReducer,
|
|
1301
|
+
ekind_,
|
|
1302
|
+
gstat,
|
|
1303
|
+
&last_ekin,
|
|
1304
|
+
bCalcVir,
|
|
1305
|
+
total_vir,
|
|
1306
|
+
shake_vir,
|
|
1307
|
+
force_vir,
|
|
1308
|
+
pres,
|
|
1309
|
+
do_log,
|
|
1310
|
+
do_ene,
|
|
1311
|
+
bCalcEner,
|
|
1312
|
+
bGStat,
|
|
1313
|
+
bStopCM,
|
|
1314
|
+
bTrotter,
|
|
1315
|
+
bExchanged,
|
|
1316
|
+
&bSumEkinhOld,
|
|
1317
|
+
&saved_conserved_quantity,
|
|
1318
|
+
&f,
|
|
1319
|
+
&upd,
|
|
1320
|
+
constr_,
|
|
1321
|
+
&nullSignaller,
|
|
1322
|
+
trotter_seq,
|
|
1323
|
+
nrnb_,
|
|
1324
|
+
fpLog_,
|
|
1325
|
+
wallCycleCounters_);
|
|
1326
|
+
if (virtualSites_ != nullptr && needVirtualVelocitiesThisStep)
|
|
1327
|
+
{
|
|
1328
|
+
// Positions were calculated earlier
|
|
1329
|
+
wallcycle_start(wallCycleCounters_, WallCycleCounter::VsiteConstr);
|
|
1330
|
+
virtualSites_->construct(state_->x, state_->v, state_->box, VSiteOperation::Velocities);
|
|
1331
|
+
wallcycle_stop(wallCycleCounters_, WallCycleCounter::VsiteConstr);
|
|
1332
|
+
}
|
|
1333
|
+
}
|
|
1334
|
+
|
|
1335
|
+
/* ######## END FIRST UPDATE STEP ############## */
|
|
1336
|
+
/* ######## If doing VV, we now have v(dt) ###### */
|
|
1337
|
+
if (bDoExpanded)
|
|
1338
|
+
{
|
|
1339
|
+
/* perform extended ensemble sampling in lambda - we don't
|
|
1340
|
+
actually move to the new state before outputting
|
|
1341
|
+
statistics, but if performing simulated tempering, we
|
|
1342
|
+
do update the velocities and the tau_t. */
|
|
1343
|
+
lamnew = ExpandedEnsembleDynamics(fpLog_,
|
|
1344
|
+
*inputRec_,
|
|
1345
|
+
*enerd_,
|
|
1346
|
+
ekind_,
|
|
1347
|
+
state_,
|
|
1348
|
+
&MassQ,
|
|
1349
|
+
state_->fep_state,
|
|
1350
|
+
state_->dfhist,
|
|
1351
|
+
step,
|
|
1352
|
+
state_->v.rvec_array(),
|
|
1353
|
+
md->homenr,
|
|
1354
|
+
md->cTC);
|
|
1355
|
+
/* history is maintained in state->dfhist, but state_global is what is sent to trajectory and log output */
|
|
1356
|
+
if (MAIN(cr_))
|
|
1357
|
+
{
|
|
1358
|
+
copy_df_history(stateGlobal_->dfhist, state_->dfhist);
|
|
1359
|
+
}
|
|
1360
|
+
}
|
|
1361
|
+
|
|
1362
|
+
// Copy coordinate from the GPU for the output/checkpointing if the update is offloaded
|
|
1363
|
+
// and coordinates have not already been copied for i) search or ii) CPU force tasks.
|
|
1364
|
+
if (useGpuForUpdate && !bNS && !runScheduleWork_->domainWork.haveCpuLocalForceWork
|
|
1365
|
+
&& (do_per_step(step, ir->nstxout) || do_per_step(step, ir->nstxout_compressed)
|
|
1366
|
+
|| checkpointHandler->isCheckpointingStep()))
|
|
1367
|
+
{
|
|
1368
|
+
stateGpu->copyCoordinatesFromGpu(state_->x, AtomLocality::Local);
|
|
1369
|
+
stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
|
|
1370
|
+
}
|
|
1371
|
+
// Copy velocities if needed for the output/checkpointing.
|
|
1372
|
+
// NOTE: Copy on the search steps is done at the beginning of the step.
|
|
1373
|
+
if (useGpuForUpdate && !bNS
|
|
1374
|
+
&& (do_per_step(step, ir->nstvout) || checkpointHandler->isCheckpointingStep()))
|
|
1375
|
+
{
|
|
1376
|
+
stateGpu->copyVelocitiesFromGpu(state_->v, AtomLocality::Local);
|
|
1377
|
+
stateGpu->waitVelocitiesReadyOnHost(AtomLocality::Local);
|
|
1378
|
+
}
|
|
1379
|
+
// Copy forces for the output if the forces were reduced on the GPU (not the case on virial steps)
|
|
1380
|
+
// and update is offloaded hence forces are kept on the GPU for update and have not been
|
|
1381
|
+
// already transferred in do_force().
|
|
1382
|
+
// TODO: There should be an improved, explicit mechanism that ensures this copy is only executed
|
|
1383
|
+
// when the forces are ready on the GPU -- the same synchronizer should be used as the one
|
|
1384
|
+
// prior to GPU update.
|
|
1385
|
+
// TODO: When the output flags will be included in step workload, this copy can be combined with the
|
|
1386
|
+
// copy call in do_force(...).
|
|
1387
|
+
// NOTE: The forces should not be copied here if the vsites are present, since they were modified
|
|
1388
|
+
// on host after the D2H copy in do_force(...).
|
|
1389
|
+
if (runScheduleWork_->stepWork.useGpuFBufferOps
|
|
1390
|
+
&& (simulationWork.useGpuUpdate && !virtualSites_) && do_per_step(step, ir->nstfout))
|
|
1391
|
+
{
|
|
1392
|
+
stateGpu->copyForcesFromGpu(f.view().force(), AtomLocality::Local);
|
|
1393
|
+
stateGpu->waitForcesReadyOnHost(AtomLocality::Local);
|
|
1394
|
+
}
|
|
1395
|
+
/* Now we have the energies and forces corresponding to the
|
|
1396
|
+
* coordinates at time t. We must output all of this before
|
|
1397
|
+
* the update.
|
|
1398
|
+
*/
|
|
1399
|
+
const EkindataState ekindataState =
|
|
1400
|
+
bGStat ? (bSumEkinhOld ? EkindataState::UsedNeedToReduce
|
|
1401
|
+
: EkindataState::UsedDoNotNeedToReduce)
|
|
1402
|
+
: EkindataState::NotUsed;
|
|
1403
|
+
do_md_trajectory_writing(fpLog_,
|
|
1404
|
+
cr_,
|
|
1405
|
+
nFile_,
|
|
1406
|
+
fnm_,
|
|
1407
|
+
step,
|
|
1408
|
+
step_rel,
|
|
1409
|
+
t,
|
|
1410
|
+
ir,
|
|
1411
|
+
state_,
|
|
1412
|
+
stateGlobal_,
|
|
1413
|
+
observablesHistory_,
|
|
1414
|
+
topGlobal_,
|
|
1415
|
+
fr_,
|
|
1416
|
+
outf,
|
|
1417
|
+
energyOutput,
|
|
1418
|
+
ekind_,
|
|
1419
|
+
f.view().force(),
|
|
1420
|
+
checkpointHandler->isCheckpointingStep(),
|
|
1421
|
+
bRerunMD,
|
|
1422
|
+
bLastStep,
|
|
1423
|
+
mdrunOptions_.writeConfout,
|
|
1424
|
+
ekindataState);
|
|
1425
|
+
/* Check if IMD step and do IMD communication, if bIMD is TRUE. */
|
|
1426
|
+
bInteractiveMDstep = imdSession_->run(step, bNS, state_->box, state_->x, t);
|
|
1427
|
+
|
|
1428
|
+
/* kludge -- virial is lost with restart for MTTK NPT control. Must reload (saved earlier). */
|
|
1429
|
+
if (startingBehavior_ != StartingBehavior::NewSimulation && bFirstStep
|
|
1430
|
+
&& (inputrecNptTrotter(ir) || inputrecNphTrotter(ir)))
|
|
1431
|
+
{
|
|
1432
|
+
copy_mat(state_->svir_prev, shake_vir);
|
|
1433
|
+
copy_mat(state_->fvir_prev, force_vir);
|
|
1434
|
+
}
|
|
1435
|
+
|
|
1436
|
+
stopHandler->setSignal();
|
|
1437
|
+
resetHandler->setSignal(wallTimeAccounting_);
|
|
1438
|
+
|
|
1439
|
+
if (bGStat || !PAR(cr_))
|
|
1440
|
+
{
|
|
1441
|
+
/* In parallel we only have to check for checkpointing in steps
|
|
1442
|
+
* where we do global communication,
|
|
1443
|
+
* otherwise the other nodes don't know.
|
|
1444
|
+
*/
|
|
1445
|
+
checkpointHandler->setSignal(wallTimeAccounting_);
|
|
1446
|
+
}
|
|
1447
|
+
|
|
1448
|
+
/* ######### START SECOND UPDATE STEP ################# */
|
|
1449
|
+
|
|
1450
|
+
        /* At the start of the step, randomize or scale the velocities (if vv);
           restrictions on Andersen coupling are checked in preprocessing */

if (ETC_ANDERSEN(ir->etc)) /* keep this outside of update_tcouple because of the extra info required to pass */
|
|
1454
|
+
{
|
|
1455
|
+
gmx_bool bIfRandomize;
|
|
1456
|
+
bIfRandomize = update_randomize_velocities(
|
|
1457
|
+
ir, step, cr_, md->homenr, md->cTC, md->invmass, state_->v, &upd, constr_);
|
|
1458
|
+
/* if we have constraints, we have to remove the kinetic energy parallel to the bonds */
|
|
1459
|
+
if (constr_ && bIfRandomize)
|
|
1460
|
+
{
|
|
1461
|
+
constrain_velocities(constr_, do_log, do_ene, step, state_, nullptr, false, nullptr);
|
|
1462
|
+
}
|
|
1463
|
+
}
|
|
1464
|
+
/* Box is changed in update() when we do pressure coupling,
|
|
1465
|
+
* but we should still use the old box for energy corrections and when
|
|
1466
|
+
* writing it to the energy file, so it matches the trajectory files for
|
|
1467
|
+
* the same timestep above. Make a copy in a separate array.
|
|
1468
|
+
*/
|
|
1469
|
+
copy_mat(state_->box, lastbox);
|
|
1470
|
+
|
|
1471
|
+
dvdl_constr = 0;
|
|
1472
|
+
|
|
1473
|
+
if (!useGpuForUpdate)
|
|
1474
|
+
{
|
|
1475
|
+
wallcycle_start(wallCycleCounters_, WallCycleCounter::Update);
|
|
1476
|
+
}
|
|
1477
|
+
/* UPDATE PRESSURE VARIABLES IN TROTTER FORMULATION WITH CONSTRAINTS */
|
|
1478
|
+
if (bTrotter)
|
|
1479
|
+
{
|
|
1480
|
+
trotter_update(ir,
|
|
1481
|
+
step,
|
|
1482
|
+
ekind_,
|
|
1483
|
+
state_,
|
|
1484
|
+
total_vir,
|
|
1485
|
+
md->homenr,
|
|
1486
|
+
md->cTC,
|
|
1487
|
+
md->invmass,
|
|
1488
|
+
&MassQ,
|
|
1489
|
+
trotter_seq,
|
|
1490
|
+
TrotterSequence::Three);
|
|
1491
|
+
            /* We can only do Berendsen coupling after we have summed
             * the kinetic energy or virial. Since this happens
             * in global_state after update, we should only do it at
             * step % nstlist = 1 with bGStatEveryStep=FALSE.
             */
}
|
|
1497
|
+
else
|
|
1498
|
+
{
|
|
1499
|
+
update_tcouple(step, ir, state_, ekind_, &MassQ, md->homenr, md->cTC);
|
|
1500
|
+
update_pcouple_before_coordinates(mdLog_,
|
|
1501
|
+
step,
|
|
1502
|
+
ir->pressureCouplingOptions,
|
|
1503
|
+
ir->deform,
|
|
1504
|
+
ir->delta_t,
|
|
1505
|
+
state_,
|
|
1506
|
+
&pressureCouplingMu,
|
|
1507
|
+
&parrinelloRahmanM);
|
|
1508
|
+
}
|
|
1509
|
+
|
|
1510
|
+
if (EI_VV(ir->eI))
|
|
1511
|
+
{
|
|
1512
|
+
GMX_ASSERT(!useGpuForUpdate, "GPU update is not supported with VVAK integrator.");
|
|
1513
|
+
|
|
1514
|
+
integrateVVSecondStep(step,
|
|
1515
|
+
ir,
|
|
1516
|
+
fr_,
|
|
1517
|
+
cr_,
|
|
1518
|
+
state_,
|
|
1519
|
+
mdAtoms_->mdatoms(),
|
|
1520
|
+
&fcdata,
|
|
1521
|
+
&MassQ,
|
|
1522
|
+
&vcm,
|
|
1523
|
+
pullWork_,
|
|
1524
|
+
enerd_,
|
|
1525
|
+
&observablesReducer,
|
|
1526
|
+
ekind_,
|
|
1527
|
+
gstat,
|
|
1528
|
+
&dvdl_constr,
|
|
1529
|
+
bCalcVir,
|
|
1530
|
+
total_vir,
|
|
1531
|
+
shake_vir,
|
|
1532
|
+
force_vir,
|
|
1533
|
+
pres,
|
|
1534
|
+
lastbox,
|
|
1535
|
+
do_log,
|
|
1536
|
+
do_ene,
|
|
1537
|
+
bGStat,
|
|
1538
|
+
&bSumEkinhOld,
|
|
1539
|
+
&f,
|
|
1540
|
+
&cbuf,
|
|
1541
|
+
&upd,
|
|
1542
|
+
constr_,
|
|
1543
|
+
&nullSignaller,
|
|
1544
|
+
trotter_seq,
|
|
1545
|
+
nrnb_,
|
|
1546
|
+
wallCycleCounters_);
|
|
1547
|
+
}
|
|
1548
|
+
else
|
|
1549
|
+
{
|
|
1550
|
+
if (useGpuForUpdate)
|
|
1551
|
+
{
|
|
1552
|
+
// On search steps, update handles to device vectors
|
|
1553
|
+
// TODO: this condition has redundant / unnecessary clauses
|
|
1554
|
+
if (bNS && (bFirstStep || haveDDAtomOrdering(*cr_) || bExchanged))
|
|
1555
|
+
{
|
|
1556
|
+
integrator->set(stateGpu->getCoordinates(),
|
|
1557
|
+
stateGpu->getVelocities(),
|
|
1558
|
+
stateGpu->getForces(),
|
|
1559
|
+
top_->idef,
|
|
1560
|
+
*md);
|
|
1561
|
+
|
|
1562
|
+
// Copy data to the GPU after buffers might have been reinitialized
|
|
1563
|
+
/* The velocity copy is redundant if we had Center-of-Mass motion removed on
|
|
1564
|
+
* the previous step. We don't check that now. */
|
|
1565
|
+
stateGpu->copyVelocitiesToGpu(state_->v, AtomLocality::Local);
|
|
1566
|
+
}
|
|
1567
|
+
|
|
1568
|
+
// Copy x to the GPU unless we have already transferred in do_force().
|
|
1569
|
+
// We transfer in do_force() if a GPU force task requires x (PME or x buffer ops).
|
|
1570
|
+
if (!(runScheduleWork_->stepWork.haveGpuPmeOnThisRank
|
|
1571
|
+
|| runScheduleWork_->stepWork.useGpuXBufferOps))
|
|
1572
|
+
{
|
|
1573
|
+
stateGpu->copyCoordinatesToGpu(state_->x, AtomLocality::Local);
|
|
1574
|
+
// Coordinates are later used by the integrator running in the same stream.
|
|
1575
|
+
stateGpu->consumeCoordinatesCopiedToDeviceEvent(AtomLocality::Local);
|
|
1576
|
+
}
|
|
1577
|
+
|
|
1578
|
+
if ((simulationWork.useGpuPme && simulationWork.useCpuPmePpCommunication)
|
|
1579
|
+
|| (!runScheduleWork_->stepWork.useGpuFBufferOps))
|
|
1580
|
+
{
|
|
1581
|
+
                // The PME forces were received on the host, and reduced on the CPU with the
                // rest of the forces computed on the GPU, so the final forces have to be
                // copied back to the GPU. Or the buffer ops were not offloaded this step,
                // so the forces are on the host and have to be copied
stateGpu->copyForcesToGpu(f.view().force(), AtomLocality::Local);
|
|
1586
|
+
}
|
|
1587
|
+
const bool doTemperatureScaling =
|
|
1588
|
+
(ir->etc != TemperatureCoupling::No
|
|
1589
|
+
&& do_per_step(step + ir->nsttcouple - 1, ir->nsttcouple));
|
|
1590
|
+
|
|
1591
|
+
// This applies Leap-Frog, LINCS and SETTLE in succession
|
|
1592
|
+
integrator->integrate(
|
|
1593
|
+
stateGpu->getLocalForcesReadyOnDeviceEvent(
|
|
1594
|
+
runScheduleWork_->stepWork, runScheduleWork_->simulationWork),
|
|
1595
|
+
ir->delta_t,
|
|
1596
|
+
true,
|
|
1597
|
+
bCalcVir,
|
|
1598
|
+
shake_vir,
|
|
1599
|
+
doTemperatureScaling,
|
|
1600
|
+
ekind_->tcstat,
|
|
1601
|
+
doParrinelloRahman,
|
|
1602
|
+
ir->pressureCouplingOptions.nstpcouple * ir->delta_t,
|
|
1603
|
+
parrinelloRahmanM);
|
|
1604
|
+
}
|
|
1605
|
+
else
|
|
1606
|
+
{
|
|
1607
|
+
                /* With multiple time stepping we need to do an additional normal
                 * update step to obtain the virial and dH/dl, as the actual MTS integration
                 * uses an acceleration in which the slow forces are multiplied by mtsFactor.
                 * Using that acceleration would result in a virial whose slow-force
                 * contribution is a factor mtsFactor too large.
                 */
                const bool separateVirialConstraining =
                        (simulationWork.useMts && (bCalcVir || computeDHDL) && constr_ != nullptr);
                if (separateVirialConstraining)
                {
                    upd.update_for_constraint_virial(*ir,
                                                     md->homenr,
                                                     md->havePartiallyFrozenAtoms,
                                                     md->invmass,
                                                     md->invMassPerDim,
                                                     *state_,
                                                     f.view().forceWithPadding(),
                                                     *ekind_);

                    constrain_coordinates(constr_,
                                          do_log,
                                          do_ene,
                                          step,
                                          state_,
                                          upd.xp()->arrayRefWithPadding(),
                                          &dvdl_constr,
                                          bCalcVir,
                                          shake_vir);
                }

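                /* Illustrative note (not taken from the original file): with two MTS levels
                 * the regular update uses, on every mtsFactor-th step, the combined force
                 *
                 *   F_combined = F_fast + mtsFactor * F_slow
                 *
                 * so a virial computed from that acceleration would overcount the slow-force
                 * contribution by mtsFactor; the separate constraining pass above therefore
                 * uses the plain, uncombined forces when the virial or dH/dl is required.
                 */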
ArrayRefWithPadding<const RVec> forceCombined =
|
|
1638
|
+
(simulationWork.useMts && step % ir->mtsLevels[1].stepFactor == 0)
|
|
1639
|
+
? f.view().forceMtsCombinedWithPadding()
|
|
1640
|
+
: f.view().forceWithPadding();
|
|
1641
|
+
upd.update_coords(*ir,
|
|
1642
|
+
step,
|
|
1643
|
+
md->homenr,
|
|
1644
|
+
md->havePartiallyFrozenAtoms,
|
|
1645
|
+
md->ptype,
|
|
1646
|
+
md->invmass,
|
|
1647
|
+
md->invMassPerDim,
|
|
1648
|
+
state_,
|
|
1649
|
+
forceCombined,
|
|
1650
|
+
&fcdata,
|
|
1651
|
+
ekind_,
|
|
1652
|
+
parrinelloRahmanM,
|
|
1653
|
+
etrtPOSITION,
|
|
1654
|
+
cr_,
|
|
1655
|
+
constr_ != nullptr);
|
|
1656
|
+
|
|
1657
|
+
wallcycle_stop(wallCycleCounters_, WallCycleCounter::Update);
|
|
1658
|
+
|
|
1659
|
+
constrain_coordinates(constr_,
|
|
1660
|
+
do_log,
|
|
1661
|
+
do_ene,
|
|
1662
|
+
step,
|
|
1663
|
+
state_,
|
|
1664
|
+
upd.xp()->arrayRefWithPadding(),
|
|
1665
|
+
separateVirialConstraining ? nullptr : &dvdl_constr,
|
|
1666
|
+
bCalcVir && !separateVirialConstraining,
|
|
1667
|
+
shake_vir);
|
|
1668
|
+
|
|
1669
|
+
upd.update_sd_second_half(*ir,
|
|
1670
|
+
step,
|
|
1671
|
+
&dvdl_constr,
|
|
1672
|
+
md->homenr,
|
|
1673
|
+
md->ptype,
|
|
1674
|
+
md->invmass,
|
|
1675
|
+
state_,
|
|
1676
|
+
cr_,
|
|
1677
|
+
nrnb_,
|
|
1678
|
+
wallCycleCounters_,
|
|
1679
|
+
constr_,
|
|
1680
|
+
do_log,
|
|
1681
|
+
do_ene);
|
|
1682
|
+
upd.finish_update(*ir,
|
|
1683
|
+
md->havePartiallyFrozenAtoms,
|
|
1684
|
+
md->homenr,
|
|
1685
|
+
state_,
|
|
1686
|
+
wallCycleCounters_,
|
|
1687
|
+
constr_ != nullptr);
|
|
1688
|
+
}
|
|
1689
|
+
|
|
1690
|
+
if (ir->bPull && ir->pull->bSetPbcRefToPrevStepCOM)
|
|
1691
|
+
{
|
|
1692
|
+
updatePrevStepPullCom(pullWork_, state_->pull_com_prev_step);
|
|
1693
|
+
}
|
|
1694
|
+
|
|
1695
|
+
enerd_->term[F_DVDL_CONSTR] += dvdl_constr;
|
|
1696
|
+
}
|
|
1697
|
+
}
|
|
1698
|
+
|
|
1699
|
+
if (simulationWork.useMdGpuGraph)
|
|
1700
|
+
{
|
|
1701
|
+
GMX_ASSERT((mdGraph != nullptr), "MD GPU graph does not exist.");
|
|
1702
|
+
if (mdGraph->graphIsCapturingThisStep())
|
|
1703
|
+
{
|
|
1704
|
+
mdGraph->endRecord();
|
|
1705
|
+
// Force graph reinstantiation (instead of graph exec
|
|
1706
|
+
// update): with PME tuning, since the GPU kernels
|
|
1707
|
+
// chosen by the FFT library can vary with grid size;
|
|
1708
|
+
// or with an odd nstlist, since the odd/even step
|
|
1709
|
+
// pruning pattern will change
|
|
1710
|
+
bool forceGraphReinstantiation =
|
|
1711
|
+
pme_loadbal_is_active(pme_loadbal) || ((ir->nstlist % 2) == 1);
|
|
1712
|
+
mdGraph->createExecutableGraph(forceGraphReinstantiation);
|
|
1713
|
+
}
|
|
1714
|
+
if (mdGraph->useGraphThisStep())
|
|
1715
|
+
{
|
|
1716
|
+
mdGraph->launchGraphMdStep(integrator->xUpdatedOnDeviceEvent());
|
|
1717
|
+
}
|
|
1718
|
+
if (bNS)
|
|
1719
|
+
{
|
|
1720
|
+
// TODO: merge disableForDomainIfAnyPpRankHasCpuForces() back into reset() when
|
|
1721
|
+
// domainWork initialization is moved out of do_force().
|
|
1722
|
+
fr_->mdGraph[MdGraphEvenOrOddStep::EvenStep]->disableForDomainIfAnyPpRankHasCpuForces(
|
|
1723
|
+
runScheduleWork_->domainWork.haveCpuLocalForceWork);
|
|
1724
|
+
fr_->mdGraph[MdGraphEvenOrOddStep::OddStep]->disableForDomainIfAnyPpRankHasCpuForces(
|
|
1725
|
+
runScheduleWork_->domainWork.haveCpuLocalForceWork);
|
|
1726
|
+
}
|
|
1727
|
+
usedMdGpuGraphLastStep = mdGraph->useGraphThisStep();
|
|
1728
|
+
}
|
|
1729
|
+
|
|
1730
|
+
/* ############## IF NOT VV, Calculate globals HERE ############ */
|
|
1731
|
+
/* With Leap-Frog we can skip compute_globals at
|
|
1732
|
+
* non-communication steps, but we need to calculate
|
|
1733
|
+
* the kinetic energy one step before communication.
|
|
1734
|
+
*/
|
|
1735
|
+
{
|
|
1736
|
+
// Organize to do inter-simulation signalling on steps if
|
|
1737
|
+
// and when algorithms require it.
|
|
1738
|
+
const bool doInterSimSignal = (simulationsShareState && do_per_step(step, nstSignalComm));
|
|
1739
|
+
|
|
1740
|
+
if (useGpuForUpdate)
|
|
1741
|
+
{
|
|
1742
|
+
const bool coordinatesRequiredForStopCM =
|
|
1743
|
+
bStopCM && (bGStat || needHalfStepKineticEnergy || doInterSimSignal)
|
|
1744
|
+
&& !EI_VV(ir->eI);
|
|
1745
|
+
|
|
1746
|
+
// Copy coordinates when needed to stop the CM motion or for replica exchange
|
|
1747
|
+
if (coordinatesRequiredForStopCM || bDoReplEx)
|
|
1748
|
+
{
|
|
1749
|
+
stateGpu->copyCoordinatesFromGpu(state_->x, AtomLocality::Local);
|
|
1750
|
+
stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
|
|
1751
|
+
}
|
|
1752
|
+
|
|
1753
|
+
// Copy velocities back to the host if:
|
|
1754
|
+
// - Globals are computed this step (includes the energy output steps).
|
|
1755
|
+
// - Temperature is needed for the next step.
|
|
1756
|
+
// - This is a replica exchange step (even though we will only need
|
|
1757
|
+
// the velocities if an exchange succeeds)
|
|
1758
|
+
if (bGStat || needHalfStepKineticEnergy || bDoReplEx)
|
|
1759
|
+
{
|
|
1760
|
+
stateGpu->copyVelocitiesFromGpu(state_->v, AtomLocality::Local);
|
|
1761
|
+
stateGpu->waitVelocitiesReadyOnHost(AtomLocality::Local);
|
|
1762
|
+
}
|
|
1763
|
+
}
|
|
1764
|
+
|
|
1765
|
+
if (bGStat || needHalfStepKineticEnergy || doInterSimSignal)
|
|
1766
|
+
{
|
|
1767
|
+
// Since we're already communicating at this step, we
|
|
1768
|
+
// can propagate intra-simulation signals. Note that
|
|
1769
|
+
// check_nstglobalcomm has the responsibility for
|
|
1770
|
+
// choosing the value of nstglobalcomm that is one way
|
|
1771
|
+
// bGStat becomes true, so we can't get into a
|
|
1772
|
+
// situation where e.g. checkpointing can't be
|
|
1773
|
+
// signalled.
|
|
1774
|
+
bool doIntraSimSignal = true;
|
|
1775
|
+
SimulationSignaller signaller(&signals, cr_, ms_, doInterSimSignal, doIntraSimSignal);
|
|
1776
|
+
|
|
1777
|
+
compute_globals(gstat,
|
|
1778
|
+
cr_,
|
|
1779
|
+
ir,
|
|
1780
|
+
fr_,
|
|
1781
|
+
ekind_,
|
|
1782
|
+
makeConstArrayRef(state_->x),
|
|
1783
|
+
makeConstArrayRef(state_->v),
|
|
1784
|
+
state_->box,
|
|
1785
|
+
md,
|
|
1786
|
+
nrnb_,
|
|
1787
|
+
&vcm,
|
|
1788
|
+
wallCycleCounters_,
|
|
1789
|
+
enerd_,
|
|
1790
|
+
force_vir,
|
|
1791
|
+
shake_vir,
|
|
1792
|
+
total_vir,
|
|
1793
|
+
pres,
|
|
1794
|
+
&signaller,
|
|
1795
|
+
lastbox,
|
|
1796
|
+
&bSumEkinhOld,
|
|
1797
|
+
(bGStat ? CGLO_GSTAT : 0) | (!EI_VV(ir->eI) && bCalcEner ? CGLO_ENERGY : 0)
|
|
1798
|
+
| (!EI_VV(ir->eI) && bStopCM ? CGLO_STOPCM : 0)
|
|
1799
|
+
| (!EI_VV(ir->eI) ? CGLO_TEMPERATURE : 0)
|
|
1800
|
+
| (!EI_VV(ir->eI) ? CGLO_PRESSURE : 0) | CGLO_CONSTRAINT,
|
|
1801
|
+
step,
|
|
1802
|
+
&observablesReducer);
|
|
1803
|
+
if (!EI_VV(ir->eI) && bStopCM)
|
|
1804
|
+
{
|
|
1805
|
+
process_and_stopcm_grp(
|
|
1806
|
+
fpLog_, &vcm, *md, makeArrayRef(state_->x), makeArrayRef(state_->v));
|
|
1807
|
+
inc_nrnb(nrnb_, eNR_STOPCM, md->homenr);
|
|
1808
|
+
|
|
1809
|
+
// TODO: The special case of removing CM motion should be dealt more gracefully
|
|
1810
|
+
if (useGpuForUpdate)
|
|
1811
|
+
{
|
|
1812
|
+
// Issue #3988, #4106.
|
|
1813
|
+
stateGpu->resetCoordinatesCopiedToDeviceEvent(AtomLocality::Local);
|
|
1814
|
+
stateGpu->copyCoordinatesToGpu(state_->x, AtomLocality::Local);
|
|
1815
|
+
// Here we block until the H2D copy completes because event sync with the
|
|
1816
|
+
// force kernels that use the coordinates on the next steps is not implemented
|
|
1817
|
+
// (not because of a race on state->x being modified on the CPU while H2D is in progress).
|
|
1818
|
+
stateGpu->waitCoordinatesCopiedToDevice(AtomLocality::Local);
|
|
1819
|
+
// If the COM removal changed the velocities on the CPU, this has to be accounted for.
|
|
1820
|
+
if (vcm.mode != ComRemovalAlgorithm::No)
|
|
1821
|
+
{
|
|
1822
|
+
stateGpu->copyVelocitiesToGpu(state_->v, AtomLocality::Local);
|
|
1823
|
+
}
|
|
1824
|
+
}
|
|
1825
|
+
}
|
|
1826
|
+
}
|
|
1827
|
+
}
|
|
1828
|
+
|
|
1829
|
+
/* ############# END CALC EKIN AND PRESSURE ################# */
|
|
1830
|
+
|
|
1831
|
+
        /* Note: this is OK, but there are some numerical precision issues with using the convergence of
           the virial that should probably be addressed eventually. state->veta has better properties,
           but what we actually need entering the new cycle is the new shake_vir value. Ideally, we could
           generate the new shake_vir, but test the veta value for convergence. This will take some thought. */

if (ir->efep != FreeEnergyPerturbationType::No && !EI_VV(ir->eI))
|
|
1837
|
+
{
|
|
1838
|
+
/* Sum up the foreign energy and dK/dl terms for md and sd.
|
|
1839
|
+
Currently done every step so that dH/dl is correct in the .edr */
|
|
1840
|
+
accumulateKineticLambdaComponents(enerd_, state_->lambda, *ir->fepvals);
|
|
1841
|
+
}
|
|
1842
|
+
|
|
1843
|
+
const real currentSystemRefT =
|
|
1844
|
+
(haveEnsembleTemperature(*ir) ? ekind_->currentEnsembleTemperature() : 0.0_real);
|
|
1845
|
+
const bool scaleCoordinates = !useGpuForUpdate || bDoReplEx;
|
|
1846
|
+
update_pcouple_after_coordinates(fpLog_,
|
|
1847
|
+
step,
|
|
1848
|
+
ir->pressureCouplingOptions,
|
|
1849
|
+
ir->ld_seed,
|
|
1850
|
+
currentSystemRefT,
|
|
1851
|
+
ir->opts.nFreeze,
|
|
1852
|
+
ir->deform,
|
|
1853
|
+
ir->delta_t,
|
|
1854
|
+
md->homenr,
|
|
1855
|
+
md->cFREEZE,
|
|
1856
|
+
pres,
|
|
1857
|
+
force_vir,
|
|
1858
|
+
shake_vir,
|
|
1859
|
+
&pressureCouplingMu,
|
|
1860
|
+
state_,
|
|
1861
|
+
nrnb_,
|
|
1862
|
+
upd.deform(),
|
|
1863
|
+
scaleCoordinates);
|
|
1864
|
+
|
|
1865
|
+
const bool doBerendsenPressureCoupling =
|
|
1866
|
+
(inputRec_->pressureCouplingOptions.epc == PressureCoupling::Berendsen
|
|
1867
|
+
&& do_per_step(step, inputRec_->pressureCouplingOptions.nstpcouple));
|
|
1868
|
+
const bool doCRescalePressureCoupling =
|
|
1869
|
+
(inputRec_->pressureCouplingOptions.epc == PressureCoupling::CRescale
|
|
1870
|
+
&& do_per_step(step, inputRec_->pressureCouplingOptions.nstpcouple));
|
|
1871
|
+
if (useGpuForUpdate
|
|
1872
|
+
&& (doBerendsenPressureCoupling || doCRescalePressureCoupling || doParrinelloRahman))
|
|
1873
|
+
{
|
|
1874
|
+
integrator->scaleCoordinates(pressureCouplingMu);
|
|
1875
|
+
if (doCRescalePressureCoupling)
|
|
1876
|
+
{
|
|
1877
|
+
integrator->scaleVelocities(invertBoxMatrix(pressureCouplingMu));
|
|
1878
|
+
}
|
|
1879
|
+
integrator->setPbc(PbcType::Xyz, state_->box);
|
|
1880
|
+
}
|
|
1881
|
+
|
|
1882
|
+
/* ################# END UPDATE STEP 2 ################# */
|
|
1883
|
+
/* #### We now have r(t+dt) and v(t+dt/2) ############# */
|
|
1884
|
+
|
|
1885
|
+
/* The coordinates (x) were unshifted in update */
|
|
1886
|
+
if (!bGStat)
|
|
1887
|
+
{
|
|
1888
|
+
/* We will not sum ekinh_old,
|
|
1889
|
+
* so signal that we still have to do it.
|
|
1890
|
+
*/
|
|
1891
|
+
bSumEkinhOld = TRUE;
|
|
1892
|
+
}
|
|
1893
|
+
|
|
1894
|
+
if (bCalcEner)
|
|
1895
|
+
{
|
|
1896
|
+
/* ######### BEGIN PREPARING EDR OUTPUT ########### */
|
|
1897
|
+
|
|
1898
|
+
/* use the directly determined last velocity, not actually the averaged half steps */
|
|
1899
|
+
if (bTrotter && ir->eI == IntegrationAlgorithm::VV)
|
|
1900
|
+
{
|
|
1901
|
+
enerd_->term[F_EKIN] = last_ekin;
|
|
1902
|
+
}
|
|
1903
|
+
enerd_->term[F_ETOT] = enerd_->term[F_EPOT] + enerd_->term[F_EKIN];
|
|
1904
|
+
|
|
1905
|
+
if (integratorHasConservedEnergyQuantity(ir))
|
|
1906
|
+
{
|
|
1907
|
+
if (EI_VV(ir->eI))
|
|
1908
|
+
{
|
|
1909
|
+
enerd_->term[F_ECONSERVED] = enerd_->term[F_ETOT] + saved_conserved_quantity;
|
|
1910
|
+
}
|
|
1911
|
+
else
|
|
1912
|
+
{
|
|
1913
|
+
enerd_->term[F_ECONSERVED] =
|
|
1914
|
+
enerd_->term[F_ETOT]
|
|
1915
|
+
+ NPT_energy(ir->pressureCouplingOptions,
|
|
1916
|
+
ir->etc,
|
|
1917
|
+
gmx::constArrayRefFromArray(ir->opts.nrdf, ir->opts.ngtc),
|
|
1918
|
+
*ekind_,
|
|
1919
|
+
inputrecNvtTrotter(ir) || inputrecNptTrotter(ir),
|
|
1920
|
+
state_,
|
|
1921
|
+
&MassQ);
|
|
1922
|
+
}
|
|
1923
|
+
}
|
|
1924
|
+
/* ######### END PREPARING EDR OUTPUT ########### */
|
|
1925
|
+
}
|
|
1926
|
+
|
|
1927
|
+
/* Output stuff */
|
|
1928
|
+
if (MAIN(cr_))
|
|
1929
|
+
{
|
|
1930
|
+
if (fpLog_ && do_log && bDoExpanded)
|
|
1931
|
+
{
|
|
1932
|
+
/* only needed if doing expanded ensemble */
|
|
1933
|
+
PrintFreeEnergyInfoToFile(fpLog_,
|
|
1934
|
+
ir->fepvals.get(),
|
|
1935
|
+
ir->expandedvals.get(),
|
|
1936
|
+
ir->bSimTemp ? ir->simtempvals.get() : nullptr,
|
|
1937
|
+
stateGlobal_->dfhist,
|
|
1938
|
+
state_->fep_state,
|
|
1939
|
+
ir->nstlog,
|
|
1940
|
+
step);
|
|
1941
|
+
}
|
|
1942
|
+
if (bCalcEner)
|
|
1943
|
+
{
|
|
1944
|
+
const bool outputDHDL = (computeDHDL && do_per_step(step, ir->fepvals->nstdhdl));
|
|
1945
|
+
|
|
1946
|
+
energyOutput.addDataAtEnergyStep(outputDHDL,
|
|
1947
|
+
bCalcEnerStep,
|
|
1948
|
+
t,
|
|
1949
|
+
md->tmass,
|
|
1950
|
+
enerd_,
|
|
1951
|
+
ir->fepvals.get(),
|
|
1952
|
+
lastbox,
|
|
1953
|
+
PTCouplingArrays{ state_->boxv,
|
|
1954
|
+
state_->nosehoover_xi,
|
|
1955
|
+
state_->nosehoover_vxi,
|
|
1956
|
+
state_->nhpres_xi,
|
|
1957
|
+
state_->nhpres_vxi },
|
|
1958
|
+
state_->fep_state,
|
|
1959
|
+
total_vir,
|
|
1960
|
+
pres,
|
|
1961
|
+
ekind_,
|
|
1962
|
+
mu_tot,
|
|
1963
|
+
constr_);
|
|
1964
|
+
}
|
|
1965
|
+
else
|
|
1966
|
+
{
|
|
1967
|
+
energyOutput.recordNonEnergyStep();
|
|
1968
|
+
}
|
|
1969
|
+
|
|
1970
|
+
gmx_bool do_dr = do_per_step(step, ir->nstdisreout);
|
|
1971
|
+
gmx_bool do_or = do_per_step(step, ir->nstorireout);
|
|
1972
|
+
|
|
1973
|
+
if (doSimulatedAnnealing)
|
|
1974
|
+
{
|
|
1975
|
+
gmx::EnergyOutput::printAnnealingTemperatures(
|
|
1976
|
+
do_log ? fpLog_ : nullptr, *groups, ir->opts, *ekind_);
|
|
1977
|
+
}
|
|
1978
|
+
if (do_log || do_ene || do_dr || do_or)
|
|
1979
|
+
{
|
|
1980
|
+
energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf),
|
|
1981
|
+
do_ene,
|
|
1982
|
+
do_dr,
|
|
1983
|
+
do_or,
|
|
1984
|
+
do_log ? fpLog_ : nullptr,
|
|
1985
|
+
step,
|
|
1986
|
+
t,
|
|
1987
|
+
fr_->fcdata.get(),
|
|
1988
|
+
awh.get());
|
|
1989
|
+
}
|
|
1990
|
+
if (do_log && ((ir->bDoAwh && awh->hasFepLambdaDimension()) || ir->fepvals->delta_lambda != 0))
|
|
1991
|
+
{
|
|
1992
|
+
const bool isInitialOutput = false;
|
|
1993
|
+
printLambdaStateToLog(fpLog_, state_->lambda, isInitialOutput);
|
|
1994
|
+
}
|
|
1995
|
+
|
|
1996
|
+
if (ir->bPull)
|
|
1997
|
+
{
|
|
1998
|
+
pull_print_output(pullWork_, step, t);
|
|
1999
|
+
}
|
|
2000
|
+
|
|
2001
|
+
if (do_per_step(step, ir->nstlog))
|
|
2002
|
+
{
|
|
2003
|
+
if (fflush(fpLog_) != 0)
|
|
2004
|
+
{
|
|
2005
|
+
gmx_fatal(FARGS, "Cannot flush logfile - maybe you are out of disk space?");
|
|
2006
|
+
}
|
|
2007
|
+
}
|
|
2008
|
+
}
|
|
2009
|
+
if (bDoExpanded)
|
|
2010
|
+
{
|
|
2011
|
+
/* Have to do this part _after_ outputting the logfile and the edr file */
|
|
2012
|
+
/* Gets written into the state at the beginning of next loop*/
|
|
2013
|
+
state_->fep_state = lamnew;
|
|
2014
|
+
}
|
|
2015
|
+
else if (ir->bDoAwh && awh->needForeignEnergyDifferences(step))
|
|
2016
|
+
{
|
|
2017
|
+
state_->fep_state = awh->fepLambdaState();
|
|
2018
|
+
}
|
|
2019
|
+
/* Print the remaining wall clock time for the run */
|
|
2020
|
+
if (isMainSimMainRank(ms_, MAIN(cr_)) && (do_verbose || gmx_got_usr_signal()) && !bPMETunePrinting)
|
|
2021
|
+
{
|
|
2022
|
+
if (shellfc)
|
|
2023
|
+
{
|
|
2024
|
+
fprintf(stderr, "\n");
|
|
2025
|
+
}
|
|
2026
|
+
print_time(stderr, wallTimeAccounting_, step, ir, cr_);
|
|
2027
|
+
}
|
|
2028
|
+
|
|
2029
|
+
/* Ion/water position swapping.
|
|
2030
|
+
* Not done in last step since trajectory writing happens before this call
|
|
2031
|
+
* in the MD loop and exchanges would be lost anyway. */
|
|
2032
|
+
bNeedRepartition = FALSE;
|
|
2033
|
+
if ((ir->eSwapCoords != SwapType::No) && (step > 0) && !bLastStep
|
|
2034
|
+
&& do_per_step(step, ir->swap->nstswap))
|
|
2035
|
+
{
|
|
2036
|
+
bNeedRepartition = do_swapcoords(cr_,
|
|
2037
|
+
step,
|
|
2038
|
+
t,
|
|
2039
|
+
ir,
|
|
2040
|
+
swap_,
|
|
2041
|
+
wallCycleCounters_,
|
|
2042
|
+
as_rvec_array(state_->x.data()),
|
|
2043
|
+
state_->box,
|
|
2044
|
+
MAIN(cr_) && mdrunOptions_.verbose,
|
|
2045
|
+
bRerunMD);
|
|
2046
|
+
|
|
2047
|
+
if (bNeedRepartition && haveDDAtomOrdering(*cr_))
|
|
2048
|
+
{
|
|
2049
|
+
dd_collect_state(cr_->dd, state_, stateGlobal_);
|
|
2050
|
+
}
|
|
2051
|
+
}
|
|
2052
|
+
|
|
2053
|
+
/* Replica exchange */
|
|
2054
|
+
bExchanged = FALSE;
|
|
2055
|
+
if (bDoReplEx)
|
|
2056
|
+
{
|
|
2057
|
+
bExchanged =
|
|
2058
|
+
replica_exchange(fpLog_, cr_, ms_, repl_ex, stateGlobal_, enerd_, state_, step, t);
|
|
2059
|
+
}
|
|
2060
|
+
|
|
2061
|
+
if ((bExchanged || bNeedRepartition) && haveDDAtomOrdering(*cr_))
|
|
2062
|
+
{
|
|
2063
|
+
dd_partition_system(fpLog_,
|
|
2064
|
+
mdLog_,
|
|
2065
|
+
step,
|
|
2066
|
+
cr_,
|
|
2067
|
+
TRUE,
|
|
2068
|
+
stateGlobal_,
|
|
2069
|
+
topGlobal_,
|
|
2070
|
+
*ir,
|
|
2071
|
+
mdModulesNotifiers_,
|
|
2072
|
+
imdSession_,
|
|
2073
|
+
pullWork_,
|
|
2074
|
+
state_,
|
|
2075
|
+
&f,
|
|
2076
|
+
mdAtoms_,
|
|
2077
|
+
top_,
|
|
2078
|
+
fr_,
|
|
2079
|
+
virtualSites_,
|
|
2080
|
+
constr_,
|
|
2081
|
+
nrnb_,
|
|
2082
|
+
wallCycleCounters_,
|
|
2083
|
+
FALSE);
|
|
2084
|
+
upd.updateAfterPartition(state_->numAtoms(), md->cFREEZE, md->cTC, md->cACC);
|
|
2085
|
+
fr_->longRangeNonbondeds->updateAfterPartition(*md);
|
|
2086
|
+
}
|
|
2087
|
+
|
|
2088
|
+
bFirstStep = FALSE;
|
|
2089
|
+
bInitStep = FALSE;
|
|
2090
|
+
|
|
2091
|
+
/* ####### SET VARIABLES FOR NEXT ITERATION IF THEY STILL NEED IT ###### */
|
|
2092
|
+
/* With all integrators, except VV, we need to retain the pressure
|
|
2093
|
+
* at the current step for coupling at the next step.
|
|
2094
|
+
*/
|
|
2095
|
+
if (state_->hasEntry(StateEntry::PressurePrevious)
|
|
2096
|
+
&& (bGStatEveryStep
|
|
2097
|
+
|| (ir->pressureCouplingOptions.nstpcouple > 0
|
|
2098
|
+
&& step % ir->pressureCouplingOptions.nstpcouple == 0)))
|
|
2099
|
+
{
|
|
2100
|
+
/* Store the pressure in t_state for pressure coupling
|
|
2101
|
+
* at the next MD step.
|
|
2102
|
+
*/
|
|
2103
|
+
copy_mat(pres, state_->pres_prev);
|
|
2104
|
+
}
|
|
2105
|
+
|
|
2106
|
+
/* ####### END SET VARIABLES FOR NEXT ITERATION ###### */
|
|
2107
|
+
|
|
2108
|
+
if ((membed_ != nullptr) && (!bLastStep))
|
|
2109
|
+
{
|
|
2110
|
+
rescale_membed(step_rel, membed_, as_rvec_array(stateGlobal_->x.data()));
|
|
2111
|
+
}
|
|
2112
|
+
|
|
2113
|
+
cycles = wallcycle_stop(wallCycleCounters_, WallCycleCounter::Step);
|
|
2114
|
+
if (haveDDAtomOrdering(*cr_) && wallCycleCounters_)
|
|
2115
|
+
{
|
|
2116
|
+
dd_cycles_add(cr_->dd, cycles, ddCyclStep);
|
|
2117
|
+
}
|
|
2118
|
+
|
|
2119
|
+
/* increase the MD step number */
|
|
2120
|
+
step++;
|
|
2121
|
+
step_rel++;
|
|
2122
|
+
observablesReducer.markAsReadyToReduce();
|
|
2123
|
+
|
|
2124
|
+
#if GMX_FAHCORE
|
|
2125
|
+
if (MAIN(cr))
|
|
2126
|
+
{
|
|
2127
|
+
fcReportProgress(ir->nsteps + ir->init_step, step);
|
|
2128
|
+
}
|
|
2129
|
+
#endif
|
|
2130
|
+
|
|
2131
|
+
resetHandler->resetCounters(
|
|
2132
|
+
step, step_rel, mdLog_, fpLog_, cr_, fr_->nbv.get(), nrnb_, fr_->pmedata, pme_loadbal, wallCycleCounters_, wallTimeAccounting_);
|
|
2133
|
+
|
|
2134
|
+
/* If bIMD is TRUE, the main updates the IMD energy record and sends positions to VMD client */
|
|
2135
|
+
imdSession_->updateEnergyRecordAndSendPositionsAndEnergies(bInteractiveMDstep, step, bCalcEner);
|
|
2136
|
+
|
|
2137
|
+
// any run that uses GPUs must be at least offloading nonbondeds
|
|
2138
|
+
const bool usingGpu = simulationWork.useGpuNonbonded;
|
|
2139
|
+
if (usingGpu)
|
|
2140
|
+
{
|
|
2141
|
+
// ensure that GPU errors do not propagate between MD steps
|
|
2142
|
+
checkPendingDeviceErrorBetweenSteps();
|
|
2143
|
+
}
|
|
2144
|
+
}
|
|
2145
|
+
/* End of main MD loop */
|
|
2146
|
+
|
|
2147
|
+
/* Closing TNG files can include compressing data. Therefore it is good to do that
|
|
2148
|
+
* before stopping the time measurements. */
|
|
2149
|
+
mdoutf_tng_close(outf);
|
|
2150
|
+
|
|
2151
|
+
/* Stop measuring walltime */
|
|
2152
|
+
walltime_accounting_end_time(wallTimeAccounting_);
|
|
2153
|
+
|
|
2154
|
+
if (simulationWork.haveSeparatePmeRank)
|
|
2155
|
+
{
|
|
2156
|
+
/* Tell the PME only node to finish */
|
|
2157
|
+
gmx_pme_send_finish(cr_);
|
|
2158
|
+
}
|
|
2159
|
+
|
|
2160
|
+
if (MAIN(cr_))
|
|
2161
|
+
{
|
|
2162
|
+
if (ir->nstcalcenergy > 0)
|
|
2163
|
+
{
|
|
2164
|
+
energyOutput.printEnergyConservation(fpLog_, ir->simulation_part, EI_MD(ir->eI));
|
|
2165
|
+
|
|
2166
|
+
gmx::EnergyOutput::printAnnealingTemperatures(fpLog_, *groups, ir->opts, *ekind_);
|
|
2167
|
+
energyOutput.printAverages(fpLog_, groups);
|
|
2168
|
+
}
|
|
2169
|
+
}
|
|
2170
|
+
done_mdoutf(outf);
|
|
2171
|
+
|
|
2172
|
+
if (bPMETune)
|
|
2173
|
+
{
|
|
2174
|
+
pme_loadbal_done(pme_loadbal, fpLog_, mdLog_, fr_->nbv->useGpu());
|
|
2175
|
+
}
|
|
2176
|
+
|
|
2177
|
+
done_shellfc(fpLog_, shellfc, step_rel);
|
|
2178
|
+
|
|
2179
|
+
if (useReplicaExchange && MAIN(cr_))
|
|
2180
|
+
{
|
|
2181
|
+
print_replica_exchange_statistics(fpLog_, repl_ex);
|
|
2182
|
+
}
|
|
2183
|
+
|
|
2184
|
+
walltime_accounting_set_nsteps_done(wallTimeAccounting_, step_rel);
|
|
2185
|
+
|
|
2186
|
+
global_stat_destroy(gstat);
|
|
2187
|
+
}
|