wmglobalqueue-2.4.5.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- Utils/CPMetrics.py +270 -0
- Utils/CertTools.py +100 -0
- Utils/EmailAlert.py +50 -0
- Utils/ExtendedUnitTestCase.py +62 -0
- Utils/FileTools.py +182 -0
- Utils/IteratorTools.py +80 -0
- Utils/MathUtils.py +31 -0
- Utils/MemoryCache.py +119 -0
- Utils/Patterns.py +24 -0
- Utils/Pipeline.py +137 -0
- Utils/PortForward.py +97 -0
- Utils/ProcFS.py +112 -0
- Utils/ProcessStats.py +194 -0
- Utils/PythonVersion.py +17 -0
- Utils/Signals.py +36 -0
- Utils/TemporaryEnvironment.py +27 -0
- Utils/Throttled.py +227 -0
- Utils/Timers.py +130 -0
- Utils/Timestamps.py +86 -0
- Utils/TokenManager.py +143 -0
- Utils/Tracing.py +60 -0
- Utils/TwPrint.py +98 -0
- Utils/Utilities.py +318 -0
- Utils/__init__.py +11 -0
- Utils/wmcoreDTools.py +707 -0
- WMCore/ACDC/Collection.py +57 -0
- WMCore/ACDC/CollectionTypes.py +12 -0
- WMCore/ACDC/CouchCollection.py +67 -0
- WMCore/ACDC/CouchFileset.py +238 -0
- WMCore/ACDC/CouchService.py +73 -0
- WMCore/ACDC/DataCollectionService.py +485 -0
- WMCore/ACDC/Fileset.py +94 -0
- WMCore/ACDC/__init__.py +11 -0
- WMCore/Algorithms/Alarm.py +39 -0
- WMCore/Algorithms/MathAlgos.py +274 -0
- WMCore/Algorithms/MiscAlgos.py +67 -0
- WMCore/Algorithms/ParseXMLFile.py +115 -0
- WMCore/Algorithms/Permissions.py +27 -0
- WMCore/Algorithms/Singleton.py +58 -0
- WMCore/Algorithms/SubprocessAlgos.py +129 -0
- WMCore/Algorithms/__init__.py +7 -0
- WMCore/Cache/GenericDataCache.py +98 -0
- WMCore/Cache/WMConfigCache.py +572 -0
- WMCore/Cache/__init__.py +0 -0
- WMCore/Configuration.py +659 -0
- WMCore/DAOFactory.py +47 -0
- WMCore/DataStructs/File.py +177 -0
- WMCore/DataStructs/Fileset.py +140 -0
- WMCore/DataStructs/Job.py +182 -0
- WMCore/DataStructs/JobGroup.py +142 -0
- WMCore/DataStructs/JobPackage.py +49 -0
- WMCore/DataStructs/LumiList.py +734 -0
- WMCore/DataStructs/Mask.py +219 -0
- WMCore/DataStructs/MathStructs/ContinuousSummaryHistogram.py +197 -0
- WMCore/DataStructs/MathStructs/DiscreteSummaryHistogram.py +92 -0
- WMCore/DataStructs/MathStructs/SummaryHistogram.py +117 -0
- WMCore/DataStructs/MathStructs/__init__.py +0 -0
- WMCore/DataStructs/Pickleable.py +24 -0
- WMCore/DataStructs/Run.py +256 -0
- WMCore/DataStructs/Subscription.py +175 -0
- WMCore/DataStructs/WMObject.py +47 -0
- WMCore/DataStructs/WorkUnit.py +112 -0
- WMCore/DataStructs/Workflow.py +60 -0
- WMCore/DataStructs/__init__.py +8 -0
- WMCore/Database/CMSCouch.py +1430 -0
- WMCore/Database/ConfigDBMap.py +29 -0
- WMCore/Database/CouchMonitoring.py +450 -0
- WMCore/Database/CouchUtils.py +118 -0
- WMCore/Database/DBCore.py +198 -0
- WMCore/Database/DBCreator.py +113 -0
- WMCore/Database/DBExceptionHandler.py +59 -0
- WMCore/Database/DBFactory.py +117 -0
- WMCore/Database/DBFormatter.py +177 -0
- WMCore/Database/Dialects.py +13 -0
- WMCore/Database/ExecuteDAO.py +327 -0
- WMCore/Database/MongoDB.py +241 -0
- WMCore/Database/MySQL/Destroy.py +42 -0
- WMCore/Database/MySQL/ListUserContent.py +20 -0
- WMCore/Database/MySQL/__init__.py +9 -0
- WMCore/Database/MySQLCore.py +132 -0
- WMCore/Database/Oracle/Destroy.py +56 -0
- WMCore/Database/Oracle/ListUserContent.py +19 -0
- WMCore/Database/Oracle/__init__.py +9 -0
- WMCore/Database/ResultSet.py +44 -0
- WMCore/Database/Transaction.py +91 -0
- WMCore/Database/__init__.py +9 -0
- WMCore/Database/ipy_profile_couch.py +438 -0
- WMCore/GlobalWorkQueue/CherryPyThreads/CleanUpTask.py +29 -0
- WMCore/GlobalWorkQueue/CherryPyThreads/HeartbeatMonitor.py +105 -0
- WMCore/GlobalWorkQueue/CherryPyThreads/LocationUpdateTask.py +28 -0
- WMCore/GlobalWorkQueue/CherryPyThreads/ReqMgrInteractionTask.py +35 -0
- WMCore/GlobalWorkQueue/CherryPyThreads/__init__.py +0 -0
- WMCore/GlobalWorkQueue/__init__.py +0 -0
- WMCore/GroupUser/CouchObject.py +127 -0
- WMCore/GroupUser/Decorators.py +51 -0
- WMCore/GroupUser/Group.py +33 -0
- WMCore/GroupUser/Interface.py +73 -0
- WMCore/GroupUser/User.py +96 -0
- WMCore/GroupUser/__init__.py +11 -0
- WMCore/Lexicon.py +836 -0
- WMCore/REST/Auth.py +202 -0
- WMCore/REST/CherryPyPeriodicTask.py +166 -0
- WMCore/REST/Error.py +333 -0
- WMCore/REST/Format.py +642 -0
- WMCore/REST/HeartbeatMonitorBase.py +90 -0
- WMCore/REST/Main.py +636 -0
- WMCore/REST/Server.py +2435 -0
- WMCore/REST/Services.py +24 -0
- WMCore/REST/Test.py +120 -0
- WMCore/REST/Tools.py +38 -0
- WMCore/REST/Validation.py +250 -0
- WMCore/REST/__init__.py +1 -0
- WMCore/ReqMgr/DataStructs/RequestStatus.py +209 -0
- WMCore/ReqMgr/DataStructs/RequestType.py +13 -0
- WMCore/ReqMgr/DataStructs/__init__.py +0 -0
- WMCore/ReqMgr/__init__.py +1 -0
- WMCore/Services/AlertManager/AlertManagerAPI.py +111 -0
- WMCore/Services/AlertManager/__init__.py +0 -0
- WMCore/Services/CRIC/CRIC.py +238 -0
- WMCore/Services/CRIC/__init__.py +0 -0
- WMCore/Services/DBS/DBS3Reader.py +1044 -0
- WMCore/Services/DBS/DBSConcurrency.py +44 -0
- WMCore/Services/DBS/DBSErrors.py +112 -0
- WMCore/Services/DBS/DBSReader.py +23 -0
- WMCore/Services/DBS/DBSUtils.py +166 -0
- WMCore/Services/DBS/DBSWriterObjects.py +381 -0
- WMCore/Services/DBS/ProdException.py +133 -0
- WMCore/Services/DBS/__init__.py +8 -0
- WMCore/Services/FWJRDB/FWJRDBAPI.py +118 -0
- WMCore/Services/FWJRDB/__init__.py +0 -0
- WMCore/Services/HTTPS/HTTPSAuthHandler.py +66 -0
- WMCore/Services/HTTPS/__init__.py +0 -0
- WMCore/Services/LogDB/LogDB.py +201 -0
- WMCore/Services/LogDB/LogDBBackend.py +191 -0
- WMCore/Services/LogDB/LogDBExceptions.py +11 -0
- WMCore/Services/LogDB/LogDBReport.py +85 -0
- WMCore/Services/LogDB/__init__.py +0 -0
- WMCore/Services/MSPileup/__init__.py +0 -0
- WMCore/Services/MSUtils/MSUtils.py +54 -0
- WMCore/Services/MSUtils/__init__.py +0 -0
- WMCore/Services/McM/McM.py +173 -0
- WMCore/Services/McM/__init__.py +8 -0
- WMCore/Services/MonIT/Grafana.py +133 -0
- WMCore/Services/MonIT/__init__.py +0 -0
- WMCore/Services/PyCondor/PyCondorAPI.py +154 -0
- WMCore/Services/PyCondor/__init__.py +0 -0
- WMCore/Services/ReqMgr/ReqMgr.py +261 -0
- WMCore/Services/ReqMgr/__init__.py +0 -0
- WMCore/Services/ReqMgrAux/ReqMgrAux.py +419 -0
- WMCore/Services/ReqMgrAux/__init__.py +0 -0
- WMCore/Services/RequestDB/RequestDBReader.py +267 -0
- WMCore/Services/RequestDB/RequestDBWriter.py +39 -0
- WMCore/Services/RequestDB/__init__.py +0 -0
- WMCore/Services/Requests.py +624 -0
- WMCore/Services/Rucio/Rucio.py +1290 -0
- WMCore/Services/Rucio/RucioUtils.py +74 -0
- WMCore/Services/Rucio/__init__.py +0 -0
- WMCore/Services/RucioConMon/RucioConMon.py +121 -0
- WMCore/Services/RucioConMon/__init__.py +0 -0
- WMCore/Services/Service.py +400 -0
- WMCore/Services/StompAMQ/__init__.py +0 -0
- WMCore/Services/TagCollector/TagCollector.py +155 -0
- WMCore/Services/TagCollector/XMLUtils.py +98 -0
- WMCore/Services/TagCollector/__init__.py +0 -0
- WMCore/Services/UUIDLib.py +13 -0
- WMCore/Services/UserFileCache/UserFileCache.py +160 -0
- WMCore/Services/UserFileCache/__init__.py +8 -0
- WMCore/Services/WMAgent/WMAgent.py +63 -0
- WMCore/Services/WMAgent/__init__.py +0 -0
- WMCore/Services/WMArchive/CMSSWMetrics.py +526 -0
- WMCore/Services/WMArchive/DataMap.py +463 -0
- WMCore/Services/WMArchive/WMArchive.py +33 -0
- WMCore/Services/WMArchive/__init__.py +0 -0
- WMCore/Services/WMBS/WMBS.py +97 -0
- WMCore/Services/WMBS/__init__.py +0 -0
- WMCore/Services/WMStats/DataStruct/RequestInfoCollection.py +300 -0
- WMCore/Services/WMStats/DataStruct/__init__.py +0 -0
- WMCore/Services/WMStats/WMStatsPycurl.py +145 -0
- WMCore/Services/WMStats/WMStatsReader.py +445 -0
- WMCore/Services/WMStats/WMStatsWriter.py +273 -0
- WMCore/Services/WMStats/__init__.py +0 -0
- WMCore/Services/WMStatsServer/WMStatsServer.py +134 -0
- WMCore/Services/WMStatsServer/__init__.py +0 -0
- WMCore/Services/WorkQueue/WorkQueue.py +492 -0
- WMCore/Services/WorkQueue/__init__.py +0 -0
- WMCore/Services/__init__.py +8 -0
- WMCore/Services/pycurl_manager.py +574 -0
- WMCore/WMBase.py +50 -0
- WMCore/WMConnectionBase.py +164 -0
- WMCore/WMException.py +183 -0
- WMCore/WMExceptions.py +269 -0
- WMCore/WMFactory.py +76 -0
- WMCore/WMInit.py +377 -0
- WMCore/WMLogging.py +104 -0
- WMCore/WMSpec/ConfigSectionTree.py +442 -0
- WMCore/WMSpec/Persistency.py +135 -0
- WMCore/WMSpec/Steps/BuildMaster.py +87 -0
- WMCore/WMSpec/Steps/BuildTools.py +201 -0
- WMCore/WMSpec/Steps/Builder.py +97 -0
- WMCore/WMSpec/Steps/Diagnostic.py +89 -0
- WMCore/WMSpec/Steps/Emulator.py +62 -0
- WMCore/WMSpec/Steps/ExecuteMaster.py +208 -0
- WMCore/WMSpec/Steps/Executor.py +210 -0
- WMCore/WMSpec/Steps/StepFactory.py +213 -0
- WMCore/WMSpec/Steps/TaskEmulator.py +75 -0
- WMCore/WMSpec/Steps/Template.py +204 -0
- WMCore/WMSpec/Steps/Templates/AlcaHarvest.py +76 -0
- WMCore/WMSpec/Steps/Templates/CMSSW.py +613 -0
- WMCore/WMSpec/Steps/Templates/DQMUpload.py +59 -0
- WMCore/WMSpec/Steps/Templates/DeleteFiles.py +70 -0
- WMCore/WMSpec/Steps/Templates/LogArchive.py +84 -0
- WMCore/WMSpec/Steps/Templates/LogCollect.py +105 -0
- WMCore/WMSpec/Steps/Templates/StageOut.py +105 -0
- WMCore/WMSpec/Steps/Templates/__init__.py +10 -0
- WMCore/WMSpec/Steps/WMExecutionFailure.py +21 -0
- WMCore/WMSpec/Steps/__init__.py +8 -0
- WMCore/WMSpec/Utilities.py +63 -0
- WMCore/WMSpec/WMSpecErrors.py +12 -0
- WMCore/WMSpec/WMStep.py +347 -0
- WMCore/WMSpec/WMTask.py +1997 -0
- WMCore/WMSpec/WMWorkload.py +2288 -0
- WMCore/WMSpec/WMWorkloadTools.py +382 -0
- WMCore/WMSpec/__init__.py +9 -0
- WMCore/WorkQueue/DataLocationMapper.py +273 -0
- WMCore/WorkQueue/DataStructs/ACDCBlock.py +47 -0
- WMCore/WorkQueue/DataStructs/Block.py +48 -0
- WMCore/WorkQueue/DataStructs/CouchWorkQueueElement.py +148 -0
- WMCore/WorkQueue/DataStructs/WorkQueueElement.py +274 -0
- WMCore/WorkQueue/DataStructs/WorkQueueElementResult.py +152 -0
- WMCore/WorkQueue/DataStructs/WorkQueueElementsSummary.py +185 -0
- WMCore/WorkQueue/DataStructs/__init__.py +0 -0
- WMCore/WorkQueue/Policy/End/EndPolicyInterface.py +44 -0
- WMCore/WorkQueue/Policy/End/SingleShot.py +22 -0
- WMCore/WorkQueue/Policy/End/__init__.py +32 -0
- WMCore/WorkQueue/Policy/PolicyInterface.py +17 -0
- WMCore/WorkQueue/Policy/Start/Block.py +258 -0
- WMCore/WorkQueue/Policy/Start/Dataset.py +180 -0
- WMCore/WorkQueue/Policy/Start/MonteCarlo.py +131 -0
- WMCore/WorkQueue/Policy/Start/ResubmitBlock.py +171 -0
- WMCore/WorkQueue/Policy/Start/StartPolicyInterface.py +316 -0
- WMCore/WorkQueue/Policy/Start/__init__.py +34 -0
- WMCore/WorkQueue/Policy/__init__.py +57 -0
- WMCore/WorkQueue/WMBSHelper.py +772 -0
- WMCore/WorkQueue/WorkQueue.py +1237 -0
- WMCore/WorkQueue/WorkQueueBackend.py +750 -0
- WMCore/WorkQueue/WorkQueueBase.py +39 -0
- WMCore/WorkQueue/WorkQueueExceptions.py +44 -0
- WMCore/WorkQueue/WorkQueueReqMgrInterface.py +278 -0
- WMCore/WorkQueue/WorkQueueUtils.py +130 -0
- WMCore/WorkQueue/__init__.py +13 -0
- WMCore/Wrappers/JsonWrapper/JSONThunker.py +342 -0
- WMCore/Wrappers/JsonWrapper/__init__.py +7 -0
- WMCore/Wrappers/__init__.py +6 -0
- WMCore/__init__.py +10 -0
- wmglobalqueue-2.4.5.1.data/data/bin/wmc-dist-patch +15 -0
- wmglobalqueue-2.4.5.1.data/data/bin/wmc-dist-unpatch +8 -0
- wmglobalqueue-2.4.5.1.data/data/bin/wmc-httpd +3 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/.couchapprc +1 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/README.md +40 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/_attachments/index.html +264 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/_attachments/js/ElementInfoByWorkflow.js +96 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/_attachments/js/StuckElementInfo.js +57 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/_attachments/js/WorkloadInfoTable.js +80 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/_attachments/js/dataTable.js +70 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/_attachments/js/namespace.js +23 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/_attachments/style/main.css +75 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/couchapp.json +4 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/filters/childQueueFilter.js +13 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/filters/filterDeletedDocs.js +3 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/filters/queueFilter.js +11 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/language +1 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/lib/mustache.js +333 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/lib/validate.js +27 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/lib/workqueue_utils.js +61 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/lists/elementsDetail.js +28 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/lists/filter.js +86 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/lists/stuckElements.js +38 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/lists/workRestrictions.js +153 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/lists/workflowSummary.js +28 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/rewrites.json +73 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/shows/redirect.js +23 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/shows/status.js +40 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/templates/ElementSummaryByWorkflow.html +27 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/templates/StuckElementSummary.html +26 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/templates/TaskStatus.html +23 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/templates/WorkflowSummary.html +27 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/templates/partials/workqueue-common-lib.html +2 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/templates/partials/yui-lib-remote.html +16 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/templates/partials/yui-lib.html +18 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/updates/in-place.js +50 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/validate_doc_update.js +8 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/vendor/couchapp/_attachments/jquery.couch.app.js +235 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/vendor/couchapp/_attachments/jquery.pathbinder.js +173 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/activeData/map.js +8 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/activeData/reduce.js +2 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/activeParentData/map.js +8 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/activeParentData/reduce.js +2 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/activePileupData/map.js +8 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/activePileupData/reduce.js +2 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/analyticsData/map.js +11 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/analyticsData/reduce.js +1 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/availableByPriority/map.js +6 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/conflicts/map.js +5 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/elements/map.js +5 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/elementsByData/map.js +8 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/elementsByParent/map.js +8 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/elementsByParentData/map.js +8 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/elementsByPileupData/map.js +8 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/elementsByStatus/map.js +8 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/elementsBySubscription/map.js +6 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/elementsByWorkflow/map.js +8 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/elementsByWorkflow/reduce.js +3 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/elementsDetailByWorkflowAndStatus/map.js +26 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/jobInjectStatusByRequest/map.js +10 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/jobInjectStatusByRequest/reduce.js +1 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/jobStatusByRequest/map.js +6 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/jobStatusByRequest/reduce.js +1 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/jobsByChildQueueAndPriority/map.js +6 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/jobsByChildQueueAndPriority/reduce.js +1 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/jobsByChildQueueAndStatus/map.js +6 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/jobsByChildQueueAndStatus/reduce.js +1 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/jobsByRequest/map.js +6 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/jobsByRequest/reduce.js +1 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/jobsByStatus/map.js +6 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/jobsByStatus/reduce.js +1 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/jobsByStatusAndPriority/map.js +6 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/jobsByStatusAndPriority/reduce.js +1 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/openRequests/map.js +6 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/recent-items/map.js +5 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/siteWhitelistByRequest/map.js +6 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/siteWhitelistByRequest/reduce.js +1 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/specsByWorkflow/map.js +5 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/stuckElements/map.js +38 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/wmbsInjectStatusByRequest/map.js +12 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/wmbsInjectStatusByRequest/reduce.js +3 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/wmbsUrl/map.js +6 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/wmbsUrl/reduce.js +2 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/wmbsUrlByRequest/map.js +6 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/wmbsUrlByRequest/reduce.js +2 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/workflowSummary/map.js +9 -0
- wmglobalqueue-2.4.5.1.data/data/data/couchapps/WorkQueue/views/workflowSummary/reduce.js +10 -0
- wmglobalqueue-2.4.5.1.dist-info/METADATA +26 -0
- wmglobalqueue-2.4.5.1.dist-info/RECORD +347 -0
- wmglobalqueue-2.4.5.1.dist-info/WHEEL +5 -0
- wmglobalqueue-2.4.5.1.dist-info/licenses/LICENSE +202 -0
- wmglobalqueue-2.4.5.1.dist-info/licenses/NOTICE +16 -0
- wmglobalqueue-2.4.5.1.dist-info/top_level.txt +2 -0
|
@@ -0,0 +1,2288 @@
|
|
|
1
|
+
#!/usr/bin/env python
|
|
2
|
+
"""
|
|
3
|
+
_WMWorkload_
|
|
4
|
+
|
|
5
|
+
Request level processing specification, acts as a container of a set
|
|
6
|
+
of related tasks.
|
|
7
|
+
"""
|
|
8
|
+
from __future__ import print_function
|
|
9
|
+
|
|
10
|
+
from builtins import next, range
|
|
11
|
+
from future.utils import viewitems, viewvalues
|
|
12
|
+
from collections import namedtuple
|
|
13
|
+
import inspect
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
from Utils.Utilities import strToBool, makeList
|
|
17
|
+
from WMCore.Configuration import ConfigSection
|
|
18
|
+
from WMCore.Lexicon import sanitizeURL
|
|
19
|
+
from WMCore.WMException import WMException
|
|
20
|
+
from WMCore.WMSpec.WMSpecErrors import WMSpecFactoryException
|
|
21
|
+
from WMCore.WMSpec.ConfigSectionTree import findTop
|
|
22
|
+
from WMCore.WMSpec.Persistency import PersistencyHelper
|
|
23
|
+
from WMCore.WMSpec.WMTask import WMTask, WMTaskHelper
|
|
24
|
+
from WMCore.WMSpec.WMWorkloadTools import (validateArgumentsUpdate, validateUnknownArgs, validateSiteLists,
|
|
25
|
+
_validateArgumentOptions, loadSpecClassByType,
|
|
26
|
+
setAssignArgumentsWithDefault)
|
|
27
|
+
|
|
28
|
+
parseTaskPath = lambda p: [x for x in p.split('/') if x.strip() != '']
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def getWorkloadFromTask(taskRef):
|
|
32
|
+
"""
|
|
33
|
+
_getWorkloadFromTask_
|
|
34
|
+
|
|
35
|
+
Util to retrieve a Workload wrapped in a WorkloadHelper
|
|
36
|
+
from a WMTask.
|
|
37
|
+
"""
|
|
38
|
+
nodeData = taskRef
|
|
39
|
+
if isinstance(taskRef, WMTaskHelper):
|
|
40
|
+
nodeData = taskRef.data
|
|
41
|
+
|
|
42
|
+
topNode = findTop(nodeData)
|
|
43
|
+
if not hasattr(topNode, "objectType"):
|
|
44
|
+
msg = "Top Node is not a WM definition object:\n"
|
|
45
|
+
msg += "Object has no objectType attribute"
|
|
46
|
+
# TODO: Replace with real exception class
|
|
47
|
+
raise RuntimeError(msg)
|
|
48
|
+
|
|
49
|
+
objType = getattr(topNode, "objectType")
|
|
50
|
+
if objType != "WMWorkload":
|
|
51
|
+
msg = "Top level object is not a WMWorkload: %s" % objType
|
|
52
|
+
# TODO: Replace with real exception class
|
|
53
|
+
raise RuntimeError(msg)
|
|
54
|
+
|
|
55
|
+
return WMWorkloadHelper(topNode)
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
class WMWorkloadException(WMException):
|
|
59
|
+
"""
|
|
60
|
+
_WMWorkloadException_
|
|
61
|
+
|
|
62
|
+
Exceptions raised by the Workload during filling
|
|
63
|
+
"""
|
|
64
|
+
pass
|
|
65
|
+
|
|
66
|
+
class WMWorkloadUnhandledException(WMException):
|
|
67
|
+
"""
|
|
68
|
+
_WMWorkloadUnhandledException_
|
|
69
|
+
|
|
70
|
+
Exceptions raised by the Workload during filling
|
|
71
|
+
"""
|
|
72
|
+
pass
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
setterTuple = namedtuple('SetterTuple', ['reqArg', 'setterFunc', 'setterSignature'])
|
|
76
|
+
|
|
77
|
+
class WMWorkloadHelper(PersistencyHelper):
|
|
78
|
+
"""
|
|
79
|
+
_WMWorkloadHelper_
|
|
80
|
+
|
|
81
|
+
Methods & Utils for working with a WMWorkload instance.
|
|
82
|
+
"""
|
|
83
|
+
|
|
84
|
+
def __init__(self, wmWorkload=None):
|
|
85
|
+
self.data = wmWorkload
|
|
86
|
+
self.settersMap = {}
|
|
87
|
+
|
|
88
|
+
def updateWorkloadArgs(self, reqArgs):
|
|
89
|
+
"""
|
|
90
|
+
Method to take a dictionary of arguments of the type:
|
|
91
|
+
{reqArg1: value,
|
|
92
|
+
reqArg2: value,
|
|
93
|
+
...}
|
|
94
|
+
and update the workload by a predefined map of reqArg to setter methods.
|
|
95
|
+
:param reqArgs: A Dictionary of request arguments to be updated
|
|
96
|
+
:return: Nothing, Raises an error of type WMWorkloadException if
|
|
97
|
+
fails to apply the proper setter method
|
|
98
|
+
"""
|
|
99
|
+
# NOTE: So far we support only a single argument setter methods, like
|
|
100
|
+
# setSiteWhitelist or setPriority. This may change in the future,
|
|
101
|
+
# but it will require a change in the logic of how we validate and
|
|
102
|
+
# call the proper setter methods bellow.
|
|
103
|
+
|
|
104
|
+
# populate the current instance settersMap
|
|
105
|
+
self.settersMap['RequestPriority'] = setterTuple('RequestPriority', self.setPriority, inspect.signature(self.setPriority))
|
|
106
|
+
self.settersMap['SiteBlacklist'] = setterTuple('SiteBlacklist', self.setSiteBlacklist, inspect.signature(self.setSiteBlacklist))
|
|
107
|
+
self.settersMap['SiteWhitelist'] = setterTuple('SiteWhitelist', self.setSiteWhitelist, inspect.signature(self.setSiteWhitelist))
|
|
108
|
+
|
|
109
|
+
reqArgsNothandled = []
|
|
110
|
+
# First validate if we can properly call the setter function given the reqArgs passed.
|
|
111
|
+
for reqArg, argValue in reqArgs.items():
|
|
112
|
+
if reqArg not in self.settersMap:
|
|
113
|
+
reqArgsNothandled.append(reqArg)
|
|
114
|
+
continue
|
|
115
|
+
try:
|
|
116
|
+
self.settersMap[reqArg].setterSignature.bind(argValue)
|
|
117
|
+
except TypeError as ex:
|
|
118
|
+
msg = f"Setter's method signature does not match the method calls we currently support: Error: req{str(ex)}"
|
|
119
|
+
raise WMWorkloadException(msg) from None
|
|
120
|
+
|
|
121
|
+
if reqArgsNothandled:
|
|
122
|
+
msg = f"Unsupported or missing setter method for updating request arguments: {reqArgsNothandled}."
|
|
123
|
+
raise WMWorkloadUnhandledException(msg) from None
|
|
124
|
+
|
|
125
|
+
# Now go through the reqArg again and call every setter method according to the map
|
|
126
|
+
for reqArg, argValue in reqArgs.items():
|
|
127
|
+
try:
|
|
128
|
+
self.settersMap[reqArg].setterFunc(argValue)
|
|
129
|
+
except Exception as ex:
|
|
130
|
+
currFrame = inspect.currentframe()
|
|
131
|
+
argsInfo = inspect.getargvalues(currFrame)
|
|
132
|
+
argVals = {arg: argsInfo.locals.get(arg) for arg in argsInfo.args}
|
|
133
|
+
msg = f"Failure while calling setter method {self.settersMap[reqArg].setterFunc.__name__} "
|
|
134
|
+
msg += f"With arguments: {argVals}"
|
|
135
|
+
msg += f"Full exception string: {str(ex)}"
|
|
136
|
+
raise WMWorkloadException(msg) from None
|
|
137
|
+
|
|
138
|
+
def setSpecUrl(self, url):
|
|
139
|
+
self.data.persistency.specUrl = sanitizeURL(url)["url"]
|
|
140
|
+
|
|
141
|
+
def specUrl(self):
|
|
142
|
+
"""
|
|
143
|
+
_specUrl_
|
|
144
|
+
|
|
145
|
+
return url location of workload
|
|
146
|
+
"""
|
|
147
|
+
return self.data.persistency.specUrl
|
|
148
|
+
|
|
149
|
+
def name(self):
|
|
150
|
+
"""
|
|
151
|
+
_name_
|
|
152
|
+
|
|
153
|
+
return name of the workload
|
|
154
|
+
"""
|
|
155
|
+
return self.data._internal_name
|
|
156
|
+
|
|
157
|
+
def setName(self, workloadName):
|
|
158
|
+
"""
|
|
159
|
+
_setName_
|
|
160
|
+
|
|
161
|
+
Set the workload name.
|
|
162
|
+
"""
|
|
163
|
+
self.data._internal_name = workloadName
|
|
164
|
+
return
|
|
165
|
+
|
|
166
|
+
def setRequestType(self, requestType):
|
|
167
|
+
self.data.requestType = requestType
|
|
168
|
+
|
|
169
|
+
def setStepProperties(self, assignArgs):
|
|
170
|
+
"""
|
|
171
|
+
_setStepProperties_
|
|
172
|
+
|
|
173
|
+
Used for properly setting AcqEra/ProcStr/ProcVer for each step in a StepChain request
|
|
174
|
+
during assignment. Only used if one of those parameters is a dictionary.
|
|
175
|
+
"""
|
|
176
|
+
if "AcquisitionEra" in assignArgs and isinstance(assignArgs["AcquisitionEra"], dict):
|
|
177
|
+
pass
|
|
178
|
+
elif "ProcessingString" in assignArgs and isinstance(assignArgs["ProcessingString"], dict):
|
|
179
|
+
pass
|
|
180
|
+
elif "ProcessingVersion" in assignArgs and isinstance(assignArgs["ProcessingVersion"], dict):
|
|
181
|
+
pass
|
|
182
|
+
else:
|
|
183
|
+
return
|
|
184
|
+
|
|
185
|
+
stepNameMapping = self.getStepMapping()
|
|
186
|
+
# it has only one top level task
|
|
187
|
+
for task in self.taskIterator():
|
|
188
|
+
# Merge task has cmsRun1 step, so it gets messy on Merge ACDC of StepChain
|
|
189
|
+
if task.taskType() == "Merge":
|
|
190
|
+
continue
|
|
191
|
+
task.updateLFNsAndDatasets(dictValues=assignArgs, stepMapping=stepNameMapping)
|
|
192
|
+
|
|
193
|
+
return
|
|
194
|
+
|
|
195
|
+
def setTaskEnvironmentVariables(self, envDict):
|
|
196
|
+
"""
|
|
197
|
+
_setTaskEnvironmentVariables_
|
|
198
|
+
|
|
199
|
+
Used for setting environment variables for each task in a request.
|
|
200
|
+
"""
|
|
201
|
+
if not isinstance(envDict, dict):
|
|
202
|
+
return
|
|
203
|
+
|
|
204
|
+
for task in self.taskIterator():
|
|
205
|
+
task.addEnvironmentVariables(envDict)
|
|
206
|
+
return
|
|
207
|
+
|
|
208
|
+
def setOverrideCatalog(self, tfcFile):
|
|
209
|
+
"""
|
|
210
|
+
_setOverrideCatalog_
|
|
211
|
+
|
|
212
|
+
Used for setting overrideCatalog option for each step in the workload.
|
|
213
|
+
"""
|
|
214
|
+
for task in self.taskIterator():
|
|
215
|
+
task.setOverrideCatalog(tfcFile)
|
|
216
|
+
return
|
|
217
|
+
|
|
218
|
+
def setStepMapping(self, mapping):
|
|
219
|
+
"""
|
|
220
|
+
_setStepMapping_
|
|
221
|
+
|
|
222
|
+
Mostly used for StepChains. It creates a mapping between the StepName and the step
|
|
223
|
+
number and the cmsRun number. E.g.:
|
|
224
|
+
{'GENSIM': ('Step1', 'cmsRun1'), 'DIGI': ('Step2', 'cmsRun2'), 'RECO': ('Step3', 'cmsRun3')}
|
|
225
|
+
"""
|
|
226
|
+
self.data.properties.stepMapping = mapping
|
|
227
|
+
|
|
228
|
+
def getStepMapping(self):
|
|
229
|
+
"""
|
|
230
|
+
_getStepMapping_
|
|
231
|
+
|
|
232
|
+
Only important for StepChains. Map from step name to step number
|
|
233
|
+
and cmsRun number.
|
|
234
|
+
"""
|
|
235
|
+
return getattr(self.data.properties, "stepMapping", None)
|
|
236
|
+
|
|
237
|
+
def setStepParentageMapping(self, mapping):
|
|
238
|
+
"""
|
|
239
|
+
_setStepParentageMapping_
|
|
240
|
+
|
|
241
|
+
Used for StepChains. Set a wider dictionary structure with a mapping between
|
|
242
|
+
parent and child steps as well as dataset parentage
|
|
243
|
+
"""
|
|
244
|
+
self.data.properties.stepParentageMapping = mapping
|
|
245
|
+
|
|
246
|
+
def getStepParentageMapping(self):
|
|
247
|
+
"""
|
|
248
|
+
_getStepParentageMapping_
|
|
249
|
+
|
|
250
|
+
Only important for StepChains. Map from step name to step and parent
|
|
251
|
+
step properties, including a map of output datasets to the parent dataset.
|
|
252
|
+
"""
|
|
253
|
+
return getattr(self.data.properties, "stepParentageMapping", {})
|
|
254
|
+
|
|
255
|
+
def getStepParentDataset(self, childDataset):
|
|
256
|
+
"""
|
|
257
|
+
:param childDataset: child dataset which is looking for parent dataset
|
|
258
|
+
:return: str parent dataset if exist, otherwise None
|
|
259
|
+
|
|
260
|
+
Correct parentage mapping is set when workflow is assigned, Shouldn't call this method before workflow is assigned
|
|
261
|
+
Assumes there is only one parent dataset given childDataset
|
|
262
|
+
"""
|
|
263
|
+
### FIXME: Seangchan, I don't think we need this method, since we'll add the
|
|
264
|
+
# map to the dbsbuffer_dataset table and then use it from there. So,
|
|
265
|
+
# wmbsHelper should actually fetch the simple map data and insert that into db
|
|
266
|
+
stepParentageMap = self.getStepParentageMapping()
|
|
267
|
+
if stepParentageMap:
|
|
268
|
+
for stepName in stepParentageMap:
|
|
269
|
+
stepItem = stepParentageMap[stepName]
|
|
270
|
+
outDSMap = stepItem["OutputDatasetMap"]
|
|
271
|
+
for outmodule in outDSMap:
|
|
272
|
+
if childDataset in outDSMap[outmodule] and stepItem['ParentDataset']:
|
|
273
|
+
return stepItem['ParentDataset']
|
|
274
|
+
else:
|
|
275
|
+
return None
|
|
276
|
+
|
|
277
|
+
def setTaskParentageMapping(self, mapping):
|
|
278
|
+
"""
|
|
279
|
+
_setTaskParentageMapping_
|
|
280
|
+
|
|
281
|
+
Used for TaskChains. Sets a dictionary with the task / parent task /
|
|
282
|
+
parent dataset / and output datasets relationship.
|
|
283
|
+
"""
|
|
284
|
+
self.data.properties.taskParentageMapping = mapping
|
|
285
|
+
|
|
286
|
+
def getTaskParentageMapping(self):
|
|
287
|
+
"""
|
|
288
|
+
_getTaskParentageMapping_
|
|
289
|
+
|
|
290
|
+
Only important for TaskChains. Returns a map of task name to
|
|
291
|
+
parent dataset and output datasets.
|
|
292
|
+
"""
|
|
293
|
+
return getattr(self.data.properties, "taskParentageMapping", {})
|
|
294
|
+
|
|
295
|
+
def getChainParentageSimpleMapping(self):
|
|
296
|
+
"""
|
|
297
|
+
Creates a simple map of task or step to parent and output datasets
|
|
298
|
+
such that it can be friendly stored in the reqmgr workload cache doc.
|
|
299
|
+
:return: {'Step1': {'ParentDset': 'blah1', 'ChildDsets': ['blah2']},
|
|
300
|
+
'Step2': {'ParentDset': 'blah2', 'ChildDsets': ['blah3', 'blah4],
|
|
301
|
+
...} if stepParentageMapping exist otherwise None
|
|
302
|
+
"""
|
|
303
|
+
if self.getRequestType() == "TaskChain":
|
|
304
|
+
chainMap = self.getTaskParentageMapping()
|
|
305
|
+
elif self.getRequestType() == "StepChain":
|
|
306
|
+
chainMap = self.getStepParentageMapping()
|
|
307
|
+
else:
|
|
308
|
+
return {}
|
|
309
|
+
|
|
310
|
+
newMap = {}
|
|
311
|
+
if chainMap:
|
|
312
|
+
for cData in viewvalues(chainMap):
|
|
313
|
+
cNum = cData.get('TaskNumber', cData.get('StepNumber'))
|
|
314
|
+
newMap[cNum] = {'ParentDset': cData['ParentDataset'],
|
|
315
|
+
'ChildDsets': []}
|
|
316
|
+
for outMod in cData['OutputDatasetMap']:
|
|
317
|
+
newMap[cNum]['ChildDsets'].append(cData['OutputDatasetMap'][outMod])
|
|
318
|
+
return newMap
|
|
319
|
+
|
|
320
|
+
def updateStepParentageMap(self):
|
|
321
|
+
"""
|
|
322
|
+
_updateStepParentageMap
|
|
323
|
+
Used to update the step parentage mapping of StepChain requests at the
|
|
324
|
+
end of the assignment process, given that we might have new output
|
|
325
|
+
dataset names
|
|
326
|
+
:return: just updates the workload property: stepParentageMapping
|
|
327
|
+
"""
|
|
328
|
+
topLevelTask = next(self.taskIterator())
|
|
329
|
+
if topLevelTask.taskType() == "Merge":
|
|
330
|
+
# handle ACDC for merge jobs, see #9051. Nothing to do here
|
|
331
|
+
return
|
|
332
|
+
|
|
333
|
+
parentMap = self.getStepParentageMapping()
|
|
334
|
+
listOfStepNames = list(parentMap)
|
|
335
|
+
for stepName in listOfStepNames:
|
|
336
|
+
if parentMap[stepName]['OutputDatasetMap']:
|
|
337
|
+
# then there is output dataset, let's update it
|
|
338
|
+
cmsRunNumber = parentMap[stepName]['StepCmsRun']
|
|
339
|
+
stepHelper = topLevelTask.getStepHelper(cmsRunNumber)
|
|
340
|
+
for outputModuleName in stepHelper.listOutputModules():
|
|
341
|
+
outputModule = stepHelper.getOutputModule(outputModuleName)
|
|
342
|
+
outputDataset = "/%s/%s/%s" % (outputModule.primaryDataset,
|
|
343
|
+
outputModule.processedDataset,
|
|
344
|
+
outputModule.dataTier)
|
|
345
|
+
|
|
346
|
+
# now find and replace the old dataset by the new dataset name
|
|
347
|
+
oldOutputDset = parentMap[stepName]['OutputDatasetMap'][outputModuleName]
|
|
348
|
+
for s in listOfStepNames:
|
|
349
|
+
if parentMap[s]['ParentDataset'] == oldOutputDset:
|
|
350
|
+
parentMap[s]['ParentDataset'] = outputDataset
|
|
351
|
+
if oldOutputDset == parentMap[s]['OutputDatasetMap'].get(outputModuleName, ""):
|
|
352
|
+
parentMap[s]['OutputDatasetMap'][outputModuleName] = outputDataset
|
|
353
|
+
|
|
354
|
+
self.setStepParentageMapping(parentMap)
|
|
355
|
+
|
|
356
|
+
return
|
|
357
|
+
|
|
358
|
+
def updateTaskParentageMap(self):
|
|
359
|
+
"""
|
|
360
|
+
_updateTaskParentageMap_
|
|
361
|
+
Used to update the task dataset parentage mapping of TaskChain requests
|
|
362
|
+
at the end of the assignment process, given that we might have new output
|
|
363
|
+
dataset names
|
|
364
|
+
:return: just updates the workload property: taskParentageMapping
|
|
365
|
+
"""
|
|
366
|
+
taskMap = self.getTaskParentageMapping()
|
|
367
|
+
|
|
368
|
+
for tName in taskMap:
|
|
369
|
+
if not taskMap[tName]['OutputDatasetMap']:
|
|
370
|
+
continue
|
|
371
|
+
|
|
372
|
+
taskO = self.getTaskByName(tName)
|
|
373
|
+
if taskO is None:
|
|
374
|
+
# Resubmission requests might not have certain tasks
|
|
375
|
+
continue
|
|
376
|
+
|
|
377
|
+
for outInfo in taskO.listOutputDatasetsAndModules():
|
|
378
|
+
# Check whether it's a transient output module
|
|
379
|
+
if outInfo['outputModule'] not in taskMap[tName]['OutputDatasetMap']:
|
|
380
|
+
continue
|
|
381
|
+
oldOutputDset = taskMap[tName]['OutputDatasetMap'][outInfo['outputModule']]
|
|
382
|
+
taskMap[tName]['OutputDatasetMap'][outInfo['outputModule']] = outInfo['outputDataset']
|
|
383
|
+
for tt in taskMap:
|
|
384
|
+
if taskMap[tt]['ParentDataset'] == oldOutputDset:
|
|
385
|
+
taskMap[tt]['ParentDataset'] = outInfo['outputDataset']
|
|
386
|
+
|
|
387
|
+
self.setTaskParentageMapping(taskMap)
|
|
388
|
+
|
|
389
|
+
return
|
|
390
|
+
|
|
391
|
+
def getInitialJobCount(self):
|
|
392
|
+
"""
|
|
393
|
+
_getInitialJobCount_
|
|
394
|
+
|
|
395
|
+
Get the initial job count, this is incremented everytime the workflow
|
|
396
|
+
is resubmitted with ACDC.
|
|
397
|
+
"""
|
|
398
|
+
return self.data.initialJobCount
|
|
399
|
+
|
|
400
|
+
def setInitialJobCount(self, jobCount):
|
|
401
|
+
"""
|
|
402
|
+
_setInitialJobCount_
|
|
403
|
+
|
|
404
|
+
Set the initial job count.
|
|
405
|
+
"""
|
|
406
|
+
self.data.initialJobCount = jobCount
|
|
407
|
+
return
|
|
408
|
+
|
|
409
|
+
def getDashboardActivity(self):
|
|
410
|
+
"""
|
|
411
|
+
_getDashboardActivity_
|
|
412
|
+
|
|
413
|
+
Retrieve the dashboard activity.
|
|
414
|
+
"""
|
|
415
|
+
return self.data.properties.dashboardActivity
|
|
416
|
+
|
|
417
|
+
def setDashboardActivity(self, dashboardActivity):
|
|
418
|
+
"""
|
|
419
|
+
_setDashboardActivity_
|
|
420
|
+
|
|
421
|
+
Set the dashboard activity for the workflow.
|
|
422
|
+
"""
|
|
423
|
+
self.data.properties.dashboardActivity = dashboardActivity
|
|
424
|
+
return
|
|
425
|
+
|
|
426
|
+
def getTopLevelTask(self):
|
|
427
|
+
"""
|
|
428
|
+
_getTopLevelTask_
|
|
429
|
+
|
|
430
|
+
Retrieve the top level task.
|
|
431
|
+
"""
|
|
432
|
+
topLevelTasks = []
|
|
433
|
+
for task in self.taskIterator():
|
|
434
|
+
if task.isTopOfTree():
|
|
435
|
+
topLevelTasks.append(task)
|
|
436
|
+
|
|
437
|
+
return topLevelTasks
|
|
438
|
+
|
|
439
|
+
def getOwner(self):
|
|
440
|
+
"""
|
|
441
|
+
_getOwner_
|
|
442
|
+
|
|
443
|
+
Retrieve the owner information.
|
|
444
|
+
"""
|
|
445
|
+
return self.data.owner.dictionary_()
|
|
446
|
+
|
|
447
|
+
def setOwner(self, name, ownerProperties=None):
|
|
448
|
+
"""
|
|
449
|
+
_setOwner_
|
|
450
|
+
sets the owner of wmspec.
|
|
451
|
+
Takes a name as a mandatory argument, and then a dictionary of properties
|
|
452
|
+
"""
|
|
453
|
+
ownerProperties = ownerProperties or {'dn': 'DEFAULT'}
|
|
454
|
+
|
|
455
|
+
self.data.owner.name = name
|
|
456
|
+
self.data.owner.group = "undefined"
|
|
457
|
+
|
|
458
|
+
if not isinstance(ownerProperties, dict):
|
|
459
|
+
raise Exception("Someone is trying to setOwner without a dictionary")
|
|
460
|
+
|
|
461
|
+
for key in ownerProperties:
|
|
462
|
+
setattr(self.data.owner, key, ownerProperties[key])
|
|
463
|
+
|
|
464
|
+
return
|
|
465
|
+
|
|
466
|
+
def setOwnerDetails(self, name, group, ownerProperties=None):
|
|
467
|
+
"""
|
|
468
|
+
_setOwnerDetails_
|
|
469
|
+
|
|
470
|
+
Set the owner, explicitly requiring the group and user arguments
|
|
471
|
+
"""
|
|
472
|
+
ownerProperties = ownerProperties or {'dn': 'DEFAULT'}
|
|
473
|
+
|
|
474
|
+
self.data.owner.name = name
|
|
475
|
+
self.data.owner.group = group
|
|
476
|
+
|
|
477
|
+
if not isinstance(ownerProperties, dict):
|
|
478
|
+
raise Exception("Someone is trying to setOwnerDetails without a dictionary")
|
|
479
|
+
for key in ownerProperties:
|
|
480
|
+
setattr(self.data.owner, key, ownerProperties[key])
|
|
481
|
+
return
|
|
482
|
+
|
|
483
|
+
def sandbox(self):
|
|
484
|
+
"""
|
|
485
|
+
_sandbox_
|
|
486
|
+
"""
|
|
487
|
+
return self.data.sandbox
|
|
488
|
+
|
|
489
|
+
def setSandbox(self, sandboxPath):
|
|
490
|
+
"""
|
|
491
|
+
_sandbox_
|
|
492
|
+
"""
|
|
493
|
+
self.data.sandbox = sandboxPath
|
|
494
|
+
|
|
495
|
+
def setPriority(self, priority):
|
|
496
|
+
"""
|
|
497
|
+
_setPriority_
|
|
498
|
+
|
|
499
|
+
Set the priority for the workload
|
|
500
|
+
"""
|
|
501
|
+
self.data.request.priority = int(priority)
|
|
502
|
+
|
|
503
|
+
def priority(self):
|
|
504
|
+
"""
|
|
505
|
+
_priority_
|
|
506
|
+
return priority of workload
|
|
507
|
+
"""
|
|
508
|
+
return self.data.request.priority
|
|
509
|
+
|
|
510
|
+
def setStartPolicy(self, policyName, **params):
|
|
511
|
+
"""
|
|
512
|
+
_setStartPolicy_
|
|
513
|
+
|
|
514
|
+
Set the Start policy and its parameters
|
|
515
|
+
"""
|
|
516
|
+
self.data.policies.start.policyName = policyName
|
|
517
|
+
for key, val in viewitems(params):
|
|
518
|
+
setattr(self.data.policies.start, key, val)
|
|
519
|
+
|
|
520
|
+
def startPolicy(self):
|
|
521
|
+
"""
|
|
522
|
+
_startPolicy_
|
|
523
|
+
|
|
524
|
+
Get Start Policy name
|
|
525
|
+
"""
|
|
526
|
+
return getattr(self.data.policies.start, "policyName", None)
|
|
527
|
+
|
|
528
|
+
def startPolicyParameters(self):
|
|
529
|
+
"""
|
|
530
|
+
_startPolicyParameters_
|
|
531
|
+
|
|
532
|
+
Get Start Policy parameters
|
|
533
|
+
"""
|
|
534
|
+
datadict = getattr(self.data.policies, "start")
|
|
535
|
+
return datadict.dictionary_()
|
|
536
|
+
|
|
537
|
+
def setEndPolicy(self, policyName, **params):
|
|
538
|
+
"""
|
|
539
|
+
_setEndPolicy_
|
|
540
|
+
|
|
541
|
+
Set the End policy and its parameters
|
|
542
|
+
"""
|
|
543
|
+
self.data.policies.end.policyName = policyName
|
|
544
|
+
for key, val in viewitems(params):
|
|
545
|
+
setattr(self.data.policies.end, key, val)
|
|
546
|
+
|
|
547
|
+
def endPolicy(self):
|
|
548
|
+
"""
|
|
549
|
+
_endPolicy_
|
|
550
|
+
|
|
551
|
+
Get End Policy name
|
|
552
|
+
"""
|
|
553
|
+
return getattr(self.data.policies.end, "policyName", None)
|
|
554
|
+
|
|
555
|
+
def endPolicyParameters(self):
|
|
556
|
+
"""
|
|
557
|
+
_startPolicyParameters_
|
|
558
|
+
|
|
559
|
+
Get Start Policy parameters
|
|
560
|
+
"""
|
|
561
|
+
datadict = getattr(self.data.policies, "end")
|
|
562
|
+
return datadict.dictionary_()
|
|
563
|
+
|
|
564
|
+
def getTask(self, taskName):
|
|
565
|
+
"""
|
|
566
|
+
_getTask_
|
|
567
|
+
|
|
568
|
+
Retrieve a - top level task - with the given name.
|
|
569
|
+
"""
|
|
570
|
+
task = getattr(self.data.tasks, taskName, None)
|
|
571
|
+
if task is None:
|
|
572
|
+
return None
|
|
573
|
+
return WMTaskHelper(task)
|
|
574
|
+
|
|
575
|
+
def getTaskByName(self, taskName):
|
|
576
|
+
"""
|
|
577
|
+
_getTaskByName_
|
|
578
|
+
|
|
579
|
+
Retrieve a task with the given name in the whole workflow tree.
|
|
580
|
+
"""
|
|
581
|
+
for t in self.taskIterator():
|
|
582
|
+
if t.name() == taskName:
|
|
583
|
+
return t
|
|
584
|
+
for x in t.taskIterator():
|
|
585
|
+
if x.name() == taskName:
|
|
586
|
+
return x
|
|
587
|
+
return None
|
|
588
|
+
|
|
589
|
+
def getTaskByPath(self, taskPath):
|
|
590
|
+
"""
|
|
591
|
+
_getTask_
|
|
592
|
+
|
|
593
|
+
Get a task instance based on the path name
|
|
594
|
+
|
|
595
|
+
"""
|
|
596
|
+
mapping = {}
|
|
597
|
+
for t in self.taskIterator():
|
|
598
|
+
for x in t.taskIterator():
|
|
599
|
+
mapping.__setitem__(x.getPathName, x.name())
|
|
600
|
+
|
|
601
|
+
taskList = parseTaskPath(taskPath)
|
|
602
|
+
|
|
603
|
+
if taskList[0] != self.name(): # should always be workload name first
|
|
604
|
+
msg = "Workload name does not match:\n"
|
|
605
|
+
msg += "requested name %s from workload %s " % (taskList[0],
|
|
606
|
+
self.name())
|
|
607
|
+
raise RuntimeError(msg)
|
|
608
|
+
if len(taskList) < 2:
|
|
609
|
+
# path should include workload and one task
|
|
610
|
+
msg = "Task Path does not contain a top level task:\n"
|
|
611
|
+
msg += taskPath
|
|
612
|
+
raise RuntimeError(msg)
|
|
613
|
+
|
|
614
|
+
topTask = self.getTask(taskList[1])
|
|
615
|
+
if topTask is None:
|
|
616
|
+
msg = "Task /%s/%s Not Found in Workload" % (taskList[0],
|
|
617
|
+
taskList[1])
|
|
618
|
+
raise RuntimeError(msg)
|
|
619
|
+
for x in topTask.taskIterator():
|
|
620
|
+
if x.getPathName() == taskPath:
|
|
621
|
+
return x
|
|
622
|
+
return None
|
|
623
|
+
|
|
624
|
+
def taskIterator(self):
|
|
625
|
+
"""
|
|
626
|
+
generator to traverse top level tasks
|
|
627
|
+
|
|
628
|
+
"""
|
|
629
|
+
for i in self.data.tasks.tasklist:
|
|
630
|
+
yield self.getTask(i)
|
|
631
|
+
|
|
632
|
+
def listAllTaskNodes(self):
|
|
633
|
+
"""
|
|
634
|
+
"""
|
|
635
|
+
result = []
|
|
636
|
+
for t in self.taskIterator():
|
|
637
|
+
if t != None:
|
|
638
|
+
result.extend(t.listNodes())
|
|
639
|
+
return result
|
|
640
|
+
|
|
641
|
+
def listAllTaskPathNames(self):
|
|
642
|
+
"""
|
|
643
|
+
_listAllTaskPathNames_
|
|
644
|
+
|
|
645
|
+
Generate a list of all known task path names including
|
|
646
|
+
tasks that are part of the top level tasks
|
|
647
|
+
"""
|
|
648
|
+
result = []
|
|
649
|
+
for t in self.taskIterator():
|
|
650
|
+
result.extend(t.listPathNames())
|
|
651
|
+
return result
|
|
652
|
+
|
|
653
|
+
def listAllTaskNames(self):
|
|
654
|
+
"""
|
|
655
|
+
_listAllTaskNames_
|
|
656
|
+
|
|
657
|
+
Generate a list of all known task names including
|
|
658
|
+
tasks that are part of the top level tasks
|
|
659
|
+
"""
|
|
660
|
+
result = []
|
|
661
|
+
for t in self.taskIterator():
|
|
662
|
+
result.extend(t.listNames())
|
|
663
|
+
return result
|
|
664
|
+
|
|
665
|
+
def listTasksOfType(self, ttype):
|
|
666
|
+
"""
|
|
667
|
+
_listTasksOfType_
|
|
668
|
+
|
|
669
|
+
Get tasks matching the type provided
|
|
670
|
+
"""
|
|
671
|
+
return [t for t in self.taskIterator() if t.taskType() == ttype]
|
|
672
|
+
|
|
673
|
+
def getAllTasks(self, cpuOnly=False):
|
|
674
|
+
"""
|
|
675
|
+
_getAllTasks_
|
|
676
|
+
|
|
677
|
+
Get all tasks from a workload.
|
|
678
|
+
If cpuOnly flag is set to True, then don't return utilitarian tasks.
|
|
679
|
+
"""
|
|
680
|
+
tasks = []
|
|
681
|
+
for n in self.listAllTaskPathNames():
|
|
682
|
+
task = self.getTaskByPath(taskPath=n)
|
|
683
|
+
if cpuOnly and task.taskType() in ["Cleanup", "LogCollect"]:
|
|
684
|
+
continue
|
|
685
|
+
tasks.append(task)
|
|
686
|
+
|
|
687
|
+
return tasks
|
|
688
|
+
|
|
689
|
+
def addTask(self, wmTask):
|
|
690
|
+
"""
|
|
691
|
+
_addTask_
|
|
692
|
+
|
|
693
|
+
Add a Task instance either naked or wrapped in a helper
|
|
694
|
+
|
|
695
|
+
"""
|
|
696
|
+
task = wmTask
|
|
697
|
+
if isinstance(wmTask, WMTaskHelper):
|
|
698
|
+
task = wmTask.data
|
|
699
|
+
helper = wmTask
|
|
700
|
+
else:
|
|
701
|
+
helper = WMTaskHelper(wmTask)
|
|
702
|
+
taskName = helper.name()
|
|
703
|
+
pathName = "/%s/%s" % (self.name(), taskName)
|
|
704
|
+
helper.setPathName(pathName)
|
|
705
|
+
if taskName in self.listAllTaskNodes():
|
|
706
|
+
msg = "Duplicate task name: %s\n" % taskName
|
|
707
|
+
msg += "Known tasks: %s\n" % self.listAllTaskNodes()
|
|
708
|
+
raise RuntimeError(msg)
|
|
709
|
+
self.data.tasks.tasklist.append(taskName)
|
|
710
|
+
setattr(self.data.tasks, taskName, task)
|
|
711
|
+
return
|
|
712
|
+
|
|
713
|
+
def newTask(self, taskName):
|
|
714
|
+
"""
|
|
715
|
+
_newTask_
|
|
716
|
+
|
|
717
|
+
Factory like interface for adding a toplevel task to this
|
|
718
|
+
workload
|
|
719
|
+
|
|
720
|
+
"""
|
|
721
|
+
if taskName in self.listAllTaskNodes():
|
|
722
|
+
msg = "Duplicate task name: %s\n" % taskName
|
|
723
|
+
msg += "Known tasks: %s\n" % self.listAllTaskNodes()
|
|
724
|
+
raise RuntimeError(msg)
|
|
725
|
+
task = WMTask(taskName)
|
|
726
|
+
helper = WMTaskHelper(task)
|
|
727
|
+
helper.setTopOfTree()
|
|
728
|
+
self.addTask(helper)
|
|
729
|
+
return helper
|
|
730
|
+
|
|
731
|
+
def removeTask(self, taskName):
|
|
732
|
+
"""
|
|
733
|
+
_removeTask_
|
|
734
|
+
|
|
735
|
+
Remove given task with given name
|
|
736
|
+
|
|
737
|
+
"""
|
|
738
|
+
self.data.tasks.__delattr__(taskName)
|
|
739
|
+
self.data.tasks.tasklist.remove(taskName)
|
|
740
|
+
return
|
|
741
|
+
|
|
742
|
+
def getSiteWhitelist(self):
|
|
743
|
+
"""
|
|
744
|
+
Get the site white list for the workflow
|
|
745
|
+
:return: site white list
|
|
746
|
+
"""
|
|
747
|
+
# loop over tasks to see if there is white lists
|
|
748
|
+
taskIterator = self.taskIterator()
|
|
749
|
+
siteList = []
|
|
750
|
+
for task in taskIterator:
|
|
751
|
+
for site in task.siteWhitelist():
|
|
752
|
+
siteList.append(site)
|
|
753
|
+
return list(set(siteList))
|
|
754
|
+
|
|
755
|
+
def setSiteWhitelist(self, siteWhitelist):
|
|
756
|
+
"""
|
|
757
|
+
_setSiteWhitelist_
|
|
758
|
+
|
|
759
|
+
Set the site white list for the top level tasks in the workload.
|
|
760
|
+
"""
|
|
761
|
+
if not isinstance(siteWhitelist, list):
|
|
762
|
+
siteWhitelist = [siteWhitelist]
|
|
763
|
+
|
|
764
|
+
taskIterator = self.taskIterator()
|
|
765
|
+
|
|
766
|
+
for task in taskIterator:
|
|
767
|
+
task.setSiteWhitelist(siteWhitelist)
|
|
768
|
+
|
|
769
|
+
return
|
|
770
|
+
|
|
771
|
+
def getSiteBlacklist(self):
|
|
772
|
+
"""
|
|
773
|
+
Get the site black list for the workflow
|
|
774
|
+
:return: site black list
|
|
775
|
+
"""
|
|
776
|
+
# loop over tasks to see if there is black lists
|
|
777
|
+
taskIterator = self.getAllTasks(cpuOnly=False)
|
|
778
|
+
siteList = []
|
|
779
|
+
for task in taskIterator:
|
|
780
|
+
for site in task.siteBlacklist():
|
|
781
|
+
siteList.append(site)
|
|
782
|
+
return list(set(siteList))
|
|
783
|
+
|
|
784
|
+
def setSiteBlacklist(self, siteBlacklist):
|
|
785
|
+
"""
|
|
786
|
+
_setSiteBlacklist_
|
|
787
|
+
|
|
788
|
+
Set the site black list for all tasks in the workload.
|
|
789
|
+
"""
|
|
790
|
+
if not isinstance(siteBlacklist, type([])):
|
|
791
|
+
siteBlacklist = [siteBlacklist]
|
|
792
|
+
|
|
793
|
+
taskIterator = self.getAllTasks(cpuOnly=False)
|
|
794
|
+
|
|
795
|
+
for task in taskIterator:
|
|
796
|
+
task.setSiteBlacklist(siteBlacklist)
|
|
797
|
+
|
|
798
|
+
return
|
|
799
|
+
|
|
800
|
+
def setBlockWhitelist(self, blockWhitelist, initialTask=None):
|
|
801
|
+
"""
|
|
802
|
+
_setBlockWhitelist_
|
|
803
|
+
|
|
804
|
+
Set the block white list for all tasks that have an input dataset
|
|
805
|
+
defined.
|
|
806
|
+
"""
|
|
807
|
+
if not isinstance(blockWhitelist, type([])):
|
|
808
|
+
blockWhitelist = [blockWhitelist]
|
|
809
|
+
|
|
810
|
+
if initialTask:
|
|
811
|
+
taskIterator = initialTask.childTaskIterator()
|
|
812
|
+
else:
|
|
813
|
+
taskIterator = self.taskIterator()
|
|
814
|
+
|
|
815
|
+
for task in taskIterator:
|
|
816
|
+
if task.getInputDatasetPath():
|
|
817
|
+
task.setInputBlockWhitelist(blockWhitelist)
|
|
818
|
+
self.setBlockWhitelist(blockWhitelist, task)
|
|
819
|
+
|
|
820
|
+
return
|
|
821
|
+
|
|
822
|
+
def setBlockBlacklist(self, blockBlacklist, initialTask=None):
|
|
823
|
+
"""
|
|
824
|
+
_setBlockBlacklist_
|
|
825
|
+
|
|
826
|
+
Set the block black list for all tasks that have an input dataset
|
|
827
|
+
defined.
|
|
828
|
+
"""
|
|
829
|
+
if not isinstance(blockBlacklist, type([])):
|
|
830
|
+
blockBlacklist = [blockBlacklist]
|
|
831
|
+
|
|
832
|
+
if initialTask:
|
|
833
|
+
taskIterator = initialTask.childTaskIterator()
|
|
834
|
+
else:
|
|
835
|
+
taskIterator = self.taskIterator()
|
|
836
|
+
|
|
837
|
+
for task in taskIterator:
|
|
838
|
+
if task.getInputDatasetPath():
|
|
839
|
+
task.setInputBlockBlacklist(blockBlacklist)
|
|
840
|
+
self.setBlockBlacklist(blockBlacklist, task)
|
|
841
|
+
|
|
842
|
+
return
|
|
843
|
+
|
|
844
|
+
def setRunWhitelist(self, runWhitelist, initialTask=None):
|
|
845
|
+
"""
|
|
846
|
+
_setRunWhitelist_
|
|
847
|
+
|
|
848
|
+
Set the run white list for all tasks that have an input dataset defined.
|
|
849
|
+
"""
|
|
850
|
+
if not isinstance(runWhitelist, type([])):
|
|
851
|
+
runWhitelist = [runWhitelist]
|
|
852
|
+
|
|
853
|
+
if initialTask:
|
|
854
|
+
taskIterator = initialTask.childTaskIterator()
|
|
855
|
+
else:
|
|
856
|
+
taskIterator = self.taskIterator()
|
|
857
|
+
|
|
858
|
+
for task in taskIterator:
|
|
859
|
+
if task.getInputDatasetPath():
|
|
860
|
+
task.setInputRunWhitelist(runWhitelist)
|
|
861
|
+
task.setSplittingParameters(runWhitelist=runWhitelist)
|
|
862
|
+
self.setRunWhitelist(runWhitelist, task)
|
|
863
|
+
|
|
864
|
+
return
|
|
865
|
+
|
|
866
|
+
def setRunBlacklist(self, runBlacklist, initialTask=None):
|
|
867
|
+
"""
|
|
868
|
+
_setRunBlacklist_
|
|
869
|
+
|
|
870
|
+
Set the run black list for all tasks that have an input dataset defined.
|
|
871
|
+
"""
|
|
872
|
+
if not isinstance(runBlacklist, type([])):
|
|
873
|
+
runBlacklist = [runBlacklist]
|
|
874
|
+
|
|
875
|
+
if initialTask:
|
|
876
|
+
taskIterator = initialTask.childTaskIterator()
|
|
877
|
+
else:
|
|
878
|
+
taskIterator = self.taskIterator()
|
|
879
|
+
|
|
880
|
+
for task in taskIterator:
|
|
881
|
+
if task.getInputDatasetPath():
|
|
882
|
+
task.setInputRunBlacklist(runBlacklist)
|
|
883
|
+
self.setRunBlacklist(runBlacklist, task)
|
|
884
|
+
|
|
885
|
+
return
|
|
886
|
+
|
|
887
|
+
def updateLFNsAndDatasets(self, runNumber=None):
|
|
888
|
+
"""
|
|
889
|
+
_updateLFNsAndDatasets_
|
|
890
|
+
|
|
891
|
+
Update all the output LFNs and data names for all tasks in the workflow.
|
|
892
|
+
This needs to be called after updating the acquisition era, processing
|
|
893
|
+
version or merged/unmerged lfn base.
|
|
894
|
+
"""
|
|
895
|
+
taskIterator = self.taskIterator()
|
|
896
|
+
|
|
897
|
+
for task in taskIterator:
|
|
898
|
+
task.updateLFNsAndDatasets(runNumber=runNumber)
|
|
899
|
+
return
|
|
900
|
+
|
|
901
|
+
def updateDatasetName(self, mergeTask, datasetName):
|
|
902
|
+
"""
|
|
903
|
+
_updateDatasetName_
|
|
904
|
+
|
|
905
|
+
Updates the dataset name argument of the mergeTask's harvesting
|
|
906
|
+
children tasks
|
|
907
|
+
"""
|
|
908
|
+
for task in mergeTask.childTaskIterator():
|
|
909
|
+
if task.taskType() == "Harvesting":
|
|
910
|
+
for stepName in task.listAllStepNames():
|
|
911
|
+
stepHelper = task.getStepHelper(stepName)
|
|
912
|
+
|
|
913
|
+
if stepHelper.stepType() == "CMSSW":
|
|
914
|
+
cmsswHelper = stepHelper.getTypeHelper()
|
|
915
|
+
cmsswHelper.setDatasetName(datasetName)
|
|
916
|
+
|
|
917
|
+
return
|
|
918
|
+
|
|
919
|
+
def setCoresAndStreams(self, cores, nStreams, initialTask=None):
|
|
920
|
+
"""
|
|
921
|
+
_setCoresAndStreams_
|
|
922
|
+
|
|
923
|
+
Update the number of cores and event streams for each task in the spec.
|
|
924
|
+
|
|
925
|
+
One can update only the number of cores, which will set the number of streams to 0 (default).
|
|
926
|
+
However, updating only the number of streams is not allowed, it's coupled to # of cores.
|
|
927
|
+
|
|
928
|
+
:param cores: number of cores. Can be either an integer or a dict key'ed by taskname
|
|
929
|
+
:param nStreams: number of streams. Can be either an integer or a dict key'ed by taskname
|
|
930
|
+
:param initialTask: parent task object
|
|
931
|
+
"""
|
|
932
|
+
if not cores:
|
|
933
|
+
return
|
|
934
|
+
|
|
935
|
+
if initialTask:
|
|
936
|
+
taskIterator = initialTask.childTaskIterator()
|
|
937
|
+
else:
|
|
938
|
+
taskIterator = self.taskIterator()
|
|
939
|
+
|
|
940
|
+
for task in taskIterator:
|
|
941
|
+
task.setNumberOfCores(cores, nStreams)
|
|
942
|
+
self.setCoresAndStreams(cores, nStreams, task)
|
|
943
|
+
|
|
944
|
+
return
|
|
945
|
+
|
|
946
|
+
def setGPUSettings(self, requiresGPU, gpuParams, initialTask=None):
|
|
947
|
+
"""
|
|
948
|
+
Setter method for the workload GPU parameters.
|
|
949
|
+
It's responsible for setting whether GPUs are required or not; and
|
|
950
|
+
which GPU parameters to be used for that. This is done for every
|
|
951
|
+
task of this spec.
|
|
952
|
+
:param requiresGPU: string defining whether GPUs are needed. For TaskChains, it
|
|
953
|
+
could be a dictionary key'ed by the taskname.
|
|
954
|
+
:param gpuParams: GPU settings. A JSON encoded object, from either a None object
|
|
955
|
+
or a dictionary. For TaskChains, it could be a dictionary key'ed by the taskname
|
|
956
|
+
:param initialTask: parent task object
|
|
957
|
+
"""
|
|
958
|
+
if not requiresGPU:
|
|
959
|
+
return
|
|
960
|
+
|
|
961
|
+
if initialTask:
|
|
962
|
+
taskIterator = initialTask.childTaskIterator()
|
|
963
|
+
else:
|
|
964
|
+
taskIterator = self.taskIterator()
|
|
965
|
+
|
|
966
|
+
for task in taskIterator:
|
|
967
|
+
task.setTaskGPUSettings(requiresGPU, gpuParams)
|
|
968
|
+
self.setGPUSettings(requiresGPU, gpuParams, task)
|
|
969
|
+
return
|
|
970
|
+
|
|
971
|
+
+    def setMemory(self, memory, initialTask=None):
+        """
+        _setMemory_
+
+        Update the memory requirement for each task in the spec; memory
+        can be either an integer or a dictionary key'ed by the task name.
+        """
+        if not memory:
+            return
+
+        if initialTask:
+            taskIterator = initialTask.childTaskIterator()
+        else:
+            taskIterator = self.taskIterator()
+
+        for task in taskIterator:
+            if isinstance(memory, dict):
+                mem = memory.get(task.name())
+            else:
+                mem = memory
+            task.setJobResourceInformation(memoryReq=mem)
+            self.setMemory(memory, task)
+
+        return
+
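setMemory and the setTimePerEvent method that follows share one pattern: resolve an int-or-dict argument per task, apply it, then recurse into the child tasks. A condensed, standalone sketch of that pattern (function and variable names are illustrative, not part of the API):

# Standalone sketch of the int-or-dict walk used by setMemory/setTimePerEvent.
def applyPerTask(value, tasks):
    for task in tasks:
        # A dict is keyed by task name; anything else applies to every task.
        resolved = value.get(task.name()) if isinstance(value, dict) else value
        task.setJobResourceInformation(memoryReq=resolved)
        applyPerTask(value, task.childTaskIterator())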
+    def setTimePerEvent(self, timePerEvent, initialTask=None):
+        """
+        _setTimePerEvent_
+
+        Update the TimePerEvent requirement for each task in the spec; timePerEvent
+        can be either an integer or a dictionary key'ed by the task name.
+        """
+        # don't set it for utilitarian/merge tasks
+        if not timePerEvent:
+            return
+
+        if initialTask:
+            taskIterator = initialTask.childTaskIterator()
+        else:
+            taskIterator = self.taskIterator()
+
+        for task in taskIterator:
+            if isinstance(timePerEvent, dict):
+                timePE = timePerEvent.get(task.name())
+            else:
+                timePE = timePerEvent
+            task.setJobResourceInformation(timePerEvent=timePE)
+            self.setTimePerEvent(timePerEvent, task)
+
+        return
+
+    def setAcquisitionEra(self, acquisitionEras):
+        """
+        _setAcquisitionEra_
+
+        Change the acquisition era for all tasks in the spec and then update
+        all of the output LFNs and datasets to use the new acquisition era.
+        """
+        stepNameMapping = self.getStepMapping()
+        for task in self.taskIterator():
+            task.setAcquisitionEra(acquisitionEras, stepChainMap=stepNameMapping)
+
+        self.updateLFNsAndDatasets()
+        # set acquisitionEra for workload (need to refactor)
+        self.data.properties.acquisitionEra = acquisitionEras
+        return
+
+    def setProcessingVersion(self, processingVersions):
+        """
+        _setProcessingVersion_
+
+        Change the processing version for all tasks in the spec and then update
+        all of the output LFNs and datasets to use the new processing version.
+
+        :param processingVersions: can be any data-type but it is set from StdBase,
+            which performs the input data sanitization/type checks already.
+        """
+        stepNameMapping = self.getStepMapping()
+
+        for task in self.taskIterator():
+            task.setProcessingVersion(processingVersions, stepChainMap=stepNameMapping)
+
+        self.updateLFNsAndDatasets()
+        self.data.properties.processingVersion = processingVersions
+        return
+
+    def setProcessingString(self, processingStrings):
+        """
+        _setProcessingString_
+
+        Change the processing string for all tasks in the spec and then update
+        all of the output LFNs and datasets to use the new processing string.
+        """
+        stepNameMapping = self.getStepMapping()
+
+        for task in self.taskIterator():
+            task.setProcessingString(processingStrings, stepChainMap=stepNameMapping)
+
+        self.updateLFNsAndDatasets()
+        self.data.properties.processingString = processingStrings
+        return
+
+    def setLumiList(self, lumiLists):
+        """
+        _setLumiList_
+
+        Change the lumi mask for all tasks in the spec
+        """
+
+        for task in self.taskIterator():
+            task.setLumiMask(lumiLists, override=False)
+
+        # set lumiList for workload (need to refactor)
+        self.data.properties.lumiList = lumiLists
+        return
+
+    def setTaskProperties(self, requestArgs):
+        # FIXME (Alan): I don't think it works, given that the assignment
+        # parameters never have the TaskChain parameter...
+        if 'TaskChain' not in requestArgs:
+            return
+        numTasks = requestArgs['TaskChain']
+        taskArgs = []
+        for i in range(numTasks):
+            taskArgs.append(requestArgs["Task%s" % (i + 1)])
+
+        for prop in taskArgs:
+            taskName = prop['TaskName']
+            for task in self.getAllTasks():
+                if task.name() == taskName:
+                    del prop['TaskName']
+                    task.setProperties(prop)
+                    break
+        return
+
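For orientation, the argument shape this method expects looks roughly like the following (an illustrative sketch; keys other than TaskChain, TaskN and TaskName are hypothetical):

# Illustrative shape only; 'SomeProperty' is a hypothetical per-task key.
requestArgs = {
    "TaskChain": 2,
    "Task1": {"TaskName": "GENSIM", "SomeProperty": "value1"},
    "Task2": {"TaskName": "DIGI", "SomeProperty": "value2"},
}
# setTaskProperties matches each TaskN block to the task with the same name
# and hands the remaining key/value pairs to task.setProperties().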
+    def getAcquisitionEra(self, taskName=None):
+        """
+        _getAcquisitionEra_
+
+        Get the acquisition era
+        """
+        if taskName and isinstance(self.data.properties.acquisitionEra, dict):
+            return self.data.properties.acquisitionEra.get(taskName, None)
+        return self.data.properties.acquisitionEra
+
+    def getRequestType(self):
+        """
+        _getRequestType_
+
+        Get the Request type (ReReco, TaskChain, etc)
+        """
+        if getattr(self.data, 'requestType', None):
+            return getattr(self.data, "requestType")
+
+        if hasattr(self.data, "request"):
+            if hasattr(self.data.request, "schema"):
+                return getattr(self.data.request.schema, "RequestType", None)
+        return None
+
+    def getProcessingVersion(self, taskName=None):
+        """
+        _getProcessingVersion_
+
+        Get the processingVersion
+        """
+
+        if taskName and isinstance(self.data.properties.processingVersion, dict):
+            return self.data.properties.processingVersion.get(taskName, 0)
+        return self.data.properties.processingVersion
+
+    def getProcessingString(self, taskName=None):
+        """
+        _getProcessingString_
+
+        Get the processingString
+        """
+
+        if taskName and isinstance(self.data.properties.processingString, dict):
+            return self.data.properties.processingString.get(taskName, None)
+        return self.data.properties.processingString
+
+    def getLumiList(self):
+        """
+        _getLumiList_
+
+        Get the lumiList from the workload (task level should have the same lumiList)
+        """
+        return self.data.properties.lumiList
+
+    def setValidStatus(self, validStatus):
+        """
+        _setValidStatus_
+
+        Sets the status that will be reported to the processed dataset
+        in DBS
+        """
+
+        self.data.properties.validStatus = validStatus
+        return
+
+    def getValidStatus(self):
+        """
+        _getValidStatus_
+
+        Get the valid status for DBS
+        """
+
+        return getattr(self.data.properties, 'validStatus', None)
+
+    def setAllowOpportunistic(self, allowOpport):
+        """
+        _setAllowOpportunistic_
+
+        Set a flag which enables the workflow to run on cloud resources.
+        """
+        self.data.properties.allowOpportunistic = allowOpport
+        return
+
+    def getAllowOpportunistic(self):
+        """
+        _getAllowOpportunistic_
+
+        Retrieve the AllowOpportunistic flag for the workflow
+        """
+        return getattr(self.data.properties, 'allowOpportunistic', None)
+
+    def setPrepID(self, prepID):
+        """
+        _setPrepID_
+
+        Set the prepID for all the tasks below
+        """
+
+        taskIterator = self.taskIterator()
+        for task in taskIterator:
+            task.setPrepID(prepID)
+        self.data.properties.prepID = prepID
+        return
+
+    def getPrepID(self):
+        """
+        _getPrepID_
+
+        Get the prepID for the workflow
+        """
+        return getattr(self.data.properties, 'prepID', None)
+
+    def setDbsUrl(self, dbsUrl):
+        """
+        _setDbsUrl_
+
+        Set the workload level DbsUrl.
+        """
+        # TODO: this replace can be removed in one year from now, thus March 2022
+        dbsUrl = dbsUrl.replace("cmsweb.cern.ch", "cmsweb-prod.cern.ch")
+        # stripping any end slashes, which no longer work in the Go-based server
+        self.data.dbsUrl = dbsUrl.rstrip("/")
+
+    def getDbsUrl(self):
+        """
+        _getDbsUrl_
+
+        Get the DbsUrl specified for the input dataset.
+        """
+        if getattr(self.data, 'dbsUrl', None):
+            return getattr(self.data, "dbsUrl")
+
+        if hasattr(self.data, "request"):
+            if hasattr(self.data.request, "schema"):
+                if not getattr(self.data.request.schema, "DbsUrl", None):
+                    return "https://cmsweb-prod.cern.ch/dbs/prod/global/DBSReader"
+
+        return getattr(self.data.request.schema, "DbsUrl")
+
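The normalization in setDbsUrl is easiest to see with a concrete input (illustrative values only):

# Illustrative: a legacy URL with a trailing slash...
workload.setDbsUrl("https://cmsweb.cern.ch/dbs/prod/global/DBSReader/")
# ...is stored with the host rewritten and the slash stripped:
# "https://cmsweb-prod.cern.ch/dbs/prod/global/DBSReader"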
+    def setStatus(self, status):
+        """
+        _setStatus_
+
+        Set the status of the workflow
+
+        :param status: the request status
+        """
+        self.data.request.status = status
+        return
+
+    def getStatus(self):
+        """
+        _getStatus_
+
+        Get the status for the workflow
+        """
+        return getattr(self.data.request, 'status', None)
+
+    def setCampaign(self, campaign):
+        """
+        _setCampaign_
+
+        Set the campaign to which this workflow belongs
+        Optional
+        """
+        self.data.properties.campaign = campaign
+        return
+
+    def getCampaign(self):
+        """
+        _getCampaign_
+
+        Get the campaign for the workflow
+        """
+        return getattr(self.data.properties, 'campaign', None)
+
+    def setLFNBase(self, mergedLFNBase, unmergedLFNBase, runNumber=None):
+        """
+        _setLFNBase_
+
+        Set the merged and unmerged base LFNs for all tasks. Update all of the
+        output LFNs to use them.
+        """
+        self.data.properties.mergedLFNBase = mergedLFNBase
+        self.data.properties.unmergedLFNBase = unmergedLFNBase
+
+        # set the LFN base of all child tasks
+        for task in self.taskIterator():
+            task.setLFNBase(mergedLFNBase, unmergedLFNBase)
+        self.updateLFNsAndDatasets(runNumber=runNumber)
+        return
+
+    def setMergeParameters(self, minSize, maxSize, maxEvents,
+                           initialTask=None):
+        """
+        _setMergeParameters_
+
+        Set the parameters for every merge task in the workload. Also update
+        the min merge size of every CMSSW step.
+        """
+        if initialTask:
+            taskIterator = initialTask.childTaskIterator()
+        else:
+            taskIterator = self.taskIterator()
+
+        for task in taskIterator:
+            for stepName in task.listAllStepNames():
+                stepHelper = task.getStepHelper(stepName)
+                if stepHelper.stepType() == "StageOut" and stepHelper.minMergeSize() > 0:
+                    stepHelper.setMinMergeSize(minSize, maxEvents)
+
+            if task.taskType() == "Merge":
+                task.setSplittingParameters(min_merge_size=minSize,
+                                            max_merge_size=maxSize,
+                                            max_merge_events=maxEvents)
+
+            self.setMergeParameters(minSize, maxSize, maxEvents, task)
+
+        return
+
+    def setWorkQueueSplitPolicy(self, policyName, splitAlgo, splitArgs, **kwargs):
+        """
+        Sets the WorkQueue start policy.
+
+        :param policyName: string with the policy name. Supported values should match the
+            WMCore/WorkQueue/Policy/Start module names (Dataset, Block, MonteCarlo or ResubmitBlock)
+        :param splitAlgo: string with the job splitting algorithm name. Supported values should
+            match the WMCore/JobSplitting module names.
+        :param splitArgs: dictionary with job splitting arguments
+        :param kwargs: dictionary with additional arguments that can be passed directly to
+            the startPolicyArgs
+        :return: None
+        """
+        SplitAlgoToStartPolicy = {"FileBased": ["NumberOfFiles"],
+                                  "EventBased": ["NumberOfEvents",
+                                                 "NumberOfEventsPerLumi"],
+                                  "LumiBased": ["NumberOfLumis"],
+                                  "Harvest": ["NumberOfRuns"],
+                                  "EventAwareLumiBased": ["NumberOfEvents"]}
+        SplitAlgoToArgMap = {"NumberOfFiles": "files_per_job",
+                             "NumberOfEvents": "events_per_job",
+                             "NumberOfLumis": "lumis_per_job",
+                             "NumberOfRuns": "runs_per_job",
+                             "NumberOfEventsPerLumi": "events_per_lumi"}
+        startPolicyArgs = {'SplittingAlgo': splitAlgo}
+        startPolicyArgs.update(kwargs)
+
+        sliceTypes = SplitAlgoToStartPolicy.get(splitAlgo, ["NumberOfFiles"])
+        sliceType = sliceTypes[0]
+        sliceSize = splitArgs.get(SplitAlgoToArgMap[sliceType], 1)
+        startPolicyArgs["SliceType"] = sliceType
+        startPolicyArgs["SliceSize"] = sliceSize
+
+        if len(sliceTypes) > 1:
+            subSliceType = sliceTypes[1]
+            subSliceSize = splitArgs.get(SplitAlgoToArgMap[subSliceType],
+                                         sliceSize)
+            startPolicyArgs["SubSliceType"] = subSliceType
+            startPolicyArgs["SubSliceSize"] = subSliceSize
+
+        self.setStartPolicy(policyName, **startPolicyArgs)
+        self.setEndPolicy("SingleShot")
+        return
+
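To make the two lookup tables above concrete: for an EventBased split, the first slice type drives SliceType/SliceSize and the second one drives the sub-slice. A worked call with illustrative values:

# Illustrative: EventBased splitting with 500 events per job, 100 per lumi.
workload.setWorkQueueSplitPolicy("MonteCarlo", "EventBased",
                                 {"events_per_job": 500, "events_per_lumi": 100})
# The start policy then receives:
#   SliceType="NumberOfEvents",           SliceSize=500
#   SubSliceType="NumberOfEventsPerLumi", SubSliceSize=100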
+    def setJobSplittingParameters(self, taskPath, splitAlgo, splitArgs, updateOnly=False):
+        """
+        _setJobSplittingParameters_
+
+        Update the job splitting algorithm and arguments for the given task.
+        """
+        taskHelper = self.getTaskByPath(taskPath)
+        if taskHelper is None:
+            return
+
+        if taskHelper.isTopOfTree():
+            self.setWorkQueueSplitPolicy(self.startPolicy(), splitAlgo, splitArgs)
+
+        # There are currently two merge algorithms in WMBS. WMBSMergeBySize
+        # will reassemble the parent file. This is only necessary for
+        # EventBased processing where we break up lumi sections. Everything
+        # else can use ParentlessMergeBySize which won't reassemble parents.
+        # Everything defaults to ParentlessMergeBySize as it is much less load
+        # on the database.
+        minMergeSize = None
+        maxMergeEvents = None
+        for childTask in taskHelper.childTaskIterator():
+            if childTask.taskType() == "Merge":
+                if splitAlgo == "EventBased" and taskHelper.taskType() != "Production":
+                    mergeAlgo = "WMBSMergeBySize"
+                    for stepName in childTask.listAllStepNames():
+                        stepHelper = childTask.getStepHelper(stepName)
+                        if stepHelper.stepType() == "CMSSW":
+                            stepCmsswHelper = stepHelper.getTypeHelper()
+                            stepCmsswHelper.setSkipBadFiles(False)
+                else:
+                    mergeAlgo = "ParentlessMergeBySize"
+
+                childSplitParams = childTask.jobSplittingParameters()
+                minMergeSize = childSplitParams["min_merge_size"]
+                maxMergeEvents = childSplitParams["max_merge_events"]
+                if not updateOnly:
+                    del childSplitParams["algorithm"]
+                    del childSplitParams["siteWhitelist"]
+                    del childSplitParams["siteBlacklist"]
+                    childTask.setSplittingAlgorithm(mergeAlgo, **childSplitParams)
+                else:
+                    childTask.updateSplittingParameters(mergeAlgo, **childSplitParams)
+        # Set the splitting algorithm for the task. If the split algo is
+        # EventBased, we need to disable straight to merge. If this isn't an
+        # EventBased algo we need to enable straight to merge. If straight
+        # to merge is disabled then keep it that way.
+        if not updateOnly:
+            taskHelper.setSplittingAlgorithm(splitAlgo, **splitArgs)
+        else:
+            taskHelper.updateSplittingParameters(splitAlgo, **splitArgs)
+        for stepName in taskHelper.listAllStepNames():
+            stepHelper = taskHelper.getStepHelper(stepName)
+            if stepHelper.stepType() == "StageOut":
+                if splitAlgo != "EventBased" and stepHelper.minMergeSize() != -1 and minMergeSize:
+                    stepHelper.setMinMergeSize(minMergeSize, maxMergeEvents)
+                else:
+                    stepHelper.disableStraightToMerge()
+            if stepHelper.stepType() == "CMSSW" and splitAlgo == "WMBSMergeBySize" \
+                    and stepHelper.getSkipBadFiles():
+                stepHelper.setSkipBadFiles(False)
+
+            if taskHelper.isTopOfTree() and taskHelper.taskType() == "Production" and stepName == "cmsRun1":
+                # set it only for the first cmsRun in multi-step tasks
+                stepHelper.setEventsPerLumi(splitArgs.get("events_per_lumi", None))
+        return
+
+    def listJobSplittingParametersByTask(self, initialTask=None, performance=True):
+        """
+        _listJobSplittingParametersByTask_
+
+        Create a dictionary that maps task names to job splitting parameters.
+        """
+        output = {}
+
+        if initialTask:
+            taskIterator = initialTask.childTaskIterator()
+        else:
+            taskIterator = self.taskIterator()
+
+        for task in taskIterator:
+            taskName = task.getPathName()
+            taskParams = task.jobSplittingParameters(performance)
+            del taskParams["siteWhitelist"]
+            del taskParams["siteBlacklist"]
+            output[taskName] = taskParams
+            output[taskName]["type"] = task.taskType()
+            output.update(self.listJobSplittingParametersByTask(task, performance))
+
+        return output
+
+    def listInputDatasets(self):
+        """
+        _listInputDatasets_
+
+        List all the input datasets in the workload
+        """
+        inputDatasets = []
+
+        taskIterator = self.taskIterator()
+        for task in taskIterator:
+            path = task.getInputDatasetPath()
+            if path:
+                inputDatasets.append(path)
+
+        return inputDatasets
+
+    def listOutputDatasets(self, initialTask=None):
+        """
+        _listOutputDatasets_
+
+        List the names of all the datasets produced by this workflow.
+        """
+        outputDatasets = []
+
+        if initialTask:
+            taskIterator = initialTask.childTaskIterator()
+        else:
+            taskIterator = self.taskIterator()
+
+        for task in taskIterator:
+            for stepName in task.listAllStepNames():
+                stepHelper = task.getStepHelper(stepName)
+
+                if not getattr(stepHelper.data.output, "keep", True):
+                    continue
+
+                if stepHelper.stepType() == "CMSSW":
+                    for outputModuleName in stepHelper.listOutputModules():
+                        # Only consider non-transient output
+                        outputModule = stepHelper.getOutputModule(outputModuleName)
+                        if getattr(outputModule, "transient", False):
+                            continue
+                        outputDataset = "/%s/%s/%s" % (outputModule.primaryDataset,
+                                                       outputModule.processedDataset,
+                                                       outputModule.dataTier)
+                        outputDatasets.append(outputDataset)
+
+            moreDatasets = self.listOutputDatasets(task)
+            outputDatasets.extend(moreDatasets)
+
+        return outputDatasets
+
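Each entry follows the usual /primary/processed/tier dataset notation built by the string formatting above; for instance (illustrative values only):

# Illustrative: an output module with
#   primaryDataset="SingleMuon", processedDataset="Run2023C-PromptReco-v1",
#   dataTier="AOD"
# would be listed as:
#   "/SingleMuon/Run2023C-PromptReco-v1/AOD"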
+    def listAllOutputModulesLFNBases(self, initialTask=None, onlyUnmerged=True):
+        """
+        _listAllOutputModulesLFNBases_
+
+        List all output LFN bases defined in this workload object.
+        """
+        listLFNBases = set()
+        if initialTask:
+            taskIterator = initialTask.childTaskIterator()
+        else:
+            taskIterator = self.taskIterator()
+
+        for task in taskIterator:
+            for stepName in task.listAllStepNames():
+                outModule = task.getOutputModulesForStep(stepName)
+                for module in viewvalues(outModule.dictionary_()):
+                    lfnBase = getattr(module, "lfnBase", "")
+                    if not onlyUnmerged and lfnBase:
+                        listLFNBases.add(lfnBase)
+                    elif lfnBase.startswith('/store/unmerged'):
+                        listLFNBases.add(lfnBase)
+            # recursively go through all the tasks
+            listLFNBases.update(self.listAllOutputModulesLFNBases(task, onlyUnmerged))
+
+        return list(listLFNBases)
+
+    def listPileupDatasets(self, initialTask=None):
+        """
+        _listPileUpDataset_
+
+        Returns a dictionary keyed by DBS url, mapping each url to the set
+        of pile-up datasets this workload requires from it
+        """
+        pileupDatasets = {}
+
+        if initialTask:
+            taskIterator = initialTask.childTaskIterator()
+        else:
+            taskIterator = self.taskIterator()
+
+        for task in taskIterator:
+            for stepName in task.listAllStepNames():
+                stepHelper = task.getStepHelper(stepName)
+                if stepHelper.stepType() == "CMSSW":
+                    pileupSection = stepHelper.getPileup()
+                    if pileupSection is None:
+                        continue
+                    dbsUrl = stepHelper.data.dbsUrl
+                    if dbsUrl not in pileupDatasets:
+                        pileupDatasets[dbsUrl] = set()
+                    for pileupType in pileupSection.listSections_():
+                        datasets = getattr(getattr(stepHelper.data.pileup, pileupType), "dataset")
+                        pileupDatasets[dbsUrl].update(datasets)
+
+            pileupDatasets.update(self.listPileupDatasets(task))
+
+        return pileupDatasets
+
+    def listOutputProducingTasks(self, initialTask=None):
+        """
+        _listOutputProducingTasks_
+
+        List the paths to any task capable of producing merged output
+        """
+        taskList = []
+
+        if initialTask:
+            taskIterator = initialTask.childTaskIterator()
+        else:
+            taskIterator = self.taskIterator()
+
+        for task in taskIterator:
+            for stepName in task.listAllStepNames():
+                stepHelper = task.getStepHelper(stepName)
+                if not getattr(stepHelper.data.output, "keep", True):
+                    continue
+
+                if stepHelper.stepType() == "CMSSW":
+                    if stepHelper.listOutputModules():
+                        taskList.append(task.getPathName())
+                        break
+
+            taskList.extend(self.listOutputProducingTasks(task))
+
+        return taskList
+
+    def setSubscriptionInformation(self, initialTask=None, custodialSites=None,
+                                   nonCustodialSites=None,
+                                   priority="Low", primaryDataset=None,
+                                   useSkim=False, isSkim=False,
+                                   dataTier=None, deleteFromSource=False,
+                                   datasetLifetime=None):
+        """
+        _setSubscriptionInformation_
+
+        Set the given subscription information for all datasets
+        in the workload that match the given primaryDataset (if any)
+        """
+
+        if custodialSites and not isinstance(custodialSites, list):
+            custodialSites = [custodialSites]
+        if nonCustodialSites and not isinstance(nonCustodialSites, list):
+            nonCustodialSites = [nonCustodialSites]
+
+        if initialTask:
+            taskIterator = initialTask.childTaskIterator()
+        else:
+            taskIterator = self.taskIterator()
+
+        for task in taskIterator:
+            task.setSubscriptionInformation(custodialSites, nonCustodialSites,
+                                            priority, primaryDataset,
+                                            useSkim, isSkim,
+                                            dataTier, deleteFromSource,
+                                            datasetLifetime)
+            self.setSubscriptionInformation(task,
+                                            custodialSites, nonCustodialSites,
+                                            priority, primaryDataset,
+                                            useSkim, isSkim,
+                                            dataTier, deleteFromSource,
+                                            datasetLifetime)
+
+        return
+
+    def getSubscriptionInformation(self, initialTask=None):
+        """
+        _getSubscriptionInformation_
+
+        Get the subscription information for the whole workload, this is given by
+        dataset and aggregated according to the information from each individual task
+        See WMTask.WMTaskHelper.getSubscriptionInformation for the output structure
+        """
+        subInfo = {}
+
+        # Add site lists without duplicates
+        extendWithoutDups = lambda x, y: x + list(set(y) - set(x))
+        # Choose the lowest priority
+        solvePrioConflicts = lambda x, y: y if x == "High" or y == "Low" else x
+        # Always choose a logical AND
+        solveDelConflicts = lambda x, y: x and y
+
+        if initialTask:
+            taskIterator = initialTask.childTaskIterator()
+            subInfo = initialTask.getSubscriptionInformation()
+        else:
+            taskIterator = self.taskIterator()
+
+        for task in taskIterator:
+            taskSubInfo = self.getSubscriptionInformation(task)
+            for dataset in taskSubInfo:
+                if dataset in subInfo:
+                    subInfo[dataset]["CustodialSites"] = extendWithoutDups(taskSubInfo[dataset]["CustodialSites"],
+                                                                           subInfo[dataset]["CustodialSites"])
+                    subInfo[dataset]["NonCustodialSites"] = extendWithoutDups(taskSubInfo[dataset]["NonCustodialSites"],
+                                                                              subInfo[dataset]["NonCustodialSites"])
+                    subInfo[dataset]["Priority"] = solvePrioConflicts(taskSubInfo[dataset]["Priority"],
+                                                                      subInfo[dataset]["Priority"])
+                    subInfo[dataset]["DeleteFromSource"] = solveDelConflicts(taskSubInfo[dataset]["DeleteFromSource"],
+                                                                             subInfo[dataset]["DeleteFromSource"])
+                else:
+                    subInfo[dataset] = taskSubInfo[dataset]
+                subInfo[dataset]["CustodialSites"] = list(
+                    set(subInfo[dataset]["CustodialSites"]) - set(subInfo[dataset]["NonCustodialSites"]))
+
+        return subInfo
+
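The three lambdas above encode the merge rules applied when two tasks disagree about the same dataset. A quick standalone illustration of each (inputs are made up):

# Standalone illustration of the conflict-resolution rules above.
extendWithoutDups = lambda x, y: x + list(set(y) - set(x))
solvePrioConflicts = lambda x, y: y if x == "High" or y == "Low" else x
solveDelConflicts = lambda x, y: x and y

extendWithoutDups(["T1_US_FNAL"], ["T1_US_FNAL", "T2_CH_CERN"])  # ['T1_US_FNAL', 'T2_CH_CERN']
solvePrioConflicts("High", "Low")   # 'Low'  (the lowest priority wins)
solveDelConflicts(True, False)      # False  (delete only if every task agrees)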
+    def getWorkloadOverrides(self):
+        """
+        _getWorkloadOverrides_
+
+        Get the overrides config section of this workload,
+        creating it if it doesn't exist
+        """
+        return self.data.section_('overrides')
+
+    def setWorkloadOverrides(self, overrides):
+        """
+        _setWorkloadOverrides_
+
+        Set the override parameters for all LogArchive steps
+        in all tasks.
+        """
+        if overrides:
+            for task in self.getAllTasks():
+                for stepName in task.listAllStepNames():
+                    stepHelper = task.getStepHelper(stepName)
+                    if stepHelper.stepType() == "LogArchive":
+                        for key, value in viewitems(overrides):
+                            stepHelper.addOverride(key, value)
+            # save it at workload level as well
+            for key, value in viewitems(overrides):
+                setattr(self.data.overrides, key, value)
+
+        return
+
+    def setBlockCloseSettings(self, blockCloseMaxWaitTime,
+                              blockCloseMaxFiles, blockCloseMaxEvents,
+                              blockCloseMaxSize):
+        """
+        _setBlockCloseSettings_
+
+        Set the parameters that define when a block should be closed
+        for this workload; they should all be defined, so it is a single call
+        """
+        self.data.properties.blockCloseMaxWaitTime = blockCloseMaxWaitTime
+        self.data.properties.blockCloseMaxFiles = blockCloseMaxFiles
+        self.data.properties.blockCloseMaxEvents = blockCloseMaxEvents
+        self.data.properties.blockCloseMaxSize = blockCloseMaxSize
+
+    def getBlockCloseMaxWaitTime(self):
+        """
+        _getBlockCloseMaxWaitTime_
+
+        Return the amount of time that a block should stay open
+        for this workload before closing it in DBS
+        """
+
+        return getattr(self.data.properties, 'blockCloseMaxWaitTime', 66400)
+
+    def getBlockCloseMaxSize(self):
+        """
+        _getBlockCloseMaxSize_
+
+        Return the maximum size that a block from this workload should have
+        """
+
+        return getattr(self.data.properties, 'blockCloseMaxSize', 5000000000000)
+
+    def getBlockCloseMaxEvents(self):
+        """
+        _getBlockCloseMaxEvents_
+
+        Return the maximum number of events that a block from this workload
+        should have
+        """
+
+        return getattr(self.data.properties, 'blockCloseMaxEvents', 25000000)
+
+    def getBlockCloseMaxFiles(self):
+        """
+        _getBlockCloseMaxFiles_
+
+        Return the maximum number of files that a block from this workload
+        should have
+        """
+
+        return getattr(self.data.properties, 'blockCloseMaxFiles', 500)
+
+    def getUnmergedLFNBase(self):
+        """
+        _getUnmergedLFNBase_
+
+        Get the unmerged LFN Base from properties
+        """
+
+        return getattr(self.data.properties, 'unmergedLFNBase', None)
+
+    def getMergedLFNBase(self):
+        """
+        _getMergedLFNBase_
+
+        Get the merged LFN Base from properties
+        """
+
+        return getattr(self.data.properties, 'mergedLFNBase', None)
+
+    def getLFNBases(self):
+        """
+        _getLFNBases_
+
+        Retrieve the LFN bases. They are returned as a tuple with the merged
+        LFN base first, followed by the unmerged LFN base.
+        """
+        return self.getMergedLFNBase(), self.getUnmergedLFNBase()
+
+    def setRetryPolicy(self):
+        """
+        _setRetryPolicy_
+
+        """
+        pass
+
+    def truncate(self, newWorkloadName, initialTaskPath, serverUrl,
+                 databaseName, collectionName=None):
+        """
+        _truncate_
+
+        Truncate a workflow so that it can be used for resubmission. This will
+        rename the workflow and set the task in the initialTaskPath parameter
+        to be the top level task. This modifies the workflow in place.
+        The input collection name can be specified, otherwise it will default to
+        the old workload name.
+        """
+        if not collectionName:
+            collectionName = self.name()
+
+        allTaskPaths = self.listAllTaskPathNames()
+        newTopLevelTask = self.getTaskByPath(initialTaskPath)
+        newTopLevelTask.addInputACDC(serverUrl, databaseName, collectionName,
+                                     initialTaskPath)
+        newTopLevelTask.setInputStep(None)
+        workloadOwner = self.getOwner()
+        self.setInitialJobCount(self.getInitialJobCount() + 10000000)
+        newTopLevelTask.setSplittingParameters(collectionName=collectionName,
+                                               filesetName=initialTaskPath,
+                                               couchURL=serverUrl,
+                                               couchDB=databaseName,
+                                               owner=workloadOwner["name"],
+                                               group=workloadOwner["group"],
+                                               initial_lfn_counter=self.getInitialJobCount())
+
+        for taskPath in allTaskPaths:
+            if not taskPath.startswith(initialTaskPath) or taskPath == initialTaskPath:
+                taskName = taskPath.split("/")[-1]
+                if hasattr(self.data.tasks, taskName):
+                    delattr(self.data.tasks, taskName)
+                if taskName in self.data.tasks.tasklist:
+                    self.data.tasks.tasklist.remove(taskName)
+
+        self.setName(newWorkloadName)
+        self.addTask(newTopLevelTask)
+        newTopLevelTask.setTopOfTree()
+
+        self.setWorkQueueSplitPolicy("ResubmitBlock",
+                                     newTopLevelTask.jobSplittingAlgorithm(),
+                                     newTopLevelTask.jobSplittingParameters())
+
+        def adjustPathsForTask(initialTask, parentPath):
+            """
+            _adjustPathsForTask_
+
+            Given an initial task and the path for that task, set the path
+            correctly for all of the children tasks.
+            """
+            for childTask in initialTask.childTaskIterator():
+                childTask.setPathName("%s/%s" % (parentPath, childTask.name()))
+                inputStep = childTask.getInputStep()
+                if inputStep is not None:
+                    inputStep = inputStep.replace(parentPath, "/" + newWorkloadName)
+                    childTask.setInputStep(inputStep)
+
+                adjustPathsForTask(childTask, childTask.getPathName())
+
+            return
+
+        adjustPathsForTask(newTopLevelTask, "/%s/%s" % (newWorkloadName,
+                                                        newTopLevelTask.name()))
+        return
+
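A resubmission sketch built on the method above (all values are hypothetical; the CouchDB url and database name simply fill the serverUrl/databaseName parameters):

# Hypothetical ACDC resubmission: rename the workflow and make the
# processing task the new top-level task, reading its input from an ACDC
# collection stored in the given CouchDB database.
workload.truncate(newWorkloadName="resub_MyRequest_v2",
                  initialTaskPath="/MyRequest/Processing",
                  serverUrl="https://couch.example.cern.ch/couchdb",
                  databaseName="acdcserver")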
+    def ignoreOutputModules(self, badModules, initialTask=None):
+        """
+        _ignoreOutputModules_
+
+        If there is a list of ignored output modules, the following must be done:
+        - Trim the workload tree so that no task that depends on the merged output
+          of the ignored modules exists in the tree; also eliminate the merge task
+          for such modules
+        - Add flags to make the runtime code ignore the files from this module so
+          they are not staged out
+        """
+
+        if not badModules:
+            return
+
+        if initialTask:
+            taskIterator = initialTask.childTaskIterator()
+        else:
+            taskIterator = self.taskIterator()
+
+        for task in taskIterator:
+            # Find the children tasks that have a bad output module as input and
+            # disown them. Can't delete them on the spot, save the names in a list
+            childTasksToDelete = []
+            for childTask in task.childTaskIterator():
+                taskInput = childTask.inputReference()
+                inputOutputModule = getattr(taskInput, "outputModule", None)
+                if inputOutputModule in badModules:
+                    childTasksToDelete.append(childTask.name())
+
+            # Now delete
+            for childTaskName in childTasksToDelete:
+                task.deleteChild(childTaskName)
+
+            if childTasksToDelete:
+                # Tell any CMSSW step to ignore the output modules
+                for stepName in task.listAllStepNames():
+                    stepHelper = task.getStepHelper(stepName)
+                    if stepHelper.stepType() == "CMSSW":
+                        stepHelper.setIgnoredOutputModules(badModules)
+            # Go deeper in the tree
+            self.ignoreOutputModules(badModules, task)
+
+        return
+
+    def setCMSSWVersions(self, cmsswVersion=None, globalTag=None,
+                         scramArch=None, initialTask=None):
+        """
+        _setCMSSWVersions_
+
+        Set the CMSSW version and the global tag for all CMSSW steps in the
+        workload.
+        """
+        if initialTask:
+            taskIterator = initialTask.childTaskIterator()
+        else:
+            taskIterator = self.taskIterator()
+
+        for task in taskIterator:
+            for stepName in task.listAllStepNames():
+                stepHelper = task.getStepHelper(stepName)
+
+                if stepHelper.stepType() == "CMSSW":
+                    if cmsswVersion is not None:
+                        if scramArch is not None:
+                            stepHelper.cmsswSetup(cmsswVersion=cmsswVersion,
+                                                  scramArch=scramArch)
+                        else:
+                            stepHelper.cmsswSetup(cmsswVersion=cmsswVersion)
+
+                    if globalTag is not None:
+                        stepHelper.setGlobalTag(globalTag)
+
+            self.setCMSSWVersions(cmsswVersion, globalTag, scramArch, task)
+
+        return
+
+    def getCMSSWVersions(self, initialTask=None):
+        """
+        _getCMSSWVersions_
+
+        Return a list of all CMSSW releases being used in this workload.
+        """
+        versions = set()
+        if initialTask:
+            taskIterator = initialTask.childTaskIterator()
+        else:
+            taskIterator = self.taskIterator()
+
+        for task in taskIterator:
+            for stepName in task.listAllStepNames():
+                stepHelper = task.getStepHelper(stepName)
+                if stepHelper.stepType() == "CMSSW":
+                    versions.add(stepHelper.getCMSSWVersion())
+            versions.update(self.getCMSSWVersions(task))
+
+        return list(versions)
+
+    def generateWorkloadSummary(self):
+        """
+        _generateWorkloadSummary_
+
+        Generates a dictionary with the following information:
+          task paths
+          ACDC
+          input datasets
+          output datasets
+
+        Intended for use in putting WMSpec info into couch
+        """
+        summary = {'tasks': [],
+                   'ACDC': {"collection": None, "filesets": {}},
+                   'input': [],
+                   'output': [],
+                   'owner': {},
+                   }
+
+        summary['tasks'] = self.listAllTaskPathNames()
+        summary['output'] = self.listOutputDatasets()
+        summary['input'] = self.listInputDatasets()
+        summary['owner'] = self.data.owner.dictionary_()
+        summary['performance'] = {}
+        for t in summary['tasks']:
+            summary['performance'][t] = {}
+
+        return summary
+
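The resulting couch document therefore has roughly this shape (all values are made up for illustration):

# Illustrative output shape of generateWorkloadSummary():
# {'tasks': ['/MyRequest/Processing', '/MyRequest/Processing/ProcessingMergeWrite'],
#  'ACDC': {'collection': None, 'filesets': {}},
#  'input': ['/SingleMuon/Run2023C-v1/RAW'],
#  'output': ['/SingleMuon/Run2023C-PromptReco-v1/AOD'],
#  'owner': {'name': 'someuser', 'group': 'DATAOPS'},
#  'performance': {'/MyRequest/Processing': {}, ...}}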
+    def setupPerformanceMonitoring(self, softTimeout, gracePeriod):
+        """
+        _setupPerformanceMonitoring_
+
+        Sets up performance monitors for all tasks in the workflow
+        """
+        for task in self.getAllTasks():
+            task.setPerformanceMonitor(softTimeout=softTimeout, gracePeriod=gracePeriod)
+
+        return
+
+    def listAllCMSSWConfigCacheIDs(self):
+        """
+        _listAllCMSSWConfigCacheIDs_
+
+        Go through each task and check to see if we have a configCacheID
+        """
+        result = []
+        for t in self.taskIterator():
+            result.extend(t.getConfigCacheIDs())
+        return result
+
+    def setTrustLocationFlag(self, inputFlag=False, pileupFlag=False):
+        """
+        Set the input and the pileup flags in the top level tasks,
+        indicating that site lists should be used as location data
+
+        The input data flag has to be set only for top level tasks, otherwise
+        it affects where secondary jobs are meant to run.
+        The pileup flag has to be set for all the tasks in the workload.
+
+        Validate these parameters to make sure they are only set for workflows
+        that require those types of input datasets (ACDCs are not validated)
+        """
+        isACDCWorkflow = False
+        for task in self.taskIterator():
+            if task.getInputACDC():
+                isACDCWorkflow = True
+
+        if inputFlag is True and isACDCWorkflow is False and not self.listInputDatasets():
+            msg = "Setting TrustSitelists=True for workflows without input dataset is forbidden!"
+            raise RuntimeError(msg)
+        if pileupFlag is True and isACDCWorkflow is False and not self.listPileupDatasets():
+            msg = "Setting TrustPUSitelists=True for workflows without pileup dataset is forbidden!"
+            raise RuntimeError(msg)
+        for task in self.getAllTasks(cpuOnly=True):
+            if task.isTopOfTree():
+                task.setTrustSitelists(inputFlag, pileupFlag)
+            else:
+                task.setTrustSitelists(False, pileupFlag)
+
+        return
+
+    def getTrustLocationFlag(self):
+        """
+        _getTrustLocationFlag_
+
+        Get the inputFlag and the pileupFlag values from the *top level*
+        tasks, indicating whether the site lists should be trusted as the
+        location for the input and/or for the pileup data.
+        """
+        for task in self.getTopLevelTask():
+            return task.getTrustSitelists()
+        return {'trustlists': False, 'trustPUlists': False}
+
+    def validateArgumentForAssignment(self, schema):
+        specClass = loadSpecClassByType(self.getRequestType())
+        argumentDefinition = specClass.getWorkloadAssignArgs()
+        validateArgumentsUpdate(schema, argumentDefinition)
+        return
+
+    def validateSiteListsUpdate(self, arguments):
+        """
+        Validate a dictionary of workflow arguments for a possible site lists update.
+        It must catch any eventual conflict between the currently existing site lists
+        and the ones provided by the user, presuming that the change would happen by
+        fully substituting an already existing site list at the workflow, if provided
+        with the arguments here.
+        """
+        # NOTE: We have 3 different use cases to validate for siteLists conflicts:
+        #       * A change to both SiteWhitelist and SiteBlacklist
+        #       * A change only to SiteWhitelist
+        #       * A change only to SiteBlacklist
+        if "SiteWhitelist" in arguments and "SiteBlacklist" in arguments:
+            validateSiteLists(arguments)
+            return
+        fullSiteWhitelist = set()
+        fullSiteBlacklist = set()
+        if "SiteWhitelist" in arguments and "SiteBlacklist" not in arguments:
+            fullSiteWhitelist = set(makeList(arguments["SiteWhitelist"]))
+            fullSiteBlacklist = set(self.getSiteBlacklist())
+        if "SiteBlacklist" in arguments and "SiteWhitelist" not in arguments:
+            fullSiteWhitelist = set(self.getSiteWhitelist())
+            fullSiteBlacklist = set(makeList(arguments["SiteBlacklist"]))
+        siteConflicts = fullSiteWhitelist & fullSiteBlacklist
+        if siteConflicts:
+            msg = "Validation of Site Lists for update failed due to conflicts with existing Site Lists. "
+            msg += f"A site can only be blacklisted or whitelisted. Conflicting sites: {list(siteConflicts)}"
+            raise WMSpecFactoryException(msg)
+        return
+
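For example, if the workflow currently blacklists a site and an update tries to whitelist that same site without also updating the blacklist, the intersection check above raises (site names are illustrative):

# Illustrative: the existing blacklist contains "T2_US_Vanderbilt".
# Updating only the whitelist with the same site yields a conflict:
workload.validateSiteListsUpdate({"SiteWhitelist": ["T2_US_Vanderbilt", "T1_US_FNAL"]})
# -> WMSpecFactoryException: ... Conflicting sites: ['T2_US_Vanderbilt']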
+    def validateArgumentsPartialUpdate(self, arguments):
+        """
+        Validates the provided parameters schema for workflow arguments update, using
+        the argument definitions for assignment as provided at StdBase.getWorkloadAssignArgs
+        :param arguments: Workflow arguments schema to be validated - Must be a properly
+            defined dictionary of {arg: value} pairs.
+        :return: Nothing. Raises proper exceptions when argument validation fails
+
+        NOTE: In order to avoid full schema validation and enforcing mandatory arguments,
+              we set the optionKey argument for this call to None. This way it is ignored
+              during the next step of the validation process (namely at
+              WMWorkloadTools._validateArgumentOptions), and all of the so provided
+              arguments in the schema are considered optional, but nonetheless they
+              still go through the full value validation process.
+        """
+        specClass = loadSpecClassByType(self.getRequestType())
+        argumentDefinition = specClass.getWorkloadAssignArgs()
+        validateUnknownArgs(arguments, argumentDefinition)
+        _validateArgumentOptions(arguments, argumentDefinition, optionKey=None)
+        self.validateSiteListsUpdate(arguments)
+        return
+
+    def updateArguments(self, kwargs):
+        """
+        Set up all the arguments related to assigning a request.
+        Args are validated before the update; assignment is common
+        to all the different spec types.
+
+        Input data should have been validated already using
+        validateArgumentForAssignment.
+        """
+        specClass = loadSpecClassByType(self.getRequestType())
+        argumentDefinition = specClass.getWorkloadAssignArgs()
+        setAssignArgumentsWithDefault(kwargs, argumentDefinition)
+
+        if kwargs.get('RequestPriority') is not None and kwargs['RequestPriority'] != self.priority():
+            self.setPriority(kwargs['RequestPriority'])
+        else:
+            # if it's the same, pop it out to avoid a priority transition update
+            kwargs.pop("RequestPriority", None)
+
+        self.setWorkloadOverrides(kwargs["Override"])
+        self.setSiteWhitelist(kwargs["SiteWhitelist"])
+        self.setSiteBlacklist(kwargs["SiteBlacklist"])
+        self.setTrustLocationFlag(inputFlag=strToBool(kwargs["TrustSitelists"]),
+                                  pileupFlag=strToBool(kwargs["TrustPUSitelists"]))
+
+        self.setMergeParameters(int(kwargs["MinMergeSize"]),
+                                int(kwargs["MaxMergeSize"]),
+                                int(kwargs["MaxMergeEvents"]))
+
+        # FIXME not validated
+        if kwargs.get("MergedLFNBase") and kwargs.get("UnmergedLFNBase"):
+            self.setLFNBase(kwargs["MergedLFNBase"], kwargs["UnmergedLFNBase"])
+        # Set ProcessingVersion and AcquisitionEra, which could be json encoded dicts;
+        # they should be processed once the LFNBases are set
+        if kwargs.get("AcquisitionEra") is not None:
+            self.setAcquisitionEra(kwargs["AcquisitionEra"])
+        if kwargs.get("ProcessingString") is not None:
+            self.setProcessingString(kwargs["ProcessingString"])
+        if kwargs.get("ProcessingVersion") is not None:
+            self.setProcessingVersion(kwargs["ProcessingVersion"])
+
+        self.setupPerformanceMonitoring(softTimeout=kwargs["SoftTimeout"],
+                                        gracePeriod=kwargs["GracePeriod"])
+
+        # Check whether we should check location for the data
+        self.setAllowOpportunistic(allowOpport=strToBool(kwargs["AllowOpportunistic"]))
+
+        # Block closing information
+        self.setBlockCloseSettings(kwargs["BlockCloseMaxWaitTime"],
+                                   kwargs["BlockCloseMaxFiles"],
+                                   kwargs["BlockCloseMaxEvents"],
+                                   kwargs["BlockCloseMaxSize"])
+
+        self.setDashboardActivity(kwargs["Dashboard"])
+
+        if kwargs.get("Memory") is not None:
+            self.setMemory(kwargs.get("Memory"))
+        if kwargs.get("Multicore") is not None:
+            self.setCoresAndStreams(kwargs.get("Multicore"), kwargs.get("EventStreams"))
+
+        # MUST be set after AcqEra/ProcStr/ProcVer
+        if self.getRequestType() == "StepChain":
+            self.setStepProperties(kwargs)
+            self.updateStepParentageMap()
+        elif self.getRequestType() == "TaskChain":
+            # TODO: need to define a proper task form, maybe kwargs['Tasks']?
+            self.setTaskProperties(kwargs)
+            self.updateTaskParentageMap()
+
+        # Since it lists the output datasets, it has to be done at the very end
+        # Set phedex subscription information
+        if kwargs.get("CustodialSites") or kwargs.get("NonCustodialSites"):
+            self.setSubscriptionInformation(custodialSites=kwargs["CustodialSites"],
+                                            nonCustodialSites=kwargs["NonCustodialSites"],
+                                            priority=kwargs["SubscriptionPriority"],
+                                            deleteFromSource=kwargs["DeleteFromSource"])
+
+        return
+
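For orientation, the assignment dictionary consumed above carries keys like these; an illustrative subset with made-up values (the full set comes from StdBase.getWorkloadAssignArgs, and setAssignArgumentsWithDefault fills in any missing keys with defaults):

# Illustrative subset of assignment arguments (values made up):
assignArgs = {
    "SiteWhitelist": ["T1_US_FNAL", "T2_CH_CERN"],
    "SiteBlacklist": [],
    "TrustSitelists": "False",
    "TrustPUSitelists": "False",
    "MinMergeSize": 2147483648,
    "MaxMergeSize": 4294967296,
    "MaxMergeEvents": 50000,
    "SoftTimeout": 129600,
    "GracePeriod": 300,
    "RequestPriority": 85000,
}
workload.updateArguments(assignArgs)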
+    def loadSpecFromCouch(self, couchurl, requestName):
+        """
+        This depends on PersistencyHelper.py saveCouch (that method better be decomposed)
+        """
+        return self.load("%s/%s/spec" % (couchurl, requestName))
+
+    def setTaskPropertiesFromWorkload(self):
+        """
+        Set task properties inherited from the workload properties.
+        This needs to be called at the end of the buildWorkload function,
+        after all the tasks are added.
+        It sets acquisitionEra, processingVersion and processingString,
+        since those values need to be set for all the tasks in the workload.
+        TODO: need to force a call to this function after a task is added,
+        instead of relying on coders not forgetting to call it at the end of
+        self.buildWorkload()
+        """
+        self.setAcquisitionEra(self.getAcquisitionEra())
+        self.setProcessingVersion(self.getProcessingVersion())
+        self.setProcessingString(self.getProcessingString())
+        self.setLumiList(self.getLumiList())
+        self.setPrepID(self.getPrepID())
+        return
+
+
+class WMWorkload(ConfigSection):
+    """
+    _WMWorkload_
+
+    Request container
+
+    """
+
+    def __init__(self, name="test"):
+        ConfigSection.__init__(self, name)
+        self.objectType = self.__class__.__name__
+        # //persistent data
+        # //
+        # //
+        self.section_("persistency")
+        self.persistency.specUrl = None
+        # //
+        # // request related information
+        # //
+        self.section_("request")
+        self.request.priority = None  # what should be the default value
+        self.request.status = None
+        # //
+        # // owner related information
+        # //
+        self.section_("owner")
+
+        # //
+        # // Policies applied to this workload by the processing system
+        # //
+        self.section_("policies")
+        self.policies.section_("start")
+        self.policies.section_("end")
+        self.policies.start.policyName = None
+        self.policies.end.policyName = None
+
+        # //
+        # // properties of the Workload and all tasks there-in
+        # //
+        self.section_("properties")
+        self.properties.unmergedLFNBase = "/store/unmerged"
+        self.properties.mergedLFNBase = "/store/data"
+        self.properties.dashboardActivity = None
+        self.properties.blockCloseMaxWaitTime = 66400
+        self.properties.blockCloseMaxSize = 5000000000000
+        self.properties.blockCloseMaxFiles = 500
+        self.properties.blockCloseMaxEvents = 250000000
+        self.properties.prepID = None
+
+        # Overrides for this workload
+        self.section_("overrides")
+
+        # //
+        # // tasks
+        # //
+        self.section_("tasks")
+        self.tasks.tasklist = []
+
+        # workload spec type
+        self.requestType = ""
+        self.dbsUrl = None
+
+        self.sandbox = None
+        self.initialJobCount = 0
+
+
+def newWorkload(workloadName):
+    """
+    _newWorkload_
+
+    Util method to create a new WMWorkload and wrap it in a helper
+
+    """
+    return WMWorkloadHelper(WMWorkload(workloadName))