lsst-ctrl-execute 29.0.0rc3__py3-none-any.whl → 29.2025.1400__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- lsst/ctrl/execute/allocator.py +28 -2
- lsst/ctrl/execute/allocatorParser.py +17 -0
- lsst/ctrl/execute/condorConfig.py +1 -0
- lsst/ctrl/execute/slurmPlugin.py +14 -0
- {lsst_ctrl_execute-29.0.0rc3.dist-info → lsst_ctrl_execute-29.2025.1400.dist-info}/METADATA +1 -1
- {lsst_ctrl_execute-29.0.0rc3.dist-info → lsst_ctrl_execute-29.2025.1400.dist-info}/RECORD +8 -8
- {lsst_ctrl_execute-29.0.0rc3.dist-info → lsst_ctrl_execute-29.2025.1400.dist-info}/WHEEL +0 -0
- {lsst_ctrl_execute-29.0.0rc3.dist-info → lsst_ctrl_execute-29.2025.1400.dist-info}/entry_points.txt +0 -0
lsst/ctrl/execute/allocator.py
CHANGED
@@ -108,12 +108,16 @@ class Allocator:
         self.commandLineDefaults["NODE_COUNT"] = self.opts.nodeCount
         self.commandLineDefaults["COLLECTOR"] = self.opts.collector
         self.commandLineDefaults["CPORT"] = self.opts.collectorport
-        self.commandLineDefaults["CPUS"] = self.opts.cpus
+        if self.opts.exclusive:
+            self.commandLineDefaults["CPUS"] = self.configuration.platform.peakcpus
+        else:
+            self.commandLineDefaults["CPUS"] = self.opts.cpus
         self.commandLineDefaults["WALL_CLOCK"] = self.opts.maximumWallClock
         self.commandLineDefaults["ACCOUNT"] = self.opts.account
         self.commandLineDefaults["MEMPERCORE"] = 4096
         self.commandLineDefaults["ALLOWEDAUTO"] = 500
         self.commandLineDefaults["AUTOCPUS"] = 16
+        self.commandLineDefaults["MINAUTOCPUS"] = 15
         self.commandLineDefaults["QUEUE"] = self.opts.queue
         self.load()

@@ -346,7 +350,17 @@ class Allocator:
         """Size of standard glideins for allocateNodes auto
         @return the value of autoCPUs
         """
-        return self.getParameter("AUTOCPUS")
+        if self.getParameter("EXCLUSIVE"):
+            peakcpus = self.configuration.platform.peakcpus
+            return peakcpus
+        else:
+            return self.getParameter("AUTOCPUS")
+
+    def getMinAutoCPUs(self):
+        """Minimum size of standard glideins for allocateNodes auto
+        @return the value of minAutoCPUs
+        """
+        return self.getParameter("MINAUTOCPUS")

     def getWallClock(self):
         """Accessor for WALL_CLOCK
@@ -366,6 +380,18 @@
         """
         return self.getParameter("RESERVATION")

+    def getExclusive(self):
+        """Accessor for EXCLUSIVE
+        @return the value of EXCLUSIVE
+        """
+        return self.getParameter("EXCLUSIVE")
+
+    def getExcluser(self):
+        """Accessor for EXCLUSER
+        @return the value of EXCLUSER
+        """
+        return self.getParameter("EXCLUSER")
+
     def getParameter(self, value):
         """Accessor for generic value
         @return None if value is not set. Otherwise, use the command line
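Taken together, these hunks make glidein sizing respect the new exclusive mode: when EXCLUSIVE is set, getAutoCPUs() resolves to the platform's peakcpus rather than the standard AUTOCPUS value. A minimal standalone sketch of that resolution rule, with a plain dict standing in for the Allocator's parameter lookup (the function and dict here are illustrative, not the package's API):

# Sketch only: mirrors the EXCLUSIVE/AUTOCPUS resolution added above.
def auto_cpus(params: dict, peakcpus: int) -> int:
    # EXCLUSIVE is truthy when slurmPlugin sets it to "#SBATCH --exclusive".
    if params.get("EXCLUSIVE"):
        return peakcpus           # size the glidein to the whole node
    return params["AUTOCPUS"]     # otherwise the standard size (default 16)

assert auto_cpus({"AUTOCPUS": 16}, peakcpus=128) == 16
assert auto_cpus({"AUTOCPUS": 16, "EXCLUSIVE": "#SBATCH --exclusive"}, peakcpus=128) == 128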
lsst/ctrl/execute/allocatorParser.py
CHANGED
@@ -178,6 +178,23 @@ class AllocatorParser:
             dest="packnodes",
             help="encourage nodes to pack jobs rather than spread",
         )
+        parser.add_argument(
+            "--exclusive",
+            action="store_true",
+            dest="exclusive",
+            default=None,
+            help="glidein will be an exclusive batch job; the glidein will be "
+            "the only job on the node and have all available cores and memory. "
+            "Settings for the number of cores (-c) are ignored and overridden.",
+        )
+        parser.add_argument(
+            "--exclusive-user",
+            action="store_true",
+            dest="exclusiveUser",
+            default=None,
+            help="glidein will be a user-exclusive batch job; only "
+            "other jobs of the same user will share the node with the glidein",
+        )
         parser.add_argument("-v", "--verbose", action="store_true", dest="verbose", help="verbose")
         parser.add_argument(
             "-r",
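Note that both store_true actions are declared with default=None rather than argparse's usual False, which is what lets slurmPlugin.py below distinguish "flag given" from "flag absent" with an "is not None" test. A quick illustration of that behavior (standard argparse, nothing package-specific):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--exclusive", action="store_true", dest="exclusive", default=None)

print(parser.parse_args([]).exclusive)               # None: flag absent
print(parser.parse_args(["--exclusive"]).exclusive)  # True: flag given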
lsst/ctrl/execute/condorConfig.py
CHANGED
@@ -40,6 +40,7 @@ class PlatformConfig(pexConfig.Config):
     eupsPath = pexConfig.Field(doc="location of remote EUPS stack", dtype=str, default=None)
     nodeSetRequired = pexConfig.Field(doc="is the nodeset required", dtype=bool, default=False)
     scheduler = pexConfig.Field(doc="scheduler type", dtype=str, default=None)
+    peakcpus = pexConfig.Field(doc="peakcpus", dtype=int, default=None)
     manager = pexConfig.Field(doc="workflow manager", dtype=str, default=None)
     setup_using = pexConfig.Field(doc="environment setup type", dtype=str, default=None)
     manager_software_home = pexConfig.Field(
lsst/ctrl/execute/slurmPlugin.py
CHANGED
@@ -214,6 +214,16 @@ class SlurmPlugin(Allocator):
         else:
             self.defaults["RESERVATION"] = ""

+        if self.opts.exclusive is not None:
+            self.defaults["EXCLUSIVE"] = "#SBATCH --exclusive"
+        else:
+            self.defaults["EXCLUSIVE"] = ""
+
+        if self.opts.exclusiveUser is not None:
+            self.defaults["EXCLUSER"] = "#SBATCH --exclusive=user"
+        else:
+            self.defaults["EXCLUSER"] = ""
+
         if self.opts.qos:
             self.defaults["QOS"] = f"#SBATCH --qos {self.opts.qos}"
         else:
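The EXCLUSIVE and EXCLUSER defaults are whole #SBATCH directive lines (or empty strings), so substituting them into a submission template either emits a directive or a harmless blank line. A rough sketch of that expansion using string.Template as a stand-in for the package's template writer (the surrounding stub text is invented for the example):

from string import Template

# Stand-in for a slurm submission template; $EXCLUSIVE and $EXCLUSER are
# the keys set above, the job-name line is illustrative.
stub = Template("#SBATCH --job-name glidein\n$EXCLUSIVE\n$EXCLUSER\n")
print(stub.substitute(EXCLUSIVE="#SBATCH --exclusive",  # --exclusive given
                      EXCLUSER=""))                     # --exclusive-user absent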
@@ -281,7 +291,11 @@ class SlurmPlugin(Allocator):
         """Determine and submit the glideins needed from job pressure."""

         verbose = self.isVerbose()
+        cpus = self.getCPUs()
         autoCPUs = self.getAutoCPUs()
+        minAutoCPUs = self.getMinAutoCPUs()
+        if cpus >= minAutoCPUs:
+            autoCPUs = cpus
         memoryPerCore = self.getMemoryPerCore()
         memoryLimit = autoCPUs * memoryPerCore
         auser = self.getUserName()
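In effect, an explicit core request at or above MINAUTOCPUS (15) now widens the automatic glideins, and the memory limit scales with it. A worked example using the defaults set in allocator.py above (AUTOCPUS=16, MINAUTOCPUS=15, MEMPERCORE=4096); the requested core count is made up:

cpus = 32               # hypothetical request for 32 cores
autoCPUs = 16           # AUTOCPUS default
minAutoCPUs = 15        # MINAUTOCPUS default
if cpus >= minAutoCPUs:
    autoCPUs = cpus     # large requests set the glidein size directly
memoryLimit = autoCPUs * 4096   # MEMPERCORE default
print(autoCPUs, memoryLimit)    # 32 131072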
{lsst_ctrl_execute-29.0.0rc3.dist-info → lsst_ctrl_execute-29.2025.1400.dist-info}/METADATA
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lsst-ctrl-execute
-Version: 29.0.0rc3
+Version: 29.2025.1400
 Summary: Utilities for executing and managing workloads.
 Project-URL: Homepage, https://github.com/lsst/ctrl_execute
 Author-email: Rubin Observatory Data Management <dm-admin@lists.lsst.org>
{lsst_ctrl_execute-29.0.0rc3.dist-info → lsst_ctrl_execute-29.2025.1400.dist-info}/RECORD
RENAMED
@@ -2,9 +2,9 @@ lsst/__init__.py,sha256=_2bZAHuDVAx7MM7KA7pt3DYp641NY4RzSoRAwesWKfU,67
 lsst/ctrl/__init__.py,sha256=_2bZAHuDVAx7MM7KA7pt3DYp641NY4RzSoRAwesWKfU,67
 lsst/ctrl/execute/__init__.py,sha256=3tOhxyhnHbwyLLGKmL13A_TGqKjNErHjJybplX9mPzw,1019
 lsst/ctrl/execute/allocationConfig.py,sha256=TxC6pPIsR1GBfGIU8_R4dm4OCieIdZDBFOYLak1ItAc,2369
-lsst/ctrl/execute/allocator.py,sha256=
-lsst/ctrl/execute/allocatorParser.py,sha256=
-lsst/ctrl/execute/condorConfig.py,sha256=
+lsst/ctrl/execute/allocator.py,sha256=ZibFXk-uKwK2vvW8AY5WCiwiXHpiyMW8Z6R3QtAQNCs,17211
+lsst/ctrl/execute/allocatorParser.py,sha256=vSjPPY27pNoS0-uHC91MZ24MU9ed24R2m8hkKghMPY4,7411
+lsst/ctrl/execute/condorConfig.py,sha256=w-KV9ulxCnz1o17Jb116TW5d7l8L-5w9RE5BwA6kULY,2624
 lsst/ctrl/execute/condorInfoConfig.py,sha256=pHNQCAn8J4F789GLuEIUShZEPEbAwO5Fac-jMreM5NA,2045
 lsst/ctrl/execute/envString.py,sha256=lAi4me41NhEhQl9XvPtvykPJvYeDkibkIv338u14OoY,1608
 lsst/ctrl/execute/findPackageFile.py,sha256=rADUyy8KEac2sQX-z0cDSBVOvvJkJSd0eTpcNP_kO1Y,3608
@@ -12,13 +12,13 @@ lsst/ctrl/execute/namedClassFactory.py,sha256=UzQ7MLKAkNK0XnmofF4oYz1kADuLbdDdMH
 lsst/ctrl/execute/pbsPlugin.py,sha256=DMmHvHvMWfzbMBxA_TD1LG0FEaelun_djgoK7vwh9ps,3831
 lsst/ctrl/execute/qCommand.py,sha256=XaLmJydjbtaS5Edk4yi6re92z6xsQNFPuU7izq_UGNc,2374
 lsst/ctrl/execute/seqFile.py,sha256=L-FNE5GB10DuSHyYeI6lJvH243_fGYQnQ55TX8rGMYo,1980
-lsst/ctrl/execute/slurmPlugin.py,sha256=
+lsst/ctrl/execute/slurmPlugin.py,sha256=BN99DB8iKB2OG9GW0llaRyp7ocOXSf0mn62vgJfZWNk,20190
 lsst/ctrl/execute/templateWriter.py,sha256=koqquE_2-z7sue7H_iHv1cpjbjuIxdTyluLsQB0qTgM,1929
 lsst/ctrl/execute/libexec/allocateNodes.py,sha256=GCJdWY3j1E3_im3NpO08VK2q_v6AHfEYsuaO20eI8zE,2863
 lsst/ctrl/execute/libexec/dagIdInfo.py,sha256=U3jfrtkw2lw25QWRlLK4XuL_L29FqNm0D5Fy_F4ZJG4,1834
 lsst/ctrl/execute/libexec/qdelete.py,sha256=-YD1LnsKiePWXZhImoixx-Obx4GqN21m00MEgHSmnGc,1231
 lsst/ctrl/execute/libexec/qstatus.py,sha256=ds9JgLWFQ3nXHyXAPhXjdtSuiLvxxm1_YajH6I5xwcc,1572
-lsst_ctrl_execute-29.0.0rc3.dist-info/METADATA,sha256=
-lsst_ctrl_execute-29.0.0rc3.dist-info/WHEEL,sha256=
-lsst_ctrl_execute-29.0.0rc3.dist-info/entry_points.txt,sha256=
-lsst_ctrl_execute-29.0.0rc3.dist-info/RECORD,,
+lsst_ctrl_execute-29.2025.1400.dist-info/METADATA,sha256=MeZMCjerAW-pOpZRfQii18r08_FG0XT0JhiiSQuBzHU,857
+lsst_ctrl_execute-29.2025.1400.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+lsst_ctrl_execute-29.2025.1400.dist-info/entry_points.txt,sha256=pNyRrmyX0WVZfYRSfmuhm1vxOqk2np_7SQWxQ5DgB1A,242
+lsst_ctrl_execute-29.2025.1400.dist-info/RECORD,,
{lsst_ctrl_execute-29.0.0rc3.dist-info → lsst_ctrl_execute-29.2025.1400.dist-info}/WHEEL
RENAMED
File without changes
{lsst_ctrl_execute-29.0.0rc3.dist-info → lsst_ctrl_execute-29.2025.1400.dist-info}/entry_points.txt
RENAMED
File without changes