snakemake-executor-plugin-slurm 1.1.0-py3-none-any.whl → 1.2.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of snakemake-executor-plugin-slurm might be problematic.

--- a/snakemake_executor_plugin_slurm/__init__.py
+++ b/snakemake_executor_plugin_slurm/__init__.py
@@ -26,9 +26,9 @@ from snakemake_interface_executor_plugins.jobs import (
     JobExecutorInterface,
 )
 from snakemake_interface_common.exceptions import WorkflowError
-from snakemake_executor_plugin_slurm_jobstep import get_cpu_setting
 
 from .utils import delete_slurm_environment, delete_empty_dirs, set_gres_string
+from .submit_string import get_submit_command
 
 
 @dataclass
@@ -135,9 +135,10 @@ common_settings = CommonSettings(
 # Required:
 # Implementation of your executor
 class Executor(RemoteExecutor):
-    def __post_init__(self):
+    def __post_init__(self, test_mode: bool = False):
         # run check whether we are running in a SLURM job context
         self.warn_on_jobcontext()
+        self.test_mode = test_mode
         self.run_uuid = str(uuid.uuid4())
         self.logger.info(f"SLURM run ID: {self.run_uuid}")
         self._fallback_account_arg = None
@@ -225,31 +226,28 @@ class Executor(RemoteExecutor):
             comment_str = f"rule_{job.name}"
         else:
             comment_str = f"rule_{job.name}_wildcards_{wildcard_str}"
-        call = (
-            f"sbatch "
-            f"--parsable "
-            f"--job-name {self.run_uuid} "
-            f"--output '{slurm_logfile}' "
-            f"--export=ALL "
-            f"--comment '{comment_str}'"
-        )
+        # check whether the 'slurm_extra' parameter is used correctly
+        # prior to putatively setting in the sbatch call
+        if job.resources.get("slurm_extra"):
+            self.check_slurm_extra(job)
 
-        if not self.workflow.executor_settings.no_account:
-            call += self.get_account_arg(job)
+        job_params = {
+            "run_uuid": self.run_uuid,
+            "slurm_logfile": slurm_logfile,
+            "comment_str": comment_str,
+            "account": self.get_account_arg(job),
+            "partition": self.get_partition_arg(job),
+            "workdir": self.workflow.workdir_init,
+        }
 
-        call += self.get_partition_arg(job)
+        call = get_submit_command(job, job_params)
 
         if self.workflow.executor_settings.requeue:
             call += " --requeue"
 
         call += set_gres_string(job)
 
-        if job.resources.get("clusters"):
-            call += f" --clusters {job.resources.clusters}"
-
-        if job.resources.get("runtime"):
-            call += f" -t {job.resources.runtime}"
-        else:
+        if not job.resources.get("runtime"):
             self.logger.warning(
                 "No wall time information given. This might or might not "
                 "work on your cluster. "
@@ -257,28 +255,12 @@ class Executor(RemoteExecutor):
                 "default via --default-resources."
             )
 
-        if job.resources.get("constraint"):
-            call += f" -C '{job.resources.constraint}'"
-        if job.resources.get("mem_mb_per_cpu"):
-            call += f" --mem-per-cpu {job.resources.mem_mb_per_cpu}"
-        elif job.resources.get("mem_mb"):
-            call += f" --mem {job.resources.mem_mb}"
-        else:
+        if not job.resources.get("mem_mb_per_cpu") and not job.resources.get("mem_mb"):
             self.logger.warning(
                 "No job memory information ('mem_mb' or 'mem_mb_per_cpu') is given "
                 "- submitting without. This might or might not work on your cluster."
             )
 
-        if job.resources.get("nodes", False):
-            call += f" --nodes={job.resources.get('nodes', 1)}"
-
-        # fixes #40 - set ntasks regardless of mpi, because
-        # SLURM v22.05 will require it for all jobs
-        gpu_job = job.resources.get("gpu") or "gpu" in job.resources.get("gres", "")
-        if gpu_job:
-            call += f" --ntasks-per-gpu={job.resources.get('tasks', 1)}"
-        else:
-            call += f" --ntasks={job.resources.get('tasks', 1)}"
         # MPI job
         if job.resources.get("mpi", False):
             if not job.resources.get("tasks_per_node") and not job.resources.get(
@@ -290,19 +272,8 @@ class Executor(RemoteExecutor):
                 "Probably not what you want."
             )
 
-        # we need to set cpus-per-task OR cpus-per-gpu, the function
-        # will return a string with the corresponding value
-        call += f" {get_cpu_setting(job, gpu_job)}"
-        if job.resources.get("slurm_extra"):
-            self.check_slurm_extra(job)
-            call += f" {job.resources.slurm_extra}"
-
         exec_job = self.format_job_exec(job)
 
-        # ensure that workdir is set correctly
-        # use short argument as this is the same in all slurm versions
-        # (see https://github.com/snakemake/snakemake/issues/2014)
-        call += f" -D {self.workflow.workdir_init}"
         # and finally the job to execute with all the snakemake parameters
         call += f' --wrap="{exec_job}"'
 
@@ -712,7 +683,7 @@ We leave it to SLURM to resume your job(s)"""
             )
             return ""
 
-        if account not in accounts:
+        if account.lower() not in accounts:
             raise WorkflowError(
                 f"The given account {account} appears to be invalid. Available "
                 f"accounts:\n{', '.join(accounts)}"
--- /dev/null
+++ b/snakemake_executor_plugin_slurm/submit_string.py
@@ -0,0 +1,72 @@
+from snakemake_executor_plugin_slurm_jobstep import get_cpu_setting
+from types import SimpleNamespace
+
+
+def get_submit_command(job, params):
+    """
+    Return the submit command for the job.
+    """
+    # Convert params dict to a SimpleNamespace for attribute-style access
+    params = SimpleNamespace(**params)
+
+    call = (
+        f"sbatch "
+        f"--parsable "
+        f"--job-name {params.run_uuid} "
+        f'--output "{params.slurm_logfile}" '
+        f"--export=ALL "
+        f'--comment "{params.comment_str}"'
+    )
+
+    # No account or partition checking is required here.
+    # Checking is done in the submit function.
+
+    # here, only the string is used, as it already contains
+    # '-A {account_name}'
+    call += f" {params.account}"
+    # here, only the string is used, as it already contains
+    # '-p {partition_name}'
+    call += f" {params.partition}"
+
+    if job.resources.get("clusters"):
+        call += f" --clusters {job.resources.clusters}"
+
+    if job.resources.get("runtime"):
+        call += f" -t {job.resources.runtime}"
+
+    if job.resources.get("constraint") or isinstance(
+        job.resources.get("constraint"), str
+    ):
+        call += f" -C '{job.resources.get('constraint')}'"
+
+    if job.resources.get("qos") or isinstance(job.resources.get("qos"), str):
+        call += f" --qos='{job.resources.qos}'"
+
+    if job.resources.get("mem_mb_per_cpu"):
+        call += f" --mem-per-cpu {job.resources.mem_mb_per_cpu}"
+    elif job.resources.get("mem_mb"):
+        call += f" --mem {job.resources.mem_mb}"
+
+    if job.resources.get("nodes", False):
+        call += f" --nodes={job.resources.get('nodes', 1)}"
+
+    # fixes #40 - set ntasks regardless of mpi, because
+    # SLURM v22.05 will require it for all jobs
+    gpu_job = job.resources.get("gpu") or "gpu" in job.resources.get("gres", "")
+    if gpu_job:
+        call += f" --ntasks-per-gpu={job.resources.get('tasks', 1)}"
+    else:
+        call += f" --ntasks={job.resources.get('tasks', 1)}"
+
+    # we need to set cpus-per-task OR cpus-per-gpu, the function
+    # will return a string with the corresponding value
+    call += f" {get_cpu_setting(job, gpu_job)}"
+    if job.resources.get("slurm_extra"):
+        call += f" {job.resources.slurm_extra}"
+
+    # ensure that workdir is set correctly
+    # use short argument as this is the same in all slurm versions
+    # (see https://github.com/snakemake/snakemake/issues/2014)
+    call += f" -D '{params.workdir}'"
+
+    return call
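
As a side note, here is a minimal sketch of how the new get_submit_command() helper could be exercised in isolation, for instance from a unit test. The MockResources class and all concrete values below are hypothetical and not part of the package; the sketch also assumes the snakemake-executor-plugin-slurm-jobstep dependency is importable (get_submit_command() calls its get_cpu_setting() internally) and that get_cpu_setting() only inspects job.threads and job.resources.

# Hypothetical usage sketch (not part of the package).
from types import SimpleNamespace

from snakemake_executor_plugin_slurm.submit_string import get_submit_command


class MockResources(dict):
    # Stand-in for Snakemake's resources object: supports both
    # job.resources.get("key") and job.resources.key access.
    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError as e:
            raise AttributeError(name) from e


# Assumption: get_cpu_setting() only reads job.threads and job.resources
# on this mock object.
job = SimpleNamespace(
    name="example_rule",
    threads=4,
    resources=MockResources(runtime=30, mem_mb=1800, tasks=1),
)

# Keys mirror the job_params dict built in Executor.run_job();
# account/partition are passed as ready-made argument strings.
params = {
    "run_uuid": "9b6e5b3a-example",
    "slurm_logfile": "/tmp/example.log",
    "comment_str": "rule_example_rule",
    "account": "-A some_account",
    "partition": "-p some_partition",
    "workdir": "/tmp/workdir",
}

print(get_submit_command(job, params))
# -> sbatch --parsable --job-name 9b6e5b3a-example --output "/tmp/example.log" ...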
--- a/snakemake_executor_plugin_slurm-1.1.0.dist-info/METADATA
+++ b/snakemake_executor_plugin_slurm-1.2.0.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: snakemake-executor-plugin-slurm
-Version: 1.1.0
+Version: 1.2.0
 Summary: A Snakemake executor plugin for submitting jobs to a SLURM cluster.
 License: MIT
 Keywords: snakemake,plugin,executor,cluster,slurm
--- /dev/null
+++ b/snakemake_executor_plugin_slurm-1.2.0.dist-info/RECORD
@@ -0,0 +1,7 @@
+snakemake_executor_plugin_slurm/__init__.py,sha256=EqMKNkKYVFeDfw2pwCnFKYxgKOGJazlDm658wvFvQN0,30942
+snakemake_executor_plugin_slurm/submit_string.py,sha256=sXzMm5SVNQ4upIOcsIZjUqj7khnG-lieo5yJSSus5sc,2483
+snakemake_executor_plugin_slurm/utils.py,sha256=ZzXiXFDVLs15PLJnDP0eq98fNCtzlLbhtT03ec8Ou34,3578
+snakemake_executor_plugin_slurm-1.2.0.dist-info/LICENSE,sha256=YVc4xTLWMqGfFL36120k7rzXtsT6e4RkJsh68VVn12s,1076
+snakemake_executor_plugin_slurm-1.2.0.dist-info/METADATA,sha256=rOpk-4_-aw3w-2X0POSy6rAvFZnPfzArN6MT9CuUxwA,1360
+snakemake_executor_plugin_slurm-1.2.0.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
+snakemake_executor_plugin_slurm-1.2.0.dist-info/RECORD,,
--- a/snakemake_executor_plugin_slurm-1.1.0.dist-info/WHEEL
+++ b/snakemake_executor_plugin_slurm-1.2.0.dist-info/WHEEL
@@ -1,4 +1,4 @@
 Wheel-Version: 1.0
-Generator: poetry-core 2.1.1
+Generator: poetry-core 2.1.2
 Root-Is-Purelib: true
 Tag: py3-none-any
--- a/snakemake_executor_plugin_slurm-1.1.0.dist-info/RECORD
+++ /dev/null
@@ -1,6 +0,0 @@
-snakemake_executor_plugin_slurm/__init__.py,sha256=yNz8JRS5jDcY4Jyr16Fvk5afGMDyYAEHuoackPcK-MI,32142
-snakemake_executor_plugin_slurm/utils.py,sha256=ZzXiXFDVLs15PLJnDP0eq98fNCtzlLbhtT03ec8Ou34,3578
-snakemake_executor_plugin_slurm-1.1.0.dist-info/LICENSE,sha256=YVc4xTLWMqGfFL36120k7rzXtsT6e4RkJsh68VVn12s,1076
-snakemake_executor_plugin_slurm-1.1.0.dist-info/METADATA,sha256=0h-JOJUxaORswgyMb18PpTcvGlI1lrpUUSXR9h8kBWk,1360
-snakemake_executor_plugin_slurm-1.1.0.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
-snakemake_executor_plugin_slurm-1.1.0.dist-info/RECORD,,