snakemake-executor-plugin-slurm 0.1.4__tar.gz → 0.2.0__tar.gz
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry; it is provided for informational purposes only.
Potentially problematic release.
This version of snakemake-executor-plugin-slurm might be problematic.
- {snakemake_executor_plugin_slurm-0.1.4 → snakemake_executor_plugin_slurm-0.2.0}/PKG-INFO +1 -1
- {snakemake_executor_plugin_slurm-0.1.4 → snakemake_executor_plugin_slurm-0.2.0}/pyproject.toml +1 -1
- {snakemake_executor_plugin_slurm-0.1.4 → snakemake_executor_plugin_slurm-0.2.0}/snakemake_executor_plugin_slurm/__init__.py +11 -5
- {snakemake_executor_plugin_slurm-0.1.4 → snakemake_executor_plugin_slurm-0.2.0}/LICENSE +0 -0
- {snakemake_executor_plugin_slurm-0.1.4 → snakemake_executor_plugin_slurm-0.2.0}/README.md +0 -0
snakemake_executor_plugin_slurm/__init__.py
@@ -68,8 +68,6 @@ class Executor(RemoteExecutor):
         # with job_info being of type
         # snakemake_interface_executor_plugins.executors.base.SubmittedJobInfo.
 
-        jobid = job.jobid
-
         log_folder = f"group_{job.name}" if job.is_group() else f"rule_{job.name}"
 
         slurm_logfile = os.path.abspath(f".snakemake/slurm_logs/{log_folder}/%j.log")
@@ -78,7 +76,10 @@ class Executor(RemoteExecutor):
         # generic part of a submission string:
         # we use a run_uuid as the job-name, to allow `--name`-based
         # filtering in the job status checks (`sacct --name` and `squeue --name`)
-        call =
+        call = (
+            f"sbatch --job-name {self.run_uuid} --output {slurm_logfile} --export=ALL "
+            f"--comment {job.name}"
+        )
 
         call += self.get_account_arg(job)
         call += self.get_partition_arg(job)
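To see the whole submission path in one place, here is a minimal sketch of how a call string like the one above would be built and executed. The `Job` stand-in, the `submit_sketch` name, and the subprocess handling are assumptions for illustration; only the sbatch flags and the stdout parsing come from this diff.

```python
import subprocess
from dataclasses import dataclass


@dataclass
class Job:
    # hypothetical stand-in for the Snakemake job object used above
    name: str


def submit_sketch(run_uuid: str, slurm_logfile: str, job: Job) -> str:
    # Assemble the generic part of the submission string, as in the hunk above;
    # the real executor then appends account/partition arguments via helpers.
    call = (
        f"sbatch --job-name {run_uuid} --output {slurm_logfile} --export=ALL "
        f"--comment {job.name}"
    )
    # sbatch prints e.g. "Submitted batch job 4711"; the SLURM job id is the
    # last whitespace-separated token (cf. out.split(" ")[-1] further down).
    out = subprocess.check_output(call, shell=True, text=True).strip()
    return out.split(" ")[-1]
```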
@@ -149,7 +150,7 @@ class Executor(RemoteExecutor):
         slurm_jobid = out.split(" ")[-1]
         slurm_logfile = slurm_logfile.replace("%j", slurm_jobid)
         self.logger.info(
-            f"Job {jobid} has been submitted with SLURM jobid {slurm_jobid} "
+            f"Job {job.jobid} has been submitted with SLURM jobid {slurm_jobid} "
             f"(log: {slurm_logfile})."
         )
         self.report_job_submission(
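Note how this one-line fix follows from the first hunk: with the local alias `jobid = job.jobid` removed, the log message must read the id off the job object directly; the old f-string would otherwise raise a NameError at submission time.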
@@ -209,7 +210,7 @@ class Executor(RemoteExecutor):
            (status_of_jobs, sacct_query_duration) = await self.job_stati(
                # -X: only show main job, no substeps
                f"sacct -X --parsable2 --noheader --format=JobIdRaw,State "
-                f"--name {self.run_uuid}"
+                f"--starttime now-2days --endtime now --name {self.run_uuid}"
            )
            if status_of_jobs is None and sacct_query_duration is None:
                self.logger.debug(f"could not check status of job {self.run_uuid}")
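The added `--starttime now-2days --endtime now` matters because, without an explicit start time, sacct only reports jobs since 00:00 of the current day, so long-running workflows would lose sight of jobs submitted before midnight. A minimal sketch of such a status query follows; the function name and the subprocess handling are assumptions, the sacct flags are the ones from the hunk above.

```python
import subprocess


def sacct_status_sketch(run_uuid: str) -> dict[str, str]:
    # -X: only the main job, no substeps; --parsable2: pipe-delimited output
    # without a trailing delimiter; --noheader: drop the header row; the
    # explicit time window keeps jobs older than today's midnight visible.
    cmd = (
        f"sacct -X --parsable2 --noheader --format=JobIdRaw,State "
        f"--starttime now-2days --endtime now --name {run_uuid}"
    )
    out = subprocess.check_output(cmd, shell=True, text=True)
    # Each line looks like "4711|RUNNING"; map raw job id -> state.
    return dict(line.split("|", 1) for line in out.strip().splitlines() if line)
```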
@@ -220,6 +221,10 @@ class Executor(RemoteExecutor):
            active_jobs_ids_with_current_sacct_status = (
                set(status_of_jobs.keys()) & active_jobs_ids
            )
+            self.logger.debug(
+                f"active_jobs_ids_with_current_sacct_status are: "
+                f"{active_jobs_ids_with_current_sacct_status}"
+            )
            active_jobs_seen_by_sacct = (
                active_jobs_seen_by_sacct
                | active_jobs_ids_with_current_sacct_status
@@ -231,6 +236,7 @@ class Executor(RemoteExecutor):
                active_jobs_seen_by_sacct
                - active_jobs_ids_with_current_sacct_status
            )
+            self.logger.debug(f"missing_sacct_status are: {missing_sacct_status}")
            if not missing_sacct_status:
                break
            if i >= status_attempts - 1:
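Taken together, the two new debug lines expose the set bookkeeping that the surrounding retry loop depends on. Here is a condensed sketch of that logic, with `poll_sacct` as a hypothetical stand-in for the `job_stati` query above:

```python
import time


def wait_for_sacct(active_jobs_ids: set[str], poll_sacct,
                   status_attempts: int = 5) -> dict[str, str]:
    # sacct can lag behind sbatch, so freshly submitted jobs may be missing
    # from its output at first; the loop retries until nothing is missing.
    status_of_jobs: dict[str, str] = {}
    active_jobs_seen_by_sacct: set[str] = set()
    for i in range(status_attempts):
        status_of_jobs = poll_sacct()  # hypothetical: returns {job id: state}
        active_jobs_ids_with_current_sacct_status = (
            set(status_of_jobs.keys()) & active_jobs_ids
        )
        active_jobs_seen_by_sacct |= active_jobs_ids_with_current_sacct_status
        # Jobs sacct has reported before but not in this round:
        missing_sacct_status = (
            active_jobs_seen_by_sacct - active_jobs_ids_with_current_sacct_status
        )
        if not missing_sacct_status:
            break
        time.sleep(1)  # illustrative pause between attempts
    return status_of_jobs
```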
{snakemake_executor_plugin_slurm-0.1.4 → snakemake_executor_plugin_slurm-0.2.0}/LICENSE: file without changes
{snakemake_executor_plugin_slurm-0.1.4 → snakemake_executor_plugin_slurm-0.2.0}/README.md: file without changes