brainforge-cli 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- brainforge_cli-0.1.0/PKG-INFO +14 -0
- brainforge_cli-0.1.0/README.md +42 -0
- brainforge_cli-0.1.0/brainforge/__init__.py +5 -0
- brainforge_cli-0.1.0/brainforge/auth.py +76 -0
- brainforge_cli-0.1.0/brainforge/cli.py +168 -0
- brainforge_cli-0.1.0/brainforge/job_logs.py +84 -0
- brainforge_cli-0.1.0/brainforge/job_status.py +88 -0
- brainforge_cli-0.1.0/brainforge/run.py +277 -0
- brainforge_cli-0.1.0/brainforge/template.py +72 -0
- brainforge_cli-0.1.0/brainforge_cli.egg-info/PKG-INFO +14 -0
- brainforge_cli-0.1.0/brainforge_cli.egg-info/SOURCES.txt +15 -0
- brainforge_cli-0.1.0/brainforge_cli.egg-info/dependency_links.txt +1 -0
- brainforge_cli-0.1.0/brainforge_cli.egg-info/entry_points.txt +3 -0
- brainforge_cli-0.1.0/brainforge_cli.egg-info/requires.txt +4 -0
- brainforge_cli-0.1.0/brainforge_cli.egg-info/top_level.txt +1 -0
- brainforge_cli-0.1.0/setup.cfg +4 -0
- brainforge_cli-0.1.0/setup.py +23 -0
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: brainforge-cli
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Command-line interface for Brainforge
|
|
5
|
+
Author: Daniel Trinh
|
|
6
|
+
Requires-Python: >=3.10
|
|
7
|
+
Requires-Dist: requests
|
|
8
|
+
Requires-Dist: click
|
|
9
|
+
Requires-Dist: PyYAML
|
|
10
|
+
Requires-Dist: globus-sdk
|
|
11
|
+
Dynamic: author
|
|
12
|
+
Dynamic: requires-dist
|
|
13
|
+
Dynamic: requires-python
|
|
14
|
+
Dynamic: summary
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
# Brainforge CLI
|
|
2
|
+
|
|
3
|
+
Command-line interface for Brainforge V3.
|
|
4
|
+
|
|
5
|
+
## Local Installation (Development)
|
|
6
|
+
|
|
7
|
+
To install the CLI for local development with real-time updates:
|
|
8
|
+
|
|
9
|
+
```bash
|
|
10
|
+
pip install -e .
|
|
11
|
+
```
|
|
12
|
+
|
|
13
|
+
## Publishing to PyPI
|
|
14
|
+
|
|
15
|
+
If you want others to install the CLI simply via `pip install brainforge-cli`, follow these steps:
|
|
16
|
+
|
|
17
|
+
### 1. Install Publishing Tools
|
|
18
|
+
You will need Python's official packaging tools. Install them globally:
|
|
19
|
+
```bash
|
|
20
|
+
pip install --upgrade build twine
|
|
21
|
+
```
|
|
22
|
+
|
|
23
|
+
### 2. Build the Package
|
|
24
|
+
From inside the `brainforge-cli` folder (where `setup.py` is), run:
|
|
25
|
+
```bash
|
|
26
|
+
python -m build
|
|
27
|
+
```
|
|
28
|
+
*(This generates two files inside a new `dist/` folder: a `.whl` and a `.tar.gz`)*
|
|
29
|
+
|
|
30
|
+
### 3. Upload to PyPI
|
|
31
|
+
You need an account on [PyPI (Python Package Index)](https://pypi.org/).
|
|
32
|
+
- Register an account.
|
|
33
|
+
- Go to your Account Settings and [create an API token](https://pypi.org/manage/account/token/).
|
|
34
|
+
- Upload your build using `twine`:
|
|
35
|
+
|
|
36
|
+
```bash
|
|
37
|
+
twine upload dist/*
|
|
38
|
+
```
|
|
39
|
+
|
|
40
|
+
*(When prompted for a username, type `__token__`. For the password, paste your PyPI API token!)*
|
|
41
|
+
|
|
42
|
+
> **Note**: If the name `brainforge-cli` is already taken by someone else on PyPI, you will need to rename your package in `setup.py` (e.g. `name="brainforge-v3"`).
|
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import os
|
|
3
|
+
|
|
4
|
+
import click
|
|
5
|
+
import globus_sdk
|
|
6
|
+
|
|
7
|
+
# The Client ID should be replaced with a real Globus Native App Client ID
# registered in the Globus Developer Console.
# Default to an environment variable if provided.
# NOTE(review): the fallback UUID below ships with the published package —
# confirm it is a non-secret *native app* client ID (native-app IDs are
# public by design, but verify this one is registered as such).
CLIENT_ID = os.environ.get("GLOBUS_CLIENT_ID", "64371d44-2d7f-4330-a7fb-046d03f1d418")
# Local token cache: written by login(), read by get_access_token(),
# deleted by logout().
CREDENTIALS_FILE = os.path.expanduser("~/.brainforge/credentials.json")
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def get_auth_client():
    """Return a Globus native-app auth client bound to this CLI's client ID."""
    client = globus_sdk.NativeAppAuthClient(CLIENT_ID)
    return client
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def login():
    """Run the Globus native-app OAuth2 flow and store tokens locally.

    Prints an authorization URL, prompts for the returned code, exchanges
    it for tokens, and writes them (keyed by resource server) to
    CREDENTIALS_FILE. Failures are reported to the console, never raised.
    """
    try:
        auth_client = get_auth_client()
        # The openid scope is enough to get a recognizable token back.
        auth_client.oauth2_start_flow(
            refresh_tokens=True,
            requested_scopes=globus_sdk.scopes.AuthScopes.openid,
        )

        click.echo("Please authenticate with Globus here:\n")
        click.secho(f"{auth_client.oauth2_get_authorize_url()}\n", fg="cyan")

        code = click.prompt("Please enter the authorization code you received")
        tokens = auth_client.oauth2_exchange_code_for_tokens(code.strip())

        # Persist the token dictionary keyed by resource server.
        os.makedirs(os.path.dirname(CREDENTIALS_FILE), exist_ok=True)
        with open(CREDENTIALS_FILE, 'w') as handle:
            json.dump(tokens.by_resource_server, handle)

        click.secho(
            f"Successfully authenticated! Credentials saved to {CREDENTIALS_FILE}",
            fg="green",
        )
    except globus_sdk.AuthAPIError as err:
        click.secho(f"Globus Auth failed: {err.message}", fg="red")
    except Exception as err:
        click.secho(f"Authentication failed: {err}", fg="red")
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def get_access_token():
    """
    Return the stored Globus access token, or None if it cannot be read.
    """
    if not os.path.exists(CREDENTIALS_FILE):
        return None

    try:
        with open(CREDENTIALS_FILE, 'r') as handle:
            stored = json.load(handle)
        # The Auth server's token is what the Django API expects.
        return stored.get("auth.globus.org", {}).get("access_token")
    except Exception:
        # Treat a corrupt/unreadable credentials file as "not logged in".
        return None
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
def logout():
    """
    Log the user out by deleting the locally stored credentials file.
    """
    if not os.path.exists(CREDENTIALS_FILE):
        click.secho("You are already logged out.", fg="yellow")
        return
    os.remove(CREDENTIALS_FILE)
    click.secho("Successfully logged out (Local credentials deleted).", fg="green")
|
|
@@ -0,0 +1,168 @@
|
|
|
1
|
+
import sys
|
|
2
|
+
|
|
3
|
+
import brainforge.auth as auth
|
|
4
|
+
import click
|
|
5
|
+
from brainforge.job_logs import fetch_logs
|
|
6
|
+
from brainforge.job_status import check_status
|
|
7
|
+
from brainforge.run import generate_sbatch_script
|
|
8
|
+
from brainforge.run import parse_spec
|
|
9
|
+
from brainforge.run import submit_job
|
|
10
|
+
from brainforge.run import validate_spec
|
|
11
|
+
from brainforge.template import generate_yaml_template
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
@click.group()
@click.version_option(version="0.1.0")
def main():
    """
    BrainForge CLI: The premier interface for executing neuroimaging analyses.

    Provides tools for generating SLURM job configurations, securely
    submitting arrays to the cluster, and tracking runtime statuses.
    """
    # Group entry point only; Click dispatches to the registered subcommands.
    pass
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
@main.command()
def info():
    """Display information about Brainforge CLI and its current version."""
    for message in (
        "Welcome to the Brainforge CLI v0.1.0",
        "Run 'brainforge --help' to see all available commands.",
    ):
        click.echo(message)
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
@main.command()
def login():
    """Authenticate with Globus."""
    # Delegates to the interactive OAuth2 flow in brainforge.auth.
    auth.login()
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
@main.command()
def logout():
    """Logout of Globus."""
    # Delegates to brainforge.auth, which removes the local credentials file.
    auth.logout()
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
@main.group()
def job():
    """
    Manage and monitor SLURM jobs submitted via BrainForge.

    Use this group to submit jobs, check their queue/execution status,
    or tail their logs live on the cluster.
    """
    # Container group only; run/status/logs/generate do the actual work.
    pass
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
@job.command(name='run')
@click.argument('spec_file', type=click.Path(exists=True, dir_okay=False), required=False)
@click.option(
    '--yaml',
    'yaml_str',
    help="Parse the YAML spec directly from a string instead of a file.",
)
@click.option(
    '--dry-run',
    is_flag=True,
    help="Generate and print the sbatch script without submitting to SLURM.",
)
def run_job(spec_file, yaml_str, dry_run):
    """
    Submit a SLURM job using a YAML specification file or string.

    Takes a SPEC_FILE containing `slurm` and `execution` blocks.
    Will automatically detect if this is a single job or a multiple job
    (array job) based on the presence of `participant` or `participants`.
    """
    import yaml

    # Resolve the spec source: an inline --yaml string takes precedence
    # over a file argument.
    if yaml_str:
        try:
            spec = yaml.safe_load(yaml_str)
        except Exception as exc:
            click.secho(f"Error parsing YAML string: {exc}", fg="red")
            sys.exit(1)
    elif spec_file:
        spec = parse_spec(spec_file)
    else:
        click.secho("Error: You must provide either a SPEC_FILE argument or a --yaml configuration.", fg="red")
        sys.exit(1)

    # Validate, render, then submit (or just print under --dry-run).
    validate_spec(spec)
    submit_job(generate_sbatch_script(spec), spec, dry_run=dry_run)
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
@job.command()
@click.argument('job_id')
@click.option('--json', 'as_json', is_flag=True, help="Output status in JSON format")
def status(job_id, as_json):
    """
    Check the queue and runtime status of a scheduled SLURM job.

    Uses `sacct` (for comprehensive history) and falls back to `squeue`
    for nascent jobs. Can lookup both active and completed jobs.
    """
    # Thin wrapper; job_status.check_status shells out to the SLURM tools.
    check_status(job_id, as_json=as_json)
|
|
110
|
+
|
|
111
|
+
|
|
112
|
+
@job.command()
@click.argument('job_id')
@click.option(
    '--type',
    '-t',
    'log_type',
    type=click.Choice(['output', 'error']),
    default='output',
    help="Type of logs to view (output or error).",
)
@click.option(
    '--follow',
    '-f',
    is_flag=True,
    help="Follow the log contents live.",
)
def logs(job_id, log_type, follow):
    """
    Tail or view the live standard logs of a SLURM job.

    Dynamically finds the output file SLURM is writing to using `scontrol`.
    By default it cats the standard output to the console. Use `--type error` for standard error,
    and `-f` to follow the stream live (like tail -f).
    """
    # Thin wrapper; path discovery and printing live in job_logs.fetch_logs.
    fetch_logs(job_id, log_type, follow=follow)
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
@job.command(name="generate")
@click.option(
    '--type',
    '-t',
    'job_type',  # map back to variable name
    prompt='Select job template type',
    type=click.Choice(['single', 'multiple']),
    default='single',
    help="Type of job template (single or multiple).",
)
@click.option(
    '--name',
    '-n',
    prompt='Enter output filename',
    default='template.yaml',
    help="Output filename for the generated template.",
)
def generate_cmd(job_type, name):
    """
    Generate a working YAML template for a BrainForge SLURM job.

    Creates a boilerplate .yaml file pre-filled with all required
    variables (`SINGULARITY_IMAGE`, `HOST_INPUT_DIR`, etc.) to run
    either a `single` job or an array job over `multiple` participants.
    """
    # Thin wrapper around template.generate_yaml_template.
    generate_yaml_template(job_type, name)
|
|
165
|
+
|
|
166
|
+
|
|
167
|
+
# Allow direct execution (e.g. `python brainforge/cli.py`) during development;
# installed entry points call main() via console_scripts instead.
if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,84 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import subprocess
|
|
3
|
+
|
|
4
|
+
import click
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
def _scontrol_log_paths(scontrol_output):
    """Extract the (StdOut, StdErr) paths from `scontrol show job` text.

    Either element is None when the corresponding key is absent.
    """
    stdout_path = None
    stderr_path = None
    # scontrol output lines often have multiple key=value pairs separated by spaces
    for token in scontrol_output.split():
        if token.startswith("StdOut="):
            stdout_path = token.split("=", 1)[1]
        elif token.startswith("StdErr="):
            stderr_path = token.split("=", 1)[1]
    return stdout_path, stderr_path


def fetch_logs(job_id, log_type, follow=False):
    """
    Fetch the StdOut or StdErr log path for a given active SLURM job
    and print its contents (or follow live) to the console.
    """
    try:
        cmd = ["scontrol", "show", "job", str(job_id)]
        res = subprocess.run(cmd, capture_output=True, text=True, check=True)

        stdout_path, stderr_path = _scontrol_log_paths(res.stdout)
        target_path = stdout_path if log_type == 'output' else stderr_path

        if not target_path or target_path == '/dev/null':
            click.secho(
                f"Could not determine the {log_type} file path from SLURM configuration.",
                fg="red",
            )
            return

        if not os.path.exists(target_path):
            click.secho(
                f"Log file does not exist yet (or was deleted):\n{target_path}",
                fg="yellow",
            )
            return

        if follow:
            click.secho(
                f"--- Following {log_type} logs for Job {job_id} ---",
                fg="cyan",
                bold=True,
            )
            click.secho(f"File: {target_path} (Ctrl+C to stop)\n", fg="cyan")
            try:
                subprocess.run(["tail", "-f", target_path])
            except KeyboardInterrupt:
                click.echo("\nStopped following.")
            return
        else:
            click.secho(
                f"--- Viewing {log_type} logs for Job {job_id} ---",
                fg="cyan",
                bold=True,
            )
            click.secho(f"File: {target_path}\n", fg="cyan")

            # errors="replace" keeps a partially written or binary-polluted
            # log from crashing the viewer with UnicodeDecodeError.
            with open(target_path, 'r', encoding="utf-8", errors="replace") as f:
                content = f.read()

            if not content.strip():
                click.echo("<File is currently empty>")
            else:
                click.echo(content)

    except subprocess.CalledProcessError:
        click.secho(f"Job {job_id} not found in active SLURM queue.", fg="red")
        click.echo(
            "Hint: SLURM only registers paths for currently active or very recently finished jobs."
        )
        click.echo(
            "If your job finished a while ago, check the 'logs/' folder in the directory where you submitted it."
        )
    except FileNotFoundError:
        click.secho(
            "Error: 'scontrol' command not found. Are you on a system with SLURM installed?",
            fg="red",
        )
|
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import subprocess
|
|
3
|
+
|
|
4
|
+
import click
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
def check_status(job_id, as_json=False):
    """
    Check the status of a SLURM job using sacct.
    If sacct returns empty (sometimes happens right after submission), fall back to squeue.
    """
    try:
        if as_json:
            # Machine-readable path: -P makes sacct emit pipe-delimited rows.
            cmd = ["sacct", "-j", str(job_id), "-X", "-P", "--format=JobID,State,ExitCode,End"]
            result = subprocess.run(cmd, capture_output=True, text=True, check=True)
            output = result.stdout.strip()
            lines = output.split('\n')
            # Line 0 is the header; line 1 (if present) is the job's own row.
            if len(lines) > 1:
                parts = lines[1].split('|')
                if len(parts) >= 4:
                    # State can carry a suffix (e.g. "CANCELLED by <uid>");
                    # keep only the first word.
                    click.echo(json.dumps({
                        "job_id": parts[0],
                        "state": parts[1].split(' ')[0],
                        "exit_code": parts[2],
                        "end_time": parts[3]
                    }))
                    return
            # fallback
            # -h suppresses squeue's header so any output is a real job row.
            cmd2 = ["squeue", "-j", str(job_id), "-O", "jobid,state", "-h"]
            res2 = subprocess.run(cmd2, capture_output=True, text=True)
            if res2.returncode == 0 and res2.stdout.strip():
                parts = res2.stdout.strip().split()
                if len(parts) >= 2:
                    # Pending/new jobs have no exit code or end time yet.
                    click.echo(json.dumps({
                        "job_id": parts[0],
                        "state": parts[1],
                        "exit_code": "",
                        "end_time": ""
                    }))
                    return

            # Nothing known about this job id: emit an empty JSON object.
            click.echo(json.dumps({}))
            return

        # -X only shows the main allocation, hiding duplicate .batch and .extern steps
        cmd = ["sacct", "-j", str(job_id), "-X"]
        result = subprocess.run(cmd, capture_output=True, text=True, check=True)

        output = result.stdout.strip()

        # sacct prints headers even if empty, but let's check if there are no real data lines
        lines = output.split('\n')

        # Usually sacct has 2 header lines. If there are <= 2 lines, it might not have loaded
        if len(lines) <= 2:
            # Fallback to squeue for very nascent jobs
            cmd2 = ["squeue", "-j", str(job_id)]
            try:
                res2 = subprocess.run(cmd2, capture_output=True, text=True, check=True)
                queue_output = res2.stdout.strip()
                if queue_output:
                    click.secho(
                        f"--- Status for SLURM Job: {job_id} (squeue) ---",
                        fg="cyan",
                        bold=True,
                    )
                    click.echo(queue_output)
                    return
            except subprocess.CalledProcessError:
                # squeue errors for unknown/finished jobs; fall through and
                # show whatever sacct printed (headers only).
                pass

        click.secho(f"--- Status for SLURM Job: {job_id} ---", fg="cyan", bold=True)
        click.echo(output)

    except subprocess.CalledProcessError as e:
        if as_json:
            click.echo(json.dumps({"error": f"Failed to query status: {e.stderr}"}))
            return
        click.secho(f"Failed to query status for job ID {job_id}.", fg="red")
        click.echo(e.stderr)
    except FileNotFoundError:
        if as_json:
            click.echo(json.dumps({"error": "sacct command not found"}))
            return
        click.secho(
            "Error: 'sacct' command not found. Are you on a system with SLURM installed?",
            fg="red",
        )
|
|
@@ -0,0 +1,277 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import subprocess
|
|
3
|
+
import sys
|
|
4
|
+
import tempfile
|
|
5
|
+
|
|
6
|
+
import click
|
|
7
|
+
import requests
|
|
8
|
+
import yaml
|
|
9
|
+
from brainforge.auth import get_access_token
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def parse_spec(filepath):
    """Load a YAML spec file; exit the CLI with an error if it cannot be read."""
    try:
        with open(filepath, 'r') as handle:
            return yaml.safe_load(handle)
    except Exception as exc:
        click.secho(f"Error reading YAML file: {exc}", fg="red")
        sys.exit(1)
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def _fail(message):
    """Print a validation error in red and exit the CLI with status 1."""
    click.secho(message, fg="red")
    sys.exit(1)


def validate_spec(spec):
    """
    Validates that the provided YAML spec has the required structure and fields.
    Exits the CLI with an error if validation fails.

    Requires a `slurm` dict with the standard sbatch parameters, and an
    `execution` dict with a command, a singularity module, the three
    container env vars, and exactly one of `participant`/`participants`.
    """
    if not isinstance(spec, dict):
        _fail("Error: YAML spec must be a dictionary at the top level.")

    # --- slurm section ---
    if 'slurm' not in spec:
        _fail("Error: Missing required 'slurm' section in the spec file.")
    if not isinstance(spec['slurm'], dict):
        _fail("Error: The 'slurm' section must be a dictionary.")

    required_slurm_keys = [
        'job-name',
        'partition',
        'account',
        'time',
        'mem',
        'cpus-per-task',
        'output',
        'error',
    ]
    for key in required_slurm_keys:
        if key not in spec['slurm']:
            _fail(f"Error: Missing required slurm parameter: '{key}'.")

    # --- execution section ---
    if 'execution' not in spec:
        _fail("Error: Missing required 'execution' section in the spec file.")
    execution = spec['execution']
    if not isinstance(execution, dict):
        _fail("Error: The 'execution' section must be a dictionary.")

    if 'command' not in execution or not execution['command']:
        _fail("Error: The 'execution' block must contain a 'command' string to run.")

    modules = execution.get('modules', [])
    if not isinstance(modules, list):
        _fail("Error: 'execution.modules' must be a list.")

    # str() guards against non-string YAML entries (e.g. bare numbers),
    # which previously raised AttributeError on .startswith.
    has_singularity = any(
        str(m) == 'singularity' or str(m).startswith('singularity/') for m in modules
    )
    if not has_singularity:
        _fail(
            "Error: 'execution.modules' must contain 'singularity' or specify a version (e.g., 'singularity/3.10.2')."
        )

    env_vars = execution.get('env', {})
    if not isinstance(env_vars, dict):
        _fail("Error: 'execution.env' must be a dictionary.")

    for key in ['SINGULARITY_IMAGE', 'HOST_INPUT_DIR', 'HOST_OUTPUT_DIR']:
        if key not in env_vars:
            _fail(f"Error: Missing required execution.env parameter: '{key}'.")

    # --- participant(s): exactly one of the two forms ---
    participants = execution.get('participants')
    participant = execution.get('participant')

    if participants is not None and participant is not None:
        _fail("Error: Cannot specify both 'participant' and 'participants'. Choose one.")

    if participants is None and participant is None:
        _fail(
            "Error: You must specify either 'participant' (for single jobs) or 'participants' (for array jobs)."
        )

    if participants is not None:
        if not isinstance(participants, list) or len(participants) == 0:
            _fail("Error: 'participants' must be a non-empty list of subject identifiers.")

    if participant is not None:
        # Checking the stripped string handles empty values intuitively (e.g. "").
        if not str(participant).strip():
            _fail("Error: 'participant' must be a valid, non-empty identifier.")
|
|
133
|
+
|
|
134
|
+
|
|
135
|
+
def generate_sbatch_script(spec):
    """Render a complete sbatch script from a validated spec dictionary.

    Emits one #SBATCH directive per `slurm` entry, an optional job-array
    mapping over `execution.participants`, `module load` lines, exported
    `execution.env` variables, and finally the `execution.command`.

    Returns the script as a single newline-joined string.
    """
    import shlex

    lines = ["#!/bin/bash"]

    slurm_config = spec.get('slurm', {})
    execution = spec.get('execution', {})

    # 1. #SBATCH directives. True -> bare flag (e.g. --exclusive);
    #    False -> drop the flag entirely.
    for key, value in slurm_config.items():
        if value is True:
            lines.append(f"#SBATCH --{key}")
        elif value is False:
            continue
        else:
            lines.append(f"#SBATCH --{key}={value}")

    # 2. Array bounds when running over multiple participants.
    participants = execution.get('participants', [])
    if participants:
        lines.append(f"#SBATCH --array=0-{len(participants) - 1}")

    lines.append("")
    lines.append("# --- Auto-generated by BrainForge CLI ---")
    lines.append("")

    # 3. Map the array index (or the single participant) to BFG_PARTICIPANT.
    if participants:
        lines.append('BFG_PARTICIPANTS=(')
        for item in participants:
            # shlex.quote actually makes the identifier shell-safe; bare
            # double quotes still allowed $-expansion and quote injection.
            lines.append(f'  {shlex.quote(str(item))}')
        lines.append(')')
        lines.append('BFG_PARTICIPANT=${BFG_PARTICIPANTS[$SLURM_ARRAY_TASK_ID]}')
        lines.append('echo "BrainForge Task running for participant: $BFG_PARTICIPANT"')
        lines.append("")
    elif 'participant' in execution:
        # Singular participant definition
        lines.append(f'BFG_PARTICIPANT={shlex.quote(str(execution["participant"]))}')
        lines.append('echo "BrainForge Task running for participant: $BFG_PARTICIPANT"')
        lines.append("")

    # 4. Environment modules.
    modules = execution.get('modules', [])
    if modules:
        for mod in modules:
            lines.append(f"module load {mod}")
        lines.append("")

    # 5. Environment variables. Double quotes are kept deliberately so
    #    values like "$HOME/data" still expand, as before.
    env_vars = execution.get('env', {})
    if env_vars:
        for key, val in env_vars.items():
            lines.append(f'export {key}="{val}"')
        lines.append("")

    # 6. The actual command to run.
    cmd = execution.get('command', '')
    if cmd:
        lines.append(cmd)

    return "\n".join(lines)
|
|
199
|
+
|
|
200
|
+
|
|
201
|
+
def submit_job(script_content, spec, dry_run=False):
    """Optionally register the run with the backend API, then submit the
    script to SLURM via sbatch (or just print it when dry_run is set).

    An `api` block in the spec containing `raw_image_id` and `spec_id`
    triggers registration against the BrainForge backend, authenticated
    with the locally stored Globus access token.
    """
    # Optional Database Logging via API
    api_config = spec.get('api', {})
    if 'raw_image_id' in api_config and 'spec_id' in api_config:
        base_url = api_config.get('base_url', 'http://localhost:8000/api')
        token = get_access_token()
        if not token:
            click.secho(
                "Error: API config found but you are not logged in. Run 'brainforge login' first.",
                fg="red",
            )
            # A dry run may proceed without credentials; a real submit may not.
            if not dry_run:
                sys.exit(1)
        else:
            click.echo("Registering run with BrainForge Backend API...")
            headers = {
                "Authorization": f"Bearer {token}",
                "Content-Type": "application/json",
            }
            try:
                # Based on the serializers.py, it expects raw_image_id and spec_id
                resp = requests.post(
                    f"{base_url}/preprocessing_analyses/run_analysis/",
                    json={
                        "raw_image_id": api_config['raw_image_id'],
                        "spec_id": api_config['spec_id'],
                    },
                    headers=headers,
                    # Without a timeout an unresponsive backend hangs the CLI forever.
                    timeout=30,
                )

                if resp.status_code == 201:
                    click.secho(
                        "Successfully registered job in the backend database.",
                        fg="green",
                    )
                else:
                    click.secho(
                        f"Failed to register job. HTTP {resp.status_code}: {resp.text}",
                        fg="yellow",
                    )
            except Exception as e:
                # Registration is best-effort; submission continues regardless.
                click.secho(f"API request failed: {e}", fg="red")

    if dry_run:
        click.secho("--- DRY RUN: Generated batch script ---", fg="cyan")
        click.echo(script_content)
        click.secho("---------------------------------------", fg="cyan")
        return

    # Create a temporary file to hold the generated sbatch script
    fd, path = tempfile.mkstemp(suffix=".sh", prefix="bfg_run_")
    with os.fdopen(fd, 'w') as f:
        f.write(script_content)

    try:
        # Run sbatch on the temporary file
        result = subprocess.run(
            ['sbatch', path], capture_output=True, text=True, check=True
        )
        click.secho("Job submitted successfully!", fg="green")
        click.echo(result.stdout.strip())
    except subprocess.CalledProcessError as e:
        click.secho("Failed to submit job to SLURM:", fg="red")
        click.echo(e.stderr)
        sys.exit(1)
    except FileNotFoundError:
        click.secho(
            "Error: 'sbatch' command not found. Are you on a system with SLURM installed?",
            fg="red",
        )
        sys.exit(1)
    finally:
        # Clean up the temporary file
        try:
            os.remove(path)
        except OSError:
            pass
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
import os
|
|
2
|
+
|
|
3
|
+
import click
|
|
4
|
+
|
|
5
|
+
# YAML spec scaffold for a single-participant job, emitted by
# `brainforge job generate --type single`. Includes every slurm key that
# validate_spec() requires (notably `time`, previously missing, which made
# freshly generated templates fail validation).
SINGLE_JOB_TEMPLATE = """# Single Job Template for BrainForge CLI
slurm:
  job-name: "name"
  partition: "partition"
  account: "account"
  time: "01:00:00"
  nodes: 1
  ntasks: 1
  mem: "8G"
  cpus-per-task: 4
  output: "logs/job_%j.out"
  error: "logs/job_%j.err"

execution:
  modules:
    - singularity
  env:
    SINGULARITY_IMAGE: "/path/to/container.sif"
    HOST_INPUT_DIR: "/path/to/input"
    HOST_OUTPUT_DIR: "/path/to/output"
  participant: "sub-01"
  command: "echo 'Running Single Job for item $BFG_PARTICIPANT'"
"""

# YAML spec scaffold for an array job over several participants, emitted by
# `brainforge job generate --type multiple`.
MULTIPLE_JOB_TEMPLATE = """# Multiple Job Template for BrainForge CLI
slurm:
  job-name: "name"
  partition: "partition"
  account: "account"
  time: "01:00:00"
  nodes: 1
  ntasks: 1
  mem: "8G"
  cpus-per-task: 4
  output: "logs/job_%A_%a.out"
  error: "logs/job_%A_%a.err"

execution:
  modules:
    - singularity
  env:
    SINGULARITY_IMAGE: "/path/to/container.sif"
    HOST_INPUT_DIR: "/path/to/input"
    HOST_OUTPUT_DIR: "/path/to/output"
  participants:
    - "sub-01"
    - "sub-02"
    - "sub-03"
  command: "echo 'Running Multiple Job for item $BFG_PARTICIPANT'"
"""
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def generate_yaml_template(job_type, output_name):
    """Write a boilerplate YAML job spec to `output_name`.

    `job_type` selects between the single-participant and the
    multi-participant (array) template; anything other than 'multiple'
    falls back to the single template. An existing file prompts for
    confirmation (and aborts on "no") before being overwritten.
    """
    content = MULTIPLE_JOB_TEMPLATE if job_type == 'multiple' else SINGLE_JOB_TEMPLATE

    # Ask before clobbering an existing file.
    if os.path.exists(output_name):
        click.confirm(f"File '{output_name}' already exists. Overwrite?", abort=True)

    with open(output_name, 'w') as handle:
        handle.write(content)

    click.secho(
        f"Successfully generated {job_type} template: {output_name}", fg="green"
    )
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: brainforge-cli
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Command-line interface for Brainforge
|
|
5
|
+
Author: Daniel Trinh
|
|
6
|
+
Requires-Python: >=3.10
|
|
7
|
+
Requires-Dist: requests
|
|
8
|
+
Requires-Dist: click
|
|
9
|
+
Requires-Dist: PyYAML
|
|
10
|
+
Requires-Dist: globus-sdk
|
|
11
|
+
Dynamic: author
|
|
12
|
+
Dynamic: requires-dist
|
|
13
|
+
Dynamic: requires-python
|
|
14
|
+
Dynamic: summary
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
README.md
|
|
2
|
+
setup.py
|
|
3
|
+
brainforge/__init__.py
|
|
4
|
+
brainforge/auth.py
|
|
5
|
+
brainforge/cli.py
|
|
6
|
+
brainforge/job_logs.py
|
|
7
|
+
brainforge/job_status.py
|
|
8
|
+
brainforge/run.py
|
|
9
|
+
brainforge/template.py
|
|
10
|
+
brainforge_cli.egg-info/PKG-INFO
|
|
11
|
+
brainforge_cli.egg-info/SOURCES.txt
|
|
12
|
+
brainforge_cli.egg-info/dependency_links.txt
|
|
13
|
+
brainforge_cli.egg-info/entry_points.txt
|
|
14
|
+
brainforge_cli.egg-info/requires.txt
|
|
15
|
+
brainforge_cli.egg-info/top_level.txt
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
brainforge
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
from setuptools import find_packages
from setuptools import setup

# Packaging metadata for the Brainforge CLI distribution.
setup(
    name="brainforge-cli",
    version="0.1.0",
    description="Command-line interface for Brainforge",
    author="Daniel Trinh",
    packages=find_packages(),
    install_requires=[
        "requests",
        "click",
        "PyYAML",
        "globus-sdk",
    ],
    entry_points={
        # Both `brainforge` and the short alias `bf` invoke the same Click group.
        "console_scripts": [
            "brainforge=brainforge.cli:main",
            "bf=brainforge.cli:main",
        ],
    },
    python_requires=">=3.10",
)
|