jazari 0.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
jazari/__init__.py ADDED
File without changes
@@ -0,0 +1,38 @@
1
+ import os, yaml
2
+ from pathlib import Path
3
+ from rich.console import Console
4
+ # Config helpers
5
+ # defining the path to the config file: ~/.jazari/config.yaml
6
+ CONFIG_DIR = os.path.join(Path.home(), '.jazari')
7
+ CONFIG_FILE = os.path.join(CONFIG_DIR, 'config.yaml')
8
+ console = Console()
9
+
10
def load_config():
    """Return the parsed YAML config as a dict.

    Returns {} when the config file is missing, empty, or unreadable.
    A read/parse failure is reported as a warning rather than raised,
    so the CLI still works without a config file.
    """
    if not os.path.exists(CONFIG_FILE):
        return {}
    try:
        with open(CONFIG_FILE, 'r') as f:
            parsed = yaml.safe_load(f)
    except Exception as e:
        console.print(f'[yellow]Warning: Could not read config file: {e}[/yellow]')
        return {}
    # safe_load returns None for an empty file; normalize to a dict.
    return parsed or {}
22
+
23
def get_config_default(key, CONFIG, default_value):
    """Look up *key* in the CONFIG mapping, falling back to *default_value*."""
    if key in CONFIG:
        return CONFIG[key]
    return default_value
28
+
29
def make_config_dir(new_config):
    """Persist *new_config* to CONFIG_FILE, creating CONFIG_DIR if needed.

    This helper only writes the file; user-facing confirmation output is
    left to the caller. (Previously it also printed a "Configuration
    saved!" message, which duplicated the identical message printed by
    the `init` command after calling this helper.)
    """
    # Create ~/.jazari if it doesn't exist yet.
    Path(CONFIG_DIR).mkdir(parents=True, exist_ok=True)

    # Serialize the config dict as block-style YAML for readability.
    with open(CONFIG_FILE, 'w') as f:
        yaml.dump(new_config, f, default_flow_style=False)
jazari/main.py ADDED
@@ -0,0 +1,329 @@
1
+ import typer, sys, os, tempfile, subprocess, netrc, shutil, getpass
2
+
3
+ from typing import Optional
4
+ from pathlib import Path
5
+
6
+ from rich.console import Console
7
+ from rich.syntax import Syntax
8
+ from rich.prompt import Prompt, Confirm
9
+ from rich.table import Table
10
+ from rich import box
11
+
12
+ from jazari.slurm_generator import generate_sbatch_script
13
+ from jazari.config_generator import load_config, get_config_default, make_config_dir, CONFIG_FILE
14
+
15
+ # Initializing Rich console for pretty printing
16
+ console = Console()
17
+
18
+ # Load config at start.
19
+ CONFIG = load_config()
20
+
21
+ # --- HELPER FUNCTIONS ---
22
+
23
def get_current_user() -> str:
    """Resolve the current username: $USER first, getpass as a fallback."""
    user = os.environ.get('USER')
    if user is None:
        # $USER is unset (e.g. some cron/daemon contexts); ask the OS.
        user = getpass.getuser()
    return user
29
+
30
def get_wandb_api_key() -> Optional[str]:
    """
    Attempt to read the W&B API key from the local ~/.netrc file.

    Returns:
        The API key registered for api.wandb.ai, or None when no
        ~/.netrc exists, no wandb entry is present, or the file cannot
        be parsed.
    """
    try:
        login_info = netrc.netrc().authenticators("api.wandb.ai")
        if login_info:
            username, _, api_key = login_info
            console.print(f"[dim]Found W&B API key for user: {username}[/dim]")
            return api_key
        return None
    except FileNotFoundError:
        # No ~/.netrc at all -- the user has never run `wandb login`.
        return None
    except Exception as e:
        # BUG FIX: the closing tag must match the opening tag
        # ([dim bold red] ... [/dim bold red]); the previous mismatched
        # [/dim] made Rich raise MarkupError instead of printing this warning.
        console.print(f"[dim bold red]Warning: Could not read W&B credentials: {e}[/dim bold red]")
        return None
47
+
48
+ # --- COMMAND FUNCTIONS (No decorators here) ---
49
+
50
def status_command():
    """
    View the status of active Slurm jobs for the current user.

    Renders `squeue` output as a color-coded Rich table. Exits with
    status 1 if squeue is not available on PATH.
    """
    # 1. Check that the squeue binary is available on this machine.
    if not shutil.which("squeue"):
        console.print("[bold red]Error:[/bold red] 'squeue' command not found.")
        console.print("Are you running this on a Slurm login node?")
        sys.exit(1)

    user = get_current_user()

    # 2. Define the squeue command
    # -u: Filter by user
    # -h: No header (we'll make our own)
    # -o: Custom format with '|' delimiter.
    #     %i=ID, %j=Name, %T=State, %M=Time Used, %D=Nodes, %R=Reason/Nodelist
    squeue_cmd = ["squeue", "-u", user, "-h", "-o", "%i|%j|%T|%M|%D|%R"]

    try:
        # 3. Run the command and capture output
        result = subprocess.run(squeue_cmd, check=True, capture_output=True, text=True)
        output_lines = result.stdout.strip().split('\n')

        # Remove empty lines if squeue returned nothing
        output_lines = [line for line in output_lines if line.strip()]

        if not output_lines:
            console.print(f"\n[dim]No active jobs found for user: [bold]{user}[/bold][/dim]\n")
            return

        # 4. Build the Rich Table
        table = Table(
            title=f"🐘 Cluster Activity for user: [bold green]{user}[/bold green]",
            box=box.ROUNDED,
            header_style="bold blue",
            expand=True
        )
        table.add_column("Job ID", style="cyan", no_wrap=True)
        table.add_column("Name")
        table.add_column("State")
        table.add_column("Time Used", justify="right")
        table.add_column("Nodes", justify="right")
        table.add_column("Nodelist / Reason", style="dim")

        # 5. Parse output and add rows
        for line in output_lines:
            try:
                job_id, name, state, time_used, nodes, reason = line.split('|')

                # Color-code the state: green=running, yellow=pending, red=anything else
                if state == "RUNNING":
                    state_formatted = f"[bold green]{state}[/bold green]"
                elif state == "PENDING":
                    state_formatted = f"[yellow]{state}[/yellow]"
                else:
                    state_formatted = f"[red]{state}[/red]"

                table.add_row(job_id, name, state_formatted, time_used, nodes, reason)
            except ValueError:
                # A row without exactly 6 '|'-separated fields (rare) is
                # skipped rather than crashing the whole listing.
                continue

        console.print(table)

    except subprocess.CalledProcessError as e:
        console.print(f"[bold red]Error running squeue:[/bold red] {e.stderr}")
117
+
118
def kill_command(
    job_id: Optional[str] = typer.Argument(None, help = 'Job ID to cancel.'),
    cancel_all: bool = typer.Option(False, '--all', help = 'Cancel ALL active jobs.')
    ):
    """Cancel a specific Slurm job, or every job owned by the current user."""
    if shutil.which("scancel") is None:
        console.print("[bold red]Error:[/bold red] 'scancel' command not found.")
        console.print("Are you running this on a Slurm login node?")
        sys.exit(1)

    user = get_current_user()

    if cancel_all:
        # Destructive path: require explicit confirmation first.
        confirmed = Confirm.ask(f"[bold red]⚠️ DANGER:[/bold red] Cancel ALL jobs for '{user}'?")
        if not confirmed:
            sys.exit(0)
        subprocess.run(["scancel", "-u", user], check=True)
        console.print("[bold green]💥 All jobs cancelled.[/bold green]")
    elif job_id:
        subprocess.run(["scancel", job_id], check=True)
        console.print(f"[bold green]💥 Signal sent to cancel job {job_id}.[/bold green]")
    else:
        # Neither a job ID nor --all was given.
        console.print("[yellow]Usage: jazari kill <JOB_ID> or jazari kill --all[/yellow]")
143
+
144
def logs_command(
    job_id: Optional[str] = typer.Argument(None, help="Job ID to view."),
    tail: bool = typer.Option(False, "--tail", "-f", help="Follow output live."),
    error: bool = typer.Option(False, "--error", "-e", help="View .err log instead of .out")
    ):
    """View or follow job logs."""
    log_dir = Path("logs")
    if not log_dir.exists():
        console.print("[red]Error: 'logs/' directory not found.[/red]")
        sys.exit(1)

    # Choose which stream to read: stderr (.err) or stdout (.out).
    suffix = ".err" if error else ".out"

    if job_id:
        # Look for any log file whose name ends with "<job_id><suffix>".
        matching = list(log_dir.glob(f"*{job_id}{suffix}"))
        if not matching:
            console.print(f"[red]No log found for Job {job_id}[/red]")
            sys.exit(1)
        target_file = matching[0]
    else:
        # No job ID given: fall back to the most recently modified log.
        files = list(log_dir.glob(f"*{suffix}"))
        if not files:
            console.print(f"[yellow]No {suffix} files found.[/yellow]")
            return
        target_file = sorted(files, key=os.path.getmtime)[-1]

    console.print(f"[dim]Viewing: {target_file}[/dim]")
    if tail:
        cmd = ["tail", "-f", str(target_file)]
    else:
        cmd = ["cat", str(target_file)]

    try:
        subprocess.run(cmd, check=True)
    except KeyboardInterrupt:
        # Ctrl-C while following a live log is a normal way to stop.
        console.print("\n[dim]Stopped.[/dim]")
180
+
181
def init_command():
    """
    Initialize Jazari config for this cluster.

    Interactively prompts for the default Slurm account, time limit, and
    W&B tracking preference, then saves them to ~/.jazari/config.yaml.
    Existing config values (if any) are offered as the prompt defaults.
    """
    console.print('[bold blue]Welcome to Jazari setup.[/bold blue]')
    console.print(f"This will create a configuration file at [dim]{CONFIG_FILE}[/dim]\n")

    # 1. Slurm account to charge jobs against.
    account = Prompt.ask(
        "Enter your default Slurm account (e.g., def-user)",
        default=CONFIG.get("account", "")
    )

    # 2. Default wall-clock time limit.
    time_limit = Prompt.ask(
        "Enter default time limit (D-HH:MM)",
        default=CONFIG.get("time", "01:00:00")
    )

    # 3. Whether W&B tracking is on by default.
    track_wandb = Confirm.ask(
        "Enable W&B tracking by default?",
        default=CONFIG.get("track_wandb", False)
    )

    make_config_dir({
        "account": account,
        "time": time_limit,
        "track_wandb": track_wandb
    })

    console.print("\n[bold green]✅ Configuration saved![/bold green]")
    console.print("You can now run jobs without specifying these flags.")
222
+
223
def run_command(
    # Default values are pulled dynamically from the loaded CONFIG
    # dictionary via the get_config_default helper.
    command: list[str] = typer.Argument(..., help="The command to run (e.g., python train.py --batch 64). Use '--' before it if it has flags."),
    nodes: int = typer.Option(get_config_default("nodes", CONFIG, 1), '--nodes', '-N', help='Number of nodes.'),
    gpus: int = typer.Option(get_config_default("gpus", CONFIG, 1), '--gpus', '-G', help='GPUs per node.'),
    cpus: int = typer.Option(get_config_default("cpus", CONFIG, 1), '--cpus', '-c', help='CPUs per task/GPU'),
    time: str = typer.Option(get_config_default("time", CONFIG, '01:00:00'), '--time', '-t', help='Time limit (D-HH:MM)'),
    name: str = typer.Option('jazari_run', '--name', '-n', help='Job name'),
    account: Optional[str] = typer.Option(get_config_default("account", CONFIG, None), "--account", "-A", help="Slurm account to charge."),
    track_wandb: bool = typer.Option(get_config_default("track_wandb", CONFIG, False), "--track-wandb", help="Auto-configure W&B."),
    push_to_hub: Optional[str] = typer.Option(None, "--push-to-hub", help="Hugging Face repo ID to upload model to (e.g. 'my-org/my-model')."),
    pull_data: Optional[str] = typer.Option(None, '--pull-data', help='Huggingface dataset ID to download.'),
    dry_run: bool = typer.Option(False, '--dry-run', help='Print sbatch script without submitting.'),
    ):
    '''
    Launch a distributed training job.

    Generates an sbatch script from the CLI options (and config-file
    defaults), then either prints it (--dry-run) or submits it via
    sbatch and reports the resulting job ID.
    '''
    if not command:
        console.print("[bold red]Error:[/bold red] You must provide a command to run.")
        sys.exit(1)

    # Slurm stdout/stderr logs land here; create it up front.
    # (exist_ok makes the prior os.path.exists() check redundant.)
    log_dir = "logs"
    os.makedirs(log_dir, exist_ok=True)

    full_command_str = ' '.join(command)

    # --- Weights & Biases ---
    wandb_key = None
    if track_wandb:
        console.print("[dim]W&B tracking enabled.[/dim]")
        wandb_key = get_wandb_api_key()
        if not wandb_key:
            console.print("[bold yellow]Warning:[/bold yellow] Could not find W&B API key locally.")
            console.print("Please run [green]wandb login[/green] on this machine first.")
    # -----------------

    console.print(f'[bold]🐘 Generating Slurm script for: {name}[/bold]')

    sbatch_content = generate_sbatch_script(
        nodes = nodes,
        gpus_per_node = gpus,
        cpus_per_task = cpus,
        time_limit = time,
        job_name = name,
        account_name = account,
        wandb_api_key = wandb_key,
        hf_repo_id = push_to_hub,
        hf_dataset_id = pull_data,
        user_command = full_command_str
    )

    if dry_run:
        console.print('\n[yellow]--- DRY RUN: Generated #SBATCH Script ---[/yellow]')
        syntax = Syntax(sbatch_content, 'bash', theme = 'monokai', line_numbers = True)
        console.print(syntax)
        console.print('[yellow]-----------------------------------------------[/yellow]')
        # BUG FIX: return explicitly so a dry run can never fall through
        # into the submission path below.
        return

    console.print("[dim]Submitting to Slurm scheduler...[/dim]")

    # --- Submission ---
    # Write the script to a temp file; leaving the `with` block closes
    # (and flushes) it before sbatch reads it.
    with tempfile.NamedTemporaryFile(mode='w+', suffix=".sh", delete=False) as temp_file:
        temp_script_path = temp_file.name
        temp_file.write(sbatch_content)

    try:
        result = subprocess.run(
            ["sbatch", temp_script_path],
            check=True,
            capture_output=True,
            text=True
        )
        # Extract the job ID from the output "Submitted batch job 12345"
        job_id = result.stdout.strip().split()[-1]
        console.print(f"[bold green]✅ Job submitted successfully![/bold green] (ID: [bold]{job_id}[/bold])")
        console.print(f"[dim]View logs: cat logs/{name}-{job_id}.out[/dim]")

    except subprocess.CalledProcessError as e:
        console.print("[bold red]❌ Failed to submit job.[/bold red]")
        console.print(f"Sbatch error: {e.stderr}")
    except FileNotFoundError:
        console.print("\n[bold red]❌ Error:[/bold red] 'sbatch' command not found.")
        console.print("Are you running this on a Slurm login node?")
    finally:
        # Always remove the temp script, whether submission succeeded or not.
        if os.path.exists(temp_script_path):
            os.unlink(temp_script_path)
310
+
311
+ # --- APP DEFINITION AND COMMAND REGISTRATION ---
312
+
313
+ # Initializing the Typer app
314
# Initializing the Typer app
app = typer.Typer(
    name="jazari",
    help="🐘 The orchestration layer for modern Slurm clusters.",
    add_completion=False,
    no_args_is_help=True
)

# Register each subcommand explicitly. Keeping the command functions
# decorator-free makes them plain importable functions.
for _cmd_name, _cmd_fn in (
    ("status", status_command),
    ("kill", kill_command),
    ("logs", logs_command),
    ("init", init_command),
    ("run", run_command),
):
    app.command(name=_cmd_name)(_cmd_fn)

if __name__ == "__main__":
    app()
@@ -0,0 +1,43 @@
1
+ import os
2
+ from typing import Optional
3
+ from jinja2 import Environment, FileSystemLoader
4
+
5
# Set up the Jinja environment to load templates from the package's
# "templates" folder. trim_blocks/lstrip_blocks strip the newlines and
# indentation around {% ... %} tags so rendered sbatch scripts stay clean.
base_dir = os.path.dirname(os.path.abspath(__file__))
template_dir = os.path.join(base_dir, "templates")
env = Environment(loader = FileSystemLoader(template_dir), trim_blocks = True, lstrip_blocks = True)
9
+
10
def generate_sbatch_script(
    nodes: int,
    gpus_per_node: int,
    cpus_per_task: int,
    time_limit: str,
    job_name: str,
    account_name: Optional[str],
    wandb_api_key: Optional[str],
    hf_repo_id: Optional[str],
    hf_dataset_id: Optional[str],
    user_command: str
    ):
    '''
    Render the master sbatch template with the given job parameters and
    return the resulting script text.
    '''
    # Build the template context explicitly. Note that the user command
    # is exposed to the template under the name 'user_command_script'.
    context = {
        "nodes": nodes,
        "gpus_per_node": gpus_per_node,
        "cpus_per_task": cpus_per_task,
        "time_limit": time_limit,
        "job_name": job_name,
        "account_name": account_name,
        "wandb_api_key": wandb_api_key,
        "hf_repo_id": hf_repo_id,
        "hf_dataset_id": hf_dataset_id,
        "user_command_script": user_command,
    }

    template = env.get_template('sbatch_master.sh.j2')
    return template.render(**context)
@@ -0,0 +1,171 @@
1
+ Metadata-Version: 2.4
2
+ Name: jazari
3
+ Version: 0.0.1
4
+ Summary: The orchestration layer for modern Slurm clusters.
5
+ Author-email: Levent Ozbek <levent@jazari.run>
6
+ License: MIT
7
+ Requires-Python: >=3.8
8
+ Description-Content-Type: text/markdown
9
+ License-File: LICENSE
10
+ Requires-Dist: annotated-types==0.7.0
11
+ Requires-Dist: certifi==2025.11.12
12
+ Requires-Dist: charset-normalizer==3.4.4
13
+ Requires-Dist: click==8.3.1
14
+ Requires-Dist: gitdb==4.0.12
15
+ Requires-Dist: GitPython==3.1.45
16
+ Requires-Dist: idna==3.11
17
+ Requires-Dist: Jinja2==3.1.6
18
+ Requires-Dist: markdown-it-py==4.0.0
19
+ Requires-Dist: MarkupSafe==3.0.3
20
+ Requires-Dist: mdurl==0.1.2
21
+ Requires-Dist: packaging==25.0
22
+ Requires-Dist: platformdirs==4.5.0
23
+ Requires-Dist: pydantic==2.12.5
24
+ Requires-Dist: pydantic_core==2.41.5
25
+ Requires-Dist: Pygments==2.19.2
26
+ Requires-Dist: PyYAML==6.0.3
27
+ Requires-Dist: requests==2.32.5
28
+ Requires-Dist: rich==14.2.0
29
+ Requires-Dist: sentry-sdk==2.46.0
30
+ Requires-Dist: shellingham==1.5.4
31
+ Requires-Dist: smmap==5.0.2
32
+ Requires-Dist: typer==0.20.0
33
+ Requires-Dist: typing-inspection==0.4.2
34
+ Requires-Dist: typing_extensions==4.15.0
35
+ Requires-Dist: urllib3==2.5.0
36
+ Requires-Dist: wandb==0.17.0
37
+ Requires-Dist: huggingface_hub>=0.23.0
38
+ Dynamic: license-file
39
+
40
+ # 🐘 Jazari
41
+
42
+ **The orchestration layer for modern Slurm clusters.**
43
+
44
+ Jazari is a command-line tool that makes launching distributed AI/ML training jobs on high-performance computing (HPC) clusters as easy as running a script on your laptop.
45
+
46
+ It abstracts away the complexity of writing Slurm (`#SBATCH`) scripts, handling multi-node networking, and managing environment variables.
47
+
48
+ [![Python 3.10+](https://img.shields.io/badge/python-3.10+-blue.svg)](https://www.python.org/downloads/)
49
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
50
+
51
+ ---
52
+
53
+ ## Why Jazari?
54
+
55
+ If you are a researcher using a university or national supercomputer (like Compute Canada/Digital Alliance), you know the pain:
56
+
57
+ * **The Script Nightmare:** Copy-pasting old bash scripts, accidentally leaving in wrong parameters, and debugging obscure `sbatch` errors.
58
+ * **Networking Headaches:** Manually figuring out how to get PyTorch DDP to find the master node's IP address across multiple machines.
59
+ * **Experiment Fragmentation:** Having a 4-node training run spawn 4 separate experiments in Weights & Biases.
60
+ * **Slurm Arcana:** Remembering obscure flags for accounts, time formats, and memory allocation.
61
+
62
+ **Jazari solves this.** You write your Python training script, and Jazari handles the rest.
63
+
64
+ ---
65
+
66
+ ## ✨ Key Features
67
+
68
+ * **🚀 One-Command Launch:** Go from Python script to running distributed job with a single CLI command.
69
+ * **🤖 Automatic Slurm Generation:** Uses robust Jinja2 templates to generate correct, safe `#SBATCH` scripts on the fly.
70
+ * **🧠 Zero-Config DDP Networking:** Automatically resolves master IP, ports, and world size for PyTorch Distributed Data Parallel.
71
+ * **📈 Seamless Weights & Biases:** Securely propagates your local API key and ensures multi-node jobs log as a single, clean run.
72
+ * **⚙️ User "Init" Profiles:** Save your default account, time limits, and preferences once and never type them again.
73
+ * **🎨 Beautiful CLI:** Modern, color-coded output with rich error reporting.
74
+
75
+ ---
76
+
77
+ ## 🛠️ Installation
78
+
79
+ Jazari is designed to be installed in a virtual environment on your cluster's login node.
80
+
81
+ ```bash
82
+ # 1. Clone the repository
83
+ git clone https://github.com/levoz92/jazari.git
84
+ cd jazari
85
+
86
+ # 2. Create and activate a virtual environment (recommended)
87
+ python3 -m venv venv
88
+ source venv/bin/activate
89
+
90
+ # 3. Install in editable mode
91
+ pip install -e .
92
+ ```
93
+
94
+ Verify the installation:
95
+ ```bash
96
+ jazari --help
97
+ ```
98
+
99
+ ---
100
+
101
+ ## ⚡ Quick Start
102
+
103
+ ### 1. One-Time Setup
104
+
105
+ On your cluster login node, run the init command to save your defaults (like your allocation account).
106
+
107
+ ```bash
108
+ jazari init
109
+ ```
110
+
111
+ ### 2. Create a Python Script
112
+
113
+ Here is a minimal PyTorch DDP example (`train.py`):
114
+
115
+ ```python
116
+ import os
117
+ import torch
118
+ import torch.distributed as dist
119
+
120
+ def main():
121
+ # 1. Initialize Process Group (Jazari sets all necessary env vars)
122
+ dist.init_process_group(backend="nccl")
123
+
124
+ rank = dist.get_rank()
125
+ world_size = dist.get_world_size()
126
+ print(f"👋 Hello from rank {rank} of {world_size} on node {os.uname().nodename}!")
127
+
128
+ # Your training loop here...
129
+
130
+     # 2. Clean up
131
+ dist.destroy_process_group()
132
+
133
+ if __name__ == "__main__":
134
+ main()
135
+ ```
136
+
137
+ ### 3. Launch It!
138
+
139
+ Run your script on 2 nodes with 4 GPUs per node (8 GPUs total).
140
+
141
+ ```bash
142
+ jazari run -N 2 -G 4 --name "my-big-run" --track-wandb python train.py --batch-size 128
143
+ ```
144
+
145
+ **That's it.** Jazari will generate the script, submit it to Slurm, and stream the output.
146
+
147
+ ---
148
+
149
+ ## 📖 Usage Reference
150
+
151
+ ### `jazari init`
152
+ Interactively configure your default settings. These are saved to `~/.jazari/config.yaml`.
153
+
154
+ ### `jazari run [OPTIONS] COMMAND`
155
+
156
+ | Option | Shorthand | Description | Default |
157
+ | :--- | :--- | :--- | :--- |
158
+ | `--nodes` | `-N` | Number of compute nodes to request. | `1` (or config default) |
159
+ | `--gpus` | `-G` | Number of GPUs per node. | `1` (or config default) |
160
+ | `--cpus` | `-c` | Number of CPU cores per task. | `1` (or config default) |
161
+ | `--time` | `-t` | Time limit in `D-HH:MM` format (e.g., `0-02:30` for 2.5 hours). | `01:00:00` (or config default) |
162
+ | `--account`| `-A` | Slurm account to charge (e.g., `def-user`). | Config default |
163
+ | `--name` | `-n` | Name for the job in Slurm and W&B. | `jazari_run` |
164
+ | `--track-wandb` | | Auto-configure Weights & Biases environment. | `False` (or config default) |
165
+ | `--dry-run` | | Print the generated `#SBATCH` script to stdout without submitting. | `False` |
166
+
167
+ ---
168
+
169
+ ## 📄 License
170
+
171
+ This project is licensed under the [MIT License](LICENSE) - see the LICENSE file for details.
@@ -0,0 +1,10 @@
1
+ jazari/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
+ jazari/config_generator.py,sha256=BBg10poQLHIGy95frLdivNF4xhml6xeuWL7BWketm8g,1270
3
+ jazari/main.py,sha256=Xb2MSMDyCWm1QQobPK28u-NKeKmWWwa_HzjFMuyXWRQ,12636
4
+ jazari/slurm_generator.py,sha256=m5F76NqzMIwO4lL7MnyMWtuJTdj5Qv5qIN-P93CVKzM,1485
5
+ jazari-0.0.1.dist-info/licenses/LICENSE,sha256=OvKjjZV2nvR_v2QrhWR4TMgvtDmZYfZLFSt3bk1q6QA,1068
6
+ jazari-0.0.1.dist-info/METADATA,sha256=TqnlHeTjswRoosVf1-Xhg6t7vTIcYPvHaBjDBQ0rF2o,5826
7
+ jazari-0.0.1.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
8
+ jazari-0.0.1.dist-info/entry_points.txt,sha256=9vYDaHn1_jBw_cMNC9ZKl4vRk3n2aDQ5dHL0cM8tEfE,43
9
+ jazari-0.0.1.dist-info/top_level.txt,sha256=NY_Ke0lfKudUMc5w4rycAxzHKSgTUkhYqcmdFMYrya4,7
10
+ jazari-0.0.1.dist-info/RECORD,,
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: setuptools (80.10.2)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ jazari = jazari.main:app
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 Levent Ozbek
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1 @@
1
+ jazari