slurm-script-generator 0.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,240 @@
1
+ Metadata-Version: 2.4
2
+ Name: slurm-script-generator
3
+ Version: 0.1
4
+ Summary: Generate slurm scripts.
5
+ Author: Max
6
+ Project-URL: Source, https://github.com/max-models/slurm-script-generator
7
+ Keywords: python
8
+ Classifier: Development Status :: 3 - Alpha
9
+ Classifier: Programming Language :: Python :: 3 :: Only
10
+ Classifier: Programming Language :: Python :: 3.8
11
+ Classifier: Programming Language :: Python :: 3.9
12
+ Classifier: Programming Language :: Python :: 3.10
13
+ Classifier: Programming Language :: Python :: 3.11
14
+ Classifier: Programming Language :: Python :: 3.12
15
+ Classifier: Programming Language :: Python :: 3.13
16
+ Requires-Python: >=3.8
17
+ Description-Content-Type: text/markdown
18
+ Provides-Extra: dev
19
+ Requires-Dist: black; extra == "dev"
20
+ Requires-Dist: check-manifest; extra == "dev"
21
+ Requires-Dist: isort; extra == "dev"
22
+ Requires-Dist: pytest; extra == "dev"
23
+ Provides-Extra: test
24
+ Requires-Dist: coverage; extra == "test"
25
+
26
+ # Slurm script generator
27
+
28
+ ## Install
29
+
30
+ ```
31
+ pip install .
32
+ ```
33
+
34
+ ## Generate scripts
35
+
36
+ Generate a slurm script to `slurm_script.sh` with
37
+
38
+ ```bash
39
+ ❯ generate-slurm-script --nodes 1 --ntasks-per-node 16 --output slurm_script.sh
40
+ ❯ cat slurm_script.sh
41
+ #!/bin/bash
42
+ ##########################################
43
+ #SBATCH --nodes 1 # number of nodes on which to run
44
+ #SBATCH --ntasks_per_node 16 # number of tasks to invoke on each node
45
+ ##########################################
46
+ ```
47
+
48
+ To export the settings to a json file you can use `--export-json`:
49
+
50
+ ```bash
51
+ ❯ generate-slurm-script --nodes 2 --export-json setup.json
52
+ #!/bin/bash
53
+ ##########################################
54
+ #SBATCH --nodes 2 # number of nodes on which to run
55
+ ##########################################
56
+ ```
57
+
58
+ This json file can be used as a basis for creating new scripts
59
+
60
+ ```bash
61
+ ❯ generate-slurm-script --input setup.json --ntasks-per-node 16
62
+ #!/bin/bash
63
+ ##########################################
64
+ #SBATCH --nodes 2 # number of nodes on which to run
65
+ #SBATCH --ntasks_per_node 16 # number of tasks to invoke on each node
66
+ ##########################################
67
+ ```
68
+
69
+ ### Add modules
70
+
71
+ Add modules with
72
+
73
+ ```bash
74
+ ❯ generate-slurm-script --modules gcc/13 openmpi/5.0
75
+ #!/bin/bash
76
+ ##########################################
77
+ ##########################################
78
+
79
+ module purge # Purge modules
80
+ module load gcc/13 openmpi/5.0 # modules
81
+ module list # List loaded modules
82
+ ```
83
+
84
+ ### Add virtual environment
85
+
86
+ ```bash
87
+ ❯ generate-slurm-script --nodes 1 --ntasks-per-node 16 --venv ~/virtual_envs/env
88
+ args_dict.get("modules") = None
89
+ #!/bin/bash
90
+ ##########################################
91
+ #SBATCH --nodes 1 # number of nodes on which to run
92
+ #SBATCH --ntasks_per_node 16 # number of tasks to invoke on each node
93
+ ##########################################
94
+
95
+ source /Users/max/virtual_envs/env/bin/activate # virtual environment
96
+ ```
97
+
98
+ ### Other
99
+
100
+ All optional arguments can be shown with
101
+
102
+ ```bash
103
+ ❯ generate-slurm-script -h
104
+ usage: generate-slurm-script [-h] [-A NAME] [-b TIME] [--bell] [--no-bell] [--bb SPEC] [--bbf FILE_NAME] [-c NCPUS] [--comment NAME] [--container PATH] [--container-id ID]
105
+ [--cpu-freq MIN[-MAX[:GOV]]] [--delay-boot MINS] [-d TYPE:JOBID[:TIME]] [--deadline TIME] [-D PATH] [--get-user-env] [--gres LIST]
106
+ [--gres-flags OPTS] [-H] [-I [SECS]] [-J NAME] [-k] [-K [SIGNAL]] [-L NAMES] [-M NAMES] [-m TYPE] [--mail-type TYPE] [--mail-user USER]
107
+ [--mcs-label MCS] [-n N] [--nice [VALUE]] [-N NODES] [--ntasks-per-node N] [--oom-kill-step [0|1]] [-O] [--power FLAGS] [--priority VALUE]
108
+ [--profile VALUE] [-p PARTITION] [-q QOS] [-Q] [--reboot] [-s] [--signal [R:]NUM[@TIME]] [--spread-job] [--switches MAX_SWITCHES[@MAX_TIME]]
109
+ [-S CORES] [--thread-spec THREADS] [-t MINUTES] [--time-min MINUTES] [--tres-bind ...] [--tres-per-task LIST] [--use-min-nodes] [--wckey WCKEY]
110
+ [--cluster-constraint LIST] [--contiguous] [-C LIST] [-F FILENAME] [--mem MB] [--mincpus N] [--reservation NAME] [--tmp MB] [-w HOST [HOST ...]]
111
+ [-x HOST [HOST ...]] [--exclusive-user] [--exclusive-mcs] [--mem-per-cpu MB] [--resv-ports] [--sockets-per-node S] [--cores-per-socket C]
112
+ [--threads-per-core T] [-B S[:C[:T]]] [--ntasks-per-core N] [--ntasks-per-socket N] [--hint HINT] [--mem-bind BIND] [--cpus-per-gpu N] [-G N]
113
+ [--gpu-bind ...] [--gpu-freq ...] [--gpus-per-node N] [--gpus-per-socket N] [--gpus-per-task N] [--mem-per-gpu MEM_PER_GPU]
114
+ [--disable-stdout-job-summary] [--nvmps] [--line-length LINE_LENGHT] [--modules MODULES [MODULES ...]] [--venv VENV] [--printenv] [--print-self]
115
+ [--likwid] [--input INPUT_PATH] [--output OUTPUT_PATH] [--export-json JSON_PATH]
116
+
117
+ Slurm job submission options
118
+
119
+ options:
120
+ -h, --help show this help message and exit
121
+ -A, --account NAME charge job to specified account (default: None)
122
+ -b, --begin TIME defer job until HH:MM MM/DD/YY (default: None)
123
+ --bell ring the terminal bell when the job is allocated (default: False)
124
+ --no-bell do NOT ring the terminal bell (default: True)
125
+ --bb SPEC burst buffer specifications (default: None)
126
+ --bbf FILE_NAME burst buffer specification file (default: None)
127
+ -c, --cpus-per-task NCPUS
128
+ number of cpus required per task (default: None)
129
+ --comment NAME arbitrary comment (default: None)
130
+ --container PATH Path to OCI container bundle (default: None)
131
+ --container-id ID OCI container ID (default: None)
132
+ --cpu-freq MIN[-MAX[:GOV]]
133
+ requested cpu frequency (and governor) (default: None)
134
+ --delay-boot MINS delay boot for desired node features (default: None)
135
+ -d, --dependency TYPE:JOBID[:TIME]
136
+ defer job until condition on jobid is satisfied (default: None)
137
+ --deadline TIME remove the job if no ending possible before this deadline (default: None)
138
+ -D, --chdir PATH change working directory (default: None)
139
+ --get-user-env used by Moab. See srun man page (default: False)
140
+ --gres LIST required generic resources (default: None)
141
+ --gres-flags OPTS flags related to GRES management (default: None)
142
+ -H, --hold submit job in held state (default: False)
143
+ -I, --immediate [SECS]
144
+ exit if resources not available in "secs" (default: None)
145
+ -J, --job-name NAME name of job (default: None)
146
+ -k, --no-kill do not kill job on node failure (default: False)
147
+ -K, --kill-command [SIGNAL]
148
+ signal to send terminating job (default: None)
149
+ -L, --licenses NAMES required license, comma separated (default: None)
150
+ -M, --clusters NAMES Comma separated list of clusters to issue commands to (default: None)
151
+ -m, --distribution TYPE
152
+ distribution method for processes to nodes (default: None)
153
+ --mail-type TYPE notify on state change (default: None)
154
+ --mail-user USER who to send email notification for job state changes (default: None)
155
+ --mcs-label MCS mcs label if mcs plugin mcs/group is used (default: None)
156
+ -n, --ntasks N number of processors required (default: None)
157
+ --nice [VALUE] decrease scheduling priority by value (default: None)
158
+ -N, --nodes NODES number of nodes on which to run (default: None)
159
+ --ntasks-per-node N number of tasks to invoke on each node (default: None)
160
+ --oom-kill-step [0|1]
161
+ set the OOMKillStep behaviour (default: None)
162
+ -O, --overcommit overcommit resources (default: False)
163
+ --power FLAGS power management options (default: None)
164
+ --priority VALUE set the priority of the job (default: None)
165
+ --profile VALUE enable acct_gather_profile for detailed data (default: None)
166
+ -p, --partition PARTITION
167
+ partition requested (default: None)
168
+ -q, --qos QOS quality of service (default: None)
169
+ -Q, --quiet quiet mode (suppress informational messages) (default: False)
170
+ --reboot reboot compute nodes before starting job (default: False)
171
+ -s, --oversubscribe oversubscribe resources with other jobs (default: False)
172
+ --signal [R:]NUM[@TIME]
173
+ send signal when time limit within time seconds (default: None)
174
+ --spread-job spread job across as many nodes as possible (default: False)
175
+ --switches MAX_SWITCHES[@MAX_TIME]
176
+ optimum switches and max time to wait for optimum (default: None)
177
+ -S, --core-spec CORES
178
+ count of reserved cores (default: None)
179
+ --thread-spec THREADS
180
+ count of reserved threads (default: None)
181
+ -t, --time MINUTES time limit (default: None)
182
+ --time-min MINUTES minimum time limit (if distinct) (default: None)
183
+ --tres-bind ... task to tres binding options (default: None)
184
+ --tres-per-task LIST list of tres required per task (default: None)
185
+ --use-min-nodes if a range of node counts is given, prefer the smaller count (default: False)
186
+ --wckey WCKEY wckey to run job under (default: None)
187
+ --cluster-constraint LIST
188
+ specify a list of cluster constraints (default: None)
189
+ --contiguous demand a contiguous range of nodes (default: False)
190
+ -C, --constraint LIST
191
+ specify a list of constraints (default: None)
192
+ -F, --nodefile FILENAME
193
+ request a specific list of hosts (default: None)
194
+ --mem MB minimum amount of real memory (default: None)
195
+ --mincpus N minimum number of logical processors per node (default: None)
196
+ --reservation NAME allocate resources from named reservation (default: None)
197
+ --tmp MB minimum amount of temporary disk (default: None)
198
+ -w, --nodelist HOST [HOST ...]
199
+ request a specific list of hosts (default: None)
200
+ -x, --exclude HOST [HOST ...]
201
+ exclude a specific list of hosts (default: None)
202
+ --exclusive-user allocate nodes in exclusive mode for cpu consumable resource (default: False)
203
+ --exclusive-mcs allocate nodes in exclusive mode when mcs plugin is enabled (default: False)
204
+ --mem-per-cpu MB maximum amount of real memory per allocated cpu (default: None)
205
+ --resv-ports reserve communication ports (default: False)
206
+ --sockets-per-node S number of sockets per node to allocate (default: None)
207
+ --cores-per-socket C number of cores per socket to allocate (default: None)
208
+ --threads-per-core T number of threads per core to allocate (default: None)
209
+ -B, --extra-node-info S[:C[:T]]
210
+ combine request of sockets, cores and threads (default: None)
211
+ --ntasks-per-core N number of tasks to invoke on each core (default: None)
212
+ --ntasks-per-socket N
213
+ number of tasks to invoke on each socket (default: None)
214
+ --hint HINT Bind tasks according to application hints (default: None)
215
+ --mem-bind BIND Bind memory to locality domains (default: None)
216
+ --cpus-per-gpu N number of CPUs required per allocated GPU (default: None)
217
+ -G, --gpus N count of GPUs required for the job (default: None)
218
+ --gpu-bind ... task to gpu binding options (default: None)
219
+ --gpu-freq ... frequency and voltage of GPUs (default: None)
220
+ --gpus-per-node N number of GPUs required per allocated node (default: None)
221
+ --gpus-per-socket N number of GPUs required per allocated socket (default: None)
222
+ --gpus-per-task N number of GPUs required per spawned task (default: None)
223
+ --mem-per-gpu MEM_PER_GPU
224
+ real memory required per allocated GPU (default: None)
225
+ --disable-stdout-job-summary
226
+ disable job summary in stdout file for the job (default: False)
227
+ --nvmps launching NVIDIA MPS for job (default: False)
228
+ --line-length LINE_LENGHT
229
+ line length before start of comment (default: 40)
230
+ --modules MODULES [MODULES ...]
231
+ Modules to load (e.g., --modules mod1 mod2 mod3) (default: [])
232
+ --venv VENV virtual environment to load with `source VENV/bin/activate` (default: None)
233
+ --printenv print all environment variables (default: False)
234
+ --print-self print the batch script in the batch script (default: False)
235
+ --likwid Set up likwid environment variables (default: False)
236
+ --input INPUT_PATH path to input json file (default: None)
237
+ --output OUTPUT_PATH json path to save slurm batch script to (default: None)
238
+ --export-json JSON_PATH
239
+ path to export yaml for generating the slurm script to (default: None)
240
+ ```
@@ -0,0 +1,215 @@
1
+ # Slurm script generator
2
+
3
+ ## Install
4
+
5
+ ```
6
+ pip install .
7
+ ```
8
+
9
+ ## Generate scripts
10
+
11
+ Generate a slurm script to `slurm_script.sh` with
12
+
13
+ ```bash
14
+ ❯ generate-slurm-script --nodes 1 --ntasks-per-node 16 --output slurm_script.sh
15
+ ❯ cat slurm_script.sh
16
+ #!/bin/bash
17
+ ##########################################
18
+ #SBATCH --nodes 1 # number of nodes on which to run
19
+ #SBATCH --ntasks_per_node 16 # number of tasks to invoke on each node
20
+ ##########################################
21
+ ```
22
+
23
+ To export the settings to a json file you can use `--export-json`:
24
+
25
+ ```bash
26
+ ❯ generate-slurm-script --nodes 2 --export-json setup.json
27
+ #!/bin/bash
28
+ ##########################################
29
+ #SBATCH --nodes 2 # number of nodes on which to run
30
+ ##########################################
31
+ ```
32
+
33
+ This json file can be used as a basis for creating new scripts
34
+
35
+ ```bash
36
+ ❯ generate-slurm-script --input setup.json --ntasks-per-node 16
37
+ #!/bin/bash
38
+ ##########################################
39
+ #SBATCH --nodes 2 # number of nodes on which to run
40
+ #SBATCH --ntasks_per_node 16 # number of tasks to invoke on each node
41
+ ##########################################
42
+ ```
43
+
44
+ ### Add modules
45
+
46
+ Add modules with
47
+
48
+ ```bash
49
+ ❯ generate-slurm-script --modules gcc/13 openmpi/5.0
50
+ #!/bin/bash
51
+ ##########################################
52
+ ##########################################
53
+
54
+ module purge # Purge modules
55
+ module load gcc/13 openmpi/5.0 # modules
56
+ module list # List loaded modules
57
+ ```
58
+
59
+ ### Add virtual environment
60
+
61
+ ```bash
62
+ ❯ generate-slurm-script --nodes 1 --ntasks-per-node 16 --venv ~/virtual_envs/env
63
+ args_dict.get("modules") = None
64
+ #!/bin/bash
65
+ ##########################################
66
+ #SBATCH --nodes 1 # number of nodes on which to run
67
+ #SBATCH --ntasks_per_node 16 # number of tasks to invoke on each node
68
+ ##########################################
69
+
70
+ source /Users/max/virtual_envs/env/bin/activate # virtual environment
71
+ ```
72
+
73
+ ### Other
74
+
75
+ All optional arguments can be shown with
76
+
77
+ ```bash
78
+ ❯ generate-slurm-script -h
79
+ usage: generate-slurm-script [-h] [-A NAME] [-b TIME] [--bell] [--no-bell] [--bb SPEC] [--bbf FILE_NAME] [-c NCPUS] [--comment NAME] [--container PATH] [--container-id ID]
80
+ [--cpu-freq MIN[-MAX[:GOV]]] [--delay-boot MINS] [-d TYPE:JOBID[:TIME]] [--deadline TIME] [-D PATH] [--get-user-env] [--gres LIST]
81
+ [--gres-flags OPTS] [-H] [-I [SECS]] [-J NAME] [-k] [-K [SIGNAL]] [-L NAMES] [-M NAMES] [-m TYPE] [--mail-type TYPE] [--mail-user USER]
82
+ [--mcs-label MCS] [-n N] [--nice [VALUE]] [-N NODES] [--ntasks-per-node N] [--oom-kill-step [0|1]] [-O] [--power FLAGS] [--priority VALUE]
83
+ [--profile VALUE] [-p PARTITION] [-q QOS] [-Q] [--reboot] [-s] [--signal [R:]NUM[@TIME]] [--spread-job] [--switches MAX_SWITCHES[@MAX_TIME]]
84
+ [-S CORES] [--thread-spec THREADS] [-t MINUTES] [--time-min MINUTES] [--tres-bind ...] [--tres-per-task LIST] [--use-min-nodes] [--wckey WCKEY]
85
+ [--cluster-constraint LIST] [--contiguous] [-C LIST] [-F FILENAME] [--mem MB] [--mincpus N] [--reservation NAME] [--tmp MB] [-w HOST [HOST ...]]
86
+ [-x HOST [HOST ...]] [--exclusive-user] [--exclusive-mcs] [--mem-per-cpu MB] [--resv-ports] [--sockets-per-node S] [--cores-per-socket C]
87
+ [--threads-per-core T] [-B S[:C[:T]]] [--ntasks-per-core N] [--ntasks-per-socket N] [--hint HINT] [--mem-bind BIND] [--cpus-per-gpu N] [-G N]
88
+ [--gpu-bind ...] [--gpu-freq ...] [--gpus-per-node N] [--gpus-per-socket N] [--gpus-per-task N] [--mem-per-gpu MEM_PER_GPU]
89
+ [--disable-stdout-job-summary] [--nvmps] [--line-length LINE_LENGHT] [--modules MODULES [MODULES ...]] [--venv VENV] [--printenv] [--print-self]
90
+ [--likwid] [--input INPUT_PATH] [--output OUTPUT_PATH] [--export-json JSON_PATH]
91
+
92
+ Slurm job submission options
93
+
94
+ options:
95
+ -h, --help show this help message and exit
96
+ -A, --account NAME charge job to specified account (default: None)
97
+ -b, --begin TIME defer job until HH:MM MM/DD/YY (default: None)
98
+ --bell ring the terminal bell when the job is allocated (default: False)
99
+ --no-bell do NOT ring the terminal bell (default: True)
100
+ --bb SPEC burst buffer specifications (default: None)
101
+ --bbf FILE_NAME burst buffer specification file (default: None)
102
+ -c, --cpus-per-task NCPUS
103
+ number of cpus required per task (default: None)
104
+ --comment NAME arbitrary comment (default: None)
105
+ --container PATH Path to OCI container bundle (default: None)
106
+ --container-id ID OCI container ID (default: None)
107
+ --cpu-freq MIN[-MAX[:GOV]]
108
+ requested cpu frequency (and governor) (default: None)
109
+ --delay-boot MINS delay boot for desired node features (default: None)
110
+ -d, --dependency TYPE:JOBID[:TIME]
111
+ defer job until condition on jobid is satisfied (default: None)
112
+ --deadline TIME remove the job if no ending possible before this deadline (default: None)
113
+ -D, --chdir PATH change working directory (default: None)
114
+ --get-user-env used by Moab. See srun man page (default: False)
115
+ --gres LIST required generic resources (default: None)
116
+ --gres-flags OPTS flags related to GRES management (default: None)
117
+ -H, --hold submit job in held state (default: False)
118
+ -I, --immediate [SECS]
119
+ exit if resources not available in "secs" (default: None)
120
+ -J, --job-name NAME name of job (default: None)
121
+ -k, --no-kill do not kill job on node failure (default: False)
122
+ -K, --kill-command [SIGNAL]
123
+ signal to send terminating job (default: None)
124
+ -L, --licenses NAMES required license, comma separated (default: None)
125
+ -M, --clusters NAMES Comma separated list of clusters to issue commands to (default: None)
126
+ -m, --distribution TYPE
127
+ distribution method for processes to nodes (default: None)
128
+ --mail-type TYPE notify on state change (default: None)
129
+ --mail-user USER who to send email notification for job state changes (default: None)
130
+ --mcs-label MCS mcs label if mcs plugin mcs/group is used (default: None)
131
+ -n, --ntasks N number of processors required (default: None)
132
+ --nice [VALUE] decrease scheduling priority by value (default: None)
133
+ -N, --nodes NODES number of nodes on which to run (default: None)
134
+ --ntasks-per-node N number of tasks to invoke on each node (default: None)
135
+ --oom-kill-step [0|1]
136
+ set the OOMKillStep behaviour (default: None)
137
+ -O, --overcommit overcommit resources (default: False)
138
+ --power FLAGS power management options (default: None)
139
+ --priority VALUE set the priority of the job (default: None)
140
+ --profile VALUE enable acct_gather_profile for detailed data (default: None)
141
+ -p, --partition PARTITION
142
+ partition requested (default: None)
143
+ -q, --qos QOS quality of service (default: None)
144
+ -Q, --quiet quiet mode (suppress informational messages) (default: False)
145
+ --reboot reboot compute nodes before starting job (default: False)
146
+ -s, --oversubscribe oversubscribe resources with other jobs (default: False)
147
+ --signal [R:]NUM[@TIME]
148
+ send signal when time limit within time seconds (default: None)
149
+ --spread-job spread job across as many nodes as possible (default: False)
150
+ --switches MAX_SWITCHES[@MAX_TIME]
151
+ optimum switches and max time to wait for optimum (default: None)
152
+ -S, --core-spec CORES
153
+ count of reserved cores (default: None)
154
+ --thread-spec THREADS
155
+ count of reserved threads (default: None)
156
+ -t, --time MINUTES time limit (default: None)
157
+ --time-min MINUTES minimum time limit (if distinct) (default: None)
158
+ --tres-bind ... task to tres binding options (default: None)
159
+ --tres-per-task LIST list of tres required per task (default: None)
160
+ --use-min-nodes if a range of node counts is given, prefer the smaller count (default: False)
161
+ --wckey WCKEY wckey to run job under (default: None)
162
+ --cluster-constraint LIST
163
+ specify a list of cluster constraints (default: None)
164
+ --contiguous demand a contiguous range of nodes (default: False)
165
+ -C, --constraint LIST
166
+ specify a list of constraints (default: None)
167
+ -F, --nodefile FILENAME
168
+ request a specific list of hosts (default: None)
169
+ --mem MB minimum amount of real memory (default: None)
170
+ --mincpus N minimum number of logical processors per node (default: None)
171
+ --reservation NAME allocate resources from named reservation (default: None)
172
+ --tmp MB minimum amount of temporary disk (default: None)
173
+ -w, --nodelist HOST [HOST ...]
174
+ request a specific list of hosts (default: None)
175
+ -x, --exclude HOST [HOST ...]
176
+ exclude a specific list of hosts (default: None)
177
+ --exclusive-user allocate nodes in exclusive mode for cpu consumable resource (default: False)
178
+ --exclusive-mcs allocate nodes in exclusive mode when mcs plugin is enabled (default: False)
179
+ --mem-per-cpu MB maximum amount of real memory per allocated cpu (default: None)
180
+ --resv-ports reserve communication ports (default: False)
181
+ --sockets-per-node S number of sockets per node to allocate (default: None)
182
+ --cores-per-socket C number of cores per socket to allocate (default: None)
183
+ --threads-per-core T number of threads per core to allocate (default: None)
184
+ -B, --extra-node-info S[:C[:T]]
185
+ combine request of sockets, cores and threads (default: None)
186
+ --ntasks-per-core N number of tasks to invoke on each core (default: None)
187
+ --ntasks-per-socket N
188
+ number of tasks to invoke on each socket (default: None)
189
+ --hint HINT Bind tasks according to application hints (default: None)
190
+ --mem-bind BIND Bind memory to locality domains (default: None)
191
+ --cpus-per-gpu N number of CPUs required per allocated GPU (default: None)
192
+ -G, --gpus N count of GPUs required for the job (default: None)
193
+ --gpu-bind ... task to gpu binding options (default: None)
194
+ --gpu-freq ... frequency and voltage of GPUs (default: None)
195
+ --gpus-per-node N number of GPUs required per allocated node (default: None)
196
+ --gpus-per-socket N number of GPUs required per allocated socket (default: None)
197
+ --gpus-per-task N number of GPUs required per spawned task (default: None)
198
+ --mem-per-gpu MEM_PER_GPU
199
+ real memory required per allocated GPU (default: None)
200
+ --disable-stdout-job-summary
201
+ disable job summary in stdout file for the job (default: False)
202
+ --nvmps launching NVIDIA MPS for job (default: False)
203
+ --line-length LINE_LENGHT
204
+ line length before start of comment (default: 40)
205
+ --modules MODULES [MODULES ...]
206
+ Modules to load (e.g., --modules mod1 mod2 mod3) (default: [])
207
+ --venv VENV virtual environment to load with `source VENV/bin/activate` (default: None)
208
+ --printenv print all environment variables (default: False)
209
+ --print-self print the batch script in the batch script (default: False)
210
+ --likwid Set up likwid environment variables (default: False)
211
+ --input INPUT_PATH path to input json file (default: None)
212
+ --output OUTPUT_PATH json path to save slurm batch script to (default: None)
213
+ --export-json JSON_PATH
214
+ path to export yaml for generating the slurm script to (default: None)
215
+ ```
@@ -0,0 +1,39 @@
1
+ [build-system]
2
+ build-backend = "setuptools.build_meta"
3
+
4
+ requires = [ "setuptools", "wheel" ]
5
+
6
+ [project]
7
+ name = "slurm-script-generator"
8
+ version = "0.1"
9
+ description = "Generate slurm scripts."
10
+ readme = "README.md"
11
+ keywords = [ "python" ]
12
+ license = { file = "LICENSE.txt" }
13
+ authors = [ { name = "Max" } ]
14
+ requires-python = ">=3.8"
15
+ classifiers = [
16
+ "Development Status :: 3 - Alpha",
17
+ "Programming Language :: Python :: 3 :: Only",
18
+ "Programming Language :: Python :: 3.8",
19
+ "Programming Language :: Python :: 3.9",
20
+ "Programming Language :: Python :: 3.10",
21
+ "Programming Language :: Python :: 3.11",
22
+ "Programming Language :: Python :: 3.12",
23
+ "Programming Language :: Python :: 3.13",
24
+ ]
25
+ dependencies = [
26
+ ]
27
+
28
+ optional-dependencies.dev = [
29
+ "black",
30
+ "check-manifest",
31
+ "isort",
32
+ "pytest",
33
+ ]
34
+ optional-dependencies.test = [ "coverage" ]
35
+ urls."Source" = "https://github.com/max-models/slurm-script-generator"
36
+ scripts.generate-slurm-script = "slurm_script_generator.main:main"
37
+
38
+ [tool.setuptools.packages.find]
39
+ where = [ "src" ]
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+