vec-inf 0.3.0__py3-none-any.whl → 0.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vec_inf/README.md +4 -1
- vec_inf/cli/_cli.py +32 -9
- vec_inf/launch_server.sh +23 -5
- vec_inf/models/README.md +106 -31
- {vec_inf-0.3.0.dist-info → vec_inf-0.3.1.dist-info}/METADATA +20 -17
- vec_inf-0.3.1.dist-info/RECORD +15 -0
- vec_inf/models/CodeLlama/README.md +0 -12
- vec_inf/models/CodeLlama/config.sh +0 -5
- vec_inf/models/Llama-2/README.md +0 -10
- vec_inf/models/Llama-2/config.sh +0 -5
- vec_inf/models/Meta-Llama-3/README.md +0 -8
- vec_inf/models/Meta-Llama-3/config.sh +0 -5
- vec_inf/models/Meta-Llama-3.1/README.md +0 -8
- vec_inf/models/Meta-Llama-3.1/config.sh +0 -6
- vec_inf/models/Mistral/README.md +0 -10
- vec_inf/models/Mistral/config.sh +0 -5
- vec_inf/models/Mixtral/README.md +0 -8
- vec_inf/models/Mixtral/config.sh +0 -5
- vec_inf/models/Phi-3/README.md +0 -6
- vec_inf/models/Phi-3/config.sh +0 -6
- vec_inf/models/c4ai-command-r/README.md +0 -5
- vec_inf/models/c4ai-command-r/config.sh +0 -5
- vec_inf/models/dbrx/README.md +0 -5
- vec_inf/models/dbrx/config.sh +0 -5
- vec_inf/models/gemma-2/README.md +0 -8
- vec_inf/models/gemma-2/config.sh +0 -6
- vec_inf/models/llava-1.5/README.md +0 -7
- vec_inf/models/llava-1.5/chat_template.jinja +0 -23
- vec_inf/models/llava-1.5/config.sh +0 -5
- vec_inf/models/llava-v1.6/README.md +0 -7
- vec_inf/models/llava-v1.6/chat_template.jinja +0 -23
- vec_inf/models/llava-v1.6/config.sh +0 -5
- vec_inf-0.3.0.dist-info/RECORD +0 -41
- {vec_inf-0.3.0.dist-info → vec_inf-0.3.1.dist-info}/WHEEL +0 -0
- {vec_inf-0.3.0.dist-info → vec_inf-0.3.1.dist-info}/entry_points.txt +0 -0
vec_inf/README.md
CHANGED

@@ -1,5 +1,8 @@
 # `vec-inf` Commands

 * `launch`: Specify a model family and other optional parameters to launch an OpenAI compatible inference server, `--json-mode` supported. Check [`here`](./models/README.md) for complete list of available options.
+* `list`: List all available model names, `--json-mode` supported.
 * `status`: Check the model status by providing its Slurm job ID, `--json-mode` supported.
-* `shutdown`: Shutdown a model by providing its Slurm job ID.
+* `shutdown`: Shutdown a model by providing its Slurm job ID.
+
+Use `--help` to see all available options
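The command list above maps to a launch → status → shutdown workflow, with `--json-mode` making the output machine-readable. Below is a minimal Python sketch of driving that workflow by shelling out to the CLI; the command names come from the diff, but the JSON field names used here (such as `slurm_job_id` and `model_status`) are illustrative assumptions, not guaranteed by the package.

```python
import json
import subprocess

def run_json(*args: str) -> object:
    """Run a vec-inf command with --json-mode and parse its stdout as JSON."""
    out = subprocess.run(
        ["vec-inf", *args, "--json-mode"],
        check=True, capture_output=True, text=True,
    )
    return json.loads(out.stdout)

# List all available model names (the new `list` command, JSON output).
print(run_json("list"))

# Hypothetical end-to-end flow: launch a model, poll its status, then shut it down.
# The key names "slurm_job_id" and "model_status" are assumptions for illustration.
launch_info = run_json("launch", "Meta-Llama-3.1-8B-Instruct")
job_id = str(launch_info["slurm_job_id"])
print(run_json("status", job_id).get("model_status"))

# `shutdown` has no --json-mode, so call it directly.
subprocess.run(["vec-inf", "shutdown", job_id], check=True)
```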
vec_inf/cli/_cli.py
CHANGED

@@ -1,7 +1,6 @@
 import os
 
 import click
-import pandas as pd
 from rich.console import Console
 from rich.columns import Columns
 from rich.panel import Panel

@@ -27,12 +26,12 @@ def cli():
 @click.option(
     "--model-family",
     type=str,
-    help='The model family
+    help='The model family'
 )
 @click.option(
     "--model-variant",
     type=str,
-    help='The model variant
+    help='The model variant'
 )
 @click.option(
     "--max-model-len",

@@ -57,12 +56,12 @@ def cli():
 @click.option(
     "--qos",
     type=str,
-    help='Quality of service, default
+    help='Quality of service, default depends on suggested resource allocation required for the model'
 )
 @click.option(
     "--time",
     type=str,
-    help='Time limit for job, this should comply with QoS, default to
+    help='Time limit for job, this should comply with QoS, default to max walltime of the chosen QoS'
 )
 @click.option(
     "--data-type",

@@ -77,7 +76,7 @@ def cli():
 @click.option(
     "--log-dir",
     type=str,
-    help='Path to slurm log directory'
+    help='Path to slurm log directory, default to .vec-inf-logs in home directory'
 )
 @click.option(
     "--json-mode",

@@ -150,7 +149,7 @@ def launch(
 @click.option(
     "--log-dir",
     type=str,
-    help='Path to slurm log directory. This is required if
+    help='Path to slurm log directory. This is required if --log-dir was set in model launch'
 )
 @click.option(
     "--json-mode",

@@ -238,16 +237,40 @@ def shutdown(slurm_job_id: int) -> None:
 
 
 @cli.command("list")
+@click.argument(
+    "model-name",
+    required=False)
 @click.option(
     "--json-mode",
     is_flag=True,
     help='Output in JSON string',
 )
-def list(json_mode: bool=False) -> None:
+def list(model_name: str=None, json_mode: bool=False) -> None:
     """
-    List all available models
+    List all available models, or get default setup of a specific model
     """
     models_df = load_models_df()
+
+    if model_name:
+        if model_name not in models_df['model_name'].values:
+            raise ValueError(f"Model name {model_name} not found in available models")
+
+        excluded_keys = {'venv', 'log_dir', 'pipeline_parallelism'}
+        model_row = models_df.loc[models_df['model_name'] == model_name]
+
+        if json_mode:
+            # click.echo(model_row.to_json(orient='records'))
+            filtered_model_row = model_row.drop(columns=excluded_keys, errors='ignore')
+            click.echo(filtered_model_row.to_json(orient='records'))
+            return
+        table = create_table(key_title="Model Config", value_title="Value")
+        for _, row in model_row.iterrows():
+            for key, value in row.items():
+                if key not in excluded_keys:
+                    table.add_row(key, str(value))
+        CONSOLE.print(table)
+        return
+
     if json_mode:
         click.echo(models_df['model_name'].to_json(orient='records'))
         return
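The substantive CLI change is the optional `MODEL_NAME` argument on `list`: with no argument it prints every model name, with an argument it prints that model's default configuration, as a table or as JSON. The following self-contained sketch shows the same click pattern (optional argument plus a pandas lookup); the inline DataFrame stands in for the package's `load_models_df()` and its columns are illustrative only.

```python
import click
import pandas as pd

# Stand-in for vec-inf's load_models_df(); the columns here are illustrative.
MODELS = pd.DataFrame(
    [
        {"model_name": "Meta-Llama-3.1-8B-Instruct", "num_gpus": 1, "partition": "a40"},
        {"model_name": "Meta-Llama-3.1-70B-Instruct", "num_gpus": 4, "partition": "a40"},
    ]
)

@click.command("list")
@click.argument("model-name", required=False)
@click.option("--json-mode", is_flag=True, help="Output in JSON string")
def list_models(model_name: str = None, json_mode: bool = False) -> None:
    """List all models, or show the default setup of one model."""
    if model_name:
        row = MODELS.loc[MODELS["model_name"] == model_name]
        if row.empty:
            raise ValueError(f"Model name {model_name} not found in available models")
        # One record (JSON or key/value dump) for the requested model.
        click.echo(row.to_json(orient="records") if json_mode
                   else row.iloc[0].to_string())
        return
    # No argument: just the list of model names.
    click.echo(MODELS["model_name"].to_json(orient="records") if json_mode
               else "\n".join(MODELS["model_name"]))

if __name__ == "__main__":
    list_models()
```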
vec_inf/launch_server.sh
CHANGED

@@ -22,7 +22,7 @@ while [[ "$#" -gt 0 ]]; do
     shift
 done
 
-required_vars=(model_family model_variant partition qos walltime num_nodes num_gpus max_model_len vocab_size
+required_vars=(model_family model_variant partition qos walltime num_nodes num_gpus max_model_len vocab_size)
 
 for var in "$required_vars[@]"; do
     if [ -z "$!var" ]; then

@@ -40,10 +40,28 @@ export NUM_NODES=$num_nodes
 export NUM_GPUS=$num_gpus
 export VLLM_MAX_MODEL_LEN=$max_model_len
 export VLLM_MAX_LOGPROBS=$vocab_size
-
-export
-export
-export
+# For custom models, the following are set to default if not specified
+export VLLM_DATA_TYPE="auto"
+export VENV_BASE="singularity"
+export LOG_DIR="default"
+# Pipeline parallelism is disabled and can only be enabled if specified in models.csv as this is an experimental feature
+export PIPELINE_PARALLELISM="false"
+
+if [ -n "$data_type" ]; then
+    export VLLM_DATA_TYPE=$data_type
+fi
+
+if [ -n "$virtual_env" ]; then
+    export VENV_BASE=$virtual_env
+fi
+
+if [ -n "$log_dir" ]; then
+    export LOG_DIR=$log_dir
+fi
+
+if [ -n "$pipeline_parallelism" ]; then
+    export PIPELINE_PARALLELISM=$pipeline_parallelism
+fi
 
 # ================================= Set default environment variables ======================================
 # Slurm job configuration
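The new block in `launch_server.sh` establishes a simple precedence rule for custom models: every optional setting gets a hard-coded default (`VLLM_DATA_TYPE`, `VENV_BASE`, `LOG_DIR`, `PIPELINE_PARALLELISM`), and a value passed on the command line overrides it. A minimal Python sketch of the same precedence rule follows; the dictionary and helper are illustrative only and are not part of the package.

```python
# Illustrative only: the "default unless explicitly provided" rule used in launch_server.sh.
DEFAULTS = {
    "VLLM_DATA_TYPE": "auto",
    "VENV_BASE": "singularity",
    "LOG_DIR": "default",
    # Experimental feature: only enabled when the model's models.csv entry asks for it.
    "PIPELINE_PARALLELISM": "false",
}

def build_env(overrides: dict) -> dict:
    """Start from the defaults and keep only the values the caller explicitly set."""
    env = dict(DEFAULTS)
    env.update({k: v for k, v in overrides.items() if v})  # empty/None means "not specified"
    return env

# A custom launch that only overrides the data type keeps the other defaults.
print(build_env({"VLLM_DATA_TYPE": "bfloat16"}))
```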
vec_inf/models/README.md
CHANGED

@@ -1,31 +1,106 @@
-#
+# Available Models
+More profiling metrics coming soon!
+
+## [Cohere for AI: Command R](https://huggingface.co/collections/CohereForAI/c4ai-command-r-plus-660ec4c34f7a69c50ce7f7b9)
+
+| Variant | Suggested resource allocation | Avg prompt throughput | Avg generation throughput |
+|:----------:|:----------:|:----------:|:----------:|
+|[`c4ai-command-r-plus`](https://huggingface.co/CohereForAI/c4ai-command-r-plus)| 8x a40 (2 nodes, 4 a40/node) | 412 tokens/s | 541 tokens/s |
+
+## [Code Llama](https://huggingface.co/collections/meta-llama/code-llama-family-661da32d0a9d678b6f55b933)
+
+| Variant | Suggested resource allocation | Avg prompt throughput | Avg generation throughput |
+|:----------:|:----------:|:----------:|:----------:|
+| [`CodeLlama-7b-hf`](https://huggingface.co/meta-llama/CodeLlama-7b-hf) | 1x a40 | - tokens/s | - tokens/s |
+| [`CodeLlama-7b-Instruct-hf`](https://huggingface.co/meta-llama/CodeLlama-7b-Instruct-hf) | 1x a40 | - tokens/s | - tokens/s |
+| [`CodeLlama-13b-hf`](https://huggingface.co/meta-llama/CodeLlama-13b-hf) | 1x a40 | - tokens/s | - tokens/s |
+| [`CodeLlama-13b-Instruct-hf`](https://huggingface.co/meta-llama/CodeLlama-13b-Instruct-hf) | 1x a40 | - tokens/s | - tokens/s |
+| [`CodeLlama-34b-hf`](https://huggingface.co/meta-llama/CodeLlama-34b-hf) | 2x a40 | - tokens/s | - tokens/s |
+| [`CodeLlama-34b-Instruct-hf`](https://huggingface.co/meta-llama/CodeLlama-34b-Instruct-hf) | 2x a40 | - tokens/s | - tokens/s |
+| [`CodeLlama-70b-hf`](https://huggingface.co/meta-llama/CodeLlama-70b-hf) | 4x a40 | - tokens/s | - tokens/s |
+| [`CodeLlama-70b-Instruct-hf`](https://huggingface.co/meta-llama/CodeLlama-70b-Instruct-hf) | 4x a40 | - tokens/s | - tokens/s |
+
+## [Databricks: DBRX](https://huggingface.co/collections/databricks/dbrx-6601c0852a0cdd3c59f71962)
+
+| Variant | Suggested resource allocation | Avg prompt throughput | Avg generation throughput |
+|:----------:|:----------:|:----------:|:----------:|
+|[`dbrx-instruct`](https://huggingface.co/databricks/dbrx-instruct)| 8x a40 (2 nodes, 4 a40/node) | 107 tokens/s | 904 tokens/s |
+
+## [Google: Gemma 2](https://huggingface.co/collections/google/gemma-2-release-667d6600fd5220e7b967f315)
+
+| Variant | Suggested resource allocation | Avg prompt throughput | Avg generation throughput |
+|:----------:|:----------:|:----------:|:----------:|
+| [`gemma-2-9b`](https://huggingface.co/google/gemma-2-9b) | 1x a40 | - tokens/s | - tokens/s |
+| [`gemma-2-9b-it`](https://huggingface.co/google/gemma-2-9b-it) | 1x a40 | - tokens/s | - tokens/s |
+| [`gemma-2-27b`](https://huggingface.co/google/gemma-2-27b) | 2x a40 | - tokens/s | - tokens/s |
+| [`gemma-2-27b-it`](https://huggingface.co/google/gemma-2-27b-it) | 2x a40 | - tokens/s | - tokens/s |
+
+## [LLaVa-1.5](https://huggingface.co/collections/llava-hf/llava-15-65f762d5b6941db5c2ba07e0)
+
+| Variant | Suggested resource allocation | Avg prompt throughput | Avg generation throughput |
+|:----------:|:----------:|:----------:|:----------:|
+|[`llava-1.5-7b-hf`](https://huggingface.co/llava-hf/llava-1.5-7b-hf)| 1x a40 | - tokens/s | - tokens/s |
+|[`llava-1.5-13b-hf`](https://huggingface.co/llava-hf/llava-1.5-13b-hf)| 1x a40 | - tokens/s | - tokens/s |
+
+## [LLaVa-NeXT](https://huggingface.co/collections/llava-hf/llava-next-65f75c4afac77fd37dbbe6cf)
+
+| Variant | Suggested resource allocation | Avg prompt throughput | Avg generation throughput |
+|:----------:|:----------:|:----------:|:----------:|
+|[`llava-v1.6-mistral-7b-hf`](https://huggingface.co/llava-hf/llava-v1.6-mistral-7b-hf)| 1x a40 | - tokens/s | - tokens/s |
+|[`llava-v1.6-34b-hf`](https://huggingface.co/llava-hf/llava-v1.6-34b-hf)| 2x a40 | - tokens/s | - tokens/s |
+
+## [Meta: Llama 2](https://huggingface.co/collections/meta-llama/llama-2-family-661da1f90a9d678b6f55773b)
+
+| Variant | Suggested resource allocation |
+|:----------:|:----------:|
+| [`Llama-2-7b-hf`](https://huggingface.co/meta-llama/Llama-2-7b-hf) | 1x a40 |
+| [`Llama-2-7b-chat-hf`](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) | 1x a40 |
+| [`Llama-2-13b-hf`](https://huggingface.co/meta-llama/Llama-2-13b-hf) | 1x a40 |
+| [`Llama-2-13b-chat-hf`](https://huggingface.co/meta-llama/Llama-2-13b-chat-hf) | 1x a40 |
+| [`Llama-2-70b-hf`](https://huggingface.co/meta-llama/Llama-2-70b-hf) | 4x a40 |
+| [`Llama-2-70b-chat-hf`](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf) | 4x a40 |
+
+## [Meta: Llama 3](https://huggingface.co/collections/meta-llama/meta-llama-3-66214712577ca38149ebb2b6)
+
+| Variant | Suggested resource allocation | Avg prompt throughput | Avg generation throughput |
+|:----------:|:----------:|:----------:|:----------:|
+| [`Meta-Llama-3-8B`](https://huggingface.co/meta-llama/Meta-Llama-3-8B) | 1x a40 | 222 tokens/s | 1811 tokens/s |
+| [`Meta-Llama-3-8B-Instruct`](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct) | 1x a40 | 371 tokens/s | 1990 tokens/s |
+| [`Meta-Llama-3-70B`](https://huggingface.co/meta-llama/Meta-Llama-3-70B) | 4x a40 | 81 tokens/s | 618 tokens/s |
+| [`Meta-Llama-3-70B-Instruct`](https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct) | 4x a40 | 301 tokens/s | 660 tokens/s |
+
+## [Meta: Llama 3.1](https://huggingface.co/collections/meta-llama/llama-31-669fc079a0c406a149a5738f)
+
+| Variant | Suggested resource allocation | Avg prompt throughput | Avg generation throughput |
+|:----------:|:----------:|:----------:|:----------:|
+| [`Meta-Llama-3.1-8B`](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B) | 1x a40 | - tokens/s | - tokens/s |
+| [`Meta-Llama-3.1-8B-Instruct`](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct) | 1x a40 | - tokens/s | - tokens/s |
+| [`Meta-Llama-3.1-70B`](https://huggingface.co/meta-llama/Meta-Llama-3.1-70B) | 4x a40 | - tokens/s | - tokens/s |
+| [`Meta-Llama-3.1-70B-Instruct`](https://huggingface.co/meta-llama/Meta-Llama-3.1-70B-Instruct) | 4x a40 | - tokens/s | - tokens/s |
+| [`Meta-Llama-3.1-405B-Instruct`](https://huggingface.co/meta-llama/Meta-Llama-3.1-405B-Instruct) | 32x a40 (8 nodes, 4 a40/node) | - tokens/s | - tokens/s |
+
+## [Mistral AI: Mistral](https://huggingface.co/mistralai)
+
+| Variant (Mistral) | Suggested resource allocation | Avg prompt throughput | Avg generation throughput |
+|:----------:|:----------:|:----------:|:----------:|
+|[`Mistral-7B-v0.1`](https://huggingface.co/mistralai/Mistral-7B-v0.1)| 1x a40 | - tokens/s | - tokens/s|
+|[`Mistral-7B-Instruct-v0.1`](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1)| 1x a40 | - tokens/s | - tokens/s|
+|[`Mistral-7B-Instruct-v0.2`](https://huggingface.co/mistralai/Mistral-7B-v0.2)| 1x a40 | - tokens/s | - tokens/s|
+|[`Mistral-7B-v0.3`](https://huggingface.co/mistralai/Mistral-7B-v0.3)| 1x a40 | - tokens/s | - tokens/s |
+|[`Mistral-7B-Instruct-v0.3`](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3)| 1x a40 | - tokens/s | - tokens/s|
+|[`Mistral-Large-Instruct-2407`](https://huggingface.co/mistralai/Mistral-Large-Instruct-2407)| 4x a40 | - tokens/s | - tokens/s|
+
+## [Mistral AI: Mixtral](https://huggingface.co/mistralai)
+
+| Variant (Mixtral) | Suggested resource allocation | Avg prompt throughput | Avg generation throughput |
+|:----------:|:----------:|:----------:|:----------:|
+|[`Mixtral-8x7B-Instruct-v0.1`](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1)| 4x a40 | 222 tokens/s | 1543 tokens/s |
+|[`Mixtral-8x22B-v0.1`](https://huggingface.co/mistralai/Mixtral-8x22B-v0.1)| 8x a40 (2 nodes, 4 a40/node) | 145 tokens/s | 827 tokens/s|
+|[`Mixtral-8x22B-Instruct-v0.1`](https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1)| 8x a40 (2 nodes, 4 a40/node) | 95 tokens/s | 803 tokens/s|
+
+## [Microsoft: Phi 3](https://huggingface.co/collections/microsoft/phi-3-6626e15e9585a200d2d761e3)
+
+| Variant | Suggested resource allocation | Avg prompt throughput | Avg generation throughput |
+|:----------:|:----------:|:----------:|:----------:|
+| [`Phi-3-medium-128k-instruct`](https://huggingface.co/microsoft/Phi-3-medium-128k-instruct) | 2x a40 | - tokens/s | - tokens/s |
+| [`Phi-3-vision-128k-instruct`](https://huggingface.co/microsoft/Phi-3-vision-128k-instruct) | 2x a40 | - tokens/s | - tokens/s |
{vec_inf-0.3.0.dist-info → vec_inf-0.3.1.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vec-inf
-Version: 0.3.
+Version: 0.3.1
 Summary: Efficient LLM inference on Slurm clusters using vLLM.
 License: MIT
 Author: Marshall Wang

@@ -22,7 +22,7 @@ Requires-Dist: vllm-nccl-cu12 (>=2.18,<2.19) ; extra == "dev"
 Description-Content-Type: text/markdown
 
 # Vector Inference: Easy inference on Slurm clusters
-This repository provides an easy-to-use solution to run inference servers on [Slurm](https://slurm.schedmd.com/overview.html)-managed computing clusters using [vLLM](https://docs.vllm.ai/en/latest/). **All scripts in this repository runs natively on the Vector Institute cluster environment**. To adapt to other environments, update
+This repository provides an easy-to-use solution to run inference servers on [Slurm](https://slurm.schedmd.com/overview.html)-managed computing clusters using [vLLM](https://docs.vllm.ai/en/latest/). **All scripts in this repository runs natively on the Vector Institute cluster environment**. To adapt to other environments, update [`launch_server.sh`](vec-inf/launch_server.sh), [`vllm.slurm`](vec-inf/vllm.slurm), [`multinode_vllm.slurm`](vec-inf/multinode_vllm.slurm) and [`models.csv`](vec-inf/models/models.csv) accordingly.
 
 ## Installation
 If you are using the Vector cluster environment, and you don't need any customization to the inference server environment, run the following to install package:

@@ -32,15 +32,15 @@ pip install vec-inf
 Otherwise, we recommend using the provided [`Dockerfile`](Dockerfile) to set up your own environment with the package
 
 ## Launch an inference server
-We will use the Llama 3 model as example, to launch an inference server for Llama
+We will use the Llama 3.1 model as example, to launch an OpenAI compatible inference server for Meta-Llama-3.1-8B-Instruct, run:
 ```bash
-vec-inf launch
+vec-inf launch Meta-Llama-3.1-8B-Instruct
 ```
 You should see an output like the following:
 
-<img src="https://github.com/user-attachments/assets/
+<img width="400" alt="launch_img" src="https://github.com/user-attachments/assets/557eb421-47db-4810-bccd-c49c526b1b43">
 
-
+The model would be launched using the [default parameters](vec-inf/models/models.csv), you can override these values by providing additional options, use `--help` to see the full list. You can also launch your own customized model as long as the model architecture is [supported by vLLM](https://docs.vllm.ai/en/stable/models/supported_models.html), you'll need to specify all model launching related options to run a successful run.
 
 You can check the inference server status by providing the Slurm job ID to the `status` command:
 ```bash

@@ -49,18 +49,17 @@ vec-inf status 13014393
 
 You should see an output like the following:
 
-<img src="https://github.com/user-attachments/assets/
+<img width="400" alt="status_img" src="https://github.com/user-attachments/assets/7385b9ca-9159-4ca9-bae2-7e26d80d9747">
 
 There are 5 possible states:
 
-* **PENDING**: Job submitted to Slurm, but not executed yet.
+* **PENDING**: Job submitted to Slurm, but not executed yet. Job pending reason will be shown.
 * **LAUNCHING**: Job is running but the server is not ready yet.
-* **READY**: Inference server running and ready to take requests.
-* **FAILED**: Inference server in an unhealthy state.
+* **READY**: Inference server running and ready to take requests.
+* **FAILED**: Inference server in an unhealthy state. Job failed reason will be shown.
 * **SHUTDOWN**: Inference server is shutdown/cancelled.
 
-Note that the base URL is only available when model is in `READY` state.
-Both `launch` and `status` command supports `--json-mode`, where the output information would be structured as a JSON string.
+Note that the base URL is only available when model is in `READY` state, and if you've changed the Slurm log directory path, you also need to specify it when using the `status` command.
 
 Finally, when you're finished using a model, you can shut it down by providing the Slurm job ID:
 ```bash

@@ -69,15 +68,19 @@ vec-inf shutdown 13014393
 > Shutting down model with Slurm Job ID: 13014393
 ```
 
-
+You call view the full list of available models by running the `list` command:
 ```bash
-vec-inf
+vec-inf list
 ```
+<img width="1200" alt="list_img" src="https://github.com/user-attachments/assets/a4f0d896-989d-43bf-82a2-6a6e5d0d288f">
 
-
+You can also view the default setup for a specific supported model by providing the model name, for example `Meta-Llama-3.1-70B-Instruct`:
 ```bash
-vec-inf
+vec-inf list Meta-Llama-3.1-70B-Instruct
 ```
+<img width="400" alt="list_model_img" src="https://github.com/user-attachments/assets/5dec7a33-ba6b-490d-af47-4cf7341d0b42">
+
+`launch`, `list`, and `status` command supports `--json-mode`, where the command output would be structured as a JSON string.
 
 ## Send inference requests
 Once the inference server is ready, you can start sending in inference requests. We provide example scripts for sending inference requests in [`examples`](examples) folder. Make sure to update the model server URL and the model weights location in the scripts. For example, you can run `python examples/inference/llm/completions.py`, and you should expect to see an output like the following:

@@ -90,5 +93,5 @@ If you want to run inference from your local device, you can open a SSH tunnel t
 ```bash
 ssh -L 8081:172.17.8.29:8081 username@v.vectorinstitute.ai -N
 ```
-The example provided above is for the vector cluster, change the variables accordingly for your environment
+Where the last number in the URL is the GPU number (gpu029 in this case). The example provided above is for the vector cluster, change the variables accordingly for your environment
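The updated README stops at "send inference requests" and points to the bundled example scripts. For a one-file illustration of talking to the OpenAI compatible server through the SSH tunnel described above, a minimal sketch is below; the base URL, port, API key placeholder, and model identifier are assumptions to adapt (the server reports its actual base URL once the job reaches the `READY` state).

```python
from openai import OpenAI

# Assumptions: the SSH tunnel forwards the server to localhost:8081, and the model
# was launched as Meta-Llama-3.1-8B-Instruct. Adjust both to your setup; the model
# name may instead need to be the weights path shown by `vec-inf status`.
client = OpenAI(base_url="http://localhost:8081/v1", api_key="EMPTY")

completion = client.completions.create(
    model="Meta-Llama-3.1-8B-Instruct",
    prompt="Where is the capital of Canada?",
    max_tokens=32,
)
print(completion.choices[0].text)
```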
vec_inf-0.3.1.dist-info/RECORD
ADDED

@@ -0,0 +1,15 @@
+vec_inf/README.md,sha256=jtvslzw1MjTFFIXwzlrb0NstUyTEDL0S_k27K5bLl34,499
+vec_inf/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+vec_inf/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+vec_inf/cli/_cli.py,sha256=8UHNFitbmq1OTNO1cLM_LVuHFndnvNyQSezGs1oT3tc,8346
+vec_inf/cli/_utils.py,sha256=2Grz-bX_mGjzxXUBdrX7MbNfXUM7JQ3399GKe-N74FE,3910
+vec_inf/find_port.sh,sha256=bGQ6LYSFVSsfDIGatrSg5YvddbZfaPL0R-Bjo4KYD6I,1088
+vec_inf/launch_server.sh,sha256=BW5oK_10OjfHXhIsdf9vPsEBlCXh8j2lOV7qvSlPcZU,3998
+vec_inf/models/README.md,sha256=y_Cr1ZAkqIw1vIEOZMEp4FsyLGVijDoIoqwxn6aeQwo,8138
+vec_inf/models/models.csv,sha256=JFGMhT9o7Pf0tkY-w2GRQG5MxdYK2V5T8s6bk166MpM,4720
+vec_inf/multinode_vllm.slurm,sha256=pedYWIzPN-BKtL6ezoZSKJ3DO7RduDyAR4_cxZD4KyY,3938
+vec_inf/vllm.slurm,sha256=6Nx14qyAwHlbweCbFMUcMV2jaZSv41ghkyx2MiHJY8Y,1608
+vec_inf-0.3.1.dist-info/METADATA,sha256=xRhpXmFmMv5A77xdJaKBo_m7UXC13CkBmzegnQzQnPg,5701
+vec_inf-0.3.1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+vec_inf-0.3.1.dist-info/entry_points.txt,sha256=JF4uXsj1H4XacxaBw9f0KN0P0qDzmp7K_1zTEBDappo,48
+vec_inf-0.3.1.dist-info/RECORD,,
vec_inf/models/CodeLlama/README.md
DELETED

@@ -1,12 +0,0 @@
-# [Code Llama](https://huggingface.co/collections/meta-llama/code-llama-family-661da32d0a9d678b6f55b933)
-
-| Variant | Suggested resource allocation | Avg prompt throughput | Avg generation throughput |
-|:----------:|:----------:|:----------:|:----------:|
-| [`7b-hf`](https://huggingface.co/meta-llama/CodeLlama-7b-hf) | 1x a40 | - tokens/s | - tokens/s |
-| [**`7b-Instruct-hf`**](https://huggingface.co/meta-llama/CodeLlama-7b-Instruct-hf) | 1x a40 | - tokens/s | - tokens/s |
-| [`13b-hf`](https://huggingface.co/meta-llama/CodeLlama-13b-hf) | 1x a40 | - tokens/s | - tokens/s |
-| [`13b-Instruct-hf`](https://huggingface.co/meta-llama/CodeLlama-13b-Instruct-hf) | 1x a40 | - tokens/s | - tokens/s |
-| [`34b-hf`](https://huggingface.co/meta-llama/CodeLlama-34b-hf) | 2x a40 | - tokens/s | - tokens/s |
-| [`34b-Instruct-hf`](https://huggingface.co/meta-llama/CodeLlama-34b-Instruct-hf) | 2x a40 | - tokens/s | - tokens/s |
-| [`70b-hf`](https://huggingface.co/meta-llama/CodeLlama-70b-hf) | 4x a40 | - tokens/s | - tokens/s |
-| [`70b-Instruct-hf`](https://huggingface.co/meta-llama/CodeLlama-70b-Instruct-hf) | 4x a40 | - tokens/s | - tokens/s |
vec_inf/models/Llama-2/README.md
DELETED

@@ -1,10 +0,0 @@
-# [Meta: Llama 2](https://huggingface.co/collections/meta-llama/llama-2-family-661da1f90a9d678b6f55773b)
-
-| Variant | Suggested resource allocation |
-|:----------:|:----------:|
-| [**`7b-hf`**](https://huggingface.co/meta-llama/Llama-2-7b-hf) | 1x a40 |
-| [`7b-chat-hf`](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) | 1x a40 |
-| [`13b-hf`](https://huggingface.co/meta-llama/Llama-2-13b-hf) | 1x a40 |
-| [`13b-chat-hf`](https://huggingface.co/meta-llama/Llama-2-13b-chat-hf) | 1x a40 |
-| [`70b-hf`](https://huggingface.co/meta-llama/Llama-2-70b-hf) | 4x a40 |
-| [`70b-chat-hf`](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf) | 4x a40 |
vec_inf/models/Llama-2/config.sh
DELETED
vec_inf/models/Meta-Llama-3/README.md
DELETED

@@ -1,8 +0,0 @@
-# [Meta: Llama 3](https://huggingface.co/collections/meta-llama/meta-llama-3-66214712577ca38149ebb2b6)
-
-| Variant | Suggested resource allocation | Avg prompt throughput | Avg generation throughput |
-|:----------:|:----------:|:----------:|:----------:|
-| [`8B`](https://huggingface.co/meta-llama/Meta-Llama-3-8B) | 1x a40 | 222 tokens/s | 1811 tokens/s |
-| [**`8B-Instruct`**](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct) | 1x a40 | 371 tokens/s | 1990 tokens/s |
-| [`70B`](https://huggingface.co/meta-llama/Meta-Llama-3-70B) | 4x a40 | 81 tokens/s | 618 tokens/s |
-| [`70B-Instruct`](https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct) | 4x a40 | 301 tokens/s | 660 tokens/s |
vec_inf/models/Meta-Llama-3.1/README.md
DELETED

@@ -1,8 +0,0 @@
-# [Meta: Llama 3.1](https://huggingface.co/collections/meta-llama/llama-31-669fc079a0c406a149a5738f)
-
-| Variant | Suggested resource allocation | Avg prompt throughput | Avg generation throughput |
-|:----------:|:----------:|:----------:|:----------:|
-| [`8B`](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B) | 1x a40 | - tokens/s | - tokens/s |
-| [**`8B-Instruct`**](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct) | 1x a40 | - tokens/s | - tokens/s |
-| [`70B`](https://huggingface.co/meta-llama/Meta-Llama-3.1-70B) | 4x a40 | - tokens/s | - tokens/s |
-| [`70B-Instruct`](https://huggingface.co/meta-llama/Meta-Llama-3.1-70B-Instruct) | 4x a40 | - tokens/s | - tokens/s |
vec_inf/models/Mistral/README.md
DELETED

@@ -1,10 +0,0 @@
-# [Mistral AI: Mistral](https://huggingface.co/mistralai)
-* Supported model variants:
-
-| Variant (Mistral) | Suggested resource allocation | Avg prompt throughput | Avg generation throughput |
-|:----------:|:----------:|:----------:|:----------:|
-|[`7B-v0.1`](https://huggingface.co/mistralai/Mistral-7B-v0.1)| 1x a40 | - tokens/s | - tokens/s|
-|[`7B-Instruct-v0.1`](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1)| 1x a40 | - tokens/s | - tokens/s|
-|[`7B-Instruct-v0.2`](https://huggingface.co/mistralai/Mistral-7B-v0.2)| 1x a40 | - tokens/s | - tokens/s|
-|[`7B-v0.3`](https://huggingface.co/mistralai/Mistral-7B-v0.3)| 1x a40 | - tokens/s | - tokens/s |
-|[**`7B-Instruct-v0.3`**](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3)| 1x a40 | - tokens/s | - tokens/s|
vec_inf/models/Mistral/config.sh
DELETED
vec_inf/models/Mixtral/README.md
DELETED

@@ -1,8 +0,0 @@
-# [Mistral AI: Mixtral](https://huggingface.co/mistralai)
-* Supported model variants:
-
-| Variant (Mixtral) | Suggested resource allocation | Avg prompt throughput | Avg generation throughput |
-|:----------:|:----------:|:----------:|:----------:|
-|[**`8x7B-Instruct-v0.1`**](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1)| 4x a40 | 222 tokens/s | 1543 tokens/s |
-|[`8x22B-v0.1`](https://huggingface.co/mistralai/Mixtral-8x22B-v0.1)| 8x a40 (2 nodes, 4 a40/node) | 145 tokens/s | 827 tokens/s|
-|[`8x22B-Instruct-v0.1`](https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1)| 8x a40 (2 nodes, 4 a40/node) | 95 tokens/s | 803 tokens/s|
vec_inf/models/Mixtral/config.sh
DELETED
vec_inf/models/Phi-3/README.md
DELETED

@@ -1,6 +0,0 @@
-# [Microsoft: Phi 3](https://huggingface.co/collections/microsoft/phi-3-6626e15e9585a200d2d761e3)
-
-| Variant | Suggested resource allocation | Avg prompt throughput | Avg generation throughput |
-|:----------:|:----------:|:----------:|:----------:|
-| [**`medium-128k-instruct`**](https://huggingface.co/microsoft/Phi-3-medium-128k-instruct) | 2x a40 | - tokens/s | - tokens/s |
-| [**`vision-128k-instruct`**](https://huggingface.co/microsoft/Phi-3-vision-128k-instruct) | 2x a40 | - tokens/s | - tokens/s |
vec_inf/models/Phi-3/config.sh
DELETED

vec_inf/models/c4ai-command-r/README.md
DELETED

@@ -1,5 +0,0 @@
-# [Cohere for AI: Command R](https://huggingface.co/collections/CohereForAI/c4ai-command-r-plus-660ec4c34f7a69c50ce7f7b9)
-
-| Variant | Suggested resource allocation | Avg prompt throughput | Avg generation throughput |
-|:----------:|:----------:|:----------:|:----------:|
-|[**`plus`**](https://huggingface.co/CohereForAI/c4ai-command-r-plus)| 8x a40 (2 nodes, 4 a40/node) | 412 tokens/s | 541 tokens/s |
vec_inf/models/dbrx/README.md
DELETED

@@ -1,5 +0,0 @@
-# [Databricks: DBRX](https://huggingface.co/collections/databricks/dbrx-6601c0852a0cdd3c59f71962)
-
-| Variant | Suggested resource allocation | Avg prompt throughput | Avg generation throughput |
-|:----------:|:----------:|:----------:|:----------:|
-|[**`dbrx-instruct`**](https://huggingface.co/databricks/dbrx-instruct)| 8x a40 (2 nodes, 4 a40/node) | 107 tokens/s | 904 tokens/s |
vec_inf/models/dbrx/config.sh
DELETED
vec_inf/models/gemma-2/README.md
DELETED

@@ -1,8 +0,0 @@
-# [Google: Gemma 2](https://huggingface.co/collections/google/gemma-2-release-667d6600fd5220e7b967f315)
-
-| Variant | Suggested resource allocation | Avg prompt throughput | Avg generation throughput |
-|:----------:|:----------:|:----------:|:----------:|
-| [`9b`](https://huggingface.co/google/gemma-2-9b) | 1x a40 | - tokens/s | - tokens/s |
-| [**`9b-it`**](https://huggingface.co/google/gemma-2-9b-it) | 1x a40 | - tokens/s | - tokens/s |
-| [`27b`](https://huggingface.co/google/gemma-2-27b) | 2x a40 | - tokens/s | - tokens/s |
-| [`27b-it`](https://huggingface.co/google/gemma-2-27b-it) | 2x a40 | - tokens/s | - tokens/s |
vec_inf/models/gemma-2/config.sh
DELETED

vec_inf/models/llava-1.5/README.md
DELETED

@@ -1,7 +0,0 @@
-# [LLaVa-1.5](https://huggingface.co/collections/llava-hf/llava-15-65f762d5b6941db5c2ba07e0)
-* Supported model variants:
-
-| Variant | Suggested resource allocation | Avg prompt throughput | Avg generation throughput |
-|:----------:|:----------:|:----------:|:----------:|
-|[**`7b-hf`**](https://huggingface.co/llava-hf/llava-1.5-7b-hf)| 1x a40 | - tokens/s | - tokens/s |
-|[`13b-hf`](https://huggingface.co/llava-hf/llava-1.5-13b-hf)| 1x a40 | - tokens/s | - tokens/s |
vec_inf/models/llava-1.5/chat_template.jinja
DELETED

@@ -1,23 +0,0 @@
-{%- if messages[0]['role'] == 'system' -%}
-{%- set system_message = messages[0]['content'] -%}
-{%- set messages = messages[1:] -%}
-{%- else -%}
-{% set system_message = '' -%}
-{%- endif -%}
-
-{{ bos_token + system_message }}
-{%- for message in messages -%}
-{%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}
-{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}
-{%- endif -%}
-
-{%- if message['role'] == 'user' -%}
-{{ 'USER: ' + message['content'] + '\n' }}
-{%- elif message['role'] == 'assistant' -%}
-{{ 'ASSISTANT: ' + message['content'] + eos_token + '\n' }}
-{%- endif -%}
-{%- endfor -%}
-
-{%- if add_generation_prompt -%}
-{{ 'ASSISTANT:' }}
-{% endif %}
vec_inf/models/llava-v1.6/README.md
DELETED

@@ -1,7 +0,0 @@
-# [LLaVa-NeXT](https://huggingface.co/collections/llava-hf/llava-next-65f75c4afac77fd37dbbe6cf)
-* Supported model variants:
-
-| Variant | Suggested resource allocation | Avg prompt throughput | Avg generation throughput |
-|:----------:|:----------:|:----------:|:----------:|
-|[**`mistral-7b-hf`**](https://huggingface.co/llava-hf/llava-v1.6-mistral-7b-hf)| 1x a40 | - tokens/s | - tokens/s |
-|[`34b-hf`](https://huggingface.co/llava-hf/llava-v1.6-34b-hf)| 2x a40 | - tokens/s | - tokens/s |
vec_inf/models/llava-v1.6/chat_template.jinja
DELETED

@@ -1,23 +0,0 @@
-{%- if messages[0]['role'] == 'system' -%}
-{%- set system_message = messages[0]['content'] -%}
-{%- set messages = messages[1:] -%}
-{%- else -%}
-{% set system_message = '' -%}
-{%- endif -%}
-
-{{ bos_token + system_message }}
-{%- for message in messages -%}
-{%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}
-{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}
-{%- endif -%}
-
-{%- if message['role'] == 'user' -%}
-{{ 'USER: ' + message['content'] + '\n' }}
-{%- elif message['role'] == 'assistant' -%}
-{{ 'ASSISTANT: ' + message['content'] + eos_token + '\n' }}
-{%- endif -%}
-{%- endfor -%}
-
-{%- if add_generation_prompt -%}
-{{ 'ASSISTANT:' }}
-{% endif %}
vec_inf-0.3.0.dist-info/RECORD
DELETED

@@ -1,41 +0,0 @@
-vec_inf/README.md,sha256=6QAPmd9ccLDHmZMNs4Tjjv0dA28FQIVFJtgmnwgAkPE,389
-vec_inf/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-vec_inf/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-vec_inf/cli/_cli.py,sha256=weUeKHz1Hjq4AnJAfw-YpFceKWThrs80FgfWS1Ccq5I,7332
-vec_inf/cli/_utils.py,sha256=2Grz-bX_mGjzxXUBdrX7MbNfXUM7JQ3399GKe-N74FE,3910
-vec_inf/find_port.sh,sha256=bGQ6LYSFVSsfDIGatrSg5YvddbZfaPL0R-Bjo4KYD6I,1088
-vec_inf/launch_server.sh,sha256=dVBVx6udXjb2Vw2rRTddTewDuw0WtV8ne0ImS0brMVk,3577
-vec_inf/models/CodeLlama/README.md,sha256=4R5Vp8mq_Qa9WgwGutihEw3vBc_5Euj-QTgHeq7s_ds,1145
-vec_inf/models/CodeLlama/config.sh,sha256=_DFM1NJibpSmbOIlSKB28m0C5PzM9mb8jVLLigSTgiQ,136
-vec_inf/models/Llama-2/README.md,sha256=0asL53BytxSeilUoyZmy1Z6bJd-UMgTkwu721HVNpn4,656
-vec_inf/models/Llama-2/config.sh,sha256=rAjwo51rczP7VWr9nLsVrlWwRqWA9ncGJzr61LdTBU8,129
-vec_inf/models/Meta-Llama-3/README.md,sha256=FQgpLniE_krQyoTe8ziRFyzyZMkNamCFAhqkd-N0TR8,704
-vec_inf/models/Meta-Llama-3/config.sh,sha256=q-SpdvTIbC4-U8xfaV_Uzzodl5okxq_Z5YNnzGYwdVQ,136
-vec_inf/models/Meta-Llama-3.1/README.md,sha256=yjzIg5xp5XgUzZxJmM3mz6uzqSl_n7tTTi9YyTuudAk,693
-vec_inf/models/Meta-Llama-3.1/config.sh,sha256=XhV-e33tuNJYX32PHx8AxZ5sR_A_z3glcuDfiZooV0o,162
-vec_inf/models/Mistral/README.md,sha256=uv4c_oHr3DAN_3fy0YfcGiIGmMdz1Vswx3wfaAcChlk,788
-vec_inf/models/Mistral/config.sh,sha256=8UWTYouNmctOd_eM0ArmuXhSYRkwkMqLY8WbturH1wY,135
-vec_inf/models/Mixtral/README.md,sha256=Ic94pH0NY-MniVR5b1uRDJrpYx1rVXLYQpjFEw98054,655
-vec_inf/models/Mixtral/config.sh,sha256=AbTfEmzHZ3UX08WAa2zcgdGPDw178xtfCh7l3znZIUQ,137
-vec_inf/models/Phi-3/README.md,sha256=lj8Bx538O0yC8SjID-GyFHDSf6MU6HezPdtqCO6zm1E,507
-vec_inf/models/Phi-3/config.sh,sha256=vX6UWZg7YCtDAO3QKHz7PwvGJ5clp7QYnytNPFx4tZ0,161
-vec_inf/models/README.md,sha256=V5atdrL3y6euM244iBrh6ASstWvr__uvCy3y7Ktg2qU,2390
-vec_inf/models/c4ai-command-r/README.md,sha256=yGCYVzsMpBSYa2eSn-YU2kBFv3qW3acd-mHY7FLIc9M,406
-vec_inf/models/c4ai-command-r/config.sh,sha256=InBRtlAHIxve3xbNN0UomMCh4xlAZlOQu-j4wWWc3Co,132
-vec_inf/models/dbrx/README.md,sha256=MJRyZtqhYqN9_BvTD-lxqf34ytYCP6a1tg4_aWfJhsI,384
-vec_inf/models/dbrx/config.sh,sha256=UjtHdUZ_6TKDGR8c449iVkaBa63a8Z3-IaxDq_KO4Go,126
-vec_inf/models/gemma-2/README.md,sha256=4QMheXAZe0bNNQ2kEZn15I3x83rF9iLQnSUejx8p46o,628
-vec_inf/models/gemma-2/config.sh,sha256=Tl1U774WXoOsAbiGa4tZGg53GWh_niqe5Z_bQ92VX1I,166
-vec_inf/models/llava-1.5/README.md,sha256=YxkN_BWnK4nNf0rXi4_1isJVxlV73YhVABxhCjqNSvY,471
-vec_inf/models/llava-1.5/chat_template.jinja,sha256=qCE9YwTfTa3jwjrB5yAnqVIm1bDkUBc5LjHBM0d9Sso,765
-vec_inf/models/llava-1.5/config.sh,sha256=Yvb6s1mil0vmkVllnC3DjphpSkC2U5KOQG3l5OJawME,127
-vec_inf/models/llava-v1.6/README.md,sha256=5JYjW0XoGJf40wd-oIdO3uAQYPv5XPZQuo5T37pxZcg,491
-vec_inf/models/llava-v1.6/chat_template.jinja,sha256=qCE9YwTfTa3jwjrB5yAnqVIm1bDkUBc5LjHBM0d9Sso,765
-vec_inf/models/llava-v1.6/config.sh,sha256=zuoK2cg5KgKbs9jk_M3R-vALGP1TesMkWEeRjSw209E,136
-vec_inf/models/models.csv,sha256=JFGMhT9o7Pf0tkY-w2GRQG5MxdYK2V5T8s6bk166MpM,4720
-vec_inf/multinode_vllm.slurm,sha256=pedYWIzPN-BKtL6ezoZSKJ3DO7RduDyAR4_cxZD4KyY,3938
-vec_inf/vllm.slurm,sha256=6Nx14qyAwHlbweCbFMUcMV2jaZSv41ghkyx2MiHJY8Y,1608
-vec_inf-0.3.0.dist-info/METADATA,sha256=Vqr7b5pmz4rWK1B4my9a_jG6BT5C_8XvGJtzjy3HVng,5142
-vec_inf-0.3.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-vec_inf-0.3.0.dist-info/entry_points.txt,sha256=JF4uXsj1H4XacxaBw9f0KN0P0qDzmp7K_1zTEBDappo,48
-vec_inf-0.3.0.dist-info/RECORD,,
{vec_inf-0.3.0.dist-info → vec_inf-0.3.1.dist-info}/WHEEL
File without changes

{vec_inf-0.3.0.dist-info → vec_inf-0.3.1.dist-info}/entry_points.txt
File without changes