vec-inf 0.4.1__tar.gz → 0.5.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {vec_inf-0.4.1 → vec_inf-0.5.0}/.github/workflows/code_checks.yml +2 -2
- vec_inf-0.5.0/.github/workflows/docker.yml +55 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/.github/workflows/docs_build.yml +3 -3
- {vec_inf-0.4.1 → vec_inf-0.5.0}/.github/workflows/docs_deploy.yml +5 -5
- {vec_inf-0.4.1 → vec_inf-0.5.0}/.github/workflows/publish.yml +14 -9
- {vec_inf-0.4.1 → vec_inf-0.5.0}/.github/workflows/unit_tests.yml +17 -7
- {vec_inf-0.4.1 → vec_inf-0.5.0}/.pre-commit-config.yaml +1 -1
- {vec_inf-0.4.1 → vec_inf-0.5.0}/Dockerfile +15 -42
- vec_inf-0.5.0/PKG-INFO +210 -0
- vec_inf-0.5.0/README.md +188 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/codecov.yml +1 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/docs/source/index.md +1 -1
- vec_inf-0.5.0/docs/source/user_guide.md +181 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/examples/inference/llm/chat_completions.py +1 -1
- {vec_inf-0.4.1 → vec_inf-0.5.0}/pyproject.toml +7 -5
- vec_inf-0.5.0/tests/test_imports.py +17 -0
- vec_inf-0.5.0/tests/vec_inf/cli/test_cli.py +516 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/tests/vec_inf/cli/test_utils.py +84 -69
- vec_inf-0.5.0/uv.lock +4511 -0
- vec_inf-0.5.0/vec_inf/cli/_cli.py +230 -0
- vec_inf-0.5.0/vec_inf/cli/_config.py +87 -0
- vec_inf-0.5.0/vec_inf/cli/_helper.py +675 -0
- vec_inf-0.5.0/vec_inf/cli/_utils.py +162 -0
- {vec_inf-0.4.1/vec_inf/models → vec_inf-0.5.0/vec_inf/config}/README.md +30 -0
- vec_inf-0.5.0/vec_inf/config/models.yaml +1274 -0
- vec_inf-0.5.0/vec_inf/multinode_vllm.slurm +154 -0
- vec_inf-0.5.0/vec_inf/vllm.slurm +90 -0
- vec_inf-0.4.1/PKG-INFO +0 -121
- vec_inf-0.4.1/README.md +0 -101
- vec_inf-0.4.1/docs/source/user_guide.md +0 -123
- vec_inf-0.4.1/uv.lock +0 -3336
- vec_inf-0.4.1/vec_inf/cli/_cli.py +0 -438
- vec_inf-0.4.1/vec_inf/cli/_utils.py +0 -147
- vec_inf-0.4.1/vec_inf/launch_server.sh +0 -145
- vec_inf-0.4.1/vec_inf/models/models.csv +0 -85
- vec_inf-0.4.1/vec_inf/multinode_vllm.slurm +0 -124
- vec_inf-0.4.1/vec_inf/vllm.slurm +0 -59
- {vec_inf-0.4.1 → vec_inf-0.5.0}/.github/ISSUE_TEMPLATE/bug_report.md +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/.github/ISSUE_TEMPLATE/config.yml +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/.github/ISSUE_TEMPLATE/feature_request.md +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/.github/dependabot.yml +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/.github/pull_request_template.md +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/.gitignore +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/.python-version +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/LICENSE +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/docs/Makefile +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/docs/make.bat +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/docs/source/_static/custom.js +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/docs/source/_static/logos/vector_logo.png +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/docs/source/_static/require.min.js +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/docs/source/_templates/base.html +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/docs/source/_templates/custom-class-template.rst +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/docs/source/_templates/custom-module-template.rst +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/docs/source/_templates/page.html +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/docs/source/conf.py +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/examples/README.md +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/examples/inference/llm/completions.py +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/examples/inference/llm/completions.sh +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/examples/inference/text_embedding/embeddings.py +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/examples/inference/vlm/vision_completions.py +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/examples/logits/logits.py +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/profile/avg_throughput.py +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/profile/gen.py +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/tests/__init__.py +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/tests/vec_inf/__init__.py +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/tests/vec_inf/cli/__init__.py +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/vec_inf/README.md +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/vec_inf/__init__.py +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/vec_inf/cli/__init__.py +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/vec_inf/find_port.sh +0 -0
- {vec_inf-0.4.1 → vec_inf-0.5.0}/venv.sh +0 -0
{vec_inf-0.4.1 → vec_inf-0.5.0}/.github/workflows/code_checks.yml

@@ -30,7 +30,7 @@ jobs:
     steps:
       - uses: actions/checkout@v4.2.2
       - name: Install uv
-        uses: astral-sh/setup-uv@v5.
+        uses: astral-sh/setup-uv@v5.3.1
         with:
           # Install a specific version of uv.
           version: "0.5.21"
@@ -46,6 +46,6 @@ jobs:
           source .venv/bin/activate
           pre-commit run --all-files
       - name: pip-audit (gh-action-pip-audit)
-        uses: pypa/gh-action-pip-audit@v1.0
+        uses: pypa/gh-action-pip-audit@v1.1.0
         with:
           virtual-environment: .venv/
vec_inf-0.5.0/.github/workflows/docker.yml
ADDED

@@ -0,0 +1,55 @@
+name: docker
+
+on:
+  release:
+    types: [published]
+  push:
+    branches:
+      - main
+    paths:
+      - Dockerfile
+      - .github/workflows/docker.yml
+  pull_request:
+    branches:
+      - main
+      - develop
+    paths:
+      - Dockerfile
+      - .github/workflows/docker.yml
+
+jobs:
+  push_to_registry:
+    name: Push Docker image to Docker Hub
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4.2.2
+
+      - name: Extract vLLM version
+        id: vllm-version
+        run: |
+          VERSION=$(grep -A 1 'name = "vllm"' uv.lock | grep version | cut -d '"' -f 2)
+          echo "version=$VERSION" >> $GITHUB_OUTPUT
+
+      - name: Log in to Docker Hub
+        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772
+        with:
+          username: ${{ secrets.DOCKER_USERNAME }}
+          password: ${{ secrets.DOCKER_PASSWORD }}
+
+      - name: Extract metadata (tags, labels) for Docker
+        id: meta
+        uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804
+        with:
+          images: vectorinstitute/vector-inference
+
+      - name: Build and push Docker image
+        uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4
+        with:
+          context: .
+          file: ./Dockerfile
+          push: true
+          tags: |
+            ${{ steps.meta.outputs.tags }}
+            vectorinstitute/vector-inference:${{ steps.vllm-version.outputs.version }}
+          labels: ${{ steps.meta.outputs.labels }}
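The version-extraction step above relies on the layout `uv` writes to `uv.lock`: a `[[package]]` entry whose `name` line is immediately followed by its `version` line. A minimal sketch of that pipeline against a hypothetical lock excerpt (the file contents below are illustrative, not the real lock file):

```bash
# Hypothetical uv.lock excerpt matching the layout the workflow assumes.
cat > /tmp/uv.lock <<'EOF'
[[package]]
name = "vllm"
version = "0.7.3"
EOF

# Same pipeline as the workflow step: grab the line after the name match,
# keep the version line, then take the second double-quoted field.
VERSION=$(grep -A 1 'name = "vllm"' /tmp/uv.lock | grep version | cut -d '"' -f 2)
echo "$VERSION"  # -> 0.7.3
```

Note the closing quote in the pattern `'name = "vllm"'` keeps it from also matching `name = "vllm-nccl-cu12"`, so the image gets tagged with the exact pinned vLLM version.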
{vec_inf-0.4.1 → vec_inf-0.5.0}/.github/workflows/docs_build.yml

@@ -27,18 +27,18 @@ jobs:
       - uses: actions/checkout@v4.2.2

       - name: Install uv
-        uses: astral-sh/setup-uv@
+        uses: astral-sh/setup-uv@f94ec6bedd8674c4426838e6b50417d36b6ab231
         with:
           version: "0.5.21"
           enable-cache: true

       - name: "Set up Python"
-        uses: actions/setup-python@
+        uses: actions/setup-python@8039c45ed9a312fba91f3399cd0605ba2ebfe93c
         with:
           python-version-file: ".python-version"

       - name: Install the project
-        run: uv sync --
+        run: uv sync --dev --group docs

       - name: Build docs
         run: cd docs && rm -rf source/reference/api/_autosummary && uv run make html
{vec_inf-0.4.1 → vec_inf-0.5.0}/.github/workflows/docs_deploy.yml

@@ -1,6 +1,6 @@
 name: docs
 permissions:
-  contents:
+  contents: write
   pull-requests: write

 on:
@@ -31,19 +31,19 @@ jobs:
           submodules: 'true'

       - name: Install uv
-        uses: astral-sh/setup-uv@
+        uses: astral-sh/setup-uv@f94ec6bedd8674c4426838e6b50417d36b6ab231
         with:
           # Install a specific version of uv.
           version: "0.5.21"
           enable-cache: true

       - name: "Set up Python"
-        uses: actions/setup-python@
+        uses: actions/setup-python@8039c45ed9a312fba91f3399cd0605ba2ebfe93c
         with:
           python-version-file: ".python-version"

       - name: Install the project
-        run: uv sync --
+        run: uv sync --dev --group docs

       - name: Build docs
         run: |
@@ -53,7 +53,7 @@ jobs:
           touch build/html/.nojekyll

       - name: Deploy to Github pages
-        uses: JamesIves/github-pages-deploy-action@
+        uses: JamesIves/github-pages-deploy-action@6c2d9db40f9296374acc17b90404b6e8864128c8
         with:
           branch: github_pages
           folder: docs/build/html
{vec_inf-0.4.1 → vec_inf-0.5.0}/.github/workflows/publish.yml

@@ -12,16 +12,21 @@ jobs:
         run: |
           sudo apt-get update
           sudo apt-get install libcurl4-openssl-dev libssl-dev
-
-      -
-
-      -
+
+      - uses: actions/checkout@v4.2.2
+
+      - name: Install uv
+        uses: astral-sh/setup-uv@v5
+        with:
+          version: "0.6.6"
+          enable-cache: true
+
+      - uses: actions/setup-python@v5.4.0
         with:
           python-version: '3.10'
+
       - name: Build package
-        run:
+        run: uv build
+
       - name: Publish package
-
-        with:
-          user: __token__
-          password: ${{ secrets.PYPI_API_TOKEN }}
+        run: uv publish --token ${{ secrets.PYPI_API_TOKEN }}
{vec_inf-0.4.1 → vec_inf-0.5.0}/.github/workflows/unit_tests.yml

@@ -39,33 +39,43 @@ on:
 jobs:
   unit-tests:
     runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version: ["3.10", "3.11", "3.12"]
     steps:
       - uses: actions/checkout@v4.2.2

       - name: Install uv
-        uses: astral-sh/setup-uv@v5.
+        uses: astral-sh/setup-uv@v5.3.1
         with:
           # Install a specific version of uv.
           version: "0.5.21"
           enable-cache: true

-      - name: "Set up Python"
+      - name: "Set up Python ${{ matrix.python-version }}"
         uses: actions/setup-python@v5.4.0
         with:
-          python-version
+          python-version: ${{ matrix.python-version }}

       - name: Install the project
-        run: uv sync --
+        run: uv sync --dev

       - name: Install dependencies and check code
         run: |
           uv run pytest -m "not integration_test" --cov vec_inf --cov-report=xml tests

-
+      - name: Install the core package only
+        run: uv sync --no-dev
+
+      - name: Run package import tests
+        run: |
+          uv run pytest tests/test_imports.py
+
       - name: Upload coverage to Codecov
-        uses: codecov/codecov-action@v5.
+        uses: codecov/codecov-action@v5.4.0
         with:
           token: ${{ secrets.CODECOV_TOKEN }}
-
+          file: ./coverage.xml
+          name: codecov-umbrella
         fail_ci_if_error: true
         verbose: true
{vec_inf-0.4.1 → vec_inf-0.5.0}/Dockerfile

@@ -12,27 +12,14 @@ ARG TORCH_CUDA_ARCH_LIST="7.5;8.0;8.6+PTX"
 # Set the Python version
 ARG PYTHON_VERSION=3.10.12

-# Install dependencies
+# Install system dependencies
 RUN apt-get update && apt-get install -y \
-    wget \
-    build-essential \
-    libssl-dev \
-    zlib1g-dev \
-    libbz2-dev \
-    libreadline-dev \
-    libsqlite3-dev \
-    libffi-dev \
-    libncursesw5-dev \
-    xz-utils \
-    tk-dev \
-    libxml2-dev \
-    libxmlsec1-dev \
-    liblzma-dev \
-    git \
-    vim \
+    wget build-essential libssl-dev zlib1g-dev libbz2-dev \
+    libreadline-dev libsqlite3-dev libffi-dev libncursesw5-dev \
+    xz-utils tk-dev libxml2-dev libxmlsec1-dev liblzma-dev git vim \
     && rm -rf /var/lib/apt/lists/*

-#
+# Install Python
 RUN wget https://www.python.org/ftp/python/$PYTHON_VERSION/Python-$PYTHON_VERSION.tgz && \
     tar -xzf Python-$PYTHON_VERSION.tgz && \
     cd Python-$PYTHON_VERSION && \
@@ -42,38 +29,24 @@ RUN wget https://www.python.org/ftp/python/$PYTHON_VERSION/Python-$PYTHON_VERSIO
     cd .. && \
     rm -rf Python-$PYTHON_VERSION.tgz Python-$PYTHON_VERSION

-#
+# Install pip and core Python tools
 RUN wget https://bootstrap.pypa.io/get-pip.py && \
     python3.10 get-pip.py && \
-    rm get-pip.py
+    rm get-pip.py && \
+    python3.10 -m pip install --upgrade pip setuptools wheel uv

-#
-RUN python3.10 -m pip install --upgrade pip setuptools wheel
-
-# Install Poetry using Python 3.10
-RUN python3.10 -m pip install poetry
-
-# Don't create venv
-RUN poetry config virtualenvs.create false
-
-# Set working directory
+# Set up project
 WORKDIR /vec-inf
-
-# Copy current directory
 COPY . /vec-inf

-#
-RUN
-
-# Install vec-inf
-RUN poetry install --extras "dev"
-
-# Install Flash Attention 2 backend
+# Install project dependencies with build requirements
+RUN PIP_INDEX_URL="https://download.pytorch.org/whl/cu121" uv pip install --system -e .[dev]
+# Install Flash Attention
 RUN python3.10 -m pip install flash-attn --no-build-isolation

-#
-RUN mkdir -p /vec-inf/nccl
-
+# Final configuration
+RUN mkdir -p /vec-inf/nccl && \
+    mv /root/.config/vllm/nccl/cu12/libnccl.so.2.18.1 /vec-inf/nccl/libnccl.so.2.18.1

 # Set the default command to start an interactive shell
 CMD ["bash"]
vec_inf-0.5.0/PKG-INFO
ADDED

@@ -0,0 +1,210 @@
Metadata-Version: 2.4
Name: vec-inf
Version: 0.5.0
Summary: Efficient LLM inference on Slurm clusters using vLLM.
Author-email: Marshall Wang <marshall.wang@vectorinstitute.ai>
License-Expression: MIT
License-File: LICENSE
Requires-Python: >=3.10
Requires-Dist: click>=8.1.0
Requires-Dist: pydantic>=2.10.6
Requires-Dist: pyyaml>=6.0.2
Requires-Dist: requests>=2.31.0
Requires-Dist: rich>=13.7.0
Provides-Extra: dev
Requires-Dist: cupy-cuda12x==12.1.0; extra == 'dev'
Requires-Dist: ray>=2.40.0; extra == 'dev'
Requires-Dist: torch>=2.5.1; extra == 'dev'
Requires-Dist: vllm-nccl-cu12<2.19,>=2.18; extra == 'dev'
Requires-Dist: vllm>=0.7.3; extra == 'dev'
Requires-Dist: xgrammar>=0.1.11; extra == 'dev'
Description-Content-Type: text/markdown

# Vector Inference: Easy inference on Slurm clusters

----------------------------------------------------

[PyPI](https://pypi.org/project/vec-inf)
[code checks](https://github.com/VectorInstitute/vector-inference/actions/workflows/code_checks.yml)
[docs](https://github.com/VectorInstitute/vector-inference/actions/workflows/docs_deploy.yml)
[codecov](https://app.codecov.io/github/VectorInstitute/vector-inference/tree/develop)

This repository provides an easy-to-use solution to run inference servers on [Slurm](https://slurm.schedmd.com/overview.html)-managed computing clusters using [vLLM](https://docs.vllm.ai/en/latest/). **All scripts in this repository run natively on the Vector Institute cluster environment.** To adapt to other environments, update the environment variables in [`cli/_helper.py`](vec_inf/cli/_helper.py), [`cli/_config.py`](vec_inf/cli/_config.py), [`vllm.slurm`](vec_inf/vllm.slurm), [`multinode_vllm.slurm`](vec_inf/multinode_vllm.slurm) and [`models.yaml`](vec_inf/config/models.yaml) accordingly.

## Installation
If you are using the Vector cluster environment and you don't need any customization to the inference server environment, run the following to install the package:

```bash
pip install vec-inf
```
Otherwise, we recommend using the provided [`Dockerfile`](Dockerfile) to set up your own environment with the package.
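For the Docker route, a minimal sketch (the image name is illustrative; the repository's own CI pushes to `vectorinstitute/vector-inference`):

```bash
# Build the image from the provided Dockerfile, then start an interactive
# shell (the image's default command) with GPUs attached.
docker build -t vec-inf .
docker run --gpus all -it vec-inf
```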

## Usage

### `launch` command

The `launch` command allows users to deploy a model as a Slurm job. If the job successfully launches, a URL endpoint is exposed for the user to send requests for inference.

We will use the Llama 3.1 model as an example. To launch an OpenAI-compatible inference server for Meta-Llama-3.1-8B-Instruct, run:

```bash
vec-inf launch Meta-Llama-3.1-8B-Instruct
```
You should see an output like the following:

<img width="600" alt="launch_img" src="https://github.com/user-attachments/assets/883e6a5b-8016-4837-8fdf-39097dfb18bf">

#### Overrides

Models that are already supported by `vec-inf` are launched using the cached configuration or the [default configuration](vec_inf/config/models.yaml). You can override these values by providing additional parameters. Use `vec-inf launch --help` to see the full list of parameters that can be overridden. For example, if `qos` is to be overridden:

```bash
vec-inf launch Meta-Llama-3.1-8B-Instruct --qos <new_qos>
```

#### Custom models

You can also launch your own custom model as long as the model architecture is [supported by vLLM](https://docs.vllm.ai/en/stable/models/supported_models.html). Make sure to follow the instructions below:
* Your model weights directory naming convention should follow `$MODEL_FAMILY-$MODEL_VARIANT` (`$MODEL_VARIANT` is optional).
* Your model weights directory should contain HuggingFace-format weights.
* You should specify your model configuration by:
  * Creating a custom configuration file for your model and specifying its path via the environment variable `VEC_INF_CONFIG`. Check the [default parameters](vec_inf/config/models.yaml) file for the format of the config file. All the parameters for the model should be specified in that config file.
  * Using launch command options to specify your model setup.
* For other model launch parameters, you can reference the default values for similar models using the [`list` command](#list-command).

Here is an example of deploying a custom [Qwen2.5-7B-Instruct-1M](https://huggingface.co/Qwen/Qwen2.5-7B-Instruct-1M) model, which is not in the default list of models, using a custom config. In this case, the model weights are assumed to be downloaded to a `model-weights` directory inside the user's home directory. The weights directory follows the naming convention, so it would be named `Qwen2.5-7B-Instruct-1M`. The following YAML file would need to be created; let's say it is named `/h/<username>/my-model-config.yaml`.

```yaml
models:
  Qwen2.5-7B-Instruct-1M:
    model_family: Qwen2.5
    model_variant: 7B-Instruct-1M
    model_type: LLM
    gpus_per_node: 1
    num_nodes: 1
    vocab_size: 152064
    max_model_len: 1010000
    max_num_seqs: 256
    pipeline_parallelism: true
    enforce_eager: false
    qos: m2
    time: 08:00:00
    partition: a40
    model_weights_parent_dir: /h/<username>/model-weights
```

You would then set the `VEC_INF_CONFIG` path using:

```bash
export VEC_INF_CONFIG=/h/<username>/my-model-config.yaml
```

Note that there are other parameters that can also be added to the config but are not shown in this example, such as `data_type` and `log_dir`.
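With the config in place, the launch itself is unchanged; a short sketch, assuming the paths from the example above:

```bash
# Point vec-inf at the custom config, then launch by the model's key
# in the YAML (which matches the weights directory name).
export VEC_INF_CONFIG=/h/<username>/my-model-config.yaml
vec-inf launch Qwen2.5-7B-Instruct-1M
```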

### `status` command
You can check the inference server status by providing the Slurm job ID to the `status` command:
```bash
vec-inf status 15373800
```

If the server is pending for resources, you should see an output like this:

<img width="400" alt="status_pending_img" src="https://github.com/user-attachments/assets/b659c302-eae1-4560-b7a9-14eb3a822a2f">

When the server is ready, you should see an output like this:

<img width="400" alt="status_ready_img" src="https://github.com/user-attachments/assets/672986c2-736c-41ce-ac7c-1fb585cdcb0d">

There are 5 possible states:

* **PENDING**: Job submitted to Slurm but not executed yet. The job pending reason will be shown.
* **LAUNCHING**: Job is running but the server is not ready yet.
* **READY**: Inference server is running and ready to take requests.
* **FAILED**: Inference server is in an unhealthy state. The job failed reason will be shown.
* **SHUTDOWN**: Inference server has been shut down or cancelled.

Note that the base URL is only available when the model is in the `READY` state, and if you've changed the Slurm log directory path, you also need to specify it when using the `status` command.
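If you want a script to block until the server reaches `READY`, a minimal polling sketch; the `--json-mode` flag is documented for `status` below, but the exact JSON keys are not documented here, so the `grep` is an assumption:

```bash
JOB_ID=15373800
# Poll every 10 seconds until the status output reports READY.
until vec-inf status "$JOB_ID" --json-mode | grep -q 'READY'; do
    sleep 10
done
echo "Server for job $JOB_ID is ready"
```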

### `metrics` command
Once your server is ready, you can check performance metrics by providing the Slurm job ID to the `metrics` command:
```bash
vec-inf metrics 15373800
```

You will see the performance metrics streamed to your console; note that the metrics are updated at a 2-second interval.

<img width="400" alt="metrics_img" src="https://github.com/user-attachments/assets/3ee143d0-1a71-4944-bbd7-4c3299bf0339">

### `shutdown` command
Finally, when you're finished using a model, you can shut it down by providing the Slurm job ID:
```bash
vec-inf shutdown 15373800

> Shutting down model with Slurm Job ID: 15373800
```
### `list` command
You can view the full list of available models by running the `list` command:
```bash
vec-inf list
```
<img width="940" alt="list_img" src="https://github.com/user-attachments/assets/8cf901c4-404c-4398-a52f-0486f00747a3">

NOTE: The above screenshot does not represent the full list of models supported.

You can also view the default setup for a specific supported model by providing the model name, for example `Meta-Llama-3.1-70B-Instruct`:
```bash
vec-inf list Meta-Llama-3.1-70B-Instruct
```
<img width="500" alt="list_model_img" src="https://github.com/user-attachments/assets/34e53937-2d86-443e-85f6-34e408653ddb">

The `launch`, `list`, and `status` commands support `--json-mode`, where the command output is structured as a JSON string.
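For example, `--json-mode` makes the output easy to consume from scripts; the field names are whatever the CLI emits, and the `jq` pretty-printing below is illustrative:

```bash
# Print the raw JSON for one model's default setup.
vec-inf list Meta-Llama-3.1-70B-Instruct --json-mode

# Or pretty-print the full model list with jq, if installed.
vec-inf list --json-mode | jq .
```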

## Send inference requests
Once the inference server is ready, you can start sending inference requests. We provide example scripts for sending inference requests in the [`examples`](examples) folder. Make sure to update the model server URL and the model weights location in the scripts. For example, you can run `python examples/inference/llm/chat_completions.py`, and you should expect to see an output like the following:

```json
{
    "id": "chatcmpl-387c2579231948ffaf66cdda5439d3dc",
    "choices": [
        {
            "finish_reason": "stop",
            "index": 0,
            "logprobs": null,
            "message": {
                "content": "Arrr, I be Captain Chatbeard, the scurviest chatbot on the seven seas! Ye be wantin' to know me identity, eh? Well, matey, I be a swashbucklin' AI, here to provide ye with answers and swappin' tales, savvy?",
                "role": "assistant",
                "function_call": null,
                "tool_calls": [],
                "reasoning_content": null
            },
            "stop_reason": null
        }
    ],
    "created": 1742496683,
    "model": "Meta-Llama-3.1-8B-Instruct",
    "object": "chat.completion",
    "system_fingerprint": null,
    "usage": {
        "completion_tokens": 66,
        "prompt_tokens": 32,
        "total_tokens": 98,
        "prompt_tokens_details": null
    },
    "prompt_logprobs": null
}
```

**NOTE**: For multimodal models, currently only `ChatCompletion` is available, and only one image can be provided for each prompt.
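Because the server is OpenAI-compatible, you can also hit it directly without the example scripts; a sketch with `curl`, where the base URL is a placeholder you would replace with the one reported by `vec-inf status` when the server is `READY`:

```bash
# Placeholder: substitute the base URL from `vec-inf status` output.
export BASE_URL="http://gpu029:8081/v1"

curl "$BASE_URL/chat/completions" \
    -H "Content-Type: application/json" \
    -d '{
        "model": "Meta-Llama-3.1-8B-Instruct",
        "messages": [{"role": "user", "content": "Who are you?"}]
    }'
```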

## SSH tunnel from your local device
If you want to run inference from your local device, you can open an SSH tunnel to your cluster environment like the following:
```bash
ssh -L 8081:172.17.8.29:8081 username@v.vectorinstitute.ai -N
```
The last number in the IP address corresponds to the GPU node (gpu029 in this case). The example above is for the Vector cluster; change the variables accordingly for your environment.
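Once the tunnel is up, requests from your local device go to `localhost`; for example, listing the served models via the OpenAI-compatible `/v1/models` endpoint that vLLM exposes:

```bash
# In another terminal, while the tunnel is running:
curl http://localhost:8081/v1/models
```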
|