guidellm 0.3.0rc20250507__tar.gz → 0.4.0a2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of guidellm might be problematic.

Files changed (73)
  1. guidellm-0.4.0a2/PKG-INFO +317 -0
  2. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/README.md +87 -25
  3. guidellm-0.4.0a2/pyproject.toml +247 -0
  4. guidellm-0.4.0a2/setup.py +126 -0
  5. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/__init__.py +8 -13
  6. guidellm-0.4.0a2/src/guidellm/__main__.py +515 -0
  7. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/backend/backend.py +25 -4
  8. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/backend/openai.py +147 -27
  9. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/backend/response.py +6 -2
  10. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/benchmark/__init__.py +16 -22
  11. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/benchmark/aggregator.py +3 -3
  12. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/benchmark/benchmark.py +11 -12
  13. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/benchmark/entrypoints.py +34 -10
  14. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/benchmark/output.py +57 -5
  15. guidellm-0.4.0a2/src/guidellm/benchmark/scenario.py +104 -0
  16. guidellm-0.4.0a2/src/guidellm/benchmark/scenarios/__init__.py +0 -0
  17. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/config.py +28 -7
  18. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/dataset/creator.py +1 -1
  19. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/dataset/synthetic.py +36 -11
  20. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/logger.py +8 -4
  21. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/objects/pydantic.py +30 -1
  22. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/objects/statistics.py +20 -14
  23. guidellm-0.4.0a2/src/guidellm/preprocess/__init__.py +3 -0
  24. guidellm-0.4.0a2/src/guidellm/preprocess/dataset.py +374 -0
  25. guidellm-0.4.0a2/src/guidellm/presentation/__init__.py +28 -0
  26. guidellm-0.4.0a2/src/guidellm/presentation/builder.py +27 -0
  27. guidellm-0.4.0a2/src/guidellm/presentation/data_models.py +232 -0
  28. guidellm-0.4.0a2/src/guidellm/presentation/injector.py +66 -0
  29. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/request/__init__.py +6 -3
  30. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/request/loader.py +5 -5
  31. {guidellm-0.3.0rc20250507/src/guidellm/scheduler → guidellm-0.4.0a2/src/guidellm/request}/types.py +4 -1
  32. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/scheduler/__init__.py +10 -15
  33. guidellm-0.4.0a2/src/guidellm/scheduler/queues.py +25 -0
  34. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/scheduler/result.py +21 -3
  35. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/scheduler/scheduler.py +68 -60
  36. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/scheduler/strategy.py +26 -24
  37. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/scheduler/worker.py +64 -103
  38. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/utils/__init__.py +17 -5
  39. guidellm-0.4.0a2/src/guidellm/utils/cli.py +62 -0
  40. guidellm-0.4.0a2/src/guidellm/utils/default_group.py +105 -0
  41. guidellm-0.4.0a2/src/guidellm/utils/dict.py +23 -0
  42. guidellm-0.4.0a2/src/guidellm/utils/hf_datasets.py +36 -0
  43. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/utils/random.py +1 -1
  44. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/utils/text.py +12 -5
  45. guidellm-0.4.0a2/src/guidellm/version.py +6 -0
  46. guidellm-0.4.0a2/src/guidellm.egg-info/PKG-INFO +317 -0
  47. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm.egg-info/SOURCES.txt +16 -1
  48. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm.egg-info/requires.txt +8 -7
  49. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm.egg-info/top_level.txt +1 -0
  50. guidellm-0.3.0rc20250507/PKG-INFO +0 -451
  51. guidellm-0.3.0rc20250507/pyproject.toml +0 -74
  52. guidellm-0.3.0rc20250507/src/guidellm/__main__.py +0 -294
  53. guidellm-0.3.0rc20250507/src/guidellm.egg-info/PKG-INFO +0 -451
  54. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/LICENSE +0 -0
  55. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/MANIFEST.in +0 -0
  56. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/setup.cfg +0 -0
  57. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/backend/__init__.py +6 -6
  58. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/benchmark/benchmarker.py +2 -2
  59. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/benchmark/profile.py +4 -4
  60. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/benchmark/progress.py +2 -2
  61. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/data/__init__.py +0 -0
  62. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/data/prideandprejudice.txt.gz +0 -0
  63. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/dataset/__init__.py +4 -4
  64. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/dataset/entrypoints.py +0 -0
  65. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/dataset/file.py +0 -0
  66. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/dataset/hf_datasets.py +0 -0
  67. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/dataset/in_memory.py +0 -0
  68. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/objects/__init__.py +2 -2
  69. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/request/request.py +0 -0
  70. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/utils/colors.py +0 -0
  71. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm/utils/hf_transformers.py +0 -0
  72. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm.egg-info/dependency_links.txt +0 -0
  73. {guidellm-0.3.0rc20250507 → guidellm-0.4.0a2}/src/guidellm.egg-info/entry_points.txt +0 -0
@@ -0,0 +1,317 @@
1
+ Metadata-Version: 2.4
2
+ Name: guidellm
3
+ Version: 0.4.0a2
4
+ Summary: Guidance platform for deploying and managing large language models.
5
+ Author: Red Hat
6
+ License-Expression: Apache-2.0
7
+ Project-URL: homepage, https://github.com/vllm-project/guidellm
8
+ Project-URL: source, https://github.com/vllm-project/guidellm
9
+ Project-URL: issues, https://github.com/vllm-project/guidellm/issues
10
+ Project-URL: docs, https://github.com/vllm-project/guidellm/tree/main/docs
11
+ Keywords: ai,benchmarking,deep-learning,deployment,evaluation,guidance,inference,language-models,large-language-model,llm,machine-learning,model-benchmark,model-evaluation,nlp,performance,vllm
12
+ Requires-Python: <4.0,>=3.9.0
13
+ Description-Content-Type: text/markdown
14
+ License-File: LICENSE
15
+ Requires-Dist: click<8.2.0,>=8.0.0
16
+ Requires-Dist: datasets
17
+ Requires-Dist: ftfy>=6.0.0
18
+ Requires-Dist: httpx[http2]<1.0.0
19
+ Requires-Dist: loguru
20
+ Requires-Dist: numpy
21
+ Requires-Dist: pillow
22
+ Requires-Dist: protobuf
23
+ Requires-Dist: pydantic>=2.11.7
24
+ Requires-Dist: pydantic-settings>=2.0.0
25
+ Requires-Dist: pyyaml>=6.0.0
26
+ Requires-Dist: rich
27
+ Requires-Dist: transformers
28
+ Provides-Extra: dev
29
+ Requires-Dist: build>=1.0.0; extra == "dev"
30
+ Requires-Dist: setuptools>=61.0; extra == "dev"
31
+ Requires-Dist: setuptools-git-versioning<3,>=2.0; extra == "dev"
32
+ Requires-Dist: pre-commit~=3.5.0; extra == "dev"
33
+ Requires-Dist: scipy~=1.10; extra == "dev"
34
+ Requires-Dist: sphinx~=7.1.2; extra == "dev"
35
+ Requires-Dist: tox~=4.16.0; extra == "dev"
36
+ Requires-Dist: lorem~=0.1.1; extra == "dev"
37
+ Requires-Dist: pytest~=8.2.2; extra == "dev"
38
+ Requires-Dist: pytest-asyncio~=0.23.8; extra == "dev"
39
+ Requires-Dist: pytest-cov~=5.0.0; extra == "dev"
40
+ Requires-Dist: pytest-mock~=3.14.0; extra == "dev"
41
+ Requires-Dist: pytest-rerunfailures~=14.0; extra == "dev"
42
+ Requires-Dist: respx~=0.22.0; extra == "dev"
43
+ Requires-Dist: mypy~=1.15.0; extra == "dev"
44
+ Requires-Dist: ruff~=0.11.7; extra == "dev"
45
+ Requires-Dist: mdformat~=0.7.17; extra == "dev"
46
+ Requires-Dist: mdformat-footnote~=0.1.1; extra == "dev"
47
+ Requires-Dist: mdformat-frontmatter~=2.0.8; extra == "dev"
48
+ Requires-Dist: mdformat-gfm~=0.3.6; extra == "dev"
49
+ Requires-Dist: types-PyYAML~=6.0.1; extra == "dev"
50
+ Requires-Dist: types-requests~=2.32.0; extra == "dev"
51
+ Requires-Dist: types-toml; extra == "dev"
52
+ Requires-Dist: mkdocs-linkcheck~=1.0.6; extra == "dev"
53
+ Dynamic: license-file
54
+
55
+ <p align="center">
56
+ <picture>
57
+ <source media="(prefers-color-scheme: dark)" srcset="https://raw.githubusercontent.com/vllm-project/guidellm/main/docs/assets/guidellm-logo-light.png">
58
+ <img alt="GuideLLM Logo" src="https://raw.githubusercontent.com/vllm-project/guidellm/main/docs/assets/guidellm-logo-dark.png" width=55%>
59
+ </picture>
60
+ </p>
61
+
62
+ <h3 align="center">
63
+ Scale Efficiently: Evaluate and Optimize Your LLM Deployments for Real-World Inference
64
+ </h3>
65
+
66
+ [![GitHub Release](https://img.shields.io/github/release/vllm-project/guidellm.svg?label=Version)](https://github.com/vllm-project/guidellm/releases) [![Documentation](https://img.shields.io/badge/Documentation-8A2BE2?logo=read-the-docs&logoColor=%23ffffff&color=%231BC070)](https://github.com/vllm-project/guidellm/tree/main/docs) [![License](https://img.shields.io/github/license/vllm-project/guidellm.svg)](https://github.com/vllm-project/guidellm/blob/main/LICENSE) [![PyPI Release](https://img.shields.io/pypi/v/guidellm.svg?label=PyPI%20Release)](https://pypi.python.org/pypi/guidellm) [![Python Versions](https://img.shields.io/badge/Python-3.9--3.13-orange)](https://pypi.python.org/pypi/guidellm) [![Nightly Build](https://img.shields.io/github/actions/workflow/status/vllm-project/guidellm/nightly.yml?branch=main&label=Nightly%20Build)](https://github.com/vllm-project/guidellm/actions/workflows/nightly.yml)
67
+
68
+ ## Overview
69
+
70
+ <p>
71
+ <picture>
72
+ <source media="(prefers-color-scheme: dark)" srcset="https://raw.githubusercontent.com/vllm-project/guidellm/main/docs/assets/guidellm-user-flows-dark.png">
73
+ <img alt="GuideLLM User Flows" src="https://raw.githubusercontent.com/vllm-project/guidellm/main/docs/assets/guidellm-user-flows-light.png">
74
+ </picture>
75
+ </p>
76
+
77
+ **GuideLLM** is a platform for evaluating and optimizing the deployment of large language models (LLMs). By simulating real-world inference workloads, GuideLLM enables users to assess the performance, resource requirements, and cost implications of deploying LLMs on various hardware configurations. This approach ensures efficient, scalable, and cost-effective LLM inference serving while maintaining high service quality.
78
+
79
+ ### Key Features
80
+
81
+ - **Performance Evaluation:** Analyze LLM inference under different load scenarios to ensure your system meets your service level objectives (SLOs).
82
+ - **Resource Optimization:** Determine the most suitable hardware configurations for running your models effectively.
83
+ - **Cost Estimation:** Understand the financial impact of different deployment strategies and make informed decisions to minimize costs.
84
+ - **Scalability Testing:** Simulate scaling to handle large numbers of concurrent users without performance degradation.
85
+
86
+ ## Getting Started
87
+
88
+ ### Installation
89
+
90
+ Before installing, ensure you have the following prerequisites:
91
+
92
+ - OS: Linux or macOS
93
+ - Python: 3.9 – 3.13
94
+
95
+ The latest GuideLLM release can be installed using pip:
96
+
97
+ ```bash
98
+ pip install guidellm
99
+ ```
100
+
101
+ Or from source code using pip:
102
+
103
+ ```bash
104
+ pip install git+https://github.com/vllm-project/guidellm.git
105
+ ```
106
+
107
+ For detailed installation instructions and requirements, see the [Installation Guide](https://github.com/vllm-project/guidellm/blob/main/docs/install.md).
108
+
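A quick sanity check after installing is to print the CLI help text; this is a minimal check only, and the available subcommands may differ between versions:

```bash
# Confirm the guidellm CLI entry point is available after installation
guidellm benchmark --help
```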
109
+ ### With Podman / Docker
110
+
111
+ Alternatively, we publish container images at [ghcr.io/vllm-project/guidellm](https://github.com/vllm-project/guidellm/pkgs/container/guidellm). Running a container is (by default) equivalent to `guidellm benchmark run`:
112
+
113
+ ```bash
114
+ podman run \
115
+ --rm -it \
116
+ -v "./results:/results:rw" \
117
+ -e GUIDELLM_TARGET=http://localhost:8000 \
118
+ -e GUIDELLM_RATE_TYPE=sweep \
119
+ -e GUIDELLM_MAX_SECONDS=30 \
120
+ -e GUIDELLM_DATA="prompt_tokens=256,output_tokens=128" \
121
+ ghcr.io/vllm-project/guidellm:latest
122
+ ```
123
+
124
+ > [!TIP] CLI options can also be specified as environment variables (e.g., `--rate-type sweep` -> `GUIDELLM_RATE_TYPE=sweep`). If both are specified, the CLI option overrides the environment variable.
125
+
126
+ Replace `latest` with `stable` for the newest tagged release or set a specific release if desired.
127
+
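To illustrate the precedence note in the tip above, the sketch below mixes the two forms. It assumes the container forwards any extra arguments to `guidellm benchmark run`, in which case the trailing CLI flags would override the `GUIDELLM_RATE_TYPE=sweep` environment variable:

```bash
# Env vars set defaults; trailing CLI flags (forwarded to `guidellm benchmark run`)
# take precedence, so this run uses a constant rate of 5 req/s rather than a sweep.
podman run --rm -it \
  -v "./results:/results:rw" \
  -e GUIDELLM_TARGET=http://localhost:8000 \
  -e GUIDELLM_RATE_TYPE=sweep \
  -e GUIDELLM_MAX_SECONDS=30 \
  -e GUIDELLM_DATA="prompt_tokens=256,output_tokens=128" \
  ghcr.io/vllm-project/guidellm:latest \
  --rate-type constant --rate 5
```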
128
+ ### Quick Start
129
+
130
+ #### 1. Start an OpenAI Compatible Server (vLLM)
131
+
132
+ GuideLLM requires an OpenAI-compatible server to run evaluations. [vLLM](https://github.com/vllm-project/vllm) is recommended for this purpose. After installing vLLM on your desired server (`pip install vllm`), start a vLLM server with a Llama 3.1 8B quantized model by running the following command:
133
+
134
+ ```bash
135
+ vllm serve "neuralmagic/Meta-Llama-3.1-8B-Instruct-quantized.w4a16"
136
+ ```
137
+
138
+ For more information on starting a vLLM server, see the [vLLM Documentation](https://docs.vllm.ai/en/latest/serving/openai_compatible_server.html).
139
+
140
+ For information on starting other supported inference servers or platforms, see the [Supported Backends Documentation](https://github.com/vllm-project/guidellm/blob/main/docs/backends.md).
141
+
142
+ #### 2. Run a GuideLLM Benchmark
143
+
144
+ To run a GuideLLM benchmark, use the `guidellm benchmark` command with the target set to an OpenAI-compatible server. For this example, the target is set to 'http://localhost:8000', assuming that vLLM is active and running on the same server. Otherwise, update it to the appropriate location. By default, GuideLLM automatically determines the model available on the server and uses it. To target a different model, pass the desired name with the `--model` argument. Additionally, the `--rate-type` is set to `sweep`, which automatically runs a range of benchmarks to determine the minimum and maximum rates that the server and model can support. Each benchmark run under the sweep will run for 30 seconds, as set by the `--max-seconds` argument. Finally, `--data` is set to a synthetic dataset with 256 prompt tokens and 128 output tokens per request. For more arguments, supported scenarios, and configurations, jump to the [Configurations Section](#configurations) or run `guidellm benchmark --help`.
145
+
146
+ Now, to start benchmarking, run the following command:
147
+
148
+ ```bash
149
+ guidellm benchmark \
150
+ --target "http://localhost:8000" \
151
+ --rate-type sweep \
152
+ --max-seconds 30 \
153
+ --data "prompt_tokens=256,output_tokens=128"
154
+ ```
155
+
156
+ The above command will begin the evaluation and provide progress updates similar to the following: <img src= "https://raw.githubusercontent.com/vllm-project/guidellm/main/docs/assets/sample-benchmarks.gif"/>
157
+
158
+ #### 3. Analyze the Results
159
+
160
+ After the evaluation is completed, GuideLLM will summarize the results into three sections:
161
+
162
+ 1. Benchmarks Metadata: A summary of the benchmark run and the arguments used to create it, including the server, data, profile, and more.
163
+ 2. Benchmarks Info: A high-level view of each benchmark and the requests that were run, including the type, duration, request statuses, and number of tokens.
164
+ 3. Benchmarks Stats: A summary of the statistics for each benchmark run, including the request rate, concurrency, latency, and token-level metrics such as TTFT, ITL, and more.
165
+
166
+ The sections will look similar to the following: <img alt="Sample GuideLLM benchmark output" src="https://raw.githubusercontent.com/vllm-project/guidellm/main/docs/assets/sample-output.png" />
167
+
168
+ For more details about the metrics and definitions, please refer to the [Metrics Documentation](https://github.com/vllm-project/guidellm/blob/main/docs/metrics.md).
169
+
170
+ #### 4. Explore the Results File
171
+
172
+ By default, the full results, including complete statistics and request data, are saved to a file `benchmarks.json` in the current working directory. This file can be used for further analysis or reporting, and it can also be reloaded into Python using the `guidellm.benchmark.GenerativeBenchmarksReport` class. You can specify a different file name and extension with the `--output-path` argument.
173
+
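A minimal sketch of reloading a saved run in Python is shown below; the `load_file` helper and the attributes available on each benchmark are assumptions here, so check the Outputs documentation for the exact API:

```python
# Sketch: reload a saved benchmarks.json for further analysis in Python.
# load_file and the per-benchmark fields are assumed names, not a confirmed API.
from guidellm.benchmark import GenerativeBenchmarksReport

report = GenerativeBenchmarksReport.load_file("benchmarks.json")
for benchmark in report.benchmarks:
    print(benchmark)  # inspect per-benchmark statistics from here
```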
174
+ For more details about the supported output file types, please take a look at the [Outputs Documentation](https://github.com/vllm-project/guidellm/blob/main/docs/outputs.md).
175
+
176
+ #### 5. Use the Results
177
+
178
+ The results from GuideLLM are used to optimize your LLM deployment for performance, resource efficiency, and cost. By analyzing the performance metrics, you can identify bottlenecks, determine the optimal request rate, and select the most cost-effective hardware configuration for your deployment.
179
+
180
+ For example, when deploying a chat application, we likely want to ensure that our time to first token (TTFT) and inter-token latency (ITL) are under certain thresholds to meet our service level objectives (SLOs) or service level agreements (SLAs). With the sample data provided in the example above, setting TTFT to 200 ms and ITL to 25 ms, we can see that even though the server is capable of handling up to 13 requests per second, we would only be able to meet our SLOs for 99% of users at a request rate of 3.5 requests per second. If we relax our constraint on ITL to 50 ms, then we can meet the TTFT SLA for 99% of users at a request rate of approximately 10 requests per second.
181
+
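As a rough aid for interpreting these numbers, an ITL budget of 25 ms corresponds to about 1 / 0.025 = 40 output tokens per second for a single request stream, while a 50 ms budget corresponds to roughly 20 tokens per second per stream.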
182
+ For further details on determining the optimal request rate and SLOs, refer to the [SLOs Documentation](https://github.com/vllm-project/guidellm/blob/main/docs/service_level_objectives.md).
183
+
184
+ ### Configurations
185
+
186
+ GuideLLM offers a range of configurations through both the benchmark CLI command and environment variables, which provide default values and more granular controls. The most common configurations are listed below, and a complete list is available by running `guidellm benchmark --help` or `guidellm config`, respectively.
187
+
188
+ #### Benchmark CLI
189
+
190
+ The `guidellm benchmark` command is used to run benchmarks against a generative AI backend/server. The command accepts a variety of arguments to customize the benchmark run. The most common arguments include (a combined example follows the list):
191
+
192
+ - `--target`: Specifies the target path for the backend to run benchmarks against. For example, `http://localhost:8000`. This is required to define the server endpoint.
193
+
194
+ - `--model`: Allows selecting a specific model from the server. If not provided, it defaults to the first model available on the server. Useful when multiple models are hosted on the same server.
195
+
196
+ - `--processor`: Used only for synthetic data creation or when the token source configuration is set to local for calculating token metrics locally. It must match the model's processor or tokenizer to ensure compatibility and correctness. This supports either a HuggingFace model ID or a local path to a processor or tokenizer.
197
+
198
+ - `--data`: Specifies the dataset to use. This can be a HuggingFace dataset ID, a local path to a dataset, or standard text files such as CSV, JSONL, and more. Additionally, synthetic data configurations can be provided using JSON or key-value strings. Synthetic data options include:
199
+
200
+ - `prompt_tokens`: Average number of tokens for prompts.
201
+ - `output_tokens`: Average number of tokens for outputs.
202
+ - `TYPE_stdev`, `TYPE_min`, `TYPE_max`: Standard deviation, minimum, and maximum values for the specified type (e.g., `prompt_tokens`, `output_tokens`). If not provided, will use the provided tokens value only.
203
+ - `samples`: Number of samples to generate, defaults to 1000.
204
+ - `source`: Source text data for generation, defaults to a local copy of Pride and Prejudice.
205
+
206
+ - `--data-args`: A JSON string used to specify the columns to source data from (e.g., `prompt_column`, `output_tokens_count_column`) and additional arguments to pass into the HuggingFace datasets constructor.
207
+
208
+ - `--data-sampler`: Enables applying `random` shuffling or sampling to the dataset. If not set, no sampling is used.
209
+
210
+ - `--rate-type`: Defines the type of benchmark to run (default sweep). Supported types include:
211
+
212
+ - `synchronous`: Runs a single stream of requests one at a time. `--rate` must not be set for this mode.
213
+ - `throughput`: Runs all requests in parallel to measure the maximum throughput for the server (bounded by GUIDELLM\_\_MAX_CONCURRENCY config argument). `--rate` must not be set for this mode.
214
+ - `concurrent`: Runs a fixed number of streams of requests in parallel. `--rate` must be set to the desired concurrency level/number of streams.
215
+ - `constant`: Sends requests asynchronously at a constant rate set by `--rate`.
216
+ - `poisson`: Sends requests at a rate following a Poisson distribution with the mean set by `--rate`.
217
+ - `sweep`: Automatically determines the minimum and maximum rates the server can support by running synchronous and throughput benchmarks, and then runs a series of benchmarks equally spaced between the two rates. The number of benchmarks is set by `--rate` (default is 10).
218
+
219
+ - `--max-seconds`: Sets the maximum duration (in seconds) for each benchmark run. If not specified, the benchmark will run until the dataset is exhausted or the `--max-requests` limit is reached.
220
+
221
+ - `--max-requests`: Sets the maximum number of requests for each benchmark run. If not provided, the benchmark will run until `--max-seconds` is reached or the dataset is exhausted.
222
+
223
+ - `--warmup-percent`: Specifies the percentage of the benchmark to treat as a warmup phase. Requests during this phase are excluded from the final results.
224
+
225
+ - `--cooldown-percent`: Specifies the percentage of the benchmark to treat as a cooldown phase. Requests during this phase are excluded from the final results.
226
+
227
+ - `--output-path`: Defines the path to save the benchmark results. Supports JSON, YAML, or CSV formats. If a directory is provided, the results will be saved as `benchmarks.json` in that directory. If not set, the results will be saved in the current working directory.
228
+
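The sketch below combines several of the arguments above into a single run; the target, model name, and output path are placeholders, and the exact value formats can be confirmed with `guidellm benchmark --help`:

```bash
# Illustrative run: a fixed 60 s benchmark at a constant 5 requests/sec with
# synthetic data (256 +/- 32 prompt tokens, 128 output tokens), results saved
# to a chosen path.
guidellm benchmark \
  --target "http://localhost:8000" \
  --model "neuralmagic/Meta-Llama-3.1-8B-Instruct-quantized.w4a16" \
  --rate-type constant \
  --rate 5 \
  --max-seconds 60 \
  --data "prompt_tokens=256,prompt_tokens_stdev=32,output_tokens=128" \
  --output-path results/benchmarks.json
```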
229
+ ### GuideLLM UI
230
+
231
+ GuideLLM UI is a companion frontend for visualizing the results of a GuideLLM benchmark run.
232
+
233
+ ### 🛠 Generating an HTML report with a benchmark run
234
+
235
+ For either pathway below, you'll need to set the output path to `benchmarks.html` for your run:
236
+
237
+ ```bash
238
+ --output-path=benchmarks.html
239
+ ```
240
+
241
+ Alternatively, load a saved run using the `from-file` command and set the output to `benchmarks.html`.
242
+
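Putting the two pathways together, the commands below are sketches; the `from-file` invocation in particular is an assumption based on the description above, so confirm the exact syntax with `guidellm benchmark --help`:

```bash
# Pathway A: write the HTML report directly from a live benchmark run
guidellm benchmark \
  --target "http://localhost:8000" \
  --data "prompt_tokens=256,output_tokens=128" \
  --output-path=benchmarks.html

# Pathway B (assumed syntax): rebuild the report from a previously saved run
guidellm benchmark from-file benchmarks.json --output-path=benchmarks.html
```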
243
+ 1. Use the Hosted Build (Recommended for Most Users)
244
+
245
+ This is preconfigured. The latest stable version of the hosted UI (https://blog.vllm.ai/guidellm/ui/latest) will be used to build the local HTML file.
246
+
247
+ Execute your run, then open benchmarks.html in your browser and you're done—no further setup required.
248
+
249
+ 2. Build and Serve the UI Locally (For Development). This option is useful if:
250
+
251
+ - You are actively developing the UI
252
+
253
+ - You want to test changes to the UI before publishing
254
+
255
+ - You want full control over how the report is displayed
256
+
257
+ ```bash
258
+ npm install
259
+ npm run build
260
+ npm run serve
261
+ ```
262
+
263
+ This will start a local server (e.g., at http://localhost:3000). Then set the Environment to LOCAL before running your benchmarks.
264
+
265
+ ```bash
266
+ export GUIDELLM__ENV=local
267
+ ```
268
+
269
+ Then you can execute your run.
270
+
271
+ ## Resources
272
+
273
+ ### Documentation
274
+
275
+ Our comprehensive documentation offers detailed guides and resources to help you maximize the benefits of GuideLLM. Whether just getting started or looking to dive deeper into advanced topics, you can find what you need in our [Documentation](https://github.com/vllm-project/guidellm/blob/main/docs).
276
+
277
+ ### Core Docs
278
+
279
+ - [**Installation Guide**](https://github.com/vllm-project/guidellm/blob/main/docs/install.md) - This guide provides step-by-step instructions for installing GuideLLM, including prerequisites and setup tips.
280
+ - [**Backends Guide**](https://github.com/vllm-project/guidellm/blob/main/docs/backends.md) - A comprehensive overview of supported backends and how to set them up for use with GuideLLM.
281
+ - [**Data/Datasets Guide**](https://github.com/vllm-project/guidellm/blob/main/docs/datasets.md) - Information on supported datasets, including how to use them for benchmarking.
282
+ - [**Metrics Guide**](https://github.com/vllm-project/guidellm/blob/main/docs/metrics.md) - Detailed explanations of the metrics used in GuideLLM, including definitions and how to interpret them.
283
+ - [**Outputs Guide**](https://github.com/vllm-project/guidellm/blob/main/docs/outputs.md) - Information on the different output formats supported by GuideLLM and how to use them.
284
+ - [**Architecture Overview**](https://github.com/vllm-project/guidellm/blob/main/docs/architecture.md) - A detailed look at GuideLLM's design, components, and how they interact.
285
+
286
+ ### Supporting External Documentation
287
+
288
+ - [**vLLM Documentation**](https://vllm.readthedocs.io/en/latest/) - Official vLLM documentation provides insights into installation, usage, and supported models.
289
+
290
+ ### Contribution Docs
291
+
292
+ We appreciate contributions to the code, examples, integrations, documentation, bug reports, and feature requests! Your feedback and involvement are crucial in helping GuideLLM grow and improve. Below are some ways you can get involved:
293
+
294
+ - [**DEVELOPING.md**](https://github.com/vllm-project/guidellm/blob/main/DEVELOPING.md) - Development guide for setting up your environment and making contributions.
295
+ - [**CONTRIBUTING.md**](https://github.com/vllm-project/guidellm/blob/main/CONTRIBUTING.md) - Guidelines for contributing to the project, including code standards, pull request processes, and more.
296
+ - [**CODE_OF_CONDUCT.md**](https://github.com/vllm-project/guidellm/blob/main/CODE_OF_CONDUCT.md) - Our expectations for community behavior to ensure a welcoming and inclusive environment.
297
+
298
+ ### Releases
299
+
300
+ Visit our [GitHub Releases Page](https://github.com/vllm-project/guidellm/releases) and review the release notes to stay updated with the latest releases.
301
+
302
+ ### License
303
+
304
+ GuideLLM is licensed under the [Apache License 2.0](https://github.com/vllm-project/guidellm/blob/main/LICENSE).
305
+
306
+ ### Cite
307
+
308
+ If you find GuideLLM helpful in your research or projects, please consider citing it:
309
+
310
+ ```bibtex
311
+ @misc{guidellm2024,
312
+ title={GuideLLM: Scalable Inference and Optimization for Large Language Models},
313
+ author={Neural Magic, Inc.},
314
+ year={2024},
315
+ howpublished={\url{https://github.com/vllm-project/guidellm}},
316
+ }
317
+ ```
@@ -1,7 +1,7 @@
1
1
  <p align="center">
2
2
  <picture>
3
- <source media="(prefers-color-scheme: dark)" srcset="https://raw.githubusercontent.com/neuralmagic/guidellm/main/docs/assets/guidellm-logo-light.png">
4
- <img alt="GuideLLM Logo" src="https://raw.githubusercontent.com/neuralmagic/guidellm/main/docs/assets/guidellm-logo-dark.png" width=55%>
3
+ <source media="(prefers-color-scheme: dark)" srcset="https://raw.githubusercontent.com/vllm-project/guidellm/main/docs/assets/guidellm-logo-light.png">
4
+ <img alt="GuideLLM Logo" src="https://raw.githubusercontent.com/vllm-project/guidellm/main/docs/assets/guidellm-logo-dark.png" width=55%>
5
5
  </picture>
6
6
  </p>
7
7
 
@@ -9,14 +9,14 @@
9
9
  Scale Efficiently: Evaluate and Optimize Your LLM Deployments for Real-World Inference
10
10
  </h3>
11
11
 
12
- [![GitHub Release](https://img.shields.io/github/release/neuralmagic/guidellm.svg?label=Version)](https://github.com/neuralmagic/guidellm/releases) [![Documentation](https://img.shields.io/badge/Documentation-8A2BE2?logo=read-the-docs&logoColor=%23ffffff&color=%231BC070)](https://github.com/neuralmagic/guidellm/tree/main/docs) [![License](https://img.shields.io/github/license/neuralmagic/guidellm.svg)](https://github.com/neuralmagic/guidellm/blob/main/LICENSE) [![PyPI Release](https://img.shields.io/pypi/v/guidellm.svg?label=PyPI%20Release)](https://pypi.python.org/pypi/guidellm) [![Python Versions](https://img.shields.io/badge/Python-3.9--3.13-orange)](https://pypi.python.org/pypi/guidellm) [![Nightly Build](https://img.shields.io/github/actions/workflow/status/neuralmagic/guidellm/nightly.yml?branch=main&label=Nightly%20Build)](https://github.com/neuralmagic/guidellm/actions/workflows/nightly.yml)
12
+ [![GitHub Release](https://img.shields.io/github/release/vllm-project/guidellm.svg?label=Version)](https://github.com/vllm-project/guidellm/releases) [![Documentation](https://img.shields.io/badge/Documentation-8A2BE2?logo=read-the-docs&logoColor=%23ffffff&color=%231BC070)](https://github.com/vllm-project/guidellm/tree/main/docs) [![License](https://img.shields.io/github/license/vllm-project/guidellm.svg)](https://github.com/vllm-project/guidellm/blob/main/LICENSE) [![PyPI Release](https://img.shields.io/pypi/v/guidellm.svg?label=PyPI%20Release)](https://pypi.python.org/pypi/guidellm) [![Python Versions](https://img.shields.io/badge/Python-3.9--3.13-orange)](https://pypi.python.org/pypi/guidellm) [![Nightly Build](https://img.shields.io/github/actions/workflow/status/vllm-project/guidellm/nightly.yml?branch=main&label=Nightly%20Build)](https://github.com/vllm-project/guidellm/actions/workflows/nightly.yml)
13
13
 
14
14
  ## Overview
15
15
 
16
16
  <p>
17
17
  <picture>
18
- <source media="(prefers-color-scheme: dark)" srcset="https://raw.githubusercontent.com/neuralmagic/guidellm/main/docs/assets/guidellm-user-flows-dark.png">
19
- <img alt="GuideLLM User Flows" src="https://raw.githubusercontent.com/neuralmagic/guidellm/main/docs/assets/guidellm-user-flows-light.png">
18
+ <source media="(prefers-color-scheme: dark)" srcset="https://raw.githubusercontent.com/vllm-project/guidellm/main/docs/assets/guidellm-user-flows-dark.png">
19
+ <img alt="GuideLLM User Flows" src="https://raw.githubusercontent.com/vllm-project/guidellm/main/docs/assets/guidellm-user-flows-light.png">
20
20
  </picture>
21
21
  </p>
22
22
 
@@ -47,10 +47,29 @@ pip install guidellm
47
47
  Or from source code using pip:
48
48
 
49
49
  ```bash
50
- pip install git+https://github.com/neuralmagic/guidellm.git
50
+ pip install git+https://github.com/vllm-project/guidellm.git
51
51
  ```
52
52
 
53
- For detailed installation instructions and requirements, see the [Installation Guide](https://github.com/neuralmagic/guidellm/tree/main/docs/install.md).
53
+ For detailed installation instructions and requirements, see the [Installation Guide](https://github.com/vllm-project/guidellm/blob/main/docs/install.md).
54
+
55
+ ### With Podman / Docker
56
+
57
+ Alternatively, we publish container images at [ghcr.io/vllm-project/guidellm](https://github.com/vllm-project/guidellm/pkgs/container/guidellm). Running a container is (by default) equivalent to `guidellm benchmark run`:
58
+
59
+ ```bash
60
+ podman run \
61
+ --rm -it \
62
+ -v "./results:/results:rw" \
63
+ -e GUIDELLM_TARGET=http://localhost:8000 \
64
+ -e GUIDELLM_RATE_TYPE=sweep \
65
+ -e GUIDELLM_MAX_SECONDS=30 \
66
+ -e GUIDELLM_DATA="prompt_tokens=256,output_tokens=128" \
67
+ ghcr.io/vllm-project/guidellm:latest
68
+ ```
69
+
70
+ > [!TIP] CLI options can also be specified as environment variables (e.g., `--rate-type sweep` -> `GUIDELLM_RATE_TYPE=sweep`). If both are specified, the CLI option overrides the environment variable.
71
+
72
+ Replace `latest` with `stable` for the newest tagged release or set a specific release if desired.
54
73
 
55
74
  ### Quick Start
56
75
 
@@ -64,7 +83,7 @@ vllm serve "neuralmagic/Meta-Llama-3.1-8B-Instruct-quantized.w4a16"
64
83
 
65
84
  For more information on starting a vLLM server, see the [vLLM Documentation](https://docs.vllm.ai/en/latest/serving/openai_compatible_server.html).
66
85
 
67
- For information on starting other supported inference servers or platforms, see the [Supported Backends documentation](https://github.com/neuralmagic/guidellm/tree/main/docs/backends.md).
86
+ For information on starting other supported inference servers or platforms, see the [Supported Backends Documentation](https://github.com/vllm-project/guidellm/blob/main/docs/backends.md).
68
87
 
69
88
  #### 2. Run a GuideLLM Benchmark
70
89
 
@@ -80,7 +99,7 @@ guidellm benchmark \
80
99
  --data "prompt_tokens=256,output_tokens=128"
81
100
  ```
82
101
 
83
- The above command will begin the evaluation and provide progress updates similar to the following: <img src= "https://raw.githubusercontent.com/neuralmagic/guidellm/main/docs/assets/sample-benchmarks.gif"/>
102
+ The above command will begin the evaluation and provide progress updates similar to the following: <img src= "https://raw.githubusercontent.com/vllm-project/guidellm/main/docs/assets/sample-benchmarks.gif"/>
84
103
 
85
104
  #### 3. Analyze the Results
86
105
 
@@ -90,15 +109,15 @@ After the evaluation is completed, GuideLLM will summarize the results into thre
90
109
  2. Benchmarks Info: A high-level view of each benchmark and the requests that were run, including the type, duration, request statuses, and number of tokens.
91
110
  3. Benchmarks Stats: A summary of the statistics for each benchmark run, including the request rate, concurrency, latency, and token-level metrics such as TTFT, ITL, and more.
92
111
 
93
- The sections will look similar to the following: <img alt="Sample GuideLLM benchmark output" src="https://raw.githubusercontent.com/neuralmagic/guidellm/main/docs/assets/sample-output.png" />
112
+ The sections will look similar to the following: <img alt="Sample GuideLLM benchmark output" src="https://raw.githubusercontent.com/vllm-project/guidellm/main/docs/assets/sample-output.png" />
94
113
 
95
- For more details about the metrics and definitions, please refer to the [Metrics documentation](https://raw.githubusercontent.com/neuralmagic/guidellm/main/docs/metrics.md).
114
+ For more details about the metrics and definitions, please refer to the [Metrics Documentation](https://github.com/vllm-project/guidellm/blob/main/docs/metrics.md).
96
115
 
97
116
  #### 4. Explore the Results File
98
117
 
99
118
  By default, the full results, including complete statistics and request data, are saved to a file `benchmarks.json` in the current working directory. This file can be used for further analysis or reporting, and it can also be reloaded into Python using the `guidellm.benchmark.GenerativeBenchmarksReport` class. You can specify a different file name and extension with the `--output-path` argument.
100
119
 
101
- For more details about the supported output file types, please take a look at the [Outputs documentation](raw.githubusercontent.com/neuralmagic/guidellm/main/docs/outputs.md).
120
+ For more details about the supported output file types, please take a look at the [Outputs Documentation](https://github.com/vllm-project/guidellm/blob/main/docs/outputs.md).
102
121
 
103
122
  #### 5. Use the Results
104
123
 
@@ -106,7 +125,7 @@ The results from GuideLLM are used to optimize your LLM deployment for performan
106
125
 
107
126
  For example, when deploying a chat application, we likely want to ensure that our time to first token (TTFT) and inter-token latency (ITL) are under certain thresholds to meet our service level objectives (SLOs) or service level agreements (SLAs). For example, setting TTFT to 200ms and ITL 25ms for the sample data provided in the example above, we can see that even though the server is capable of handling up to 13 requests per second, we would only be able to meet our SLOs for 99% of users at a request rate of 3.5 requests per second. If we relax our constraints on ITL to 50 ms, then we can meet the TTFT SLA for 99% of users at a request rate of approximately 10 requests per second.
108
127
 
109
- For further details on determining the optimal request rate and SLOs, refer to the [SLOs documentation](https://raw.githubusercontent.com/neuralmagic/guidellm/main/docs/service_level_objectives.md).
128
+ For further details on determining the optimal request rate and SLOs, refer to the [SLOs Documentation](https://github.com/vllm-project/guidellm/blob/main/docs/service_level_objectives.md).
110
129
 
111
130
  ### Configurations
112
131
 
@@ -153,19 +172,62 @@ The `guidellm benchmark` command is used to run benchmarks against a generative
153
172
 
154
173
  - `--output-path`: Defines the path to save the benchmark results. Supports JSON, YAML, or CSV formats. If a directory is provided, the results will be saved as `benchmarks.json` in that directory. If not set, the results will be saved in the current working directory.
155
174
 
175
+ ### GuideLLM UI
176
+
177
+ GuideLLM UI is a companion frontend for visualizing the results of a GuideLLM benchmark run.
178
+
179
+ ### 🛠 Generating an HTML report with a benchmark run
180
+
181
+ For either pathway below you'll need to set the output path to benchmarks.html for your run:
182
+
183
+ ```bash
184
+ --output-path=benchmarks.html
185
+ ```
186
+
187
+ Alternatively load a saved run using the from-file command and also set the output to benchmarks.html
188
+
189
+ 1. Use the Hosted Build (Recommended for Most Users)
190
+
191
+ This is preconfigured. The latest stable version of the hosted UI (https://blog.vllm.ai/guidellm/ui/latest) will be used to build the local html file.
192
+
193
+ Execute your run, then open benchmarks.html in your browser and you're done—no further setup required.
194
+
195
+ 2. Build and Serve the UI Locally (For Development) This option is useful if:
196
+
197
+ - You are actively developing the UI
198
+
199
+ - You want to test changes to the UI before publishing
200
+
201
+ - You want full control over how the report is displayed
202
+
203
+ ```bash
204
+ npm install
205
+ npm run build
206
+ npm run serve
207
+ ```
208
+
209
+ This will start a local server (e.g., at http://localhost:3000). Then set the Environment to LOCAL before running your benchmarks.
210
+
211
+ ```bash
212
+ export GUIDELLM__ENV=local
213
+ ```
214
+
215
+ Then you can execute your run.
216
+
156
217
  ## Resources
157
218
 
158
219
  ### Documentation
159
220
 
160
- Our comprehensive documentation offers detailed guides and resources to help you maximize the benefits of GuideLLM. Whether just getting started or looking to dive deeper into advanced topics, you can find what you need in our [documentation](https://github.com/neuralmagic/guidellm/tree/main/docs).
221
+ Our comprehensive documentation offers detailed guides and resources to help you maximize the benefits of GuideLLM. Whether just getting started or looking to dive deeper into advanced topics, you can find what you need in our [Documentation](https://github.com/vllm-project/guidellm/blob/main/docs).
161
222
 
162
223
  ### Core Docs
163
224
 
164
- - [**Installation Guide**](https://github.com/neuralmagic/guidellm/tree/main/docs/install.md) - This guide provides step-by-step instructions for installing GuideLLM, including prerequisites and setup tips.
165
- - [**Backends Guide**](https://github.com/neuralmagic/guidellm/tree/main/docs/backends.md) - A comprehensive overview of supported backends and how to set them up for use with GuideLLM.
166
- - [**Metrics Guide**](https://github.com/neuralmagic/guidellm/tree/main/docs/metrics.md) - Detailed explanations of the metrics used in GuideLLM, including definitions and how to interpret them.
167
- - [**Outputs Guide**](https://github.com/neuralmagic/guidellm/tree/main/docs/outputs.md) - Information on the different output formats supported by GuideLLM and how to use them.
168
- - [**Architecture Overview**](https://github.com/neuralmagic/guidellm/tree/main/docs/architecture.md) - A detailed look at GuideLLM's design, components, and how they interact.
225
+ - [**Installation Guide**](https://github.com/vllm-project/guidellm/blob/main/docs/install.md) - This guide provides step-by-step instructions for installing GuideLLM, including prerequisites and setup tips.
226
+ - [**Backends Guide**](https://github.com/vllm-project/guidellm/blob/main/docs/backends.md) - A comprehensive overview of supported backends and how to set them up for use with GuideLLM.
227
+ - [**Data/Datasets Guide**](https://github.com/vllm-project/guidellm/blob/main/docs/datasets.md) - Information on supported datasets, including how to use them for benchmarking.
228
+ - [**Metrics Guide**](https://github.com/vllm-project/guidellm/blob/main/docs/metrics.md) - Detailed explanations of the metrics used in GuideLLM, including definitions and how to interpret them.
229
+ - [**Outputs Guide**](https://github.com/vllm-project/guidellm/blob/main/docs/outputs.md) - Information on the different output formats supported by GuideLLM and how to use them.
230
+ - [**Architecture Overview**](https://github.com/vllm-project/guidellm/blob/main/docs/architecture.md) - A detailed look at GuideLLM's design, components, and how they interact.
169
231
 
170
232
  ### Supporting External Documentation
171
233
 
@@ -175,17 +237,17 @@ Our comprehensive documentation offers detailed guides and resources to help you
175
237
 
176
238
  We appreciate contributions to the code, examples, integrations, documentation, bug reports, and feature requests! Your feedback and involvement are crucial in helping GuideLLM grow and improve. Below are some ways you can get involved:
177
239
 
178
- - [**DEVELOPING.md**](https://github.com/neuralmagic/guidellm/blob/main/DEVELOPING.md) - Development guide for setting up your environment and making contributions.
179
- - [**CONTRIBUTING.md**](https://github.com/neuralmagic/guidellm/blob/main/CONTRIBUTING.md) - Guidelines for contributing to the project, including code standards, pull request processes, and more.
180
- - [**CODE_OF_CONDUCT.md**](https://github.com/neuralmagic/guidellm/blob/main/CODE_OF_CONDUCT.md) - Our expectations for community behavior to ensure a welcoming and inclusive environment.
240
+ - [**DEVELOPING.md**](https://github.com/vllm-project/guidellm/blob/main/DEVELOPING.md) - Development guide for setting up your environment and making contributions.
241
+ - [**CONTRIBUTING.md**](https://github.com/vllm-project/guidellm/blob/main/CONTRIBUTING.md) - Guidelines for contributing to the project, including code standards, pull request processes, and more.
242
+ - [**CODE_OF_CONDUCT.md**](https://github.com/vllm-project/guidellm/blob/main/CODE_OF_CONDUCT.md) - Our expectations for community behavior to ensure a welcoming and inclusive environment.
181
243
 
182
244
  ### Releases
183
245
 
184
- Visit our [GitHub Releases page](https://github.com/neuralmagic/guidellm/releases) and review the release notes to stay updated with the latest releases.
246
+ Visit our [GitHub Releases Page](https://github.com/vllm-project/guidellm/releases) and review the release notes to stay updated with the latest releases.
185
247
 
186
248
  ### License
187
249
 
188
- GuideLLM is licensed under the [Apache License 2.0](https://github.com/neuralmagic/guidellm/blob/main/LICENSE).
250
+ GuideLLM is licensed under the [Apache License 2.0](https://github.com/vllm-project/guidellm/blob/main/LICENSE).
189
251
 
190
252
  ### Cite
191
253
 
@@ -196,6 +258,6 @@ If you find GuideLLM helpful in your research or projects, please consider citin
196
258
  title={GuideLLM: Scalable Inference and Optimization for Large Language Models},
197
259
  author={Neural Magic, Inc.},
198
260
  year={2024},
199
- howpublished={\url{https://github.com/neuralmagic/guidellm}},
261
+ howpublished={\url{https://github.com/vllm-project/guidellm}},
200
262
  }
201
263
  ```