tpu-inference 0.11.1.dev202511150811__py3-none-any.whl → 0.11.1.dev202512030818__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of tpu-inference might be problematic.

Files changed (54)
  1. tests/kernels/fused_moe_v1_test.py +303 -34
  2. tests/lora/test_layers.py +0 -6
  3. tests/lora/utils.py +0 -8
  4. tests/test_envs.py +32 -11
  5. tests/test_utils.py +1 -2
  6. tpu_inference/__init__.py +22 -3
  7. tpu_inference/core/disagg_utils.py +6 -8
  8. tpu_inference/distributed/tpu_connector.py +3 -4
  9. tpu_inference/distributed/utils.py +3 -2
  10. tpu_inference/envs.py +61 -8
  11. tpu_inference/executors/ray_distributed_executor.py +31 -11
  12. tpu_inference/kernels/fused_moe/v1/kernel.py +641 -110
  13. tpu_inference/kernels/ragged_paged_attention/v3/kernel.py +77 -54
  14. tpu_inference/kernels/ragged_paged_attention/v3/kernel_hd64.py +213 -126
  15. tpu_inference/layers/common/attention_interface.py +7 -1
  16. tpu_inference/layers/common/sharding.py +5 -5
  17. tpu_inference/layers/vllm/fused_moe.py +74 -25
  18. tpu_inference/layers/vllm/quantization/common.py +6 -1
  19. tpu_inference/layers/vllm/quantization/mxfp4.py +137 -62
  20. tpu_inference/layers/vllm/quantization/unquantized.py +107 -113
  21. tpu_inference/layers/vllm/sharding.py +2 -2
  22. tpu_inference/lora/torch_punica_tpu.py +1 -2
  23. tpu_inference/models/common/model_loader.py +45 -11
  24. tpu_inference/models/jax/llama3.py +2 -1
  25. tpu_inference/models/jax/llama_eagle3.py +8 -5
  26. tpu_inference/models/jax/llama_guard_4.py +361 -0
  27. tpu_inference/models/jax/qwen2.py +2 -1
  28. tpu_inference/models/jax/qwen2_5_vl.py +163 -48
  29. tpu_inference/models/jax/qwen3.py +2 -1
  30. tpu_inference/models/jax/utils/quantization/quantization_utils.py +3 -6
  31. tpu_inference/models/jax/utils/weight_utils.py +198 -143
  32. tpu_inference/models/vllm/vllm_model_wrapper.py +14 -7
  33. tpu_inference/platforms/tpu_platform.py +28 -22
  34. tpu_inference/runner/compilation_manager.py +144 -59
  35. tpu_inference/runner/kv_cache_manager.py +17 -18
  36. tpu_inference/runner/persistent_batch_manager.py +40 -2
  37. tpu_inference/runner/structured_decoding_manager.py +2 -3
  38. tpu_inference/runner/tpu_runner.py +271 -147
  39. tpu_inference/runner/utils.py +2 -2
  40. tpu_inference/spec_decode/jax/eagle3.py +71 -21
  41. tpu_inference/tpu_info.py +4 -3
  42. tpu_inference/utils.py +36 -13
  43. tpu_inference/worker/tpu_worker.py +162 -25
  44. {tpu_inference-0.11.1.dev202511150811.dist-info → tpu_inference-0.11.1.dev202512030818.dist-info}/METADATA +3 -2
  45. {tpu_inference-0.11.1.dev202511150811.dist-info → tpu_inference-0.11.1.dev202512030818.dist-info}/RECORD +48 -53
  46. tpu_inference/mock/__init__.py +0 -0
  47. tpu_inference/mock/vllm_config_utils.py +0 -28
  48. tpu_inference/mock/vllm_envs.py +0 -1219
  49. tpu_inference/mock/vllm_logger.py +0 -212
  50. tpu_inference/mock/vllm_logging_utils.py +0 -15
  51. tpu_inference/models/jax/phi3.py +0 -376
  52. {tpu_inference-0.11.1.dev202511150811.dist-info → tpu_inference-0.11.1.dev202512030818.dist-info}/WHEEL +0 -0
  53. {tpu_inference-0.11.1.dev202511150811.dist-info → tpu_inference-0.11.1.dev202512030818.dist-info}/licenses/LICENSE +0 -0
  54. {tpu_inference-0.11.1.dev202511150811.dist-info → tpu_inference-0.11.1.dev202512030818.dist-info}/top_level.txt +0 -0
@@ -1,1219 +0,0 @@
1
- # SPDX-License-Identifier: Apache-2.0
2
- # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
-
4
- import hashlib
5
- import os
6
- import sys
7
- import tempfile
8
- from typing import TYPE_CHECKING, Any, Callable, Optional
9
-
10
- if TYPE_CHECKING:
11
- VLLM_HOST_IP: str = ""
12
- VLLM_PORT: Optional[int] = None
13
- VLLM_RPC_BASE_PATH: str = tempfile.gettempdir()
14
- VLLM_USE_MODELSCOPE: bool = False
15
- VLLM_RINGBUFFER_WARNING_INTERVAL: int = 60
16
- VLLM_NCCL_SO_PATH: Optional[str] = None
17
- LD_LIBRARY_PATH: Optional[str] = None
18
- VLLM_USE_TRITON_FLASH_ATTN: bool = True
19
- VLLM_V1_USE_PREFILL_DECODE_ATTENTION: bool = False
20
- VLLM_USE_AITER_UNIFIED_ATTENTION: bool = False
21
- VLLM_FLASH_ATTN_VERSION: Optional[int] = None
22
- LOCAL_RANK: int = 0
23
- CUDA_VISIBLE_DEVICES: Optional[str] = None
24
- VLLM_ENGINE_ITERATION_TIMEOUT_S: int = 60
25
- VLLM_API_KEY: Optional[str] = None
26
- S3_ACCESS_KEY_ID: Optional[str] = None
27
- S3_SECRET_ACCESS_KEY: Optional[str] = None
28
- S3_ENDPOINT_URL: Optional[str] = None
29
- VLLM_MODEL_REDIRECT_PATH: Optional[str] = None
30
- VLLM_CACHE_ROOT: str = os.path.expanduser("~/.cache/vllm")
31
- VLLM_CONFIG_ROOT: str = os.path.expanduser("~/.config/vllm")
32
- VLLM_USAGE_STATS_SERVER: str = "https://stats.vllm.ai"
33
- VLLM_NO_USAGE_STATS: bool = False
34
- VLLM_DO_NOT_TRACK: bool = False
35
- VLLM_USAGE_SOURCE: str = ""
36
- VLLM_CONFIGURE_LOGGING: int = 1
37
- VLLM_LOGGING_LEVEL: str = "INFO"
38
- VLLM_LOGGING_PREFIX: str = ""
39
- VLLM_LOGGING_CONFIG_PATH: Optional[str] = None
40
- VLLM_LOGITS_PROCESSOR_THREADS: Optional[int] = None
41
- VLLM_LOG_STATS_INTERVAL: float = 10.
42
- VLLM_TRACE_FUNCTION: int = 0
43
- VLLM_ATTENTION_BACKEND: Optional[str] = None
44
- VLLM_USE_FLASHINFER_SAMPLER: Optional[bool] = None
45
- VLLM_FLASHINFER_FORCE_TENSOR_CORES: bool = False
46
- VLLM_PP_LAYER_PARTITION: Optional[str] = None
47
- VLLM_CPU_KVCACHE_SPACE: Optional[int] = 0
48
- VLLM_CPU_OMP_THREADS_BIND: str = ""
49
- VLLM_CPU_NUM_OF_RESERVED_CPU: Optional[int] = None
50
- VLLM_CPU_MOE_PREPACK: bool = True
51
- VLLM_CPU_SGL_KERNEL: bool = False
52
- VLLM_XLA_CACHE_PATH: str = os.path.join(VLLM_CACHE_ROOT, "xla_cache")
53
- VLLM_XLA_CHECK_RECOMPILATION: bool = False
54
- VLLM_FUSED_MOE_CHUNK_SIZE: int = 64 * 1024
55
- VLLM_ENABLE_FUSED_MOE_ACTIVATION_CHUNKING: bool = True
56
- VLLM_USE_RAY_SPMD_WORKER: bool = False
57
- VLLM_USE_RAY_COMPILED_DAG: bool = False
58
- VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE: str = "auto"
59
- VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM: bool = False
60
- VLLM_USE_RAY_WRAPPED_PP_COMM: bool = True
61
- VLLM_XLA_USE_SPMD: bool = False
62
- VLLM_WORKER_MULTIPROC_METHOD: str = "fork"
63
- VLLM_ASSETS_CACHE: str = os.path.join(VLLM_CACHE_ROOT, "assets")
64
- VLLM_IMAGE_FETCH_TIMEOUT: int = 5
65
- VLLM_VIDEO_FETCH_TIMEOUT: int = 30
66
- VLLM_AUDIO_FETCH_TIMEOUT: int = 10
67
- VLLM_MEDIA_LOADING_THREAD_COUNT: int = 8
68
- VLLM_MAX_AUDIO_CLIP_FILESIZE_MB: int = 25
69
- VLLM_VIDEO_LOADER_BACKEND: str = "opencv"
70
- VLLM_MM_INPUT_CACHE_GIB: int = 4
71
- VLLM_TARGET_DEVICE: str = "cuda"
72
- MAX_JOBS: Optional[str] = None
73
- NVCC_THREADS: Optional[str] = None
74
- VLLM_USE_PRECOMPILED: bool = False
75
- VLLM_DOCKER_BUILD_CONTEXT: bool = False
76
- VLLM_TEST_USE_PRECOMPILED_NIGHTLY_WHEEL: bool = False
77
- VLLM_KEEP_ALIVE_ON_ENGINE_DEATH: bool = False
78
- CMAKE_BUILD_TYPE: Optional[str] = None
79
- VERBOSE: bool = False
80
- VLLM_ALLOW_LONG_MAX_MODEL_LEN: bool = False
81
- VLLM_RPC_TIMEOUT: int = 10000 # ms
82
- VLLM_HTTP_TIMEOUT_KEEP_ALIVE: int = 5 # seconds
83
- VLLM_PLUGINS: Optional[list[str]] = None
84
- VLLM_LORA_RESOLVER_CACHE_DIR: Optional[str] = None
85
- VLLM_TORCH_PROFILER_DIR: Optional[str] = None
86
- VLLM_TORCH_PROFILER_RECORD_SHAPES: bool = False
87
- VLLM_TORCH_PROFILER_WITH_PROFILE_MEMORY: bool = False
88
- VLLM_TORCH_PROFILER_WITH_STACK: bool = True
89
- VLLM_TORCH_PROFILER_WITH_FLOPS: bool = False
90
- VLLM_USE_TRITON_AWQ: bool = False
91
- VLLM_ALLOW_RUNTIME_LORA_UPDATING: bool = False
92
- VLLM_SKIP_P2P_CHECK: bool = False
93
- VLLM_DISABLED_KERNELS: list[str] = []
94
- VLLM_ROCM_USE_AITER: bool = False
95
- VLLM_ROCM_USE_AITER_PAGED_ATTN: bool = False
96
- VLLM_ROCM_USE_AITER_LINEAR: bool = True
97
- VLLM_ROCM_USE_AITER_MOE: bool = True
98
- VLLM_ROCM_USE_AITER_RMSNORM: bool = True
99
- VLLM_ROCM_USE_AITER_MLA: bool = True
100
- VLLM_ROCM_USE_AITER_MHA: bool = True
101
- VLLM_ROCM_USE_SKINNY_GEMM: bool = True
102
- VLLM_ROCM_FP8_PADDING: bool = True
103
- VLLM_ROCM_MOE_PADDING: bool = True
104
- VLLM_ROCM_CUSTOM_PAGED_ATTN: bool = True
105
- VLLM_ENABLE_V1_MULTIPROCESSING: bool = True
106
- VLLM_LOG_BATCHSIZE_INTERVAL: float = -1
107
- VLLM_DISABLE_COMPILE_CACHE: bool = False
108
- Q_SCALE_CONSTANT: int = 200
109
- K_SCALE_CONSTANT: int = 200
110
- V_SCALE_CONSTANT: int = 100
111
- VLLM_SERVER_DEV_MODE: bool = False
112
- VLLM_V1_OUTPUT_PROC_CHUNK_SIZE: int = 128
113
- VLLM_MLA_DISABLE: bool = False
114
- VLLM_RAY_PER_WORKER_GPUS: float = 1.0
115
- VLLM_RAY_BUNDLE_INDICES: str = ""
116
- VLLM_CUDART_SO_PATH: Optional[str] = None
117
- VLLM_DP_RANK: int = 0
118
- VLLM_DP_RANK_LOCAL: int = -1
119
- VLLM_DP_SIZE: int = 1
120
- VLLM_DP_MASTER_IP: str = ""
121
- VLLM_DP_MASTER_PORT: int = 0
122
- VLLM_MOE_DP_CHUNK_SIZE: int = 256
123
- VLLM_RANDOMIZE_DP_DUMMY_INPUTS: bool = False
124
- VLLM_MARLIN_USE_ATOMIC_ADD: bool = False
125
- VLLM_MXFP4_USE_MARLIN: Optional[bool] = None
126
- VLLM_V0_USE_OUTLINES_CACHE: bool = False
127
- VLLM_V1_USE_OUTLINES_CACHE: bool = False
128
- VLLM_TPU_BUCKET_PADDING_GAP: int = 0
129
- VLLM_TPU_MOST_MODEL_LEN: Optional[int] = None
130
- VLLM_TPU_USING_PATHWAYS: bool = False
131
- VLLM_USE_DEEP_GEMM: bool = False
132
- VLLM_USE_DEEP_GEMM_E8M0: bool = True
133
- VLLM_SKIP_DEEP_GEMM_WARMUP: bool = False
134
- VLLM_USE_FLASHINFER_MOE_FP8: bool = False
135
- VLLM_USE_FLASHINFER_MOE_FP4: bool = False
136
- VLLM_FLASHINFER_MOE_BACKEND: str = "throughput"
137
- VLLM_XGRAMMAR_CACHE_MB: int = 0
138
- VLLM_MSGPACK_ZERO_COPY_THRESHOLD: int = 256
139
- VLLM_ALLOW_INSECURE_SERIALIZATION: bool = False
140
- VLLM_NIXL_SIDE_CHANNEL_HOST: str = "localhost"
141
- VLLM_NIXL_SIDE_CHANNEL_PORT: int = 5557
142
- VLLM_ALL2ALL_BACKEND: str = "naive"
143
- VLLM_MAX_TOKENS_PER_EXPERT_FP4_MOE: int = 163840
144
- VLLM_TOOL_PARSE_REGEX_TIMEOUT_SECONDS: int = 1
145
- VLLM_SLEEP_WHEN_IDLE: bool = False
146
- VLLM_MQ_MAX_CHUNK_BYTES_MB: int = 16
147
- VLLM_EXECUTE_MODEL_TIMEOUT_SECONDS: int = 300
148
- VLLM_KV_CACHE_LAYOUT: Optional[str] = None
149
- VLLM_COMPUTE_NANS_IN_LOGITS: bool = False
150
- VLLM_USE_NVFP4_CT_EMULATIONS: bool = False
151
- VLLM_ROCM_QUICK_REDUCE_QUANTIZATION: str = "NONE"
152
- VLLM_ROCM_QUICK_REDUCE_CAST_BF16_TO_FP16: bool = True
153
- VLLM_ROCM_QUICK_REDUCE_MAX_SIZE_BYTES_MB: Optional[int] = None
154
- VLLM_NIXL_ABORT_REQUEST_TIMEOUT: int = 120
155
- VLLM_USE_CUDNN_PREFILL: bool = False
156
- VLLM_ENABLE_CUDAGRAPH_GC: bool = False
157
- VLLM_LOOPBACK_IP: str = ""
158
- VLLM_ALLOW_CHUNKED_LOCAL_ATTN_WITH_HYBRID_KV_CACHE: bool = False
159
- VLLM_ENABLE_RESPONSES_API_STORE: bool = False
160
- VLLM_USE_TRTLLM_ATTENTION: Optional[str] = None
161
- VLLM_USE_FLASHINFER_MOE_MXFP4_MXFP8: bool = False
162
- VLLM_USE_FLASHINFER_MOE_MXFP4_BF16: bool = False
163
- VLLM_TUNED_CONFIG_FOLDER: Optional[str] = None
164
-
165
-
166
- def get_default_cache_root():
167
- return os.getenv(
168
- "XDG_CACHE_HOME",
169
- os.path.join(os.path.expanduser("~"), ".cache"),
170
- )
171
-
172
-
173
- def get_default_config_root():
174
- return os.getenv(
175
- "XDG_CONFIG_HOME",
176
- os.path.join(os.path.expanduser("~"), ".config"),
177
- )
178
-
179
-
180
- def maybe_convert_int(value: Optional[str]) -> Optional[int]:
181
- if value is None:
182
- return None
183
- return int(value)
184
-
185
-
186
- def maybe_convert_bool(value: Optional[str]) -> Optional[bool]:
187
- if value is None:
188
- return None
189
- return bool(int(value))
190
-
191
-
192
- def get_vllm_port() -> Optional[int]:
193
- """Get the port from VLLM_PORT environment variable.
194
-
195
- Returns:
196
- The port number as an integer if VLLM_PORT is set, None otherwise.
197
-
198
- Raises:
199
- ValueError: If VLLM_PORT is a URI, suggest k8s service discovery issue.
200
- """
201
- if 'VLLM_PORT' not in os.environ:
202
- return None
203
-
204
- port = os.getenv('VLLM_PORT', '0')
205
-
206
- try:
207
- return int(port)
208
- except ValueError as err:
209
- from urllib.parse import urlparse
210
- parsed = urlparse(port)
211
- if parsed.scheme:
212
- raise ValueError(
213
- f"VLLM_PORT '{port}' appears to be a URI. "
214
- "This may be caused by a Kubernetes service discovery issue,"
215
- "check the warning in: https://docs.vllm.ai/en/stable/serving/env_vars.html"
216
- ) from None
217
- raise ValueError(
218
- f"VLLM_PORT '{port}' must be a valid integer") from err
219
-
220
-
221
- # The begin-* and end* here are used by the documentation generator
222
- # to extract the used env vars.
223
-
224
- # --8<-- [start:env-vars-definition]
225
-
226
- environment_variables: dict[str, Callable[[], Any]] = {
227
-
228
- # ================== Installation Time Env Vars ==================
229
-
230
- # Target device of vLLM, supporting [cuda (by default),
231
- # rocm, neuron, cpu]
232
- "VLLM_TARGET_DEVICE":
233
- lambda: os.getenv("VLLM_TARGET_DEVICE", "cuda").lower(),
234
-
235
- # Maximum number of compilation jobs to run in parallel.
236
- # By default this is the number of CPUs
237
- "MAX_JOBS":
238
- lambda: os.getenv("MAX_JOBS", None),
239
-
240
- # Number of threads to use for nvcc
241
- # By default this is 1.
242
- # If set, `MAX_JOBS` will be reduced to avoid oversubscribing the CPU.
243
- "NVCC_THREADS":
244
- lambda: os.getenv("NVCC_THREADS", None),
245
-
246
- # If set, vllm will use precompiled binaries (*.so)
247
- "VLLM_USE_PRECOMPILED":
248
- lambda: os.environ.get("VLLM_USE_PRECOMPILED", "").strip().lower() in
249
- ("1", "true") or bool(os.environ.get("VLLM_PRECOMPILED_WHEEL_LOCATION")),
250
-
251
- # Used to mark that setup.py is running in a Docker build context,
252
- # in order to force the use of precompiled binaries.
253
- "VLLM_DOCKER_BUILD_CONTEXT":
254
- lambda: os.environ.get("VLLM_DOCKER_BUILD_CONTEXT", "").strip().lower() in
255
- ("1", "true"),
256
-
257
- # Whether to force using nightly wheel in python build.
258
- # This is used for testing the nightly wheel in python build.
259
- "VLLM_TEST_USE_PRECOMPILED_NIGHTLY_WHEEL":
260
- lambda: bool(int(os.getenv("VLLM_TEST_USE_PRECOMPILED_NIGHTLY_WHEEL", "0"))
261
- ),
262
-
263
- # CMake build type
264
- # If not set, defaults to "Debug" or "RelWithDebInfo"
265
- # Available options: "Debug", "Release", "RelWithDebInfo"
266
- "CMAKE_BUILD_TYPE":
267
- lambda: os.getenv("CMAKE_BUILD_TYPE"),
268
-
269
- # If set, vllm will print verbose logs during installation
270
- "VERBOSE":
271
- lambda: bool(int(os.getenv('VERBOSE', '0'))),
272
-
273
- # Root directory for vLLM configuration files
274
- # Defaults to `~/.config/vllm` unless `XDG_CONFIG_HOME` is set
275
- # Note that this not only affects how vllm finds its configuration files
276
- # during runtime, but also affects how vllm installs its configuration
277
- # files during **installation**.
278
- "VLLM_CONFIG_ROOT":
279
- lambda: os.path.expanduser(
280
- os.getenv(
281
- "VLLM_CONFIG_ROOT",
282
- os.path.join(get_default_config_root(), "vllm"),
283
- )),
284
-
285
- # ================== Runtime Env Vars ==================
286
-
287
- # Root directory for vLLM cache files
288
- # Defaults to `~/.cache/vllm` unless `XDG_CACHE_HOME` is set
289
- "VLLM_CACHE_ROOT":
290
- lambda: os.path.expanduser(
291
- os.getenv(
292
- "VLLM_CACHE_ROOT",
293
- os.path.join(get_default_cache_root(), "vllm"),
294
- )),
295
-
296
- # used in distributed environment to determine the ip address
297
- # of the current node, when the node has multiple network interfaces.
298
- # If you are using multi-node inference, you should set this differently
299
- # on each node.
300
- 'VLLM_HOST_IP':
301
- lambda: os.getenv('VLLM_HOST_IP', ""),
302
-
303
- # used in distributed environment to manually set the communication port
304
- # Note: if VLLM_PORT is set, and some code asks for multiple ports, the
305
- # VLLM_PORT will be used as the first port, and the rest will be generated
306
- # by incrementing the VLLM_PORT value.
307
- 'VLLM_PORT':
308
- get_vllm_port,
309
-
310
- # path used for ipc when the frontend api server is running in
311
- # multi-processing mode to communicate with the backend engine process.
312
- 'VLLM_RPC_BASE_PATH':
313
- lambda: os.getenv('VLLM_RPC_BASE_PATH', tempfile.gettempdir()),
314
-
315
- # If true, will load models from ModelScope instead of Hugging Face Hub.
316
- # note that the value is true or false, not numbers
317
- "VLLM_USE_MODELSCOPE":
318
- lambda: os.environ.get("VLLM_USE_MODELSCOPE", "False").lower() == "true",
319
-
320
- # Interval in seconds to log a warning message when the ring buffer is full
321
- "VLLM_RINGBUFFER_WARNING_INTERVAL":
322
- lambda: int(os.environ.get("VLLM_RINGBUFFER_WARNING_INTERVAL", "60")),
323
-
324
- # path to cudatoolkit home directory, under which should be bin, include,
325
- # and lib directories.
326
- "CUDA_HOME":
327
- lambda: os.environ.get("CUDA_HOME", None),
328
-
329
- # Path to the NCCL library file. It is needed because nccl>=2.19 brought
330
- # by PyTorch contains a bug: https://github.com/NVIDIA/nccl/issues/1234
331
- "VLLM_NCCL_SO_PATH":
332
- lambda: os.environ.get("VLLM_NCCL_SO_PATH", None),
333
-
334
- # when `VLLM_NCCL_SO_PATH` is not set, vllm will try to find the nccl
335
- # library file in the locations specified by `LD_LIBRARY_PATH`
336
- "LD_LIBRARY_PATH":
337
- lambda: os.environ.get("LD_LIBRARY_PATH", None),
338
-
339
- # flag to control if vllm should use triton flash attention
340
- "VLLM_USE_TRITON_FLASH_ATTN":
341
- lambda: (os.environ.get("VLLM_USE_TRITON_FLASH_ATTN", "True").lower() in
342
- ("true", "1")),
343
-
344
- # Use separate prefill and decode kernels for V1 attention instead of
345
- # the unified triton kernel.
346
- "VLLM_V1_USE_PREFILL_DECODE_ATTENTION":
347
- lambda:
348
- (os.getenv("VLLM_V1_USE_PREFILL_DECODE_ATTENTION", "False").lower() in
349
- ("true", "1")),
350
-
351
- # Use AITER triton unified attention for V1 attention
352
- "VLLM_USE_AITER_UNIFIED_ATTENTION":
353
- lambda:
354
- (os.getenv("VLLM_USE_AITER_UNIFIED_ATTENTION", "False").lower() in
355
- ("true", "1")),
356
-
357
- # Force vllm to use a specific flash-attention version (2 or 3), only valid
358
- # when using the flash-attention backend.
359
- "VLLM_FLASH_ATTN_VERSION":
360
- lambda: maybe_convert_int(os.environ.get("VLLM_FLASH_ATTN_VERSION", None)),
361
-
362
- # Internal flag to enable Dynamo fullgraph capture
363
- "VLLM_TEST_DYNAMO_FULLGRAPH_CAPTURE":
364
- lambda: bool(
365
- os.environ.get("VLLM_TEST_DYNAMO_FULLGRAPH_CAPTURE", "1") != "0"),
366
-
367
- # Feature flag to enable/disable Inductor standalone compile.
368
- # In torch <= 2.7 we ignore this flag; in torch >= 2.8 this is
369
- # enabled by default.
370
- "VLLM_USE_STANDALONE_COMPILE":
371
- lambda: os.environ.get("VLLM_USE_STANDALONE_COMPILE", "1") == "1",
372
-
373
- # local rank of the process in the distributed setting, used to determine
374
- # the GPU device id
375
- "LOCAL_RANK":
376
- lambda: int(os.environ.get("LOCAL_RANK", "0")),
377
-
378
- # used to control the visible devices in the distributed setting
379
- "CUDA_VISIBLE_DEVICES":
380
- lambda: os.environ.get("CUDA_VISIBLE_DEVICES", None),
381
-
382
- # timeout for each iteration in the engine
383
- "VLLM_ENGINE_ITERATION_TIMEOUT_S":
384
- lambda: int(os.environ.get("VLLM_ENGINE_ITERATION_TIMEOUT_S", "60")),
385
-
386
- # API key for vLLM API server
387
- "VLLM_API_KEY":
388
- lambda: os.environ.get("VLLM_API_KEY", None),
389
-
390
- # Whether to log responses from API Server for debugging
391
- "VLLM_DEBUG_LOG_API_SERVER_RESPONSE":
392
- lambda: os.environ.get("VLLM_DEBUG_LOG_API_SERVER_RESPONSE", "False"
393
- ).lower() == "true",
394
-
395
- # S3 access information, used for tensorizer to load model from S3
396
- "S3_ACCESS_KEY_ID":
397
- lambda: os.environ.get("S3_ACCESS_KEY_ID", None),
398
- "S3_SECRET_ACCESS_KEY":
399
- lambda: os.environ.get("S3_SECRET_ACCESS_KEY", None),
400
- "S3_ENDPOINT_URL":
401
- lambda: os.environ.get("S3_ENDPOINT_URL", None),
402
-
403
- # Usage stats collection
404
- "VLLM_USAGE_STATS_SERVER":
405
- lambda: os.environ.get("VLLM_USAGE_STATS_SERVER", "https://stats.vllm.ai"),
406
- "VLLM_NO_USAGE_STATS":
407
- lambda: os.environ.get("VLLM_NO_USAGE_STATS", "0") == "1",
408
- "VLLM_DO_NOT_TRACK":
409
- lambda: (os.environ.get("VLLM_DO_NOT_TRACK", None) or os.environ.get(
410
- "DO_NOT_TRACK", None) or "0") == "1",
411
- "VLLM_USAGE_SOURCE":
412
- lambda: os.environ.get("VLLM_USAGE_SOURCE", "production"),
413
-
414
- # Logging configuration
415
- # If set to 0, vllm will not configure logging
416
- # If set to 1, vllm will configure logging using the default configuration
417
- # or the configuration file specified by VLLM_LOGGING_CONFIG_PATH
418
- "VLLM_CONFIGURE_LOGGING":
419
- lambda: int(os.getenv("VLLM_CONFIGURE_LOGGING", "1")),
420
- "VLLM_LOGGING_CONFIG_PATH":
421
- lambda: os.getenv("VLLM_LOGGING_CONFIG_PATH"),
422
-
423
- # this is used for configuring the default logging level
424
- "VLLM_LOGGING_LEVEL":
425
- lambda: os.getenv("VLLM_LOGGING_LEVEL", "INFO").upper(),
426
-
427
- # if set, VLLM_LOGGING_PREFIX will be prepended to all log messages
428
- "VLLM_LOGGING_PREFIX":
429
- lambda: os.getenv("VLLM_LOGGING_PREFIX", ""),
430
-
431
- # if set, vllm will call logits processors in a thread pool with this many
432
- # threads. This is useful when using custom logits processors that either
433
- # (a) launch additional CUDA kernels or (b) do significant CPU-bound work
434
- # while not holding the python GIL, or both.
435
- "VLLM_LOGITS_PROCESSOR_THREADS":
436
- lambda: int(os.getenv("VLLM_LOGITS_PROCESSOR_THREADS", "0"))
437
- if "VLLM_LOGITS_PROCESSOR_THREADS" in os.environ else None,
438
-
439
- # If set, vllm will log stats at this interval in seconds
440
- # If not set, vllm will log stats every 10 seconds.
441
- "VLLM_LOG_STATS_INTERVAL":
442
- lambda: val if (val := float(os.getenv("VLLM_LOG_STATS_INTERVAL", "10.")))
443
- > 0. else 10.,
444
-
445
- # Trace function calls
446
- # If set to 1, vllm will trace function calls
447
- # Useful for debugging
448
- "VLLM_TRACE_FUNCTION":
449
- lambda: int(os.getenv("VLLM_TRACE_FUNCTION", "0")),
450
-
451
- # Backend for attention computation
452
- # Available options:
453
- # - "TORCH_SDPA": use torch.nn.MultiheadAttention
454
- # - "FLASH_ATTN": use FlashAttention
455
- # - "XFORMERS": use XFormers
456
- # - "ROCM_FLASH": use ROCmFlashAttention
457
- # - "FLASHINFER": use flashinfer
458
- # - "FLASHMLA": use FlashMLA
459
- "VLLM_ATTENTION_BACKEND":
460
- lambda: os.getenv("VLLM_ATTENTION_BACKEND", None),
461
-
462
- # If set, vllm will use flashinfer sampler
463
- "VLLM_USE_FLASHINFER_SAMPLER":
464
- lambda: bool(int(os.environ["VLLM_USE_FLASHINFER_SAMPLER"]))
465
- if "VLLM_USE_FLASHINFER_SAMPLER" in os.environ else None,
466
-
467
- # If set, vllm will force flashinfer to use tensor cores;
468
- # otherwise will use heuristic based on model architecture.
469
- "VLLM_FLASHINFER_FORCE_TENSOR_CORES":
470
- lambda: bool(int(os.getenv("VLLM_FLASHINFER_FORCE_TENSOR_CORES", "0"))),
471
-
472
- # Pipeline stage partition strategy
473
- "VLLM_PP_LAYER_PARTITION":
474
- lambda: os.getenv("VLLM_PP_LAYER_PARTITION", None),
475
-
476
- # (CPU backend only) CPU key-value cache space.
477
- # default is None and will be set as 4 GB
478
- "VLLM_CPU_KVCACHE_SPACE":
479
- lambda: int(os.getenv("VLLM_CPU_KVCACHE_SPACE", "0"))
480
- if "VLLM_CPU_KVCACHE_SPACE" in os.environ else None,
481
-
482
- # (CPU backend only) CPU core ids bound by OpenMP threads, e.g., "0-31",
483
- # "0,1,2", "0-31,33". CPU cores of different ranks are separated by '|'.
484
- "VLLM_CPU_OMP_THREADS_BIND":
485
- lambda: os.getenv("VLLM_CPU_OMP_THREADS_BIND", "auto"),
486
-
487
- # (CPU backend only) CPU cores not used by OMP threads.
488
- # Those CPU cores will not be used by OMP threads of a rank.
489
- "VLLM_CPU_NUM_OF_RESERVED_CPU":
490
- lambda: int(os.getenv("VLLM_CPU_NUM_OF_RESERVED_CPU", "0"))
491
- if "VLLM_CPU_NUM_OF_RESERVED_CPU" in os.environ else None,
492
-
493
- # (CPU backend only) whether to use prepack for MoE layer. This will be
494
- # passed to ipex.llm.modules.GatedMLPMOE. On unsupported CPUs, you might
495
- # need to set this to "0" (False).
496
- "VLLM_CPU_MOE_PREPACK":
497
- lambda: bool(int(os.getenv("VLLM_CPU_MOE_PREPACK", "1"))),
498
-
499
- # (CPU backend only) whether to use SGL kernels, optimized for small batch.
500
- "VLLM_CPU_SGL_KERNEL":
501
- lambda: bool(int(os.getenv("VLLM_CPU_SGL_KERNEL", "0"))),
502
-
503
- # If the env var is set, then all workers will execute as separate
504
- # processes from the engine, and we use the same mechanism to trigger
505
- # execution on all workers.
506
- # Run vLLM with VLLM_USE_RAY_SPMD_WORKER=1 to enable it.
507
- "VLLM_USE_RAY_SPMD_WORKER":
508
- lambda: bool(int(os.getenv("VLLM_USE_RAY_SPMD_WORKER", "0"))),
509
-
510
- # If the env var is set, it uses the Ray's Compiled Graph
511
- # (previously known as ADAG) API which optimizes the
512
- # control plane overhead.
513
- # Run vLLM with VLLM_USE_RAY_COMPILED_DAG=1 to enable it.
514
- # Note that this variable is set to 1 in V1 by default
515
- # when ray distributed executor is used.
516
- "VLLM_USE_RAY_COMPILED_DAG":
517
- lambda: bool(int(os.getenv("VLLM_USE_RAY_COMPILED_DAG", "0"))),
518
-
519
- # If the env var is set, Ray Compiled Graph uses the specified
520
- # channel type to communicate between workers belonging to
521
- # different pipeline-parallel stages.
522
- # Available options:
523
- # - "auto": use the default channel type
524
- # - "nccl": use NCCL for communication
525
- # - "shm": use shared memory and gRPC for communication
526
- # This flag is ignored if VLLM_USE_RAY_COMPILED_DAG is not set.
527
- "VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE":
528
- lambda: os.getenv("VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE", "auto"),
529
-
530
- # If the env var is set, it enables GPU communication overlap
531
- # (experimental feature) in Ray's Compiled Graph. This flag is ignored if
532
- # VLLM_USE_RAY_COMPILED_DAG is not set.
533
- "VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM":
534
- lambda: bool(int(os.getenv("VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM", "0"))
535
- ),
536
-
537
- # If the env var is set, it uses a Ray Communicator wrapping
538
- # vLLM's pipeline parallelism communicator to interact with Ray's
539
- # Compiled Graph. Otherwise, it uses Ray's NCCL communicator.
540
- # This flag is ignored if VLLM_USE_RAY_COMPILED_DAG is not set.
541
- "VLLM_USE_RAY_WRAPPED_PP_COMM":
542
- lambda: bool(int(os.getenv("VLLM_USE_RAY_WRAPPED_PP_COMM", "1"))),
543
-
544
- # Use dedicated multiprocess context for workers.
545
- # Both spawn and fork work
546
- "VLLM_WORKER_MULTIPROC_METHOD":
547
- lambda: os.getenv("VLLM_WORKER_MULTIPROC_METHOD", "fork"),
548
-
549
- # Path to the cache for storing downloaded assets
550
- "VLLM_ASSETS_CACHE":
551
- lambda: os.path.expanduser(
552
- os.getenv(
553
- "VLLM_ASSETS_CACHE",
554
- os.path.join(get_default_cache_root(), "vllm", "assets"),
555
- )),
556
-
557
- # Timeout for fetching images when serving multimodal models
558
- # Default is 5 seconds
559
- "VLLM_IMAGE_FETCH_TIMEOUT":
560
- lambda: int(os.getenv("VLLM_IMAGE_FETCH_TIMEOUT", "5")),
561
-
562
- # Timeout for fetching videos when serving multimodal models
563
- # Default is 30 seconds
564
- "VLLM_VIDEO_FETCH_TIMEOUT":
565
- lambda: int(os.getenv("VLLM_VIDEO_FETCH_TIMEOUT", "30")),
566
-
567
- # Timeout for fetching audio when serving multimodal models
568
- # Default is 10 seconds
569
- "VLLM_AUDIO_FETCH_TIMEOUT":
570
- lambda: int(os.getenv("VLLM_AUDIO_FETCH_TIMEOUT", "10")),
571
-
572
- # Max number of workers for the thread pool handling
573
- # media bytes loading. Set to 1 to disable parallel processing.
574
- # Default is 8
575
- "VLLM_MEDIA_LOADING_THREAD_COUNT":
576
- lambda: int(os.getenv("VLLM_MEDIA_LOADING_THREAD_COUNT", "8")),
577
-
578
- # Maximum filesize in MB for a single audio file when processing
579
- # speech-to-text requests. Files larger than this will be rejected.
580
- # Default is 25 MB
581
- "VLLM_MAX_AUDIO_CLIP_FILESIZE_MB":
582
- lambda: int(os.getenv("VLLM_MAX_AUDIO_CLIP_FILESIZE_MB", "25")),
583
-
584
- # Backend for Video IO
585
- # - "opencv": Default backend that uses OpenCV stream buffered backend.
586
- #
587
- # Custom backend implementations can be registered
588
- # via `@VIDEO_LOADER_REGISTRY.register("my_custom_video_loader")` and
589
- # imported at runtime.
590
- # If a non-existing backend is used, an AssertionError will be thrown.
591
- "VLLM_VIDEO_LOADER_BACKEND":
592
- lambda: os.getenv("VLLM_VIDEO_LOADER_BACKEND", "opencv"),
593
-
594
- # [DEPRECATED] Cache size (in GiB per process) for multimodal input cache
595
- # Default is 4 GiB per API process + 4 GiB per engine core process
596
- "VLLM_MM_INPUT_CACHE_GIB":
597
- lambda: int(os.getenv("VLLM_MM_INPUT_CACHE_GIB", "4")),
598
-
599
- # Path to the XLA persistent cache directory.
600
- # Only used for XLA devices such as TPUs.
601
- "VLLM_XLA_CACHE_PATH":
602
- lambda: os.path.expanduser(
603
- os.getenv(
604
- "VLLM_XLA_CACHE_PATH",
605
- os.path.join(get_default_cache_root(), "vllm", "xla_cache"),
606
- )),
607
-
608
- # If set, assert on XLA recompilation after each execution step.
609
- "VLLM_XLA_CHECK_RECOMPILATION":
610
- lambda: bool(int(os.getenv("VLLM_XLA_CHECK_RECOMPILATION", "0"))),
611
-
612
- # Enable SPMD mode for TPU backend.
613
- "VLLM_XLA_USE_SPMD":
614
- lambda: bool(int(os.getenv("VLLM_XLA_USE_SPMD", "0"))),
615
- "VLLM_FUSED_MOE_CHUNK_SIZE":
616
- lambda: int(os.getenv("VLLM_FUSED_MOE_CHUNK_SIZE", "32768")),
617
- # Control whether to use fused MoE activation chunking. Current chunking
618
- # logic is incompatible with torch.compile and causes IMA. See issue
619
- # https://github.com/vllm-project/vllm/issues/19631.
620
- "VLLM_ENABLE_FUSED_MOE_ACTIVATION_CHUNKING":
621
- lambda: bool(
622
- int(os.getenv("VLLM_ENABLE_FUSED_MOE_ACTIVATION_CHUNKING", "1"))),
623
-
624
- # If set, the OpenAI API server will stay alive even after the underlying
625
- # AsyncLLMEngine errors and stops serving requests
626
- "VLLM_KEEP_ALIVE_ON_ENGINE_DEATH":
627
- lambda: bool(os.getenv("VLLM_KEEP_ALIVE_ON_ENGINE_DEATH", 0)),
628
-
629
- # If the env var VLLM_ALLOW_LONG_MAX_MODEL_LEN is set, it allows
630
- # the user to specify a max sequence length greater than
631
- # the max length derived from the model's config.json.
632
- # To enable this, set VLLM_ALLOW_LONG_MAX_MODEL_LEN=1.
633
- "VLLM_ALLOW_LONG_MAX_MODEL_LEN":
634
- lambda:
635
- (os.environ.get("VLLM_ALLOW_LONG_MAX_MODEL_LEN", "0").strip().lower() in
636
- ("1", "true")),
637
-
638
- # If set, forces FP8 Marlin to be used for FP8 quantization regardless
639
- # of the hardware support for FP8 compute.
640
- "VLLM_TEST_FORCE_FP8_MARLIN":
641
- lambda:
642
- (os.environ.get("VLLM_TEST_FORCE_FP8_MARLIN", "0").strip().lower() in
643
- ("1", "true")),
644
- "VLLM_TEST_FORCE_LOAD_FORMAT":
645
- lambda: os.getenv("VLLM_TEST_FORCE_LOAD_FORMAT", "dummy"),
646
-
647
- # Time in ms for the zmq client to wait for a response from the backend
648
- # server for simple data operations
649
- "VLLM_RPC_TIMEOUT":
650
- lambda: int(os.getenv("VLLM_RPC_TIMEOUT", "10000")),
651
-
652
- # Timeout in seconds for keeping HTTP connections alive in API server
653
- "VLLM_HTTP_TIMEOUT_KEEP_ALIVE":
654
- lambda: int(os.environ.get("VLLM_HTTP_TIMEOUT_KEEP_ALIVE", "5")),
655
-
656
- # a list of plugin names to load, separated by commas.
657
- # if this is not set, it means all plugins will be loaded
658
- # if this is set to an empty string, no plugins will be loaded
659
- "VLLM_PLUGINS":
660
- lambda: None if "VLLM_PLUGINS" not in os.environ else os.environ[
661
- "VLLM_PLUGINS"].split(","),
662
-
663
- # a local directory to look in for unrecognized LoRA adapters.
664
- # only works if plugins are enabled and
665
- # VLLM_ALLOW_RUNTIME_LORA_UPDATING is enabled.
666
- "VLLM_LORA_RESOLVER_CACHE_DIR":
667
- lambda: os.getenv("VLLM_LORA_RESOLVER_CACHE_DIR", None),
668
-
669
- # Enables torch profiler if set. Path to the directory where torch profiler
670
- # traces are saved. Note that it must be an absolute path.
671
- "VLLM_TORCH_PROFILER_DIR":
672
- lambda: (None if os.getenv("VLLM_TORCH_PROFILER_DIR", None) is None else os
673
- .path.expanduser(os.getenv("VLLM_TORCH_PROFILER_DIR", "."))),
674
-
675
- # Enable torch profiler to record shapes if set
676
- # VLLM_TORCH_PROFILER_RECORD_SHAPES=1. If not set, torch profiler will
677
- # not record shapes.
678
- "VLLM_TORCH_PROFILER_RECORD_SHAPES":
679
- lambda: bool(os.getenv("VLLM_TORCH_PROFILER_RECORD_SHAPES", "0") != "0"),
680
-
681
- # Enable torch profiler to profile memory if set
682
- # VLLM_TORCH_PROFILER_WITH_PROFILE_MEMORY=1. If not set, torch profiler
683
- # will not profile memory.
684
- "VLLM_TORCH_PROFILER_WITH_PROFILE_MEMORY":
685
- lambda: bool(
686
- os.getenv("VLLM_TORCH_PROFILER_WITH_PROFILE_MEMORY", "0") != "0"),
687
-
688
- # Enable torch profiler to profile stack if set
689
- # VLLM_TORCH_PROFILER_WITH_STACK=1. If not set, torch profiler WILL
690
- # profile stack by default.
691
- "VLLM_TORCH_PROFILER_WITH_STACK":
692
- lambda: bool(os.getenv("VLLM_TORCH_PROFILER_WITH_STACK", "1") != "0"),
693
-
694
- # Enable torch profiler to profile flops if set
695
- # VLLM_TORCH_PROFILER_WITH_FLOPS=1. If not set, torch profiler will
696
- # not profile flops.
697
- "VLLM_TORCH_PROFILER_WITH_FLOPS":
698
- lambda: bool(os.getenv("VLLM_TORCH_PROFILER_WITH_FLOPS", "0") != "0"),
699
-
700
- # If set, vLLM will use Triton implementations of AWQ.
701
- "VLLM_USE_TRITON_AWQ":
702
- lambda: bool(int(os.getenv("VLLM_USE_TRITON_AWQ", "0"))),
703
-
704
- # If set, allow loading or unloading lora adapters in runtime,
705
- "VLLM_ALLOW_RUNTIME_LORA_UPDATING":
706
- lambda:
707
- (os.environ.get("VLLM_ALLOW_RUNTIME_LORA_UPDATING", "0").strip().lower() in
708
- ("1", "true")),
709
-
710
- # We assume drivers can report p2p status correctly.
711
- # If the program hangs when using custom allreduce,
712
- # potentially caused by a bug in the driver (535 series),
713
- # it might be helpful to set VLLM_SKIP_P2P_CHECK=0
714
- # so that vLLM can verify if p2p is actually working.
715
- # See https://github.com/vllm-project/vllm/blob/a9b15c606fea67a072416ea0ea115261a2756058/vllm/distributed/device_communicators/custom_all_reduce_utils.py#L101-L108 for details. # noqa
716
- "VLLM_SKIP_P2P_CHECK":
717
- lambda: os.getenv("VLLM_SKIP_P2P_CHECK", "1") == "1",
718
-
719
- # List of quantization kernels that should be disabled, used for testing
720
- # and performance comparisons. Currently only affects MPLinearKernel
721
- # selection
722
- # (kernels: MacheteLinearKernel, MarlinLinearKernel, ExllamaLinearKernel)
723
- "VLLM_DISABLED_KERNELS":
724
- lambda: [] if "VLLM_DISABLED_KERNELS" not in os.environ else os.environ[
725
- "VLLM_DISABLED_KERNELS"].split(","),
726
-
727
- # Disable aiter ops unless specifically enabled.
728
- # Acts as a parent switch to enable the rest of the other operations.
729
- "VLLM_ROCM_USE_AITER":
730
- lambda: (os.getenv("VLLM_ROCM_USE_AITER", "False").lower() in
731
- ("true", "1")),
732
-
733
- # Whether to use aiter paged attention.
734
- # By default is disabled.
735
- "VLLM_ROCM_USE_AITER_PAGED_ATTN":
736
- lambda: (os.getenv("VLLM_ROCM_USE_AITER_PAGED_ATTN", "False").lower() in
737
- ("true", "1")),
738
-
739
- # use aiter linear op if aiter ops are enabled
740
- # The following list of related ops
741
- # - scaled_mm (per-tensor / rowwise)
742
- "VLLM_ROCM_USE_AITER_LINEAR":
743
- lambda: (os.getenv("VLLM_ROCM_USE_AITER_LINEAR", "True").lower() in
744
- ("true", "1")),
745
-
746
- # Whether to use aiter moe ops.
747
- # By default is enabled.
748
- "VLLM_ROCM_USE_AITER_MOE":
749
- lambda: (os.getenv("VLLM_ROCM_USE_AITER_MOE", "True").lower() in
750
- ("true", "1")),
751
-
752
- # use aiter rms norm op if aiter ops are enabled.
753
- "VLLM_ROCM_USE_AITER_RMSNORM":
754
- lambda: (os.getenv("VLLM_ROCM_USE_AITER_RMSNORM", "True").lower() in
755
- ("true", "1")),
756
-
757
- # Whether to use aiter mla ops.
758
- # By default is enabled.
759
- "VLLM_ROCM_USE_AITER_MLA":
760
- lambda: (os.getenv("VLLM_ROCM_USE_AITER_MLA", "True").lower() in
761
- ("true", "1")),
762
-
763
- # Whether to use aiter mha ops.
764
- # By default is enabled.
765
- "VLLM_ROCM_USE_AITER_MHA":
766
- lambda: (os.getenv("VLLM_ROCM_USE_AITER_MHA", "True").lower() in
767
- ("true", "1")),
768
-
769
- # use rocm skinny gemms
770
- "VLLM_ROCM_USE_SKINNY_GEMM":
771
- lambda: (os.getenv("VLLM_ROCM_USE_SKINNY_GEMM", "True").lower() in
772
- ("true", "1")),
773
-
774
- # Pad the fp8 weights to 256 bytes for ROCm
775
- "VLLM_ROCM_FP8_PADDING":
776
- lambda: bool(int(os.getenv("VLLM_ROCM_FP8_PADDING", "1"))),
777
-
778
- # Pad the weights for the moe kernel
779
- "VLLM_ROCM_MOE_PADDING":
780
- lambda: bool(int(os.getenv("VLLM_ROCM_MOE_PADDING", "1"))),
781
-
782
- # custom paged attention kernel for MI3* cards
783
- "VLLM_ROCM_CUSTOM_PAGED_ATTN":
784
- lambda: (os.getenv("VLLM_ROCM_CUSTOM_PAGED_ATTN", "True").lower() in
785
- ("true", "1")),
786
-
787
- # Custom quick allreduce kernel for MI3* cards
788
- # Choice of quantization level: FP, INT8, INT6, INT4 or NONE
789
- # Recommended for large models to get allreduce
790
- "VLLM_ROCM_QUICK_REDUCE_QUANTIZATION":
791
- lambda: os.getenv("VLLM_ROCM_QUICK_REDUCE_QUANTIZATION", "NONE").upper(),
792
-
793
- # Custom quick allreduce kernel for MI3* cards
794
- # Due to the lack of the bfloat16 asm instruction, bfloat16
795
- # kernels are slower than fp16,
796
- # If environment variable is set to 1, the input is converted to fp16
797
- "VLLM_ROCM_QUICK_REDUCE_CAST_BF16_TO_FP16":
798
- lambda:
799
- (os.getenv("VLLM_ROCM_QUICK_REDUCE_CAST_BF16_TO_FP16", "True").lower() in
800
- ("true", "1")),
801
-
802
- # Custom quick allreduce kernel for MI3* cards.
803
- # Controls the maximum allowed number of data bytes(MB) for custom quick
804
- # allreduce communication.
805
- # Default: 2048 MB.
806
- # Data exceeding this size will use either custom allreduce or RCCL
807
- # communication.
808
- "VLLM_ROCM_QUICK_REDUCE_MAX_SIZE_BYTES_MB":
809
- lambda: maybe_convert_int(
810
- os.environ.get("VLLM_ROCM_QUICK_REDUCE_MAX_SIZE_BYTES_MB", None)),
811
-
812
- # Divisor for dynamic query scale factor calculation for FP8 KV Cache
813
- "Q_SCALE_CONSTANT":
814
- lambda: int(os.getenv("Q_SCALE_CONSTANT", "200")),
815
- # Divisor for dynamic key scale factor calculation for FP8 KV Cache
816
- "K_SCALE_CONSTANT":
817
- lambda: int(os.getenv("K_SCALE_CONSTANT", "200")),
818
- # Divisor for dynamic value scale factor calculation for FP8 KV Cache
819
- "V_SCALE_CONSTANT":
820
- lambda: int(os.getenv("V_SCALE_CONSTANT", "100")),
821
-
822
- # If set, enable multiprocessing in LLM for the V1 code path.
823
- "VLLM_ENABLE_V1_MULTIPROCESSING":
824
- lambda: bool(int(os.getenv("VLLM_ENABLE_V1_MULTIPROCESSING", "1"))),
825
- "VLLM_LOG_BATCHSIZE_INTERVAL":
826
- lambda: float(os.getenv("VLLM_LOG_BATCHSIZE_INTERVAL", "-1")),
827
- "VLLM_DISABLE_COMPILE_CACHE":
828
- lambda: bool(int(os.getenv("VLLM_DISABLE_COMPILE_CACHE", "0"))),
829
-
830
- # If set, vllm will run in development mode, which will enable
831
- # some additional endpoints for developing and debugging,
832
- # e.g. `/reset_prefix_cache`
833
- "VLLM_SERVER_DEV_MODE":
834
- lambda: bool(int(os.getenv("VLLM_SERVER_DEV_MODE", "0"))),
835
-
836
- # Controls the maximum number of requests to handle in a
837
- # single asyncio task when processing per-token outputs in the
838
- # V1 AsyncLLM interface. It is applicable when handling a high
839
- # concurrency of streaming requests.
840
- # Setting this too high can result in a higher variance of
841
- # inter-message latencies. Setting it too low can negatively impact
842
- # TTFT and overall throughput.
843
- "VLLM_V1_OUTPUT_PROC_CHUNK_SIZE":
844
- lambda: int(os.getenv("VLLM_V1_OUTPUT_PROC_CHUNK_SIZE", "128")),
845
-
846
- # If set, vLLM will disable the MLA attention optimizations.
847
- "VLLM_MLA_DISABLE":
848
- lambda: bool(int(os.getenv("VLLM_MLA_DISABLE", "0"))),
849
-
850
- # Number of GPUs per worker in Ray, if it is set to be a fraction,
851
- # it allows ray to schedule multiple actors on a single GPU,
852
- # so that users can colocate other actors on the same GPUs as vLLM.
853
- "VLLM_RAY_PER_WORKER_GPUS":
854
- lambda: float(os.getenv("VLLM_RAY_PER_WORKER_GPUS", "1.0")),
855
-
856
- # Bundle indices for Ray, if it is set, it can control precisely
857
- # which indices are used for the Ray bundle, for every worker.
858
- # Format: comma-separated list of integers, e.g. "0,1,2,3"
859
- "VLLM_RAY_BUNDLE_INDICES":
860
- lambda: os.getenv("VLLM_RAY_BUNDLE_INDICES", ""),
861
-
862
- # In some system, find_loaded_library() may not work. So we allow users to
863
- # specify the path through environment variable VLLM_CUDART_SO_PATH.
864
- "VLLM_CUDART_SO_PATH":
865
- lambda: os.getenv("VLLM_CUDART_SO_PATH", None),
866
-
867
- # Rank of the process in the data parallel setting
868
- "VLLM_DP_RANK":
869
- lambda: int(os.getenv("VLLM_DP_RANK", "0")),
870
-
871
- # Rank of the process in the data parallel setting.
872
- # Defaults to VLLM_DP_RANK when not set.
873
- "VLLM_DP_RANK_LOCAL":
874
- lambda: int(
875
- os.getenv("VLLM_DP_RANK_LOCAL", sys.modules[__name__].VLLM_DP_RANK)),
876
-
877
- # World size of the data parallel setting
878
- "VLLM_DP_SIZE":
879
- lambda: int(os.getenv("VLLM_DP_SIZE", "1")),
880
-
881
- # IP address of the master node in the data parallel setting
882
- "VLLM_DP_MASTER_IP":
883
- lambda: os.getenv("VLLM_DP_MASTER_IP", "127.0.0.1"),
884
-
885
- # Port of the master node in the data parallel setting
886
- "VLLM_DP_MASTER_PORT":
887
- lambda: int(os.getenv("VLLM_DP_MASTER_PORT", "0")),
888
-
889
- # In the context of executing MoE models with Data-Parallel, Expert-Parallel
890
- # and Batched All-to-All dispatch/combine kernels, VLLM_MOE_DP_CHUNK_SIZE
891
- # dictates the quantum of tokens that can be dispatched from a DP
892
- # rank. All DP ranks process the activations in VLLM_MOE_DP_CHUNK_SIZE
893
- # units.
894
- "VLLM_MOE_DP_CHUNK_SIZE":
895
- lambda: int(os.getenv("VLLM_MOE_DP_CHUNK_SIZE", "256")),
896
-
897
- # Randomize inputs during dummy runs when using Data Parallel
898
- "VLLM_RANDOMIZE_DP_DUMMY_INPUTS":
899
- lambda: os.environ.get("VLLM_RANDOMIZE_DP_DUMMY_INPUTS", "0") == "1",
900
-
901
- # Whether to use S3 path for model loading in CI via RunAI Streamer
902
- "VLLM_CI_USE_S3":
903
- lambda: os.environ.get("VLLM_CI_USE_S3", "0") == "1",
904
-
905
- # Use model_redirect to redirect the model name to a local folder.
906
- # `model_redirect` can be a json file mapping the model between
907
- # repo_id and local folder:
908
- # {"meta-llama/Llama-3.2-1B": "/tmp/Llama-3.2-1B"}
909
- # or a space separated values table file:
910
- # meta-llama/Llama-3.2-1B /tmp/Llama-3.2-1B
911
- "VLLM_MODEL_REDIRECT_PATH":
912
- lambda: os.environ.get("VLLM_MODEL_REDIRECT_PATH", None),
913
-
914
- # Whether to use atomicAdd reduce in gptq/awq marlin kernel.
915
- "VLLM_MARLIN_USE_ATOMIC_ADD":
916
- lambda: os.environ.get("VLLM_MARLIN_USE_ATOMIC_ADD", "0") == "1",
917
-
918
- # Whether to use marlin kernel in mxfp4 quantization method
919
- "VLLM_MXFP4_USE_MARLIN":
920
- lambda: maybe_convert_bool(os.environ.get("VLLM_MXFP4_USE_MARLIN", None)),
921
-
922
- # Whether to turn on the outlines cache for V0
923
- # This cache is unbounded and on disk, so it's not safe to use in
924
- # an environment with potentially malicious users.
925
- "VLLM_V0_USE_OUTLINES_CACHE":
926
- lambda: os.environ.get("VLLM_V0_USE_OUTLINES_CACHE", "0") == "1",
927
-
928
- # Whether to turn on the outlines cache for V1
929
- # This cache is unbounded and on disk, so it's not safe to use in
930
- # an environment with potentially malicious users.
931
- "VLLM_V1_USE_OUTLINES_CACHE":
932
- lambda: os.environ.get("VLLM_V1_USE_OUTLINES_CACHE", "0") == "1",
933
-
934
- # Gap between padding buckets for the forward pass. So we have
935
- # 8, we will run forward pass with [16, 24, 32, ...].
936
- "VLLM_TPU_BUCKET_PADDING_GAP":
937
- lambda: int(os.environ["VLLM_TPU_BUCKET_PADDING_GAP"])
938
- if "VLLM_TPU_BUCKET_PADDING_GAP" in os.environ else 0,
939
- "VLLM_TPU_MOST_MODEL_LEN":
940
- lambda: maybe_convert_int(os.environ.get("VLLM_TPU_MOST_MODEL_LEN", None)),
941
-
942
- # Whether using Pathways
943
- "VLLM_TPU_USING_PATHWAYS":
944
- lambda: bool("proxy" in os.getenv("JAX_PLATFORMS", "").lower()),
945
-
946
- # Allow use of DeepGemm kernels for fused moe ops.
947
- "VLLM_USE_DEEP_GEMM":
948
- lambda: bool(int(os.getenv("VLLM_USE_DEEP_GEMM", "0"))),
949
-
950
- # Whether to use E8M0 scaling when DeepGEMM is used on Blackwell GPUs.
951
- # E8M0 is faster on B200 but may reduce accuracy.
952
- "VLLM_USE_DEEP_GEMM_E8M0":
953
- lambda: bool(int(os.getenv("VLLM_USE_DEEP_GEMM_E8M0", "1"))),
954
- # DeepGemm JITs the kernels on-demand. The warmup attempts to make DeepGemm
955
- # JIT all the required kernels before model execution so there is no
956
- # JIT'ing in the hot-path. However, this warmup increases the engine
957
- # startup time by a couple of minutes.
958
- # Set `VLLM_SKIP_DEEP_GEMM_WARMUP` to disable the warmup.
959
- "VLLM_SKIP_DEEP_GEMM_WARMUP":
960
- lambda: bool(int(os.getenv("VLLM_SKIP_DEEP_GEMM_WARMUP", "0"))),
961
-
962
- # Allow use of FlashInfer MoE kernels for fused moe ops.
963
- "VLLM_USE_FLASHINFER_MOE_FP8":
964
- lambda: bool(int(os.getenv("VLLM_USE_FLASHINFER_MOE_FP8", "0"))),
965
-
966
- # Allow use of FlashInfer CUTLASS kernels for fused moe ops.
967
- "VLLM_USE_FLASHINFER_MOE_FP4":
968
- lambda: bool(int(os.getenv("VLLM_USE_FLASHINFER_MOE_FP4", "0"))),
969
-
970
- # If set to 1, use the FlashInfer
971
- # MXFP8 (activation) x MXFP4 (weight) MoE backend.
972
- "VLLM_USE_FLASHINFER_MOE_MXFP4_MXFP8":
973
- lambda: bool(int(os.getenv("VLLM_USE_FLASHINFER_MOE_MXFP4_MXFP8", "0"))),
974
-
975
- # If set to 1, use the FlashInfer
976
- # BF16 (activation) x MXFP4 (weight) MoE backend.
977
- "VLLM_USE_FLASHINFER_MOE_MXFP4_BF16":
978
- lambda: bool(int(os.getenv("VLLM_USE_FLASHINFER_MOE_MXFP4_BF16", "0"))),
979
-
980
- # Control the cache size used by the xgrammar compiler. The default
981
- # of 512 MB should be enough for roughly 1000 JSON schemas.
982
- # It can be changed with this variable if needed for some reason.
983
- "VLLM_XGRAMMAR_CACHE_MB":
984
- lambda: int(os.getenv("VLLM_XGRAMMAR_CACHE_MB", "512")),
985
-
986
- # Control the threshold for msgspec to use 'zero copy' for
987
- # serialization/deserialization of tensors. Tensors below
988
- # this limit will be encoded into the msgpack buffer, and
989
- # tensors above will instead be sent via a separate message.
990
- # While the sending side still actually copies the tensor
991
- # in all cases, on the receiving side, tensors above this
992
- # limit will actually be zero-copy decoded.
993
- "VLLM_MSGPACK_ZERO_COPY_THRESHOLD":
994
- lambda: int(os.getenv("VLLM_MSGPACK_ZERO_COPY_THRESHOLD", "256")),
995
-
996
- # If set, allow insecure serialization using pickle.
997
- # This is useful for environments where it is deemed safe to use the
998
- # insecure method and it is needed for some reason.
999
- "VLLM_ALLOW_INSECURE_SERIALIZATION":
1000
- lambda: bool(int(os.getenv("VLLM_ALLOW_INSECURE_SERIALIZATION", "0"))),
1001
-
1002
- # IP address used for NIXL handshake between remote agents.
1003
- "VLLM_NIXL_SIDE_CHANNEL_HOST":
1004
- lambda: os.getenv("VLLM_NIXL_SIDE_CHANNEL_HOST", "localhost"),
1005
-
1006
- # Port used for NIXL handshake between remote agents.
1007
- "VLLM_NIXL_SIDE_CHANNEL_PORT":
1008
- lambda: int(os.getenv("VLLM_NIXL_SIDE_CHANNEL_PORT", "5557")),
1009
-
1010
- # all2all backend for vllm's expert parallel communication
1011
- # Available options:
1012
- # - "naive": naive all2all implementation using all-reduce
1013
- # - "pplx": use pplx kernels
1014
- # - "deepep_high_throughput", use deepep high-throughput kernels
1015
- # - "deepep_low_latency", use deepep low-latency kernels
1016
- "VLLM_ALL2ALL_BACKEND":
1017
- lambda: os.getenv("VLLM_ALL2ALL_BACKEND", "naive"),
1018
-
1019
- # Flashinfer MoE backend for vLLM's fused Mixture-of-Experts support. Both
1020
- # require compute capability 10.0 or above.
1021
- # Available options:
1022
- # - "throughput": [default]
1023
- # Uses CUTLASS kernels optimized for high-throughput batch inference.
1024
- # - "latency":
1025
- # Uses TensorRT-LLM kernels optimized for low-latency inference.
1026
- # To set this backend, define the environment variable:
1027
- # export VLLM_FLASHINFER_MOE_BACKEND=latency.
1028
- # If not set, defaults to "throughput".
1029
- "VLLM_FLASHINFER_MOE_BACKEND": lambda: os.getenv(
1030
- "VLLM_FLASHINFER_MOE_BACKEND", "throughput"
1031
- ),
1032
-
1033
- # Control the maximum number of tokens per expert supported by the
1034
- # NVFP4 MoE CUTLASS Kernel. This value is used to create a buffer for
1035
- # the blockscale tensor of activations NVFP4 Quantization.
1036
- # This is used to prevent the kernel from running out of memory.
1037
- "VLLM_MAX_TOKENS_PER_EXPERT_FP4_MOE":
1038
- lambda: int(os.getenv("VLLM_MAX_TOKENS_PER_EXPERT_FP4_MOE", "163840")),
1039
-
1040
- # MoE routing strategy selector.
1041
- # See `RoutingSimulator.get_available_strategies()` # for available
1042
- # strategies.
1043
- # Custom routing strategies can be registered by
1044
- # RoutingSimulator.register_strategy()
1045
- # Note: custom strategies may not produce correct model outputs
1046
- "VLLM_MOE_ROUTING_SIMULATION_STRATEGY":
1047
- lambda: os.environ.get("VLLM_MOE_ROUTING_SIMULATION_STRATEGY", "").lower(),
1048
-
1049
- # Regex timeout for use by the vLLM tool parsing plugins.
1050
- "VLLM_TOOL_PARSE_REGEX_TIMEOUT_SECONDS":
1051
- lambda: int(os.getenv("VLLM_TOOL_PARSE_REGEX_TIMEOUT_SECONDS", "1")),
1052
-
1053
- # Reduce CPU usage when vLLM is idle. Enabling this will incur small
1054
- # latency penalty when a request eventually comes.
1055
- "VLLM_SLEEP_WHEN_IDLE":
1056
- lambda: bool(int(os.getenv("VLLM_SLEEP_WHEN_IDLE", "0"))),
1057
-
1058
- # Control the max chunk bytes (in MB) for the rpc message queue.
1059
- # Object larger than this threshold will be broadcast to worker
1060
- # processes via zmq.
1061
- "VLLM_MQ_MAX_CHUNK_BYTES_MB":
1062
- lambda: int(os.getenv("VLLM_MQ_MAX_CHUNK_BYTES_MB", "16")),
1063
-
1064
- # Timeout in seconds for execute_model RPC calls in multiprocessing
1065
- # executor (only applies when TP > 1).
1066
- "VLLM_EXECUTE_MODEL_TIMEOUT_SECONDS":
1067
- lambda: int(os.getenv("VLLM_EXECUTE_MODEL_TIMEOUT_SECONDS", "300")),
1068
-
1069
- # KV Cache layout used throughout vllm.
1070
- # Some common values are:
1071
- # - NHD
1072
- # - HND
1073
- # Where N=num_blocks, H=num_heads and D=head_size. The default value will
1074
- # leave the layout choice to the backend. Mind that backends may only
1075
- # implement and support a subset of all possible layouts.
1076
- "VLLM_KV_CACHE_LAYOUT":
1077
- lambda: os.getenv("VLLM_KV_CACHE_LAYOUT", None),
1078
-
1079
- # Enable checking whether the generated logits contain NaNs,
1080
- # indicating corrupted output. Useful for debugging low level bugs
1081
- # or bad hardware but it may add compute overhead.
1082
- "VLLM_COMPUTE_NANS_IN_LOGITS":
1083
- lambda: bool(int(os.getenv("VLLM_COMPUTE_NANS_IN_LOGITS", "0"))),
1084
-
1085
- # Controls whether or not emulations are used for NVFP4
1086
- # generations on machines < 100 for compressed-tensors
1087
- # models
1088
- "VLLM_USE_NVFP4_CT_EMULATIONS":
1089
- lambda: bool(int(os.getenv("VLLM_USE_NVFP4_CT_EMULATIONS", "0"))),
1090
-
1091
- # Time (in seconds) after which the KV cache on the producer side is
1092
- # automatically cleared if no READ notification is received from the
1093
- # consumer. This is only applicable when using NixlConnector in a
1094
- # disaggregated decode-prefill setup.
1095
- "VLLM_NIXL_ABORT_REQUEST_TIMEOUT":
1096
- lambda: int(os.getenv("VLLM_NIXL_ABORT_REQUEST_TIMEOUT", "120")),
1097
-
1098
- # Controls whether or not to use cudnn prefill
1099
- "VLLM_USE_CUDNN_PREFILL":
1100
- lambda: bool(int(os.getenv("VLLM_USE_CUDNN_PREFILL", "0"))),
1101
-
1102
- # If set to 1, use the TRTLLM attention backend in flashinfer.
1103
- "VLLM_USE_TRTLLM_ATTENTION":
1104
- lambda: os.getenv("VLLM_USE_TRTLLM_ATTENTION", None),
1105
-
1106
- # If set to 1, force the use of TRTLLM FP4 GEMM backend in flashinfer.
1107
- # Otherwise, uses the first available of: flashinfer cutlass GEMM,
1108
- # vllm cutlass GEMM, marlin GEMM.
1109
- "VLLM_USE_TRTLLM_FP4_GEMM":
1110
- lambda: bool(int(os.getenv("VLLM_USE_TRTLLM_FP4_GEMM", "0"))),
1111
-
1112
- # Controls garbage collection during CUDA graph capture.
1113
- # If set to 0 (default), enables GC freezing to speed up capture time.
1114
- # If set to 1, allows GC to run during capture.
1115
- "VLLM_ENABLE_CUDAGRAPH_GC":
1116
- lambda: bool(int(os.getenv("VLLM_ENABLE_CUDAGRAPH_GC", "0"))),
1117
-
1118
- # Used to force set up loopback IP
1119
- "VLLM_LOOPBACK_IP":
1120
- lambda: os.getenv("VLLM_LOOPBACK_IP", ""),
1121
-
1122
- # Used to set the process name prefix for vLLM processes.
1123
- # This is useful for debugging and monitoring purposes.
1124
- # The default value is "VLLM".
1125
- "VLLM_PROCESS_NAME_PREFIX":
1126
- lambda: os.getenv("VLLM_PROCESS_NAME_PREFIX", "VLLM"),
1127
-
1128
- # Allow chunked local attention with hybrid kv cache manager.
1129
- # Currently using the Hybrid KV cache manager with chunked local attention
1130
- # in the Llama4 models (the only models currently using chunked local attn)
1131
- # causes a latency regression. For this reason, we disable it by default.
1132
- # This flag is used to allow users to enable it if they want to (to save on
1133
- # kv-cache memory usage and enable longer contexts)
1134
- # TODO(lucas): Remove this flag once latency regression is resolved.
1135
- "VLLM_ALLOW_CHUNKED_LOCAL_ATTN_WITH_HYBRID_KV_CACHE":
1136
- lambda: bool(int(os.getenv(\
1137
- "VLLM_ALLOW_CHUNKED_LOCAL_ATTN_WITH_HYBRID_KV_CACHE", "0"))),
1138
-
1139
- # Enables support for the "store" option in the OpenAI Responses API.
1140
- # When set to 1, vLLM's OpenAI server will retain the input and output
1141
- # messages for those requests in memory. By default, this is disabled (0),
1142
- # and the "store" option is ignored.
1143
- # NOTE/WARNING:
1144
- # 1. Messages are kept in memory only (not persisted to disk) and will be
1145
- # lost when the vLLM server shuts down.
1146
- # 2. Enabling this option will cause a memory leak, as stored messages are
1147
- # never removed from memory until the server terminates.
1148
- "VLLM_ENABLE_RESPONSES_API_STORE":
1149
- lambda: bool(int(os.getenv("VLLM_ENABLE_RESPONSES_API_STORE", "0"))),
1150
-
1151
- # Allows vllm to find tuned config under customized folder
1152
- "VLLM_TUNED_CONFIG_FOLDER":
1153
- lambda: os.getenv("VLLM_TUNED_CONFIG_FOLDER", None),
1154
-
1155
- }
1156
-
1157
- # --8<-- [end:env-vars-definition]
1158
-
1159
-
1160
- def __getattr__(name: str):
1161
- # lazy evaluation of environment variables
1162
- if name in environment_variables:
1163
- return environment_variables[name]()
1164
- raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
1165
-
1166
-
1167
- def __dir__():
1168
- return list(environment_variables.keys())
1169
-
1170
-
1171
- def is_set(name: str):
1172
- """Check if an environment variable is explicitly set."""
1173
- if name in environment_variables:
1174
- return name in os.environ
1175
- raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
1176
-
1177
-
1178
- def compute_hash() -> str:
1179
- """
1180
- WARNING: Whenever a new key is added to this environment
1181
- variables, ensure that it is included in the factors list if
1182
- it affects the computation graph. For example, different values
1183
- of VLLM_PP_LAYER_PARTITION will generate different computation
1184
- graphs, so it is included in the factors list. The env vars that
1185
- affect the choice of different kernels or attention backends should
1186
- also be included in the factors list.
1187
- """
1188
- factors: list[Any] = []
1189
-
1190
- # summarize environment variables
1191
- def factorize(name: str):
1192
- if __getattr__(name):
1193
- factors.append(__getattr__(name))
1194
- else:
1195
- factors.append("None")
1196
-
1197
- # The values of envs may affect the computation graph.
1198
- # TODO(DefTruth): hash all environment variables?
1199
- # for key in environment_variables:
1200
- # factorize(key)
1201
- environment_variables_to_hash = [
1202
- "VLLM_PP_LAYER_PARTITION",
1203
- "VLLM_MLA_DISABLE",
1204
- "VLLM_USE_TRITON_FLASH_ATTN",
1205
- "VLLM_USE_TRITON_AWQ",
1206
- "VLLM_DP_RANK",
1207
- "VLLM_DP_SIZE",
1208
- "VLLM_USE_STANDALONE_COMPILE",
1209
- "VLLM_FUSED_MOE_CHUNK_SIZE",
1210
- "VLLM_USE_TRTLLM_FP4_GEMM",
1211
- ]
1212
- for key in environment_variables_to_hash:
1213
- if key in environment_variables:
1214
- factorize(key)
1215
-
1216
- hash_str = hashlib.md5(str(factors).encode(),
1217
- usedforsecurity=False).hexdigest()
1218
-
1219
- return hash_str
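
For readers skimming the removed tpu_inference/mock/vllm_envs.py above: the module resolves environment variables lazily through a module-level __getattr__ (PEP 562) over the environment_variables dict, so every attribute access re-reads the process environment rather than caching an import-time snapshot, which is also why typed defaults appear only under TYPE_CHECKING. Below is a minimal, self-contained sketch of that pattern; the MY_FLAG and MY_TIMEOUT_S names are hypothetical placeholders, not real vLLM or tpu-inference variables.

# Minimal sketch of the lazy env-var pattern used by the removed module.
# MY_FLAG and MY_TIMEOUT_S are illustrative placeholders only.
import os
from typing import Any, Callable

# Each entry maps a variable name to a zero-argument parser that reads
# the process environment only when the attribute is accessed.
environment_variables: dict[str, Callable[[], Any]] = {
    "MY_FLAG": lambda: bool(int(os.getenv("MY_FLAG", "0"))),
    "MY_TIMEOUT_S": lambda: int(os.getenv("MY_TIMEOUT_S", "60")),
}

def __getattr__(name: str):
    # PEP 562: called when the attribute is not found in the module, so
    # `module.MY_FLAG` re-evaluates the environment on every access.
    if name in environment_variables:
        return environment_variables[name]()
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

def __dir__():
    return list(environment_variables.keys())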