vllm_npu-0.4.2-py3-none-any.whl
- vllm/__init__.py +23 -0
- vllm/_custom_ops.py +251 -0
- vllm/attention/__init__.py +13 -0
- vllm/attention/backends/__init__.py +0 -0
- vllm/attention/backends/abstract.py +127 -0
- vllm/attention/backends/flash_attn.py +271 -0
- vllm/attention/backends/flashinfer.py +220 -0
- vllm/attention/backends/rocm_flash_attn.py +374 -0
- vllm/attention/backends/torch_sdpa.py +250 -0
- vllm/attention/backends/xformers.py +393 -0
- vllm/attention/layer.py +56 -0
- vllm/attention/ops/__init__.py +0 -0
- vllm/attention/ops/paged_attn.py +216 -0
- vllm/attention/ops/prefix_prefill.py +792 -0
- vllm/attention/ops/triton_flash_attention.py +810 -0
- vllm/attention/selector.py +91 -0
- vllm/block.py +84 -0
- vllm/config.py +1225 -0
- vllm/core/__init__.py +0 -0
- vllm/core/block/__init__.py +0 -0
- vllm/core/block/block_table.py +295 -0
- vllm/core/block/common.py +199 -0
- vllm/core/block/cpu_gpu_block_allocator.py +228 -0
- vllm/core/block/interfaces.py +205 -0
- vllm/core/block/naive_block.py +318 -0
- vllm/core/block/prefix_caching_block.py +606 -0
- vllm/core/block_manager_v1.py +625 -0
- vllm/core/block_manager_v2.py +258 -0
- vllm/core/evictor_v1.py +105 -0
- vllm/core/evictor_v2.py +127 -0
- vllm/core/interfaces.py +113 -0
- vllm/core/policy.py +45 -0
- vllm/core/scheduler.py +1163 -0
- vllm/distributed/__init__.py +3 -0
- vllm/distributed/communication_op.py +237 -0
- vllm/distributed/device_communicators/__init__.py +0 -0
- vllm/distributed/device_communicators/custom_all_reduce.py +274 -0
- vllm/distributed/device_communicators/pynccl.py +287 -0
- vllm/distributed/device_communicators/pynccl_utils.py +66 -0
- vllm/distributed/parallel_state.py +339 -0
- vllm/distributed/utils.py +136 -0
- vllm/engine/__init__.py +0 -0
- vllm/engine/arg_utils.py +649 -0
- vllm/engine/async_llm_engine.py +737 -0
- vllm/engine/llm_engine.py +784 -0
- vllm/engine/metrics.py +368 -0
- vllm/engine/output_processor/__init__.py +0 -0
- vllm/engine/output_processor/interfaces.py +76 -0
- vllm/engine/output_processor/multi_step.py +142 -0
- vllm/engine/output_processor/single_step.py +284 -0
- vllm/engine/output_processor/stop_checker.py +101 -0
- vllm/engine/output_processor/util.py +19 -0
- vllm/entrypoints/__init__.py +0 -0
- vllm/entrypoints/api_server.py +119 -0
- vllm/entrypoints/llm.py +259 -0
- vllm/entrypoints/openai/__init__.py +0 -0
- vllm/entrypoints/openai/api_server.py +186 -0
- vllm/entrypoints/openai/cli_args.py +115 -0
- vllm/entrypoints/openai/protocol.py +460 -0
- vllm/entrypoints/openai/serving_chat.py +392 -0
- vllm/entrypoints/openai/serving_completion.py +347 -0
- vllm/entrypoints/openai/serving_engine.py +234 -0
- vllm/envs.py +217 -0
- vllm/executor/__init__.py +0 -0
- vllm/executor/cpu_executor.py +152 -0
- vllm/executor/distributed_gpu_executor.py +115 -0
- vllm/executor/executor_base.py +115 -0
- vllm/executor/gpu_executor.py +150 -0
- vllm/executor/multiproc_worker_utils.py +263 -0
- vllm/executor/neuron_executor.py +91 -0
- vllm/executor/ray_gpu_executor.py +327 -0
- vllm/executor/ray_utils.py +119 -0
- vllm/logger.py +153 -0
- vllm/logging/__init__.py +5 -0
- vllm/logging/formatter.py +15 -0
- vllm/lora/__init__.py +0 -0
- vllm/lora/fully_sharded_layers.py +262 -0
- vllm/lora/layers.py +1181 -0
- vllm/lora/lora.py +167 -0
- vllm/lora/models.py +645 -0
- vllm/lora/punica.py +213 -0
- vllm/lora/request.py +32 -0
- vllm/lora/utils.py +98 -0
- vllm/lora/worker_manager.py +251 -0
- vllm/model_executor/__init__.py +7 -0
- vllm/model_executor/guided_decoding/__init__.py +25 -0
- vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +70 -0
- vllm/model_executor/guided_decoding/outlines_decoding.py +130 -0
- vllm/model_executor/guided_decoding/outlines_logits_processors.py +184 -0
- vllm/model_executor/layers/__init__.py +0 -0
- vllm/model_executor/layers/activation.py +173 -0
- vllm/model_executor/layers/fused_moe/__init__.py +7 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +140 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/fused_moe.py +479 -0
- vllm/model_executor/layers/layernorm.py +71 -0
- vllm/model_executor/layers/linear.py +709 -0
- vllm/model_executor/layers/logits_processor.py +115 -0
- vllm/model_executor/layers/ops/__init__.py +0 -0
- vllm/model_executor/layers/ops/rand.py +157 -0
- vllm/model_executor/layers/ops/sample.py +406 -0
- vllm/model_executor/layers/quantization/__init__.py +35 -0
- vllm/model_executor/layers/quantization/aqlm.py +376 -0
- vllm/model_executor/layers/quantization/awq.py +175 -0
- vllm/model_executor/layers/quantization/base_config.py +97 -0
- vllm/model_executor/layers/quantization/fp8.py +265 -0
- vllm/model_executor/layers/quantization/gptq.py +224 -0
- vllm/model_executor/layers/quantization/gptq_marlin.py +438 -0
- vllm/model_executor/layers/quantization/marlin.py +227 -0
- vllm/model_executor/layers/quantization/schema.py +84 -0
- vllm/model_executor/layers/quantization/squeezellm.py +137 -0
- vllm/model_executor/layers/rejection_sampler.py +405 -0
- vllm/model_executor/layers/rotary_embedding.py +525 -0
- vllm/model_executor/layers/sampler.py +1051 -0
- vllm/model_executor/layers/vocab_parallel_embedding.py +155 -0
- vllm/model_executor/model_loader/__init__.py +30 -0
- vllm/model_executor/model_loader/loader.py +362 -0
- vllm/model_executor/model_loader/neuron.py +136 -0
- vllm/model_executor/model_loader/tensorizer.py +368 -0
- vllm/model_executor/model_loader/utils.py +41 -0
- vllm/model_executor/model_loader/weight_utils.py +372 -0
- vllm/model_executor/models/__init__.py +119 -0
- vllm/model_executor/models/baichuan.py +410 -0
- vllm/model_executor/models/bloom.py +327 -0
- vllm/model_executor/models/chatglm.py +386 -0
- vllm/model_executor/models/commandr.py +373 -0
- vllm/model_executor/models/dbrx.py +413 -0
- vllm/model_executor/models/decilm.py +122 -0
- vllm/model_executor/models/deepseek.py +438 -0
- vllm/model_executor/models/falcon.py +444 -0
- vllm/model_executor/models/gemma.py +393 -0
- vllm/model_executor/models/gpt2.py +266 -0
- vllm/model_executor/models/gpt_bigcode.py +274 -0
- vllm/model_executor/models/gpt_j.py +281 -0
- vllm/model_executor/models/gpt_neox.py +295 -0
- vllm/model_executor/models/internlm2.py +323 -0
- vllm/model_executor/models/jais.py +333 -0
- vllm/model_executor/models/llama.py +442 -0
- vllm/model_executor/models/llava.py +239 -0
- vllm/model_executor/models/minicpm.py +531 -0
- vllm/model_executor/models/mixtral.py +583 -0
- vllm/model_executor/models/mixtral_quant.py +404 -0
- vllm/model_executor/models/mpt.py +295 -0
- vllm/model_executor/models/olmo.py +356 -0
- vllm/model_executor/models/opt.py +349 -0
- vllm/model_executor/models/orion.py +319 -0
- vllm/model_executor/models/phi.py +300 -0
- vllm/model_executor/models/qwen.py +284 -0
- vllm/model_executor/models/qwen2.py +367 -0
- vllm/model_executor/models/qwen2_moe.py +447 -0
- vllm/model_executor/models/stablelm.py +301 -0
- vllm/model_executor/models/starcoder2.py +302 -0
- vllm/model_executor/models/xverse.py +366 -0
- vllm/model_executor/sampling_metadata.py +588 -0
- vllm/model_executor/utils.py +35 -0
- vllm/outputs.py +150 -0
- vllm/py.typed +2 -0
- vllm/sampling_params.py +340 -0
- vllm/sequence.py +766 -0
- vllm/spec_decode/__init__.py +0 -0
- vllm/spec_decode/batch_expansion.py +397 -0
- vllm/spec_decode/interfaces.py +73 -0
- vllm/spec_decode/metrics.py +191 -0
- vllm/spec_decode/multi_step_worker.py +203 -0
- vllm/spec_decode/ngram_worker.py +176 -0
- vllm/spec_decode/spec_decode_worker.py +472 -0
- vllm/spec_decode/top1_proposer.py +200 -0
- vllm/spec_decode/util.py +228 -0
- vllm/test_utils.py +41 -0
- vllm/transformers_utils/__init__.py +0 -0
- vllm/transformers_utils/config.py +58 -0
- vllm/transformers_utils/configs/__init__.py +16 -0
- vllm/transformers_utils/configs/chatglm.py +68 -0
- vllm/transformers_utils/configs/dbrx.py +278 -0
- vllm/transformers_utils/configs/falcon.py +87 -0
- vllm/transformers_utils/configs/jais.py +236 -0
- vllm/transformers_utils/configs/mpt.py +178 -0
- vllm/transformers_utils/detokenizer.py +313 -0
- vllm/transformers_utils/tokenizer.py +149 -0
- vllm/transformers_utils/tokenizer_group/__init__.py +33 -0
- vllm/transformers_utils/tokenizer_group/base_tokenizer_group.py +55 -0
- vllm/transformers_utils/tokenizer_group/ray_tokenizer_group.py +169 -0
- vllm/transformers_utils/tokenizer_group/tokenizer_group.py +78 -0
- vllm/transformers_utils/tokenizers/__init__.py +5 -0
- vllm/transformers_utils/tokenizers/baichuan.py +255 -0
- vllm/usage/__init__.py +0 -0
- vllm/usage/usage_lib.py +209 -0
- vllm/utils.py +677 -0
- vllm/worker/__init__.py +0 -0
- vllm/worker/cache_engine.py +105 -0
- vllm/worker/cpu_model_runner.py +346 -0
- vllm/worker/cpu_worker.py +321 -0
- vllm/worker/model_runner.py +1168 -0
- vllm/worker/neuron_model_runner.py +196 -0
- vllm/worker/neuron_worker.py +98 -0
- vllm/worker/worker.py +345 -0
- vllm/worker/worker_base.py +146 -0
- vllm_npu-0.4.2.dist-info/LICENSE +201 -0
- vllm_npu-0.4.2.dist-info/METADATA +173 -0
- vllm_npu-0.4.2.dist-info/RECORD +219 -0
- vllm_npu-0.4.2.dist-info/WHEEL +5 -0
- vllm_npu-0.4.2.dist-info/top_level.txt +1 -0
vllm/attention/selector.py
ADDED
@@ -0,0 +1,91 @@
+import enum
+from functools import lru_cache
+from typing import Type
+
+import torch
+
+import vllm.envs as envs
+from vllm.attention.backends.abstract import AttentionBackend
+from vllm.logger import init_logger
+from vllm.utils import is_cpu, is_hip
+
+logger = init_logger(__name__)
+
+
+class _Backend(enum.Enum):
+    FLASH_ATTN = enum.auto()
+    XFORMERS = enum.auto()
+    ROCM_FLASH = enum.auto()
+    TORCH_SDPA = enum.auto()
+    FLASHINFER = enum.auto()
+
+
+@lru_cache(maxsize=None)
+def get_attn_backend(dtype: torch.dtype) -> Type[AttentionBackend]:
+    backend = _which_attn_to_use(dtype)
+    if backend == _Backend.FLASH_ATTN:
+        logger.info("Using FlashAttention-2 backend.")
+        from vllm.attention.backends.flash_attn import (  # noqa: F401
+            FlashAttentionBackend)
+        return FlashAttentionBackend
+    elif backend == _Backend.XFORMERS:
+        logger.info("Using XFormers backend.")
+        from vllm.attention.backends.xformers import (  # noqa: F401
+            XFormersBackend)
+        return XFormersBackend
+    elif backend == _Backend.ROCM_FLASH:
+        logger.info("Using ROCmFlashAttention backend.")
+        from vllm.attention.backends.rocm_flash_attn import (  # noqa: F401
+            ROCmFlashAttentionBackend)
+        return ROCmFlashAttentionBackend
+    elif backend == _Backend.TORCH_SDPA:
+        logger.info("Using Torch SDPA backend.")
+        from vllm.attention.backends.torch_sdpa import TorchSDPABackend
+        return TorchSDPABackend
+    elif backend == _Backend.FLASHINFER:
+        logger.info("Using Flashinfer backend.")
+        logger.warning("Eager mode is enforced for the Flashinfer backend.")
+        from vllm.attention.backends.flashinfer import FlashInferBackend
+        return FlashInferBackend
+    else:
+        raise ValueError("Invalid attention backend.")
+
+
+def _which_attn_to_use(dtype: torch.dtype) -> _Backend:
+    """Returns which flash attention backend to use."""
+    if is_cpu():
+        return _Backend.TORCH_SDPA
+
+    if is_hip():
+        # AMD GPUs.
+        if torch.cuda.get_device_capability()[0] != 9:
+            # Not Instinct series GPUs.
+            logger.info("flash_attn is not supported on NAVI GPUs.")
+        return _Backend.ROCM_FLASH
+
+    # NVIDIA GPUs.
+    if torch.cuda.get_device_capability()[0] < 8:
+        # Volta and Turing NVIDIA GPUs.
+        logger.info("Cannot use FlashAttention-2 backend for Volta and Turing "
+                    "GPUs.")
+        return _Backend.XFORMERS
+
+    if dtype not in (torch.float16, torch.bfloat16):
+        logger.info("Cannot use FlashAttention-2 backend for dtype other than "
+                    "torch.float16 or torch.bfloat16.")
+        return _Backend.XFORMERS
+
+    try:
+        import flash_attn  # noqa: F401
+    except ImportError:
+        logger.info(
+            "Cannot use FlashAttention-2 backend because the flash_attn "
+            "package is not found. Please install it for better performance.")
+        return _Backend.XFORMERS
+
+    backend_by_env_var = envs.VLLM_ATTENTION_BACKEND
+    if backend_by_env_var is not None:
+        return _Backend[backend_by_env_var]
+
+    # Default case.
+    return _Backend.FLASH_ATTN
vllm/block.py
ADDED
@@ -0,0 +1,84 @@
+"""Token blocks."""
+from typing import List
+
+from vllm.utils import Device
+
+_BLANK_TOKEN_ID = -1
+
+DEFAULT_LAST_ACCESSED_TIME = -1
+
+
+class LogicalTokenBlock:
+    """A block that stores a contiguous chunk of tokens from left to right.
+
+    Logical blocks are used to represent the states of the corresponding
+    physical blocks in the KV cache.
+    """
+
+    def __init__(
+        self,
+        block_number: int,
+        block_size: int,
+    ) -> None:
+        self.block_number = block_number
+        self.block_size = block_size
+
+        self.token_ids = [_BLANK_TOKEN_ID] * block_size
+        self.num_tokens = 0
+
+    def is_empty(self) -> bool:
+        return self.num_tokens == 0
+
+    def get_num_empty_slots(self) -> int:
+        return self.block_size - self.num_tokens
+
+    def is_full(self) -> bool:
+        return self.num_tokens == self.block_size
+
+    def append_tokens(self, token_ids: List[int]) -> None:
+        assert len(token_ids) <= self.get_num_empty_slots()
+        curr_idx = self.num_tokens
+        self.token_ids[curr_idx:curr_idx + len(token_ids)] = token_ids
+        self.num_tokens += len(token_ids)
+
+    def get_token_ids(self) -> List[int]:
+        return self.token_ids[:self.num_tokens]
+
+    def get_last_token_id(self) -> int:
+        assert self.num_tokens > 0
+        return self.token_ids[self.num_tokens - 1]
+
+
+class PhysicalTokenBlock:
+    """Represents the state of a block in the KV cache."""
+
+    def __init__(
+        self,
+        device: Device,
+        block_number: int,
+        block_size: int,
+        block_hash: int,
+        num_hashed_tokens: int,
+    ) -> None:
+        self.device = device
+        self.block_number = block_number
+        self.block_size = block_size
+        self.block_hash = block_hash
+        self.num_hashed_tokens = num_hashed_tokens
+
+        self.ref_count = 0
+        self.last_accessed = DEFAULT_LAST_ACCESSED_TIME
+
+        self.computed = False
+
+    def __repr__(self) -> str:
+        return (f'PhysicalTokenBlock(device={self.device}, '
+                f'block_number={self.block_number}, '
+                f'num_hashed_tokens={self.num_hashed_tokens}, '
+                f'ref_count={self.ref_count}, '
+                f'last_accessed={self.last_accessed}, '
+                f'computed={self.computed})')
+
+
+# Mapping: logical block number -> physical block.
+BlockTable = List[PhysicalTokenBlock]