vllm_npu-0.4.2-py3-none-any.whl
- vllm/__init__.py +23 -0
- vllm/_custom_ops.py +251 -0
- vllm/attention/__init__.py +13 -0
- vllm/attention/backends/__init__.py +0 -0
- vllm/attention/backends/abstract.py +127 -0
- vllm/attention/backends/flash_attn.py +271 -0
- vllm/attention/backends/flashinfer.py +220 -0
- vllm/attention/backends/rocm_flash_attn.py +374 -0
- vllm/attention/backends/torch_sdpa.py +250 -0
- vllm/attention/backends/xformers.py +393 -0
- vllm/attention/layer.py +56 -0
- vllm/attention/ops/__init__.py +0 -0
- vllm/attention/ops/paged_attn.py +216 -0
- vllm/attention/ops/prefix_prefill.py +792 -0
- vllm/attention/ops/triton_flash_attention.py +810 -0
- vllm/attention/selector.py +91 -0
- vllm/block.py +84 -0
- vllm/config.py +1225 -0
- vllm/core/__init__.py +0 -0
- vllm/core/block/__init__.py +0 -0
- vllm/core/block/block_table.py +295 -0
- vllm/core/block/common.py +199 -0
- vllm/core/block/cpu_gpu_block_allocator.py +228 -0
- vllm/core/block/interfaces.py +205 -0
- vllm/core/block/naive_block.py +318 -0
- vllm/core/block/prefix_caching_block.py +606 -0
- vllm/core/block_manager_v1.py +625 -0
- vllm/core/block_manager_v2.py +258 -0
- vllm/core/evictor_v1.py +105 -0
- vllm/core/evictor_v2.py +127 -0
- vllm/core/interfaces.py +113 -0
- vllm/core/policy.py +45 -0
- vllm/core/scheduler.py +1163 -0
- vllm/distributed/__init__.py +3 -0
- vllm/distributed/communication_op.py +237 -0
- vllm/distributed/device_communicators/__init__.py +0 -0
- vllm/distributed/device_communicators/custom_all_reduce.py +274 -0
- vllm/distributed/device_communicators/pynccl.py +287 -0
- vllm/distributed/device_communicators/pynccl_utils.py +66 -0
- vllm/distributed/parallel_state.py +339 -0
- vllm/distributed/utils.py +136 -0
- vllm/engine/__init__.py +0 -0
- vllm/engine/arg_utils.py +649 -0
- vllm/engine/async_llm_engine.py +737 -0
- vllm/engine/llm_engine.py +784 -0
- vllm/engine/metrics.py +368 -0
- vllm/engine/output_processor/__init__.py +0 -0
- vllm/engine/output_processor/interfaces.py +76 -0
- vllm/engine/output_processor/multi_step.py +142 -0
- vllm/engine/output_processor/single_step.py +284 -0
- vllm/engine/output_processor/stop_checker.py +101 -0
- vllm/engine/output_processor/util.py +19 -0
- vllm/entrypoints/__init__.py +0 -0
- vllm/entrypoints/api_server.py +119 -0
- vllm/entrypoints/llm.py +259 -0
- vllm/entrypoints/openai/__init__.py +0 -0
- vllm/entrypoints/openai/api_server.py +186 -0
- vllm/entrypoints/openai/cli_args.py +115 -0
- vllm/entrypoints/openai/protocol.py +460 -0
- vllm/entrypoints/openai/serving_chat.py +392 -0
- vllm/entrypoints/openai/serving_completion.py +347 -0
- vllm/entrypoints/openai/serving_engine.py +234 -0
- vllm/envs.py +217 -0
- vllm/executor/__init__.py +0 -0
- vllm/executor/cpu_executor.py +152 -0
- vllm/executor/distributed_gpu_executor.py +115 -0
- vllm/executor/executor_base.py +115 -0
- vllm/executor/gpu_executor.py +150 -0
- vllm/executor/multiproc_worker_utils.py +263 -0
- vllm/executor/neuron_executor.py +91 -0
- vllm/executor/ray_gpu_executor.py +327 -0
- vllm/executor/ray_utils.py +119 -0
- vllm/logger.py +153 -0
- vllm/logging/__init__.py +5 -0
- vllm/logging/formatter.py +15 -0
- vllm/lora/__init__.py +0 -0
- vllm/lora/fully_sharded_layers.py +262 -0
- vllm/lora/layers.py +1181 -0
- vllm/lora/lora.py +167 -0
- vllm/lora/models.py +645 -0
- vllm/lora/punica.py +213 -0
- vllm/lora/request.py +32 -0
- vllm/lora/utils.py +98 -0
- vllm/lora/worker_manager.py +251 -0
- vllm/model_executor/__init__.py +7 -0
- vllm/model_executor/guided_decoding/__init__.py +25 -0
- vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +70 -0
- vllm/model_executor/guided_decoding/outlines_decoding.py +130 -0
- vllm/model_executor/guided_decoding/outlines_logits_processors.py +184 -0
- vllm/model_executor/layers/__init__.py +0 -0
- vllm/model_executor/layers/activation.py +173 -0
- vllm/model_executor/layers/fused_moe/__init__.py +7 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +140 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/fused_moe.py +479 -0
- vllm/model_executor/layers/layernorm.py +71 -0
- vllm/model_executor/layers/linear.py +709 -0
- vllm/model_executor/layers/logits_processor.py +115 -0
- vllm/model_executor/layers/ops/__init__.py +0 -0
- vllm/model_executor/layers/ops/rand.py +157 -0
- vllm/model_executor/layers/ops/sample.py +406 -0
- vllm/model_executor/layers/quantization/__init__.py +35 -0
- vllm/model_executor/layers/quantization/aqlm.py +376 -0
- vllm/model_executor/layers/quantization/awq.py +175 -0
- vllm/model_executor/layers/quantization/base_config.py +97 -0
- vllm/model_executor/layers/quantization/fp8.py +265 -0
- vllm/model_executor/layers/quantization/gptq.py +224 -0
- vllm/model_executor/layers/quantization/gptq_marlin.py +438 -0
- vllm/model_executor/layers/quantization/marlin.py +227 -0
- vllm/model_executor/layers/quantization/schema.py +84 -0
- vllm/model_executor/layers/quantization/squeezellm.py +137 -0
- vllm/model_executor/layers/rejection_sampler.py +405 -0
- vllm/model_executor/layers/rotary_embedding.py +525 -0
- vllm/model_executor/layers/sampler.py +1051 -0
- vllm/model_executor/layers/vocab_parallel_embedding.py +155 -0
- vllm/model_executor/model_loader/__init__.py +30 -0
- vllm/model_executor/model_loader/loader.py +362 -0
- vllm/model_executor/model_loader/neuron.py +136 -0
- vllm/model_executor/model_loader/tensorizer.py +368 -0
- vllm/model_executor/model_loader/utils.py +41 -0
- vllm/model_executor/model_loader/weight_utils.py +372 -0
- vllm/model_executor/models/__init__.py +119 -0
- vllm/model_executor/models/baichuan.py +410 -0
- vllm/model_executor/models/bloom.py +327 -0
- vllm/model_executor/models/chatglm.py +386 -0
- vllm/model_executor/models/commandr.py +373 -0
- vllm/model_executor/models/dbrx.py +413 -0
- vllm/model_executor/models/decilm.py +122 -0
- vllm/model_executor/models/deepseek.py +438 -0
- vllm/model_executor/models/falcon.py +444 -0
- vllm/model_executor/models/gemma.py +393 -0
- vllm/model_executor/models/gpt2.py +266 -0
- vllm/model_executor/models/gpt_bigcode.py +274 -0
- vllm/model_executor/models/gpt_j.py +281 -0
- vllm/model_executor/models/gpt_neox.py +295 -0
- vllm/model_executor/models/internlm2.py +323 -0
- vllm/model_executor/models/jais.py +333 -0
- vllm/model_executor/models/llama.py +442 -0
- vllm/model_executor/models/llava.py +239 -0
- vllm/model_executor/models/minicpm.py +531 -0
- vllm/model_executor/models/mixtral.py +583 -0
- vllm/model_executor/models/mixtral_quant.py +404 -0
- vllm/model_executor/models/mpt.py +295 -0
- vllm/model_executor/models/olmo.py +356 -0
- vllm/model_executor/models/opt.py +349 -0
- vllm/model_executor/models/orion.py +319 -0
- vllm/model_executor/models/phi.py +300 -0
- vllm/model_executor/models/qwen.py +284 -0
- vllm/model_executor/models/qwen2.py +367 -0
- vllm/model_executor/models/qwen2_moe.py +447 -0
- vllm/model_executor/models/stablelm.py +301 -0
- vllm/model_executor/models/starcoder2.py +302 -0
- vllm/model_executor/models/xverse.py +366 -0
- vllm/model_executor/sampling_metadata.py +588 -0
- vllm/model_executor/utils.py +35 -0
- vllm/outputs.py +150 -0
- vllm/py.typed +2 -0
- vllm/sampling_params.py +340 -0
- vllm/sequence.py +766 -0
- vllm/spec_decode/__init__.py +0 -0
- vllm/spec_decode/batch_expansion.py +397 -0
- vllm/spec_decode/interfaces.py +73 -0
- vllm/spec_decode/metrics.py +191 -0
- vllm/spec_decode/multi_step_worker.py +203 -0
- vllm/spec_decode/ngram_worker.py +176 -0
- vllm/spec_decode/spec_decode_worker.py +472 -0
- vllm/spec_decode/top1_proposer.py +200 -0
- vllm/spec_decode/util.py +228 -0
- vllm/test_utils.py +41 -0
- vllm/transformers_utils/__init__.py +0 -0
- vllm/transformers_utils/config.py +58 -0
- vllm/transformers_utils/configs/__init__.py +16 -0
- vllm/transformers_utils/configs/chatglm.py +68 -0
- vllm/transformers_utils/configs/dbrx.py +278 -0
- vllm/transformers_utils/configs/falcon.py +87 -0
- vllm/transformers_utils/configs/jais.py +236 -0
- vllm/transformers_utils/configs/mpt.py +178 -0
- vllm/transformers_utils/detokenizer.py +313 -0
- vllm/transformers_utils/tokenizer.py +149 -0
- vllm/transformers_utils/tokenizer_group/__init__.py +33 -0
- vllm/transformers_utils/tokenizer_group/base_tokenizer_group.py +55 -0
- vllm/transformers_utils/tokenizer_group/ray_tokenizer_group.py +169 -0
- vllm/transformers_utils/tokenizer_group/tokenizer_group.py +78 -0
- vllm/transformers_utils/tokenizers/__init__.py +5 -0
- vllm/transformers_utils/tokenizers/baichuan.py +255 -0
- vllm/usage/__init__.py +0 -0
- vllm/usage/usage_lib.py +209 -0
- vllm/utils.py +677 -0
- vllm/worker/__init__.py +0 -0
- vllm/worker/cache_engine.py +105 -0
- vllm/worker/cpu_model_runner.py +346 -0
- vllm/worker/cpu_worker.py +321 -0
- vllm/worker/model_runner.py +1168 -0
- vllm/worker/neuron_model_runner.py +196 -0
- vllm/worker/neuron_worker.py +98 -0
- vllm/worker/worker.py +345 -0
- vllm/worker/worker_base.py +146 -0
- vllm_npu-0.4.2.dist-info/LICENSE +201 -0
- vllm_npu-0.4.2.dist-info/METADATA +173 -0
- vllm_npu-0.4.2.dist-info/RECORD +219 -0
- vllm_npu-0.4.2.dist-info/WHEEL +5 -0
- vllm_npu-0.4.2.dist-info/top_level.txt +1 -0
vllm/worker/cpu_worker.py
@@ -0,0 +1,321 @@
+"""A CPU worker class."""
+from typing import Any, Dict, List, Optional, Tuple
+
+import torch
+import torch.distributed
+
+from vllm.attention import get_attn_backend
+from vllm.config import (CacheConfig, DeviceConfig, LoadConfig, LoRAConfig,
+                         ModelConfig, ParallelConfig, SchedulerConfig,
+                         VisionLanguageConfig)
+from vllm.distributed import (broadcast_tensor_dict,
+                              ensure_model_parallel_initialized,
+                              init_distributed_environment)
+from vllm.logger import init_logger
+from vllm.model_executor import set_random_seed
+from vllm.sequence import ExecuteModelRequest, SamplerOutput
+from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE
+from vllm.worker.cpu_model_runner import CPUModelRunner
+from vllm.worker.worker_base import LoraNotSupportedWorkerBase
+
+logger = init_logger(__name__)
+
+
+class CPUCacheEngine:
+    """Manages the KV cache for CPU backend.
+
+    This class is responsible for initializing and managing CPU KV
+    caches. It also provides methods for performing KV cache operations, such
+    as copying.
+    """
+
+    def __init__(self, cache_config: CacheConfig, model_config: ModelConfig,
+                 parallel_config: ParallelConfig,
+                 device_config: DeviceConfig) -> None:
+        assert device_config.device_type == "cpu"
+        self.cache_config = cache_config
+        self.model_config = model_config
+        self.parallel_config = parallel_config
+
+        self.head_size = model_config.get_head_size()
+        self.num_layers = model_config.get_num_layers(parallel_config)
+        self.num_heads = model_config.get_num_kv_heads(parallel_config)
+
+        self.block_size = cache_config.block_size
+        # Note: In CacheConfig, num_gpu_blocks actual is num_cpu_blocks
+        # for CPU backend, because we want to reuse KV cache management
+        # in the scheduler.
+        self.num_cpu_blocks = cache_config.num_gpu_blocks
+
+        if cache_config.cache_dtype == "auto":
+            self.dtype = model_config.dtype
+        else:
+            self.dtype = STR_DTYPE_TO_TORCH_DTYPE[cache_config.cache_dtype]
+
+        # Get attention backend.
+        self.attn_backend = get_attn_backend(model_config.dtype)
+
+        # Initialize the cache.
+        self.cpu_cache = self._allocate_kv_cache(self.num_cpu_blocks)
+
+    def _allocate_kv_cache(
+        self,
+        num_blocks: int,
+    ) -> List[torch.Tensor]:
+        """Allocates KV cache on CPU."""
+        kv_cache_shape = self.attn_backend.get_kv_cache_shape(
+            num_blocks, self.block_size, self.num_heads, self.head_size)
+        kv_cache: List[torch.Tensor] = []
+        for _ in range(self.num_layers):
+            kv_cache.append(
+                torch.empty(kv_cache_shape, dtype=self.dtype, device="cpu"))
+        return kv_cache
+
+    def swap_in(self, src_to_dst: Dict[int, int]) -> None:
+        raise NotImplementedError("Swap is not supported in CPUCacheEngine.")
+
+    def swap_out(self, src_to_dst: Dict[int, int]) -> None:
+        raise NotImplementedError("Swap is not supported in CPUCacheEngine.")
+
+    def copy(self, src_to_dsts: Dict[int, List[int]]) -> None:
+        self.attn_backend.copy_blocks(self.cpu_cache, src_to_dsts)
+
+    @staticmethod
+    def get_cache_block_size(
+        block_size: int,
+        cache_dtype: str,
+        model_config: ModelConfig,
+        parallel_config: ParallelConfig,
+    ) -> int:
+        head_size = model_config.get_head_size()
+        num_heads = model_config.get_num_kv_heads(parallel_config)
+        num_layers = model_config.get_num_layers(parallel_config)
+
+        key_cache_block = block_size * num_heads * head_size
+        value_cache_block = key_cache_block
+        total = num_layers * (key_cache_block + value_cache_block)
+        if cache_dtype == "auto":
+            dtype = model_config.dtype
+        else:
+            dtype = STR_DTYPE_TO_TORCH_DTYPE[cache_dtype]
+        dtype_size = torch.tensor([], dtype=dtype).element_size()
+        return dtype_size * total
+
+
+class CPUWorker(LoraNotSupportedWorkerBase):
+    """A worker class that executes (a partition of) the model on a CPU socket.
+
+    Each worker is associated with a single CPU socket. The worker is
+    responsible for maintaining the KV cache and executing the model on the
+    CPU. In case of distributed inference, each worker is assigned a partition
+    of the model.
+    """
+
+    def __init__(
+        self,
+        model_config: ModelConfig,
+        parallel_config: ParallelConfig,
+        scheduler_config: SchedulerConfig,
+        device_config: DeviceConfig,
+        cache_config: CacheConfig,
+        load_config: LoadConfig,
+        local_rank: int,
+        rank: int,
+        distributed_init_method: str,
+        lora_config: Optional[LoRAConfig] = None,
+        vision_language_config: Optional[VisionLanguageConfig] = None,
+        kv_cache_dtype: Optional[str] = "auto",
+        is_driver_worker: bool = False,
+    ) -> None:
+        self.model_config = model_config
+        self.parallel_config = parallel_config
+        self.scheduler_config = scheduler_config
+        self.device_config = device_config
+        self.cache_config = cache_config
+        self.load_config = load_config
+        self.local_rank = local_rank
+        self.rank = rank
+        self.distributed_init_method = distributed_init_method
+        self.lora_config = lora_config
+        self.vision_language_config = vision_language_config
+        self.is_driver_worker = is_driver_worker
+        if self.is_driver_worker:
+            assert self.rank == 0, "The driver worker must have rank 0."
+
+        if self.model_config.trust_remote_code:
+            # note: lazy import to avoid importing torch before initializing
+            from vllm.utils import init_cached_hf_modules
+            init_cached_hf_modules()
+        self.model_runner = CPUModelRunner(
+            model_config,
+            parallel_config,
+            scheduler_config,
+            device_config,
+            load_config=self.load_config,
+            lora_config=self.lora_config,
+            vision_language_config=self.vision_language_config,
+            kv_cache_dtype=kv_cache_dtype,
+            is_driver_worker=is_driver_worker)
+        # Uninitialized cache engine. Will be initialized by
+        # initialize_cache.
+        self.cache_engine: CPUCacheEngine
+        self.cpu_cache: List[torch.Tensor]
+
+    def init_device(self) -> None:
+        self.init_distributed_environment()
+        # Set random seed.
+        set_random_seed(self.model_config.seed)
+
+    def load_model(self):
+        self.model_runner.load_model()
+
+    def determine_num_available_blocks(self) -> Tuple[int, int]:
+        """Determine the number of blocks available for the KV cache.
+
+        This determines how many KV blocks can fit into the configured CPU
+        KV cache space.
+
+        Note that since vLLM assumes a block resides on GPU if it can be
+        modified, we return num_gpu_blocks=num_cpu_blocks and num_cpu_blocks=0.
+        This allows us to reuse the scheduler of vLLM without generalizing it
+        to different devices.
+        """
+        # For CPU device, the block number will be calculated based on the
+        # cpu_kvcache_space.
+        cache_block_size = self.get_cache_block_size_bytes()
+        num_cpu_blocks = int(self.cache_config.cpu_kvcache_space_bytes //
+                             cache_block_size)
+        num_cpu_blocks = max(num_cpu_blocks, 0)
+
+        # Note: To reuse the cache management procedure,
+        # use cpu cache as 'gpu cache'.
+        num_gpu_blocks = num_cpu_blocks
+        num_cpu_blocks = 0
+        return num_gpu_blocks, num_cpu_blocks
+
+    def initialize_cache(self, num_gpu_blocks: int,
+                         num_cpu_blocks: int) -> None:
+        """Initialize the KV cache. Currently, swappable CPU memory is not
+        supported.
+
+        Since this worker does not support GPUs, we use the num_gpu_blocks to
+        determine how many non-swappable CPU blocks to allocate.
+        """
+        assert (num_cpu_blocks == 0
+                ), f"{type(self)} does not support swappable cache"
+
+        # Note: To reuse the cache management procedure,
+        # use cpu cache as 'gpu cache'.
+        num_cpu_blocks = num_gpu_blocks
+
+        self._validate_num_cpu_blocks(num_cpu_blocks)
+        self.cache_config.num_gpu_blocks = num_cpu_blocks
+        self.cache_config.num_cpu_blocks = 0
+
+        # Initialize the cache.
+        self._init_cache_engine()
+
+    def _validate_num_cpu_blocks(self, num_cpu_blocks: int) -> None:
+        """Raise errors if the num_cpu_blocks is invalid.
+        """
+        if num_cpu_blocks <= 0:
+            raise ValueError("No available memory for the cache blocks. "
+                             "Try increasing `VLLM_CPU_KVCACHE_SPACE` when "
+                             "initializing the engine.")
+
+        max_seq_len = self.cache_config.block_size * num_cpu_blocks
+        if self.model_config.max_model_len > max_seq_len:
+            raise ValueError(
+                f"The model's max seq len ({self.model_config.max_model_len}) "
+                "is larger than the maximum number of tokens that can be "
+                f"stored in KV cache ({max_seq_len}). Try increasing "
+                "`VLLM_CPU_KVCACHE_SPACE` or decreasing `max_model_len` when "
+                "initializing the engine.")
+
+    def _init_cache_engine(self) -> None:
+        self.cache_engine = CPUCacheEngine(self.cache_config,
+                                           self.model_config,
+                                           self.parallel_config,
+                                           self.device_config)
+        self.cpu_cache = self.cache_engine.cpu_cache
+        self.model_runner.block_size = self.cache_engine.block_size
+
+        assert self.cpu_cache is not None
+
+        # Populate the cache to warmup the memory
+        for layer_cache in self.cpu_cache:
+            layer_cache.fill_(0)
+
+    def cache_copy(
+        self,
+        blocks_to_copy: Dict[int, List[int]],
+    ) -> None:
+        if blocks_to_copy:
+            self.cache_engine.copy(blocks_to_copy)
+
+    @torch.inference_mode()
+    def execute_model(
+        self,
+        execute_model_req: Optional[ExecuteModelRequest] = None,
+    ) -> List[SamplerOutput]:
+
+        if execute_model_req is None:
+            seq_group_metadata_list = None
+        else:
+            seq_group_metadata_list = execute_model_req.seq_group_metadata_list
+
+        if self.is_driver_worker:
+            assert seq_group_metadata_list is not None
+            num_seq_groups: int = len(seq_group_metadata_list)
+            assert execute_model_req is not None
+            blocks_to_copy = execute_model_req.blocks_to_copy
+            assert len(execute_model_req.blocks_to_swap_in) == 0
+            assert len(execute_model_req.blocks_to_swap_out) == 0
+            data: Dict[str, Any] = {
+                "num_seq_groups": num_seq_groups,
+                "blocks_to_copy": execute_model_req.blocks_to_copy,
+            }
+            broadcast_tensor_dict(data, src=0)
+        else:
+            data = broadcast_tensor_dict(src=0)
+            num_seq_groups = data["num_seq_groups"]
+            blocks_to_copy = data["blocks_to_copy"]
+
+        self.cache_copy(blocks_to_copy)
+
+        # If there is no input, we don't need to execute the model.
+        if num_seq_groups == 0:
+            return []
+
+        output = self.model_runner.execute_model(seq_group_metadata_list,
+                                                 self.cpu_cache)
+
+        # CPU worker only supports single-step execution.
+        return [output]
+
+    def init_distributed_environment(self) -> None:
+        """Initialize the distributed environment."""
+
+        parallel_config = self.parallel_config
+        rank = self.rank
+        distributed_init_method = self.distributed_init_method
+        init_distributed_environment(
+            world_size=parallel_config.world_size,
+            rank=rank,
+            distributed_init_method=distributed_init_method,
+            backend="gloo",
+        )
+
+        # A small all_reduce for warmup.
+        torch.distributed.all_reduce(torch.zeros(1).cpu())
+
+        ensure_model_parallel_initialized(
+            parallel_config.tensor_parallel_size,
+            parallel_config.pipeline_parallel_size)
+
+    def get_cache_block_size_bytes(self) -> int:
+        """Return the size in bytes of a single KV cache block.
+        """
+        return CPUCacheEngine.get_cache_block_size(
+            self.cache_config.block_size, self.cache_config.cache_dtype,
+            self.model_config, self.parallel_config)