vllm_npu-0.4.2-py3-none-any.whl

Files changed (219)
  1. vllm/__init__.py +23 -0
  2. vllm/_custom_ops.py +251 -0
  3. vllm/attention/__init__.py +13 -0
  4. vllm/attention/backends/__init__.py +0 -0
  5. vllm/attention/backends/abstract.py +127 -0
  6. vllm/attention/backends/flash_attn.py +271 -0
  7. vllm/attention/backends/flashinfer.py +220 -0
  8. vllm/attention/backends/rocm_flash_attn.py +374 -0
  9. vllm/attention/backends/torch_sdpa.py +250 -0
  10. vllm/attention/backends/xformers.py +393 -0
  11. vllm/attention/layer.py +56 -0
  12. vllm/attention/ops/__init__.py +0 -0
  13. vllm/attention/ops/paged_attn.py +216 -0
  14. vllm/attention/ops/prefix_prefill.py +792 -0
  15. vllm/attention/ops/triton_flash_attention.py +810 -0
  16. vllm/attention/selector.py +91 -0
  17. vllm/block.py +84 -0
  18. vllm/config.py +1225 -0
  19. vllm/core/__init__.py +0 -0
  20. vllm/core/block/__init__.py +0 -0
  21. vllm/core/block/block_table.py +295 -0
  22. vllm/core/block/common.py +199 -0
  23. vllm/core/block/cpu_gpu_block_allocator.py +228 -0
  24. vllm/core/block/interfaces.py +205 -0
  25. vllm/core/block/naive_block.py +318 -0
  26. vllm/core/block/prefix_caching_block.py +606 -0
  27. vllm/core/block_manager_v1.py +625 -0
  28. vllm/core/block_manager_v2.py +258 -0
  29. vllm/core/evictor_v1.py +105 -0
  30. vllm/core/evictor_v2.py +127 -0
  31. vllm/core/interfaces.py +113 -0
  32. vllm/core/policy.py +45 -0
  33. vllm/core/scheduler.py +1163 -0
  34. vllm/distributed/__init__.py +3 -0
  35. vllm/distributed/communication_op.py +237 -0
  36. vllm/distributed/device_communicators/__init__.py +0 -0
  37. vllm/distributed/device_communicators/custom_all_reduce.py +274 -0
  38. vllm/distributed/device_communicators/pynccl.py +287 -0
  39. vllm/distributed/device_communicators/pynccl_utils.py +66 -0
  40. vllm/distributed/parallel_state.py +339 -0
  41. vllm/distributed/utils.py +136 -0
  42. vllm/engine/__init__.py +0 -0
  43. vllm/engine/arg_utils.py +649 -0
  44. vllm/engine/async_llm_engine.py +737 -0
  45. vllm/engine/llm_engine.py +784 -0
  46. vllm/engine/metrics.py +368 -0
  47. vllm/engine/output_processor/__init__.py +0 -0
  48. vllm/engine/output_processor/interfaces.py +76 -0
  49. vllm/engine/output_processor/multi_step.py +142 -0
  50. vllm/engine/output_processor/single_step.py +284 -0
  51. vllm/engine/output_processor/stop_checker.py +101 -0
  52. vllm/engine/output_processor/util.py +19 -0
  53. vllm/entrypoints/__init__.py +0 -0
  54. vllm/entrypoints/api_server.py +119 -0
  55. vllm/entrypoints/llm.py +259 -0
  56. vllm/entrypoints/openai/__init__.py +0 -0
  57. vllm/entrypoints/openai/api_server.py +186 -0
  58. vllm/entrypoints/openai/cli_args.py +115 -0
  59. vllm/entrypoints/openai/protocol.py +460 -0
  60. vllm/entrypoints/openai/serving_chat.py +392 -0
  61. vllm/entrypoints/openai/serving_completion.py +347 -0
  62. vllm/entrypoints/openai/serving_engine.py +234 -0
  63. vllm/envs.py +217 -0
  64. vllm/executor/__init__.py +0 -0
  65. vllm/executor/cpu_executor.py +152 -0
  66. vllm/executor/distributed_gpu_executor.py +115 -0
  67. vllm/executor/executor_base.py +115 -0
  68. vllm/executor/gpu_executor.py +150 -0
  69. vllm/executor/multiproc_worker_utils.py +263 -0
  70. vllm/executor/neuron_executor.py +91 -0
  71. vllm/executor/ray_gpu_executor.py +327 -0
  72. vllm/executor/ray_utils.py +119 -0
  73. vllm/logger.py +153 -0
  74. vllm/logging/__init__.py +5 -0
  75. vllm/logging/formatter.py +15 -0
  76. vllm/lora/__init__.py +0 -0
  77. vllm/lora/fully_sharded_layers.py +262 -0
  78. vllm/lora/layers.py +1181 -0
  79. vllm/lora/lora.py +167 -0
  80. vllm/lora/models.py +645 -0
  81. vllm/lora/punica.py +213 -0
  82. vllm/lora/request.py +32 -0
  83. vllm/lora/utils.py +98 -0
  84. vllm/lora/worker_manager.py +251 -0
  85. vllm/model_executor/__init__.py +7 -0
  86. vllm/model_executor/guided_decoding/__init__.py +25 -0
  87. vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +70 -0
  88. vllm/model_executor/guided_decoding/outlines_decoding.py +130 -0
  89. vllm/model_executor/guided_decoding/outlines_logits_processors.py +184 -0
  90. vllm/model_executor/layers/__init__.py +0 -0
  91. vllm/model_executor/layers/activation.py +173 -0
  92. vllm/model_executor/layers/fused_moe/__init__.py +7 -0
  93. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  94. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  95. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  96. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  97. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  98. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  99. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  100. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  101. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  102. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  103. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  104. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  105. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +140 -0
  106. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  107. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  108. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  109. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  110. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
  111. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  112. vllm/model_executor/layers/fused_moe/fused_moe.py +479 -0
  113. vllm/model_executor/layers/layernorm.py +71 -0
  114. vllm/model_executor/layers/linear.py +709 -0
  115. vllm/model_executor/layers/logits_processor.py +115 -0
  116. vllm/model_executor/layers/ops/__init__.py +0 -0
  117. vllm/model_executor/layers/ops/rand.py +157 -0
  118. vllm/model_executor/layers/ops/sample.py +406 -0
  119. vllm/model_executor/layers/quantization/__init__.py +35 -0
  120. vllm/model_executor/layers/quantization/aqlm.py +376 -0
  121. vllm/model_executor/layers/quantization/awq.py +175 -0
  122. vllm/model_executor/layers/quantization/base_config.py +97 -0
  123. vllm/model_executor/layers/quantization/fp8.py +265 -0
  124. vllm/model_executor/layers/quantization/gptq.py +224 -0
  125. vllm/model_executor/layers/quantization/gptq_marlin.py +438 -0
  126. vllm/model_executor/layers/quantization/marlin.py +227 -0
  127. vllm/model_executor/layers/quantization/schema.py +84 -0
  128. vllm/model_executor/layers/quantization/squeezellm.py +137 -0
  129. vllm/model_executor/layers/rejection_sampler.py +405 -0
  130. vllm/model_executor/layers/rotary_embedding.py +525 -0
  131. vllm/model_executor/layers/sampler.py +1051 -0
  132. vllm/model_executor/layers/vocab_parallel_embedding.py +155 -0
  133. vllm/model_executor/model_loader/__init__.py +30 -0
  134. vllm/model_executor/model_loader/loader.py +362 -0
  135. vllm/model_executor/model_loader/neuron.py +136 -0
  136. vllm/model_executor/model_loader/tensorizer.py +368 -0
  137. vllm/model_executor/model_loader/utils.py +41 -0
  138. vllm/model_executor/model_loader/weight_utils.py +372 -0
  139. vllm/model_executor/models/__init__.py +119 -0
  140. vllm/model_executor/models/baichuan.py +410 -0
  141. vllm/model_executor/models/bloom.py +327 -0
  142. vllm/model_executor/models/chatglm.py +386 -0
  143. vllm/model_executor/models/commandr.py +373 -0
  144. vllm/model_executor/models/dbrx.py +413 -0
  145. vllm/model_executor/models/decilm.py +122 -0
  146. vllm/model_executor/models/deepseek.py +438 -0
  147. vllm/model_executor/models/falcon.py +444 -0
  148. vllm/model_executor/models/gemma.py +393 -0
  149. vllm/model_executor/models/gpt2.py +266 -0
  150. vllm/model_executor/models/gpt_bigcode.py +274 -0
  151. vllm/model_executor/models/gpt_j.py +281 -0
  152. vllm/model_executor/models/gpt_neox.py +295 -0
  153. vllm/model_executor/models/internlm2.py +323 -0
  154. vllm/model_executor/models/jais.py +333 -0
  155. vllm/model_executor/models/llama.py +442 -0
  156. vllm/model_executor/models/llava.py +239 -0
  157. vllm/model_executor/models/minicpm.py +531 -0
  158. vllm/model_executor/models/mixtral.py +583 -0
  159. vllm/model_executor/models/mixtral_quant.py +404 -0
  160. vllm/model_executor/models/mpt.py +295 -0
  161. vllm/model_executor/models/olmo.py +356 -0
  162. vllm/model_executor/models/opt.py +349 -0
  163. vllm/model_executor/models/orion.py +319 -0
  164. vllm/model_executor/models/phi.py +300 -0
  165. vllm/model_executor/models/qwen.py +284 -0
  166. vllm/model_executor/models/qwen2.py +367 -0
  167. vllm/model_executor/models/qwen2_moe.py +447 -0
  168. vllm/model_executor/models/stablelm.py +301 -0
  169. vllm/model_executor/models/starcoder2.py +302 -0
  170. vllm/model_executor/models/xverse.py +366 -0
  171. vllm/model_executor/sampling_metadata.py +588 -0
  172. vllm/model_executor/utils.py +35 -0
  173. vllm/outputs.py +150 -0
  174. vllm/py.typed +2 -0
  175. vllm/sampling_params.py +340 -0
  176. vllm/sequence.py +766 -0
  177. vllm/spec_decode/__init__.py +0 -0
  178. vllm/spec_decode/batch_expansion.py +397 -0
  179. vllm/spec_decode/interfaces.py +73 -0
  180. vllm/spec_decode/metrics.py +191 -0
  181. vllm/spec_decode/multi_step_worker.py +203 -0
  182. vllm/spec_decode/ngram_worker.py +176 -0
  183. vllm/spec_decode/spec_decode_worker.py +472 -0
  184. vllm/spec_decode/top1_proposer.py +200 -0
  185. vllm/spec_decode/util.py +228 -0
  186. vllm/test_utils.py +41 -0
  187. vllm/transformers_utils/__init__.py +0 -0
  188. vllm/transformers_utils/config.py +58 -0
  189. vllm/transformers_utils/configs/__init__.py +16 -0
  190. vllm/transformers_utils/configs/chatglm.py +68 -0
  191. vllm/transformers_utils/configs/dbrx.py +278 -0
  192. vllm/transformers_utils/configs/falcon.py +87 -0
  193. vllm/transformers_utils/configs/jais.py +236 -0
  194. vllm/transformers_utils/configs/mpt.py +178 -0
  195. vllm/transformers_utils/detokenizer.py +313 -0
  196. vllm/transformers_utils/tokenizer.py +149 -0
  197. vllm/transformers_utils/tokenizer_group/__init__.py +33 -0
  198. vllm/transformers_utils/tokenizer_group/base_tokenizer_group.py +55 -0
  199. vllm/transformers_utils/tokenizer_group/ray_tokenizer_group.py +169 -0
  200. vllm/transformers_utils/tokenizer_group/tokenizer_group.py +78 -0
  201. vllm/transformers_utils/tokenizers/__init__.py +5 -0
  202. vllm/transformers_utils/tokenizers/baichuan.py +255 -0
  203. vllm/usage/__init__.py +0 -0
  204. vllm/usage/usage_lib.py +209 -0
  205. vllm/utils.py +677 -0
  206. vllm/worker/__init__.py +0 -0
  207. vllm/worker/cache_engine.py +105 -0
  208. vllm/worker/cpu_model_runner.py +346 -0
  209. vllm/worker/cpu_worker.py +321 -0
  210. vllm/worker/model_runner.py +1168 -0
  211. vllm/worker/neuron_model_runner.py +196 -0
  212. vllm/worker/neuron_worker.py +98 -0
  213. vllm/worker/worker.py +345 -0
  214. vllm/worker/worker_base.py +146 -0
  215. vllm_npu-0.4.2.dist-info/LICENSE +201 -0
  216. vllm_npu-0.4.2.dist-info/METADATA +173 -0
  217. vllm_npu-0.4.2.dist-info/RECORD +219 -0
  218. vllm_npu-0.4.2.dist-info/WHEEL +5 -0
  219. vllm_npu-0.4.2.dist-info/top_level.txt +1 -0
vllm/worker/cache_engine.py
@@ -0,0 +1,105 @@
+"""CacheEngine class for managing the KV cache."""
+from typing import Dict, List
+
+import torch
+
+from vllm.attention import get_attn_backend
+from vllm.config import CacheConfig, ModelConfig, ParallelConfig
+from vllm.logger import init_logger
+from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE, is_pin_memory_available
+
+logger = init_logger(__name__)
+
+
+class CacheEngine:
+    """Manages the KV cache.
+
+    This class is responsible for initializing and managing the GPU and CPU KV
+    caches. It also provides methods for performing KV cache operations, such
+    as swapping and copying.
+    """
+
+    def __init__(
+        self,
+        cache_config: CacheConfig,
+        model_config: ModelConfig,
+        parallel_config: ParallelConfig,
+    ) -> None:
+        self.cache_config = cache_config
+        self.model_config = model_config
+        self.parallel_config = parallel_config
+
+        self.head_size = model_config.get_head_size()
+        self.num_layers = model_config.get_num_layers(parallel_config)
+        self.num_heads = model_config.get_num_kv_heads(parallel_config)
+
+        self.block_size = cache_config.block_size
+        self.num_gpu_blocks = cache_config.num_gpu_blocks
+        self.num_cpu_blocks = cache_config.num_cpu_blocks
+
+        if cache_config.cache_dtype == "auto":
+            self.dtype = model_config.dtype
+        else:
+            self.dtype = STR_DTYPE_TO_TORCH_DTYPE[cache_config.cache_dtype]
+
+        # Get attention backend.
+        self.attn_backend = get_attn_backend(model_config.dtype)
+
+        # Initialize the cache.
+        self.gpu_cache = self._allocate_kv_cache(self.num_gpu_blocks, "cuda")
+        self.cpu_cache = self._allocate_kv_cache(self.num_cpu_blocks, "cpu")
+
+    def _allocate_kv_cache(
+        self,
+        num_blocks: int,
+        device: str,
+    ) -> List[torch.Tensor]:
+        """Allocates KV cache on the specified device."""
+        kv_cache_shape = self.attn_backend.get_kv_cache_shape(
+            num_blocks, self.block_size, self.num_heads, self.head_size)
+        pin_memory = is_pin_memory_available() if device == "cpu" else False
+        kv_cache: List[torch.Tensor] = []
+        for _ in range(self.num_layers):
+            kv_cache.append(
+                torch.empty(kv_cache_shape,
+                            dtype=self.dtype,
+                            pin_memory=pin_memory,
+                            device=device))
+        return kv_cache
+
+    def swap_in(self, src_to_dst: Dict[int, int]) -> None:
+        for i in range(self.num_layers):
+            self.attn_backend.swap_blocks(self.cpu_cache[i], self.gpu_cache[i],
+                                          src_to_dst)
+
+    def swap_out(self, src_to_dst: Dict[int, int]) -> None:
+        for i in range(self.num_layers):
+            self.attn_backend.swap_blocks(self.gpu_cache[i], self.cpu_cache[i],
+                                          src_to_dst)
+
+    def copy(self, src_to_dsts: Dict[int, List[int]]) -> None:
+        self.attn_backend.copy_blocks(self.gpu_cache, src_to_dsts)
+
+    @staticmethod
+    def get_cache_block_size(
+        cache_config: CacheConfig,
+        model_config: ModelConfig,
+        parallel_config: ParallelConfig,
+    ) -> int:
+        head_size = model_config.get_head_size()
+        num_heads = model_config.get_num_kv_heads(parallel_config)
+        num_layers = model_config.get_num_layers(parallel_config)
+
+        key_cache_block = cache_config.block_size * num_heads * head_size
+        value_cache_block = key_cache_block
+        total = num_layers * (key_cache_block + value_cache_block)
+        if cache_config.cache_dtype == "auto":
+            dtype = model_config.dtype
+        else:
+            dtype = STR_DTYPE_TO_TORCH_DTYPE[cache_config.cache_dtype]
+        dtype_size = _get_dtype_size(dtype)
+        return dtype_size * total
+
+
+def _get_dtype_size(dtype: torch.dtype) -> int:
+    return torch.tensor([], dtype=dtype).element_size()
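For reference, a minimal standalone sketch of the arithmetic in get_cache_block_size above, using hypothetical model dimensions (block size 16, 8 KV heads, head size 128, 32 layers, fp16). The numbers are illustrative only and are not taken from any config shipped in this wheel.

import torch

# Hypothetical dimensions, chosen only to illustrate the formula.
block_size = 16        # tokens per KV-cache block
num_kv_heads = 8
head_size = 128
num_layers = 32
dtype = torch.float16

key_cache_block = block_size * num_kv_heads * head_size    # elements per key block
value_cache_block = key_cache_block                        # values mirror keys
total_elements = num_layers * (key_cache_block + value_cache_block)
dtype_size = torch.tensor([], dtype=dtype).element_size()  # 2 bytes for fp16
print(dtype_size * total_elements)                         # 2097152 bytes (~2 MiB) per block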
vllm/worker/cpu_model_runner.py
@@ -0,0 +1,346 @@
+from typing import List, Optional, Tuple
+
+import torch
+from torch import nn
+
+from vllm.attention import AttentionMetadata, get_attn_backend
+from vllm.config import (DeviceConfig, LoadConfig, LoRAConfig, ModelConfig,
+                         ParallelConfig, SchedulerConfig, VisionLanguageConfig)
+from vllm.distributed import broadcast_tensor_dict
+from vllm.logger import init_logger
+from vllm.model_executor import SamplingMetadata
+from vllm.model_executor.model_loader import get_model
+from vllm.sequence import SamplerOutput, SequenceGroupMetadata
+from vllm.utils import make_tensor_with_pad
+
+logger = init_logger(__name__)
+
+_PAD_SLOT_ID = -1
+
+
+class CPUModelRunner:
+
+    def __init__(
+        self,
+        model_config: ModelConfig,
+        parallel_config: ParallelConfig,
+        scheduler_config: SchedulerConfig,
+        device_config: DeviceConfig,
+        load_config: LoadConfig,
+        lora_config: Optional[LoRAConfig],
+        vision_language_config: Optional[VisionLanguageConfig],
+        kv_cache_dtype: Optional[str] = "auto",
+        is_driver_worker: bool = False,
+        *args,
+        **kwargs,
+    ):
+        self.model_config = model_config
+        self.parallel_config = parallel_config
+        self.scheduler_config = scheduler_config
+        # Currently, CPU worker doesn't support chunked prefill.
+        assert self.scheduler_config.chunked_prefill_enabled is False
+        self.lora_config = lora_config
+        self.vision_language_config = vision_language_config
+        self.load_config = load_config
+        self.is_driver_worker = is_driver_worker
+
+        # model_config can be None in tests/samplers/test_sampler.py.
+        # FIXME(woosuk): This is a hack to make the tests work. Refactor this.
+        self.sliding_window = (model_config.get_sliding_window()
+                               if model_config is not None else None)
+        self.device_config = (device_config
+                              if device_config is not None else DeviceConfig())
+        self.device = self.device_config.device
+
+        self.kv_cache_dtype = kv_cache_dtype
+
+        self.attn_backend = get_attn_backend(
+            self.model_config.dtype if model_config is not None else None)
+
+        # Lazy initialization.
+        self.model: nn.Module  # Set after init_Model
+        self.block_size: int  # Set after initial profiling.
+
+    def load_model(self) -> None:
+        self.model = get_model(
+            model_config=self.model_config,
+            load_config=self.load_config,
+            device_config=self.device_config,
+            vision_language_config=self.vision_language_config,
+            lora_config=self.lora_config,
+            parallel_config=self.parallel_config,
+            scheduler_config=self.scheduler_config)
+
+    def _prepare_prompt(
+        self,
+        seq_group_metadata_list: List[SequenceGroupMetadata],
+    ) -> Tuple[torch.Tensor, torch.Tensor, AttentionMetadata, List[int],
+               Optional[torch.Tensor]]:
+        assert len(seq_group_metadata_list) > 0
+        input_tokens: List[int] = []
+        input_positions: List[int] = []
+        slot_mapping: List[int] = []
+        seq_lens: List[int] = []
+        multi_modal_input_list: List[torch.Tensor] = []
+
+        for seq_group_metadata in seq_group_metadata_list:
+            assert seq_group_metadata.is_prompt
+            seq_ids = list(seq_group_metadata.seq_data.keys())
+            assert len(seq_ids) == 1
+            seq_id = seq_ids[0]
+
+            seq_data = seq_group_metadata.seq_data[seq_id]
+            prompt_tokens = seq_data.get_token_ids()
+            computed_len = seq_data.get_num_computed_tokens()
+            seq_len = len(prompt_tokens)
+
+            seq_lens.append(seq_len)  # Prompt token num
+            input_tokens.extend(prompt_tokens)  # Token ids
+
+            # Token position ids
+            # NOTE(woosuk): Here we assume that the first token in the prompt
+            # is always the first token in the sequence.
+            input_positions.extend(list(range(computed_len, seq_len)))
+
+            if seq_group_metadata.multi_modal_data:
+                multi_modal_input_list.append(
+                    seq_group_metadata.multi_modal_data.data)
+
+            # Compute the slot mapping.
+            block_table = seq_group_metadata.block_tables[seq_id]
+            # Mask the [0, start_idx) tokens of the prompt with _PAD_SLOT_ID,
+            # where start_idx is max(0, seq_len - sliding_window).
+            # For example, if the prompt len is 10, sliding window is 8, and
+            # block size is 4, the first two tokens are masked and the slot
+            # mapping will be [-1, -1, 2, 3, 4, 5, 6, 7, 0, 1].
+            start_idx = 0
+            if self.sliding_window is not None:
+                start_idx = max(0, seq_len - self.sliding_window)
+
+            for i in range(computed_len, seq_len):
+                if i < start_idx:
+                    slot_mapping.append(_PAD_SLOT_ID)
+                    continue
+
+                block_number = block_table[i //
+                                           self.block_size]  # type: ignore
+                block_offset = i % self.block_size  # type: ignore
+                slot = block_number * self.block_size + block_offset
+                slot_mapping.append(slot)
+
+        if multi_modal_input_list:
+            assert self.vision_language_config, (
+                "Multi-modal inputs are only supported by "
+                "vision language models.")
+            multi_modal_input = torch.cat(multi_modal_input_list,
+                                          dim=0).to(self.device)
+        else:
+            multi_modal_input = None
+
+        num_prompt_tokens = len(input_tokens)
+
+        input_tokens = torch.tensor(input_tokens,
+                                    dtype=torch.long,
+                                    device=self.device)  # type: ignore
+        input_positions = torch.tensor(input_positions,
+                                       dtype=torch.long,
+                                       device=self.device)  # type: ignore
+        slot_mapping = torch.tensor(slot_mapping,
+                                    dtype=torch.long,
+                                    device=self.device)  # type: ignore
+
+        attn_metadata = self.attn_backend.make_metadata(
+            is_prompt=True,
+            seq_lens=seq_lens,
+            seq_lens_tensor=None,
+            max_seq_len=None,
+            num_prefills=len(seq_lens),
+            num_prefill_tokens=num_prompt_tokens,
+            num_decode_tokens=0,
+            prefill_metadata=None,
+            decode_metadata=None,
+            block_tables=torch.tensor([]),
+            slot_mapping=slot_mapping,
+            kv_cache_dtype=self.kv_cache_dtype,
+        )
+        return (input_tokens, input_positions, attn_metadata, seq_lens,
+                multi_modal_input)
+
+    def _prepare_decode(
+        self,
+        seq_group_metadata_list: List[SequenceGroupMetadata],
+    ) -> Tuple[torch.Tensor, torch.Tensor, AttentionMetadata]:
+        assert len(seq_group_metadata_list) > 0
+        input_tokens: List[int] = []
+        input_positions: List[int] = []
+        slot_mapping: List[int] = []
+        seq_lens: List[int] = []
+        block_tables: List[List[int]] = []
+
+        for seq_group_metadata in seq_group_metadata_list:
+            assert not seq_group_metadata.is_prompt
+            assert seq_group_metadata.token_chunk_size == 1
+
+            seq_ids = list(seq_group_metadata.seq_data.keys())
+
+            for seq_id in seq_ids:
+                seq_data = seq_group_metadata.seq_data[seq_id]
+                generation_token = seq_data.get_last_token_id()
+                input_tokens.append(generation_token)
+
+                seq_len = seq_data.get_len()
+                position = seq_len - 1
+                input_positions.append(position)
+
+                seq_len = seq_len if self.sliding_window is None else min(
+                    seq_len, self.sliding_window)
+                seq_lens.append(seq_len)
+
+                block_table = seq_group_metadata.block_tables[seq_id]
+                block_number = block_table[position // self.block_size]
+                block_offset = position % self.block_size
+                slot = block_number * self.block_size + block_offset
+                slot_mapping.append(slot)
+
+                if self.sliding_window is not None:
+                    sliding_window_blocks = (self.sliding_window //
+                                             self.block_size)
+                    block_table = block_table[-sliding_window_blocks:]
+                block_tables.append(block_table)
+
+        max_seq_len = max(seq_lens)
+
+        input_tokens = torch.tensor(input_tokens,
+                                    dtype=torch.long,
+                                    device=self.device)
+        input_positions = torch.tensor(input_positions,
+                                       dtype=torch.long,
+                                       device=self.device)
+        slot_mapping = torch.tensor(slot_mapping,
+                                    dtype=torch.long,
+                                    device=self.device)
+        seq_lens_tensor = torch.tensor(seq_lens,
+                                       dtype=torch.int,
+                                       device=self.device)
+
+        max_block_table_len = max(
+            len(block_table) for block_table in block_tables)
+        block_tables = make_tensor_with_pad(
+            block_tables,
+            max_len=max_block_table_len,
+            pad=0,
+            dtype=torch.int,
+            device=self.device,
+        )
+
+        attn_metadata = self.attn_backend.make_metadata(
+            is_prompt=False,
+            slot_mapping=slot_mapping,
+            seq_lens=seq_lens,
+            seq_lens_tensor=seq_lens_tensor,
+            max_seq_len=max_seq_len,
+            num_prefill_tokens=0,
+            num_decode_tokens=len(input_tokens),
+            num_prefills=0,
+            prefill_metadata=None,
+            decode_metadata=None,
+            block_tables=block_tables,
+            kv_cache_dtype=self.kv_cache_dtype,
+        )
+        return (
+            input_tokens,
+            input_positions,
+            attn_metadata,
+        )
+
+    def prepare_input_tensors(
+        self,
+        seq_group_metadata_list: List[SequenceGroupMetadata],
+    ) -> Tuple[torch.Tensor, torch.Tensor, AttentionMetadata, SamplingMetadata,
+               Optional[torch.Tensor]]:
+        multi_modal_input = None
+        if self.is_driver_worker:
+            # NOTE: We assume that all sequences in the group are all prompts
+            # or all decodes.
+            is_prompt = seq_group_metadata_list[0].is_prompt
+            # Prepare input tensors.
+            if is_prompt:
+                (input_tokens, input_positions, attn_metadata, seq_lens,
+                 multi_modal_input
+                 ) = self._prepare_prompt(seq_group_metadata_list)
+            else:
+                (input_tokens, input_positions,
+                 attn_metadata) = self._prepare_decode(seq_group_metadata_list)
+                seq_lens = []
+            sampling_metadata = SamplingMetadata.prepare(
+                seq_group_metadata_list,
+                seq_lens,
+                # query_lens is not needed if chunked prefill is not
+                # supported. Since CPU worker doesn't support chunked prefill
+                # just use seq_lens instead.
+                seq_lens,
+                self.device,
+                pin_memory=False)
+            # Broadcast the metadata.
+            metadata_dict = {
+                "input_tokens": input_tokens,
+                "input_positions": input_positions,
+                "selected_token_indices":
+                sampling_metadata.selected_token_indices,
+            }
+            metadata_dict.update(attn_metadata.asdict_zerocopy())
+            broadcast_tensor_dict(metadata_dict, src=0)
+        else:
+            metadata_dict = broadcast_tensor_dict(src=0)
+            input_tokens = metadata_dict.pop("input_tokens")
+            input_positions = metadata_dict.pop("input_positions")
+            selected_token_indices = metadata_dict.pop(
+                "selected_token_indices")
+            attn_metadata = self.attn_backend.make_metadata(**metadata_dict)
+            sampling_metadata = SamplingMetadata(
+                seq_groups=None,
+                seq_data=None,
+                seq_lens=None,
+                selected_token_indices=selected_token_indices,
+                categorized_sample_indices=None,
+                generators=None,
+            )
+
+        return (input_tokens, input_positions, attn_metadata,
+                sampling_metadata, multi_modal_input)
+
+    @torch.inference_mode()
+    def execute_model(
+        self,
+        seq_group_metadata_list: List[SequenceGroupMetadata],
+        kv_caches: List[torch.Tensor],
+    ) -> Optional[SamplerOutput]:
+        (input_tokens, input_positions, attn_metadata, sampling_metadata,
+         multi_modal_input
+         ) = self.prepare_input_tensors(seq_group_metadata_list)
+
+        model_executable = self.model
+        execute_model_kwargs = {
+            "input_ids": input_tokens,
+            "positions": input_positions,
+            "kv_caches": kv_caches,
+            "attn_metadata": attn_metadata,
+        }
+        if self.vision_language_config:
+            execute_model_kwargs.update({"image_input": multi_modal_input})
+
+        hidden_states = model_executable(**execute_model_kwargs)
+
+        # Compute the logits.
+        logits = self.model.compute_logits(hidden_states, sampling_metadata)
+
+        # Only perform sampling in the driver worker.
+        if not self.is_driver_worker:
+            return None
+
+        # Sample the next token.
+        output = self.model.sample(
+            logits=logits,
+            sampling_metadata=sampling_metadata,
+        )
+        return output
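The sliding-window example in the _prepare_prompt comment above (prompt length 10, sliding window 8, block size 4, expected slot mapping [-1, -1, 2, 3, 4, 5, 6, 7, 0, 1]) can be checked with a minimal standalone sketch of the same slot-mapping logic. The block table [0, 1, 0] below is hypothetical, chosen so the result matches that comment.

from typing import List, Optional

_PAD_SLOT_ID = -1


def compute_slot_mapping(block_table: List[int], seq_len: int, block_size: int,
                         sliding_window: Optional[int]) -> List[int]:
    # Tokens before start_idx fall outside the sliding window and are masked.
    start_idx = 0
    if sliding_window is not None:
        start_idx = max(0, seq_len - sliding_window)
    slot_mapping: List[int] = []
    for i in range(seq_len):
        if i < start_idx:
            slot_mapping.append(_PAD_SLOT_ID)
            continue
        # Each logical token maps to an offset within its physical block.
        block_number = block_table[i // block_size]
        slot_mapping.append(block_number * block_size + i % block_size)
    return slot_mapping


assert compute_slot_mapping([0, 1, 0], seq_len=10, block_size=4,
                            sliding_window=8) == [-1, -1, 2, 3, 4, 5, 6, 7, 0, 1]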