vllm-npu 0.4.2__py3-none-any.whl
Sign up to get free protection for your applications and to get access to all the features.
- vllm/__init__.py +23 -0
- vllm/_custom_ops.py +251 -0
- vllm/attention/__init__.py +13 -0
- vllm/attention/backends/__init__.py +0 -0
- vllm/attention/backends/abstract.py +127 -0
- vllm/attention/backends/flash_attn.py +271 -0
- vllm/attention/backends/flashinfer.py +220 -0
- vllm/attention/backends/rocm_flash_attn.py +374 -0
- vllm/attention/backends/torch_sdpa.py +250 -0
- vllm/attention/backends/xformers.py +393 -0
- vllm/attention/layer.py +56 -0
- vllm/attention/ops/__init__.py +0 -0
- vllm/attention/ops/paged_attn.py +216 -0
- vllm/attention/ops/prefix_prefill.py +792 -0
- vllm/attention/ops/triton_flash_attention.py +810 -0
- vllm/attention/selector.py +91 -0
- vllm/block.py +84 -0
- vllm/config.py +1225 -0
- vllm/core/__init__.py +0 -0
- vllm/core/block/__init__.py +0 -0
- vllm/core/block/block_table.py +295 -0
- vllm/core/block/common.py +199 -0
- vllm/core/block/cpu_gpu_block_allocator.py +228 -0
- vllm/core/block/interfaces.py +205 -0
- vllm/core/block/naive_block.py +318 -0
- vllm/core/block/prefix_caching_block.py +606 -0
- vllm/core/block_manager_v1.py +625 -0
- vllm/core/block_manager_v2.py +258 -0
- vllm/core/evictor_v1.py +105 -0
- vllm/core/evictor_v2.py +127 -0
- vllm/core/interfaces.py +113 -0
- vllm/core/policy.py +45 -0
- vllm/core/scheduler.py +1163 -0
- vllm/distributed/__init__.py +3 -0
- vllm/distributed/communication_op.py +237 -0
- vllm/distributed/device_communicators/__init__.py +0 -0
- vllm/distributed/device_communicators/custom_all_reduce.py +274 -0
- vllm/distributed/device_communicators/pynccl.py +287 -0
- vllm/distributed/device_communicators/pynccl_utils.py +66 -0
- vllm/distributed/parallel_state.py +339 -0
- vllm/distributed/utils.py +136 -0
- vllm/engine/__init__.py +0 -0
- vllm/engine/arg_utils.py +649 -0
- vllm/engine/async_llm_engine.py +737 -0
- vllm/engine/llm_engine.py +784 -0
- vllm/engine/metrics.py +368 -0
- vllm/engine/output_processor/__init__.py +0 -0
- vllm/engine/output_processor/interfaces.py +76 -0
- vllm/engine/output_processor/multi_step.py +142 -0
- vllm/engine/output_processor/single_step.py +284 -0
- vllm/engine/output_processor/stop_checker.py +101 -0
- vllm/engine/output_processor/util.py +19 -0
- vllm/entrypoints/__init__.py +0 -0
- vllm/entrypoints/api_server.py +119 -0
- vllm/entrypoints/llm.py +259 -0
- vllm/entrypoints/openai/__init__.py +0 -0
- vllm/entrypoints/openai/api_server.py +186 -0
- vllm/entrypoints/openai/cli_args.py +115 -0
- vllm/entrypoints/openai/protocol.py +460 -0
- vllm/entrypoints/openai/serving_chat.py +392 -0
- vllm/entrypoints/openai/serving_completion.py +347 -0
- vllm/entrypoints/openai/serving_engine.py +234 -0
- vllm/envs.py +217 -0
- vllm/executor/__init__.py +0 -0
- vllm/executor/cpu_executor.py +152 -0
- vllm/executor/distributed_gpu_executor.py +115 -0
- vllm/executor/executor_base.py +115 -0
- vllm/executor/gpu_executor.py +150 -0
- vllm/executor/multiproc_worker_utils.py +263 -0
- vllm/executor/neuron_executor.py +91 -0
- vllm/executor/ray_gpu_executor.py +327 -0
- vllm/executor/ray_utils.py +119 -0
- vllm/logger.py +153 -0
- vllm/logging/__init__.py +5 -0
- vllm/logging/formatter.py +15 -0
- vllm/lora/__init__.py +0 -0
- vllm/lora/fully_sharded_layers.py +262 -0
- vllm/lora/layers.py +1181 -0
- vllm/lora/lora.py +167 -0
- vllm/lora/models.py +645 -0
- vllm/lora/punica.py +213 -0
- vllm/lora/request.py +32 -0
- vllm/lora/utils.py +98 -0
- vllm/lora/worker_manager.py +251 -0
- vllm/model_executor/__init__.py +7 -0
- vllm/model_executor/guided_decoding/__init__.py +25 -0
- vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +70 -0
- vllm/model_executor/guided_decoding/outlines_decoding.py +130 -0
- vllm/model_executor/guided_decoding/outlines_logits_processors.py +184 -0
- vllm/model_executor/layers/__init__.py +0 -0
- vllm/model_executor/layers/activation.py +173 -0
- vllm/model_executor/layers/fused_moe/__init__.py +7 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +140 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/fused_moe.py +479 -0
- vllm/model_executor/layers/layernorm.py +71 -0
- vllm/model_executor/layers/linear.py +709 -0
- vllm/model_executor/layers/logits_processor.py +115 -0
- vllm/model_executor/layers/ops/__init__.py +0 -0
- vllm/model_executor/layers/ops/rand.py +157 -0
- vllm/model_executor/layers/ops/sample.py +406 -0
- vllm/model_executor/layers/quantization/__init__.py +35 -0
- vllm/model_executor/layers/quantization/aqlm.py +376 -0
- vllm/model_executor/layers/quantization/awq.py +175 -0
- vllm/model_executor/layers/quantization/base_config.py +97 -0
- vllm/model_executor/layers/quantization/fp8.py +265 -0
- vllm/model_executor/layers/quantization/gptq.py +224 -0
- vllm/model_executor/layers/quantization/gptq_marlin.py +438 -0
- vllm/model_executor/layers/quantization/marlin.py +227 -0
- vllm/model_executor/layers/quantization/schema.py +84 -0
- vllm/model_executor/layers/quantization/squeezellm.py +137 -0
- vllm/model_executor/layers/rejection_sampler.py +405 -0
- vllm/model_executor/layers/rotary_embedding.py +525 -0
- vllm/model_executor/layers/sampler.py +1051 -0
- vllm/model_executor/layers/vocab_parallel_embedding.py +155 -0
- vllm/model_executor/model_loader/__init__.py +30 -0
- vllm/model_executor/model_loader/loader.py +362 -0
- vllm/model_executor/model_loader/neuron.py +136 -0
- vllm/model_executor/model_loader/tensorizer.py +368 -0
- vllm/model_executor/model_loader/utils.py +41 -0
- vllm/model_executor/model_loader/weight_utils.py +372 -0
- vllm/model_executor/models/__init__.py +119 -0
- vllm/model_executor/models/baichuan.py +410 -0
- vllm/model_executor/models/bloom.py +327 -0
- vllm/model_executor/models/chatglm.py +386 -0
- vllm/model_executor/models/commandr.py +373 -0
- vllm/model_executor/models/dbrx.py +413 -0
- vllm/model_executor/models/decilm.py +122 -0
- vllm/model_executor/models/deepseek.py +438 -0
- vllm/model_executor/models/falcon.py +444 -0
- vllm/model_executor/models/gemma.py +393 -0
- vllm/model_executor/models/gpt2.py +266 -0
- vllm/model_executor/models/gpt_bigcode.py +274 -0
- vllm/model_executor/models/gpt_j.py +281 -0
- vllm/model_executor/models/gpt_neox.py +295 -0
- vllm/model_executor/models/internlm2.py +323 -0
- vllm/model_executor/models/jais.py +333 -0
- vllm/model_executor/models/llama.py +442 -0
- vllm/model_executor/models/llava.py +239 -0
- vllm/model_executor/models/minicpm.py +531 -0
- vllm/model_executor/models/mixtral.py +583 -0
- vllm/model_executor/models/mixtral_quant.py +404 -0
- vllm/model_executor/models/mpt.py +295 -0
- vllm/model_executor/models/olmo.py +356 -0
- vllm/model_executor/models/opt.py +349 -0
- vllm/model_executor/models/orion.py +319 -0
- vllm/model_executor/models/phi.py +300 -0
- vllm/model_executor/models/qwen.py +284 -0
- vllm/model_executor/models/qwen2.py +367 -0
- vllm/model_executor/models/qwen2_moe.py +447 -0
- vllm/model_executor/models/stablelm.py +301 -0
- vllm/model_executor/models/starcoder2.py +302 -0
- vllm/model_executor/models/xverse.py +366 -0
- vllm/model_executor/sampling_metadata.py +588 -0
- vllm/model_executor/utils.py +35 -0
- vllm/outputs.py +150 -0
- vllm/py.typed +2 -0
- vllm/sampling_params.py +340 -0
- vllm/sequence.py +766 -0
- vllm/spec_decode/__init__.py +0 -0
- vllm/spec_decode/batch_expansion.py +397 -0
- vllm/spec_decode/interfaces.py +73 -0
- vllm/spec_decode/metrics.py +191 -0
- vllm/spec_decode/multi_step_worker.py +203 -0
- vllm/spec_decode/ngram_worker.py +176 -0
- vllm/spec_decode/spec_decode_worker.py +472 -0
- vllm/spec_decode/top1_proposer.py +200 -0
- vllm/spec_decode/util.py +228 -0
- vllm/test_utils.py +41 -0
- vllm/transformers_utils/__init__.py +0 -0
- vllm/transformers_utils/config.py +58 -0
- vllm/transformers_utils/configs/__init__.py +16 -0
- vllm/transformers_utils/configs/chatglm.py +68 -0
- vllm/transformers_utils/configs/dbrx.py +278 -0
- vllm/transformers_utils/configs/falcon.py +87 -0
- vllm/transformers_utils/configs/jais.py +236 -0
- vllm/transformers_utils/configs/mpt.py +178 -0
- vllm/transformers_utils/detokenizer.py +313 -0
- vllm/transformers_utils/tokenizer.py +149 -0
- vllm/transformers_utils/tokenizer_group/__init__.py +33 -0
- vllm/transformers_utils/tokenizer_group/base_tokenizer_group.py +55 -0
- vllm/transformers_utils/tokenizer_group/ray_tokenizer_group.py +169 -0
- vllm/transformers_utils/tokenizer_group/tokenizer_group.py +78 -0
- vllm/transformers_utils/tokenizers/__init__.py +5 -0
- vllm/transformers_utils/tokenizers/baichuan.py +255 -0
- vllm/usage/__init__.py +0 -0
- vllm/usage/usage_lib.py +209 -0
- vllm/utils.py +677 -0
- vllm/worker/__init__.py +0 -0
- vllm/worker/cache_engine.py +105 -0
- vllm/worker/cpu_model_runner.py +346 -0
- vllm/worker/cpu_worker.py +321 -0
- vllm/worker/model_runner.py +1168 -0
- vllm/worker/neuron_model_runner.py +196 -0
- vllm/worker/neuron_worker.py +98 -0
- vllm/worker/worker.py +345 -0
- vllm/worker/worker_base.py +146 -0
- vllm_npu-0.4.2.dist-info/LICENSE +201 -0
- vllm_npu-0.4.2.dist-info/METADATA +173 -0
- vllm_npu-0.4.2.dist-info/RECORD +219 -0
- vllm_npu-0.4.2.dist-info/WHEEL +5 -0
- vllm_npu-0.4.2.dist-info/top_level.txt +1 -0
vllm/lora/lora.py
ADDED
@@ -0,0 +1,167 @@
|
|
1
|
+
from typing import List, Optional
|
2
|
+
|
3
|
+
import torch
|
4
|
+
|
5
|
+
from vllm.utils import is_pin_memory_available
|
6
|
+
|
7
|
+
|
8
|
+
class LoRALayerWeights:
    """Weights of one LoRA-adapted layer: a pair of low-rank matrices.

    The effective weight delta is ``lora_a @ lora_b`` multiplied by a
    scaling factor, which defaults to ``lora_alpha / rank``.
    """

    def __init__(
        self,
        module_name: str,
        rank: int,
        lora_alpha: int,
        lora_a: torch.Tensor,
        lora_b: torch.Tensor,
        embeddings_tensor: Optional[torch.Tensor] = None,
        scaling: Optional[float] = None,
    ) -> None:
        self.module_name = module_name
        self.rank = rank
        self.lora_alpha = lora_alpha
        self.lora_a = lora_a
        self.lora_b = lora_b
        self.embeddings_tensor = embeddings_tensor
        # Standard LoRA scaling unless the caller supplies one explicitly.
        self.scaling = (self.lora_alpha /
                        self.rank) if scaling is None else scaling

    def optimize(self) -> "LoRALayerWeights":
        """Fold the scaling factor into ``lora_b`` so scaling becomes 1."""
        if self.scaling != 1:
            self.lora_b *= self.scaling
            self.scaling = 1
        return self

    @property
    def input_dim(self) -> int:
        # Input feature count, taken from lora_a's first dimension.
        return self.lora_a.shape[0]

    @property
    def output_dim(self) -> int:
        # Output feature count, taken from lora_b's second dimension.
        return self.lora_b.shape[1]

    @property
    def is_packed(self) -> bool:
        return False

    @property
    def extra_vocab_size(self) -> int:
        # Rows of the optional embeddings tensor extend the vocabulary.
        if self.embeddings_tensor is None:
            return 0
        return self.embeddings_tensor.shape[0]

    @classmethod
    def create_dummy_lora_weights(
            cls,
            module_name: str,
            input_dim: int,
            output_dim: int,
            rank: int,
            dtype: torch.dtype,
            device: torch.device,
            embeddings_tensor_dim: Optional[int] = None
    ) -> "LoRALayerWeights":
        """Build zero-filled placeholder weights of the requested shape.

        Tensors are pinned only for CPU tensors when pinning is available.
        """
        pin_memory = str(device) == "cpu" and is_pin_memory_available()
        lora_a = torch.zeros([input_dim, rank],
                             dtype=dtype,
                             device=device,
                             pin_memory=pin_memory)
        lora_b = torch.zeros([rank, output_dim],
                             dtype=dtype,
                             device=device,
                             pin_memory=pin_memory)
        if embeddings_tensor_dim:
            # Dummy embedding rows are random; 10 rows mirrors the
            # upstream placeholder size.
            embeddings_tensor = torch.rand(10,
                                           embeddings_tensor_dim,
                                           dtype=dtype,
                                           device=device,
                                           pin_memory=pin_memory)
        else:
            embeddings_tensor = None
        return cls(
            module_name,
            rank=rank,
            lora_alpha=1,
            lora_a=lora_a,
            lora_b=lora_b,
            embeddings_tensor=embeddings_tensor,
        )
|
91
|
+
|
92
|
+
|
93
|
+
class PackedLoRALayerWeights(LoRALayerWeights):
    """LoRA weights for packed layers (e.g. qkv_proj).

    Stores per-sub-module lists of ``lora_a``/``lora_b``/alphas, where a
    ``None`` entry means that sub-module carries no LoRA.
    """

    def __init__(
        self,
        module_name: str,
        rank: int,
        lora_alphas: List[Optional[int]],
        lora_a: List[Optional[torch.Tensor]],
        lora_b: List[Optional[torch.Tensor]],
        scaling: Optional[List[float]] = None,
    ) -> None:
        # lora_alpha=0 is a placeholder; per-sub-module alphas live in
        # self.lora_alphas and scalings in the self.scaling list below.
        super().__init__(
            module_name=module_name,
            rank=rank,
            lora_alpha=0,
            lora_a=lora_a,
            lora_b=lora_b,
            scaling=scaling,  # type: ignore
            embeddings_tensor=None,
        )
        self.lora_alphas = lora_alphas
        if scaling is None:
            # One scaling factor per packed sub-module.
            self.scaling = [  # type: ignore
                alpha / self.rank  # type: ignore # noqa
                for alpha in self.lora_alphas
            ]

    @classmethod
    def pack(
        cls, loras: List[Optional["LoRALayerWeights"]]
    ) -> "PackedLoRALayerWeights":
        """Pack a list of LoRAs into a single LoRA.

        If LoRA is None, it signifies that the submodule does not have a LoRA.
        """
        # Rank and module name are taken from the first present LoRA;
        # each present LoRA is optimized so its scaling is folded in.
        first_lora = next(lora for lora in loras if lora is not None)
        for lora in loras:
            if lora is not None:
                lora.optimize()
        return cls(
            first_lora.module_name,
            first_lora.rank,
            [None if lora is None else lora.lora_alpha for lora in loras],
            [None if lora is None else lora.lora_a for lora in loras],
            [None if lora is None else lora.lora_b for lora in loras],
            scaling=[
                None if lora is None else 1  # type: ignore
                for lora in loras
            ])

    def optimize(self) -> "PackedLoRALayerWeights":
        """Fold each sub-module's scaling into its ``lora_b`` matrix."""
        for idx in range(len(self.lora_b)):
            if self.lora_b[idx] is None or self.scaling[idx] == 1:  # type: ignore
                continue
            self.lora_b[idx] *= self.scaling[idx]  # type: ignore
            self.scaling[idx] = 1  # type: ignore
        return self

    @property
    def input_dim(self) -> int:
        # Not meaningful for a packed LoRA: each sub-module has its own dims.
        raise NotImplementedError()

    @property
    def output_dim(self) -> int:
        # Not meaningful for a packed LoRA: each sub-module has its own dims.
        raise NotImplementedError()

    @property
    def is_packed(self) -> bool:
        return True