vllm_npu-0.4.2-py3-none-any.whl
- vllm/__init__.py +23 -0
- vllm/_custom_ops.py +251 -0
- vllm/attention/__init__.py +13 -0
- vllm/attention/backends/__init__.py +0 -0
- vllm/attention/backends/abstract.py +127 -0
- vllm/attention/backends/flash_attn.py +271 -0
- vllm/attention/backends/flashinfer.py +220 -0
- vllm/attention/backends/rocm_flash_attn.py +374 -0
- vllm/attention/backends/torch_sdpa.py +250 -0
- vllm/attention/backends/xformers.py +393 -0
- vllm/attention/layer.py +56 -0
- vllm/attention/ops/__init__.py +0 -0
- vllm/attention/ops/paged_attn.py +216 -0
- vllm/attention/ops/prefix_prefill.py +792 -0
- vllm/attention/ops/triton_flash_attention.py +810 -0
- vllm/attention/selector.py +91 -0
- vllm/block.py +84 -0
- vllm/config.py +1225 -0
- vllm/core/__init__.py +0 -0
- vllm/core/block/__init__.py +0 -0
- vllm/core/block/block_table.py +295 -0
- vllm/core/block/common.py +199 -0
- vllm/core/block/cpu_gpu_block_allocator.py +228 -0
- vllm/core/block/interfaces.py +205 -0
- vllm/core/block/naive_block.py +318 -0
- vllm/core/block/prefix_caching_block.py +606 -0
- vllm/core/block_manager_v1.py +625 -0
- vllm/core/block_manager_v2.py +258 -0
- vllm/core/evictor_v1.py +105 -0
- vllm/core/evictor_v2.py +127 -0
- vllm/core/interfaces.py +113 -0
- vllm/core/policy.py +45 -0
- vllm/core/scheduler.py +1163 -0
- vllm/distributed/__init__.py +3 -0
- vllm/distributed/communication_op.py +237 -0
- vllm/distributed/device_communicators/__init__.py +0 -0
- vllm/distributed/device_communicators/custom_all_reduce.py +274 -0
- vllm/distributed/device_communicators/pynccl.py +287 -0
- vllm/distributed/device_communicators/pynccl_utils.py +66 -0
- vllm/distributed/parallel_state.py +339 -0
- vllm/distributed/utils.py +136 -0
- vllm/engine/__init__.py +0 -0
- vllm/engine/arg_utils.py +649 -0
- vllm/engine/async_llm_engine.py +737 -0
- vllm/engine/llm_engine.py +784 -0
- vllm/engine/metrics.py +368 -0
- vllm/engine/output_processor/__init__.py +0 -0
- vllm/engine/output_processor/interfaces.py +76 -0
- vllm/engine/output_processor/multi_step.py +142 -0
- vllm/engine/output_processor/single_step.py +284 -0
- vllm/engine/output_processor/stop_checker.py +101 -0
- vllm/engine/output_processor/util.py +19 -0
- vllm/entrypoints/__init__.py +0 -0
- vllm/entrypoints/api_server.py +119 -0
- vllm/entrypoints/llm.py +259 -0
- vllm/entrypoints/openai/__init__.py +0 -0
- vllm/entrypoints/openai/api_server.py +186 -0
- vllm/entrypoints/openai/cli_args.py +115 -0
- vllm/entrypoints/openai/protocol.py +460 -0
- vllm/entrypoints/openai/serving_chat.py +392 -0
- vllm/entrypoints/openai/serving_completion.py +347 -0
- vllm/entrypoints/openai/serving_engine.py +234 -0
- vllm/envs.py +217 -0
- vllm/executor/__init__.py +0 -0
- vllm/executor/cpu_executor.py +152 -0
- vllm/executor/distributed_gpu_executor.py +115 -0
- vllm/executor/executor_base.py +115 -0
- vllm/executor/gpu_executor.py +150 -0
- vllm/executor/multiproc_worker_utils.py +263 -0
- vllm/executor/neuron_executor.py +91 -0
- vllm/executor/ray_gpu_executor.py +327 -0
- vllm/executor/ray_utils.py +119 -0
- vllm/logger.py +153 -0
- vllm/logging/__init__.py +5 -0
- vllm/logging/formatter.py +15 -0
- vllm/lora/__init__.py +0 -0
- vllm/lora/fully_sharded_layers.py +262 -0
- vllm/lora/layers.py +1181 -0
- vllm/lora/lora.py +167 -0
- vllm/lora/models.py +645 -0
- vllm/lora/punica.py +213 -0
- vllm/lora/request.py +32 -0
- vllm/lora/utils.py +98 -0
- vllm/lora/worker_manager.py +251 -0
- vllm/model_executor/__init__.py +7 -0
- vllm/model_executor/guided_decoding/__init__.py +25 -0
- vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +70 -0
- vllm/model_executor/guided_decoding/outlines_decoding.py +130 -0
- vllm/model_executor/guided_decoding/outlines_logits_processors.py +184 -0
- vllm/model_executor/layers/__init__.py +0 -0
- vllm/model_executor/layers/activation.py +173 -0
- vllm/model_executor/layers/fused_moe/__init__.py +7 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +140 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/fused_moe.py +479 -0
- vllm/model_executor/layers/layernorm.py +71 -0
- vllm/model_executor/layers/linear.py +709 -0
- vllm/model_executor/layers/logits_processor.py +115 -0
- vllm/model_executor/layers/ops/__init__.py +0 -0
- vllm/model_executor/layers/ops/rand.py +157 -0
- vllm/model_executor/layers/ops/sample.py +406 -0
- vllm/model_executor/layers/quantization/__init__.py +35 -0
- vllm/model_executor/layers/quantization/aqlm.py +376 -0
- vllm/model_executor/layers/quantization/awq.py +175 -0
- vllm/model_executor/layers/quantization/base_config.py +97 -0
- vllm/model_executor/layers/quantization/fp8.py +265 -0
- vllm/model_executor/layers/quantization/gptq.py +224 -0
- vllm/model_executor/layers/quantization/gptq_marlin.py +438 -0
- vllm/model_executor/layers/quantization/marlin.py +227 -0
- vllm/model_executor/layers/quantization/schema.py +84 -0
- vllm/model_executor/layers/quantization/squeezellm.py +137 -0
- vllm/model_executor/layers/rejection_sampler.py +405 -0
- vllm/model_executor/layers/rotary_embedding.py +525 -0
- vllm/model_executor/layers/sampler.py +1051 -0
- vllm/model_executor/layers/vocab_parallel_embedding.py +155 -0
- vllm/model_executor/model_loader/__init__.py +30 -0
- vllm/model_executor/model_loader/loader.py +362 -0
- vllm/model_executor/model_loader/neuron.py +136 -0
- vllm/model_executor/model_loader/tensorizer.py +368 -0
- vllm/model_executor/model_loader/utils.py +41 -0
- vllm/model_executor/model_loader/weight_utils.py +372 -0
- vllm/model_executor/models/__init__.py +119 -0
- vllm/model_executor/models/baichuan.py +410 -0
- vllm/model_executor/models/bloom.py +327 -0
- vllm/model_executor/models/chatglm.py +386 -0
- vllm/model_executor/models/commandr.py +373 -0
- vllm/model_executor/models/dbrx.py +413 -0
- vllm/model_executor/models/decilm.py +122 -0
- vllm/model_executor/models/deepseek.py +438 -0
- vllm/model_executor/models/falcon.py +444 -0
- vllm/model_executor/models/gemma.py +393 -0
- vllm/model_executor/models/gpt2.py +266 -0
- vllm/model_executor/models/gpt_bigcode.py +274 -0
- vllm/model_executor/models/gpt_j.py +281 -0
- vllm/model_executor/models/gpt_neox.py +295 -0
- vllm/model_executor/models/internlm2.py +323 -0
- vllm/model_executor/models/jais.py +333 -0
- vllm/model_executor/models/llama.py +442 -0
- vllm/model_executor/models/llava.py +239 -0
- vllm/model_executor/models/minicpm.py +531 -0
- vllm/model_executor/models/mixtral.py +583 -0
- vllm/model_executor/models/mixtral_quant.py +404 -0
- vllm/model_executor/models/mpt.py +295 -0
- vllm/model_executor/models/olmo.py +356 -0
- vllm/model_executor/models/opt.py +349 -0
- vllm/model_executor/models/orion.py +319 -0
- vllm/model_executor/models/phi.py +300 -0
- vllm/model_executor/models/qwen.py +284 -0
- vllm/model_executor/models/qwen2.py +367 -0
- vllm/model_executor/models/qwen2_moe.py +447 -0
- vllm/model_executor/models/stablelm.py +301 -0
- vllm/model_executor/models/starcoder2.py +302 -0
- vllm/model_executor/models/xverse.py +366 -0
- vllm/model_executor/sampling_metadata.py +588 -0
- vllm/model_executor/utils.py +35 -0
- vllm/outputs.py +150 -0
- vllm/py.typed +2 -0
- vllm/sampling_params.py +340 -0
- vllm/sequence.py +766 -0
- vllm/spec_decode/__init__.py +0 -0
- vllm/spec_decode/batch_expansion.py +397 -0
- vllm/spec_decode/interfaces.py +73 -0
- vllm/spec_decode/metrics.py +191 -0
- vllm/spec_decode/multi_step_worker.py +203 -0
- vllm/spec_decode/ngram_worker.py +176 -0
- vllm/spec_decode/spec_decode_worker.py +472 -0
- vllm/spec_decode/top1_proposer.py +200 -0
- vllm/spec_decode/util.py +228 -0
- vllm/test_utils.py +41 -0
- vllm/transformers_utils/__init__.py +0 -0
- vllm/transformers_utils/config.py +58 -0
- vllm/transformers_utils/configs/__init__.py +16 -0
- vllm/transformers_utils/configs/chatglm.py +68 -0
- vllm/transformers_utils/configs/dbrx.py +278 -0
- vllm/transformers_utils/configs/falcon.py +87 -0
- vllm/transformers_utils/configs/jais.py +236 -0
- vllm/transformers_utils/configs/mpt.py +178 -0
- vllm/transformers_utils/detokenizer.py +313 -0
- vllm/transformers_utils/tokenizer.py +149 -0
- vllm/transformers_utils/tokenizer_group/__init__.py +33 -0
- vllm/transformers_utils/tokenizer_group/base_tokenizer_group.py +55 -0
- vllm/transformers_utils/tokenizer_group/ray_tokenizer_group.py +169 -0
- vllm/transformers_utils/tokenizer_group/tokenizer_group.py +78 -0
- vllm/transformers_utils/tokenizers/__init__.py +5 -0
- vllm/transformers_utils/tokenizers/baichuan.py +255 -0
- vllm/usage/__init__.py +0 -0
- vllm/usage/usage_lib.py +209 -0
- vllm/utils.py +677 -0
- vllm/worker/__init__.py +0 -0
- vllm/worker/cache_engine.py +105 -0
- vllm/worker/cpu_model_runner.py +346 -0
- vllm/worker/cpu_worker.py +321 -0
- vllm/worker/model_runner.py +1168 -0
- vllm/worker/neuron_model_runner.py +196 -0
- vllm/worker/neuron_worker.py +98 -0
- vllm/worker/worker.py +345 -0
- vllm/worker/worker_base.py +146 -0
- vllm_npu-0.4.2.dist-info/LICENSE +201 -0
- vllm_npu-0.4.2.dist-info/METADATA +173 -0
- vllm_npu-0.4.2.dist-info/RECORD +219 -0
- vllm_npu-0.4.2.dist-info/WHEEL +5 -0
- vllm_npu-0.4.2.dist-info/top_level.txt +1 -0
vllm/lora/layers.py
ADDED
@@ -0,0 +1,1181 @@
# pylint: disable=unused-argument
import math
from dataclasses import dataclass
from typing import TYPE_CHECKING, List, Optional, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import PretrainedConfig

from vllm.config import LoRAConfig
from vllm.distributed import (get_tensor_model_parallel_rank,
                              get_tensor_model_parallel_world_size,
                              split_tensor_along_last_dim,
                              tensor_model_parallel_all_gather,
                              tensor_model_parallel_all_reduce,
                              tensor_model_parallel_gather)
from vllm.distributed.utils import divide
from vllm.lora.punica import add_lora, add_lora_slice, bgmv
from vllm.model_executor.layers.linear import (ColumnParallelLinear,
                                               MergedColumnParallelLinear,
                                               QKVParallelLinear,
                                               RowParallelLinear)
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.vocab_parallel_embedding import (
    VocabParallelEmbedding)

if TYPE_CHECKING:
    pass


def _get_lora_device(base_layer: nn.Module) -> torch.device:
    # code borrowed from https://github.com/fmmoret/vllm/blob/fm-support-lora-on-quantized-models/vllm/lora/layers.py#L34
    """Returns the device for where to place the LoRA tensors."""
    # unquantizedLinear
    if hasattr(base_layer, "weight"):
        return base_layer.weight.device
    # GPTQ/AWQ/SqueezeLLM
    elif hasattr(base_layer, "qweight"):
        return base_layer.qweight.device
    # marlin
    elif hasattr(base_layer, "B"):
        return base_layer.B.device
    else:
        raise ValueError(f"Unsupported base layer: {base_layer}")


def _not_fully_sharded_can_replace(can_replace):
    """
    decorator which adds the condition of not using fully sharded loras
    intended to wrap can_replace_layer()
    """

    def dec(*args, **kwargs):
        decorate = kwargs.pop('decorate') if 'decorate' in kwargs else True
        condition = (not kwargs['lora_config'].fully_sharded_loras
                     if decorate else True)
        return can_replace(*args, **kwargs) and condition

    return dec


def _apply_lora(
    x: torch.Tensor,
    lora_a_stacked: torch.Tensor,
    lora_b_stacked: torch.Tensor,
    indices: torch.Tensor,
    output: torch.Tensor,
):
    """Applies lora to each input.

    This method applies all loras to each input. It uses the
    indices vector to determine which lora yields the
    correct output. An index of -1 means no lora should be
    applied. This method adds the final lora results to the
    output.

    Input shapes:
        x:               (batch_size, hidden_dim)
        lora_a_stacked:  (num_loras, lora_rank, hidden_dim)
        lora_b_stacked:  (num_loras, output_dim, lora_rank)
        indices:         (batch_size)
        output:          (batch_size, output_dim)
    """
    org_output = output
    x = x.view(-1, x.shape[-1])
    output = output.view(-1, output.shape[-1])
    indices = indices.view(-1)
    add_lora(output, x, lora_a_stacked, lora_b_stacked, indices, 0, 1.0)
    return output.view_as(org_output)


def _apply_lora_packed_nslice(
    x: torch.Tensor,
    lora_a_stacked: Tuple[torch.Tensor, torch.Tensor, torch.Tensor],
    lora_b_stacked: Tuple[torch.Tensor, torch.Tensor, torch.Tensor],
    indices: torch.Tensor,
    output: torch.Tensor,
    output_slices: Tuple[int, ...],
):
    """Applies lora to each input.

    This method applies all loras to each input. It uses the
    indices vector to determine which lora yields the
    correct output. An index of -1 means no lora should be
    applied. This method adds the final lora results to the
    output.

    This method is used for layers that are composed of multiple sublayers
    (slices) packed together.

    Input shapes:
        x:                 (batch_size, hidden_dim)
        lora_a_stacked:    3 element tuple of (num_loras, lora_rank, hidden_dim)
        lora_b_stacked:    3 element tuple of (num_loras, output_dim, lora_rank)
        indices:           (batch_size)
        output:            (batch_size, q_slice_size + 2*kv_slice_size)
        output_slices:     n-1 element tuple of (slice_size...),
                           where n is number of slices
    """
    org_output = output
    x = x.view(-1, x.shape[-1])
    output = output.view(-1, output.shape[-1])
    indices = indices.view(-1)
    offset_left = 0
    for slice_idx in range(len(output_slices)):
        add_lora_slice(output, x, lora_a_stacked[slice_idx],
                       lora_b_stacked[slice_idx], indices, 0, 1.0, offset_left,
                       output_slices[slice_idx])
        offset_left += output_slices[slice_idx]
    return output.view_as(org_output)


@dataclass
class LoRAMapping:
    # Per every token in input_ids:
    index_mapping: Tuple[int, ...]
    # Per sampled token:
    prompt_mapping: Tuple[int, ...]

    def __post_init__(self):
        self.index_mapping = tuple(self.index_mapping)
        self.prompt_mapping = tuple(self.prompt_mapping)


class BaseLayerWithLoRA(nn.Module):

    def slice_lora_a(self, lora_a: torch.Tensor) -> torch.Tensor:
        """Slice lora a if splitting for tensor parallelism."""
        ...

    def slice_lora_b(self, lora_b: torch.Tensor) -> torch.Tensor:
        """Slice lora b if splitting with tensor parallelism."""
        ...

    def create_lora_weights(
            self,
            max_loras: int,
            lora_config: LoRAConfig,
            model_config: Optional[PretrainedConfig] = None) -> None:
        """Initializes lora matrices."""
        ...

    def reset_lora(self, index: int):
        """Resets the lora weights at index back to 0."""
        ...

    def set_lora(
        self,
        index: int,
        lora_a: torch.Tensor,
        lora_b: torch.Tensor,
        embeddings_tensor: Optional[torch.Tensor],
    ):
        """Overwrites lora tensors at index."""
        ...

    def set_mapping(
        self,
        base_indices: torch.Tensor,
        sampler_indices: torch.Tensor,
        sampler_indices_padded: torch.Tensor,
        embeddings_indices: torch.Tensor,
        indices_len: List[int],
    ):
        """Sets the mapping indices."""
        ...

    @classmethod
    def can_replace_layer(cls, source_layer: nn.Module,
                          lora_config: LoRAConfig, packed_modules_list: List,
                          model_config: Optional[PretrainedConfig]) -> bool:
        """Returns True if the layer can be replaced by this LoRA layer."""
        raise NotImplementedError


class VocabParallelEmbeddingWithLoRA(BaseLayerWithLoRA):

    def __init__(self, base_layer: VocabParallelEmbedding) -> None:
        super().__init__()
        self.base_layer = base_layer
        self.embeddings_slice: Optional[Tuple[int, int]]
        self.embeddings_weights: Optional[torch.Tensor]

    def create_lora_weights(
            self,
            max_loras: int,
            lora_config: LoRAConfig,
            model_config: Optional[PretrainedConfig] = None) -> None:

        lora_vocab_start_idx = self.base_layer.org_vocab_size
        weights_idx = None
        if self.base_layer.vocab_end_index > lora_vocab_start_idx:
            # We can start adding lora weights
            weights_idx = max(
                lora_vocab_start_idx - self.base_layer.vocab_start_index, 0)
            self.embeddings_slice = (self.base_layer.vocab_start_index -
                                     self.base_layer.org_vocab_size +
                                     weights_idx,
                                     self.base_layer.vocab_end_index -
                                     self.base_layer.org_vocab_size)
            self.embeddings_weights = self.base_layer.weight.data[weights_idx:]
            self.embeddings_weights.fill_(0)
        else:
            self.embeddings_slice = None
            self.embeddings_weights = None

        self.embeddings_tensors = torch.zeros(
            (
                max_loras,
                lora_config.lora_extra_vocab_size,
                self.base_layer.embedding_dim,
            ),
            dtype=self.base_layer.weight.dtype,
            device=self.base_layer.weight.device,
        )
        self.lora_a_stacked = torch.zeros(
            (
                max_loras,
                self.base_layer.org_vocab_size +
                lora_config.lora_extra_vocab_size,
                lora_config.max_lora_rank,
            ),
            dtype=lora_config.lora_dtype,
            device=self.base_layer.weight.device,
        )
        self.lora_b_stacked = torch.zeros(
            (
                max_loras,
                1,
                self.base_layer.embedding_dim,
                lora_config.max_lora_rank,
            ),
            dtype=lora_config.lora_dtype,
            device=self.base_layer.weight.device,
        )
        self.lora_a_stacked_2d = self.lora_a_stacked.view(
            self.lora_a_stacked.shape[0] * self.lora_a_stacked.shape[1],
            self.lora_a_stacked.shape[2],
        )
        # Lazily initialized.
        self.indices: torch.Tensor
        self.indices_len: List[int]
        self.embeddings_indices: torch.Tensor

    def reset_lora(self, index: int):
        self.lora_a_stacked[index] = 0
        self.lora_b_stacked[index] = 0
        self.embeddings_tensors[index] = 0

    def set_lora(
        self,
        index: int,
        lora_a: torch.Tensor,
        lora_b: torch.Tensor,
        embeddings_tensor: Optional[torch.Tensor],
    ):
        self.reset_lora(index)
        self.lora_a_stacked[index, :lora_a.shape[0], :lora_a.shape[1]].copy_(
            lora_a, non_blocking=True)
        self.lora_b_stacked[index,
                            0, :lora_b.shape[1], :lora_b.shape[0]].copy_(
                                lora_b.T, non_blocking=True)
        if embeddings_tensor is not None:
            self.embeddings_tensors[
                index, :embeddings_tensor.shape[0], :embeddings_tensor.
                shape[1]].copy_(embeddings_tensor, non_blocking=True)
            if self.embeddings_slice is not None:
                # TODO(yard1): Optimize this copy, we don't need to copy
                # everything, just the modified part
                embeddings = self.embeddings_tensors.view(
                    self.embeddings_tensors.shape[0] *
                    self.embeddings_tensors.shape[1],
                    self.embeddings_tensors.shape[2]
                )[self.embeddings_slice[0]:self.embeddings_slice[1]]
                assert self.embeddings_weights is not None
                self.embeddings_weights[:embeddings.shape[0]].copy_(embeddings)

    def set_mapping(
        self,
        base_indices: torch.Tensor,
        sampler_indices: torch.Tensor,
        sampler_indices_padded: torch.Tensor,
        embeddings_indices: torch.Tensor,
        indices_len: List[int],
    ):
        self.indices = base_indices
        self.embeddings_indices = embeddings_indices
        self.indices_len = indices_len

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        added_tokens_mask = x > self.base_layer.org_vocab_size - 1
        embedding_len = self.indices_len[3]
        indices = self.embeddings_indices[1][:embedding_len].view_as(x)
        full_lora_a_embeddings = F.embedding(
            x + indices,
            self.lora_a_stacked_2d,
        )
        indices = self.embeddings_indices[0][:embedding_len].view_as(x)
        full_output = self.base_layer.forward(
            x.add_(indices * added_tokens_mask))

        full_output_org = full_output
        if full_output.ndim == 3:
            full_output = full_output.view(
                full_output.shape[0] * full_output.shape[1], -1)
        if full_lora_a_embeddings.ndim == 3:
            full_lora_a_embeddings = full_lora_a_embeddings.view(
                full_lora_a_embeddings.shape[0] *
                full_lora_a_embeddings.shape[1], -1)
        bgmv(full_output, full_lora_a_embeddings, self.lora_b_stacked,
             self.indices[:self.indices_len[0]], 0, 1.0)
        return full_output.view_as(full_output_org)

    @classmethod
    def can_replace_layer(cls, source_layer: nn.Module,
                          lora_config: LoRAConfig, packed_modules_list: List,
                          model_config: Optional[PretrainedConfig]) -> bool:
        return type(source_layer) is VocabParallelEmbedding


class ColumnParallelLinearWithLoRA(BaseLayerWithLoRA):
    """
    LoRA on top of ColumnParallelLinear layer.

    LoRA B is sliced for tensor parallelism.
    """

    def __init__(self, base_layer: ColumnParallelLinear) -> None:
        super().__init__()
        self.base_layer = base_layer
        self.tp_size = get_tensor_model_parallel_world_size()
        self.input_size = self.base_layer.input_size
        self.output_size = self.base_layer.output_size_per_partition
        self.device = _get_lora_device(self.base_layer)

    def create_lora_weights(
            self,
            max_loras: int,
            lora_config: LoRAConfig,
            model_config: Optional[PretrainedConfig] = None) -> None:
        self.lora_config = lora_config
        self.tp_size = get_tensor_model_parallel_world_size()
        lora_a_output_size_per_partition = (
            lora_config.max_lora_rank if not lora_config.fully_sharded_loras
            else divide(lora_config.max_lora_rank, self.tp_size))
        self.lora_a_stacked = torch.zeros(
            max_loras,
            1,
            lora_a_output_size_per_partition,
            self.input_size,
            dtype=lora_config.lora_dtype,
            device=self.device,
        )
        self.lora_b_stacked = torch.zeros(
            max_loras,
            1,
            self.output_size,
            lora_config.max_lora_rank,
            dtype=lora_config.lora_dtype,
            device=self.device,
        )
        self.output_dim = self.lora_b_stacked.shape[2]

        # lazily initialized.
        self.indices: torch.Tensor
        self.indices_len: List[int]

    def reset_lora(self, index: int):
        self.lora_a_stacked[index] = 0
        self.lora_b_stacked[index] = 0

    def slice_lora_a(self, lora_a: torch.Tensor) -> torch.Tensor:
        return lora_a

    def slice_lora_b(self, lora_b: torch.Tensor) -> torch.Tensor:
        tensor_model_parallel_rank = get_tensor_model_parallel_rank()
        shard_size = self.output_dim
        start_idx = tensor_model_parallel_rank * shard_size
        end_idx = (tensor_model_parallel_rank + 1) * shard_size
        lora_b = lora_b[:, start_idx:end_idx]
        return lora_b

    def set_lora(
        self,
        index: int,
        lora_a: torch.Tensor,
        lora_b: torch.Tensor,
        embeddings_tensor: Optional[torch.Tensor],
    ):
        self.reset_lora(index)

        if self.tp_size > 1:
            lora_a = self.slice_lora_a(lora_a)
            lora_b = self.slice_lora_b(lora_b)

        self.lora_a_stacked[index,
                            0, :lora_a.shape[1], :lora_a.shape[0]].copy_(
                                lora_a.T, non_blocking=True)
        self.lora_b_stacked[index,
                            0, :lora_b.shape[1], :lora_b.shape[0]].copy_(
                                lora_b.T, non_blocking=True)

    def set_mapping(
        self,
        base_indices: torch.Tensor,
        sampler_indices: torch.Tensor,
        sampler_indices_padded: torch.Tensor,
        embeddings_indices: torch.Tensor,
        indices_len: List[int],
    ):
        self.indices = base_indices
        self.indices_len = indices_len

    def apply(self, x: torch.Tensor,
              bias: Optional[torch.Tensor]) -> torch.Tensor:
        output = self.base_layer.quant_method.apply(self.base_layer, x, bias)
        _apply_lora(
            x,
            self.lora_a_stacked,
            self.lora_b_stacked,
            self.indices[:self.indices_len[0]],
            output,
        )
        return output

    def forward(self, input_):
        """Forward of ColumnParallelLinear

        Args:
            input_: Tensor whose last dimension is `input_size`.

        Returns:
            - output
            - bias
        """
        bias = (self.base_layer.bias
                if not self.base_layer.skip_bias_add else None)

        # Matrix multiply.
        output_parallel = self.apply(input_, bias)
        if self.base_layer.gather_output:
            # All-gather across the partitions.
            output = tensor_model_parallel_all_gather(output_parallel)
        else:
            output = output_parallel
        output_bias = (self.base_layer.bias
                       if self.base_layer.skip_bias_add else None)
        return output, output_bias

    @classmethod
    @_not_fully_sharded_can_replace
    def can_replace_layer(cls, source_layer: nn.Module,
                          lora_config: LoRAConfig, packed_modules_list: List,
                          model_config: Optional[PretrainedConfig]) -> bool:
        return type(source_layer) is ColumnParallelLinear or (
            type(source_layer) is MergedColumnParallelLinear
            and len(packed_modules_list) == 1)


class MergedColumnParallelLinearWithLoRA(ColumnParallelLinearWithLoRA):
    """ColumnParallelLinear layer that is composed of 2 sublayers (slices)
    packed together (eg. gate_proj + up_proj -> gate_up_proj).

    This means we have 2 LoRAs, each applied to one half of the layer.

    Both slices must have the same size.
    """

    def __init__(self, base_layer: MergedColumnParallelLinear) -> None:
        super().__init__(base_layer)

    def create_lora_weights(
            self,
            max_loras: int,
            lora_config: LoRAConfig,
            model_config: Optional[PretrainedConfig] = None) -> None:
        self.lora_config = lora_config
        n_slices = 2
        if not (len(self.base_layer.output_sizes) == n_slices
                and self.base_layer.output_sizes[0]
                == self.base_layer.output_sizes[1]):
            raise ValueError(
                "LoRAColumnParallelLinear2Slice requires 2 slices with "
                "the same size.")
        self.tp_size = get_tensor_model_parallel_world_size()
        self.tp_rank = get_tensor_model_parallel_rank()

        lora_a_output_size_per_partition = (
            lora_config.max_lora_rank if not lora_config.fully_sharded_loras
            else divide(lora_config.max_lora_rank, self.tp_size))

        self.lora_a_stacked = tuple(
            torch.zeros(
                max_loras,
                1,
                lora_a_output_size_per_partition,
                self.input_size,
                dtype=lora_config.lora_dtype,
                device=self.device,
            ) for _ in range(n_slices))
        self.lora_b_stacked = tuple(
            torch.zeros(
                max_loras,
                1,
                self.output_size // 2,
                lora_config.max_lora_rank,
                dtype=lora_config.lora_dtype,
                device=self.device,
            ) for _ in range(n_slices))

        self.output_dim = self.lora_b_stacked[0].shape[2]
        # Lazily initialized.
        self.indices: torch.Tensor

    def reset_lora(self, index: int):
        self.lora_a_stacked[0][index] = 0
        self.lora_a_stacked[1][index] = 0
        self.lora_b_stacked[0][index] = 0
        self.lora_b_stacked[1][index] = 0

    def slice_lora_a(self, lora_a: List[torch.Tensor]) -> List[torch.Tensor]:
        return lora_a

    def slice_lora_b(self, lora_b: List[torch.Tensor]) -> List[torch.Tensor]:
        shard_size = self.output_dim
        start_idx = self.tp_rank * shard_size
        end_idx = (self.tp_rank + 1) * shard_size
        lora_b = [
            lora_b[0][:, start_idx:end_idx], lora_b[1][:, start_idx:end_idx]
        ]
        return lora_b

    def set_lora(
        self,
        index: int,
        lora_a: torch.Tensor,
        lora_b: torch.Tensor,
        embeddings_tensor: Optional[torch.Tensor],
    ):
        self.reset_lora(index)

        if self.tp_size > 1:
            lora_a = self.slice_lora_a(lora_a)
            lora_b = self.slice_lora_b(lora_b)

        if lora_a[0] is not None:
            self.lora_a_stacked[0][
                index, 0, :lora_a[0].shape[1], :lora_a[0].shape[0]].copy_(
                    lora_a[0].T, non_blocking=True)
            self.lora_b_stacked[0][
                index, 0, :lora_b[0].shape[1], :lora_b[0].shape[0]].copy_(
                    lora_b[0].T, non_blocking=True)
        if lora_a[1] is not None:
            self.lora_a_stacked[1][
                index, 0, :lora_a[1].shape[1], :lora_a[1].shape[0]].copy_(
                    lora_a[1].T, non_blocking=True)
            self.lora_b_stacked[1][
                index, 0, :lora_b[1].shape[1], :lora_b[1].shape[0]].copy_(
                    lora_b[1].T, non_blocking=True)

    def apply(self, x: torch.Tensor,
              bias: Optional[torch.Tensor]) -> torch.Tensor:
        output = self.base_layer.quant_method.apply(self.base_layer, x, bias)
        _apply_lora_packed_nslice(
            x,
            self.lora_a_stacked,
            self.lora_b_stacked,
            self.indices[:self.indices_len[0]],
            output,
            (self.output_dim, self.output_dim),
        )
        return output

    @classmethod
    @_not_fully_sharded_can_replace
    def can_replace_layer(cls, source_layer: nn.Module,
                          lora_config: LoRAConfig, packed_modules_list: List,
                          model_config: Optional[PretrainedConfig]) -> bool:
        return type(source_layer) is MergedColumnParallelLinear and len(
            packed_modules_list) == 2


class QKVParallelLinearWithLora(ColumnParallelLinearWithLoRA):
    """
    ColumnParallelLinear layer that is specifically designed for
    qkv_proj. Certain models, such as chtglm3 and baichuan-7b,
    only contains a single LoRA within their qkv_proj layer.

    During inference with Tensor Parallel, the weights of lora_b
    must be accurately partitioned according to the respective ranks.

    Q slice may have different shape than K and V slices (which both have
    the same shape).
    """

    def __init__(self, base_layer: QKVParallelLinear) -> None:
        super().__init__(base_layer)
        self.tp_size = get_tensor_model_parallel_world_size()
        self.q_proj_total_size = (self.base_layer.total_num_heads *
                                  self.base_layer.head_size)
        self.q_proj_shard_size = (self.base_layer.num_heads *
                                  self.base_layer.head_size)
        self.kv_proj_shard_size = (self.base_layer.num_kv_heads *
                                   self.base_layer.head_size)
        self.kv_proj_total_size = (self.base_layer.total_num_kv_heads *
                                   self.base_layer.head_size)

    def set_lora(
        self,
        index: int,
        lora_a: torch.Tensor,
        lora_b: torch.Tensor,
        embeddings_tensor: Optional[torch.Tensor],
    ):
        self.reset_lora(index)
        if self.tp_size > 1:
            tp_rank = get_tensor_model_parallel_rank()
            self.q_shard_id = tp_rank
            self.kv_shard_id = tp_rank // self.base_layer.num_kv_head_replicas
            lora_b_q = lora_b[:, self.q_proj_shard_size *
                              self.q_shard_id:self.q_proj_shard_size *
                              (self.q_shard_id + 1)]
            k_offset = self.q_proj_total_size
            lora_b_k = lora_b[:, k_offset + self.kv_proj_shard_size *
                              self.kv_shard_id:k_offset +
                              self.kv_proj_shard_size * (self.kv_shard_id + 1)]
            v_offset = k_offset + self.kv_proj_total_size
            lora_b_v = lora_b[:, v_offset + self.kv_proj_shard_size *
                              self.kv_shard_id:v_offset +
                              self.kv_proj_shard_size * (self.kv_shard_id + 1)]
            lora_b = torch.cat([lora_b_q, lora_b_k, lora_b_v], dim=1)

        self.lora_a_stacked[index,
                            0, :lora_a.shape[1], :lora_a.shape[0]].copy_(
                                lora_a.T, non_blocking=True)
        self.lora_b_stacked[index,
                            0, :lora_b.shape[1], :lora_b.shape[0]].copy_(
                                lora_b.T, non_blocking=True)

    @classmethod
    def can_replace_layer(cls, source_layer: nn.Module,
                          lora_config: LoRAConfig, packed_modules_list: List,
                          model_config: Optional[PretrainedConfig]) -> bool:
        return type(source_layer) is QKVParallelLinear and len(
            packed_modules_list) == 1


class MergedQKVParallelLinearWithLora(ColumnParallelLinearWithLoRA):
    """ColumnParallelLinear layer that is composed of 3 sublayers (slices)
    packed together in qkv proj fashion
    (q_proj + k_proj + v_proj -> qkv_proj).

    This means we have 3 LoRAs, each applied to one slice of the layer.

    Q slice may have different shape than K and V slices (which both have
    the same shape).
    """

    def __init__(self, base_layer: QKVParallelLinear) -> None:
        super().__init__(base_layer)

    def create_lora_weights(
            self,
            max_loras: int,
            lora_config: LoRAConfig,
            model_config: Optional[PretrainedConfig] = None) -> None:
        self.lora_config = lora_config
        self.tp_size = get_tensor_model_parallel_world_size()
        self.tp_rank = get_tensor_model_parallel_rank()
        self.q_proj_shard_size = (self.base_layer.num_heads *
                                  self.base_layer.head_size)
        self.kv_proj_shard_size = (self.base_layer.num_kv_heads *
                                   self.base_layer.head_size)
        self.q_shard_id = self.tp_rank
        self.kv_shard_id = self.tp_rank // self.base_layer.num_kv_head_replicas

        lora_a_output_size_per_partition = (
            lora_config.max_lora_rank if not lora_config.fully_sharded_loras
            else divide(lora_config.max_lora_rank, self.tp_size))
        # q, k, v
        self.lora_a_stacked = (
            torch.zeros(
                max_loras,
                1,
                lora_a_output_size_per_partition,
                self.input_size,
                dtype=lora_config.lora_dtype,
                device=self.device,
            ),
            torch.zeros(
                max_loras,
                1,
                lora_a_output_size_per_partition,
                self.input_size,
                dtype=lora_config.lora_dtype,
                device=self.device,
            ),
            torch.zeros(
                max_loras,
                1,
                lora_a_output_size_per_partition,
                self.input_size,
                dtype=lora_config.lora_dtype,
                device=self.device,
            ),
        )
        self.lora_b_stacked = (
            torch.zeros(
                max_loras,
                1,
                self.q_proj_shard_size,
                lora_config.max_lora_rank,
                dtype=lora_config.lora_dtype,
                device=self.device,
            ),
            torch.zeros(
                max_loras,
                1,
                self.kv_proj_shard_size,
                lora_config.max_lora_rank,
                dtype=lora_config.lora_dtype,
                device=self.device,
            ),
            torch.zeros(
                max_loras,
                1,
                self.kv_proj_shard_size,
                lora_config.max_lora_rank,
                dtype=lora_config.lora_dtype,
                device=self.device,
            ),
        )

        self.output_slices = (self.q_proj_shard_size, self.kv_proj_shard_size,
                              self.kv_proj_shard_size)
        self.packed_indices: Optional[torch.Tensor] = None
        self.standard_indices: Optional[torch.Tensor] = None
        # lazily initialized.
        self.indices_len: List[int]

    def reset_lora(self, index: int):
        self.lora_a_stacked[0][index] = 0
        self.lora_b_stacked[0][index] = 0
        self.lora_a_stacked[1][index] = 0
        self.lora_b_stacked[1][index] = 0
        self.lora_a_stacked[2][index] = 0
        self.lora_b_stacked[2][index] = 0

    def slice_lora_a(self, lora_a: List[torch.Tensor]) -> List[torch.Tensor]:
        return lora_a

    def slice_lora_b(self, lora_b: List[torch.Tensor]) -> List[torch.Tensor]:
        if lora_b[0] is not None:
            lora_b_q = lora_b[0][:, self.q_proj_shard_size *
                                 self.q_shard_id:self.q_proj_shard_size *
                                 (self.q_shard_id + 1)]
        if lora_b[1] is not None:
            lora_b_k = lora_b[1][:, self.kv_proj_shard_size *
                                 self.kv_shard_id:self.kv_proj_shard_size *
                                 (self.kv_shard_id + 1)]
        if lora_b[2] is not None:
            lora_b_v = lora_b[2][:, self.kv_proj_shard_size *
                                 self.kv_shard_id:self.kv_proj_shard_size *
                                 (self.kv_shard_id + 1)]
        lora_b = [lora_b_q, lora_b_k, lora_b_v]
        return lora_b

    def set_lora(
        self,
        index: int,
        lora_a: torch.Tensor,
        lora_b: torch.Tensor,
        embeddings_tensor: Optional[torch.Tensor],
    ):
        self.reset_lora(index)

        if self.tp_size > 1:
            lora_a = self.slice_lora_a(lora_a)
            lora_b = self.slice_lora_b(lora_b)

        if lora_b[0] is not None:
            lora_b_q = lora_b[0]
            self.lora_b_stacked[0][
                index, 0, :lora_b_q.shape[1], :lora_b_q.shape[0]].copy_(
                    lora_b_q.T, non_blocking=True)
        if lora_b[1] is not None:
            lora_b_k = lora_b[1]
            self.lora_b_stacked[1][
                index, 0, :lora_b_k.shape[1], :lora_b_k.shape[0]].copy_(
                    lora_b_k.T, non_blocking=True)
        if lora_b[2] is not None:
            lora_b_v = lora_b[2]
            self.lora_b_stacked[2][
                index, 0, :lora_b_v.shape[1], :lora_b_v.shape[0]].copy_(
                    lora_b_v.T, non_blocking=True)

        if lora_a[0] is not None:
            self.lora_a_stacked[0][
                index, 0, :lora_a[0].shape[1], :lora_a[0].shape[0]].copy_(
                    lora_a[0].T, non_blocking=True)
        if lora_a[1] is not None:
            self.lora_a_stacked[1][
                index, 0, :lora_a[1].shape[1], :lora_a[1].shape[0]].copy_(
                    lora_a[1].T, non_blocking=True)
        if lora_a[2] is not None:
            self.lora_a_stacked[2][
                index, 0, :lora_a[2].shape[1], :lora_a[2].shape[0]].copy_(
                    lora_a[2].T, non_blocking=True)

    def apply(self, x: torch.Tensor,
              bias: Optional[torch.Tensor]) -> torch.Tensor:
        output = self.base_layer.quant_method.apply(self.base_layer, x, bias)
        _apply_lora_packed_nslice(
            x,
            self.lora_a_stacked,
            self.lora_b_stacked,
            self.indices[:self.indices_len[0]],
            output,
            self.output_slices,
        )
        return output

    @classmethod
    @_not_fully_sharded_can_replace
    def can_replace_layer(cls, source_layer: nn.Module,
                          lora_config: LoRAConfig, packed_modules_list: List,
                          model_config: Optional[PretrainedConfig]) -> bool:
        return type(source_layer) is QKVParallelLinear and len(
            packed_modules_list) == 3


class RowParallelLinearWithLoRA(BaseLayerWithLoRA):

    def __init__(self, base_layer: RowParallelLinear) -> None:
        super().__init__()
        self.base_layer = base_layer
        self.input_size = self.base_layer.input_size_per_partition
        self.output_size = self.base_layer.output_size
        self.device = _get_lora_device(self.base_layer)

    def create_lora_weights(
            self,
            max_loras: int,
            lora_config: LoRAConfig,
            model_config: Optional[PretrainedConfig] = None) -> None:
        self.lora_config = lora_config
        self.tp_rank = get_tensor_model_parallel_rank()
        self.lora_a_stacked = torch.zeros(
            (
                max_loras,
                1,
                lora_config.max_lora_rank,
                self.input_size,
            ),
            dtype=lora_config.lora_dtype,
            device=self.device,
        )
        tp_size = get_tensor_model_parallel_world_size()
        lora_b_output_size_per_partition = (
            self.output_size if not lora_config.fully_sharded_loras else
            divide(self.output_size, tp_size))

        self.lora_b_stacked = torch.zeros(
            (
                max_loras,
                1,
                lora_b_output_size_per_partition,
                lora_config.max_lora_rank,
            ),
            dtype=lora_config.lora_dtype,
            device=self.device,
        )
        # Lazily initialized
        self.indices: torch.Tensor
        self.indices_len: List[int]

    def reset_lora(self, index: int):
        self.lora_a_stacked[index] = 0
        self.lora_b_stacked[index] = 0

    def slice_lora_a(self, lora_a: torch.Tensor) -> torch.Tensor:
        tensor_model_parallel_rank = get_tensor_model_parallel_rank()
        shard_size = self.input_size
        start_idx = tensor_model_parallel_rank * shard_size
        end_idx = (tensor_model_parallel_rank + 1) * shard_size
        lora_a = lora_a[start_idx:end_idx, :]
        return lora_a

    def slice_lora_b(self, lora_b: torch.Tensor) -> torch.Tensor:
        return lora_b

    def set_lora(
        self,
        index: int,
        lora_a: torch.Tensor,
        lora_b: torch.Tensor,
        embeddings_tensor: Optional[torch.Tensor],
    ):
        self.reset_lora(index)

        if self.base_layer.tp_size > 1:
            lora_a = self.slice_lora_a(lora_a)
            lora_b = self.slice_lora_b(lora_b)

        self.lora_a_stacked[index,
                            0, :lora_a.shape[1], :lora_a.shape[0]].copy_(
                                lora_a.T, non_blocking=True)
        self.lora_b_stacked[index,
                            0, :lora_b.shape[1], :lora_b.shape[0]].copy_(
                                lora_b.T, non_blocking=True)

    def set_mapping(
        self,
        base_indices: torch.Tensor,
        sampler_indices: torch.Tensor,
        sampler_indices_padded: torch.Tensor,
        embeddings_indices: torch.Tensor,
        indices_len: List[int],
    ):
        self.indices = base_indices
        self.indices_len = indices_len

    def apply(self, x: torch.Tensor) -> torch.Tensor:
        output = self.base_layer.quant_method.apply(self.base_layer, x)
        _apply_lora(
            x,
            self.lora_a_stacked,
            self.lora_b_stacked,
            self.indices[:self.indices_len[0]],
            output,
        )
        return output

    def forward(self, input_):
        """Forward of RowParallelLinear

        Args:
            input_: tensor whose last dimension is `input_size`. If
                    `input_is_parallel` is set, then the last dimension
                    is `input_size // tp_size`.

        Returns:
            - output
            - bias
        """
        # Set up backprop all-reduce.
        if self.base_layer.input_is_parallel:
            input_parallel = input_
        else:
            # TODO: simplify code below
            tp_rank = get_tensor_model_parallel_rank()
            splitted_input = split_tensor_along_last_dim(
                input_, num_partitions=self.base_layer.tp_size)
            input_parallel = splitted_input[tp_rank].contiguous()

        # Matrix multiply.
        output_parallel = self.apply(input_parallel)
        if self.base_layer.reduce_results and self.base_layer.tp_size > 1:
            output_ = tensor_model_parallel_all_reduce(output_parallel)
        else:
            output_ = output_parallel

        if not self.base_layer.skip_bias_add:
            output = (output_ + self.base_layer.bias
                      if self.base_layer.bias is not None else output_)
            output_bias = None
        else:
            output = output_
            output_bias = self.base_layer.bias
        return output, output_bias

    @property
    def weight(self):

        return self.base_layer.weight if hasattr(
            self.base_layer, "weight") else self.base_layer.qweight

    @classmethod
    @_not_fully_sharded_can_replace
    def can_replace_layer(cls, source_layer: nn.Module,
                          lora_config: LoRAConfig, packed_modules_list: List,
                          model_config: Optional[PretrainedConfig]) -> bool:
        return type(source_layer) is RowParallelLinear


class LogitsProcessorWithLoRA(BaseLayerWithLoRA):

    def __init__(
        self,
        base_layer: LogitsProcessor,
        hidden_size: int,
        dtype: torch.dtype,
        device: torch.device,
    ) -> None:
        super().__init__()
        self.base_layer = base_layer
        self.hidden_size = hidden_size
        self.dtype = dtype
        self.device = device

    @property
    def logits_as_input(self):
        return self.base_layer.logits_as_input

    @property
    def vocab_size(self):
        return self.base_layer.vocab_size

    @property
    def scale(self):
        return self.base_layer.scale

    @property
    def org_vocab_size(self):
        return self.base_layer.org_vocab_size

    @property
    def include_gpu_probs_tensor(self):
        return self.base_layer.include_gpu_probs_tensor

    def create_lora_weights(
        self,
        max_loras: int,
        lora_config: LoRAConfig,
        model_config: Optional[PretrainedConfig] = None,
    ) -> None:
        # Keep this in sync with csrc/punica/bgmv/bgmv_config.h
        if 32000 < self.base_layer.vocab_size > 128512:
            raise ValueError("When using LoRA, vocab size must be "
                             "32000 >= vocab_size <= 128512")
        self.lora_a_stacked = torch.zeros(
            (
                max_loras,
                1,
                lora_config.max_lora_rank,
                self.hidden_size,
            ),
            dtype=lora_config.lora_dtype,
            device=self.device,
        )
        self.lora_b_stacked = torch.zeros(
            (
                max_loras,
                1,
                # Pad for kernel compatibility
                math.ceil(self.base_layer.vocab_size /
                          lora_config.lora_vocab_padding_size) *
                lora_config.lora_vocab_padding_size,
                lora_config.max_lora_rank,
            ),
            dtype=lora_config.lora_dtype,
            device=self.device,
        )
        self.embeddings_tensors = torch.full(
            (max_loras, lora_config.lora_extra_vocab_size, self.hidden_size),
            fill_value=float("-inf"),
            dtype=self.dtype,
            device=self.device,
        )
        # Lazily initialized.
        self.indices: torch.Tensor
        self.indices_len: List[int]
        self.indices_padded: torch.Tensor

    def reset_lora(self, index: int):
        self.lora_a_stacked[index] = 0
        self.lora_b_stacked[index] = 0
        self.embeddings_tensors[index] = float("-inf")

    def set_lora(
        self,
        index: int,
        lora_a: torch.Tensor,
        lora_b: torch.Tensor,
        embeddings_tensor: Optional[torch.Tensor],
    ):
        self.reset_lora(index)
        self.lora_a_stacked[index,
                            0, :lora_a.shape[1], :lora_a.shape[0]].copy_(
                                lora_a.T, non_blocking=True)
        self.lora_b_stacked[index,
                            0, :lora_b.shape[1], :lora_b.shape[0]].copy_(
                                lora_b.T, non_blocking=True)
        if embeddings_tensor is not None:
            self.embeddings_tensors[
                index, :embeddings_tensor.shape[0], :embeddings_tensor.
                shape[1], ] = embeddings_tensor

    def set_mapping(
        self,
        base_indices: torch.Tensor,
        sampler_indices: torch.Tensor,
        sampler_indices_padded: torch.Tensor,
        embeddings_indices: torch.Tensor,
        indices_len: List[int],
    ):
        self.indices = sampler_indices
        self.indices_padded = sampler_indices_padded
        self.indices_len = indices_len

    def _get_logits(
        self,
        hidden_states: torch.Tensor,
        embedding: torch.Tensor,
        embedding_bias: Optional[torch.Tensor] = None,
    ) -> Optional[torch.Tensor]:
        # Get the logits for the next tokens.
        logits = torch.matmul(hidden_states, embedding.t())
        if embedding_bias is not None:
            logits += embedding_bias
        logits = tensor_model_parallel_gather(logits)
        if logits is None:
            return None

        lora_logits = torch.empty(
            self.embeddings_tensors.shape[0] + 1,
            self.embeddings_tensors.shape[1],
            hidden_states.shape[0],
            dtype=self.embeddings_tensors.dtype,
            device=self.embeddings_tensors.device,
        )
        torch.matmul(self.embeddings_tensors,
                     hidden_states.T,
                     out=lora_logits[:-1])
        lora_logits[-1] = float("-inf")
        lora_logits = lora_logits.mT
        lora_logits = (lora_logits.reshape(
            lora_logits.shape[0] * lora_logits.shape[1],
            lora_logits.shape[2],
        ).index_select(0,
                       self.indices_padded[:self.indices_len[2]]).nan_to_num_(
                           nan=float("-inf"),
                           posinf=float("inf"),
                           neginf=float("-inf")))
        logits[:,
               self.base_layer.org_vocab_size:self.base_layer.org_vocab_size +
               lora_logits.shape[1]] = lora_logits

        _apply_lora(
            hidden_states,
            self.lora_a_stacked,
            self.lora_b_stacked,
            self.indices[:self.indices_len[1]],
            logits,
        )

        # Remove paddings in vocab (if any).
        logits = logits[:, :self.base_layer.vocab_size]

        return logits

    def forward(self, *args, **kwargs):
        return type(self.base_layer).forward(self, *args, **kwargs)

    @classmethod
    def can_replace_layer(cls, source_layer: nn.Module,
                          lora_config: LoRAConfig, packed_modules_list: List,
                          model_config: Optional[PretrainedConfig]) -> bool:
        # Special handling for the LogitsProcessor.
        return False