vllm_npu-0.4.2-py3-none-any.whl
- vllm/__init__.py +23 -0
- vllm/_custom_ops.py +251 -0
- vllm/attention/__init__.py +13 -0
- vllm/attention/backends/__init__.py +0 -0
- vllm/attention/backends/abstract.py +127 -0
- vllm/attention/backends/flash_attn.py +271 -0
- vllm/attention/backends/flashinfer.py +220 -0
- vllm/attention/backends/rocm_flash_attn.py +374 -0
- vllm/attention/backends/torch_sdpa.py +250 -0
- vllm/attention/backends/xformers.py +393 -0
- vllm/attention/layer.py +56 -0
- vllm/attention/ops/__init__.py +0 -0
- vllm/attention/ops/paged_attn.py +216 -0
- vllm/attention/ops/prefix_prefill.py +792 -0
- vllm/attention/ops/triton_flash_attention.py +810 -0
- vllm/attention/selector.py +91 -0
- vllm/block.py +84 -0
- vllm/config.py +1225 -0
- vllm/core/__init__.py +0 -0
- vllm/core/block/__init__.py +0 -0
- vllm/core/block/block_table.py +295 -0
- vllm/core/block/common.py +199 -0
- vllm/core/block/cpu_gpu_block_allocator.py +228 -0
- vllm/core/block/interfaces.py +205 -0
- vllm/core/block/naive_block.py +318 -0
- vllm/core/block/prefix_caching_block.py +606 -0
- vllm/core/block_manager_v1.py +625 -0
- vllm/core/block_manager_v2.py +258 -0
- vllm/core/evictor_v1.py +105 -0
- vllm/core/evictor_v2.py +127 -0
- vllm/core/interfaces.py +113 -0
- vllm/core/policy.py +45 -0
- vllm/core/scheduler.py +1163 -0
- vllm/distributed/__init__.py +3 -0
- vllm/distributed/communication_op.py +237 -0
- vllm/distributed/device_communicators/__init__.py +0 -0
- vllm/distributed/device_communicators/custom_all_reduce.py +274 -0
- vllm/distributed/device_communicators/pynccl.py +287 -0
- vllm/distributed/device_communicators/pynccl_utils.py +66 -0
- vllm/distributed/parallel_state.py +339 -0
- vllm/distributed/utils.py +136 -0
- vllm/engine/__init__.py +0 -0
- vllm/engine/arg_utils.py +649 -0
- vllm/engine/async_llm_engine.py +737 -0
- vllm/engine/llm_engine.py +784 -0
- vllm/engine/metrics.py +368 -0
- vllm/engine/output_processor/__init__.py +0 -0
- vllm/engine/output_processor/interfaces.py +76 -0
- vllm/engine/output_processor/multi_step.py +142 -0
- vllm/engine/output_processor/single_step.py +284 -0
- vllm/engine/output_processor/stop_checker.py +101 -0
- vllm/engine/output_processor/util.py +19 -0
- vllm/entrypoints/__init__.py +0 -0
- vllm/entrypoints/api_server.py +119 -0
- vllm/entrypoints/llm.py +259 -0
- vllm/entrypoints/openai/__init__.py +0 -0
- vllm/entrypoints/openai/api_server.py +186 -0
- vllm/entrypoints/openai/cli_args.py +115 -0
- vllm/entrypoints/openai/protocol.py +460 -0
- vllm/entrypoints/openai/serving_chat.py +392 -0
- vllm/entrypoints/openai/serving_completion.py +347 -0
- vllm/entrypoints/openai/serving_engine.py +234 -0
- vllm/envs.py +217 -0
- vllm/executor/__init__.py +0 -0
- vllm/executor/cpu_executor.py +152 -0
- vllm/executor/distributed_gpu_executor.py +115 -0
- vllm/executor/executor_base.py +115 -0
- vllm/executor/gpu_executor.py +150 -0
- vllm/executor/multiproc_worker_utils.py +263 -0
- vllm/executor/neuron_executor.py +91 -0
- vllm/executor/ray_gpu_executor.py +327 -0
- vllm/executor/ray_utils.py +119 -0
- vllm/logger.py +153 -0
- vllm/logging/__init__.py +5 -0
- vllm/logging/formatter.py +15 -0
- vllm/lora/__init__.py +0 -0
- vllm/lora/fully_sharded_layers.py +262 -0
- vllm/lora/layers.py +1181 -0
- vllm/lora/lora.py +167 -0
- vllm/lora/models.py +645 -0
- vllm/lora/punica.py +213 -0
- vllm/lora/request.py +32 -0
- vllm/lora/utils.py +98 -0
- vllm/lora/worker_manager.py +251 -0
- vllm/model_executor/__init__.py +7 -0
- vllm/model_executor/guided_decoding/__init__.py +25 -0
- vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +70 -0
- vllm/model_executor/guided_decoding/outlines_decoding.py +130 -0
- vllm/model_executor/guided_decoding/outlines_logits_processors.py +184 -0
- vllm/model_executor/layers/__init__.py +0 -0
- vllm/model_executor/layers/activation.py +173 -0
- vllm/model_executor/layers/fused_moe/__init__.py +7 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +140 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/fused_moe.py +479 -0
- vllm/model_executor/layers/layernorm.py +71 -0
- vllm/model_executor/layers/linear.py +709 -0
- vllm/model_executor/layers/logits_processor.py +115 -0
- vllm/model_executor/layers/ops/__init__.py +0 -0
- vllm/model_executor/layers/ops/rand.py +157 -0
- vllm/model_executor/layers/ops/sample.py +406 -0
- vllm/model_executor/layers/quantization/__init__.py +35 -0
- vllm/model_executor/layers/quantization/aqlm.py +376 -0
- vllm/model_executor/layers/quantization/awq.py +175 -0
- vllm/model_executor/layers/quantization/base_config.py +97 -0
- vllm/model_executor/layers/quantization/fp8.py +265 -0
- vllm/model_executor/layers/quantization/gptq.py +224 -0
- vllm/model_executor/layers/quantization/gptq_marlin.py +438 -0
- vllm/model_executor/layers/quantization/marlin.py +227 -0
- vllm/model_executor/layers/quantization/schema.py +84 -0
- vllm/model_executor/layers/quantization/squeezellm.py +137 -0
- vllm/model_executor/layers/rejection_sampler.py +405 -0
- vllm/model_executor/layers/rotary_embedding.py +525 -0
- vllm/model_executor/layers/sampler.py +1051 -0
- vllm/model_executor/layers/vocab_parallel_embedding.py +155 -0
- vllm/model_executor/model_loader/__init__.py +30 -0
- vllm/model_executor/model_loader/loader.py +362 -0
- vllm/model_executor/model_loader/neuron.py +136 -0
- vllm/model_executor/model_loader/tensorizer.py +368 -0
- vllm/model_executor/model_loader/utils.py +41 -0
- vllm/model_executor/model_loader/weight_utils.py +372 -0
- vllm/model_executor/models/__init__.py +119 -0
- vllm/model_executor/models/baichuan.py +410 -0
- vllm/model_executor/models/bloom.py +327 -0
- vllm/model_executor/models/chatglm.py +386 -0
- vllm/model_executor/models/commandr.py +373 -0
- vllm/model_executor/models/dbrx.py +413 -0
- vllm/model_executor/models/decilm.py +122 -0
- vllm/model_executor/models/deepseek.py +438 -0
- vllm/model_executor/models/falcon.py +444 -0
- vllm/model_executor/models/gemma.py +393 -0
- vllm/model_executor/models/gpt2.py +266 -0
- vllm/model_executor/models/gpt_bigcode.py +274 -0
- vllm/model_executor/models/gpt_j.py +281 -0
- vllm/model_executor/models/gpt_neox.py +295 -0
- vllm/model_executor/models/internlm2.py +323 -0
- vllm/model_executor/models/jais.py +333 -0
- vllm/model_executor/models/llama.py +442 -0
- vllm/model_executor/models/llava.py +239 -0
- vllm/model_executor/models/minicpm.py +531 -0
- vllm/model_executor/models/mixtral.py +583 -0
- vllm/model_executor/models/mixtral_quant.py +404 -0
- vllm/model_executor/models/mpt.py +295 -0
- vllm/model_executor/models/olmo.py +356 -0
- vllm/model_executor/models/opt.py +349 -0
- vllm/model_executor/models/orion.py +319 -0
- vllm/model_executor/models/phi.py +300 -0
- vllm/model_executor/models/qwen.py +284 -0
- vllm/model_executor/models/qwen2.py +367 -0
- vllm/model_executor/models/qwen2_moe.py +447 -0
- vllm/model_executor/models/stablelm.py +301 -0
- vllm/model_executor/models/starcoder2.py +302 -0
- vllm/model_executor/models/xverse.py +366 -0
- vllm/model_executor/sampling_metadata.py +588 -0
- vllm/model_executor/utils.py +35 -0
- vllm/outputs.py +150 -0
- vllm/py.typed +2 -0
- vllm/sampling_params.py +340 -0
- vllm/sequence.py +766 -0
- vllm/spec_decode/__init__.py +0 -0
- vllm/spec_decode/batch_expansion.py +397 -0
- vllm/spec_decode/interfaces.py +73 -0
- vllm/spec_decode/metrics.py +191 -0
- vllm/spec_decode/multi_step_worker.py +203 -0
- vllm/spec_decode/ngram_worker.py +176 -0
- vllm/spec_decode/spec_decode_worker.py +472 -0
- vllm/spec_decode/top1_proposer.py +200 -0
- vllm/spec_decode/util.py +228 -0
- vllm/test_utils.py +41 -0
- vllm/transformers_utils/__init__.py +0 -0
- vllm/transformers_utils/config.py +58 -0
- vllm/transformers_utils/configs/__init__.py +16 -0
- vllm/transformers_utils/configs/chatglm.py +68 -0
- vllm/transformers_utils/configs/dbrx.py +278 -0
- vllm/transformers_utils/configs/falcon.py +87 -0
- vllm/transformers_utils/configs/jais.py +236 -0
- vllm/transformers_utils/configs/mpt.py +178 -0
- vllm/transformers_utils/detokenizer.py +313 -0
- vllm/transformers_utils/tokenizer.py +149 -0
- vllm/transformers_utils/tokenizer_group/__init__.py +33 -0
- vllm/transformers_utils/tokenizer_group/base_tokenizer_group.py +55 -0
- vllm/transformers_utils/tokenizer_group/ray_tokenizer_group.py +169 -0
- vllm/transformers_utils/tokenizer_group/tokenizer_group.py +78 -0
- vllm/transformers_utils/tokenizers/__init__.py +5 -0
- vllm/transformers_utils/tokenizers/baichuan.py +255 -0
- vllm/usage/__init__.py +0 -0
- vllm/usage/usage_lib.py +209 -0
- vllm/utils.py +677 -0
- vllm/worker/__init__.py +0 -0
- vllm/worker/cache_engine.py +105 -0
- vllm/worker/cpu_model_runner.py +346 -0
- vllm/worker/cpu_worker.py +321 -0
- vllm/worker/model_runner.py +1168 -0
- vllm/worker/neuron_model_runner.py +196 -0
- vllm/worker/neuron_worker.py +98 -0
- vllm/worker/worker.py +345 -0
- vllm/worker/worker_base.py +146 -0
- vllm_npu-0.4.2.dist-info/LICENSE +201 -0
- vllm_npu-0.4.2.dist-info/METADATA +173 -0
- vllm_npu-0.4.2.dist-info/RECORD +219 -0
- vllm_npu-0.4.2.dist-info/WHEEL +5 -0
- vllm_npu-0.4.2.dist-info/top_level.txt +1 -0
vllm/attention/ops/paged_attn.py
@@ -0,0 +1,216 @@
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple

import torch

from vllm import _custom_ops as ops
from vllm.attention.ops.prefix_prefill import context_attention_fwd

# Should be the same as PARTITION_SIZE in `paged_attention_v2_launcher`.
_PARTITION_SIZE = 512


@dataclass
class PagedAttentionMetadata:
    """Metadata for PagedAttention."""
    # (batch_size,). The length of each sequence, counting all tokens seen
    # so far.
    seq_lens_tensor: Optional[torch.Tensor]
    # Maximum sequence length in the batch.
    max_seq_len: Optional[int]
    # (batch_size, max_blocks_per_seq).
    # Block addresses per sequence (seq id -> list of physical blocks).
    # E.g., [0, 1, 2] means tokens are stored in the 0th, 1st, and 2nd blocks
    # in the KV cache. Each block can hold up to block_size tokens.
    # The 2nd dimension is padded up to max_blocks_per_seq when the run is
    # captured with CUDA graphs.
    block_tables: Optional[torch.Tensor]


class PagedAttention:

    @staticmethod
    def get_supported_head_sizes() -> List[int]:
        return [64, 80, 96, 112, 128, 256]

    @staticmethod
    def get_kv_cache_shape(
        num_blocks: int,
        block_size: int,
        num_kv_heads: int,
        head_size: int,
    ) -> Tuple[int, ...]:
        return (2, num_blocks, block_size * num_kv_heads * head_size)
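
    # Illustrative note (not in the original file): with num_blocks=1024,
    # block_size=16, num_kv_heads=8 and head_size=128, the shape above is
    # (2, 1024, 16 * 8 * 128) = (2, 1024, 16384); index 0 of the leading
    # dimension holds the key cache and index 1 the value cache, which
    # split_kv_cache below re-views into the kernel-friendly layouts.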

    @staticmethod
    def split_kv_cache(
        kv_cache: torch.Tensor,
        num_kv_heads: int,
        head_size: int,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # Pack the innermost key-cache dimension into 16-byte units
        # (x elements), matching the vectorized access pattern of the
        # paged attention kernels.
        x = 16 // kv_cache.element_size()
        num_blocks = kv_cache.shape[1]

        key_cache = kv_cache[0]
        key_cache = key_cache.view(num_blocks, num_kv_heads, head_size // x,
                                   -1, x)
        value_cache = kv_cache[1]
        value_cache = value_cache.view(num_blocks, num_kv_heads, head_size, -1)
        return key_cache, value_cache

    @staticmethod
    def write_to_paged_cache(
        key: torch.Tensor,
        value: torch.Tensor,
        key_cache: torch.Tensor,
        value_cache: torch.Tensor,
        slot_mapping: torch.Tensor,
        kv_cache_dtype: str,
        kv_scale: float,
    ) -> None:
        # slot_mapping gives each new token's flat position in the cache
        # (block_number * block_size + block_offset).
        ops.reshape_and_cache(
            key,
            value,
            key_cache,
            value_cache,
            slot_mapping.flatten(),
            kv_cache_dtype,
            kv_scale,
        )

    @staticmethod
    def forward_decode(
        query: torch.Tensor,
        key_cache: torch.Tensor,
        value_cache: torch.Tensor,
        block_tables: torch.Tensor,
        seq_lens: torch.Tensor,
        max_seq_len: int,
        kv_cache_dtype: str,
        num_kv_heads: int,
        scale: float,
        alibi_slopes: Optional[torch.Tensor],
        kv_scale: float,
    ) -> torch.Tensor:
        output = torch.empty_like(query)

        block_size = value_cache.shape[3]
        num_seqs, num_heads, head_size = query.shape
        max_num_partitions = ((max_seq_len + _PARTITION_SIZE - 1) //
                              _PARTITION_SIZE)
        # NOTE(woosuk): We use a simple heuristic to decide whether to use
        # PagedAttention V1 or V2. If the number of partitions is 1, we use
        # V1 to avoid the overhead of reduction. Also, if the number of
        # sequences or heads is large, we use V1 since there is enough work
        # to parallelize.
        # TODO(woosuk): Tune this heuristic.
        # For context len > 8192, use V2 kernel to avoid shared memory shortage.
        use_v1 = (max_seq_len <= 8192
                  and (max_num_partitions == 1 or num_seqs * num_heads > 512))
        if use_v1:
            # Run PagedAttention V1.
            ops.paged_attention_v1(
                output,
                query,
                key_cache,
                value_cache,
                num_kv_heads,
                scale,
                block_tables,
                seq_lens,
                block_size,
                max_seq_len,
                alibi_slopes,
                kv_cache_dtype,
                kv_scale,
            )
        else:
            # Run PagedAttention V2.
            assert _PARTITION_SIZE % block_size == 0
            tmp_output = torch.empty(
                size=(num_seqs, num_heads, max_num_partitions, head_size),
                dtype=output.dtype,
                device=output.device,
            )
            exp_sums = torch.empty(
                size=(num_seqs, num_heads, max_num_partitions),
                dtype=torch.float32,
                device=output.device,
            )
            max_logits = torch.empty_like(exp_sums)
            ops.paged_attention_v2(
                output,
                exp_sums,
                max_logits,
                tmp_output,
                query,
                key_cache,
                value_cache,
                num_kv_heads,
                scale,
                block_tables,
                seq_lens,
                block_size,
                max_seq_len,
                alibi_slopes,
                kv_cache_dtype,
                kv_scale,
            )
        return output
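
    # Worked example (not in the original file): for max_seq_len=4096,
    # max_num_partitions = ceil(4096 / 512) = 8, so V1 is used only when
    # num_seqs * num_heads > 512 (enough parallel work without partitioning);
    # otherwise V2 splits each sequence into up to 8 partitions and reduces
    # the per-partition results (tmp_output, exp_sums, max_logits) at the end.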

    @staticmethod
    def forward_prefix(
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        key_cache: torch.Tensor,
        value_cache: torch.Tensor,
        block_tables: torch.Tensor,
        subquery_start_loc: torch.Tensor,
        seq_lens_tensor: torch.Tensor,
        context_lens: torch.Tensor,
        max_query_len: int,
        alibi_slopes: Optional[torch.Tensor],
        sliding_window: Optional[int],
    ) -> torch.Tensor:
        output = torch.empty_like(query)
        context_attention_fwd(
            query,
            key,
            value,
            output,
            key_cache,
            value_cache,
            block_tables,
            # subquery_start_loc is (batch_size + 1,)
            subquery_start_loc[:-1],
            seq_lens_tensor,
            context_lens,
            max_query_len,
            alibi_slopes,
            sliding_window,
        )
        return output
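
    # Note (not in the original file): forward_prefix covers prefill runs
    # whose earlier context already sits in the paged cache (e.g., prefix
    # caching), so context_attention_fwd attends over both the fresh key and
    # value tensors and the cached blocks referenced by block_tables.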

    @staticmethod
    def swap_blocks(
        src_kv_cache: torch.Tensor,
        dst_kv_cache: torch.Tensor,
        src_to_dst: Dict[int, int],
    ) -> None:
        # Move whole KV blocks between two caches (e.g., GPU <-> CPU swap)
        # according to the src -> dst block-number mapping.
        src_key_cache = src_kv_cache[0]
        dst_key_cache = dst_kv_cache[0]
        ops.swap_blocks(src_key_cache, dst_key_cache, src_to_dst)

        src_value_cache = src_kv_cache[1]
        dst_value_cache = dst_kv_cache[1]
        ops.swap_blocks(src_value_cache, dst_value_cache, src_to_dst)

    @staticmethod
    def copy_blocks(
        kv_caches: List[torch.Tensor],
        src_to_dists: Dict[int, List[int]],
    ) -> None:
        # Duplicate blocks inside the caches, e.g., on copy-on-write when a
        # sequence forks during beam search.
        key_caches = [kv_cache[0] for kv_cache in kv_caches]
        value_caches = [kv_cache[1] for kv_cache in kv_caches]
        ops.copy_blocks(key_caches, value_caches, src_to_dists)
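
A single decode step composes the methods above: new key/value vectors are
written into the paged cache, then attention runs over the block tables. A
minimal sketch (not part of the wheel; the shapes, slot values, and the
fp16/CUDA setup are illustrative assumptions):

import torch

from vllm.attention.ops.paged_attn import PagedAttention

num_blocks, block_size = 1024, 16
num_seqs, num_heads, num_kv_heads, head_size = 4, 8, 8, 128

# Allocate one flat KV cache, then view it as the kernel layouts.
kv_cache = torch.zeros(
    PagedAttention.get_kv_cache_shape(num_blocks, block_size,
                                      num_kv_heads, head_size),
    dtype=torch.float16, device="cuda")
key_cache, value_cache = PagedAttention.split_kv_cache(
    kv_cache, num_kv_heads, head_size)

# One decode step: one new token per sequence, written to assumed slots
# (slot = block_number * block_size + block_offset).
key = torch.randn(num_seqs, num_kv_heads, head_size,
                  dtype=torch.float16, device="cuda")
value = torch.randn_like(key)
slot_mapping = torch.tensor([0, 16, 32, 48], device="cuda")
PagedAttention.write_to_paged_cache(key, value, key_cache, value_cache,
                                    slot_mapping, "auto", 1.0)

query = torch.randn(num_seqs, num_heads, head_size,
                    dtype=torch.float16, device="cuda")
block_tables = torch.tensor([[0], [1], [2], [3]],
                            dtype=torch.int32, device="cuda")
seq_lens = torch.ones(num_seqs, dtype=torch.int32, device="cuda")
out = PagedAttention.forward_decode(
    query, key_cache, value_cache, block_tables, seq_lens,
    max_seq_len=1, kv_cache_dtype="auto", num_kv_heads=num_kv_heads,
    scale=head_size**-0.5, alibi_slopes=None, kv_scale=1.0)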