divergent-beamsearch 0.1.7.tar.gz → 0.2.0.tar.gz
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- {divergent_beamsearch-0.1.7 → divergent_beamsearch-0.2.0}/PKG-INFO +1 -1
- {divergent_beamsearch-0.1.7 → divergent_beamsearch-0.2.0}/pyproject.toml +1 -1
- {divergent_beamsearch-0.1.7 → divergent_beamsearch-0.2.0}/src/divergent_beamsearch/algorithm.py +28 -9
- {divergent_beamsearch-0.1.7 → divergent_beamsearch-0.2.0}/tests/test_beamsearch.py +38 -4
- {divergent_beamsearch-0.1.7 → divergent_beamsearch-0.2.0}/.gitignore +0 -0
- {divergent_beamsearch-0.1.7 → divergent_beamsearch-0.2.0}/.python-version +0 -0
- {divergent_beamsearch-0.1.7 → divergent_beamsearch-0.2.0}/LICENCE +0 -0
- {divergent_beamsearch-0.1.7 → divergent_beamsearch-0.2.0}/README.md +0 -0
- {divergent_beamsearch-0.1.7 → divergent_beamsearch-0.2.0}/src/divergent_beamsearch/__init__.py +0 -0
- {divergent_beamsearch-0.1.7 → divergent_beamsearch-0.2.0}/uv.lock +0 -0
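
Before the hunks, a minimal sketch of how the main entry point touched by this release is called. The keyword names are taken from the 0.2.0 signature in the diff below; the import paths, the MultiChoicesParser construction, and the use of GPT-2 are assumptions borrowed from the test file, not documented API:

    from transformers import GPT2LMHeadModel, GPT2Tokenizer
    from divergent_beamsearch import divergent_beamsearch  # import path assumed from the package layout
    from multi_choices_parser import MultiChoicesParser    # dependency assumed from the tests

    model = GPT2LMHeadModel.from_pretrained("gpt2").eval()
    tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
    tokenizer.pad_token = tokenizer.eos_token  # mirrors the fixture change in this release
    input_ids = tokenizer.encode("The capital of France is", return_tensors="pt")

    # Assumption: the parser enumerates the tokenized allowed answers.
    parser = MultiChoicesParser([tokenizer([' Paris', ' Madrid']).input_ids])

    scores, solutions = divergent_beamsearch(
        input_ids=input_ids,
        model=model,
        beam_size=10,
        max_length=10,
        parser=parser,
        pad_token_id=tokenizer.eos_token_id,
        num_solutions=10,
        optimize_gpu_mem=True,  # new in 0.2.0
    )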
{divergent_beamsearch-0.1.7 → divergent_beamsearch-0.2.0}/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: divergent-beamsearch
-Version: 0.1.7
+Version: 0.2.0
 Summary: A variant of the beam search algorithm that focuses on finding answers that maximize the probability of generating an answer before diverging into another subject.
 License-File: LICENCE
 Requires-Python: >=3.11
{divergent_beamsearch-0.1.7 → divergent_beamsearch-0.2.0}/pyproject.toml
RENAMED
@@ -1,6 +1,6 @@
 [project]
 name = "divergent-beamsearch"
-version = "0.1.7"
+version = "0.2.0"
 description = "A variant of the beam search algorithm that focuses on finding answers that maximize the probability of generating an answer before diverging into another subject."
 readme = "README.md"
 requires-python = ">=3.11"
{divergent_beamsearch-0.1.7 → divergent_beamsearch-0.2.0}/src/divergent_beamsearch/algorithm.py
RENAMED
@@ -35,12 +35,17 @@ def apply_mask_tokens(pred : torch.Tensor, parsers_tokens):
     return pred[~pred.isinf().all(dim=-1)]
 
 
-def batched_inference_logits(model : GPT2LMHeadModel, input_ids : torch.Tensor,
+def batched_inference_logits(model : GPT2LMHeadModel, input_ids : torch.Tensor,
+                             attention_mask : torch.Tensor | None = None, batch_size : int = 32,
+                             to_cpu=False) -> torch.Tensor:
     logits = []
     if attention_mask is None:
         attention_mask = torch.ones_like(input_ids)
     for i in range(0, input_ids.shape[0], batch_size):
-
+        l = model(input_ids[i:i+batch_size], attention_mask=attention_mask[i:i+batch_size]).logits
+        if to_cpu:
+            l = l.cpu()
+        logits.append(l)
     return torch.cat(logits, dim=0)
 
 def select_mask(source : list, mask : list[bool]) -> list:
@@ -82,8 +87,18 @@ def index_reduce_lists(x : torch.Tensor, indices : list[list[int]], reduce_func=
         values.append(reduce_func(x[i, index], dim=-1))
     return torch.tensor(values, dtype=x.dtype, device=x.device, requires_grad=x.requires_grad)
 
+def pad_to_same_size(tensors : list[torch.Tensor], padding_value : int) -> torch.Tensor:
+    max_size = max(x.shape[-1] for x in tensors)
+    padded_tensors = []
+    for tensor in tensors:
+        pad = torch.full((tensor.shape[0], max_size - tensor.shape[1]), padding_value, dtype=torch.long)
+        padded_tensors.append(torch.cat([tensor, pad], dim=-1))
+    return torch.cat(padded_tensors, dim=0)
+
 @torch.no_grad()
-def divergent_beamsearch(input_ids : torch.Tensor, model : GPT2LMHeadModel, beam_size : int,
+def divergent_beamsearch(input_ids : torch.Tensor, model : GPT2LMHeadModel, beam_size : int,
+                         max_length : int, parser : Parser, pad_token_id : int, batch_size=32,
+                         num_solutions = None, end_symb=DEFAULT_END_SYMB, optimize_gpu_mem=True) -> tuple[torch.Tensor, torch.Tensor]:
     assert input_ids.shape[0] == 1, "Batch size must be 1"
     device = input_ids.device
     input_ids = input_ids.cpu()
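
The new pad_to_same_size helper right-pads each tensor along its last dimension to the longest one, then concatenates along dim 0. Note that the pad is always torch.long and allocated on the default device, consistent with divergent_beamsearch keeping its candidate tensors on CPU. A small worked example:

    import torch

    a = torch.tensor([[1, 2, 3]])        # shape (1, 3)
    b = torch.tensor([[4, 5, 6, 7, 8]])  # shape (1, 5)
    merged = pad_to_same_size([a, b], padding_value=0)
    # tensor([[1, 2, 3, 0, 0],
    #         [4, 5, 6, 7, 8]])  -- shape (2, 5)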
@@ -106,7 +121,7 @@ def divergent_beamsearch(input_ids : torch.Tensor, model : GPT2LMHeadModel, beam
     for _ in range(max_length):
         if len(input_ids_unfinished) == 0:
             break
-        pred = batched_inference_logits(model, input_ids_unfinished.to(device), batch_size=batch_size)[:, -1].cpu()
+        pred = batched_inference_logits(model, input_ids_unfinished.to(device), batch_size=batch_size, to_cpu=optimize_gpu_mem)[:, -1].cpu()
         parsers_tokens, can_end = get_parsers_tokens(parsers_unfinished, end_symb)
         logprobs = torch.log_softmax(pred, dim=-1)
         logprobs_filtered = apply_mask_tokens(logprobs, parsers_tokens)
@@ -130,8 +145,11 @@ def divergent_beamsearch(input_ids : torch.Tensor, model : GPT2LMHeadModel, beam
         scores_finished_current = scores_finished_current + log1mexp(logprob_other_ans)
         scores_finished = torch.cat([scores_finished, scores_finished_current])
         if len(solutions_finished_current):
-
-
+            if len(solutions_finished):
+                solutions_finished = pad_to_same_size([solutions_finished, solutions_finished_current],
+                                                      padding_value=pad_token_id)
+            else:
+                solutions_finished = solutions_finished_current
         if solutions_finished.numel():
             # Keep num_solutions best solutions in finished
             order = scores_finished.argsort(descending=True)
@@ -164,19 +182,20 @@ def set_slice_row(x : torch.Tensor, slices : torch.IntTensor, value) -> torch.Te
 @torch.no_grad()
 def divergent_logprob(input_ids : torch.Tensor, attention_mask : torch.Tensor | None, model : GPT2LMHeadModel,
                       parsers : Parser | list[Parser] | None, batch_size=32,
-                      start : int | torch.IntTensor = None, end_symb=DEFAULT_END_SYMB) -> torch.FloatTensor:
+                      start : int | torch.IntTensor = None, end_symb=DEFAULT_END_SYMB, optimize_gpu_mem=True) -> torch.FloatTensor:
     if start is None:
-        start =
+        start = 1
     if isinstance(start, int):
         start = torch.tensor([start]*input_ids.shape[0])
     assert start.shape[0] == input_ids.shape[0]
+    assert (start > 0).all()
     # -1 because next token offset
     start = start - 1
 
     if attention_mask is None:
         attention_mask = torch.ones_like(input_ids)
 
-    logits = batched_inference_logits(model, input_ids, attention_mask, batch_size).cpu()
+    logits = batched_inference_logits(model, input_ids, attention_mask, batch_size, to_cpu=optimize_gpu_mem).cpu()
     input_ids = input_ids.cpu()
     attention_mask = attention_mask.cpu()
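
Two behavioural notes on this hunk: start now defaults to 1 and must be strictly positive (the subsequent start - 1 next-token offset would otherwise address position -1), and the new optimize_gpu_mem flag is threaded into batched_inference_logits exactly as in divergent_beamsearch. A hedged call sketch, reusing model, tokenizer, and parser from the sketch at the top of this page:

    from divergent_beamsearch import divergent_logprob  # import path assumed

    inputs = tokenizer(["The capital of France is Paris"], return_tensors="pt", padding=True)
    logprob = divergent_logprob(
        inputs.input_ids,
        inputs.attention_mask,
        model,
        parser,
        optimize_gpu_mem=True,  # new in 0.2.0; set False to keep logits on the GPU
    )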
{divergent_beamsearch-0.1.7 → divergent_beamsearch-0.2.0}/tests/test_beamsearch.py
RENAMED
@@ -13,6 +13,7 @@ TEST_END_SYMBS = [DEFAULT_END_SYMB, 'tokenizer']
 def model_and_tokenizer():
     model = GPT2LMHeadModel.from_pretrained("gpt2")
     tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
+    tokenizer.pad_token = tokenizer.eos_token
     return model, tokenizer
 
 @pytest.fixture
@@ -32,6 +33,7 @@ def fakemodel_and_tokenizer():
     # Instantiate a model with the custom configuration
     model = GPT2LMHeadModel(config)
     model.eval()
+    tokenizer.pad_token = tokenizer.eos_token
 
     return model, tokenizer
 
@@ -44,11 +46,11 @@ def test_divergent_beamsearch(model_and_tokenizer, device, end_symb):
     model.to(device)
     prompt = "The capital of France is"
     input_ids = tokenizer.encode(prompt, return_tensors="pt").to(device)
-    beam_size =
+    beam_size = 10
     max_length = 10
     pad_token_id = tokenizer.eos_token_id
 
-    possible_answers = [' Paris', ' Madrid', ' Paris Hilton']
+    possible_answers = [' Paris', ' Madrid', ' Paris Hilton', ' Bri bra brouuu Mario Brooos']
     tokenized_answers = tokenizer(possible_answers).input_ids
 
     if end_symb == 'tokenizer':
@@ -62,6 +64,9 @@ def test_divergent_beamsearch(model_and_tokenizer, device, end_symb):
     logprob_paris_hilton = logprob_paris + logprob_hilton
     logprob_madrid = model(input_ids).logits.cpu().log_softmax(dim=-1)[0, -1, tokenized_answers[1][0]]
     logprob_paris_diverge = logprob_paris + log1mexp(logprob_hilton)
+    input_garbage = torch.tensor(input_ids.tolist()[0] + tokenized_answers[-1]).unsqueeze(0).to(device)
+    logsoftmax_garbage = model(input_garbage).logits.log_softmax(-1)
+    logprob_garbage = torch.gather(logsoftmax_garbage[:, 4:-1, :], 2, input_garbage[:, 5:, None]).squeeze(-1).sum(-1)
 
     scores, solutions = divergent_beamsearch(
         input_ids=input_ids,
@@ -70,7 +75,7 @@ def test_divergent_beamsearch(model_and_tokenizer, device, end_symb):
         max_length=max_length,
         parser=multi_choices_parser,
         pad_token_id=pad_token_id,
-        num_solutions=
+        num_solutions=beam_size,
         end_symb=end_symb
     )
     true_solutions = torch.nn.utils.rnn.pad_sequence([torch.tensor(ans) for ans in tokenized_answers], batch_first=True, padding_value=pad_token_id)
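
The logprob_garbage computation added in the @@ -62,6 +64,9 hunk above hard-codes offsets 4 and 5: logits at positions 4..L-2 score the continuation tokens at positions 5..L-1, which is only correct if the prompt tokenizes to exactly 5 tokens. That does hold for GPT-2's BPE here; a quick editorial sanity check (not part of the package):

    # 'The capital of France is' → ['The', ' capital', ' of', ' France', ' is']
    assert len(tokenizer.encode("The capital of France is")) == 5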
@@ -78,6 +83,7 @@ def test_divergent_beamsearch(model_and_tokenizer, device, end_symb):
     assert torch.isclose(scores[0], logprob_paris_diverge), "Beam search did not return the expected score"
     assert torch.isclose(scores[1], logprob_madrid), "Beam search did not return the expected score"
     assert torch.isclose(scores[2], logprob_paris_hilton), "Beam search did not return the expected score"
+    assert torch.isclose(scores[3], logprob_garbage), "Beam search did not return the expected score"
 
 
 @pytest.mark.parametrize("device", ['cpu', 'cuda'])
@@ -91,7 +97,6 @@ def test_divergent_logprob(fakemodel_and_tokenizer, device, end_symb):
         "The capital of France is Paris",
         "The top model Paris Hilton"
     ]
-    tokenizer.pad_token = tokenizer.eos_token
     inp = tokenizer(prompts, return_tensors="pt", padding=True)
     input_ids = inp.input_ids.to(device)
     attention_mask = inp.attention_mask.to(device)
@@ -196,3 +201,32 @@ def test_vanilla_beamsearch(model_and_tokenizer, device):
     assert np.isclose(
         scores.cpu().numpy(), np.array([-8.1361, -8.7745, -9.1053]), atol=0.0001
     ).all()
+
+@pytest.mark.parametrize("device", ['cpu', 'cuda'])
+@pytest.mark.parametrize("dtype", [torch.bfloat16, torch.float32])
+def test_element_wise_equivalence_divergent_logprob(fakemodel_and_tokenizer, device, dtype):
+    model, tokenizer = fakemodel_and_tokenizer
+    model.to(device)
+    model.to(dtype)
+
+    texts = [
+        'My name is Roger',
+        'The capital of Morocco is Rabat',
+        'Google is owned by Alphabet'
+    ]
+
+    multi_choices_parser = MultiChoicesParser([texts])
+
+    inputs = tokenizer(texts, return_tensors='pt', padding=True).to(device)
+
+    logprobs_global = divergent_logprob(inputs.input_ids, inputs.attention_mask, model, multi_choices_parser)
+
+    logprobs_individual = []
+
+    for text in texts:
+        inputs = tokenizer(text, return_tensors='pt', padding=True).to(device)
+        input_ids, attention_mask = inputs.input_ids, inputs.attention_mask
+        logprobs_individual.append(divergent_logprob(input_ids, attention_mask, model, multi_choices_parser))
+    logprobs_individual = torch.tensor(logprobs_individual)
+
+    assert (logprobs_individual == logprobs_global).all()
{divergent_beamsearch-0.1.7 → divergent_beamsearch-0.2.0}/.gitignore
RENAMED
File without changes
{divergent_beamsearch-0.1.7 → divergent_beamsearch-0.2.0}/.python-version
RENAMED
File without changes
{divergent_beamsearch-0.1.7 → divergent_beamsearch-0.2.0}/LICENCE
RENAMED
File without changes
{divergent_beamsearch-0.1.7 → divergent_beamsearch-0.2.0}/README.md
RENAMED
File without changes
{divergent_beamsearch-0.1.7 → divergent_beamsearch-0.2.0}/src/divergent_beamsearch/__init__.py
RENAMED
File without changes
{divergent_beamsearch-0.1.7 → divergent_beamsearch-0.2.0}/uv.lock
RENAMED
File without changes