divergent-beamsearch 0.1.8__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries; it is provided for informational purposes only.
divergent_beamsearch/algorithm.py

@@ -1,6 +1,9 @@
 import math
 import torch
-from transformers import GPT2LMHeadModel
+try:
+    from transformers import GPT2LMHeadModel
+except ImportError:
+    pass
 from multi_choices_parser import DEFAULT_END_SYMB
 
 
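The guarded import above makes transformers an optional dependency (it is also dropped from Requires-Dist in the METADATA diff below). Every annotation that used GPT2LMHeadModel becomes a string literal, so it is never evaluated and the module still imports when transformers is missing. A minimal sketch of the pattern, with a hypothetical score function (not part of the package):

try:
    from transformers import GPT2LMHeadModel  # optional dependency
except ImportError:
    pass  # the name stays undefined; harmless, see below

def score(model : "GPT2LMHeadModel") -> None:
    # The quoted annotation is never evaluated at import or call time,
    # so this module loads and runs without transformers installed.
    pass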
@@ -35,12 +38,17 @@ def apply_mask_tokens(pred : torch.Tensor, parsers_tokens):
     return pred[~pred.isinf().all(dim=-1)]
 
 
-def batched_inference_logits(model : GPT2LMHeadModel, input_ids : torch.Tensor, attention_mask : torch.Tensor | None = None, batch_size : int = 32) -> torch.Tensor:
+def batched_inference_logits(model : "GPT2LMHeadModel", input_ids : torch.Tensor,
+                             attention_mask : torch.Tensor | None = None, batch_size : int = 32,
+                             to_cpu=False) -> torch.Tensor:
     logits = []
     if attention_mask is None:
         attention_mask = torch.ones_like(input_ids)
     for i in range(0, input_ids.shape[0], batch_size):
-        logits.append(model(input_ids[i:i+batch_size], attention_mask=attention_mask[i:i+batch_size]).logits)
+        l = model(input_ids[i:i+batch_size], attention_mask=attention_mask[i:i+batch_size]).logits
+        if to_cpu:
+            l = l.cpu()
+        logits.append(l)
     return torch.cat(logits, dim=0)
 
 def select_mask(source : list, mask : list[bool]) -> list:
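The new to_cpu flag bounds peak device memory: each batch of logits is moved to the CPU before the final torch.cat, so the GPU holds at most one batch of logits instead of the full concatenated (num_sequences, seq_len, vocab) tensor. A self-contained sketch of the same pattern (batched_apply and the shapes are illustrative, not part of the package):

import torch

def batched_apply(fn, x : torch.Tensor, batch_size : int = 32, to_cpu : bool = False) -> torch.Tensor:
    # Same memory pattern as batched_inference_logits: with to_cpu=True each
    # chunk leaves the accelerator before torch.cat, so peak device memory is
    # one chunk of output rather than the whole result.
    out = []
    for i in range(0, x.shape[0], batch_size):
        y = fn(x[i:i+batch_size])
        if to_cpu:
            y = y.cpu()
        out.append(y)
    return torch.cat(out, dim=0)

x = torch.randn(100, 8)
assert batched_apply(torch.tanh, x, to_cpu=True).shape == (100, 8)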
@@ -91,7 +99,9 @@ def pad_to_same_size(tensors : list[torch.Tensor], padding_value : int) -> torch
     return torch.cat(padded_tensors, dim=0)
 
 @torch.no_grad()
-def divergent_beamsearch(input_ids : torch.Tensor, model : GPT2LMHeadModel, beam_size : int, max_length : int, parser : Parser, pad_token_id : int, batch_size=32, num_solutions = None, end_symb=DEFAULT_END_SYMB) -> tuple[torch.Tensor, torch.Tensor]:
+def divergent_beamsearch(input_ids : torch.Tensor, model : "GPT2LMHeadModel", beam_size : int,
+                         max_length : int, parser : Parser, pad_token_id : int, batch_size=32,
+                         num_solutions = None, end_symb=DEFAULT_END_SYMB, optimize_gpu_mem=True) -> tuple[torch.Tensor, torch.Tensor]:
     assert input_ids.shape[0] == 1, "Batch size must be 1"
     device = input_ids.device
     input_ids = input_ids.cpu()
@@ -114,7 +124,7 @@ def divergent_beamsearch(input_ids : torch.Tensor, model : GPT2LMHeadModel, beam
     for _ in range(max_length):
         if len(input_ids_unfinished) == 0:
             break
-        pred = batched_inference_logits(model, input_ids_unfinished.to(device), batch_size=batch_size)[:, -1].cpu()
+        pred = batched_inference_logits(model, input_ids_unfinished.to(device), batch_size=batch_size, to_cpu=optimize_gpu_mem)[:, -1].cpu()
         parsers_tokens, can_end = get_parsers_tokens(parsers_unfinished, end_symb)
         logprobs = torch.log_softmax(pred, dim=-1)
         logprobs_filtered = apply_mask_tokens(logprobs, parsers_tokens)
@@ -173,21 +183,22 @@ def set_slice_row(x : torch.Tensor, slices : torch.IntTensor, value) -> torch.Te
         x[i].index_fill_(0, indices[i], 0)
 
 @torch.no_grad()
-def divergent_logprob(input_ids : torch.Tensor, attention_mask : torch.Tensor | None, model : GPT2LMHeadModel,
+def divergent_logprob(input_ids : torch.Tensor, attention_mask : torch.Tensor | None, model : "GPT2LMHeadModel",
                       parsers : Parser | list[Parser] | None, batch_size=32,
-                      start : int | torch.IntTensor = None, end_symb=DEFAULT_END_SYMB) -> torch.FloatTensor:
+                      start : int | torch.IntTensor = None, end_symb=DEFAULT_END_SYMB, optimize_gpu_mem=True) -> torch.FloatTensor:
     if start is None:
-        start = 0
+        start = 1
     if isinstance(start, int):
         start = torch.tensor([start]*input_ids.shape[0])
     assert start.shape[0] == input_ids.shape[0]
+    assert (start > 0).all()
     # -1 because next token offset
     start = start - 1
 
     if attention_mask is None:
         attention_mask = torch.ones_like(input_ids)
 
-    logits = batched_inference_logits(model, input_ids, attention_mask, batch_size).cpu()
+    logits = batched_inference_logits(model, input_ids, attention_mask, batch_size, to_cpu=optimize_gpu_mem).cpu()
     input_ids = input_ids.cpu()
     attention_mask = attention_mask.cpu()
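The default start moves from 0 to 1, with a new assert that every start is positive. The reason is the next-token offset applied just below the assert (start = start - 1): the log-probability of input_ids[:, t] is read from logits[:, t-1], so position 0 has no preceding logits and scoring can only begin at position 1. A small runnable illustration of the offset (shapes and values are arbitrary):

import torch

logits = torch.randn(1, 5, 10)            # (batch, seq, vocab)
input_ids = torch.randint(0, 10, (1, 5))
logprobs = torch.log_softmax(logits, dim=-1)
start = 1                                  # earliest scorable position
# log P(token at position 1 | token at position 0) lives at logits index 0:
token_logprob = logprobs[0, start - 1, input_ids[0, start]]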
divergent_beamsearch-0.1.8.dist-info/METADATA → divergent_beamsearch-0.2.1.dist-info/METADATA

@@ -1,12 +1,11 @@
 Metadata-Version: 2.4
 Name: divergent-beamsearch
-Version: 0.1.8
+Version: 0.2.1
 Summary: A variant of the beam search algorithm that focuses on finding answers that maximize the probability of generating an answer before diverging into another subject.
 License-File: LICENCE
 Requires-Python: >=3.11
-Requires-Dist: multi-choices-parser>=0.9.61
+Requires-Dist: multi-choices-parser>=0.9.72
 Requires-Dist: torch>=2.0.0
-Requires-Dist: transformers>=4.47.1
 Description-Content-Type: text/markdown
 
 # Divergent Beam Search
divergent_beamsearch-0.2.1.dist-info/RECORD

@@ -0,0 +1,6 @@
+divergent_beamsearch/__init__.py,sha256=qrpVRoT3d-q1N9fJnzHI2X13e71LDY4-6eLOQ_gwCqQ,62
+divergent_beamsearch/algorithm.py,sha256=GKFwi6aKNmJRu9SR6X96JT93SbOpy84fxyKJ5Pq5vQs,9961
+divergent_beamsearch-0.2.1.dist-info/METADATA,sha256=0JAVae-tlHYFQkaEqBOE9ZDtExKsS-gpFFFb9oNTRdg,2790
+divergent_beamsearch-0.2.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+divergent_beamsearch-0.2.1.dist-info/licenses/LICENCE,sha256=gnISbTzmuQC7NwJaGOdjoq26QYgSuKndq5q2JykifKw,1075
+divergent_beamsearch-0.2.1.dist-info/RECORD,,
divergent_beamsearch-0.1.8.dist-info/RECORD

@@ -1,6 +0,0 @@
-divergent_beamsearch/__init__.py,sha256=qrpVRoT3d-q1N9fJnzHI2X13e71LDY4-6eLOQ_gwCqQ,62
-divergent_beamsearch/algorithm.py,sha256=rywmvaIoo66aksaNdCXOPfqtd8WnCazVqYoxySi6G9s,9610
-divergent_beamsearch-0.1.8.dist-info/METADATA,sha256=iZjtT-uUwN1X2EfFzPI5_ermjIMu9Myz3d4H8FWR4nw,2826
-divergent_beamsearch-0.1.8.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-divergent_beamsearch-0.1.8.dist-info/licenses/LICENCE,sha256=gnISbTzmuQC7NwJaGOdjoq26QYgSuKndq5q2JykifKw,1075
-divergent_beamsearch-0.1.8.dist-info/RECORD,,