boostedprob 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- boostedprob-0.1.0/LICENSE +22 -0
- boostedprob-0.1.0/MANIFEST.in +2 -0
- boostedprob-0.1.0/PKG-INFO +58 -0
- boostedprob-0.1.0/README.md +44 -0
- boostedprob-0.1.0/pyproject.toml +31 -0
- boostedprob-0.1.0/setup.cfg +4 -0
- boostedprob-0.1.0/src/boostedprob/__init__.py +166 -0
- boostedprob-0.1.0/src/boostedprob.egg-info/PKG-INFO +58 -0
- boostedprob-0.1.0/src/boostedprob.egg-info/SOURCES.txt +10 -0
- boostedprob-0.1.0/src/boostedprob.egg-info/dependency_links.txt +1 -0
- boostedprob-0.1.0/src/boostedprob.egg-info/requires.txt +1 -0
- boostedprob-0.1.0/src/boostedprob.egg-info/top_level.txt +1 -0
boostedprob-0.1.0/LICENSE
@@ -0,0 +1,22 @@
Copyright (c) 2025 Tu Anh Dinh

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
boostedprob-0.1.0/PKG-INFO
@@ -0,0 +1,58 @@
Metadata-Version: 2.4
Name: boostedprob
Version: 0.1.0
Summary: Utilities to compute boosted probabilities and identify dominant tokens.
Author-email: Tu Anh Dinh <dinhtuanh23@gmail.com>
License: MIT
Project-URL: Homepage, https://github.com/TuAnh23/BoostedProb
Project-URL: Repository, https://github.com/TuAnh23/BoostedProb
Requires-Python: >=3.9
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: torch>=2.0.1
Dynamic: license-file

# boostedprob

Utilities to compute "dominant tokens" and derive boosted probabilities from model log-probabilities.

## Install

- Locally (development editable install):

  ```bash
  python -m pip install -e .
  ```

- From GitHub (example):

  ```bash
  python -m pip install "git+https://github.com/yourusername/boostedprob.git"
  ```

## Example

```python
import torch
import boostedprob

# log_probs: shape [batch, seq_len, vocab]
# target: shape [batch, seq_len]
# (fill with your model outputs)

log_probs = torch.log_softmax(torch.randn(2, 4, 1000), dim=-1)
target = torch.randint(0, 1000, (2, 4))

scores = boostedprob.calculate_boostedprob(log_probs, target)
print(scores.shape)  # -> torch.Size([2, 4])
```

## Build & publish

```bash
python -m pip install --upgrade build twine
python -m build
python -m twine upload dist/*
```

Or test first on TestPyPI (recommended).
boostedprob-0.1.0/README.md
@@ -0,0 +1,44 @@
# boostedprob

Utilities to compute "dominant tokens" and derive boosted probabilities from model log-probabilities.

## Install

- Locally (development editable install):

  ```bash
  python -m pip install -e .
  ```

- From GitHub (example):

  ```bash
  python -m pip install "git+https://github.com/yourusername/boostedprob.git"
  ```

## Example

```python
import torch
import boostedprob

# log_probs: shape [batch, seq_len, vocab]
# target: shape [batch, seq_len]
# (fill with your model outputs)

log_probs = torch.log_softmax(torch.randn(2, 4, 1000), dim=-1)
target = torch.randint(0, 1000, (2, 4))

scores = boostedprob.calculate_boostedprob(log_probs, target)
print(scores.shape)  # -> torch.Size([2, 4])
```

## Build & publish

```bash
python -m pip install --upgrade build twine
python -m build
python -m twine upload dist/*
```

Or test first on TestPyPI (recommended).
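The README example above uses the default `ue_method`. For the binary `is_dominant` variant (documented in `src/boostedprob/__init__.py`, shown later in this diff), a minimal sketch with random stand-in tensors in place of real model outputs could look like this:

```python
import torch
import boostedprob

# Random stand-ins; in practice these come from a model's final softmax
log_probs = torch.log_softmax(torch.randn(2, 4, 1000), dim=-1)
target = torch.randint(0, 1000, (2, 4))

# 1 where the output token is among the dominant tokens, else 0
flags = boostedprob.calculate_boostedprob(log_probs, target, ue_method="is_dominant")
print(flags.shape)  # torch.Size([2, 4]), values are 0/1
```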
boostedprob-0.1.0/pyproject.toml
@@ -0,0 +1,31 @@
[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"


[project]
name = "boostedprob"
version = "0.1.0"
description = "Utilities to compute boosted probabilities and identify dominant tokens."
readme = "README.md"
requires-python = ">=3.9"
authors = [
  { name = "Tu Anh Dinh", email = "dinhtuanh23@gmail.com" }
]
license = { text = "MIT" }
dependencies = [
  "torch>=2.0.1"
]


[project.urls]
Homepage = "https://github.com/TuAnh23/BoostedProb"
Repository = "https://github.com/TuAnh23/BoostedProb"


[tool.setuptools]
package-dir = { "" = "src" }


[tool.setuptools.packages.find]
where = ["src"]
boostedprob-0.1.0/src/boostedprob/__init__.py
@@ -0,0 +1,166 @@
"""boostedprob

Small utilities to compute "dominant tokens" and boosted probabilities.

Public API:
- find_dominant(log_probs, ...)
- calculate_boostedprob(log_probs, target, ...)
"""
from typing import Optional
import torch
import itertools

__all__ = ["find_dominant", "calculate_boostedprob"]


def find_dominant(
    log_probs: torch.Tensor,
    find_dominant_method: str = "difference_jump",
    epsilon: Optional[float] = 0.005,
    k: Optional[int] = 5,
    p_jump: Optional[float] = 0.3,
    minp: Optional[float] = 0.9,
    topp: Optional[float] = 0.9,
) -> torch.Tensor:
    """
    Find dominant tokens based on various methods.

    Args:
        log_probs (torch.Tensor): Model log probabilities (output of the final softmax)
            of shape [batch_size, nr_tokens, vocab_size].
        find_dominant_method (str): Method to find dominant tokens. Options include:
            - "epsilon-cut": Tokens with probability above the threshold epsilon.
            - "eta-cut": Tokens selected based on entropy and the threshold epsilon.
            - "top-k": Top k tokens by probability.
            - "top-p": Tokens whose cumulative probability is below p.
            - "min-p": Tokens with probability above a fraction of the highest-probability token.
            - "difference_jump": Tokens before the point where the difference in sorted probabilities exceeds a jump threshold.
        epsilon (float, optional): Probability threshold for the "epsilon-cut" and "eta-cut"
            methods; also the minimum absolute jump for "difference_jump".
        k (int, optional): Number of top tokens for the "top-k" method.
        p_jump (float, optional): Relative jump threshold for the "difference_jump" method.
        minp (float, optional): Minimum probability fraction for the "min-p" method.
        topp (float, optional): Cumulative probability threshold for the "top-p" method.
    Returns:
        torch.Tensor: Indices of dominant tokens, shape [batch_size, nr_tokens, vocab_size].
            -1 values are used to mask out non-dominant tokens.
    """

    prob_dist = torch.exp(log_probs)
    sorted_prob_dist, indices = torch.sort(prob_dist, descending=True, dim=-1)
    if find_dominant_method == "epsilon-cut":
        assert epsilon is not None
        mask = sorted_prob_dist > epsilon
    elif find_dominant_method == "eta-cut":
        assert epsilon is not None
        epsilon = torch.tensor(epsilon)
        entropy = -torch.mul(log_probs, torch.exp(log_probs)).sum(dim=-1)
        entropy = entropy.unsqueeze(-1).expand_as(log_probs)
        mask = (sorted_prob_dist > epsilon) | (sorted_prob_dist > torch.sqrt(epsilon) * torch.exp(-entropy))
    elif find_dominant_method == "top-k":
        assert k is not None
        mask = torch.zeros_like(sorted_prob_dist, dtype=torch.bool)
        mask[..., :k] = True
    elif find_dominant_method == "top-p":
        assert topp is not None
        cumulative_sum = torch.cumsum(sorted_prob_dist, dim=-1)
        mask = cumulative_sum < topp
    elif find_dominant_method == "min-p":
        assert minp is not None
        mask = sorted_prob_dist > minp * sorted_prob_dist[..., 0].unsqueeze(-1).expand_as(sorted_prob_dist)
    elif find_dominant_method == "difference_jump":
        assert p_jump is not None and epsilon is not None
        diff = sorted_prob_dist[..., :-1] - sorted_prob_dist[..., 1:]
        # Identify the cutoff condition along the last dimension
        mask = (diff > p_jump * sorted_prob_dist[..., :-1]) & (diff > epsilon)
    else:
        raise RuntimeError(f"Unknown find_dominant_method {find_dominant_method}")

    # Get the last occurrence of True along the last axis
    cut_points = mask.shape[-1] - 1 - torch.argmax(torch.flip(mask, dims=[-1]).int(),
                                                   dim=-1)  # Shape: [batch_size, nr_tokens]

    # Handle cases where no cutoff is found (all False)
    no_cutoff = ~mask.any(dim=-1)
    cut_points[no_cutoff] = -1  # Use -1 to indicate no valid cutoff found

    # Positions along the sorted vocabulary axis; `indices` has shape [batch_size, nr_tokens, vocab_size]
    position_indices = torch.arange(indices.shape[-1], device=indices.device).expand_as(indices)

    # Expand cut_points so it broadcasts against position_indices
    cut_point_expanded = cut_points.unsqueeze(-1)  # Shape: [batch_size, nr_tokens, 1]

    # Create mask: select elements up to the cut point, but disable selection when cut_point == -1
    mask = (position_indices <= cut_point_expanded) & (cut_point_expanded != -1)

    # Mask out indices beyond the cut-off point with value -1
    indices = torch.where(mask, indices, -1)
    return indices


def calculate_boostedprob(
    log_probs: torch.Tensor,
    target: torch.Tensor,
    ue_method: str = "sum_dominant_mass",
    find_dominant_method: str = "difference_jump",
    epsilon: Optional[float] = 0.005,
    k: Optional[int] = 5,
    p_jump: Optional[float] = 0.3,
    minp: Optional[float] = 0.9,
    topp: Optional[float] = 0.9,
):
    """
    Calculate boosted probabilities based on dominant tokens.

    Args:
        log_probs (torch.Tensor): Model log probabilities (output of the final softmax)
            of shape [batch_size, nr_tokens, vocab_size].
        target (torch.Tensor): Indices of the finally output tokens, shape [batch_size, nr_tokens].
        ue_method (str): Uncertainty estimation method. Options include:
            - "is_dominant": Returns 1 if the predicted token is dominant, else 0.
            - "sum_dominant_mass": Returns the sum of probabilities of dominant tokens, or the predicted token's probability if it is not dominant.
        find_dominant_method (str): Method to find dominant tokens. Options include:
            - "epsilon-cut": Tokens with probability above the threshold epsilon.
            - "eta-cut": Tokens selected based on entropy and the threshold epsilon.
            - "top-k": Top k tokens by probability.
            - "top-p": Tokens whose cumulative probability is below p.
            - "min-p": Tokens with probability above a fraction of the highest-probability token.
            - "difference_jump": Tokens before the point where the difference in sorted probabilities exceeds a jump threshold.
        epsilon (float, optional): Probability threshold for the "epsilon-cut" and "eta-cut"
            methods; also the minimum absolute jump for "difference_jump".
        k (int, optional): Number of top tokens for the "top-k" method.
        p_jump (float, optional): Relative jump threshold for the "difference_jump" method.
        minp (float, optional): Minimum probability fraction for the "min-p" method.
        topp (float, optional): Cumulative probability threshold for the "top-p" method.
    Returns:
        torch.Tensor: Boosted probabilities or binary indicators, shape [batch_size, nr_tokens].
    """
    dominant_indices = find_dominant(
        log_probs=log_probs,
        find_dominant_method=find_dominant_method,
        epsilon=epsilon,
        k=k,
        p_jump=p_jump,
        minp=minp,
        topp=topp,
    )  # Shape: [batch_size, nr_tokens, vocab_size]

    # Check whether each predicted token id is among the corresponding dominant indices
    is_dominant = (target.unsqueeze(-1) == dominant_indices).any(dim=-1)  # Shape: [batch_size, nr_tokens]

    if ue_method == "is_dominant":
        return is_dominant.int()  # Convert to 0/1 format
    elif ue_method == "sum_dominant_mass":
        trans_probs = torch.exp(log_probs)
        dominant_binary = torch.zeros_like(trans_probs, dtype=torch.uint8)  # Binary mask over the vocabulary

        # Mark dominant token ids (ignoring the -1 padding) for every [batch, token] position
        lists = [list(range(x)) for x in trans_probs.shape[:-1]]
        if len(lists) > 0:
            combs = tuple(itertools.product(*lists))
            for comb in combs:
                dominant_binary[comb][dominant_indices[comb][dominant_indices[comb] != -1]] = 1
        else:
            dominant_binary[dominant_indices[dominant_indices != -1]] = 1

        dominant_mass = (dominant_binary * trans_probs).sum(dim=-1)
        selected_prob = trans_probs.gather(dim=-1, index=target.unsqueeze(-1)).squeeze(-1)
        sum_dominant_mass = torch.where(is_dominant, dominant_mass, selected_prob)
        return sum_dominant_mass
    else:
        raise RuntimeError(f"Unknown ue_method {ue_method}")
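A minimal usage sketch for `find_dominant` from the module above, with random stand-in tensors in place of real model outputs:

```python
import torch
import boostedprob

# Random stand-in for model log-probabilities: [batch=2, seq_len=3, vocab=50]
log_probs = torch.log_softmax(torch.randn(2, 3, 50), dim=-1)

# Dominant-token indices per position; non-dominant slots are filled with -1
dominant = boostedprob.find_dominant(log_probs, find_dominant_method="top-k", k=5)
print(dominant.shape)               # torch.Size([2, 3, 50])
print((dominant != -1).sum(dim=-1)) # 5 dominant tokens per position for "top-k"
```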
boostedprob-0.1.0/src/boostedprob.egg-info/PKG-INFO
@@ -0,0 +1,58 @@
Metadata-Version: 2.4
Name: boostedprob
Version: 0.1.0
Summary: Utilities to compute boosted probabilities and identify dominant tokens.
Author-email: Tu Anh Dinh <dinhtuanh23@gmail.com>
License: MIT
Project-URL: Homepage, https://github.com/TuAnh23/BoostedProb
Project-URL: Repository, https://github.com/TuAnh23/BoostedProb
Requires-Python: >=3.9
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: torch>=2.0.1
Dynamic: license-file

# boostedprob

Utilities to compute "dominant tokens" and derive boosted probabilities from model log-probabilities.

## Install

- Locally (development editable install):

  ```bash
  python -m pip install -e .
  ```

- From GitHub (example):

  ```bash
  python -m pip install "git+https://github.com/yourusername/boostedprob.git"
  ```

## Example

```python
import torch
import boostedprob

# log_probs: shape [batch, seq_len, vocab]
# target: shape [batch, seq_len]
# (fill with your model outputs)

log_probs = torch.log_softmax(torch.randn(2, 4, 1000), dim=-1)
target = torch.randint(0, 1000, (2, 4))

scores = boostedprob.calculate_boostedprob(log_probs, target)
print(scores.shape)  # -> torch.Size([2, 4])
```

## Build & publish

```bash
python -m pip install --upgrade build twine
python -m build
python -m twine upload dist/*
```

Or test first on TestPyPI (recommended).
boostedprob-0.1.0/src/boostedprob.egg-info/SOURCES.txt
@@ -0,0 +1,10 @@
LICENSE
MANIFEST.in
README.md
pyproject.toml
src/boostedprob/__init__.py
src/boostedprob.egg-info/PKG-INFO
src/boostedprob.egg-info/SOURCES.txt
src/boostedprob.egg-info/dependency_links.txt
src/boostedprob.egg-info/requires.txt
src/boostedprob.egg-info/top_level.txt
boostedprob-0.1.0/src/boostedprob.egg-info/dependency_links.txt
@@ -0,0 +1 @@

boostedprob-0.1.0/src/boostedprob.egg-info/requires.txt
@@ -0,0 +1 @@
torch>=2.0.1
boostedprob-0.1.0/src/boostedprob.egg-info/top_level.txt
@@ -0,0 +1 @@
boostedprob