psaiops 0.0.7__tar.gz → 0.0.9__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of psaiops might be problematic.
- {psaiops-0.0.7 → psaiops-0.0.9}/PKG-INFO +1 -1
- {psaiops-0.0.7 → psaiops-0.0.9}/psaiops/score/attention/lib.py +11 -10
- {psaiops-0.0.7 → psaiops-0.0.9}/pyproject.toml +1 -1
- {psaiops-0.0.7 → psaiops-0.0.9}/.github/README.md +0 -0
- {psaiops-0.0.7 → psaiops-0.0.9}/psaiops/__init__.py +0 -0
- {psaiops-0.0.7 → psaiops-0.0.9}/psaiops/combine/__init__.py +0 -0
- {psaiops-0.0.7 → psaiops-0.0.9}/psaiops/compose/__init__.py +0 -0
- {psaiops-0.0.7 → psaiops-0.0.9}/psaiops/compose/contrast/__init__.py +0 -0
- {psaiops-0.0.7 → psaiops-0.0.9}/psaiops/edit/__init__.py +0 -0
- {psaiops-0.0.7 → psaiops-0.0.9}/psaiops/elements/__init__.py +0 -0
- {psaiops-0.0.7 → psaiops-0.0.9}/psaiops/elements/data.py +0 -0
- {psaiops-0.0.7 → psaiops-0.0.9}/psaiops/score/__init__.py +0 -0
- {psaiops-0.0.7 → psaiops-0.0.9}/psaiops/score/attention/__init__.py +0 -0
- {psaiops-0.0.7 → psaiops-0.0.9}/psaiops/score/attention/app.py +0 -0
- {psaiops-0.0.7 → psaiops-0.0.9}/psaiops/steer/__init__.py +0 -0
@@ -59,7 +59,7 @@ def generate_token_ids(
         output_hidden_states=False,
         output_attentions=False,
         output_scores=False,
-        early_stopping=True,
+        # early_stopping=True,
         use_cache=True)
     # full sequence
     return __outputs.sequences # (1, T)
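For context on the `early_stopping` change: in Hugging Face `transformers`, `early_stopping` is a beam-search control, so with greedy or sampled decoding it has no effect (and recent versions warn about unused generation flags), which is presumably why 0.0.9 comments it out. Below is a minimal sketch of a comparable `generate()` call; the checkpoint name and surrounding setup are placeholders, not taken from psaiops.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# "gpt2" is a stand-in checkpoint; the model actually loaded by psaiops is not shown in this diff.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("Hello world", return_tensors="pt")
with torch.no_grad():
    outputs = model.generate(
        **inputs,
        max_new_tokens=32,
        return_dict_in_generate=True,  # needed so the result exposes .sequences
        output_hidden_states=False,
        output_attentions=False,
        output_scores=False,
        # early_stopping=True,         # only meaningful for beam search; commented out as in 0.0.9
        use_cache=True)

print(outputs.sequences.shape)         # (1, T), matching the "full sequence" return in lib.py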
@@ -142,16 +142,17 @@ def postprocess_token_ids(
 # COMPUTE ########################################################################
 
 def score_tokens(
+    prompt_str: str,
+    token_num: int,
+    topk_num: int,
+    topp_num: float,
+    token_idx: int,
+    layer_idx: int,
+    head_idx: int,
+    *,
+    device_str: str,
     model_obj: object,
     tokenizer_obj: object,
-    prompt_str: str,
-    token_num: int=32,
-    topk_num: int = 4,
-    topp_num: float = 0.9,
-    token_idx: int=-1, # -1 => avg over all tokens
-    layer_idx: int=-1, # -1 => avg over layers
-    head_idx: int=-1, # -1 => avg over heads
-    device_str: str='cuda',
 ) -> list:
     # dictionary {'input_ids': _, 'attention_mask': _}
     __inputs = preprocess_token_ids(
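The net effect of this hunk is that the sampling and indexing arguments lose their 0.0.7 defaults and move ahead of the bare `*`, while `device_str`, `model_obj` and `tokenizer_obj` become keyword-only. A sketch of what call sites look like under each version; the model and tokenizer objects are assumed to be loaded elsewhere in the app.

# 0.0.7: defaults let a caller pass only the prompt plus the model/tokenizer objects
labels = score_tokens(
    model_obj=model,
    tokenizer_obj=tokenizer,
    prompt_str="Hello world")

# 0.0.9: every knob is explicit, and the arguments after the bare `*` must be keywords
labels = score_tokens(
    "Hello world",  # prompt_str
    32,             # token_num
    4,              # topk_num
    0.9,            # topp_num
    -1,             # token_idx (-1 => avg over all tokens, per the 0.0.7 comments)
    -1,             # layer_idx (-1 => avg over layers)
    -1,             # head_idx  (-1 => avg over heads)
    device_str="cuda",
    model_obj=model,
    tokenizer_obj=tokenizer)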
@@ -185,7 +186,7 @@ def score_tokens(
         token_idx=token_idx)
     # detokenize the IDs
     __tokens = postprocess_token_ids(
-        tokenizer_obj=
+        tokenizer_obj=tokenizer_obj,
         token_obj=__outputs)
     # match tokens and labels for the HighlightedText field
     return list(zip(__tokens, __labels))
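The last hunk may be why the 0.0.7 release is flagged: as displayed, the old line leaves the `tokenizer_obj=` keyword without a value, which Python rejects at parse time, so `psaiops.score.attention.lib` would not even import (unless the line is merely truncated by the diff viewer). A small, self-contained check of that parse behaviour:

# Reproduces the parse failure of the 0.0.7 form versus the 0.0.9 fix.
src_007 = "postprocess_token_ids(tokenizer_obj=\n    token_obj=__outputs)"
src_009 = "postprocess_token_ids(tokenizer_obj=tokenizer_obj,\n    token_obj=__outputs)"

try:
    compile(src_007, "<lib.py 0.0.7>", "eval")
except SyntaxError as err:
    print("0.0.7 form does not parse:", err.msg)

compile(src_009, "<lib.py 0.0.9>", "eval")  # parses cleanly
print("0.0.9 form parses")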