psaiops 0.0.14__tar.gz → 0.0.15__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of psaiops might be problematic.
- {psaiops-0.0.14 → psaiops-0.0.15}/PKG-INFO +1 -1
- {psaiops-0.0.14 → psaiops-0.0.15}/psaiops/score/attention/app.py +41 -28
- {psaiops-0.0.14 → psaiops-0.0.15}/pyproject.toml +1 -1
- {psaiops-0.0.14 → psaiops-0.0.15}/.github/README.md +0 -0
- {psaiops-0.0.14 → psaiops-0.0.15}/psaiops/__init__.py +0 -0
- {psaiops-0.0.14 → psaiops-0.0.15}/psaiops/combine/__init__.py +0 -0
- {psaiops-0.0.14 → psaiops-0.0.15}/psaiops/compose/__init__.py +0 -0
- {psaiops-0.0.14 → psaiops-0.0.15}/psaiops/compose/contrast/__init__.py +0 -0
- {psaiops-0.0.14 → psaiops-0.0.15}/psaiops/edit/__init__.py +0 -0
- {psaiops-0.0.14 → psaiops-0.0.15}/psaiops/elements/__init__.py +0 -0
- {psaiops-0.0.14 → psaiops-0.0.15}/psaiops/elements/data.py +0 -0
- {psaiops-0.0.14 → psaiops-0.0.15}/psaiops/score/__init__.py +0 -0
- {psaiops-0.0.14 → psaiops-0.0.15}/psaiops/score/attention/__init__.py +0 -0
- {psaiops-0.0.14 → psaiops-0.0.15}/psaiops/score/attention/lib.py +0 -0
- {psaiops-0.0.14 → psaiops-0.0.15}/psaiops/steer/__init__.py +0 -0
psaiops/score/attention/app.py

@@ -128,50 +128,69 @@ def update_computation_state(
         token_num: float,
         topk_num: float,
         topp_num: float,
+        token_idx: float,
+        layer_idx: float,
+        head_idx: float,
         prompt_str: str,
         device_str: str,
         model_obj: object,
         tokenizer_obj: object,
 ) -> tuple:
     # sanitize the inputs
-
-
-
-
-
+    __token_num = max(1, min(128, int(token_num)))
+    __topk_num = max(1, min(8, int(topk_num)))
+    __topp_num = max(0.0, min(1.0, float(topp_num)))
+    __token_idx = max(0, min(__token_num, int(token_idx)))
+    __layer_idx = max(0, int(layer_idx))
+    __head_idx = max(0, int(head_idx))
+    __prompt_str = prompt_str.strip()
+    __device_str = device_str if (device_str in ['cpu', 'cuda']) else 'cpu'
     # handle all exceptions at once
     try:
         # dictionary {'input_ids': _, 'attention_mask': _}
-
+        __input_data = psaiops.score.attention.lib.preprocess_token_ids(
             tokenizer_obj=tokenizer_obj,
-            prompt_str=
-            device_str=
+            prompt_str=__prompt_str,
+            device_str=__device_str)
         # parse the inputs
-        __input_dim = int(
+        __input_dim = int(__input_data['input_ids'].shape[-1])
         # tensor (1, T)
-
+        __output_data = psaiops.score.attention.lib.generate_token_ids(
             model_obj=model_obj,
-            input_args=
-            token_num=
-            topk_num=
-            topp_num=
+            input_args=__input_data,
+            token_num=__token_num,
+            topk_num=__topk_num,
+            topp_num=__topp_num)
         # tensor (L, S, H, T, T)
-
+        __attention_data = psaiops.score.attention.lib.compute_attention_weights(
             model_obj=model_obj,
-            token_obj=
+            token_obj=__output_data)
+        # reduce the layer, sample, head and output token axes => tensor (T,)
+        __score_data = psaiops.score.attention.lib.reduce_attention_weights(
+            attention_data=__attention_data,
+            token_idx=__token_idx,
+            layer_idx=__layer_idx,
+            head_idx=__head_idx,
+            input_dim=__input_dim)
+        # translate the scores into integer labels
+        __labels = psaiops.score.attention.lib.postprocess_attention_scores(
+            attention_data=__score_data,
+            input_dim=__input_dim,
+            token_idx=__token_idx)
         # detokenize the IDs
         __tokens = psaiops.score.attention.lib.postprocess_token_ids(
             tokenizer_obj=tokenizer_obj,
-            token_obj=
-        # update each component => (input, output, attention) states
+            token_obj=__output_data)
+        # update each component => (input, output, attention, highligh) states
         return (
             gradio.update(value=__tokens[:__input_dim]),
             gradio.update(value=__tokens[__input_dim:]),
-            gradio.update(value=
+            gradio.update(value=__attention_data),
+            gradio.update(value=list(zip(__tokens, __labels))))
     except:
         raise Exception('Attention generation aborted with an error.')
     finally:
-        return (gradio.update(), gradio.update(), gradio.update())
+        return (gradio.update(), gradio.update(), gradio.update(), gradio.update())

 def update_text_highlight(
         token_idx: float,
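For context on the new pipeline steps, below is a minimal sketch of how a stacked attention tensor of shape (L, S, H, T, T) could be reduced to one score per token using the new `token_idx`, `layer_idx` and `head_idx` controls. The keyword names come from the diff; the indexing and averaging choices, and treating 0 as "average over that axis", are assumptions, since `reduce_attention_weights` itself is not shown in this release diff.

```python
import torch

def reduce_attention_sketch(
        attention_data: torch.Tensor,  # (L, S, H, T, T): layers, samples, heads, query, key
        token_idx: int,
        layer_idx: int,
        head_idx: int,
        input_dim: int) -> torch.Tensor:
    # assumption: an index of 0 means "average over that axis", otherwise pick one slice
    # (upper-bound checks are left to the caller; the app only clamps these indices to >= 0)
    __data = attention_data[layer_idx - 1] if layer_idx > 0 else attention_data.mean(dim=0)  # (S, H, T, T)
    __data = __data.mean(dim=0)                                                              # (H, T, T), average the samples
    __data = __data[head_idx - 1] if head_idx > 0 else __data.mean(dim=0)                    # (T, T)
    # keep the attention row of the selected generated token (offset past the prompt)
    if token_idx > 0:
        return __data[input_dim + token_idx - 1]   # (T,) one score per token
    return __data[input_dim:].mean(dim=0)          # (T,) averaged over all generated tokens

# toy example: 4 layers, 1 sample, 8 heads, 6 prompt tokens + 4 generated tokens
_weights = torch.rand(4, 1, 8, 10, 10)
print(reduce_attention_sketch(_weights, token_idx=1, layer_idx=2, head_idx=0, input_dim=6))
```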
@@ -236,8 +255,8 @@ def create_app(title: str=TITLE, intro: str=INTRO, style: str=STYLE, model: str=
     # wire the input fields
     __button_block.click(
         fn=__compute,
-        inputs=[__fields[__k] for __k in ['tokens_block', 'topk_block', 'topp_block', 'input_block']],
-        outputs=[__fields[__k] for __k in ['input_state', 'output_state', 'attention_state']],
+        inputs=[__fields[__k] for __k in ['tokens_block', 'topk_block', 'topp_block', 'position_block', 'layer_block', 'head_block', 'input_block']],
+        outputs=[__fields[__k] for __k in ['input_state', 'output_state', 'attention_state', 'output_block']],
         queue=False,
         show_progress='full')
     __output_state.change(
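This hunk rewires the compute button: the three new sliders now feed the handler, and the handler's fourth return value drives the highlighted output directly. Below is a minimal, self-contained sketch of this Gradio wiring pattern; the component names and the toy handler body are placeholders, only the `click(...)` keywords mirror the diff.

```python
import gradio

with gradio.Blocks() as demo:
    prompt_box = gradio.Textbox(label='prompt')
    tokens_slider = gradio.Slider(1, 128, value=32, step=1, label='tokens')
    position_slider = gradio.Slider(0, 128, value=0, step=1, label='position')
    output_text = gradio.HighlightedText(label='attention')
    run_button = gradio.Button('compute')

    def compute(token_num, token_idx, prompt_str):
        # placeholder handler: returns (token, label) pairs, like list(zip(__tokens, __labels)) above
        tokens = prompt_str.split()
        labels = [i % 2 for i, _ in enumerate(tokens)]
        return gradio.update(value=list(zip(tokens, labels)))

    # same shape as the diff: one click maps an inputs list onto the handler and an outputs list
    run_button.click(
        fn=compute,
        inputs=[tokens_slider, position_slider, prompt_box],
        outputs=[output_text],
        queue=False,
        show_progress='full')
```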
@@ -246,12 +265,6 @@ def create_app(title: str=TITLE, intro: str=INTRO, style: str=STYLE, model: str=
         outputs=__position_block,
         queue=False,
         show_progress='hidden')
-    __attention_state.change(
-        fn=update_text_highlight,
-        inputs=[__fields[__k] for __k in ['position_block', 'layer_block', 'head_block', 'input_state', 'output_state', 'attention_state']],
-        outputs=__output_block,
-        queue=False,
-        show_progress='hidden')
     __position_block.change(
         fn=update_text_highlight,
         inputs=[__fields[__k] for __k in ['position_block', 'layer_block', 'head_block', 'input_state', 'output_state', 'attention_state']],
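The deleted `__attention_state.change` listener is no longer needed: `update_computation_state` now emits the highlight pairs itself (its fourth `gradio.update`), so only the slider listeners such as `__position_block.change` still call `update_text_highlight`, reusing the cached attention state instead of rerunning the model. A rough, self-contained sketch of what such a slider callback does follows; the tensor layout and the rescaling are assumptions, not the library's actual scoring.

```python
import torch
import gradio

# toy stand-ins for the cached states: 2 layers, 1 sample, 2 heads, 6 x 6 attention
_attention_state = torch.rand(2, 1, 2, 6, 6)
_input_state = ['The', 'cat', 'sat']
_output_state = ['on', 'the', 'mat']

def rehighlight(token_idx: float, layer_idx: float, head_idx: float,
                input_tokens: list, output_tokens: list,
                attention_data: torch.Tensor) -> dict:
    # pick one layer / head / output row out of the cached tensor and rescale it;
    # the real scoring lives in psaiops.score.attention.lib and may differ
    __input_dim = len(input_tokens)
    __row = attention_data[int(layer_idx), 0, int(head_idx), __input_dim + int(token_idx)]
    __labels = (__row / __row.max()).tolist()
    return gradio.update(value=list(zip(input_tokens + output_tokens, __labels)))

# called on every slider move, without rerunning the model
print(rehighlight(0, 1, 0, _input_state, _output_state, _attention_state))
```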