psaiops 0.2.1__tar.gz → 0.3.0__tar.gz
This diff shows the content of publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Potentially problematic release: this version of psaiops might be problematic (see the registry page for details).
- {psaiops-0.2.1 → psaiops-0.3.0}/PKG-INFO +1 -1
- {psaiops-0.2.1 → psaiops-0.3.0}/psaiops/compose/contrast/app.py +6 -5
- {psaiops-0.2.1 → psaiops-0.3.0}/psaiops/compose/contrast/lib.py +0 -1
- psaiops-0.3.0/psaiops/compose/maths/app.py +320 -0
- psaiops-0.3.0/psaiops/compose/maths/lib.py +42 -0
- {psaiops-0.2.1 → psaiops-0.3.0}/psaiops/score/attention/app.py +3 -3
- psaiops-0.3.0/psaiops/steer/__init__.py +0 -0
- {psaiops-0.2.1 → psaiops-0.3.0}/pyproject.toml +1 -1
- {psaiops-0.2.1 → psaiops-0.3.0}/.github/README.md +0 -0
- {psaiops-0.2.1 → psaiops-0.3.0}/psaiops/__init__.py +0 -0
- {psaiops-0.2.1 → psaiops-0.3.0}/psaiops/combine/__init__.py +0 -0
- {psaiops-0.2.1 → psaiops-0.3.0}/psaiops/common/__init__.py +0 -0
- {psaiops-0.2.1 → psaiops-0.3.0}/psaiops/common/data.py +0 -0
- {psaiops-0.2.1 → psaiops-0.3.0}/psaiops/common/dropdown.py +0 -0
- {psaiops-0.2.1 → psaiops-0.3.0}/psaiops/compose/__init__.py +0 -0
- {psaiops-0.2.1 → psaiops-0.3.0}/psaiops/compose/contrast/__init__.py +0 -0
- {psaiops-0.2.1/psaiops/edit → psaiops-0.3.0/psaiops/compose/maths}/__init__.py +0 -0
- {psaiops-0.2.1/psaiops/reverse → psaiops-0.3.0/psaiops/edit}/__init__.py +0 -0
- {psaiops-0.2.1/psaiops/score → psaiops-0.3.0/psaiops/reverse}/__init__.py +0 -0
- {psaiops-0.2.1/psaiops/score/attention → psaiops-0.3.0/psaiops/score}/__init__.py +0 -0
- {psaiops-0.2.1/psaiops/steer → psaiops-0.3.0/psaiops/score/attention}/__init__.py +0 -0
- {psaiops-0.2.1 → psaiops-0.3.0}/psaiops/score/attention/lib.py +0 -0

--- psaiops-0.2.1/psaiops/compose/contrast/app.py
+++ psaiops-0.3.0/psaiops/compose/contrast/app.py
@@ -9,9 +9,9 @@ import psaiops.compose.contrast.lib
 
 # META #########################################################################
 
-TITLE = '''Contrastive Steering'''
-INTRO = '''Add a delta of activation to a prompt to steer the model output in a specific latent direction.'''
 STYLE = '''.giga-text input { font-size: 32px; }'''
+TITLE = '''Contrastive Steering'''
+INTRO = '''Add a delta of activation to a prompt to steer the model output in a specific latent direction.\nUnder construction, only "openai/gpt-oss-20b" is available for now.'''
 
 MODEL = 'openai/gpt-oss-20b'
 
@@ -25,7 +25,7 @@ def create_color_map() -> dict:
 # INTRO ########################################################################
 
 def create_intro_block(intro: str) -> dict:
-    __intro = gradio.Markdown(intro)
+    __intro = gradio.Markdown(intro, line_breaks=True)
     return {'intro_block': __intro}
 
 # MODEL ########################################################################
@@ -40,7 +40,7 @@ def create_model_block() -> dict:
 # SAMPLING #####################################################################
 
 def create_sampling_block() -> dict:
-    __tokens = gradio.Slider(label='Tokens', value=
+    __tokens = gradio.Slider(label='Tokens', value=32, minimum=1, maximum=128, step=1, scale=1, interactive=True)
     __topk = gradio.Slider(label='Top K', value=4, minimum=1, maximum=8, step=1, scale=1, interactive=True)
     __topp = gradio.Slider(label='Top P', value=0.9, minimum=0.0, maximum=1.0, step=0.1, scale=1, interactive=True)
     return {
@@ -111,6 +111,7 @@ def create_layout(intro: str=INTRO) -> dict:
             with gradio.Row(equal_height=True):
                 __fields.update(create_actions_block())
         with gradio.Tab('Details') as __details_tab:
+            __fields.update({'details_tab': __details_tab})
             with gradio.Row(equal_height=True):
                 __fields.update(create_table_block())
         with gradio.Tab('Settings') as __settings_tab:
@@ -170,7 +171,7 @@ def create_app(title: str=TITLE, intro: str=INTRO, style: str=STYLE, model: str=
             outputs=__fields['layer_block'],
             queue=False,
             show_progress='hidden')
-        __fields['
+        __fields['details_tab'].select(
             fn=__format,
             inputs=[__fields[__k] for __k in ['prompt_0_block', 'prompt_1_block', 'prompt_2_block', 'output_block']],
             outputs=__fields['table_block'],
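
Note on the recurring Markdown change above: the new INTRO string embeds a "\n" escape, and gradio.Markdown collapses single newlines unless line_breaks=True is set, which is why the two edits land together. A minimal standalone sketch of the behaviour (illustrative only, not part of the package; the demo name is an assumption):

import gradio

INTRO = '''Add a delta of activation to a prompt to steer the model output in a specific latent direction.\nUnder construction, only "openai/gpt-oss-20b" is available for now.'''

with gradio.Blocks() as demo:
    # with line_breaks=True the embedded newline renders as an actual line break
    gradio.Markdown(INTRO, line_breaks=True)

if __name__ == '__main__':
    demo.launch()
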
--- /dev/null
+++ psaiops-0.3.0/psaiops/compose/maths/app.py
@@ -0,0 +1,320 @@
+import functools
+import itertools
+
+import gradio
+import pandas
+import torch
+import torch.cuda
+
+import psaiops.compose.maths.lib
+
+# META #########################################################################
+
+MODEL = 'openai/gpt-oss-20b'
+
+STYLE = '''.giga-text input { font-size: 32px; }'''
+TITLE = '''Activation Maths'''
+INTRO = '''Compose prompts in the latent space.\nUnder construction, only "openai/gpt-oss-20b" is available for now.'''
+
+COUNT = 8
+
+# COLORS #######################################################################
+
+def create_color_map() -> dict:
+    return {
+        '-1': '#004444',
+        **{str(__i): '#{:02x}0000'.format(int(2.55 * __i)) for __i in range(101)}}
+
+# INTRO ########################################################################
+
+def create_intro_block(intro: str) -> dict:
+    __intro = gradio.Markdown(intro, line_breaks=True)
+    return {'intro_block': __intro}
+
+# MODEL ########################################################################
+
+def create_model_block() -> dict:
+    __model = gradio.Dropdown(label='Model ID', value='openai/gpt-oss-20b', choices=['openai/gpt-oss-20b'], scale=1, allow_custom_value=False, multiselect=False, interactive=True) # 'openai/gpt-oss-120b'
+    __layer = gradio.Slider(label='Layer Depth', value=12, minimum=0, maximum=23, step=1, scale=1, interactive=True)
+    return {
+        'model_block': __model,
+        'layer_block': __layer,}
+
+# SAMPLING #####################################################################
+
+def create_sampling_block() -> dict:
+    __tokens = gradio.Slider(label='Tokens', value=32, minimum=1, maximum=128, step=1, scale=1, interactive=True)
+    __topk = gradio.Slider(label='Top K', value=4, minimum=1, maximum=8, step=1, scale=1, interactive=True)
+    __topp = gradio.Slider(label='Top P', value=0.9, minimum=0.0, maximum=1.0, step=0.1, scale=1, interactive=True)
+    return {
+        'tokens_block': __tokens,
+        'topk_block': __topk,
+        'topp_block': __topp,}
+
+# REDUCTION ####################################################################
+
+def create_reduction_block() -> dict:
+    __from = gradio.Slider(label='Average From', value=0, minimum=0, maximum=256, step=1, scale=1, interactive=True)
+    __to = gradio.Slider(label='Average To', value=256, minimum=0, maximum=256, step=1, scale=1, interactive=True)
+    return {
+        'from_block': __from,
+        'to_block': __to,}
+
+# INPUTS #######################################################################
+
+def create_inputs_row(operation: str='', index: int=0) -> dict:
+    with gradio.Row(equal_height=True, visible=(index == 0)) as __row:
+        __operation = gradio.Dropdown(
+            label=f'Operation',
+            value='' if (index == 0) else operation,
+            choices=(index == 0) * [''] + ['+', '-', 'x', '.', 'µ', '='],
+            elem_classes='giga-text',
+            scale=1,
+            show_label=(index == 0),
+            allow_custom_value=False,
+            multiselect=False,
+            interactive=(index != 0),
+            visible=(index == 0))
+        __alpha = gradio.Slider(
+            label='Factor',
+            value=1.0,
+            minimum=0.0,
+            maximum=8.0,
+            step=0.1,
+            scale=1,
+            show_label=(index == 0),
+            interactive=True,
+            visible=(index == 0))
+        __input = gradio.Textbox(
+            label=f'Prompt',
+            value='',
+            placeholder='Some text.',
+            lines=2,
+            max_lines=2,
+            scale=8,
+            show_label=(index == 0),
+            show_copy_button=True,
+            interactive=True,
+            visible=(index == 0))
+        __delete = gradio.Button(
+            value='✖',
+            variant='secondary',
+            size='lg',
+            scale=1,
+            interactive=(index != 0),
+            visible=(index == 0))
+    return {
+        f'row_{index}_block': __row,
+        f'operation_{index}_block': __operation,
+        f'factor_{index}_block': __alpha,
+        f'prompt_{index}_block': __input,
+        f'button_{index}_block': __delete,}
+
+# OUTPUTS ######################################################################
+
+def create_outputs_block() -> dict:
+    __output = gradio.Textbox(label='= Total', value='', placeholder='Some text.', lines=2, max_lines=8, scale=1, show_label=True, show_copy_button=True, interactive=False)
+    return {'output_block': __output}
+
+# ACTIONS ######################################################################
+
+def create_actions_block() -> dict:
+    __add = gradio.Button(value='Add', variant='primary', size='lg', scale=1, interactive=True)
+    __process = gradio.Button(value='Process', variant='primary', size='lg', scale=1, interactive=True)
+    return {
+        'show_block': __add,
+        'process_block': __process,}
+
+# TABLE ########################################################################
+
+def create_table_block() -> dict:
+    __table = gradio.DataFrame(label='Summary', type='numpy', headers=None, row_count=4, col_count=256, scale=1, interactive=False)
+    return {'table_block': __table,}
+
+# STATE ########################################################################
+
+def create_state(limit: int=COUNT) -> dict:
+    return {
+        'cache_block': gradio.State(
+            [{'visible': True, 'operation': '', 'factor': 1.0, 'prompt': ''}]
+            + max(0, limit - 1) * [{'visible': False, 'operation': '+', 'factor': 1.0, 'prompt': ''}])}
+
+# LAYOUT #######################################################################
+
+def create_layout(intro: str=INTRO, limit: int=COUNT) -> dict:
+    __fields = {}
+    __fields.update(create_intro_block(intro=intro))
+    with gradio.Tabs():
+        with gradio.Tab('Equation') as __main_tab:
+            __fields.update({'main_tab': __main_tab})
+            for __i in range(limit):
+                __fields.update(create_inputs_row(operation='+', index=__i))
+            with gradio.Row(equal_height=True):
+                __fields.update(create_outputs_block())
+            with gradio.Row(equal_height=True):
+                __fields.update(create_actions_block())
+        with gradio.Tab('Details') as __details_tab:
+            __fields.update({'details_tab': __details_tab})
+            with gradio.Row(equal_height=True):
+                __fields.update(create_table_block())
+        with gradio.Tab('Settings') as __settings_tab:
+            __fields.update({'settings_tab': __settings_tab})
+            with gradio.Column(scale=1):
+                with gradio.Row(equal_height=True):
+                    __fields.update(create_model_block())
+                with gradio.Row(equal_height=True):
+                    __fields.update(create_sampling_block())
+                with gradio.Row(equal_height=True):
+                    __fields.update(create_reduction_block())
+    # __fields.update(create_display_block())
+    return __fields
+
+# DYNAMIC ######################################################################
+
+def get_input_rows(inputs: dict, limit: int=COUNT) -> list:
+    return list(itertools.chain.from_iterable([
+        [
+            inputs.get(f'row_{__i}_block', None),
+            inputs.get(f'operation_{__i}_block', None),
+            inputs.get(f'factor_{__i}_block', None),
+            inputs.get(f'prompt_{__i}_block', None),
+            inputs.get(f'button_{__i}_block', None),]
+        for __i in range(limit)]))
+
+def render_input_rows(rows: list) -> list:
+    return list(itertools.chain.from_iterable([
+        [
+            gradio.update(visible=__r.get('visible', False)),
+            gradio.update(visible=__r.get('visible', False), value=__r.get('operation', '')),
+            gradio.update(visible=__r.get('visible', False), value=__r.get('factor', 1.0)),
+            gradio.update(visible=__r.get('visible', False), value=__r.get('prompt', '')),
+            gradio.update(visible=__r.get('visible', False))]
+        for __r in rows]))
+
+def show_input_row(rows: list) -> tuple:
+    __count = 0
+    __rows = list(rows)
+    for __i in range(len(__rows)):
+        # count the number of hidden rows (before changing their state)
+        __count = __count + int(not __rows[__i]['visible'])
+        # all the visible rows stay the same and the first hidden row is toggled
+        __rows[__i]['visible'] = __rows[__i]['visible'] or (__count < 2)
+    # update state and components
+    return __rows, *render_input_rows(__rows)
+
+def hide_input_row(rows: list, index: int) -> tuple:
+    __rows = list(rows)
+    # always show the first row
+    if 0 < index < len(__rows):
+        # remove the target row
+        __rows.pop(index)
+        # keep the number of rows constant
+        __rows.append({'visible': False, 'operation': '+', 'factor': 1.0, 'prompt': ''})
+    # update state and components
+    return __rows, *render_input_rows(__rows)
+
+# EVENTS #######################################################################
+
+def update_layer_range(value: float, model: str) -> dict:
+    return gradio.update(maximum=35, value=min(35, int(value))) if '120b' in model else gradio.update(maximum=23, value=min(23, int(value)))
+
+def update_input_cache(cache: list, value: any, index: int, field: str) -> list:
+    __cache = list(cache)
+    __cache[index][field] = value
+    return __cache
+
+def update_operation_cache(cache: list, index: int, value: any) -> list:
+    return update_input_cache(cache=cache, index=int(index), value=str(value), field='operation')
+
+def update_factor_cache(cache: list, index: int, value: any) -> list:
+    return update_input_cache(cache=cache, index=int(index), value=float(value), field='factor')
+
+def update_prompt_cache(cache: list, index: int, value: any) -> list:
+    return update_input_cache(cache=cache, index=int(index), value=str(value), field='prompt')
+
+def update_table_data(tokenizer: object) -> callable:
+    # called with unpacked arguments
+    def __update_table_data(*prompts: list) -> list:
+        # array of token IDs
+        __outputs = tokenizer(prompts, return_tensors='pt', padding=True)
+        # array of token strings
+        __tokens = [tokenizer.convert_ids_to_tokens(__s) for __s in __outputs['input_ids']]
+        # shift the special characters
+        return [[__t.replace(chr(0x0120), ' ').replace(chr(0x010a), '\\n') for __t in __s] for __s in __tokens]
+    # fixed to a given tokenizer
+    return __update_table_data
+
+# APP ##########################################################################
+
+def create_app(title: str=TITLE, intro: str=INTRO, style: str=STYLE, limit: int=COUNT, model: str=MODEL) -> gradio.Blocks:
+    __inputs = {}
+    with gradio.Blocks(theme=gradio.themes.Soft(), title=title, css=style) as __app:
+        # load the model
+        __device = 'cuda' if torch.cuda.is_available() else 'cpu'
+        # __model = psaiops.compose.maths.lib.get_model(name=model, device=__device)
+        __tokenizer = psaiops.compose.maths.lib.get_tokenizer(name=model, device=__device)
+        # create the UI
+        __inputs.update(create_layout(intro=intro, limit=limit))
+        # init the state
+        __inputs.update(create_state(limit=limit))
+        # apply the configuration
+        __format = update_table_data(tokenizer=__tokenizer)
+        # change the depth of the model
+        __inputs['model_block'].change(
+            fn=update_layer_range,
+            inputs=[__inputs[__k] for __k in ['layer_block', 'model_block']],
+            outputs=__inputs['layer_block'],
+            queue=False,
+            show_progress='hidden')
+        # show hidden row
+        __inputs['show_block'].click(
+            fn=show_input_row,
+            inputs=[__inputs['cache_block']],
+            outputs=[__inputs['cache_block']] + get_input_rows(inputs=__inputs, limit=limit),
+            queue=False,
+            show_progress='hidden')
+        # update the table
+        __inputs['details_tab'].select(
+            fn=__format,
+            inputs=[__inputs[f'prompt_{__i}_block'] for __i in range(limit)] + [__inputs['output_block']],
+            outputs=__inputs['table_block'],
+            queue=False,
+            show_progress='hidden')
+        # link each row of inputs to the cache
+        for __i in range(limit):
+            # update the target operation in the cache
+            __inputs[f'operation_{__i}_block'].change(
+                fn=update_operation_cache,
+                inputs=[__inputs['cache_block'], gradio.State(__i), __inputs[f'operation_{__i}_block']],
+                outputs=__inputs['cache_block'],
+                queue=False,
+                show_progress='hidden')
+            # update the target factor in the cache
+            __inputs[f'factor_{__i}_block'].change(
+                fn=update_factor_cache,
+                inputs=[__inputs['cache_block'], gradio.State(__i), __inputs[f'factor_{__i}_block']],
+                outputs=__inputs['cache_block'],
+                queue=False,
+                show_progress='hidden')
+            # update the target prompt in the cache
+            __inputs[f'prompt_{__i}_block'].change(
+                fn=update_prompt_cache,
+                inputs=[__inputs['cache_block'], gradio.State(__i), __inputs[f'prompt_{__i}_block']],
+                outputs=__inputs['cache_block'],
+                queue=False,
+                show_progress='hidden')
+            # hide the target row
+            __inputs[f'button_{__i}_block'].click(
+                fn=hide_input_row,
+                inputs=[__inputs['cache_block'], gradio.State(__i)],
+                outputs=[__inputs['cache_block']] + get_input_rows(inputs=__inputs, limit=limit),
+                queue=False,
+                show_progress='hidden')
+    # gradio application
+    return __app
+
+# MAIN #########################################################################
+
+if __name__ == '__main__':
+    __app = create_app()
+    __app.launch(share=True, debug=True)
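
The dynamic part of the new app keeps a fixed pool of COUNT input rows in a gradio.State cache and only toggles their visibility: the 'Add' button reveals the first hidden row, and each row's ✖ button recycles its slot back into the hidden pool so the number of components never changes. A standalone sketch of that cache logic, with the gradio.update rendering stripped out so it runs without the UI (the function names below are illustrative, not the package's own):

COUNT = 8

def make_cache(limit: int=COUNT) -> list:
    # one visible row followed by (limit - 1) hidden spares, one independent dict per slot
    return ([{'visible': True, 'operation': '', 'factor': 1.0, 'prompt': ''}]
        + [{'visible': False, 'operation': '+', 'factor': 1.0, 'prompt': ''} for _ in range(limit - 1)])

def show_row(rows: list) -> list:
    # reveal the first hidden row, mirroring show_input_row without the gradio.update calls
    __count = 0
    __rows = [dict(__r) for __r in rows]
    for __r in __rows:
        __count += int(not __r['visible'])
        __r['visible'] = __r['visible'] or (__count < 2)
    return __rows

def hide_row(rows: list, index: int) -> list:
    # drop the target row and append a hidden spare so the pool size stays constant
    __rows = [dict(__r) for __r in rows]
    if 0 < index < len(__rows):
        __rows.pop(index)
        __rows.append({'visible': False, 'operation': '+', 'factor': 1.0, 'prompt': ''})
    return __rows

if __name__ == '__main__':
    __cache = make_cache()
    __cache = show_row(__cache)                # now 2 visible rows
    __cache = hide_row(__cache, index=1)       # back to 1 visible row, still 8 slots
    print([__r['visible'] for __r in __cache]) # [True, False, False, False, False, False, False, False]

A likely reason create_layout instantiates all COUNT rows up front is that plain event handlers can only update existing components, so visibility toggling stands in for adding and removing rows.
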
--- /dev/null
+++ psaiops-0.3.0/psaiops/compose/maths/lib.py
@@ -0,0 +1,42 @@
+import functools
+
+import torch
+import torch.nn.modules
+import transformers
+
+import deformers.models.openai.gptoss
+import mlable.shapes
+
+# LOAD #########################################################################
+
+@functools.lru_cache(maxsize=4)
+def get_tokenizer(name: str, device: str='cpu'):
+    return transformers.AutoTokenizer.from_pretrained(
+        name,
+        use_fast=True,
+        dtype='auto',
+        device_map=device)
+
+@functools.lru_cache(maxsize=2)
+def get_model(name: str, device: str='cpu'):
+    __model = deformers.models.openai.gptoss.GptOssForCausalInference.from_pretrained(
+        name,
+        dtype='auto',
+        device_map=device)
+    # toggle the inference mode (not training)
+    __model.eval()
+    # transformers model
+    return __model
+
+# PREPROCESS #####################################################################
+
+@functools.lru_cache(maxsize=4)
+def preprocess_token_ids(
+    tokenizer: object,
+    prompts: list,
+    device: str='cpu'
+) -> dict:
+    # tokenize
+    __inputs = tokenizer(prompts, return_tensors='pt', padding=True)
+    # move to the main device
+    return {__k: __v.to(device) for __k, __v in __inputs.items()}
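
For context, a hypothetical driver for the helpers above (it assumes the transformers, torch, deformers and mlable dependencies are installed and the gpt-oss checkpoint is reachable). Since preprocess_token_ids is wrapped in functools.lru_cache, which hashes its arguments, the prompts are passed as a tuple rather than a list here:

import torch.cuda

import psaiops.compose.maths.lib as lib

# pick the device the same way the app does
__device = 'cuda' if torch.cuda.is_available() else 'cpu'
__tokenizer = lib.get_tokenizer(name='openai/gpt-oss-20b', device=__device)

# tokenize a small batch and move the tensors to the chosen device
__batch = lib.preprocess_token_ids(
    tokenizer=__tokenizer,
    prompts=('A first prompt.', 'A second prompt.'),
    device=__device)

print(__batch['input_ids'].shape)  # (2, padded sequence length)
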
--- psaiops-0.2.1/psaiops/score/attention/app.py
+++ psaiops-0.3.0/psaiops/score/attention/app.py
@@ -8,9 +8,9 @@ import psaiops.score.attention.lib
 
 # META #########################################################################
 
-TITLE = '''Attention Scoring'''
-INTRO = '''Score each token according to the weights of the attention layers. The model is fixed to "openai/gpt-oss-20b" for now.'''
 STYLE = '''.white-text span { color: white; }'''
+TITLE = '''Attention Scoring'''
+INTRO = '''Score each token according to the weights of the attention layers.\nUnder construction, only "openai/gpt-oss-20b" is available for now.'''
 
 MODEL = 'openai/gpt-oss-20b'
 
@@ -24,7 +24,7 @@ def create_color_map() -> dict:
 # INTRO ########################################################################
 
 def create_intro_block(intro: str) -> dict:
-    __intro = gradio.Markdown(intro)
+    __intro = gradio.Markdown(intro, line_breaks=True)
     return {'intro_block': __intro}
 
 # MODEL ########################################################################
File without changes (×15): the remaining entries in the list above, marked +0 -0, are renames or untouched files with no content diff to display.