psaiops 0.0.13__py3-none-any.whl → 0.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- psaiops/combine/app.py +366 -0
- psaiops/{elements → common}/data.py +8 -0
- psaiops/common/model.py +45 -0
- psaiops/common/tokenizer.py +41 -0
- psaiops/compose/contrast/app.py +195 -0
- psaiops/compose/contrast/lib.py +143 -0
- psaiops/compose/maths/app.py +323 -0
- psaiops/compose/maths/lib.py +1 -0
- psaiops/reverse/__init__.py +0 -0
- psaiops/score/attention/app.py +106 -72
- psaiops/score/attention/lib.py +9 -84
- psaiops/score/residual/__init__.py +0 -0
- psaiops/score/residual/app.py +290 -0
- psaiops/score/residual/lib.py +134 -0
- psaiops/score/router/__init__.py +0 -0
- psaiops/score/router/app.py +281 -0
- psaiops/score/router/lib.py +59 -0
- psaiops/score/shapley/__init__.py +0 -0
- psaiops/score/shapley/app.py +158 -0
- psaiops/score/shapley/lib.py +1 -0
- psaiops/score/similarity/__init__.py +0 -0
- psaiops/score/similarity/app.py +152 -0
- psaiops/score/similarity/lib.py +1 -0
- {psaiops-0.0.13.dist-info → psaiops-0.4.0.dist-info}/METADATA +14 -16
- psaiops-0.4.0.dist-info/RECORD +36 -0
- {psaiops-0.0.13.dist-info → psaiops-0.4.0.dist-info}/WHEEL +1 -1
- psaiops-0.4.0.dist-info/licenses/.github/LICENSE.md +661 -0
- psaiops-0.0.13.dist-info/RECORD +0 -15
- /psaiops/{elements → common}/__init__.py +0 -0
- /psaiops/{steer → compose/maths}/__init__.py +0 -0
psaiops/combine/app.py
ADDED
|
@@ -0,0 +1,366 @@
|
|
|
1
|
+
import functools
|
|
2
|
+
import itertools
|
|
3
|
+
|
|
4
|
+
import gradio
|
|
5
|
+
import pandas
|
|
6
|
+
import torch
|
|
7
|
+
import torch.cuda
|
|
8
|
+
|
|
9
|
+
import psaiops.common.tokenizer
|
|
10
|
+
import psaiops.common.data
|
|
11
|
+
|
|
12
|
+
# META #########################################################################
|
|
13
|
+
|
|
14
|
+
MODEL = 'openai/gpt-oss-20b'
|
|
15
|
+
|
|
16
|
+
STYLE = '''.giga-text input { font-size: 32px; }'''
|
|
17
|
+
TITLE = '''Combine Datasets'''
|
|
18
|
+
INTRO = '''Combine and wrap prompts to form new datasets.'''
|
|
19
|
+
|
|
20
|
+
COUNT = 8
|
|
21
|
+
|
|
22
|
+
# TEMPLATE #####################################################################
|
|
23
|
+
|
|
24
|
+
ROLES = ['system', 'developer', 'user', 'assistant', 'tool']
|
|
25
|
+
CHANNELS = ['analysis', 'commentary', 'final']
|
|
26
|
+
|
|
27
|
+
# INTRO ########################################################################
|
|
28
|
+
|
|
29
|
+
def create_intro_block(intro: str) -> dict:
    """Render the introduction text as a markdown block."""
    return {'intro_block': gradio.Markdown(intro, line_breaks=True)}
|
|
32
|
+
|
|
33
|
+
# MODEL ########################################################################
|
|
34
|
+
|
|
35
|
+
def create_template_block() -> dict:
|
|
36
|
+
__template = gradio.Dropdown(label='Template', value='openai/gpt-oss-20b', choices=['openai/gpt-oss-20b', ''], scale=1, allow_custom_value=False, multiselect=False, interactive=True)
|
|
37
|
+
return {
|
|
38
|
+
'template_block': __template,}
|
|
39
|
+
|
|
40
|
+
# SAMPLING #####################################################################
|
|
41
|
+
|
|
42
|
+
def create_huggingface_block() -> dict:
|
|
43
|
+
__token = gradio.Textbox(label='Token', value='', placeholder='Hugging Face authentication token.', lines=1, max_lines=1, scale=4, show_label=True, show_copy_button=False, interactive=True)
|
|
44
|
+
return {
|
|
45
|
+
'token_block': __token,}
|
|
46
|
+
|
|
47
|
+
# SAMPLING #####################################################################
|
|
48
|
+
|
|
49
|
+
def create_source_block() -> dict:
|
|
50
|
+
__search = gradio.Dropdown(label='Search', value='', choices=[''], scale=7, allow_custom_value=True, multiselect=False, interactive=True)
|
|
51
|
+
__append = gradio.Button(value='>', variant='primary', size='lg', scale=1, interactive=True)
|
|
52
|
+
__dataset = gradio.Dropdown(label='Datasets', value='', choices=[''], scale=8, allow_custom_value=False, multiselect=True, interactive=True)
|
|
53
|
+
return {
|
|
54
|
+
'search_block': __search,
|
|
55
|
+
'append_block': __append,
|
|
56
|
+
'sources_block': __dataset,}
|
|
57
|
+
|
|
58
|
+
# SAMPLING #####################################################################
|
|
59
|
+
|
|
60
|
+
def create_download_block() -> dict:
|
|
61
|
+
__download = gradio.Button(value='Download', variant='primary', size='lg', scale=1, interactive=True)
|
|
62
|
+
return {
|
|
63
|
+
'download_block': __download,}
|
|
64
|
+
|
|
65
|
+
# ACTIONS ######################################################################
|
|
66
|
+
|
|
67
|
+
def create_meta_block() -> dict:
|
|
68
|
+
__name = gradio.Textbox(label='Path', value='', placeholder='Dataset ID: user/name.', lines=1, max_lines=1, scale=1, show_label=True, show_copy_button=False, interactive=True)
|
|
69
|
+
__col0 = gradio.Textbox(label='Column 0', value='', placeholder='Name of the column 0.', lines=1, max_lines=1, scale=1, show_label=True, show_copy_button=False, interactive=True)
|
|
70
|
+
__col1 = gradio.Textbox(label='Column 1', value='', placeholder='Name of the column 1.', lines=1, max_lines=1, scale=1, show_label=True, show_copy_button=False, interactive=True)
|
|
71
|
+
return {
|
|
72
|
+
'name_block': __name,
|
|
73
|
+
'column_0_block': __col0,
|
|
74
|
+
'column_1_block': __col1,}
|
|
75
|
+
|
|
76
|
+
# INPUTS #######################################################################
|
|
77
|
+
|
|
78
|
+
def create_inputs_row(index: int=0) -> dict:
    """Create one row of prompt inputs: role, channel, source, content and a hide button.

    Only row 0 is visible (and labeled) by default; the other rows are revealed
    on demand through the cache state.
    """
    with gradio.Row(equal_height=True, visible=(index == 0)) as __row:
        __role = gradio.Dropdown(
            type='value',
            label='Role',
            value='user',
            choices=[__r for __r in ROLES],
            scale=1,
            show_label=(index == 0),
            allow_custom_value=False,
            multiselect=False,
            interactive=True,
            visible=(index == 0))
        __channel = gradio.Dropdown(
            type='value',
            label='Channel',
            value='final',
            choices=[__c for __c in CHANNELS],
            scale=1,
            show_label=(index == 0),
            allow_custom_value=False,
            multiselect=False,
            interactive=True,
            visible=(index == 0))
        __source = gradio.Dropdown(
            type='value',
            label='Source',
            # FIX: was 'mnaual', a typo that is not a member of `choices`
            value='manual',
            choices=['manual'],
            scale=4,
            show_label=(index == 0),
            allow_custom_value=False,
            multiselect=False,
            interactive=True,
            visible=(index == 0))
        __content = gradio.Textbox(
            label='Prompt',
            value='',
            placeholder='Some text.',
            lines=1,
            max_lines=1,
            scale=9,
            show_label=(index == 0),
            show_copy_button=True,
            interactive=True,
            visible=(index == 0))
        __hide = gradio.Button(
            value='X',
            variant='secondary',
            size='lg',
            scale=1,
            interactive=True,
            visible=(index == 0))
    return {
        f'row_{index}_block': __row,
        f'role_{index}_block': __role,
        f'channel_{index}_block': __channel,
        f'source_{index}_block': __source,
        f'content_{index}_block': __content,
        f'button_{index}_block': __hide,}
|
|
141
|
+
|
|
142
|
+
# OUTPUTS ######################################################################
|
|
143
|
+
|
|
144
|
+
def create_outputs_block() -> dict:
|
|
145
|
+
__output = gradio.Textbox(label='Sample', value='', placeholder='Resulting combination of the prompts.', lines=2, max_lines=8, scale=1, show_label=True, show_copy_button=True, interactive=False)
|
|
146
|
+
return {'output_block': __output,}
|
|
147
|
+
|
|
148
|
+
# ACTIONS ######################################################################
|
|
149
|
+
|
|
150
|
+
def create_action_block() -> dict:
|
|
151
|
+
__show = gradio.Button(value='Add', variant='primary', size='lg', scale=1, interactive=True)
|
|
152
|
+
__upload = gradio.Button(value='Upload', variant='primary', size='lg', scale=1, interactive=True)
|
|
153
|
+
return {
|
|
154
|
+
'show_block': __show,
|
|
155
|
+
'upload_block': __upload,}
|
|
156
|
+
|
|
157
|
+
# TABLE ########################################################################
|
|
158
|
+
|
|
159
|
+
def create_table_block() -> dict:
|
|
160
|
+
__table = gradio.DataFrame(label='Table', type='numpy', headers=None, row_count=4, col_count=256, scale=1, interactive=False)
|
|
161
|
+
return {'table_block': __table,}
|
|
162
|
+
|
|
163
|
+
# STATE ########################################################################
|
|
164
|
+
|
|
165
|
+
def default_state(visible: bool=False) -> dict:
    """Build the default cache entry for a single input row."""
    return dict(visible=visible, role='user', channel='final', source='manual', content='')
|
|
167
|
+
|
|
168
|
+
def create_state(limit: int=COUNT) -> dict:
    """Init the hidden row cache: entry 0 visible, the remaining `limit - 1` hidden."""
    __entries = [default_state(__i == 0) for __i in range(limit)]
    return {'cache_block': gradio.State(__entries)}
|
|
172
|
+
|
|
173
|
+
# LAYOUT #######################################################################
|
|
174
|
+
|
|
175
|
+
def create_layout(intro: str=INTRO, limit: int=COUNT) -> dict:
|
|
176
|
+
__fields = {}
|
|
177
|
+
__fields.update(create_intro_block(intro=intro))
|
|
178
|
+
with gradio.Row(equal_height=True):
|
|
179
|
+
__fields.update(create_meta_block())
|
|
180
|
+
with gradio.Tabs():
|
|
181
|
+
with gradio.Tab('Column 0') as __col0_tab:
|
|
182
|
+
__fields.update({'column_0_tab': __col0_tab})
|
|
183
|
+
for __i in range(limit):
|
|
184
|
+
__fields.update(create_inputs_row(index=__i))
|
|
185
|
+
with gradio.Row(equal_height=True):
|
|
186
|
+
__fields.update(create_outputs_block())
|
|
187
|
+
with gradio.Row(equal_height=True):
|
|
188
|
+
__fields.update(create_action_block())
|
|
189
|
+
with gradio.Tab('Details') as __details_tab:
|
|
190
|
+
__fields.update({'details_tab': __details_tab})
|
|
191
|
+
with gradio.Row(equal_height=True):
|
|
192
|
+
__fields.update(create_table_block())
|
|
193
|
+
with gradio.Tab('Settings') as __settings_tab:
|
|
194
|
+
__fields.update({'settings_tab': __settings_tab})
|
|
195
|
+
with gradio.Row(equal_height=True):
|
|
196
|
+
__fields.update(create_template_block())
|
|
197
|
+
with gradio.Row(equal_height=True):
|
|
198
|
+
__fields.update(create_huggingface_block())
|
|
199
|
+
with gradio.Row(equal_height=True):
|
|
200
|
+
__fields.update(create_source_block())
|
|
201
|
+
with gradio.Row(equal_height=True):
|
|
202
|
+
__fields.update(create_download_block())
|
|
203
|
+
return __fields
|
|
204
|
+
|
|
205
|
+
# DYNAMIC ######################################################################
|
|
206
|
+
|
|
207
|
+
def get_input_rows(inputs: dict, limit: int=COUNT) -> list:
    """Collect all row components from `inputs`, flattened in render order."""
    __keys = ('row', 'role', 'channel', 'source', 'content', 'button')
    __components = []
    for __i in range(limit):
        # missing components resolve to None rather than raising
        __components.extend(inputs.get(f'{__k}_{__i}_block', None) for __k in __keys)
    return __components
|
|
217
|
+
|
|
218
|
+
def render_input_rows(rows: list) -> list:
    """Translate cache entries into gradio component updates, in render order."""
    __updates = []
    for __row in rows:
        __visible = __row.get('visible', False)
        # one update per component: row, role, channel, source, content, button
        __updates.append(gradio.update(visible=__visible))
        __updates.append(gradio.update(visible=__visible, value=__row.get('role', 'user')))
        __updates.append(gradio.update(visible=__visible, value=__row.get('channel', 'final')))
        __updates.append(gradio.update(visible=__visible, value=__row.get('source', 'manual')))
        __updates.append(gradio.update(visible=__visible, value=__row.get('content', '')))
        __updates.append(gradio.update(visible=__visible))
    return __updates
|
|
228
|
+
|
|
229
|
+
def show_input_row(rows: list) -> tuple:
    """Reveal the first hidden row and re-render all rows.

    Returns the new state list followed by the unpacked component updates.

    FIX: the original copied only the outer list (`list(rows)`) and then set
    `__rows[i]['visible']` — mutating the dicts still shared with the previous
    state object. Each entry is now copied before the flag is changed.
    """
    __hidden = 0
    __rows = []
    for __row in rows:
        # count the hidden rows seen so far (before any toggling)
        __hidden = __hidden + int(not __row['visible'])
        # visible rows stay visible; only the first hidden row is revealed
        __rows.append({**__row, 'visible': __row['visible'] or (__hidden < 2)})
    # update state and components
    return __rows, *render_input_rows(__rows)
|
|
239
|
+
|
|
240
|
+
def hide_input_row(rows: list, index: int) -> tuple:
    """Remove the row at `index` (row 0 is never removable) and pad with a hidden default.

    Returns the new state list followed by the unpacked component updates.
    """
    __rows = list(rows)
    if 0 < index < len(__rows):
        # drop the selected row, then pad so the row count stays constant
        del __rows[index]
        __rows.append(default_state(False))
    # update state and components
    return __rows, *render_input_rows(__rows)
|
|
250
|
+
|
|
251
|
+
# EVENTS #######################################################################
|
|
252
|
+
|
|
253
|
+
def update_input_cache(cache: list, index: int, value: any, field: str) -> list:
    """Return a copy of the cache with `field` of entry `index` set to `value`.

    FIX: the original mutated the entry dict in place through a shallow list
    copy, so the previous state object was silently modified too; the target
    entry is now replaced with an updated copy.
    """
    __cache = list(cache)
    __cache[index] = {**__cache[index], field: value}
    return __cache
|
|
257
|
+
|
|
258
|
+
def _update_field_cache(field: str, cache: list, index: int, value: any) -> list:
    """Shared plumbing: coerce index/value and write a single cache field."""
    return update_input_cache(cache=cache, index=int(index), value=str(value), field=field)

def update_role_cache(cache: list, index: int, value: any) -> list:
    """Store the role selected on row `index` into the cache."""
    return _update_field_cache('role', cache, index, value)

def update_channel_cache(cache: list, index: int, value: any) -> list:
    """Store the channel selected on row `index` into the cache."""
    return _update_field_cache('channel', cache, index, value)

def update_source_cache(cache: list, index: int, value: any) -> list:
    """Store the source selected on row `index` into the cache."""
    return _update_field_cache('source', cache, index, value)

def update_content_cache(cache: list, index: int, value: any) -> list:
    """Store the prompt text typed on row `index` into the cache."""
    return _update_field_cache('content', cache, index, value)
|
|
269
|
+
|
|
270
|
+
def update_table_data(tokenizer: object) -> callable:
    """Bind `tokenizer` into a callback that tokenizes prompts for the details table."""
    # called with unpacked arguments by gradio
    def __update_table_data(*prompts: list) -> list:
        """Tokenize the prompts and return one row of readable token strings each."""
        # batch-encode all prompts at once, padded to a common length
        __encoded = tokenizer(prompts, return_tensors='pt', padding=True)
        # map each sequence of IDs back to token strings
        __sequences = map(tokenizer.convert_ids_to_tokens, __encoded['input_ids'])
        # make the BPE markers readable (Ġ -> space, Ċ -> the literal '\n')
        return [
            [__t.replace(chr(0x0120), ' ').replace(chr(0x010a), '\\n') for __t in __s]
            for __s in __sequences]
    # closure fixed to the given tokenizer
    return __update_table_data
|
|
281
|
+
|
|
282
|
+
def update_dataset_list(data: str) -> dict:
    """Refresh the dataset dropdown choices from a Hugging Face search query."""
    # only query the hub once the search term is specific enough
    __choices = (
        psaiops.common.data.query_huggingface(target=data, label='dataset', limit=8)
        if len(data) > 3 else [])
    return gradio.update(choices=__choices, visible=True)
|
|
287
|
+
|
|
288
|
+
# APP ##########################################################################
|
|
289
|
+
|
|
290
|
+
def create_app(title: str=TITLE, intro: str=INTRO, style: str=STYLE, limit: int=COUNT, model: str=MODEL) -> gradio.Blocks:
|
|
291
|
+
__inputs = {}
|
|
292
|
+
with gradio.Blocks(theme=gradio.themes.Soft(), title=title, css=style) as __app:
|
|
293
|
+
# load the tokenizer
|
|
294
|
+
__tokenizer = psaiops.common.tokenizer.get_tokenizer(name=model, device='cpu')
|
|
295
|
+
# create the UI
|
|
296
|
+
__inputs.update(create_layout(intro=intro, limit=limit))
|
|
297
|
+
# init the state
|
|
298
|
+
__inputs.update(create_state(limit=limit))
|
|
299
|
+
# apply the configuration
|
|
300
|
+
__format = update_table_data(tokenizer=__tokenizer)
|
|
301
|
+
# show hidden row
|
|
302
|
+
__inputs['show_block'].click(
|
|
303
|
+
fn=show_input_row,
|
|
304
|
+
inputs=[__inputs['cache_block']],
|
|
305
|
+
outputs=[__inputs['cache_block']] + get_input_rows(inputs=__inputs, limit=limit),
|
|
306
|
+
queue=False,
|
|
307
|
+
show_progress='hidden')
|
|
308
|
+
# update the table TODO
|
|
309
|
+
__inputs['details_tab'].select(
|
|
310
|
+
fn=__format,
|
|
311
|
+
inputs=[__inputs[f'content_{__i}_block'] for __i in range(limit)] + [__inputs['output_block']],
|
|
312
|
+
outputs=__inputs['table_block'],
|
|
313
|
+
queue=False,
|
|
314
|
+
show_progress='hidden')
|
|
315
|
+
# fetch the list of matching datasets
|
|
316
|
+
__inputs['search_block'].change(
|
|
317
|
+
fn=update_dataset_list,
|
|
318
|
+
inputs=__inputs['search_block'],
|
|
319
|
+
outputs=__inputs['search_block'],
|
|
320
|
+
queue=False,
|
|
321
|
+
show_progress='hidden')
|
|
322
|
+
# link each row of inputs to the cache
|
|
323
|
+
for __i in range(limit):
|
|
324
|
+
# update the target role in the cache
|
|
325
|
+
__inputs[f'role_{__i}_block'].change(
|
|
326
|
+
fn=update_role_cache,
|
|
327
|
+
inputs=[__inputs['cache_block'], gradio.State(__i), __inputs[f'role_{__i}_block']],
|
|
328
|
+
outputs=__inputs['cache_block'],
|
|
329
|
+
queue=False,
|
|
330
|
+
show_progress='hidden')
|
|
331
|
+
# update the target channel in the cache
|
|
332
|
+
__inputs[f'channel_{__i}_block'].change(
|
|
333
|
+
fn=update_channel_cache,
|
|
334
|
+
inputs=[__inputs['cache_block'], gradio.State(__i), __inputs[f'channel_{__i}_block']],
|
|
335
|
+
outputs=__inputs['cache_block'],
|
|
336
|
+
queue=False,
|
|
337
|
+
show_progress='hidden')
|
|
338
|
+
# update the target column in the cache
|
|
339
|
+
__inputs[f'source_{__i}_block'].change(
|
|
340
|
+
fn=update_source_cache,
|
|
341
|
+
inputs=[__inputs['cache_block'], gradio.State(__i), __inputs[f'source_{__i}_block']],
|
|
342
|
+
outputs=__inputs['cache_block'],
|
|
343
|
+
queue=False,
|
|
344
|
+
show_progress='hidden')
|
|
345
|
+
# update the target content in the cache
|
|
346
|
+
__inputs[f'content_{__i}_block'].change(
|
|
347
|
+
fn=update_content_cache,
|
|
348
|
+
inputs=[__inputs['cache_block'], gradio.State(__i), __inputs[f'content_{__i}_block']],
|
|
349
|
+
outputs=__inputs['cache_block'],
|
|
350
|
+
queue=False,
|
|
351
|
+
show_progress='hidden')
|
|
352
|
+
# hide the target row
|
|
353
|
+
__inputs[f'button_{__i}_block'].click(
|
|
354
|
+
fn=hide_input_row,
|
|
355
|
+
inputs=[__inputs['cache_block'], gradio.State(__i)],
|
|
356
|
+
outputs=[__inputs['cache_block']] + get_input_rows(inputs=__inputs, limit=limit),
|
|
357
|
+
queue=False,
|
|
358
|
+
show_progress='hidden')
|
|
359
|
+
# gradio application
|
|
360
|
+
return __app
|
|
361
|
+
|
|
362
|
+
# MAIN #########################################################################
|
|
363
|
+
|
|
364
|
+
if __name__ == '__main__':
|
|
365
|
+
__app = create_app()
|
|
366
|
+
__app.launch(share=True, debug=True)
|
|
@@ -21,3 +21,11 @@ def query_huggingface(target: str, label: str='model', limit: int=16, endpoint:
|
|
|
21
21
|
__results = []
|
|
22
22
|
# list of strings
|
|
23
23
|
return __results
|
|
24
|
+
|
|
25
|
+
# EXAMPLES #####################################################################
|
|
26
|
+
|
|
27
|
+
def update_dropdown(target: str, label: str) -> dict:
    """Query Hugging Face and refresh a dropdown with the matching IDs.

    FIX: the original called `psaiops.elements.data.query_huggingface`, but
    this module was moved from `psaiops.elements` to `psaiops.common` in this
    release, so that path no longer exists — and `query_huggingface` is defined
    in this very file, so a direct call is all that is needed.
    """
    # query huggingface (module-local helper)
    __data = query_huggingface(target=target, label=label, limit=16)
    # list choices in the dropdown
    return gradio.update(choices=__data, visible=True)
|
psaiops/common/model.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
import functools
|
|
2
|
+
|
|
3
|
+
import torch
|
|
4
|
+
|
|
5
|
+
import deformers.models.openai.gptoss
|
|
6
|
+
|
|
7
|
+
# LOAD #########################################################################
|
|
8
|
+
|
|
9
|
+
@functools.lru_cache(maxsize=1)
def get_model(name: str, device: str='cpu'):
    """Load the GPT-OSS causal-inference model and cache the instance.

    `maxsize=1` keeps exactly one model resident: calling with a different
    (name, device) pair evicts and reloads.

    Args:
        name: Hugging Face model ID (e.g. 'openai/gpt-oss-20b').
        device: device map passed through to `from_pretrained`.
    """
    __model = deformers.models.openai.gptoss.GptOssForCausalInference.from_pretrained(
        name,
        dtype='auto',
        device_map=device)
    # toggle the inference mode (not training)
    __model.eval()
    # transformers model
    return __model
|
|
19
|
+
|
|
20
|
+
# GENERATE #######################################################################
|
|
21
|
+
|
|
22
|
+
def generate_token_ids(
    model_obj: object,
    input_args: dict,
    token_num: int,
    topk_num: int = 4,
    topp_num: float = 0.9,
) -> torch.Tensor:
    """Generate a completion and return the full token-ID sequence.

    FIX: removed `@functools.lru_cache(maxsize=32)` — `input_args` is a dict,
    which is unhashable, so every call raised `TypeError` before the model was
    even invoked (and caching on tensors would pin them in memory anyway).

    Args:
        model_obj: a transformers-style model exposing `.generate(...)`.
        input_args: tokenized inputs (e.g. `input_ids`, `attention_mask`).
        token_num: maximum number of new tokens to generate.
        topk_num: top-k sampling cutoff; <= 0 disables top-k.
        topp_num: nucleus sampling threshold; outside (0, 1) disables top-p.

    Returns:
        The generated sequences tensor, prompt included — shape (1, T).
    """
    # generate the completion without tracking gradients
    with torch.no_grad():
        __outputs = model_obj.generate(
            **input_args,
            max_new_tokens=token_num,
            # sampling is enabled whenever either cutoff is active
            do_sample=(0.0 < topp_num < 1.0) or (topk_num > 0),
            top_k=topk_num if (topk_num > 0) else None,
            top_p=topp_num if (0.0 < topp_num < 1.0) else None,
            return_dict_in_generate=True,
            output_hidden_states=False,
            output_attentions=False,
            output_scores=False,
            early_stopping=True,
            use_cache=True)
    # full sequence
    return __outputs.sequences  # (1, T)
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
import functools
|
|
2
|
+
|
|
3
|
+
import torch
|
|
4
|
+
import transformers
|
|
5
|
+
|
|
6
|
+
# LOAD #########################################################################
|
|
7
|
+
|
|
8
|
+
@functools.lru_cache(maxsize=4)
def get_tokenizer(name: str, device: str='cpu'):
    """Load (and cache, up to 4 entries) the fast tokenizer for a model name.

    NOTE(review): `dtype` and `device_map` are model-loading kwargs;
    `AutoTokenizer.from_pretrained` most likely ignores them — confirm and
    drop if so.
    """
    return transformers.AutoTokenizer.from_pretrained(
        name,
        use_fast=True,
        dtype='auto',
        device_map=device)
|
|
15
|
+
|
|
16
|
+
# PREPROCESS #####################################################################
|
|
17
|
+
|
|
18
|
+
@functools.lru_cache(maxsize=32)
def preprocess_token_ids(
    tokenizer_obj: object,
    prompt_str: str,
    device_str: str='cpu'
) -> dict:
    """Tokenize a prompt and move every resulting tensor onto the target device."""
    __inputs = {}
    # encode the prompt as pytorch tensors, then relocate field by field
    for __key, __value in tokenizer_obj(prompt_str, return_tensors='pt').items():
        __inputs[__key] = __value.to(device_str)
    return __inputs
|
|
28
|
+
|
|
29
|
+
# POSTPROCESS ####################################################################
|
|
30
|
+
|
|
31
|
+
def postprocess_token_ids(
    tokenizer_obj: object,
    token_data: torch.Tensor,
) -> list:
    """Decode a tensor of token IDs into human-readable token strings.

    FIX: removed `@functools.lru_cache(maxsize=32)` — caching keyed on tensors
    pins them (and the tokenizer) in memory for the cache's lifetime, and
    tensor `==` is elementwise, which breaks the cache's key comparison
    whenever two keys collide on hash.
    """
    # remove the batch axis
    __indices = token_data.squeeze().tolist()
    # back to token strings
    __tokens = tokenizer_obj.convert_ids_to_tokens(__indices)
    # normalize the tokens (Ġ -> space, Ċ -> newline)
    return [__t.replace(chr(0x0120), ' ').replace(chr(0x010a), '\n') for __t in __tokens]
|
|
@@ -0,0 +1,195 @@
|
|
|
1
|
+
import functools
|
|
2
|
+
|
|
3
|
+
import gradio
|
|
4
|
+
import pandas
|
|
5
|
+
import torch
|
|
6
|
+
import torch.cuda
|
|
7
|
+
|
|
8
|
+
import psaiops.common.model
|
|
9
|
+
import psaiops.common.tokenizer
|
|
10
|
+
import psaiops.compose.contrast.lib
|
|
11
|
+
|
|
12
|
+
# META #########################################################################
|
|
13
|
+
|
|
14
|
+
STYLE = '''.giga-text input { font-size: 32px; }'''
|
|
15
|
+
TITLE = '''Contrastive Steering'''
|
|
16
|
+
INTRO = '''Add a delta of activation to a prompt to steer the model output in a specific latent direction.\nUnder construction, only "openai/gpt-oss-20b" is available for now.'''
|
|
17
|
+
|
|
18
|
+
MODEL = 'openai/gpt-oss-20b'
|
|
19
|
+
|
|
20
|
+
# COLORS #######################################################################
|
|
21
|
+
|
|
22
|
+
def create_color_map() -> dict:
|
|
23
|
+
return {
|
|
24
|
+
'-1': '#004444',
|
|
25
|
+
**{str(__i): '#{:02x}0000'.format(int(2.55 * __i)) for __i in range(101)}}
|
|
26
|
+
|
|
27
|
+
# INTRO ########################################################################
|
|
28
|
+
|
|
29
|
+
def create_intro_block(intro: str) -> dict:
|
|
30
|
+
__intro = gradio.Markdown(intro, line_breaks=True)
|
|
31
|
+
return {'intro_block': __intro}
|
|
32
|
+
|
|
33
|
+
# MODEL ########################################################################
|
|
34
|
+
|
|
35
|
+
def create_model_block() -> dict:
|
|
36
|
+
__model = gradio.Dropdown(label='Model ID', value='openai/gpt-oss-20b', choices=['openai/gpt-oss-20b'], scale=1, allow_custom_value=False, multiselect=False, interactive=True) # 'openai/gpt-oss-120b'
|
|
37
|
+
__layer = gradio.Slider(label='Layer Depth', value=12, minimum=0, maximum=23, step=1, scale=1, interactive=True)
|
|
38
|
+
return {
|
|
39
|
+
'model_block': __model,
|
|
40
|
+
'layer_block': __layer,}
|
|
41
|
+
|
|
42
|
+
# SAMPLING #####################################################################
|
|
43
|
+
|
|
44
|
+
def create_sampling_block() -> dict:
|
|
45
|
+
__tokens = gradio.Slider(label='Tokens', value=32, minimum=1, maximum=128, step=1, scale=1, interactive=True)
|
|
46
|
+
__topk = gradio.Slider(label='Top K', value=4, minimum=1, maximum=8, step=1, scale=1, interactive=True)
|
|
47
|
+
__topp = gradio.Slider(label='Top P', value=0.9, minimum=0.0, maximum=1.0, step=0.1, scale=1, interactive=True)
|
|
48
|
+
return {
|
|
49
|
+
'tokens_block': __tokens,
|
|
50
|
+
'topk_block': __topk,
|
|
51
|
+
'topp_block': __topp,}
|
|
52
|
+
|
|
53
|
+
# REDUCTION ####################################################################
|
|
54
|
+
|
|
55
|
+
def create_reduction_block() -> dict:
|
|
56
|
+
__from = gradio.Slider(label='Average From', value=0, minimum=0, maximum=256, step=1, scale=1, interactive=True)
|
|
57
|
+
__to = gradio.Slider(label='Average To', value=256, minimum=0, maximum=256, step=1, scale=1, interactive=True)
|
|
58
|
+
return {
|
|
59
|
+
'from_block': __from,
|
|
60
|
+
'to_block': __to,}
|
|
61
|
+
|
|
62
|
+
# INPUTS #######################################################################
|
|
63
|
+
|
|
64
|
+
def create_inputs_row(operation: str='', index: int=0, label: bool=False) -> dict:
|
|
65
|
+
# __operation = gradio.Button(value=operation, variant='primary', size='lg', elem_classes='white-text', scale=1, interactive=False)
|
|
66
|
+
__operation = gradio.Dropdown(label=f'Operation', value=operation, choices=['', '+', '-', 'x', '.', '='], elem_classes='giga-text', scale=1, show_label=label, allow_custom_value=False, multiselect=False, interactive=False)
|
|
67
|
+
__alpha = gradio.Slider(label='Factor', value=1.0, minimum=0.0, maximum=16.0, step=0.1, scale=1, show_label=label, interactive=True)
|
|
68
|
+
__input = gradio.Textbox(label=f'Prompt', value='', placeholder='Some text.', lines=2, max_lines=2, scale=8, show_label=label, show_copy_button=True, interactive=True)
|
|
69
|
+
return {
|
|
70
|
+
f'operation_{index}_block': __operation,
|
|
71
|
+
f'factor_{index}_block': __alpha,
|
|
72
|
+
f'prompt_{index}_block': __input,}
|
|
73
|
+
|
|
74
|
+
# OUTPUTS ######################################################################
|
|
75
|
+
|
|
76
|
+
def create_outputs_block() -> dict:
|
|
77
|
+
__output = gradio.Textbox(label='= Total', value='', placeholder='Some text.', lines=2, max_lines=8, scale=1, show_label=True, show_copy_button=True, interactive=False)
|
|
78
|
+
return {'output_block': __output}
|
|
79
|
+
|
|
80
|
+
# ACTIONS ######################################################################
|
|
81
|
+
|
|
82
|
+
def create_actions_block() -> dict:
|
|
83
|
+
__process = gradio.Button(value='Process', variant='primary', size='lg', scale=1, interactive=True)
|
|
84
|
+
return {'process_block': __process,}
|
|
85
|
+
|
|
86
|
+
# TABLE ########################################################################
|
|
87
|
+
|
|
88
|
+
def create_table_block() -> dict:
|
|
89
|
+
__table = gradio.DataFrame(label='Summary', type='numpy', headers=None, row_count=4, col_count=256, scale=1, interactive=False)
|
|
90
|
+
return {'table_block': __table,}
|
|
91
|
+
|
|
92
|
+
# STATE ########################################################################
|
|
93
|
+
|
|
94
|
+
def create_state() -> dict:
|
|
95
|
+
return {}
|
|
96
|
+
|
|
97
|
+
# LAYOUT #######################################################################
|
|
98
|
+
|
|
99
|
+
def create_layout(intro: str=INTRO) -> dict:
|
|
100
|
+
__fields = {}
|
|
101
|
+
__fields.update(create_intro_block(intro=intro))
|
|
102
|
+
with gradio.Tabs():
|
|
103
|
+
with gradio.Tab('Equation') as __main_tab:
|
|
104
|
+
__fields.update({'main_tab': __main_tab})
|
|
105
|
+
with gradio.Row(equal_height=True):
|
|
106
|
+
__fields.update(create_inputs_row(operation='', index=0, label=True))
|
|
107
|
+
with gradio.Row(equal_height=True):
|
|
108
|
+
__fields.update(create_inputs_row(operation='-', index=1, label=False))
|
|
109
|
+
with gradio.Row(equal_height=True):
|
|
110
|
+
__fields.update(create_inputs_row(operation='+', index=2, label=False))
|
|
111
|
+
with gradio.Row(equal_height=True):
|
|
112
|
+
__fields.update(create_outputs_block())
|
|
113
|
+
with gradio.Row(equal_height=True):
|
|
114
|
+
__fields.update(create_actions_block())
|
|
115
|
+
with gradio.Tab('Details') as __details_tab:
|
|
116
|
+
__fields.update({'details_tab': __details_tab})
|
|
117
|
+
with gradio.Row(equal_height=True):
|
|
118
|
+
__fields.update(create_table_block())
|
|
119
|
+
with gradio.Tab('Settings') as __settings_tab:
|
|
120
|
+
__fields.update({'settings_tab': __settings_tab})
|
|
121
|
+
with gradio.Column(scale=1):
|
|
122
|
+
with gradio.Row(equal_height=True):
|
|
123
|
+
__fields.update(create_model_block())
|
|
124
|
+
with gradio.Row(equal_height=True):
|
|
125
|
+
__fields.update(create_sampling_block())
|
|
126
|
+
with gradio.Row(equal_height=True):
|
|
127
|
+
__fields.update(create_reduction_block())
|
|
128
|
+
# __fields.update(create_display_block())
|
|
129
|
+
return __fields
|
|
130
|
+
|
|
131
|
+
# EVENTS #######################################################################
|
|
132
|
+
|
|
133
|
+
def update_layer_range(value: float, model: str) -> dict:
    """Clamp the layer slider to the depth of the selected model variant."""
    # the 120b variant exposes layers 0..35, the 20b variant 0..23
    __maximum = 35 if '120b' in model else 23
    return gradio.update(maximum=__maximum, value=min(__maximum, int(value)))
|
|
135
|
+
|
|
136
|
+
def update_table_data(positive: str, negative: str, prompt: str, output: str, tokenizer: object) -> pandas.DataFrame:
    """Tokenize the four texts and highlight the positions where they diverge."""
    __texts = [positive, negative, prompt, output]
    # batch-encode: padded token IDs
    __encoded = tokenizer(__texts, return_tensors='pt', padding=True)
    # decode each sequence back to token strings
    __tokens = [tokenizer.convert_ids_to_tokens(__row) for __row in __encoded['input_ids']]
    # make the BPE markers readable (Ġ -> space, Ċ -> the literal '\n')
    __tokens = [
        [__token.replace(chr(0x0120), ' ').replace(chr(0x010a), '\\n') for __token in __row]
        for __row in __tokens]
    # mark the positions where the positive and negative prompts differ
    __masks = psaiops.compose.contrast.lib.compute_sequence_mask(tokens=__encoded['input_ids'])
    # red background on the masked positions
    __frame = pandas.DataFrame(__tokens)
    return __frame.style.apply(update_table_style, masks=pandas.DataFrame(__masks), axis=None)
|
|
149
|
+
|
|
150
|
+
def update_table_style(data: pandas.DataFrame, masks: pandas.DataFrame) -> pandas.DataFrame:
    """Build a per-cell CSS frame: translucent red where the mask is True, transparent elsewhere."""
    __styles = {
        True: 'background-color: rgb(255, 0, 0, 64%)',
        False: 'background-color: rgb(0, 0, 0, 0%)',}
    return pandas.DataFrame(masks.replace(__styles))
|
|
152
|
+
|
|
153
|
+
# APP ##########################################################################
|
|
154
|
+
|
|
155
|
+
def create_app(title: str=TITLE, intro: str=INTRO, style: str=STYLE, model: str=MODEL) -> gradio.Blocks:
|
|
156
|
+
__fields = {}
|
|
157
|
+
with gradio.Blocks(theme=gradio.themes.Soft(), title=title, css=style) as __app:
|
|
158
|
+
# load the model
|
|
159
|
+
__device = 'cuda' if torch.cuda.is_available() else 'cpu'
|
|
160
|
+
__model = psaiops.common.model.get_model(name=model, device=__device)
|
|
161
|
+
__tokenizer = psaiops.common.tokenizer.get_tokenizer(name=model, device=__device)
|
|
162
|
+
# adapt the computing functions
|
|
163
|
+
__compute = functools.partial(psaiops.compose.contrast.lib.steer_model_output, model_obj=__model, tokenizer_obj=__tokenizer, device_str=__device)
|
|
164
|
+
__format = functools.partial(update_table_data, tokenizer=__tokenizer)
|
|
165
|
+
# create the UI
|
|
166
|
+
__fields.update(create_layout(intro=intro))
|
|
167
|
+
# init the state
|
|
168
|
+
__fields.update(create_state())
|
|
169
|
+
# wire the input fields
|
|
170
|
+
__fields['model_block'].change(
|
|
171
|
+
fn=update_layer_range,
|
|
172
|
+
inputs=[__fields[__k] for __k in ['layer_block', 'model_block']],
|
|
173
|
+
outputs=__fields['layer_block'],
|
|
174
|
+
queue=False,
|
|
175
|
+
show_progress='hidden')
|
|
176
|
+
__fields['details_tab'].select(
|
|
177
|
+
fn=__format,
|
|
178
|
+
inputs=[__fields[__k] for __k in ['prompt_0_block', 'prompt_1_block', 'prompt_2_block', 'output_block']],
|
|
179
|
+
outputs=__fields['table_block'],
|
|
180
|
+
queue=False,
|
|
181
|
+
show_progress='hidden')
|
|
182
|
+
__fields['process_block'].click(
|
|
183
|
+
fn=__compute,
|
|
184
|
+
inputs=[__fields[__k] for __k in ['prompt_0_block', 'prompt_1_block', 'prompt_2_block', 'factor_0_block', 'factor_1_block', 'factor_2_block', 'tokens_block', 'topk_block', 'topp_block', 'layer_block']],
|
|
185
|
+
outputs=__fields['output_block'],
|
|
186
|
+
queue=False,
|
|
187
|
+
show_progress='full')
|
|
188
|
+
# gradio application
|
|
189
|
+
return __app
|
|
190
|
+
|
|
191
|
+
# MAIN #########################################################################
|
|
192
|
+
|
|
193
|
+
if __name__ == '__main__':
|
|
194
|
+
__app = create_app()
|
|
195
|
+
__app.launch(share=True, debug=True)
|