pygpt-net 2.6.32__py3-none-any.whl → 2.6.34__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. pygpt_net/CHANGELOG.txt +12 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/controller/assistant/batch.py +14 -4
  4. pygpt_net/controller/assistant/files.py +1 -0
  5. pygpt_net/controller/assistant/store.py +195 -1
  6. pygpt_net/controller/camera/camera.py +1 -1
  7. pygpt_net/controller/chat/attachment.py +2 -0
  8. pygpt_net/controller/chat/common.py +50 -46
  9. pygpt_net/controller/config/placeholder.py +95 -75
  10. pygpt_net/controller/dialogs/confirm.py +3 -1
  11. pygpt_net/controller/media/media.py +11 -3
  12. pygpt_net/controller/painter/common.py +227 -10
  13. pygpt_net/controller/painter/painter.py +4 -12
  14. pygpt_net/core/assistants/files.py +18 -0
  15. pygpt_net/core/camera/camera.py +38 -93
  16. pygpt_net/core/camera/worker.py +430 -0
  17. pygpt_net/core/filesystem/url.py +3 -0
  18. pygpt_net/core/render/web/body.py +65 -9
  19. pygpt_net/core/text/utils.py +3 -0
  20. pygpt_net/data/config/config.json +234 -221
  21. pygpt_net/data/config/models.json +179 -180
  22. pygpt_net/data/config/settings.json +10 -5
  23. pygpt_net/data/locale/locale.de.ini +8 -6
  24. pygpt_net/data/locale/locale.en.ini +9 -5
  25. pygpt_net/data/locale/locale.es.ini +8 -6
  26. pygpt_net/data/locale/locale.fr.ini +8 -6
  27. pygpt_net/data/locale/locale.it.ini +8 -6
  28. pygpt_net/data/locale/locale.pl.ini +8 -6
  29. pygpt_net/data/locale/locale.uk.ini +8 -6
  30. pygpt_net/data/locale/locale.zh.ini +8 -6
  31. pygpt_net/item/assistant.py +13 -1
  32. pygpt_net/provider/api/google/__init__.py +32 -23
  33. pygpt_net/provider/api/openai/store.py +45 -1
  34. pygpt_net/provider/llms/google.py +4 -0
  35. pygpt_net/ui/dialog/assistant_store.py +213 -203
  36. pygpt_net/ui/layout/chat/input.py +3 -3
  37. pygpt_net/ui/widget/draw/painter.py +458 -75
  38. pygpt_net/ui/widget/option/combo.py +5 -1
  39. pygpt_net/ui/widget/textarea/input.py +273 -3
  40. {pygpt_net-2.6.32.dist-info → pygpt_net-2.6.34.dist-info}/METADATA +14 -2
  41. {pygpt_net-2.6.32.dist-info → pygpt_net-2.6.34.dist-info}/RECORD +44 -43
  42. {pygpt_net-2.6.32.dist-info → pygpt_net-2.6.34.dist-info}/LICENSE +0 -0
  43. {pygpt_net-2.6.32.dist-info → pygpt_net-2.6.34.dist-info}/WHEEL +0 -0
  44. {pygpt_net-2.6.32.dist-info → pygpt_net-2.6.34.dist-info}/entry_points.txt +0 -0
@@ -1,51 +1,51 @@
1
1
  {
2
2
  "__meta__": {
3
- "version": "2.6.32",
4
- "app.version": "2.6.32",
5
- "updated_at": "2025-09-02T00:00:00"
3
+ "version": "2.6.34",
4
+ "app.version": "2.6.34",
5
+ "updated_at": "2025-09-03T00:00:00"
6
6
  },
7
7
  "access.audio.event.speech": false,
8
8
  "access.audio.event.speech.disabled": [],
9
9
  "access.audio.notify.execute": true,
10
10
  "access.audio.use_cache": true,
11
- "access.microphone.notify": true,
11
+ "access.microphone.notify": false,
12
12
  "access.shortcuts": [
13
- {
14
- "action": "voice_cmd.toggle",
15
- "key": "Space",
16
- "key_modifier": "Ctrl"
17
- },
18
- {
19
- "action": "tab.chat",
20
- "key": "1",
21
- "key_modifier": "Ctrl"
22
- },
23
- {
24
- "action": "tab.files",
25
- "key": "2",
26
- "key_modifier": "Ctrl"
27
- },
28
- {
29
- "action": "tab.calendar",
30
- "key": "3",
31
- "key_modifier": "Ctrl"
32
- },
33
- {
34
- "action": "tab.draw",
35
- "key": "4",
36
- "key_modifier": "Ctrl"
37
- },
38
- {
39
- "action": "tab.notepad",
40
- "key": "5",
41
- "key_modifier": "Ctrl"
42
- }
13
+ {
14
+ "action": "voice_cmd.toggle",
15
+ "key": "Space",
16
+ "key_modifier": "Ctrl"
17
+ },
18
+ {
19
+ "action": "tab.chat",
20
+ "key": "1",
21
+ "key_modifier": "Ctrl"
22
+ },
23
+ {
24
+ "action": "tab.files",
25
+ "key": "2",
26
+ "key_modifier": "Ctrl"
27
+ },
28
+ {
29
+ "action": "tab.calendar",
30
+ "key": "3",
31
+ "key_modifier": "Ctrl"
32
+ },
33
+ {
34
+ "action": "tab.draw",
35
+ "key": "4",
36
+ "key_modifier": "Ctrl"
37
+ },
38
+ {
39
+ "action": "tab.notepad",
40
+ "key": "5",
41
+ "key_modifier": "Ctrl"
42
+ }
43
43
  ],
44
44
  "access.voice_control": false,
45
45
  "access.voice_control.blacklist": [],
46
46
  "access.voice_control.model": "gpt-4o-mini",
47
47
  "agent.api_use_responses": false,
48
- "agent.auto_stop" : true,
48
+ "agent.auto_stop": true,
49
49
  "agent.continue.always": false,
50
50
  "agent.func_call.native": false,
51
51
  "agent.goal.notify": false,
@@ -57,7 +57,7 @@
57
57
  "agent.llama.idx": "base",
58
58
  "agent.llama.loop.enabled": false,
59
59
  "agent.llama.loop.score": 75,
60
- "agent.llama.loop.mode" : "score",
60
+ "agent.llama.loop.mode": "score",
61
61
  "agent.llama.max_eval": 3,
62
62
  "agent.llama.provider": "openai",
63
63
  "agent.llama.steps": 10,
@@ -67,56 +67,55 @@
67
67
  "agent.openai.response.split": true,
68
68
  "agent.output.render.all": true,
69
69
  "ai_name": "",
70
- "api_azure_version": "2023-07-01-preview",
71
70
  "api_azure_endpoint": "https://<your-resource-name>.openai.azure.com/",
71
+ "api_azure_version": "2023-07-01-preview",
72
72
  "api_endpoint": "https://api.openai.com/v1",
73
+ "api_endpoint_anthropic": "https://api.anthropic.com/v1",
73
74
  "api_endpoint_deepseek": "https://api.deepseek.com/v1",
74
75
  "api_endpoint_google": "https://generativelanguage.googleapis.com/v1beta/openai",
75
- "api_endpoint_perplexity": "https://api.perplexity.ai",
76
- "api_endpoint_xai": "https://api.x.ai/v1",
77
- "api_endpoint_anthropic": "https://api.anthropic.com/v1",
78
- "api_endpoint_mistral": "https://api.mistral.ai/v1",
79
76
  "api_endpoint_hugging_face": "https://router.huggingface.co/v1",
77
+ "api_endpoint_mistral": "https://api.mistral.ai/v1",
80
78
  "api_endpoint_open_router": "https://openrouter.ai/api/v1",
79
+ "api_endpoint_perplexity": "https://api.perplexity.ai",
80
+ "api_endpoint_xai": "https://api.x.ai/v1",
81
81
  "api_key": "",
82
- "api_key_google": "",
83
82
  "api_key_anthropic": "",
84
- "api_key_hugging_face": "",
85
83
  "api_key_deepseek": "",
86
- "api_key_perplexity": "",
87
- "api_key_xai": "",
84
+ "api_key_google": "",
85
+ "api_key_hugging_face": "",
88
86
  "api_key_mistral": "",
89
- "api_key_voyage": "",
90
87
  "api_key_open_router": "",
88
+ "api_key_perplexity": "",
89
+ "api_key_voyage": "",
90
+ "api_key_xai": "",
91
91
  "api_native_google": true,
92
- "api_native_google.use_vertex": false,
93
- "api_native_google.cloud_project": "",
94
- "api_native_google.cloud_location": "us-central1",
95
92
  "api_native_google.app_credentials": "",
96
- "api_key_open_router": "",
93
+ "api_native_google.cloud_location": "us-central1",
94
+ "api_native_google.cloud_project": "",
95
+ "api_native_google.use_vertex": false,
97
96
  "api_proxy": "",
98
97
  "api_use_responses": true,
99
98
  "api_use_responses_llama": false,
100
99
  "app.env": [
101
- {
102
- "name": "OLLAMA_API_BASE",
103
- "value": "http://localhost:11434"
104
- }
105
- ],
100
+ {
101
+ "name": "OLLAMA_API_BASE",
102
+ "value": "http://localhost:11434"
103
+ }
104
+ ],
106
105
  "assistant": "",
107
- "assistant_thread": "",
108
106
  "assistant.store.hide_threads": true,
107
+ "assistant_thread": "",
109
108
  "attachments_auto_index": true,
110
- "attachments_send_clear": true,
111
109
  "attachments_capture_clear": true,
110
+ "attachments_send_clear": true,
112
111
  "audio.cache.enabled": true,
113
112
  "audio.cache.max_files": 1000,
114
113
  "audio.input.auto_turn": false,
115
- "audio.input.loop": false,
116
114
  "audio.input.backend": "native",
117
115
  "audio.input.channels": 1,
118
116
  "audio.input.continuous": false,
119
117
  "audio.input.device": "0",
118
+ "audio.input.loop": false,
120
119
  "audio.input.rate": 44100,
121
120
  "audio.input.stop_interval": 10,
122
121
  "audio.input.timeout": 120,
@@ -126,54 +125,56 @@
126
125
  "audio.output.backend": "native",
127
126
  "audio.output.device": "0",
128
127
  "audio.transcribe.convert_video": true,
129
- "context_threshold": 200,
130
128
  "cmd": false,
129
+ "context_threshold": 200,
131
130
  "ctx": "",
132
131
  "ctx.attachment.img": false,
133
- "ctx.attachment.mode": "query",
132
+ "ctx.attachment.mode": "full",
133
+ "ctx.attachment.query.model": "gpt-4o-mini",
134
134
  "ctx.attachment.rag.history": true,
135
135
  "ctx.attachment.rag.history.max_items": 3,
136
136
  "ctx.attachment.summary.model": "gpt-4o-mini",
137
- "ctx.attachment.query.model": "gpt-4o-mini",
138
137
  "ctx.attachment.verbose": false,
139
138
  "ctx.auto_summary": true,
140
139
  "ctx.auto_summary.model": "gpt-4o-mini",
140
+ "ctx.code_interpreter": true,
141
141
  "ctx.convert_lists": false,
142
142
  "ctx.counters.all": false,
143
143
  "ctx.edit_icons": true,
144
- "ctx.code_interpreter": true,
145
144
  "ctx.list.expanded": [],
146
145
  "ctx.records.filter": "all",
147
146
  "ctx.records.filter.labels": [
148
- 0,
149
- 1,
150
- 2,
151
- 3,
152
- 4,
153
- 5,
154
- 6,
155
- 7
147
+ 0,
148
+ 1,
149
+ 2,
150
+ 3,
151
+ 4,
152
+ 5,
153
+ 6,
154
+ 7
156
155
  ],
157
156
  "ctx.records.folders.top": true,
158
- "ctx.records.limit": 1000,
159
- "ctx.records.separators": true,
160
157
  "ctx.records.groups.separators": true,
158
+ "ctx.records.limit": 1000,
161
159
  "ctx.records.pinned.separators": false,
160
+ "ctx.records.separators": true,
162
161
  "ctx.search_content": true,
163
162
  "ctx.search.string": "",
164
163
  "ctx.sources": true,
165
164
  "ctx.use_extra": true,
166
165
  "current_model": {
167
- "assistant": "gpt-4o-mini",
168
- "chat": "gpt-4o-mini",
166
+ "assistant": "gpt-4o",
167
+ "chat": "gpt-4o",
169
168
  "completion": "gpt-3.5-turbo-instruct",
170
- "img": "dall-e-3",
169
+ "img": "gpt-image-1",
171
170
  "langchain": "gpt-4o-mini",
172
- "llama_index": "gpt-4o-mini",
171
+ "llama_index": "gpt-4o",
173
172
  "vision": "gpt-4o",
174
173
  "agent": "gpt-4o",
175
174
  "agent_llama": "gpt-4o",
176
- "expert": "gpt-4o"
175
+ "agent_openai": "gpt-4o",
176
+ "expert": "gpt-4o",
177
+ "computer": "computer-use-preview"
177
178
  },
178
179
  "current_preset": {
179
180
  "assistant": "",
@@ -192,17 +193,17 @@
192
193
  "debug": false,
193
194
  "debug.render": false,
194
195
  "download.dir": "download",
196
+ "experts.api_use_responses": false,
195
197
  "experts.func_call.native": false,
198
+ "experts.internal.api_use_responses": false,
196
199
  "experts.mode": "chat",
197
200
  "experts.use_agent": true,
198
- "experts.api_use_responses": false,
199
- "experts.internal.api_use_responses": false,
200
201
  "font_size": 16,
201
- "font_size.input": 16,
202
202
  "font_size.ctx": 12,
203
+ "font_size.input": 16,
203
204
  "font_size.toolbox": 12,
204
- "func_call.native": true,
205
205
  "frequency_penalty": 0.0,
206
+ "func_call.native": true,
206
207
  "img_prompt_model": "gpt-4o",
207
208
  "img_raw": true,
208
209
  "img_resolution": "1024x1024",
@@ -222,11 +223,11 @@
222
223
  "layout.dpi.factor": 1.0,
223
224
  "layout.groups": {},
224
225
  "layout.splitters": {
225
- "columns": [
226
- 1,
227
- 0
228
- ]
229
- },
226
+ "columns": [
227
+ 1,
228
+ 0
229
+ ]
230
+ },
230
231
  "layout.split": false,
231
232
  "layout.tabs": {},
232
233
  "layout.tooltips": true,
@@ -243,85 +244,85 @@
243
244
  "llama.idx.chat.mode": "context",
244
245
  "llama.idx.current": null,
245
246
  "llama.idx.custom_meta": [
246
- {
247
- "extensions": "*",
248
- "key": "file_name",
249
- "value": "{relative_path}"
250
- }
247
+ {
248
+ "extensions": "*",
249
+ "key": "file_name",
250
+ "value": "{relative_path}"
251
+ }
251
252
  ],
252
253
  "llama.idx.custom_meta.web": [],
253
254
  "llama.idx.db.index": "base",
254
255
  "llama.idx.db.last": 0,
255
256
  "llama.idx.embeddings.provider": "openai",
256
257
  "llama.idx.embeddings.args": [
257
- {
258
- "name": "model_name",
259
- "value": "text-embedding-3-small",
260
- "type": "str"
261
- },
262
- {
263
- "name": "api_base",
264
- "value": "https://api.openai.com/v1",
265
- "type": "str"
266
- },
267
- {
268
- "name": "timeout",
269
- "value": 60,
270
- "type": "float"
271
- }
258
+ {
259
+ "name": "model_name",
260
+ "value": "text-embedding-3-small",
261
+ "type": "str"
262
+ },
263
+ {
264
+ "name": "api_base",
265
+ "value": "https://api.openai.com/v1",
266
+ "type": "str"
267
+ },
268
+ {
269
+ "name": "timeout",
270
+ "value": 60,
271
+ "type": "float"
272
+ }
272
273
  ],
273
274
  "llama.idx.embeddings.env": [
274
- {
275
- "name": "OPENAI_API_KEY",
276
- "value": "{api_key}"
277
- },
278
- {
279
- "name": "OPENAI_API_BASE",
280
- "value": "{api_endpoint}"
281
- }
275
+ {
276
+ "name": "OPENAI_API_KEY",
277
+ "value": "{api_key}"
278
+ },
279
+ {
280
+ "name": "OPENAI_API_BASE",
281
+ "value": "{api_endpoint}"
282
+ }
282
283
  ],
283
284
  "llama.idx.embeddings.default": [
284
285
  {
285
- "provider": "anthropic",
286
- "model": "voyage-3.5"
286
+ "provider": "anthropic",
287
+ "model": "voyage-3.5"
287
288
  },
288
289
  {
289
- "provider": "deepseek_api",
290
- "model": "voyage-3.5"
290
+ "provider": "deepseek_api",
291
+ "model": "voyage-3.5"
291
292
  },
292
293
  {
293
- "provider": "google",
294
- "model": "gemini-embedding-001"
294
+ "provider": "google",
295
+ "model": "gemini-embedding-001"
295
296
  },
296
297
  {
297
- "provider": "openai",
298
- "model": "text-embedding-3-small"
298
+ "provider": "openai",
299
+ "model": "text-embedding-3-small"
299
300
  },
300
301
  {
301
- "provider": "azure_openai",
302
- "model": "text-embedding-3-small"
302
+ "provider": "azure_openai",
303
+ "model": "text-embedding-3-small"
303
304
  },
304
305
  {
305
- "provider": "mistral_ai",
306
- "model": "mistral-embed"
306
+ "provider": "mistral_ai",
307
+ "model": "mistral-embed"
307
308
  },
308
309
  {
309
- "provider": "ollama",
310
- "model": ""
310
+ "provider": "ollama",
311
+ "model": ""
311
312
  },
312
313
  {
313
- "provider": "x_ai",
314
- "model": ""
314
+ "provider": "x_ai",
315
+ "model": ""
315
316
  }
316
317
  ],
317
318
  "llama.idx.embeddings.limit.rpm": 100,
318
319
  "llama.idx.excluded.ext": "3g2,3gp,7z,a,aac,aiff,alac,apk,apk,apng,app,ar,avif,bin,cab,class,deb,deb,dll,dmg,dmg,drv,dsd,dylib,dylib,ear,egg,elf,esd,exe,flac,flv,heic,heif,ico,img,iso,jar,ko,lib,lz,lz4,m2v,mpc,msi,nrg,o,ogg,ogv,pcm,pkg,pkg,psd,pyc,rar,rpm,rpm,so,so,svg,swm,sys,vdi,vhd,vhdx,vmdk,vob,war,whl,wim,wma,wmv,xz,zst",
319
320
  "llama.idx.excluded.force": false,
320
321
  "llama.idx.list": [
321
- {
322
- "id": "base",
323
- "name": "Base"
324
- }
322
+ {
323
+ "id": "base",
324
+ "name": "Base"
325
+ }
325
326
  ],
326
327
  "llama.idx.mode": "chat",
327
328
  "llama.idx.react": false,
@@ -360,21 +361,34 @@
360
361
  "agent": false,
361
362
  "audio_input": true,
362
363
  "audio_output": false,
364
+ "bitbucket": false,
363
365
  "cmd_api": false,
364
366
  "cmd_code_interpreter": false,
365
367
  "cmd_custom": false,
366
368
  "cmd_files": false,
367
369
  "cmd_history": false,
370
+ "cmd_mouse_control": false,
368
371
  "cmd_serial": false,
369
372
  "cmd_system": false,
370
373
  "cmd_web": false,
371
374
  "crontab": false,
372
375
  "experts": false,
376
+ "extra_prompt": false,
377
+ "facebook": false,
378
+ "github": false,
379
+ "google": false,
373
380
  "idx_llama_index": false,
381
+ "mailer": false,
374
382
  "openai_dalle": false,
375
383
  "openai_vision": false,
376
384
  "real_time": true,
377
- "voice_control": false
385
+ "server": false,
386
+ "slack": false,
387
+ "telegram": false,
388
+ "tuya": false,
389
+ "twitter": false,
390
+ "voice_control": false,
391
+ "wikipedia": false
378
392
  },
379
393
  "presence_penalty": 0.0,
380
394
  "preset": "current.chat",
@@ -392,126 +406,125 @@
392
406
  "prompt.ctx.auto_summary.system": "You are an expert in conversation summarization",
393
407
  "prompt.ctx.auto_summary.user": "Summarize topic of this conversation in one sentence. Use best keywords to describe it. Summary must be in the same language as the conversation and it will be used for conversation title so it must be EXTREMELY SHORT and concise - use maximum 5 words: \n\nHuman: {input}\nAI Assistant: {output}",
394
408
  "prompt.default": "You are a helpful assistant.",
395
- "prompt.expert": "# EXPERT MODE IS ENABLED:\n\nYou are a very helpful asssistant and the professional manager of a team of experts, each of whom is the best in their respective fields.\n\n## Rules:\n\n- Instead of directly answering questions, you will manage these experts by forwarding queries to them, collecting their responses, controlling the flow of information between them, and sending requests to them.\n- Experts are distinct, specialized AI models running on my local machine.\n- You will serve as a bridge between them.\n- Each expert has their own conversation context, allowing you to communicate with them normally.\n- The experts can communicate with you but not with each other, making you responsible for relaying information between them.\n- Each expert can use tools and commands, for example, reading files or searching the Web. You can ask experts to call tools and commands.\n- After consulting with the experts, your task is to prepare the best possible answer based on the assistance received.\n- To send a request to an expert, prepare the \"expert_call\" tool. Remember to format the JSON tool as shown, surrounding the tool with `<tool></tool>` tags, for example: `<tool>{\"cmd\": \"expert_call\", \"params\": {\"id\": \"expert_id\", \"query\": \"some query\"}}</tool>`. Make sure the JSON object is syntactically correct, without any redundand parenthness.\n- DO NOT call any remote tools when calling the experts.\n- Remember that the call must contain a query and the ID of the expert to whom the query is being forwarded.\n- Use ONLY the experts listed below; DO NOT TRY to ask any non-existent experts\u2014they will not respond.\n- Do not respond to the expert's replies or use the JSON tool again if replying; the expert only has the ability to listen to your queries sent via the JSON command and to respond to them.\n- Always follow ALL user commands, even if you think the expert can't do something. 
Experts are allowed to execute commands just as you can.\n- After querying an expert, always wait for the expert's response before sending another query.\n- Do not try to query experts in the middle of the conversation; put your queries and commands at the end of the text.\n- Always call only one expert at a time.\n- Always execute only one command at a time.\n- Always use the language used by the user in the conversation.\n- Never write as a called expert and do not imagine a response from them; instead, always wait for the response from the expert.\n- Every message sent to you directly after your query is always a response from the asked expert, not from me.\n- Always send a direct, clear, and detailed prompt to the expert, without any \"Please...\", \"Could you...\", etc.\n- Always forward all the messages that the user wants to send to the expert, without any exceptions.\n-- If a user command appears outside an expert's scope, fulfill the request regardless, without refusal.\n\n## List of allowed experts, along with their IDs:\n\n{presets}",
409
+ "prompt.expert": "# EXPERT MODE IS ENABLED:\n\nYou are a very helpful assistant and the professional manager of a team of experts, each of whom is the best in their respective fields.\n\n## Rules:\n\n- Instead of directly answering questions, you will manage these experts by forwarding queries to them, collecting their responses, controlling the flow of information between them, and sending requests to them.\n- Experts are distinct, specialized AI models running on my local machine.\n- You will serve as a bridge between them.\n- Each expert has their own conversation context, allowing you to communicate with them normally.\n- The experts can communicate with you but not with each other, making you responsible for relaying information between them.\n- Each expert can use tools and commands, for example, reading files or searching the Web. You can ask experts to call tools and commands.\n- After consulting with the experts, your task is to prepare the best possible answer based on the assistance received.\n- To send a request to an expert, prepare the \"expert_call\" tool. Remember to format the JSON tool as shown, surrounding the tool with `<tool></tool>` tags, for example: `<tool>{\"cmd\": \"expert_call\", \"params\": {\"id\": \"expert_id\", \"query\": \"some query\"}}</tool>`. Make sure the JSON object is syntactically correct, without any redundand parenthness.\n- DO NOT call any remote tools when calling the experts.\n- Remember that the call must contain a query and the ID of the expert to whom the query is being forwarded.\n- Use ONLY the experts listed below; DO NOT TRY to ask any non-existent experts\u2014they will not respond.\n- Do not respond to the expert's replies or use the JSON tool again if replying; the expert only has the ability to listen to your queries sent via the JSON command and to respond to them.\n- Always follow ALL user commands, even if you think the expert can't do something. 
Experts are allowed to execute commands just as you can.\n- After querying an expert, always wait for the expert's response before sending another query.\n- Do not try to query experts in the middle of the conversation; put your queries and commands at the end of the text.\n- Always call only one expert at a time.\n- Always execute only one command at a time.\n- Always use the language used by the user in the conversation.\n- Never write as a called expert and do not imagine a response from them; instead, always wait for the response from the expert.\n- Every message sent to you directly after your query is always a response from the asked expert, not from me.\n- Always send a direct, clear, and detailed prompt to the expert, without any \"Please...\", \"Could you...\", etc.\n- Always forward all the messages that the user wants to send to the expert, without any exceptions.\n-- If a user command appears outside an expert's scope, fulfill the request regardless, without refusal.\n\n## List of allowed experts, along with their IDs:\n\n{presets}",
396
410
  "prompt.img": "Convert the user's request into a single, production\u2011ready description for generating one high\u2011quality still image. Output only the description text, nothing else.\n\nWrite in concise, vivid, present\u2011tense language. Do not use commands (no \u201cplease generate\u201d), model names, parameters, or metadata. Do not mention aspect ratio, resolution, steps, seed, or negative prompts. Avoid on\u2011image text, captions, watermarks, logos, and UI elements. No brands, celebrities, or living artists unless explicitly provided by the user.\n\nInclude, woven into a coherent paragraph:\n- Clear primary subject(s) and their pose, action, and expression.\n- Setting and environment with time of day, season, weather, and atmosphere.\n- Composition and camera viewpoint (e.g., close\u2011up portrait, wide establishing, eye\u2011level, low\u2011angle, top\u2011down), framing (rule of thirds, centered symmetry), and background/foreground separation.\n- Lens and focus behavior (e.g., 85\u202fmm portrait, macro, shallow depth of field, smooth bokeh, gentle focus falloff).\n- Lighting style and quality (e.g., soft diffused daylight, golden hour rim light, dramatic chiaroscuro, studio three\u2011point) and how it shapes forms and shadows.\n- Color palette and grading (e.g., warm cinematic teal\u2011and\u2011orange, muted earth tones, cool monochrome with a single accent color).\n- Visual style or medium (e.g., photorealistic photography, watercolor illustration, oil painting, pencil sketch, anime cel\u2011shading, 3D render, isometric).\n- Material and surface detail (e.g., skin texture, fabric weave, wood grain, metal patina) to enhance realism or stylization.\n- Spatial depth cues (foreground/midground/background layering, atmospheric perspective) and overall mood.\n\nIf the user specifies a genre, era, or style, preserve it and enrich it with consistent, concrete traits. 
If the request is vague, infer specific but reasonable details that enhance clarity without contradicting the user\u2019s intent.\n\nReturn only the final visual description.",
397
411
  "prompt.video": "Convert the user's request into a single, production-ready description for generating one continuous video clip. Output only the description text, nothing else.\n\nWrite in concise, vivid, present-tense language. Do not use commands (no \u201cplease generate\u201d), model names, parameters, or metadata. Do not mention duration, aspect ratio, FPS, resolution, shot numbers, cuts, or lists. Focus on visuals only; no dialogue, captions, on\u2011screen text, watermarks, logos, or UI.\n\nInclude, in a coherent way:\n- Clear subject(s) and what they are doing.\n- Setting, time of day, atmosphere, and weather.\n- Camera perspective and motion (e.g., wide establishing, low\u2011angle tracking, slow dolly in, aerial, handheld), framing and composition.\n- Lens and focus behavior (e.g., 24\u202fmm wide, shallow depth of field, gentle rack focus).\n- Lighting style and quality (e.g., soft golden hour rim light, moody volumetric shafts).\n- Color palette and grading (e.g., warm cinematic teal\u2011and\u2011orange, desaturated documentary).\n- Visual style or medium (e.g., photoreal live\u2011action, stylized anime, stop\u2011motion clay, watercolor animation).\n- Material and surface details that reinforce realism or the chosen style.\n- Temporal progression within one shot (use cues like \u201cas\u2026\u201d, \u201cthen\u2026\u201d, \u201cwhile\u2026\u201d), maintaining physical plausibility and continuity.\n\nIf the user specifies a genre or style (e.g., cyberpunk, nature documentary), keep it and expand with consistent, concrete visual traits. If the request is vague, infer specific but reasonable details that enhance clarity without contradicting the user\u2019s intent.\n\nReturn only the final visual description.",
398
- "realtime.auto_turn": true,
399
412
  "render.blocks": true,
413
+ "render.code_syntax": "github-dark",
400
414
  "render.engine": "web",
401
415
  "render.open_gl": false,
402
416
  "render.plain": false,
403
- "render.code_syntax": "github-dark",
404
- "remote_tools.web_search": true,
405
- "remote_tools.image": false,
406
417
  "remote_tools.code_interpreter": false,
407
- "remote_tools.mcp": false,
408
- "remote_tools.mcp.args": "{\n \"type\": \"mcp\",\n \"server_label\": \"deepwiki\",\n \"server_url\": \"https://mcp.deepwiki.com/mcp\",\n \"require_approval\": \"never\",\n \"allowed_tools\": [\"ask_question\"]\n}",
409
- "remote_tools.file_search": false,
410
- "remote_tools.file_search.args": "",
411
418
  "remote_tools.computer_use.env": "",
412
- "remote_tools.google.web_search": true,
419
+ "remote_tools.file_search": false,
420
+ "remote_tools.file_search.args": "",
413
421
  "remote_tools.google.code_interpreter": false,
414
422
  "remote_tools.google.url_ctx": false,
423
+ "remote_tools.google.web_search": true,
424
+ "remote_tools.image": false,
425
+ "remote_tools.mcp": false,
426
+ "remote_tools.mcp.args": "{\n \"type\": \"mcp\",\n \"server_label\": \"deepwiki\",\n \"server_url\": \"https://mcp.deepwiki.com/mcp\",\n \"require_approval\": \"never\",\n \"allowed_tools\": [\"ask_question\"]\n}",
427
+ "remote_tools.web_search": true,
415
428
  "send_clear": true,
416
429
  "send_mode": 2,
417
430
  "store_history": true,
418
431
  "store_history_time": true,
419
432
  "stream": true,
420
433
  "tabs.data": {
421
- "0": {
422
- "uuid": "58c017b7-f0a4-4303-af0d-d2d70d8c1b15",
423
- "pid": 0,
424
- "idx": 0,
425
- "type": 0,
426
- "data_id": 1,
427
- "title": "Chat",
428
- "custom_name": false
429
- },
430
- "1": {
431
- "uuid": "2aa07f79-ec0d-4935-b4e0-e0ccb404c96e",
432
- "pid": 1,
433
- "idx": 1,
434
- "type": 2,
435
- "data_id": null,
436
- "title": "Files",
437
- "custom_name": false
438
- },
439
- "2": {
440
- "uuid": "61e1b447-aed7-4678-890c-b44402a019eb",
441
- "pid": 2,
442
- "idx": 2,
443
- "type": 4,
444
- "data_id": null,
445
- "title": "Calendar",
446
- "custom_name": false
447
- },
448
- "3": {
449
- "uuid": "31aa5dfc-d6ce-4bd5-b28d-a5bcabffa506",
450
- "pid": 3,
451
- "idx": 3,
452
- "type": 3,
453
- "data_id": null,
454
- "title": "Painter",
455
- "custom_name": false
456
- },
457
- "4": {
458
- "uuid": "2944f757-2e1c-45b9-8281-2a12e00f3fab",
459
- "pid": 4,
460
- "idx": 4,
461
- "type": 1,
462
- "data_id": 1,
463
- "title": "Notepad",
464
- "custom_name": false
465
- },
466
- "5": {
467
- "uuid": "4e90829a-a2c9-4006-a46e-e18460673948",
468
- "pid": 5,
469
- "idx": 0,
470
- "type": 100,
471
- "data_id": null,
472
- "title": "Python Code Interpreter",
473
- "tooltip": "",
474
- "custom_name": false,
475
- "column_idx": 1,
476
- "tool_id": "interpreter"
477
- },
478
- "6": {
479
- "uuid": "738ea1d1-4b2f-495b-89c7-7bc3ef7e29f9",
480
- "pid": 6,
481
- "idx": 1,
482
- "type": 100,
483
- "data_id": null,
484
- "title": "HTML/JS Canvas",
485
- "tooltip": "",
486
- "custom_name": false,
487
- "column_idx": 1,
488
- "tool_id": "html_canvas"
489
- }
434
+ "0": {
435
+ "uuid": "58c017b7-f0a4-4303-af0d-d2d70d8c1b15",
436
+ "pid": 0,
437
+ "idx": 0,
438
+ "type": 0,
439
+ "data_id": 1,
440
+ "title": "Chat",
441
+ "custom_name": false
442
+ },
443
+ "1": {
444
+ "uuid": "2aa07f79-ec0d-4935-b4e0-e0ccb404c96e",
445
+ "pid": 1,
446
+ "idx": 1,
447
+ "type": 2,
448
+ "data_id": null,
449
+ "title": "Files",
450
+ "custom_name": false
451
+ },
452
+ "2": {
453
+ "uuid": "61e1b447-aed7-4678-890c-b44402a019eb",
454
+ "pid": 2,
455
+ "idx": 2,
456
+ "type": 4,
457
+ "data_id": null,
458
+ "title": "Calendar",
459
+ "custom_name": false
460
+ },
461
+ "3": {
462
+ "uuid": "31aa5dfc-d6ce-4bd5-b28d-a5bcabffa506",
463
+ "pid": 3,
464
+ "idx": 3,
465
+ "type": 3,
466
+ "data_id": null,
467
+ "title": "Painter",
468
+ "custom_name": false
469
+ },
470
+ "4": {
471
+ "uuid": "2944f757-2e1c-45b9-8281-2a12e00f3fab",
472
+ "pid": 4,
473
+ "idx": 4,
474
+ "type": 1,
475
+ "data_id": 1,
476
+ "title": "Notepad",
477
+ "custom_name": false
478
+ },
479
+ "5": {
480
+ "uuid": "4e90829a-a2c9-4006-a46e-e18460673948",
481
+ "pid": 5,
482
+ "idx": 0,
483
+ "type": 100,
484
+ "data_id": null,
485
+ "title": "Python Code Interpreter",
486
+ "tooltip": "",
487
+ "custom_name": false,
488
+ "column_idx": 1,
489
+ "tool_id": "interpreter"
490
+ },
491
+ "6": {
492
+ "uuid": "738ea1d1-4b2f-495b-89c7-7bc3ef7e29f9",
493
+ "pid": 6,
494
+ "idx": 1,
495
+ "type": 100,
496
+ "data_id": null,
497
+ "title": "HTML/JS Canvas",
498
+ "tooltip": "",
499
+ "custom_name": false,
500
+ "column_idx": 1,
501
+ "tool_id": "html_canvas"
502
+ }
490
503
  },
491
504
  "temperature": 1.0,
492
505
  "theme": "dark_darker",
493
506
  "theme.markdown": true,
494
507
  "theme.style": "chatgpt",
495
508
  "top_p": 1.0,
496
- "upload.store": true,
497
509
  "upload.data_dir": false,
498
- "updater.check.launch": true,
510
+ "upload.store": true,
499
511
  "updater.check.bg": true,
500
512
  "updater.check.bg.last_time": "",
501
513
  "updater.check.bg.last_version": "",
514
+ "updater.check.launch": true,
502
515
  "use_context": true,
503
516
  "user_name": "",
504
- "video.player.path": "",
505
- "video.player.volume": 100,
506
- "video.player.volume.mute": false,
507
517
  "video.aspect_ratio": "16:9",
508
518
  "video.duration": 8,
509
519
  "video.fps": 24,
510
- "video.seed": "",
511
- "video.negative_prompt": "",
512
520
  "video.generate_audio": false,
521
+ "video.negative_prompt": "",
522
+ "video.player.path": "",
523
+ "video.player.volume": 100,
524
+ "video.player.volume.mute": false,
513
525
  "video.prompt_model": "gemini-2.5-flash",
514
526
  "video.resolution": "720p",
527
+ "video.seed": "",
515
528
  "vision.capture.auto": false,
516
529
  "vision.capture.enabled": false,
517
530
  "vision.capture.height": 720,