pygpt-net 2.6.42__py3-none-any.whl → 2.6.43__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
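The file added in this release is a configuration patcher: it reads the version recorded in the config's __meta__ block, compares it against the running app version using packaging.version, and then walks a chain of per-release migration steps, each of which only fills in missing keys, removes obsolete ones, or forces corrected defaults before flagging the config as updated. The sketch below is illustrative only and is not part of the package; migrate_config is a hypothetical standalone function (the released code implements this as a Patch.execute method on the app window), and only two of the real steps (the 0.9.1 and 2.0.0 key changes) are reproduced, assuming the config is a plain dict as in the released file.

    from typing import Tuple

    from packaging.version import parse as parse_version, Version


    def migrate_config(data: dict, app_version: Version) -> Tuple[dict, bool, bool]:
        """Illustrative sketch: apply version-gated defaults to a config dict."""
        updated = False
        is_old = False

        # version the config file was saved with; fall back to 0.0.0
        current = data.get('__meta__', {}).get('version', '0.0.0')
        old = parse_version(current)

        if old < app_version:
            is_old = True  # config predates the running app

            # each block runs only for configs older than that release
            if old < parse_version("0.9.1"):
                for key in ('user_id', 'custom'):        # keys dropped in 0.9.1
                    data.pop(key, None)
                data.setdefault('organization_key', "")  # key added in 0.9.1
                updated = True

            if old < parse_version("2.0.0"):
                data.setdefault('stream', True)          # key added in 2.0.0
                updated = True

        return data, updated, is_old

A config saved by 0.9.0 would pass through both blocks, while one saved by 1.0.0 would only be touched by the 2.0.0 step; the released file applies the same pattern for every version up to the current one. The complete file follows in the diff below.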
@@ -0,0 +1,2510 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # ================================================== #
4
+ # This file is a part of PYGPT package #
5
+ # Website: https://pygpt.net #
6
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
+ # MIT License #
8
+ # Created By : Marcin Szczygliński #
9
+ # Updated Date: 2025.09.12 00:00:00 #
10
+ # ================================================== #
11
+
12
+ import copy
13
+ import os
14
+ from typing import Tuple
15
+
16
+ from packaging.version import parse as parse_version, Version
17
+
18
+
19
+ class Patch:
20
+ def __init__(self, window=None):
21
+ self.window = window
22
+
23
+ def execute(self, version: Version) -> Tuple[dict, bool, bool]:
24
+ """
25
+ Migrate config to current app version
26
+
27
+ :param version: current app version
28
+ :return: updated data, True if updated, True if the config was from an older version
29
+ """
30
+ data = self.window.core.config.all()
31
+ cfg_get_base = self.window.core.config.get_base
32
+ patch_css = self.window.core.updater.patch_css
33
+ current = "0.0.0"
34
+ updated = False
35
+ is_old = False
36
+
37
+ # get version of config file
38
+ if '__meta__' in data and 'version' in data['__meta__']:
39
+ current = data['__meta__']['version']
40
+ old = parse_version(current)
41
+
42
+ # check if config file is older than current app version
43
+ if old < version:
44
+
45
+ # mark as older version
46
+ is_old = True
47
+
48
+ # < 0.9.1
49
+ if old < parse_version("0.9.1"):
50
+ print("Migrating config from < 0.9.1...")
51
+ keys_to_remove = ['user_id', 'custom'] # not needed anymore
52
+ for key in keys_to_remove:
53
+ if key in data:
54
+ del data[key]
55
+ keys_to_add = ['organization_key']
56
+ for key in keys_to_add:
57
+ if key not in data:
58
+ data[key] = ""
59
+ updated = True
60
+
61
+ # < 0.9.2
62
+ if old < parse_version("0.9.2"):
63
+ print("Migrating config from < 0.9.2...")
64
+ keys_to_remove = ['ui.ctx.min_width',
65
+ 'ui.ctx.max_width',
66
+ 'ui.toolbox.min_width',
67
+ 'ui.toolbox.max_width',
68
+ 'ui.dialog.settings.width',
69
+ 'ui.dialog.settings.height',
70
+ 'ui.chatbox.font.color']
71
+ for key in keys_to_remove:
72
+ if key in data:
73
+ del data[key]
74
+ if 'theme' not in data:
75
+ data['theme'] = "dark_teal"
76
+ updated = True
77
+
78
+ # < 0.9.4
79
+ if old < parse_version("0.9.4"):
80
+ print("Migrating config from < 0.9.4...")
81
+ if 'plugins' not in data:
82
+ data['plugins'] = {}
83
+ if 'plugins_enabled' not in data:
84
+ data['plugins_enabled'] = {}
85
+ updated = True
86
+
87
+ # < 0.9.6
88
+ if old < parse_version("0.9.6"):
89
+ print("Migrating config from < 0.9.6...")
90
+ data['debug'] = True # enable debug by default
91
+ updated = True
92
+
93
+ # < 2.0.0
94
+ if old < parse_version("2.0.0"):
95
+ print("Migrating config from < 2.0.0...")
96
+ data['theme'] = 'dark_teal' # force, because removed light themes!
97
+ if 'cmd' not in data:
98
+ data['cmd'] = True
99
+ if 'stream' not in data:
100
+ data['stream'] = True
101
+ if 'attachments_send_clear' not in data:
102
+ data['attachments_send_clear'] = True
103
+ if 'assistant' not in data:
104
+ data['assistant'] = None
105
+ if 'assistant_thread' not in data:
106
+ data['assistant_thread'] = None
107
+ updated = True
108
+
109
+ # < 2.0.1
110
+ if old < parse_version("2.0.1"):
111
+ print("Migrating config from < 2.0.1...")
112
+ if 'send_mode' not in data:
113
+ data['send_mode'] = 1
114
+ if 'send_shift_enter' in data:
115
+ del data['send_shift_enter']
116
+ if 'font_size.input' not in data:
117
+ data['font_size.input'] = 11
118
+ if 'font_size.ctx' not in data:
119
+ data['font_size.ctx'] = 9
120
+ if 'ctx.auto_summary' not in data:
121
+ data['ctx.auto_summary'] = True
122
+ if 'ctx.auto_summary.system' not in data:
123
+ data['ctx.auto_summary.system'] = "You are an expert in conversation summarization"
124
+ if 'ctx.auto_summary.prompt' not in data:
125
+ data['ctx.auto_summary.prompt'] = "Summarize topic of this conversation in one sentence. Use best " \
126
+ "keywords to describe it. Summary must be in the same language " \
127
+ "as the conversation and it will be used for conversation title " \
128
+ "so it must be EXTREMELY SHORT and concise - use maximum 5 " \
129
+ "words: \n\nUser: {input}\nAI Assistant: {output}"
130
+ updated = True
131
+
132
+ # < 2.0.6
133
+ if old < parse_version("2.0.6"):
134
+ print("Migrating config from < 2.0.6...")
135
+ if 'layout.density' not in data:
136
+ data['layout.density'] = -2
137
+ updated = True
138
+
139
+ # < 2.0.8
140
+ if old < parse_version("2.0.8"):
141
+ print("Migrating config from < 2.0.8...")
142
+ if 'plugins' not in data:
143
+ data['plugins'] = {}
144
+ if 'cmd_web_google' not in data['plugins']:
145
+ data['plugins']['cmd_web_google'] = {}
146
+ data['plugins']['cmd_web_google'][
147
+ 'prompt_summarize'] = "Summarize the English text in a maximum of 3 " \
148
+ "paragraphs, trying to find the most " \
149
+ "important content that can help answer the " \
150
+ "following question: "
151
+ data['plugins']['cmd_web_google']['chunk_size'] = 100000
152
+ data['plugins']['cmd_web_google']['max_page_content_length'] = 0
153
+ updated = True
154
+
155
+ # < 2.0.13
156
+ if old < parse_version("2.0.13"):
157
+ print("Migrating config from < 2.0.13...")
158
+ if 'layout.density' not in data:
159
+ data['layout.density'] = 0
160
+ else:
161
+ if data['layout.density'] == -2:
162
+ data['layout.density'] = 0
163
+ updated = True
164
+
165
+ # < 2.0.14
166
+ if old < parse_version("2.0.14"):
167
+ print("Migrating config from < 2.0.14...")
168
+ if 'vision.capture.enabled' not in data:
169
+ data['vision.capture.enabled'] = True
170
+ if 'vision.capture.auto' not in data:
171
+ data['vision.capture.auto'] = True
172
+ if 'vision.capture.width' not in data:
173
+ data['vision.capture.width'] = 800
174
+ if 'vision.capture.height' not in data:
175
+ data['vision.capture.height'] = 600
176
+ updated = True
177
+
178
+ # < 2.0.16
179
+ if old < parse_version("2.0.16"):
180
+ print("Migrating config from < 2.0.16...")
181
+ if 'vision.capture.idx' not in data:
182
+ data['vision.capture.idx'] = 0
183
+ if 'img_raw' not in data:
184
+ data['img_raw'] = True
185
+ if 'img_prompt_model' not in data:
186
+ data['img_prompt_model'] = "gpt-4-1106-preview"
187
+ updated = True
188
+
189
+ # < 2.0.19
190
+ if old < parse_version("2.0.19"):
191
+ print("Migrating config from < 2.0.19...")
192
+ if 'img_raw' not in data:
193
+ data['img_raw'] = True
194
+ if not data['img_raw']:
195
+ data['img_raw'] = True
196
+ updated = True
197
+
198
+ # < 2.0.25
199
+ if old < parse_version("2.0.25"):
200
+ print("Migrating config from < 2.0.25...")
201
+ if 'cmd.prompt' not in data:
202
+ data['cmd.prompt'] = cfg_get_base('cmd.prompt')
203
+ if 'img_prompt' not in data:
204
+ data['img_prompt'] = cfg_get_base('img_prompt')
205
+ if 'vision.capture.quality' not in data:
206
+ data['vision.capture.quality'] = 85
207
+ if 'attachments_capture_clear' not in data:
208
+ data['attachments_capture_clear'] = True
209
+ if 'plugins' not in data:
210
+ data['plugins'] = {}
211
+ if 'cmd_web_google' not in data['plugins']:
212
+ data['plugins']['cmd_web_google'] = {}
213
+ data['plugins']['cmd_web_google']['prompt_summarize'] = "Summarize the English text in a maximum of 3 " \
214
+ "paragraphs, trying to find the most " \
215
+ "important content that can help answer the " \
216
+ "following question: "
217
+ updated = True
218
+
219
+ # < 2.0.26
220
+ if old < parse_version("2.0.26"):
221
+ print("Migrating config from < 2.0.26...")
222
+ if 'ctx.auto_summary.model' not in data:
223
+ data['ctx.auto_summary.model'] = 'gpt-3.5-turbo-1106'
224
+ updated = True
225
+
226
+ # < 2.0.27
227
+ if old < parse_version("2.0.27"):
228
+ print("Migrating config from < 2.0.27...")
229
+ if 'plugins' not in data:
230
+ data['plugins'] = {}
231
+ if 'cmd_web_google' not in data['plugins']:
232
+ data['plugins']['cmd_web_google'] = {}
233
+ data['plugins']['cmd_web_google'][
234
+ 'prompt_summarize'] = "Summarize text in English in a maximum of 3 " \
235
+ "paragraphs, trying to find the most " \
236
+ "important content that can help answer the " \
237
+ "following question: {query}"
238
+ data['cmd.prompt'] = cfg_get_base('cmd.prompt') # fix
239
+ updated = True
240
+
241
+ # < 2.0.30
242
+ if old < parse_version("2.0.30"):
243
+ print("Migrating config from < 2.0.30...")
244
+ if 'plugins' not in data:
245
+ data['plugins'] = {}
246
+ if 'audio_openai_whisper' not in data['plugins']:
247
+ data['plugins']['audio_openai_whisper'] = {}
248
+ data['plugins']['audio_openai_whisper']['timeout'] = 1
249
+ data['plugins']['audio_openai_whisper']['phrase_length'] = 5
250
+ data['plugins']['audio_openai_whisper']['min_energy'] = 2000
251
+ updated = True
252
+
253
+ # < 2.0.31
254
+ if old < parse_version("2.0.31"):
255
+ print("Migrating config from < 2.0.31...")
256
+ if 'plugins' not in data:
257
+ data['plugins'] = {}
258
+ if 'audio_openai_whisper' not in data['plugins']:
259
+ data['plugins']['audio_openai_whisper'] = {}
260
+ data['plugins']['audio_openai_whisper']['continuous_listen'] = False
261
+ data['plugins']['audio_openai_whisper']['timeout'] = 2
262
+ data['plugins']['audio_openai_whisper']['phrase_length'] = 4
263
+ data['plugins']['audio_openai_whisper']['magic_word_timeout'] = 1
264
+ data['plugins']['audio_openai_whisper']['magic_word_phrase_length'] = 2
265
+ data['plugins']['audio_openai_whisper']['min_energy'] = 1.3
266
+ updated = True
267
+
268
+ # < 2.0.34
269
+ if old < parse_version("2.0.34"):
270
+ print("Migrating config from < 2.0.34...")
271
+ if 'lock_modes' not in data:
272
+ data['lock_modes'] = True
273
+ updated = True
274
+
275
+ # < 2.0.37
276
+ if old < parse_version("2.0.37"):
277
+ print("Migrating config from < 2.0.37...")
278
+ if 'font_size.toolbox' not in data:
279
+ data['font_size.toolbox'] = 12
280
+ updated = True
281
+
282
+ # < 2.0.46
283
+ if old < parse_version("2.0.46"):
284
+ print("Migrating config from < 2.0.46...")
285
+ data['cmd'] = False # disable on default
286
+ updated = True
287
+
288
+ # < 2.0.47
289
+ if old < parse_version("2.0.47"):
290
+ print("Migrating config from < 2.0.47...")
291
+ if 'notepad.num' not in data:
292
+ data['notepad.num'] = 5
293
+ updated = True
294
+
295
+ # < 2.0.52
296
+ if old < parse_version("2.0.52"):
297
+ print("Migrating config from < 2.0.52...")
298
+ if 'layout.dpi.scaling' not in data:
299
+ data['layout.dpi.scaling'] = True
300
+ if 'layout.dpi.factor' not in data:
301
+ data['layout.dpi.factor'] = 1.0
302
+ updated = True
303
+
304
+ # < 2.0.62
305
+ if old < parse_version("2.0.62"):
306
+ print("Migrating config from < 2.0.62...")
307
+ if 'ctx.records.limit' not in data:
308
+ data['ctx.records.limit'] = 0
309
+ updated = True
310
+
311
+ # < 2.0.65
312
+ if old < parse_version("2.0.65"):
313
+ print("Migrating config from < 2.0.65...")
314
+ if 'ctx.search.string' not in data:
315
+ data['ctx.search.string'] = ""
316
+ updated = True
317
+
318
+ # < 2.0.69
319
+ if old < parse_version("2.0.69"):
320
+ print("Migrating config from < 2.0.69...")
321
+ data['img_prompt'] = "Whenever I provide a basic idea or concept for an image, such as 'a picture " \
322
+ "of mountains', I want you to ALWAYS translate it into English and expand " \
323
+ "and elaborate on this idea. Use your knowledge and creativity to add " \
324
+ "details that would make the image more vivid and interesting. This could " \
325
+ "include specifying the time of day, weather conditions, surrounding " \
326
+ "environment, and any additional elements that could enhance the scene. Your " \
327
+ "goal is to create a detailed and descriptive prompt that provides DALL-E " \
328
+ "with enough information to generate a rich and visually appealing image. " \
329
+ "Remember to maintain the original intent of my request while enriching the " \
330
+ "description with your imaginative details."
331
+ data['img_raw'] = False
332
+ data['img_prompt_model'] = "gpt-4-1106-preview"
333
+ updated = True
334
+
335
+ # < 2.0.71
336
+ if old < parse_version("2.0.71"):
337
+ print("Migrating config from < 2.0.71...")
338
+ prompt = 'IMAGE GENERATION: Whenever I provide a basic idea or concept for an image, such as \'a picture of ' \
339
+ 'mountains\', I want you to ALWAYS translate it into English and expand and elaborate on this idea. ' \
340
+ 'Use your knowledge and creativity to add details that would make the image more vivid and ' \
341
+ 'interesting. This could include specifying the time of day, weather conditions, surrounding ' \
342
+ 'environment, and any additional elements that could enhance the scene. Your goal is to create a ' \
343
+ 'detailed and descriptive prompt that provides DALL-E with enough information to generate a rich ' \
344
+ 'and visually appealing image. Remember to maintain the original intent of my request while ' \
345
+ 'enriching the description with your imaginative details. HOW TO START IMAGE GENERATION: to start ' \
346
+ 'image generation return to me prepared prompt in JSON format, all in one line, using following ' \
347
+ 'syntax: ~###~{"cmd": "image", "params": {"query": "your query here"}}~###~. Use ONLY this syntax ' \
348
+ 'and remember to surround JSON string with ~###~. DO NOT use any other syntax. Use English in the ' \
349
+ 'generated JSON command, but conduct all the remaining parts of the discussion with me in the ' \
350
+ 'language in which I am speaking to you. The image will be generated on my machine immediately ' \
351
+ 'after the command is issued, allowing us to discuss the photo once it has been created. Please ' \
352
+ 'engage with me about the photo itself, not only by giving the generate command. '
353
+ if 'openai_dalle' not in data['plugins']:
354
+ data['plugins']['openai_dalle'] = {}
355
+ data['plugins']['openai_dalle']['prompt'] = prompt # fixed prompt
356
+
357
+ data['plugins_enabled']['openai_dalle'] = True
358
+ data['plugins_enabled']['openai_vision'] = True
359
+
360
+ # deprecate vision and img modes
361
+ if data['mode'] == 'vision' or data['mode'] == 'img':
362
+ data['mode'] = 'chat'
363
+
364
+ updated = True
365
+
366
+ # < 2.0.72
367
+ if old < parse_version("2.0.72"):
368
+ print("Migrating config from < 2.0.72...")
369
+ if 'theme.markdown' not in data:
370
+ data['theme.markdown'] = True
371
+ prompt = 'IMAGE GENERATION: Whenever I provide a basic idea or concept for an image, such as \'a picture of ' \
372
+ 'mountains\', I want you to ALWAYS translate it into English and expand and elaborate on this idea. ' \
373
+ 'Use your knowledge and creativity to add details that would make the image more vivid and ' \
374
+ 'interesting. This could include specifying the time of day, weather conditions, surrounding ' \
375
+ 'environment, and any additional elements that could enhance the scene. Your goal is to create a ' \
376
+ 'detailed and descriptive prompt that provides DALL-E with enough information to generate a rich ' \
377
+ 'and visually appealing image. Remember to maintain the original intent of my request while ' \
378
+ 'enriching the description with your imaginative details. HOW TO START IMAGE GENERATION: to start ' \
379
+ 'image generation return to me prepared prompt in JSON format, all in one line, using following ' \
380
+ 'syntax: ~###~{"cmd": "image", "params": {"query": "your query here"}}~###~. Use ONLY this syntax ' \
381
+ 'and remember to surround JSON string with ~###~. DO NOT use any other syntax. Use English in the ' \
382
+ 'generated JSON command, but conduct all the remaining parts of the discussion with me in the ' \
383
+ 'language in which I am speaking to you. The image will be generated on my machine immediately ' \
384
+ 'after the command is issued, allowing us to discuss the photo once it has been created. Please ' \
385
+ 'engage with me about the photo itself, not only by giving the generate command. '
386
+ if 'openai_dalle' not in data['plugins']:
387
+ data['plugins']['openai_dalle'] = {}
388
+ data['plugins']['openai_dalle']['prompt'] = prompt # fixed prompt
389
+ updated = True
390
+
391
+ # < 2.0.75
392
+ if old < parse_version("2.0.75"):
393
+ print("Migrating config from < 2.0.75...")
394
+ if 'updater.check.launch' not in data:
395
+ data['updater.check.launch'] = True
396
+ if 'updater.check.bg' not in data:
397
+ data['updater.check.bg'] = False
398
+ updated = True
399
+
400
+ # < 2.0.78
401
+ if old < parse_version("2.0.78"):
402
+ print("Migrating config from < 2.0.78...")
403
+ if 'render.plain' not in data:
404
+ data['render.plain'] = False
405
+ updated = True
406
+
407
+ # < 2.0.81
408
+ if old < parse_version("2.0.81"):
409
+ print("Migrating config from < 2.0.81...")
410
+ patch_css('markdown.light.css', True) # force replace file
411
+ updated = True
412
+
413
+ # < 2.0.85
414
+ if old < parse_version("2.0.85"):
415
+ print("Migrating config from < 2.0.85...")
416
+ prompt = "AUTONOMOUS MODE:\n1. You will now enter self-dialogue mode, where you will be conversing with " \
417
+ "yourself, not with a human.\n2. When you enter self-dialogue mode, remember that you are engaging " \
418
+ "in a conversation with yourself. Any user input will be considered a reply featuring your previous response.\n" \
419
+ "3. The objective of this self-conversation is well-defined—focus on achieving it.\n" \
420
+ "4. Your new message should be a continuation of the last response you generated, essentially replying" \
421
+ " to yourself and extending it.\n5. After each response, critically evaluate its effectiveness " \
422
+ "and alignment with the goal. If necessary, refine your approach.\n6. Incorporate self-critique " \
423
+ "after every response to capitalize on your strengths and address areas needing improvement.\n7. To " \
424
+ "advance towards the goal, utilize all the strategic thinking and resources at your disposal.\n" \
425
+ "8. Ensure that the dialogue remains coherent and logical, with each response serving as a stepping " \
426
+ "stone towards the ultimate objective.\n9. Treat the entire dialogue as a monologue aimed at devising" \
427
+ " the best possible solution to the problem.\n10. Conclude the self-dialogue upon realizing the " \
428
+ "goal or reaching a pivotal conclusion that meets the initial criteria.\n11. You are allowed to use " \
429
+ "any commands and tools without asking for it.\n12. While using commands, always use the correct " \
430
+ "syntax and never interrupt the command before generating the full instruction.\n13. ALWAYS break " \
431
+ "down the main task into manageable logical subtasks, systematically addressing and analyzing each" \
432
+ " one in sequence.\n14. With each subsequent response, make an effort to enhance your previous " \
433
+ "reply by enriching it with new ideas and do it automatically without asking for it.\n14. Any input " \
434
+ "that begins with 'user: ' will come from me, and I will be able to provide you with ANY additional " \
435
+ "commands or goal updates in this manner. The other inputs, not prefixed with 'user: ' will represent" \
436
+ " your previous responses.\n15. Start by breaking down the task into as many smaller sub-tasks as " \
437
+ "possible, then proceed to complete each one in sequence. Next, break down each sub-task into even " \
438
+ "smaller tasks, carefully and step by step go through all of them until the required goal is fully " \
439
+ "and correctly achieved.\n"
440
+ if 'self_loop' not in data['plugins']:
441
+ data['plugins']['self_loop'] = {}
442
+ data['plugins']['self_loop']['prompt'] = prompt # fixed prompt
443
+
444
+ # before fix (from 2.0.72)
445
+ prompt = 'IMAGE GENERATION: Whenever I provide a basic idea or concept for an image, such as \'a picture of ' \
446
+ 'mountains\', I want you to ALWAYS translate it into English and expand and elaborate on this idea. ' \
447
+ 'Use your knowledge and creativity to add details that would make the image more vivid and ' \
448
+ 'interesting. This could include specifying the time of day, weather conditions, surrounding ' \
449
+ 'environment, and any additional elements that could enhance the scene. Your goal is to create a ' \
450
+ 'detailed and descriptive prompt that provides DALL-E with enough information to generate a rich ' \
451
+ 'and visually appealing image. Remember to maintain the original intent of my request while ' \
452
+ 'enriching the description with your imaginative details. HOW TO START IMAGE GENERATION: to start ' \
453
+ 'image generation return to me prepared prompt in JSON format, all in one line, using following ' \
454
+ 'syntax: ~###~{"cmd": "image", "params": {"query": "your query here"}}~###~. Use ONLY this syntax ' \
455
+ 'and remember to surround JSON string with ~###~. DO NOT use any other syntax. Use English in the ' \
456
+ 'generated JSON command, but conduct all the remaining parts of the discussion with me in the ' \
457
+ 'language in which I am speaking to you. The image will be generated on my machine immediately ' \
458
+ 'after the command is issued, allowing us to discuss the photo once it has been created. Please ' \
459
+ 'engage with me about the photo itself, not only by giving the generate command. '
460
+ if 'openai_dalle' not in data['plugins']:
461
+ data['plugins']['openai_dalle'] = {}
462
+ data['plugins']['openai_dalle']['prompt'] = prompt # fixed prompt
463
+
464
+ updated = True
465
+
466
+ # < 2.0.88
467
+ if old < parse_version("2.0.88"):
468
+ print("Migrating config from < 2.0.88...")
469
+ prompt = "AUTONOMOUS MODE:\n1. You will now enter self-dialogue mode, where you will be conversing with " \
470
+ "yourself, not with a human.\n2. When you enter self-dialogue mode, remember that you are engaging " \
471
+ "in a conversation with yourself. Any user input will be considered a reply featuring your previous response.\n" \
472
+ "3. The objective of this self-conversation is well-defined—focus on achieving it.\n" \
473
+ "4. Your new message should be a continuation of the last response you generated, essentially replying" \
474
+ " to yourself and extending it.\n5. After each response, critically evaluate its effectiveness " \
475
+ "and alignment with the goal. If necessary, refine your approach.\n6. Incorporate self-critique " \
476
+ "after every response to capitalize on your strengths and address areas needing improvement.\n7. To " \
477
+ "advance towards the goal, utilize all the strategic thinking and resources at your disposal.\n" \
478
+ "8. Ensure that the dialogue remains coherent and logical, with each response serving as a stepping " \
479
+ "stone towards the ultimate objective.\n9. Treat the entire dialogue as a monologue aimed at devising" \
480
+ " the best possible solution to the problem.\n10. Conclude the self-dialogue upon realizing the " \
481
+ "goal or reaching a pivotal conclusion that meets the initial criteria.\n11. You are allowed to use " \
482
+ "any commands and tools without asking for it.\n12. While using commands, always use the correct " \
483
+ "syntax and never interrupt the command before generating the full instruction.\n13. ALWAYS break " \
484
+ "down the main task into manageable logical subtasks, systematically addressing and analyzing each" \
485
+ " one in sequence.\n14. With each subsequent response, make an effort to enhance your previous " \
486
+ "reply by enriching it with new ideas and do it automatically without asking for it.\n15. Any input " \
487
+ "that begins with 'user: ' will come from me, and I will be able to provide you with ANY additional " \
488
+ "commands or goal updates in this manner. The other inputs, not prefixed with 'user: ' will represent" \
489
+ " your previous responses.\n16. Start by breaking down the task into as many smaller sub-tasks as " \
490
+ "possible, then proceed to complete each one in sequence. Next, break down each sub-task into even " \
491
+ "smaller tasks, carefully and step by step go through all of them until the required goal is fully " \
492
+ "and correctly achieved.\n"
493
+ if 'self_loop' not in data['plugins']:
494
+ data['plugins']['self_loop'] = {}
495
+ data['plugins']['self_loop']['prompt'] = prompt # fixed prompt
496
+ updated = True
497
+
498
+ # < 2.0.91
499
+ if old < parse_version("2.0.91"):
500
+ print("Migrating config from < 2.0.91...")
501
+ patch_css('style.dark.css', True) # force replace file
502
+ updated = True
503
+
504
+ # < 2.0.96
505
+ if old < parse_version("2.0.96"):
506
+ print("Migrating config from < 2.0.96...")
507
+ if 'img_quality' not in data:
508
+ data['img_quality'] = "standard"
509
+ updated = True
510
+
511
+ # < 2.0.98
512
+ if old < parse_version("2.0.98"):
513
+ print("Migrating config from < 2.0.98...")
514
+ data['img_resolution'] = "1792x1024" # char fix
515
+ patch_css('style.css', True) # force replace file
516
+ patch_css('style.light.css', True) # force replace file
517
+ patch_css('style.dark.css', True) # force replace file
518
+ updated = True
519
+
520
+ # < 2.0.99
521
+ if old < parse_version("2.0.99"):
522
+ print("Migrating config from < 2.0.99...")
523
+ if 'layout.splitters' in data:
524
+ if 'calendar' in data['layout.splitters']:
525
+ # restore if was hidden at < 2.0.99
526
+ if len(data['layout.splitters']['calendar']) == 2 \
527
+ and data['layout.splitters']['calendar'][1] == 0:
528
+ data['layout.splitters']['calendar'] = [100, 100]
529
+ if data['layout.density'] == 0:
530
+ data['layout.density'] = -1
531
+ updated = True
532
+
533
+ # < 2.0.100
534
+ if old < parse_version("2.0.100"):
535
+ print("Migrating config from < 2.0.100...")
536
+ # rename output dir to data dir
537
+ src = os.path.join(self.window.core.config.path, 'output')
538
+ dst = os.path.join(self.window.core.config.path, 'data')
539
+
540
+ # migrate data dir name
541
+ if os.path.exists(src):
542
+ # backup old data dir
543
+ if os.path.exists(dst):
544
+ backup = os.path.join(self.window.core.config.path, 'data.backup')
545
+ os.rename(dst, backup)
546
+ # rename "output" to "data"
547
+ if os.path.exists(src):
548
+ os.rename(src, dst)
549
+
550
+ # add llama-index config keys:
551
+ if "llama.idx.auto" not in data:
552
+ data["llama.idx.auto"] = False
553
+ if "llama.idx.auto.index" not in data:
554
+ data["llama.idx.auto.index"] = "base"
555
+ if "llama.idx.current" not in data:
556
+ data["llama.idx.current"] = "base"
557
+ if "llama.idx.db.index" not in data:
558
+ data["llama.idx.db.index"] = ""
559
+ if "llama.idx.db.last" not in data:
560
+ data["llama.idx.db.last"] = 0
561
+ if "llama.idx.list" not in data:
562
+ data["llama.idx.list"] = [
563
+ {
564
+ "id": "base",
565
+ "name": "Base",
566
+ }
567
+ ]
568
+ if "llama.idx.status" not in data:
569
+ data["llama.idx.status"] = {}
570
+
571
+ if "llama.log" not in data:
572
+ data["llama.log"] = False
573
+
574
+ updated = True
575
+
576
+ # < 2.0.101
577
+ if old < parse_version("2.0.101"):
578
+ print("Migrating config from < 2.0.101...")
579
+ if 'layout.tooltips' not in data:
580
+ data['layout.tooltips'] = True
581
+ updated = True
582
+
583
+ # < 2.0.102
584
+ if old < parse_version("2.0.102"):
585
+ print("Migrating config from < 2.0.102...")
586
+ if 'llama.hub.loaders' not in data:
587
+ data['llama.hub.loaders'] = [
588
+ {
589
+ "ext": "pptx",
590
+ "loader": "PptxReader"
591
+ },
592
+ {
593
+ "ext": "png,jpg,jpeg",
594
+ "loader": "ImageReader"
595
+ }
596
+ ]
597
+ updated = True
598
+
599
+ # < 2.0.112
600
+ if old < parse_version("2.0.112"):
601
+ print("Migrating config from < 2.0.112...")
602
+ if 'img_dialog_open' not in data:
603
+ data['img_dialog_open'] = True
604
+ updated = True
605
+
606
+ # < 2.0.114
607
+ if old < parse_version("2.0.114"):
608
+ print("Migrating config from < 2.0.114...")
609
+ if 'llama.idx.storage' not in data:
610
+ data['llama.idx.storage'] = "SimpleVectorStore"
611
+ if 'llama.idx.storage.args' not in data:
612
+ data['llama.idx.storage.args'] = []
613
+ if 'llama.idx.raw' not in data:
614
+ data['llama.idx.raw'] = False
615
+ updated = True
616
+
617
+ # < 2.0.116
618
+ if old < parse_version("2.0.116"):
619
+ print("Migrating config from < 2.0.116...")
620
+ data['debug'] = False
621
+ updated = True
622
+
623
+ # < 2.0.118
624
+ if old < parse_version("2.0.118"):
625
+ print("Migrating config from < 2.0.118...")
626
+ if 'layout.tray' not in data:
627
+ data['layout.tray'] = True
628
+ updated = True
629
+
630
+ """
631
+ # < 2.0.119
632
+ if old < parse_version("2.0.119"):
633
+ print("Migrating config from < 2.0.119...")
634
+ if 'layout.minimized' not in data:
635
+ data['layout.minimized'] = False
636
+ updated = True
637
+ """
638
+
639
+ # < 2.0.121
640
+ if old < parse_version("2.0.121"):
641
+ print("Migrating config from < 2.0.121...")
642
+ if 'openai_vision' not in data['plugins']:
643
+ data['plugins']['openai_vision'] = {}
644
+ data['plugins']['openai_vision']['model'] = "gpt-4-vision-preview"
645
+ updated = True
646
+
647
+ # < 2.0.123
648
+ if old < parse_version("2.0.123"):
649
+ print("Migrating config from < 2.0.123...")
650
+ if 'llama.idx.recursive' not in data:
651
+ data['llama.idx.recursive'] = False
652
+ updated = True
653
+
654
+ # < 2.0.127
655
+ if old < parse_version("2.0.127"):
656
+ print("Migrating config from < 2.0.127...")
657
+ if 'upload.store' not in data:
658
+ data['upload.store'] = True
659
+ if 'upload.data_dir' not in data:
660
+ data['upload.data_dir'] = False
661
+ updated = True
662
+
663
+ # < 2.0.131
664
+ if old < parse_version("2.0.131"):
665
+ print("Migrating config from < 2.0.131...")
666
+ if 'self_loop' in data['plugins'] \
667
+ and 'prompts' not in data['plugins']['self_loop'] \
668
+ and 'prompt' in data['plugins']['self_loop'] \
669
+ and 'extended_prompt' in data['plugins']['self_loop']:
670
+
671
+ # copy old prompts to new list of prompts
672
+ data['plugins']['self_loop']['prompts'] = [
673
+ {
674
+ "enabled": True,
675
+ "name": "Default",
676
+ "prompt": data['plugins']['self_loop']['prompt'],
677
+ },
678
+ {
679
+ "enabled": False,
680
+ "name": "Extended",
681
+ "prompt": data['plugins']['self_loop']['extended_prompt'],
682
+ },
683
+ ]
684
+ updated = True
685
+
686
+ # < 2.0.132
687
+ if old < parse_version("2.0.132"):
688
+ print("Migrating config from < 2.0.132...")
689
+ if 'agent.auto_stop' not in data:
690
+ data['agent.auto_stop'] = True
691
+ if 'agent.iterations' not in data:
692
+ data['agent.iterations'] = 3
693
+ updated = True
694
+
695
+ # < 2.0.135
696
+ if old < parse_version("2.0.135"):
697
+ print("Migrating config from < 2.0.135...")
698
+ if 'agent.mode' not in data:
699
+ data['agent.mode'] = "chat"
700
+ if 'agent.idx' not in data:
701
+ data['agent.idx'] = "base"
702
+ updated = True
703
+
704
+ # < 2.0.138
705
+ if old < parse_version("2.0.138"):
706
+ print("Migrating config from < 2.0.138...")
707
+ if 'layout.tray.minimize' not in data:
708
+ data['layout.tray.minimize'] = False
709
+ updated = True
710
+
711
+ # < 2.0.139
712
+ if old < parse_version("2.0.139"):
713
+ print("Migrating config from < 2.0.139...")
714
+ data['updater.check.bg'] = True
715
+ if 'license.accepted' not in data:
716
+ data['license.accepted'] = False
717
+ if 'updater.check.bg.last_time' not in data:
718
+ data['updater.check.bg.last_time'] = None
719
+ if 'updater.check.bg.last_version' not in data:
720
+ data['updater.check.bg.last_version'] = None
721
+ updated = True
722
+
723
+ # < 2.0.142
724
+ if old < parse_version("2.0.142"):
725
+ print("Migrating config from < 2.0.142...")
726
+ if 'agent.goal.notify' not in data:
727
+ data['agent.goal.notify'] = True
728
+ updated = True
729
+
730
+ # < 2.0.143
731
+ if old < parse_version("2.0.143"):
732
+ print("Migrating config from < 2.0.143...")
733
+ if 'ctx.records.filter' not in data:
734
+ data['ctx.records.filter'] = "all"
735
+ updated = True
736
+
737
+ # < 2.0.144
738
+ if old < parse_version("2.0.144"):
739
+ print("Migrating config from < 2.0.144...")
740
+ if 'cmd_history' in data['plugins'] \
741
+ and 'syntax_get_ctx_list_in_date_range' in data['plugins']['cmd_history']:
742
+ # remove
743
+ del data['plugins']['cmd_history']['syntax_get_ctx_list_in_date_range']
744
+ if 'cmd_history' in data['plugins'] \
745
+ and 'syntax_get_ctx_content_by_id' in data['plugins']['cmd_history']:
746
+ # remove
747
+ del data['plugins']['cmd_history']['syntax_get_ctx_content_by_id']
748
+ updated = True
749
+
750
+ # < 2.0.149
751
+ if old < parse_version("2.0.149"):
752
+ print("Migrating config from < 2.0.149...")
753
+ # logger
754
+ if 'log.dalle' not in data:
755
+ data['log.dalle'] = False
756
+ if 'log.level' not in data:
757
+ data['log.level'] = "error"
758
+ if 'log.plugins' not in data:
759
+ data['log.plugins'] = False
760
+ if 'log.assistants' not in data:
761
+ data['log.assistants'] = False
762
+ if 'log.llama' not in data:
763
+ if 'llama.log' in data:
764
+ data['log.llama'] = data['llama.log']
765
+ del data['llama.log']
766
+ else:
767
+ data['log.llama'] = False
768
+
769
+ # painter
770
+ if 'painter.brush.color' not in data:
771
+ data['painter.brush.color'] = 'Black'
772
+ if 'painter.brush.mode' not in data:
773
+ data['painter.brush.mode'] = 'brush'
774
+ if 'painter.brush.size' not in data:
775
+ data['painter.brush.size'] = 3
776
+ updated = True
777
+
778
+ # < 2.0.152
779
+ if old < parse_version("2.0.152"):
780
+ print("Migrating config from < 2.0.152...")
781
+ data['cmd.prompt'] = cfg_get_base('cmd.prompt') # bg run fix
782
+ updated = True
783
+
784
+ # < 2.0.153
785
+ if old < parse_version("2.0.153"):
786
+ print("Migrating config from < 2.0.153...")
787
+ if 'layout.dialog.geometry.store' not in data:
788
+ data['layout.dialog.geometry.store'] = True
789
+ updated = True
790
+
791
+ # < 2.0.157
792
+ if old < parse_version("2.0.157"):
793
+ # decrease chunk size
794
+ print("Migrating config from < 2.0.157...")
795
+ if 'cmd_web_google' in data['plugins'] \
796
+ and 'chunk_size' in data['plugins']['cmd_web_google']:
797
+ if data['plugins']['cmd_web_google']['chunk_size'] > 20000:
798
+ data['plugins']['cmd_web_google']['chunk_size'] = 20000
799
+ updated = True
800
+
801
+ # < 2.0.161
802
+ if old < parse_version("2.0.161"):
803
+ print("Migrating config from < 2.0.161...")
804
+ if 'ctx.search_content' not in data:
805
+ data['ctx.search_content'] = False
806
+ if 'download.dir' not in data:
807
+ data['download.dir'] = "download"
808
+ updated = True
809
+
810
+ # < 2.0.162 - migrate indexes into db
811
+ if old < parse_version("2.0.162"):
812
+ print("Migrating indexes from < 2.0.162...")
813
+ if 'llama.idx.replace_old' not in data:
814
+ data['llama.idx.replace_old'] = True
815
+ self.window.core.idx.patch(old)
816
+ updated = True
817
+
818
+ # < 2.0.164 - migrate indexes into db
819
+ if old < parse_version("2.0.164"):
820
+ print("Migrating config from < 2.0.164...")
821
+
822
+ # Migrate plugins to provider-based versions
823
+
824
+ # rename cmd_web_google to cmd_web
825
+ if 'cmd_web_google' in data["plugins"]:
826
+ data["plugins"]["cmd_web"] = data["plugins"]["cmd_web_google"]
827
+ del data["plugins"]["cmd_web_google"]
828
+ if 'cmd_web_google' in data["plugins_enabled"]:
829
+ data["plugins_enabled"]["cmd_web"] = data["plugins_enabled"]["cmd_web_google"]
830
+ del data["plugins_enabled"]["cmd_web_google"]
831
+
832
+ # rename audio_openai_whisper to audio_input
833
+ if 'audio_openai_whisper' in data["plugins"]:
834
+ data["plugins"]["audio_input"] = data["plugins"]["audio_openai_whisper"]
835
+ del data["plugins"]["audio_openai_whisper"]
836
+ if 'audio_openai_whisper' in data["plugins_enabled"]:
837
+ data["plugins_enabled"]["audio_input"] = data["plugins_enabled"]["audio_openai_whisper"]
838
+ del data["plugins_enabled"]["audio_openai_whisper"]
839
+
840
+ # migrate model to whisper_model
841
+ if 'audio_input' in data["plugins"] and "model" in data["plugins"]["audio_input"]:
842
+ data["plugins"]["audio_input"]["whisper_model"] = data["plugins"]["audio_input"]["model"]
843
+ del data["plugins"]["audio_input"]["model"]
844
+
845
+ # rename audio_openai_tts to audio_output
846
+ if 'audio_openai_tts' in data["plugins"]:
847
+ data["plugins"]["audio_output"] = data["plugins"]["audio_openai_tts"]
848
+ del data["plugins"]["audio_openai_tts"]
849
+ if 'audio_openai_tts' in data["plugins_enabled"]:
850
+ data["plugins_enabled"]["audio_output"] = data["plugins_enabled"]["audio_openai_tts"]
851
+ del data["plugins_enabled"]["audio_openai_tts"]
852
+
853
+ # migrate model and voice to openai_model and openai_voice
854
+ if 'audio_output' in data["plugins"] and "model" in data["plugins"]["audio_output"]:
855
+ data["plugins"]["audio_output"]["openai_model"] = data["plugins"]["audio_output"]["model"]
856
+ del data["plugins"]["audio_output"]["model"]
857
+ if 'audio_output' in data["plugins"] and "voice" in data["plugins"]["audio_output"]:
858
+ data["plugins"]["audio_output"]["openai_voice"] = data["plugins"]["audio_output"]["voice"]
859
+ del data["plugins"]["audio_output"]["voice"]
860
+
861
+ # migrate azure settings
862
+ if 'audio_azure' in data["plugins"] and "azure_api_key" in data["plugins"]["audio_azure"]:
863
+ data["plugins"]["audio_output"]["azure_api_key"] = data["plugins"]["audio_azure"]["azure_api_key"]
864
+ if 'audio_azure' in data["plugins"] and "azure_region" in data["plugins"]["audio_azure"]:
865
+ data["plugins"]["audio_output"]["azure_region"] = data["plugins"]["audio_azure"]["azure_region"]
866
+ if 'audio_azure' in data["plugins"] and "voice_en" in data["plugins"]["audio_azure"]:
867
+ data["plugins"]["audio_output"]["azure_voice_en"] = data["plugins"]["audio_azure"]["voice_en"]
868
+ if 'audio_azure' in data["plugins"] and "voice_pl" in data["plugins"]["audio_azure"]:
869
+ data["plugins"]["audio_output"]["azure_voice_pl"] = data["plugins"]["audio_azure"]["voice_pl"]
870
+
871
+ # remove audio voice
872
+ if 'audio_output' in data["plugins"] and "voice_en" in data["plugins"]["audio_output"]:
873
+ del data["plugins"]["audio_output"]["voice_en"]
874
+ if 'audio_output' in data["plugins"] and "voice_pl" in data["plugins"]["audio_output"]:
875
+ del data["plugins"]["audio_output"]["voice_pl"]
876
+
877
+ # remove audio_azure
878
+ if 'audio_azure' in data["plugins"]:
879
+ del data["plugins"]["audio_azure"]
880
+ if 'audio_azure' in data["plugins_enabled"]:
881
+ del data["plugins_enabled"]["audio_azure"]
882
+
883
+ updated = True
884
+
885
+ # < 2.0.165
886
+ if old < parse_version("2.0.165"):
887
+ print("Migrating config from < 2.0.165...")
888
+ if 'llama.idx.excluded_ext' not in data:
889
+ data['llama.idx.excluded_ext'] = "3g2,3gp,7z,a,aac,aiff,alac,apk,apk,apng,app,ar,avi,avif," \
890
+ "bin,bmp,bz2,cab,class,deb,deb,dll,dmg,dmg,drv,dsd,dylib," \
891
+ "dylib,ear,egg,elf,esd,exe,flac,flv,gif,gz,heic,heif,ico," \
892
+ "img,iso,jar,jpeg,jpg,ko,lib,lz,lz4,m2v,m4a,m4v,mkv,mov,mp3," \
893
+ "mp4,mpc,msi,nrg,o,ogg,ogv,pcm,pkg,pkg,png,psd,pyc,rar,rpm,rpm," \
894
+ "so,so,svg,swm,sys,tar,tiff,vdi,vhd,vhdx,vmdk,vob,war,wav," \
895
+ "webm,webp,whl,wim,wma,wmv,xz,zip,zst"
896
+ if 'cmd_custom' in data["plugins"]:
897
+ if 'cmds' in data["plugins"]["cmd_custom"]:
898
+ for i, cmd in enumerate(data["plugins"]["cmd_custom"]["cmds"]):
899
+ if "enabled" not in cmd:
900
+ data["plugins"]["cmd_custom"]["cmds"][i]["enabled"] = True
901
+ updated = True
902
+
903
+ # < 2.0.166 - migrate self_loop plugin to agent
904
+ if old < parse_version("2.0.166"):
905
+ print("Migrating config from < 2.0.166...")
906
+ if 'self_loop' in data["plugins"]:
907
+ data["plugins"]["agent"] = data["plugins"]["self_loop"]
908
+ del data["plugins"]["self_loop"]
909
+ if 'self_loop' in data["plugins_enabled"]:
910
+ data["plugins_enabled"]["agent"] = data["plugins_enabled"]["self_loop"]
911
+ del data["plugins_enabled"]["self_loop"]
912
+ updated = True
913
+
914
+ # < 2.0.170 - add audio input language
915
+ if old < parse_version("2.0.170"):
916
+ print("Migrating config from < 2.0.170...")
917
+ if 'audio_input' in data["plugins"]:
918
+ # add language to google_args if not present
919
+ is_lang = False
920
+ if "google_args" in data["plugins"]["audio_input"] \
921
+ and isinstance(data["plugins"]["audio_input"]["google_args"], list):
922
+ for option in data["plugins"]["audio_input"]["google_args"]:
923
+ if option["name"] == "language":
924
+ is_lang = True
925
+ break
926
+ if not is_lang:
927
+ if "google_args" not in data["plugins"]["audio_input"] or \
928
+ not isinstance(data["plugins"]["audio_input"]["google_args"], list):
929
+ data["plugins"]["audio_input"]["google_args"] = []
930
+ data["plugins"]["audio_input"]["google_args"].append({
931
+ "name": "language",
932
+ "value": "en-US",
933
+ "type": "str",
934
+ })
935
+
936
+ # add language to google_cloud_args if not present
937
+ is_lang = False
938
+ if "google_cloud_args" in data["plugins"]["audio_input"] \
939
+ and isinstance(data["plugins"]["audio_input"]["google_cloud_args"], list):
940
+ for option in data["plugins"]["audio_input"]["google_cloud_args"]:
941
+ if option["name"] == "language":
942
+ is_lang = True
943
+ break
944
+ if not is_lang:
945
+ if "google_cloud_args" not in data["plugins"]["audio_input"] or \
946
+ not isinstance(data["plugins"]["audio_input"]["google_cloud_args"], list):
947
+ data["plugins"]["audio_input"]["google_cloud_args"] = []
948
+ data["plugins"]["audio_input"]["google_cloud_args"].append({
949
+ "name": "language",
950
+ "value": "en-US",
951
+ "type": "str",
952
+ })
953
+
954
+ # add language to bing_args if not present
955
+ is_lang = False
956
+ if "bing_args" in data["plugins"]["audio_input"] \
957
+ and isinstance(data["plugins"]["audio_input"]["bing_args"], list):
958
+ for option in data["plugins"]["audio_input"]["bing_args"]:
959
+ if option["name"] == "language":
960
+ is_lang = True
961
+ break
962
+ if not is_lang:
963
+ if "bing_args" not in data["plugins"]["audio_input"] or \
964
+ not isinstance(data["plugins"]["audio_input"]["bing_args"], list):
965
+ data["plugins"]["audio_input"]["bing_args"] = []
966
+ data["plugins"]["audio_input"]["bing_args"].append({
967
+ "name": "language",
968
+ "value": "en-US",
969
+ "type": "str",
970
+ })
971
+ updated = True
972
+
973
+ # < 2.0.172 - fix cmd syntax
974
+ if old < parse_version("2.0.172"):
975
+ print("Migrating config from < 2.0.172...")
976
+ if 'cmd_files' in data["plugins"] and 'syntax_file_index' in data["plugins"]["cmd_files"]:
977
+ syntax = '"file_index": use it to index (embed in Vector Store) a file or directory for ' \
978
+ 'future use, params: "path"'
979
+ data["plugins"]["cmd_files"]["syntax_file_index"] = syntax
980
+ if 'cmd_web' in data["plugins"] and 'syntax_web_url_open' in data["plugins"]["cmd_web"]:
981
+ syntax = '"web_url_open": use it to get contents from a specific Web page. ' \
982
+ 'Use a custom summary prompt if necessary, otherwise a default summary will be used, ' \
983
+ 'params: "url", "summarize_prompt"'
984
+ data["plugins"]["cmd_web"]["syntax_web_url_open"] = syntax
985
+ if 'cmd_web' in data["plugins"] and 'syntax_web_url_raw' in data["plugins"]["cmd_web"]:
986
+ syntax = '"web_url_raw": use it to get RAW text/html content (not summarized) from ' \
987
+ 'a specific Web page, params: "url"'
988
+ data["plugins"]["cmd_web"]["syntax_web_url_raw"] = syntax
989
+ if 'cmd_web' in data["plugins"] and 'syntax_web_urls' in data["plugins"]["cmd_web"]:
990
+ syntax = '"web_urls": use it to search the Web for URLs to use, prepare a search query itself, ' \
991
+ 'a list of found links to websites will be returned, 10 links per page max. ' \
992
+ 'You can change the page or the number of links per page using the provided parameters, ' \
993
+ 'params: "query", "page", "num_links"'
994
+ data["plugins"]["cmd_web"]["syntax_web_urls"] = syntax
995
+ updated = True
996
+
997
+ # < 2.1.1
998
+ if old < parse_version("2.1.1"):
999
+ print("Migrating config from < 2.1.1...")
1000
+ if 'llama.hub.loaders.args' not in data:
1001
+ data['llama.hub.loaders.args'] = []
1002
+ updated = True
1003
+
1004
+ # < 2.1.2
1005
+ if old < parse_version("2.1.2"):
1006
+ print("Migrating config from < 2.1.2...")
1007
+ if 'llama.hub.loaders.use_local' not in data:
1008
+ data['llama.hub.loaders.use_local'] = False
1009
+ updated = True
1010
+
1011
+ # < 2.1.5 - syntax
1012
+ if old < parse_version("2.1.5"):
1013
+ print("Migrating config from < 2.1.5...")
1014
+ if 'cmd_files' in data["plugins"] and 'syntax_file_index' in data["plugins"]["cmd_files"]:
1015
+ syntax = '"file_index": use it to index (embed in Vector Store) a file or directory, params: "path"'
1016
+ data["plugins"]["cmd_files"]["syntax_file_index"] = syntax
1017
+ updated = True
1018
+
1019
+ # < 2.1.8 - syntax
1020
+ if old < parse_version("2.1.8"):
1021
+ print("Migrating config from < 2.1.8...")
1022
+ if 'idx_llama_index' in data["plugins"] and 'syntax_prepare_question' in data["plugins"]["idx_llama_index"]:
1023
+ syntax = 'Simplify the question into a short query for retrieving information from a vector store.'
1024
+ data["plugins"]["idx_llama_index"]["syntax_prepare_question"] = syntax
1025
+ updated = True
1026
+
1027
+ # < 2.1.9 - syntax
1028
+ if old < parse_version("2.1.9"):
1029
+ print("Migrating config from < 2.1.9...")
1030
+ if 'cmd_files' in data["plugins"] and 'syntax_read_file' in data["plugins"]["cmd_files"]:
1031
+ syntax = '"read_file": read data from file, if multiple files then pass list of files, params: "filename"'
1032
+ data["plugins"]["cmd_files"]["syntax_read_file"] = syntax
1033
+ if 'log.events' not in data:
1034
+ data["log.events"] = False
1035
+ updated = True
1036
+
1037
+ # < 2.1.10
1038
+ if old < parse_version("2.1.10"):
1039
+ print("Migrating config from < 2.1.10...")
1040
+
1041
+ # fix missing updated before >>>
1042
+ if 'cmd_files' in data["plugins"] and 'syntax_file_index' in data["plugins"]["cmd_files"]:
1043
+ syntax = '"file_index": use it to index (embed in Vector Store) a file or directory, params: "path"'
1044
+ data["plugins"]["cmd_files"]["syntax_file_index"] = syntax
1045
+ if 'idx_llama_index' in data["plugins"] and 'syntax_prepare_question' in data["plugins"]["idx_llama_index"]:
1046
+ syntax = 'Simplify the question into a short query for retrieving information from a vector store.'
1047
+ data["plugins"]["idx_llama_index"]["syntax_prepare_question"] = syntax
1048
+ if 'cmd_files' in data["plugins"] and 'syntax_read_file' in data["plugins"]["cmd_files"]:
1049
+ syntax = '"read_file": read data from file, if multiple files then pass list of files, params: "filename"'
1050
+ data["plugins"]["cmd_files"]["syntax_read_file"] = syntax
1051
+ if 'log.events' not in data:
1052
+ data["log.events"] = False
1053
+ # <<< fix missing updated before
1054
+
1055
+ # current
1056
+ if 'ctx.records.filter.labels' not in data:
1057
+ data["ctx.records.filter.labels"] = [0, 1, 2, 3, 4, 5, 6, 7]
1058
+
1059
+ if 'preset.plugins' not in data:
1060
+ data["preset.plugins"] = ""
1061
+
1062
+ if 'ctx.records.filter' in data and str(data["ctx.records.filter"]).startswith("label"):
1063
+ data["ctx.records.filter"] = "all"
1064
+ updated = True
1065
+
1066
+ if old < parse_version("2.1.12"):
1067
+ print("Migrating config from < 2.1.12...")
1068
+ if 'max_requests_limit' not in data:
1069
+ data["max_requests_limit"] = 60
1070
+ if 'ctx.allow_item_delete' not in data:
1071
+ data["ctx.allow_item_delete"] = True
1072
+ if 'ctx.counters.all' not in data:
1073
+ data["ctx.counters.all"] = False
1074
+ if 'agent.prompt.continue' not in data:
1075
+ data["agent.prompt.continue"] = "continue..."
1076
+ if 'api_endpoint' not in data:
1077
+ data["api_endpoint"] = "https://api.openai.com/v1"
1078
+ updated = True
1079
+
1080
+ if old < parse_version("2.1.15"):
1081
+ print("Migrating config from < 2.1.15...")
1082
+ if 'ctx.edit_icons' not in data:
1083
+ data["ctx.edit_icons"] = False
1084
+ if 'llama.idx.auto.modes' not in data:
1085
+ data["llama.idx.auto.modes"] = "chat,completion,vision,assistant,langchain,llama_index,agent"
1086
+ if 'ctx.allow_item_delete' in data:
1087
+ del data["ctx.allow_item_delete"]
1088
+ updated = True
1089
+
1090
+ if old < parse_version("2.1.16"):
1091
+ print("Migrating config from < 2.1.16...")
1092
+ if 'ctx.sources' not in data:
1093
+ data["ctx.sources"] = True
1094
+ updated = True
1095
+
1096
+ if old < parse_version("2.1.18"):
1097
+ print("Migrating config from < 2.1.18...")
1098
+ if 'ctx.audio' not in data:
1099
+ data["ctx.audio"] = True
1100
+ updated = True
1101
+
1102
+ if old < parse_version("2.1.19"):
1103
+ print("Migrating config from < 2.1.19...")
1104
+ if 'llama.idx.excluded_ext' in data:
1105
+ data["llama.idx.excluded.ext"] = copy.deepcopy(data["llama.idx.excluded_ext"])
1106
+ del data["llama.idx.excluded_ext"]
1107
+ if 'llama.idx.excluded.force' not in data:
1108
+ data["llama.idx.excluded.force"] = False
1109
+ if 'llama.idx.custom_meta' not in data:
1110
+ data["llama.idx.custom_meta"] = [
1111
+ {
1112
+ "extensions": "*",
1113
+ "key": "file_name",
1114
+ "value": "{relative_path}"
1115
+ }
1116
+ ]
1117
+ updated = True
1118
+
1119
+ # < 2.1.20
1120
+ if old < parse_version("2.1.20"):
1121
+ print("Migrating config from < 2.1.20...")
1122
+ data['cmd.prompt'] = cfg_get_base('cmd.prompt') # moved to json schema
1123
+ updated = True
1124
+
1125
+ # < 2.1.22
1126
+ if old < parse_version("2.1.22"):
1127
+ print("Migrating config from < 2.1.22...")
1128
+ if 'llama.idx.custom_meta.web' not in data:
1129
+ data["llama.idx.custom_meta.web"] = []
1130
+ updated = True
1131
+
1132
+ # < 2.1.23
1133
+ if old < parse_version("2.1.23"):
1134
+ print("Migrating config from < 2.1.23...")
1135
+ if 'llama.idx.embeddings.provider' not in data:
1136
+ data["llama.idx.embeddings.provider"] = "openai"
1137
+ if 'llama.idx.embeddings.args' not in data:
1138
+ data["llama.idx.embeddings.args"] = [
1139
+ {
1140
+ "name": "model",
1141
+ "value": "text-embedding-3-small",
1142
+ "type": "str"
1143
+ }
1144
+ ]
1145
+ if 'llama.idx.embeddings.env' not in data:
1146
+ data["llama.idx.embeddings.env"] = [
1147
+ {
1148
+ "name": "OPENAI_API_KEY",
1149
+ "value": "{api_key}",
1150
+ },
1151
+ {
1152
+ "name": "OPENAI_API_BASE",
1153
+ "value": "{api_endpoint}",
1154
+ }
1155
+ ]
1156
+ self.window.core.plugins.reset_options("cmd_web", [
1157
+ "cmd.web_url_open",
1158
+ "cmd.web_url_raw",
1159
+ ])
1160
+ updated = True
1161
+
1162
+ # < 2.1.26
1163
+ if old < parse_version("2.1.26"):
1164
+ print("Migrating config from < 2.1.26...")
1165
+ if 'agent.prompt.continue' in data and data['agent.prompt.continue'] == 'continue...':
1166
+ data["agent.prompt.continue"] = "continue if needed..."
1167
+ self.window.core.plugins.reset_options("idx_llama_index", [
1168
+ "prompt",
1169
+ ])
1170
+ updated = True
1171
+
1172
+ # < 2.1.28
1173
+ if old < parse_version("2.1.28"):
1174
+ print("Migrating config from < 2.1.28...")
1175
+ if 'log.ctx' not in data:
1176
+ data["log.ctx"] = True
1177
+ if 'llama.idx.embeddings.limit.rpm' not in data:
1178
+ data["llama.idx.embeddings.limit.rpm"] = 100
1179
+ updated = True
1180
+
1181
+ # < 2.1.29
1182
+ if old < parse_version("2.1.29"):
1183
+ print("Migrating config from < 2.1.29...")
1184
+ self.window.core.plugins.reset_options("cmd_code_interpreter", [
1185
+ "cmd.code_execute",
1186
+ ])
1187
+ updated = True
1188
+
1189
+ # < 2.1.31
1190
+ if old < parse_version("2.1.31"):
1191
+ print("Migrating config from < 2.1.31...")
1192
+ self.window.core.plugins.reset_options("cmd_code_interpreter", [
1193
+ "cmd.code_execute",
1194
+ ])
1195
+ files_to_move = [
1196
+ {"_interpreter.current.py": ".interpreter.current.py"},
1197
+ {"_interpreter.py": ".interpreter.output.py"},
1198
+ {"_interpreter.input.py": ".interpreter.input.py"},
1199
+ ]
1200
+ files_to_remove = [
1201
+ "_interpreter.tmp.py",
1202
+ ]
1203
+ dir = self.window.core.config.get_user_dir("data")
1204
+ try:
1205
+ for file in files_to_move:
1206
+ for src, dst in file.items():
1207
+ src = os.path.join(dir, src)
1208
+ dst = os.path.join(dir, dst)
1209
+ if os.path.exists(src):
1210
+ os.rename(src, dst)
1211
+ for file in files_to_remove:
1212
+ file = os.path.join(dir, file)
1213
+ if os.path.exists(file):
1214
+ os.remove(file)
1215
+ except Exception as e:
1216
+ print("Error while migrating interpreter files:", e)
1217
+
1218
+ updated = True
1219
+
1220
+ # < 2.1.32
1221
+ if old < parse_version("2.1.32"):
1222
+ print("Migrating config from < 2.1.32...")
1223
+ if 'ctx.use_extra' not in data:
1224
+ data["ctx.use_extra"] = True
1225
+
1226
+ self.window.core.plugins.reset_options("real_time", [
1227
+ "tpl",
1228
+ ])
1229
+
1230
+ # old keys first
1231
+ data['cmd.prompt'] = cfg_get_base('prompt.cmd') # new format
1232
+ data['cmd.prompt.extra'] = cfg_get_base('prompt.cmd.extra') # new format
1233
+ data['cmd.prompt.extra.assistants'] = cfg_get_base('prompt.cmd.extra.assistants') # new format
1234
+
1235
+ # new keys
1236
+ data['prompt.agent.goal'] = cfg_get_base('prompt.agent.goal') # new format
1237
+
1238
+ # replace to new keys
1239
+ self.window.core.config.replace_key(data, "img_prompt", "prompt.img")
1240
+ self.window.core.config.replace_key(data, "ctx.auto_summary.prompt", "prompt.ctx.auto_summary.user")
1241
+ self.window.core.config.replace_key(data, "ctx.auto_summary.system", "prompt.ctx.auto_summary.system")
1242
+ self.window.core.config.replace_key(data, "cmd.prompt", "prompt.cmd")
1243
+ self.window.core.config.replace_key(data, "cmd.prompt.extra", "prompt.cmd.extra")
1244
+ self.window.core.config.replace_key(data, "cmd.prompt.extra.assistants", "prompt.cmd.extra.assistants")
1245
+ self.window.core.config.replace_key(data, "agent.prompt.continue", "prompt.agent.continue")
1246
+ self.window.core.config.replace_key(data, "default_prompt", "prompt.default")
1247
+ updated = True
1248
+
1249
+ # < 2.1.35
1250
+ if old < parse_version("2.1.35"):
1251
+ print("Migrating config from < 2.1.35...")
1252
+ if 'interpreter.auto_clear' not in data:
1253
+ data["interpreter.auto_clear"] = True
1254
+ if 'interpreter.execute_all' not in data:
1255
+ data["interpreter.execute_all"] = True
1256
+ if 'interpreter.edit' not in data:
1257
+ data["interpreter.edit"] = False
1258
+ if 'interpreter.input' not in data:
1259
+ data["interpreter.input"] = ""
1260
+ if 'video.player.path' not in data:
1261
+ data["video.player.path"] = ""
1262
+ if 'video.player.volume' not in data:
1263
+ data["video.player.volume"] = 100
1264
+ if 'video.player.volume.mute' not in data:
1265
+ data["video.player.volume.mute"] = False
1266
+ updated = True
1267
+
1268
+ # < 2.1.37
1269
+ if old < parse_version("2.1.37"):
1270
+ print("Migrating config from < 2.1.37...")
1271
+ if 'audio.transcribe.convert_video' not in data:
1272
+ data["audio.transcribe.convert_video"] = True
1273
+ updated = True
1274
+
1275
+ # < 2.1.38
1276
+ if old < parse_version("2.1.38"):
1277
+ print("Migrating config from < 2.1.38...")
1278
+ if 'lang' in data and data['lang'] == 'ua':
1279
+ data["lang"] = "uk"
1280
+ updated = True
1281
+
1282
+ # < 2.1.45
1283
+ if old < parse_version("2.1.45"):
1284
+ print("Migrating config from < 2.1.45...")
1285
+ if 'ctx.copy_code' not in data:
1286
+ data["ctx.copy_code"] = True
1287
+ updated = True
1288
+
1289
+ # < 2.1.52
1290
+ if old < parse_version("2.1.52"):
1291
+ print("Migrating config from < 2.1.52...")
1292
+ if 'ctx.code_interpreter' not in data:
1293
+ data["ctx.code_interpreter"] = True
1294
+ updated = True
1295
+
1296
+ # < 2.1.59
1297
+ if old < parse_version("2.1.59"):
1298
+ print("Migrating config from < 2.1.59...")
1299
+ if 'render.code_syntax' not in data:
1300
+ data["render.code_syntax"] = "github-dark"
1301
+ if 'zoom' not in data:
1302
+ data["zoom"] = 1.0
1303
+ if 'ctx.convert_lists' not in data:
1304
+ data["ctx.convert_lists"] = False
1305
+ if 'render.engine' not in data:
1306
+ data["render.engine"] = "web"
1307
+ if 'render.open_gl' not in data:
1308
+ data["render.open_gl"] = False
1309
+
1310
+ # in snap, leave legacy render engine by default
1311
+ # if self.window.core.platforms.is_snap():
1312
+ # data["render.engine"] = "legacy"
1313
+
1314
+ # css upgrade
1315
+ patch_css('web.css', True) # NEW
1316
+ patch_css('web.light.css', True) # NEW
1317
+ patch_css('web.dark.css', True) # NEW
1318
+ updated = True
1319
+
1320
+ # < 2.1.60
1321
+ if old < parse_version("2.1.60"):
1322
+ print("Migrating config from < 2.1.60...")
1323
+ # css upgrade
1324
+ patch_css('web.css', True) # force update
1325
+ updated = True
1326
+
1327
+ # < 2.1.61
1328
+ if old < parse_version("2.1.61"):
1329
+ print("Migrating config from < 2.1.61...")
1330
+ # css upgrade
1331
+ patch_css('web.css', True) # force update
1332
+ updated = True
1333
+
1334
+ # < 2.1.63
1335
+ if old < parse_version("2.1.63"):
1336
+ print("Migrating config from < 2.1.63...")
1337
+ # css upgrade
1338
+ patch_css('web.css', True) # force update
1339
+ patch_css('web.light.css', True) # force update
1340
+ patch_css('web.dark.css', True) # force update
1341
+ updated = True
1342
+
1343
+ # < 2.1.70
1344
+ if old < parse_version("2.1.70"):
1345
+ print("Migrating config from < 2.1.70...")
1346
+ # css upgrade
1347
+ patch_css('web.css', True) # force update
1348
+ updated = True
1349
+
1350
+ # < 2.1.72
1351
+ if old < parse_version("2.1.72"):
1352
+ print("Migrating config from < 2.1.72...")
1353
+ # css upgrade
1354
+ patch_css('web.css', True) # force update
1355
+ patch_css('web.light.css', True) # force update
1356
+ patch_css('web.dark.css', True) # force update
1357
+ updated = True
1358
+
1359
+ # < 2.1.73
1360
+ if old < parse_version("2.1.73"):
1361
+ print("Migrating config from < 2.1.73...")
1362
+ # fix: issue #50
1363
+ if "llama.idx.embeddings.args" in data:
1364
+ for arg in data["llama.idx.embeddings.args"]:
1365
+ if "type" not in arg:
1366
+ arg["type"] = "str"
1367
+ updated = True
1368
+
1369
+ # < 2.1.74
1370
+ if old < parse_version("2.1.74"):
1371
+ print("Migrating config from < 2.1.74...")
1372
+ # css upgrade
1373
+ patch_css('web.css', True) # force update
1374
+ patch_css('web.light.css', True) # force update
1375
+ patch_css('web.dark.css', True) # force update
1376
+ updated = True
1377
+
1378
+ # < 2.1.75
1379
+ if old < parse_version("2.1.75"):
1380
+ print("Migrating config from < 2.1.75...")
1381
+ # css upgrade
1382
+ patch_css('web.css', True) # force update
1383
+ patch_css('web.light.css', True) # force update
1384
+ patch_css('web.dark.css', True) # force update
1385
+ updated = True
1386
+
1387
+ # < 2.1.76
1388
+ if old < parse_version("2.1.76"):
1389
+ print("Migrating config from < 2.1.76...")
1390
+ if 'render.blocks' not in data:
1391
+ data["render.blocks"] = True
1392
+ # css upgrade
1393
+ patch_css('web.css', True) # force update
1394
+ patch_css('web.light.css', True) # force update
1395
+ patch_css('web.dark.css', True) # force update
1396
+ updated = True
1397
+
1398
+ # < 2.1.78
1399
+ if old < parse_version("2.1.78"):
1400
+ print("Migrating config from < 2.1.78...")
1401
+ # css upgrade, scroll bg
1402
+ patch_css('web.css', True) # force update
1403
+ patch_css('web.light.css', True) # force update
1404
+ patch_css('web.dark.css', True) # force update
1405
+ updated = True
1406
+
1407
+ # < 2.1.79
1408
+ if old < parse_version("2.1.79"):
1409
+ print("Migrating config from < 2.1.79...")
1410
+ if 'assistant.store.hide_threads' not in data:
1411
+ data["assistant.store.hide_threads"] = True
1412
+ updated = True
1413
+
1414
+ # < 2.2.2
1415
+ if old < parse_version("2.2.2"):
1416
+ print("Migrating config from < 2.2.2...")
1417
+ if 'app.env' not in data:
1418
+ data["app.env"] = []
1419
+ updated = True
1420
+
1421
+ # < 2.2.7
1422
+ if old < parse_version("2.2.7"):
1423
+ print("Migrating config from < 2.2.7...")
1424
+ if 'prompt.agent.instruction' not in data:
1425
+ data["prompt.agent.instruction"] = ""
1426
+ if 'prompt.expert' not in data:
1427
+ data["prompt.expert"] = ""
1428
+ if 'experts.mode' not in data:
1429
+ data["experts.mode"] = "chat"
1430
+ # from base
1431
+ data["prompt.agent.instruction"] = cfg_get_base('prompt.agent.instruction')
1432
+ data["prompt.agent.continue"] = cfg_get_base('prompt.agent.continue')
1433
+ data["prompt.agent.goal"] = cfg_get_base('prompt.agent.goal')
1434
+ data["prompt.expert"] = cfg_get_base('prompt.expert')
1435
+ data["prompt.img"] = cfg_get_base('prompt.img')
1436
+ updated = True
1437
+
1438
+ # < 2.2.8
1439
+ if old < parse_version("2.2.8"):
1440
+ print("Migrating config from < 2.2.8...")
1441
+ if 'access.audio.event.speech' not in data:
1442
+ data["access.audio.event.speech"] = False
1443
+ if 'access.audio.notify.execute' not in data:
1444
+ data["access.audio.notify.execute"] = True
1445
+ if 'access.microphone.notify' not in data:
1446
+ data["access.microphone.notify"] = True
1447
+ if 'access.shortcuts' not in data:
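+ # default accessibility shortcuts below: Ctrl+Space toggles voice command, Ctrl+1..5 switch the main tabs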
1448
+ data["access.shortcuts"] = [
1449
+ {
1450
+ "action": "voice_cmd.toggle",
1451
+ "key": "Space",
1452
+ "key_modifier": "Control"
1453
+ },
1454
+ {
1455
+ "action": "tab.chat",
1456
+ "key": "1",
1457
+ "key_modifier": "Control"
1458
+ },
1459
+ {
1460
+ "action": "tab.files",
1461
+ "key": "2",
1462
+ "key_modifier": "Control"
1463
+ },
1464
+ {
1465
+ "action": "tab.calendar",
1466
+ "key": "3",
1467
+ "key_modifier": "Control"
1468
+ },
1469
+ {
1470
+ "action": "tab.draw",
1471
+ "key": "4",
1472
+ "key_modifier": "Control"
1473
+ },
1474
+ {
1475
+ "action": "tab.notepad",
1476
+ "key": "5",
1477
+ "key_modifier": "Control"
1478
+ }
1479
+ ]
1480
+ if 'access.voice_control' not in data:
1481
+ data["access.voice_control"] = False
1482
+ if 'access.voice_control.model' not in data:
1483
+ data["access.voice_control.model"] = "gpt-3.5-turbo"
1484
+ updated = True
1485
+
1486
+ # < 2.2.11
1487
+ if old < parse_version("2.2.11"):
1488
+ print("Migrating config from < 2.2.11...")
1489
+ if 'access.audio.event.speech.disabled' not in data:
1490
+ data["access.audio.event.speech.disabled"] = []
1491
+ updated = True
1492
+
1493
+ # < 2.2.14
1494
+ if old < parse_version("2.2.14"):
1495
+ print("Migrating config from < 2.2.14...")
1496
+ if 'access.voice_control.blacklist' not in data:
1497
+ data["access.voice_control.blacklist"] = []
1498
+ updated = True
1499
+
1500
+ # < 2.2.16
1501
+ if old < parse_version("2.2.16"):
1502
+ print("Migrating config from < 2.2.16...")
1503
+ if 'access.audio.use_cache' not in data:
1504
+ data["access.audio.use_cache"] = True
1505
+ updated = True
1506
+
1507
+ # < 2.2.20
1508
+ if old < parse_version("2.2.20"):
1509
+ print("Migrating config from < 2.2.20...")
1510
+ if 'func_call.native' not in data:
1511
+ data["func_call.native"] = True
1512
+ patch_css('web.css', True) # force update
1513
+ updated = True
1514
+
1515
+ # < 2.2.22
1516
+ if old < parse_version("2.2.22"):
1517
+ print("Migrating config from < 2.2.22...")
1518
+ if 'llama.idx.stop.error' not in data:
1519
+ data["llama.idx.stop.error"] = True
1520
+ updated = True
1521
+
1522
+ # < 2.2.25
1523
+ if old < parse_version("2.2.25"):
1524
+ print("Migrating config from < 2.2.25...")
1525
+ data["prompt.expert"] = cfg_get_base('prompt.expert')
1526
+ updated = True
1527
+
1528
+ # < 2.2.26
1529
+ if old < parse_version("2.2.26"):
1530
+ print("Migrating config from < 2.2.26...")
1531
+ if 'ctx.records.folders.top' not in data:
1532
+ data["ctx.records.folders.top"] = False
1533
+ if 'ctx.records.separators' not in data:
1534
+ data["ctx.records.separators"] = True
1535
+ if 'ctx.records.groups.separators' not in data:
1536
+ data["ctx.records.groups.separators"] = True
1537
+ updated = True
1538
+
1539
+ # < 2.2.27
1540
+ if old < parse_version("2.2.27"):
1541
+ print("Migrating config from < 2.2.27...")
1542
+ if 'ctx.records.pinned.separators' not in data:
1543
+ data["ctx.records.pinned.separators"] = False
1544
+ updated = True
1545
+
1546
+ # < 2.2.28
1547
+ if old < parse_version("2.2.28"):
1548
+ print("Migrating config from < 2.2.28...")
1549
+ if 'llama.idx.chat.mode' not in data:
1550
+ data["llama.idx.chat.mode"] = "context"
1551
+ if 'llama.idx.mode' not in data:
1552
+ data["llama.idx.mode"] = "chat"
1553
+ updated = True
1554
+
1555
+ # < 2.2.31
1556
+ if old < parse_version("2.2.31"):
1557
+ print("Migrating config from < 2.2.31...")
1558
+ if 'agent.continue.always' not in data:
1559
+ data["agent.continue.always"] = False
1560
+ if 'prompt.agent.continue.always' not in data:
1561
+ data["prompt.agent.continue.always"] = "Continue reasoning..."
1562
+ updated = True
1563
+
1564
+ # < 2.4.0
1565
+ if old < parse_version("2.4.0"):
1566
+ print("Migrating config from < 2.4.0...")
1567
+ if 'tabs.data' not in data:
1568
+ data["tabs.data"] = self.window.core.tabs.from_defaults()
1569
+ updated = True
1570
+
1571
+ # < 2.4.7
1572
+ if old < parse_version("2.4.7"):
1573
+ print("Migrating config from < 2.4.7...")
1574
+ self.window.core.plugins.reset_options("cmd_mouse_control", [
1575
+ "prompt",
1576
+ ])
1577
+ updated = True
1578
+
1579
+ # < 2.4.10
1580
+ if old < parse_version("2.4.10"):
1581
+ print("Migrating config from < 2.4.10...")
1582
+ if 'prompt.agent.continue.llama' not in data:
1583
+ data["prompt.agent.continue.llama"] = cfg_get_base('prompt.agent.continue.llama')
1584
+ if 'agent.llama.idx' not in data:
1585
+ data["agent.llama.idx"] = cfg_get_base('agent.llama.idx')
1586
+ if 'agent.llama.steps' not in data:
1587
+ data["agent.llama.steps"] = cfg_get_base('agent.llama.steps')
1588
+ if 'agent.llama.provider' not in data:
1589
+ data["agent.llama.provider"] = cfg_get_base('agent.llama.provider')
1590
+ if 'agent.llama.verbose' not in data:
1591
+ data["agent.llama.verbose"] = cfg_get_base('agent.llama.verbose')
1592
+ data["agent.goal.notify"] = False # disable by default
1593
+ updated = True
1594
+
1595
+ # < 2.4.11
1596
+ if old < parse_version("2.4.11"):
1597
+ print("Migrating config from < 2.4.11...")
1598
+ if 'api_proxy' not in data:
1599
+ data["api_proxy"] =""
1600
+ updated = True
1601
+
1602
+ # < 2.4.13
1603
+ if old < parse_version("2.4.13"):
1604
+ print("Migrating config from < 2.4.13...")
1605
+ data["interpreter.auto_clear"] = False
1606
+ if 'cmd_code_interpreter' in data['plugins'] \
1607
+ and 'cmd.code_execute' in data['plugins']['cmd_code_interpreter']:
1608
+ # remove
1609
+ del data['plugins']['cmd_code_interpreter']['cmd.code_execute']
1610
+ if 'cmd_code_interpreter' in data['plugins'] \
1611
+ and 'cmd.code_execute_all' in data['plugins']['cmd_code_interpreter']:
1612
+ # remove
1613
+ del data['plugins']['cmd_code_interpreter']['cmd.code_execute_all']
1614
+ if 'cmd_code_interpreter' in data['plugins'] \
1615
+ and 'cmd.code_execute_file' in data['plugins']['cmd_code_interpreter']:
1616
+ # remove
1617
+ del data['plugins']['cmd_code_interpreter']['cmd.code_execute_file']
1618
+ updated = True
1619
+
1620
+ # < 2.4.14
1621
+ if old < parse_version("2.4.14"):
1622
+ print("Migrating config from < 2.4.14...")
1623
+ if 'prompt.agent.llama.max_eval' not in data:
1624
+ data["prompt.agent.llama.max_eval"] = cfg_get_base('prompt.agent.llama.max_eval')
1625
+ if 'prompt.agent.llama.append_eval' not in data:
1626
+ data["prompt.agent.llama.append_eval"] = cfg_get_base('prompt.agent.llama.append_eval')
1627
+ if 'agent.llama.loop.enabled' not in data:
1628
+ data["agent.llama.loop.enabled"] = cfg_get_base('agent.llama.loop.enabled')
1629
+ if 'agent.llama.loop.score' not in data:
1630
+ data["agent.llama.loop.score"] = cfg_get_base('agent.llama.loop.score')
1631
+ updated = True
1632
+
1633
+ # < 2.4.15
1634
+ if old < parse_version("2.4.15"):
1635
+ print("Migrating config from < 2.4.15...")
1636
+ data["interpreter.auto_clear"] = False
1637
+ if 'cmd_code_interpreter' in data['plugins'] \
1638
+ and 'cmd.ipython_execute_new' in data['plugins']['cmd_code_interpreter']:
1639
+ # remove
1640
+ del data['plugins']['cmd_code_interpreter']['cmd.ipython_execute_new']
1641
+ if 'cmd_code_interpreter' in data['plugins'] \
1642
+ and 'cmd.ipython_execute' in data['plugins']['cmd_code_interpreter']:
1643
+ # remove
1644
+ del data['plugins']['cmd_code_interpreter']['cmd.ipython_execute']
1645
+ if 'cmd_code_interpreter' in data['plugins'] \
1646
+ and 'cmd.sys_exec' in data['plugins']['cmd_code_interpreter']:
1647
+ # remove
1648
+ del data['plugins']['cmd_code_interpreter']['cmd.sys_exec']
1649
+ if 'cmd_mouse_control' in data['plugins']:
1650
+ del data['plugins']['cmd_mouse_control']
1651
+ updated = True
1652
+
1653
+ # < 2.4.16
1654
+ if old < parse_version("2.4.16"):
1655
+ print("Migrating config from < 2.4.16...")
1656
+ data["interpreter.auto_clear"] = False
1657
+ if 'cmd_code_interpreter' in data['plugins'] \
1658
+ and 'cmd.ipython_execute_new' in data['plugins']['cmd_code_interpreter']:
1659
+ # remove
1660
+ del data['plugins']['cmd_code_interpreter']['cmd.ipython_execute_new']
1661
+ if 'cmd_code_interpreter' in data['plugins'] \
1662
+ and 'cmd.ipython_execute' in data['plugins']['cmd_code_interpreter']:
1663
+ # remove
1664
+ del data['plugins']['cmd_code_interpreter']['cmd.ipython_execute']
1665
+ updated = True
1666
+
1667
+ # < 2.4.19
1668
+ if old < parse_version("2.4.19"):
1669
+ print("Migrating config from < 2.4.19...")
1670
+ if 'layout.animation.disable' not in data:
1671
+ data["layout.animation.disable"] = cfg_get_base('layout.animation.disable')
1672
+ if 'cmd_code_interpreter' in data['plugins'] \
1673
+ and 'cmd.ipython_execute' in data['plugins']['cmd_code_interpreter']:
1674
+ # remove
1675
+ del data['plugins']['cmd_code_interpreter']['cmd.ipython_execute']
1676
+ updated = True
1677
+
1678
+ # < 2.4.21
1679
+ if old < parse_version("2.4.21"):
1680
+ print("Migrating config from < 2.4.21...")
1681
+ if 'ctx.attachment.mode' not in data:
1682
+ data["ctx.attachment.mode"] = cfg_get_base('ctx.attachment.mode')
1683
+ if 'ctx.attachment.summary.model' not in data:
1684
+ data["ctx.attachment.summary.model"] = cfg_get_base('ctx.attachment.summary.model')
1685
+ if 'ctx.attachment.verbose' not in data:
1686
+ data["ctx.attachment.verbose"] = cfg_get_base('ctx.attachment.verbose')
1687
+ updated = True
1688
+
1689
+ # < 2.4.22
1690
+ if old < parse_version("2.4.22"):
1691
+ print("Migrating config from < 2.4.22...")
1692
+ data["ctx.attachment.mode"] = cfg_get_base('ctx.attachment.mode')
1693
+ if 'ctx.attachment.img' not in data:
1694
+ data["ctx.attachment.img"] = cfg_get_base(
1695
+ 'ctx.attachment.img')
1696
+ updated = True
1697
+
1698
+ # < 2.4.29
1699
+ if old < parse_version("2.4.29"):
1700
+ print("Migrating config from < 2.4.29...")
1701
+ if 'cmd_code_interpreter' in data['plugins'] \
1702
+ and 'ipython_dockerfile' in data['plugins']['cmd_code_interpreter']:
1703
+ # remove
1704
+ del data['plugins']['cmd_code_interpreter']['ipython_dockerfile']
1705
+ updated = True
1706
+
1707
+ # < 2.4.31
1708
+ if old < parse_version("2.4.31"):
1709
+ print("Migrating config from < 2.4.31...")
1710
+ if 'attachments_auto_index' not in data:
1711
+ data["attachments_auto_index"] = cfg_get_base(
1712
+ 'attachments_auto_index')
1713
+ updated = True
1714
+
1715
+ # < 2.4.34
1716
+ if old < parse_version("2.4.34"):
1717
+ print("Migrating config from < 2.4.34...")
1718
+ if 'ctx.attachment.query.model' not in data:
1719
+ data["ctx.attachment.query.model"] = cfg_get_base(
1720
+ 'ctx.attachment.query.model')
1721
+ updated = True
1722
+
1723
+ # < 2.4.35
1724
+ if old < parse_version("2.4.35"):
1725
+ print("Migrating config from < 2.4.35...")
1726
+ data["ctx.edit_icons"] = True
1727
+ updated = True
1728
+
1729
+ # < 2.4.37
1730
+ if old < parse_version("2.4.37"):
1731
+ print("Migrating config from < 2.4.37...")
1732
+ if 'ctx.attachment.rag.history' not in data:
1733
+ data["ctx.attachment.rag.history"] = cfg_get_base(
1734
+ 'ctx.attachment.rag.history')
1735
+ if 'ctx.attachment.rag.history.max_items' not in data:
1736
+ data["ctx.attachment.rag.history.max_items"] = cfg_get_base(
1737
+ 'ctx.attachment.rag.history.max_items')
1738
+ updated = True
1739
+
1740
+ # < 2.4.38
1741
+ if old < parse_version("2.4.38"):
1742
+ print("Migrating config from < 2.4.38...")
1743
+ if 'theme.style' not in data:
1744
+ data["theme.style"] = "blocks"
1745
+ if 'audio.input.device' not in data:
1746
+ data["audio.input.device"] = "0"
1747
+ if 'audio.input.channels' not in data:
1748
+ data["audio.input.channels"] = 1
1749
+ if 'audio.input.rate' not in data:
1750
+ data["audio.input.rate"] = 44100
1751
+ patch_css('style.light.css', True) # force update
1752
+ updated = True
1753
+
1754
+ # < 2.4.39
1755
+ if old < parse_version("2.4.39"):
1756
+ print("Migrating config from < 2.4.39...")
1757
+ if 'layout.split' not in data:
1758
+ data["layout.split"] = False
1759
+ updated = True
1760
+
1761
+ # < 2.4.40
1762
+ if old < parse_version("2.4.40"):
1763
+ print("Migrating config from < 2.4.40...")
1764
+ if 'cmd_web' in data['plugins'] \
1765
+ and 'max_result_length' in data['plugins']['cmd_web']:
1766
+ del data['plugins']['cmd_web']['max_result_length']
1767
+ if 'cmd_web' in data['plugins'] \
1768
+ and 'cmd.web_search' in data['plugins']['cmd_web']:
1769
+ del data['plugins']['cmd_web']['cmd.web_search']
1770
+ if 'cmd_web' in data['plugins'] \
1771
+ and 'cmd.web_url_open' in data['plugins']['cmd_web']:
1772
+ del data['plugins']['cmd_web']['cmd.web_url_open']
1773
+ if 'cmd_web' in data['plugins'] \
1774
+ and 'cmd.web_url_raw' in data['plugins']['cmd_web']:
1775
+ del data['plugins']['cmd_web']['cmd.web_url_raw']
1776
+ patch_css('web-blocks.css', True) # force update
1777
+ patch_css('web-blocks.light.css', True) # force update
1778
+ patch_css('web-chatgpt.css', True) # force update
1779
+ patch_css('web-chatgpt_wide.css', True) # force update
1780
+ updated = True
1781
+
1782
+ # < 2.4.44
1783
+ if old < parse_version("2.4.44"):
1784
+ print("Migrating config from < 2.4.44...")
1785
+ data["ctx.records.folders.top"] = True
1786
+ updated = True
1787
+
1788
+ # < 2.4.45
1789
+ if old < parse_version("2.4.45"):
1790
+ print("Migrating config from < 2.4.45...")
1791
+ patch_css('style.css', True) # force update
1792
+ updated = True
1793
+
1794
+ # < 2.4.46
1795
+ if old < parse_version("2.4.46"):
1796
+ print("Migrating config from < 2.4.46...")
1797
+ if 'api_azure_version' not in data:
1798
+ data["api_azure_version"] = "2023-07-01-preview"
1799
+ if 'api_azure_endpoint' not in data:
1800
+ data["api_azure_endpoint"] = "https://<your-resource-name>.openai.azure.com/"
1801
+ if 'api_key_google' not in data:
1802
+ data["api_key_google"] = ""
1803
+ if 'api_key_anthropic' not in data:
1804
+ data["api_key_anthropic"] = ""
1805
+ if 'api_key_hugging_face' not in data:
1806
+ data["api_key_hugging_face"] = ""
1807
+ updated = True
1808
+
1809
+ # < 2.4.51
1810
+ if old < parse_version("2.4.51"):
1811
+ print("Migrating config from < 2.4.51...")
1812
+ if 'audio.input.stop_interval' not in data:
1813
+ data["audio.input.stop_interval"] = 10
1814
+ if 'audio.input.continuous' not in data:
1815
+ data["audio.input.continuous"] = False
1816
+
1817
+ # < 2.4.55
1818
+ if old < parse_version("2.4.55"):
1819
+ print("Migrating config from < 2.4.55...")
1820
+ if 'audio.input.timeout' not in data:
1821
+ data["audio.input.timeout"] = 120
1822
+ if 'audio.input.timeout.continuous' not in data:
1823
+ data["audio.input.timeout.continuous"] = False
1824
+
1825
+ # < 2.4.56
1826
+ if old < parse_version("2.4.56"):
1827
+ print("Migrating config from < 2.4.56...")
1828
+ remove_modifiers = ["Meta", "Keypad", "GroupSwitch"]
1829
+ if 'access.shortcuts' in data:
1830
+ for item in data['access.shortcuts']:
1831
+ if 'key_modifier' in item and item['key_modifier'] == 'Control':
1832
+ item['key_modifier'] = 'Ctrl'
1833
+ elif 'key_modifier' in item and item['key_modifier'] in remove_modifiers:
1834
+ item['key_modifier'] = ''
1835
+ updated = True
1836
+
1837
+ # < 2.5.0
1838
+ if old < parse_version("2.5.0"):
1839
+ print("Migrating config from < 2.5.0...")
1840
+ if 'api_key_deepseek' not in data:
1841
+ data["api_key_deepseek"] = ""
1842
+ updated = True
1843
+
1844
+ # < 2.5.7
1845
+ if old < parse_version("2.5.7"):
1846
+ print("Migrating config from < 2.5.7...")
1847
+ patch_css('web-blocks.css', True) # force update
1848
+ patch_css('web-chatgpt.css', True) # force update
1849
+ patch_css('web-chatgpt_wide.css', True) # force update
1850
+ updated = True
1851
+
1852
+ # < 2.5.8
1853
+ if old < parse_version("2.5.8"):
1854
+ print("Migrating config from < 2.5.8...")
1855
+ if 'api_key_perplexity' not in data:
1856
+ data["api_key_perplexity"] = ""
1857
+ if 'api_endpoint_perplexity' not in data:
1858
+ data["api_endpoint_perplexity"] = "https://api.perplexity.ai"
1859
+ updated = True
1860
+
1861
+ # < 2.5.17
1862
+ if old < parse_version("2.5.17"):
1863
+ print("Migrating config from < 2.5.17...")
1864
+ if 'remote_tools.web_search' not in data:
1865
+ data["remote_tools.web_search"] = True
1866
+ updated = True
1867
+
1868
+ # < 2.5.18
1869
+ if old < parse_version("2.5.18"):
1870
+ print("Migrating config from < 2.5.18...")
1871
+ if 'remote_tools.image' not in data:
1872
+ data["remote_tools.image"] = False
1873
+ updated = True
1874
+
1875
+ # < 2.5.19
1876
+ if old < parse_version("2.5.19"):
1877
+ print("Migrating config from < 2.5.19...")
1878
+ if 'api_use_responses' not in data:
1879
+ data["api_use_responses"] = True
1880
+ if 'api_key_xai' not in data:
1881
+ data["api_key_xai"] = ""
1882
+ if 'api_endpoint_xai' not in data:
1883
+ data["api_endpoint_xai"] = "https://api.x.ai/v1"
1884
+ updated = True
1885
+
1886
+ # < 2.5.20
1887
+ if old < parse_version("2.5.20"):
1888
+ print("Migrating config from < 2.5.20...")
1889
+ if 'api_endpoint_deepseek' not in data:
1890
+ data["api_endpoint_deepseek"] = "https://api.deepseek.com/v1"
1891
+ if 'api_endpoint_google' not in data:
1892
+ data["api_endpoint_google"] = "https://generativelanguage.googleapis.com/v1beta/openai"
1893
+ if "mode" in data and "mode" == "langchain": # deprecated mode
1894
+ data["mode"] = "chat"
1895
+ updated = True
1896
+
1897
+ # < 2.5.21
1898
+ if old < parse_version("2.5.21"):
1899
+ print("Migrating config from < 2.5.21...")
1900
+ patch_css('web-chatgpt.css', True) # force replace file
1901
+ updated = True
1902
+
1903
+ # < 2.5.24
1904
+ if old < parse_version("2.5.24"):
1905
+ print("Migrating config from < 2.5.24...")
1906
+ patch_css('web-chatgpt.css', True) # force replace file
1907
+ updated = True
1908
+
1909
+ # < 2.5.25
1910
+ if old < parse_version("2.5.25"):
1911
+ print("Migrating config from < 2.5.25...")
1912
+ if 'api_use_responses_llama' not in data:
1913
+ data["api_use_responses_llama"] = False
1914
+ updated = True
1915
+
1916
+ # < 2.5.27
1917
+ if old < parse_version("2.5.27"):
1918
+ print("Migrating config from < 2.5.27...")
1919
+ if 'remote_tools.code_interpreter' not in data:
1920
+ data["remote_tools.code_interpreter"] = False
1921
+ if 'llama.idx.react' not in data:
1922
+ data["llama.idx.react"] = True
1923
+ updated = True
1924
+
1925
+ # < 2.5.29
1926
+ if old < parse_version("2.5.29"):
1927
+ print("Migrating config from < 2.5.29...")
1928
+ if 'api_endpoint_anthropic' not in data:
1929
+ data["api_endpoint_anthropic"] = "https://api.anthropic.com/v1"
1930
+ updated = True
1931
+
1932
+ # < 2.5.31
1933
+ if old < parse_version("2.5.31"):
1934
+ print("Migrating config from < 2.5.31...")
1935
+ if 'llama.idx.chat.auto_retrieve' not in data:
1936
+ data["llama.idx.chat.auto_retrieve"] = True
1937
+ if 'api_key_mistral' not in data:
1938
+ data["api_key_mistral"] = ""
1939
+ if 'api_endpoint_mistral' not in data:
1940
+ data["api_endpoint_mistral"] = "https://api.mistral.ai/v1"
1941
+ updated = True
1942
+
1943
+ # < 2.5.35
1944
+ if old < parse_version("2.5.35"):
1945
+ print("Migrating config from < 2.5.35...")
1946
+ data["img_dialog_open"] = False
1947
+ updated = True
1948
+
1949
+ # < 2.5.36
1950
+ if old < parse_version("2.5.36"):
1951
+ print("Migrating config from < 2.5.36...")
1952
+ patch_css('style.css', True) # force replace file
1953
+ patch_css('style.dark.css', True) # force replace file
1954
+ patch_css('style.light.css', True) # force replace file
1955
+ patch_css('web-chatgpt.css', True) # force replace file
1956
+ patch_css('web-chatgpt.light.css', True) # force replace file
1957
+ patch_css('web-chatgpt.dark.css', True) # force replace file
1958
+ patch_css('web-chatgpt_wide.css', True) # force replace file
1959
+ patch_css('web-chatgpt_wide.light.css', True) # force replace file
1960
+ patch_css('web-chatgpt_wide.dark.css', True) # force replace file
1961
+ updated = True
1962
+
1963
+ # < 2.5.37
1964
+ if old < parse_version("2.5.37"):
1965
+ print("Migrating config from < 2.5.37...")
1966
+ patch_css('style.css', True) # force replace file
1967
+ patch_css('style.dark.css', True) # force replace file
1968
+ patch_css('style.light.css', True) # force replace file
1969
+ patch_css('web-chatgpt.css', True) # force replace file
1970
+ patch_css('web-chatgpt.light.css', True) # force replace file
1971
+ patch_css('web-chatgpt.dark.css', True) # force replace file
1972
+ patch_css('web-chatgpt_wide.css', True) # force replace file
1973
+ patch_css('web-chatgpt_wide.light.css', True) # force replace file
1974
+ patch_css('web-chatgpt_wide.dark.css', True) # force replace file
1975
+ updated = True
1976
+
1977
+ # < 2.5.40 - update tool prompts
1978
+ if old < parse_version("2.5.40"):
1979
+ print("Migrating config from < 2.5.40...")
1980
+
1981
+ # config
1982
+ data['prompt.agent.goal'] = cfg_get_base('prompt.agent.goal')
1983
+ data['prompt.agent.instruction'] = cfg_get_base('prompt.agent.instruction')
1984
+ data['prompt.ctx.auto_summary.user'] = cfg_get_base('prompt.ctx.auto_summary.user')
1985
+ data['prompt.cmd'] = cfg_get_base('prompt.cmd')
1986
+ data['prompt.cmd.extra'] = cfg_get_base('prompt.cmd.extra')
1987
+ data['prompt.cmd.extra.assistants'] = cfg_get_base('prompt.cmd.extra.assistants')
1988
+ data['prompt.expert'] = cfg_get_base('prompt.expert')
1989
+
1990
+ # plugins
1991
+ if 'openai_dalle' in data['plugins'] \
1992
+ and 'prompt' in data['plugins']['openai_dalle']:
1993
+ del data['plugins']['openai_dalle']['prompt']
1994
+ if 'idx_llama_index' in data['plugins'] \
1995
+ and 'prompt' in data['plugins']['idx_llama_index']:
1996
+ del data['plugins']['idx_llama_index']['prompt']
1997
+
1998
+ # < 2.5.41
1999
+ if old < parse_version("2.5.41"):
2000
+ print("Migrating config from < 2.5.41...")
2001
+ if "max_output_tokens" in data and "max_output_tokens" == 1024:
2002
+ data["max_output_tokens"] = 0 # update default value
2003
+ updated = True
2004
+
2005
+ # < 2.5.42 - action img padding
2006
+ if old < parse_version("2.5.42"):
2007
+ print("Migrating config from < 2.5.42...")
2008
+ patch_css('web-chatgpt.css', True) # force replace file
2009
+ patch_css('web-chatgpt_wide.css', True) # force replace file
2010
+ updated = True
2011
+
2012
+ # < 2.5.43 - remove cmd_code_interpreter fresh_kernel option
2013
+ if old < parse_version("2.5.43"):
2014
+ print("Migrating config from < 2.5.43...")
2015
+ # plugins
2016
+ if 'cmd_code_interpreter' in data['plugins'] \
2017
+ and 'fresh_kernel' in data['plugins']['cmd_code_interpreter']:
2018
+ del data['plugins']['cmd_code_interpreter']['fresh_kernel']
2019
+
2020
+ # < 2.5.51
2021
+ if old < parse_version("2.5.51"):
2022
+ print("Migrating config from < 2.5.51...")
2023
+ patch_css('style.css', True) # force replace file
2024
+ patch_css('style.dark.css', True) # force replace file
2025
+ patch_css('style.light.css', True) # force replace file
2026
+ updated = True
2027
+
2028
+ # < 2.5.54
2029
+ if old < parse_version("2.5.54"):
2030
+ print("Migrating config from < 2.5.54...")
2031
+ patch_css('web-chatgpt.css', True) # force replace file
2032
+ updated = True
2033
+
2034
+ # < 2.5.55
2035
+ if old < parse_version("2.5.55"):
2036
+ print("Migrating config from < 2.5.55...")
2037
+ patch_css('web-chatgpt.css', True) # force replace file
2038
+ updated = True
2039
+
2040
+ # < 2.5.60
2041
+ if old < parse_version("2.5.60"):
2042
+ print("Migrating config from < 2.5.60...")
2043
+ patch_css('style.css', True) # force replace file
2044
+ patch_css('style.dark.css', True) # force replace file
2045
+ patch_css('style.light.css', True) # force replace file
2046
+ patch_css('web-chatgpt.css', True) # force replace file
2047
+ patch_css('web-chatgpt.light.css', True) # force replace file
2048
+ patch_css('web-chatgpt.dark.css', True) # force replace file
2049
+ patch_css('web-chatgpt_wide.css', True) # force replace file
2050
+ patch_css('web-chatgpt_wide.light.css', True) # force replace file
2051
+ patch_css('web-chatgpt_wide.dark.css', True) # force replace file
2052
+ updated = True
2053
+
2054
+ # < 2.5.61
2055
+ if old < parse_version("2.5.61"):
2056
+ print("Migrating config from < 2.5.61..")
2057
+ if "agent.output.render.all" not in data:
2058
+ data["agent.output.render.all"] = False
2059
+ data["prompt.expert"] = cfg_get_base(
2060
+ 'prompt.expert')
2061
+ if "experts.use_agent" not in data:
2062
+ data["experts.use_agent"] = True
2063
+ updated = True
2064
+
2065
+ # < 2.5.63 - disable cmd.get_time in real_time plugin by default
2066
+ if old < parse_version("2.5.63"):
2067
+ print("Migrating config from < 2.5.63...")
2068
+ if 'real_time' in data['plugins'] \
2069
+ and 'cmd.get_time' in data['plugins']['real_time']:
2070
+ del data['plugins']['real_time']['cmd.get_time']
2071
+ updated = True
2072
+
2073
+ # < 2.5.64
2074
+ if old < parse_version("2.5.64"):
2075
+ print("Migrating config from < 2.5.64..")
2076
+ data["prompt.cmd"] = cfg_get_base(
2077
+ 'prompt.cmd')
2078
+ data["prompt.cmd.extra"] = cfg_get_base(
2079
+ 'prompt.cmd.extra')
2080
+ data["prompt.expert"] = cfg_get_base(
2081
+ 'prompt.expert')
2082
+ data["experts.use_agent"] = False
2083
+ updated = True
2084
+
2085
+ # < 2.5.65
2086
+ if old < parse_version("2.5.65"):
2087
+ print("Migrating config from < 2.5.65..")
2088
+ data["prompt.expert"] = cfg_get_base(
2089
+ 'prompt.expert')
2090
+ updated = True
2091
+
2092
+ # < 2.5.68
2093
+ if old < parse_version("2.5.68"):
2094
+ print("Migrating config from < 2.5.68..")
2095
+ if "agent.func_call.native" not in data:
2096
+ data["agent.func_call.native"] = False
2097
+ if "experts.func_call.native" not in data:
2098
+ data["experts.func_call.native"] = False
2099
+ updated = True
2100
+
2101
+ # < 2.5.69
2102
+ if old < parse_version("2.5.69"):
2103
+ print("Migrating config from < 2.5.69.")
2104
+ data["prompt.agent.continue"] = cfg_get_base(
2105
+ 'prompt.agent.continue')
2106
+ data["prompt.agent.goal"] = cfg_get_base(
2107
+ 'prompt.agent.goal')
2108
+ data["prompt.expert"] = cfg_get_base(
2109
+ 'prompt.expert')
2110
+ if "agent.api_use_responses" not in data:
2111
+ data["agent.api_use_responses"] = False
2112
+ if "experts.api_use_responses" not in data:
2113
+ data["experts.api_use_responses"] = False
2114
+ if "experts.internal.api_use_responses" not in data:
2115
+ data["experts.internal.api_use_responses"] = False
2116
+ if 'cmd_web' in data['plugins'] \
2117
+ and 'cmd.web_search' in data['plugins']['cmd_web']:
2118
+ del data['plugins']['cmd_web']['cmd.web_search']
2119
+ updated = True
2120
+
2121
+ # < 2.5.71
2122
+ if old < parse_version("2.5.71"):
2123
+ print("Migrating config from < 2.5.71.")
2124
+ if "remote_tools.computer_use.env" not in data:
2125
+ data["remote_tools.computer_use.env"] = ""
2126
+ if "remote_tools.mcp" not in data:
2127
+ data["remote_tools.mcp"] = False
2128
+ if "remote_tools.mcp.args" not in data:
2129
+ data["remote_tools.mcp.args"] = cfg_get_base(
2130
+ 'remote_tools.mcp.args')
2131
+ if "remote_tools.file_search" not in data:
2132
+ data["remote_tools.file_search"] = False
2133
+ if "remote_tools.file_search.args" not in data:
2134
+ data["remote_tools.file_search.args"] = ""
2135
+ if 'cmd_mouse_control' in data['plugins'] \
2136
+ and 'cmd.mouse_move' in data['plugins']['cmd_mouse_control']:
2137
+ del data['plugins']['cmd_mouse_control']['cmd.mouse_move']
2138
+ if 'cmd_mouse_control' in data['plugins'] \
2139
+ and 'cmd.mouse_scroll' in data['plugins']['cmd_mouse_control']:
2140
+ del data['plugins']['cmd_mouse_control']['cmd.mouse_scroll']
2141
+ if 'cmd_mouse_control' in data['plugins'] \
2142
+ and 'cmd.keyboard_type' in data['plugins']['cmd_mouse_control']:
2143
+ del data['plugins']['cmd_mouse_control']['cmd.keyboard_type']
2144
+ updated = True
2145
+
2146
+ # < 2.5.73
2147
+ if old < parse_version("2.5.73"):
2148
+ print("Migrating config from < 2.5.73...")
2149
+ patch_css('web-chatgpt.css', True) # force replace file
2150
+ patch_css('web-chatgpt.light.css', True) # force replace file
2151
+ patch_css('web-chatgpt_wide.css', True) # force replace file
2152
+ patch_css('web-chatgpt_wide.light.css', True) # force replace file
2153
+ patch_css('web-chatgpt_wide.dark.css', True) # force replace file
2154
+ updated = True
2155
+
2156
+ # < 2.5.76
2157
+ if old < parse_version("2.5.76"):
2158
+ print("Migrating config from < 2.5.76.")
2159
+ if "agent.llama.loop.mode" not in data:
2160
+ data["agent.llama.loop.mode"] = cfg_get_base(
2161
+ 'agent.llama.loop.mode')
2162
+ if "prompt.agent.llama.eval.complete" not in data:
2163
+ data["prompt.agent.llama.eval.complete"] = cfg_get_base(
2164
+ 'prompt.agent.llama.eval.complete')
2165
+ data["prompt.agent.llama.eval"] = cfg_get_base(
2166
+ 'prompt.agent.llama.eval')
2167
+
2168
+ # < 2.5.89
2169
+ if old < parse_version("2.5.89"):
2170
+ print("Migrating config from < 2.5.89.")
2171
+ if "audio.input.backend" not in data:
2172
+ data["audio.input.backend"] = "native"
2173
+ if "audio.output.backend" not in data:
2174
+ data["audio.output.backend"] = "native"
2175
+ if "audio.input.device" not in data:
2176
+ data["audio.input.device"] = "0"
2177
+ if "audio.output.device" not in data:
2178
+ data["audio.output.device"] = "0"
2179
+
2180
+ # < 2.5.90
2181
+ if old < parse_version("2.5.90"):
2182
+ print("Migrating config from < 2.5.90...")
2183
+ patch_css('style.dark.css', True) # force replace file
2184
+ patch_css('style.light.css', True) # force replace file
2185
+ patch_css('web-blocks.css', True) # force replace file
2186
+ patch_css('web-blocks.dark.css', True) # force replace file
2187
+ patch_css('web-blocks.light.css', True) # force replace file
2188
+ updated = True
2189
+
2190
+ # < 2.5.91
2191
+ if old < parse_version("2.5.91"):
2192
+ print("Migrating config from < 2.5.91...")
2193
+ if "audio.cache.enabled" not in data:
2194
+ data["audio.cache.enabled"] = True
2195
+ updated = True
2196
+
2197
+ # < 2.5.92
2198
+ if old < parse_version("2.5.92"):
2199
+ print("Migrating config from < 2.5.92...")
2200
+ if "audio.cache.max_files" not in data:
2201
+ data["audio.cache.max_files"] = 1000
2202
+ updated = True
2203
+
2204
+ # < 2.5.94
2205
+ if old < parse_version("2.5.94"):
2206
+ print("Migrating config from < 2.5.94...")
2207
+ if "api_endpoint_hugging_face" not in data:
2208
+ data["api_endpoint_hugging_face"] = "https://router.huggingface.co/v1"
2209
+
2210
+ # tips
2211
+ patch_css('web-chatgpt.css', True) # force replace file
2212
+ patch_css('web-chatgpt.light.css', True) # force replace file
2213
+ patch_css('web-chatgpt.dark.css', True) # force replace file
2214
+ patch_css('web-chatgpt_wide.css', True) # force replace file
2215
+ patch_css('web-chatgpt_wide.light.css', True) # force replace file
2216
+ patch_css('web-chatgpt_wide.dark.css', True) # force replace file
2217
+ patch_css('web-blocks.css', True) # force replace file
2218
+ patch_css('web-blocks.light.css', True) # force replace file
2219
+ patch_css('web-blocks.dark.css', True) # force replace file
2220
+ updated = True
2221
+
2222
+ # < 2.5.95
2223
+ if old < parse_version("2.5.95"):
2224
+ print("Migrating config from < 2.5.95...")
2225
+ if "personalize.about" not in data:
2226
+ data["personalize.about"] = ""
2227
+ if "personalize.modes" not in data:
2228
+ data["personalize.modes"] = "chat"
2229
+
2230
+ # avatars css
2231
+ patch_css('web-chatgpt.css', True) # force replace file
2232
+ patch_css('web-chatgpt.light.css', True) # force replace file
2233
+ patch_css('web-chatgpt.dark.css', True) # force replace file
2234
+ patch_css('web-chatgpt_wide.css', True) # force replace file
2235
+ patch_css('web-chatgpt_wide.light.css', True) # force replace file
2236
+ patch_css('web-chatgpt_wide.dark.css', True) # force replace file
2237
+ patch_css('web-blocks.css', True) # force replace file
2238
+ patch_css('web-blocks.light.css', True) # force replace file
2239
+ patch_css('web-blocks.dark.css', True) # force replace file
2240
+ updated = True
2241
+
2242
+ # < 2.5.98
2243
+ if old < parse_version("2.5.98"):
2244
+ print("Migrating config from < 2.5.98...")
2245
+ if "agent.openai.response.split" not in data:
2246
+ data["agent.openai.response.split"] = True
2247
+ updated = True
2248
+
2249
+ # < 2.6.0
2250
+ if old < parse_version("2.6.0"):
2251
+ print("Migrating config from < 2.6.0...")
2252
+ patch_css('style.light.css', True) # scrollbar, calendar fix
2253
+ patch_css('style.dark.css', True) # calendar fix
2254
+ updated = True
2255
+
2256
+ # < 2.6.8
2257
+ if old < parse_version("2.6.8"):
2258
+ print("Migrating config from < 2.6.8...")
2259
+ patch_css('web-chatgpt.light.css', True) # p color
2260
+ patch_css('web-chatgpt.dark.css', True) # p color
2261
+ patch_css('web-chatgpt_wide.light.css', True) # p color
2262
+ patch_css('web-chatgpt_wide.dark.css', True) # p color
2263
+ patch_css('style.light.css', True) # tree
2264
+ patch_css('style.dark.css', True) # tree
2265
+ updated = True
2266
+
2267
+ # < 2.6.10
2268
+ if old < parse_version("2.6.10"):
2269
+ print("Migrating config from < 2.6.10...")
2270
+ if "agent.idx.auto_retrieve" not in data:
2271
+ data["agent.idx.auto_retrieve"] = True
2272
+ if 'google' in data['plugins'] \
2273
+ and 'oauth_scopes' in data['plugins']['google']:
2274
+ # add documents scope
2275
+ if "https://www.googleapis.com/auth/documents" not in data['plugins']['google']['oauth_scopes']:
2276
+ data['plugins']['google']['oauth_scopes'] += " https://www.googleapis.com/auth/documents"
2277
+ updated = True
2278
+
2279
+ # < 2.6.21
2280
+ if old < parse_version("2.6.21"):
2281
+ print("Migrating config from < 2.6.21...")
2282
+ if "agent.output.render.all" not in data:
2283
+ data["agent.output.render.all"] = True
2284
+ updated = True
2285
+
2286
+ # < 2.6.23 -- fix: restore <p> color
2287
+ if old < parse_version("2.6.23"):
2288
+ print("Migrating config from < 2.6.23...")
2289
+ patch_css('web-chatgpt.dark.css', True)
2290
+ patch_css('web-chatgpt_wide.dark.css', True)
2291
+ patch_css('web-chatgpt.light.css', True)
2292
+ patch_css('web-chatgpt_wide.light.css', True)
2293
+ patch_css('web-blocks.dark.css', True)
2294
+ patch_css('web-blocks.light.css', True)
2295
+ updated = True
2296
+
2297
+ # < 2.6.24
2298
+ if old < parse_version("2.6.24"):
2299
+ print("Migrating config from < 2.6.24...")
2300
+ if "llama.idx.embeddings.default" not in data:
2301
+ data["llama.idx.embeddings.default"] = cfg_get_base(
2302
+ 'llama.idx.embeddings.default')
2303
+ updated = True
2304
+
2305
+ # < 2.6.25
2306
+ if old < parse_version("2.6.25"):
2307
+ print("Migrating config from < 2.6.25...")
2308
+ if "api_key_voyage" not in data:
2309
+ data["api_key_voyage"] = ""
2310
+ if "agent.llama.eval_model" not in data:
2311
+ data["agent.llama.eval_model"] = "_"
2312
+ if "llama.idx.embeddings.default" in data:
2313
+ providers = []
2314
+ for item in data["llama.idx.embeddings.default"]:
2315
+ p = item.get('provider', '')
2316
+ if p and p not in providers:
2317
+ providers.append(p)
2318
+
2319
+ if "anthropic" not in providers:
2320
+ data["llama.idx.embeddings.default"].append({
2321
+ "provider": "anthropic",
2322
+ "model": "voyage-3.5",
2323
+ })
2324
+ if "deepseek_api" not in providers:
2325
+ data["llama.idx.embeddings.default"].append({
2326
+ "provider": "deepseek_api",
2327
+ "model": "voyage-3.5",
2328
+ })
2329
+ if "mistral_ai" not in providers:
2330
+ data["llama.idx.embeddings.default"].append({
2331
+ "provider": "mistral_ai",
2332
+ "model": "mistral-embed",
2333
+ })
2334
+ if "x_ai" not in providers:
2335
+ data["llama.idx.embeddings.default"].append({
2336
+ "provider": "x_ai",
2337
+ "model": "",
2338
+ })
2339
+ updated = True
2340
+
2341
+ # < 2.6.26
2342
+ if old < parse_version("2.6.26"):
2343
+ print("Migrating config from < 2.6.26...")
2344
+ if "api_key_open_router" not in data:
2345
+ data["api_key_open_router"] = ""
2346
+ if "api_endpoint_open_router" not in data:
2347
+ data["api_endpoint_open_router"] = "https://openrouter.ai/api/v1"
2348
+ updated = True
2349
+
2350
+ # < 2.6.28 -- fix: cmd color
2351
+ if old < parse_version("2.6.28"):
2352
+ print("Migrating config from < 2.6.28...")
2353
+ patch_css('web-chatgpt.css', True)
2354
+ patch_css('web-chatgpt_wide.css', True)
2355
+ patch_css('web-chatgpt.dark.css', True)
2356
+ patch_css('web-chatgpt_wide.dark.css', True)
2357
+ patch_css('web-chatgpt.light.css', True)
2358
+ patch_css('web-chatgpt_wide.light.css', True)
2359
+ updated = True
2360
+
2361
+ # < 2.6.30
2362
+ if old < parse_version("2.6.30"):
2363
+ print("Migrating config from < 2.6.30...")
2364
+ if "api_native_google" not in data:
2365
+ data["api_native_google"] = True
2366
+ if "remote_tools.google.web_search" not in data:
2367
+ data["remote_tools.google.web_search"] = True
2368
+ if "remote_tools.google.code_interpreter" not in data:
2369
+ data["remote_tools.google.code_interpreter"] = False
2370
+ updated = True
2371
+
2372
+ # < 2.6.31
2373
+ if old < parse_version("2.6.31"):
2374
+ print("Migrating config from < 2.6.31...")
2375
+ if "log.realtime" not in data:
2376
+ data["log.realtime"] = False
2377
+ if "remote_tools.google.url_ctx" not in data:
2378
+ data["remote_tools.google.url_ctx"] = False
2379
+ if "audio.input.auto_turn" not in data:
2380
+ data["audio.input.auto_turn"] = False
2381
+ if "audio.input.vad.prefix" not in data:
2382
+ data["audio.input.vad.prefix"] = 300
2383
+ if "audio.input.vad.silence" not in data:
2384
+ data["audio.input.vad.silence"] = 2000
2385
+ updated = True
2386
+
2387
+ # < 2.6.32
2388
+ if old < parse_version("2.6.32"):
2389
+ print("Migrating config from < 2.6.32...")
2390
+
2391
+ data["prompt.img"] = cfg_get_base(
2392
+ 'prompt.img')
2393
+
2394
+ if "prompt.video" not in data:
2395
+ data["prompt.video"] = cfg_get_base('prompt.video')
2396
+ if "video.prompt_model" not in data:
2397
+ data["video.prompt_model"] = cfg_get_base( 'video.prompt_model')
2398
+ if "video.aspect_ratio" not in data:
2399
+ data["video.aspect_ratio"] = cfg_get_base( 'video.aspect_ratio')
2400
+ if "video.duration" not in data:
2401
+ data["video.duration"] = cfg_get_base('video.duration')
2402
+ if "video.fps" not in data:
2403
+ data["video.fps"] = cfg_get_base('video.fps')
2404
+ if "video.seed" not in data:
2405
+ data["video.seed"] = cfg_get_base('video.seed')
2406
+ if "video.negative_prompt" not in data:
2407
+ data["video.negative_prompt"] = cfg_get_base('video.negative_prompt')
2408
+ if "video.generate_audio" not in data:
2409
+ data["video.generate_audio"] = cfg_get_base('video.generate_audio')
2410
+ if "video.resolution" not in data:
2411
+ data["video.resolution"] = cfg_get_base('video.resolution')
2412
+
2413
+ # google vertex
2414
+ if "api_native_google.use_vertex" not in data:
2415
+ data["api_native_google.use_vertex"] = cfg_get_base('api_native_google.use_vertex')
2416
+ if "api_native_google.cloud_project" not in data:
2417
+ data["api_native_google.cloud_project"] = cfg_get_base('api_native_google.cloud_project')
2418
+ if "api_native_google.cloud_location" not in data:
2419
+ data["api_native_google.cloud_location"] = cfg_get_base('api_native_google.cloud_location')
2420
+ if "api_native_google.app_credentials" not in data:
2421
+ data["api_native_google.app_credentials"] = cfg_get_base('api_native_google.app_credentials')
2422
+
2423
+ # audio loop
2424
+ if "audio.input.loop" not in data:
2425
+ data["audio.input.loop"] = False
2426
+
2427
+ # add video player CSS
2428
+ patch_css('web-chatgpt.css', True)
2429
+ patch_css('web-chatgpt_wide.css', True)
2430
+ patch_css('web-blocks.css', True)
2431
+ updated = True
2432
+
2433
+ # < 2.6.35
2434
+ if old < parse_version("2.6.35"):
2435
+ print("Migrating config from < 2.6.35...")
2436
+ # remove will-change
2437
+ patch_css('web-chatgpt.css', True)
2438
+ patch_css('web-chatgpt_wide.css', True)
2439
+ patch_css('web-blocks.css', True)
2440
+ updated = True
2441
+
2442
+ # < 2.6.36
2443
+ if old < parse_version("2.6.36"):
2444
+ print("Migrating config from < 2.6.36...")
2445
+ # perf css
2446
+ patch_css('web-chatgpt.css', True)
2447
+ patch_css('web-chatgpt_wide.css', True)
2448
+ patch_css('web-blocks.css', True)
2449
+ updated = True
2450
+
2451
+ # < 2.6.37
2452
+ if old < parse_version("2.6.37"):
2453
+ print("Migrating config from < 2.6.37...")
2454
+
2455
+ # add: label-desc CSS
2456
+ patch_css('style.dark.css', True)
2457
+ patch_css('style.light.css', True)
2458
+
2459
+ # add: Anthropic SDK
2460
+ if "api_native_anthropic" not in data:
2461
+ data["api_native_anthropic"] = True
2462
+ if "remote_tools.anthropic.web_search" not in data:
2463
+ data["remote_tools.anthropic.web_search"] = True
2464
+
2465
+ # add: xAI SDK
2466
+ if "api_native_xai" not in data:
2467
+ data["api_native_xai"] = True
2468
+ if "remote_tools.xai.mode" not in data:
2469
+ data["remote_tools.xai.mode"] = "auto"
2470
+ if "remote_tools.xai.sources.web" not in data:
2471
+ data["remote_tools.xai.sources.web"] = True
2472
+ if "remote_tools.xai.sources.x" not in data:
2473
+ data["remote_tools.xai.sources.x"] = True
2474
+ if "remote_tools.xai.sources.news" not in data:
2475
+ data["remote_tools.xai.sources.news"] = False
2476
+
2477
+ updated = True
2478
+
2479
+ # < 2.6.40
2480
+ if old < parse_version("2.6.40"):
2481
+ print("Migrating config from < 2.6.40...")
2482
+ # perf css
2483
+ patch_css('web-chatgpt.css', True)
2484
+ patch_css('web-chatgpt_wide.css', True)
2485
+ patch_css('web-blocks.css', True)
2486
+ updated = True
2487
+
2488
+ # < 2.6.41
2489
+ if old < parse_version("2.6.41"):
2490
+ print("Migrating config from < 2.6.41...")
2491
+ if "render.memory.limit" not in data:
2492
+ data["render.memory.limit"] = "2.5GB"
2493
+ # ul p
2494
+ patch_css('web-chatgpt.css', True)
2495
+ patch_css('web-chatgpt_wide.css', True)
2496
+ patch_css('web-blocks.css', True)
2497
+ updated = True
2498
+
2499
+ # < 2.6.42
2500
+ if old < parse_version("2.6.42"):
2501
+ print("Migrating config from < 2.6.42...")
2502
+ if "render.code_syntax.stream_max_lines" not in data:
2503
+ data["render.code_syntax.stream_max_lines"] = 1000
2504
+ if "render.code_syntax.final_max_lines" not in data:
2505
+ data["render.code_syntax.final_max_lines"] = 1500
2506
+ if "render.code_syntax.final_max_chars" not in data:
2507
+ data["render.code_syntax.final_max_chars"] = 350000
2508
+ updated = True
2509
+
2510
+ return data, updated, is_old