pygpt-net 2.6.42__py3-none-any.whl → 2.6.43__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,813 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # ================================================== #
4
+ # This file is a part of PYGPT package #
5
+ # Website: https://pygpt.net #
6
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
+ # MIT License #
8
+ # Created By : Marcin Szczygliński #
9
+ # Updated Date: 2025.09.12 00:00:00 #
10
+ # ================================================== #
11
+
12
+ from typing import Tuple
13
+
14
+ from packaging.version import parse as parse_version, Version
15
+
16
+ from pygpt_net.core.types import (
17
+ MODE_RESEARCH,
18
+ MODE_CHAT,
19
+ MODE_AGENT_OPENAI,
20
+ MODE_COMPUTER,
21
+ MODE_EXPERT
22
+ )
23
+
24
+
25
class Patch:
    """Models-config migration patcher.

    Applies sequential, cumulative per-version migration steps to the
    models configuration (``models.json``), bringing a config written by an
    older app version up to the current one.  Step order is significant and
    must not be changed: later steps assume earlier ones already ran.
    """

    def __init__(self, window=None):
        """
        :param window: main application window (provides ``core`` containers)
        """
        self.window = window

    def execute(self, version: Version) -> Tuple[dict, bool]:
        """
        Migrate models config to the current app version.

        Each ``if old < parse_version("X.Y.Z")`` block below is a one-shot
        migration step applied when the stored config predates that version.

        :param version: current app version
        :return: tuple of (models dict, True if any migration step ran)
        """
        data = self.window.core.models.items
        base_data = self.window.core.models.get_base()
        from_base = self.window.core.models.from_base
        updated = False

        # version recorded in the models config file
        current = self.window.core.models.get_version()
        old = parse_version(current)

        # True when the models file predates the running app version
        # (currently informational only; kept for parity with callers)
        is_old = old < version

        # < 0.9.1
        if old < parse_version("0.9.1"):
            # apply meta only (not attached in 0.9.0)
            print("Migrating models from < 0.9.1...")
            updated = True

        # < 2.0.1
        if old < parse_version("2.0.1"):
            print("Migrating models from < 2.0.1...")
            self.window.core.updater.patch_file('models.json', True)  # force replace file
            self.window.core.models.load()
            data = self.window.core.models.items
            updated = True

        # < 2.0.96 <--- patch for llama-index modes
        if old < parse_version("2.0.96"):
            print("Migrating models from < 2.0.96...")
            self.window.core.updater.patch_file('models.json', True)  # force replace file
            self.window.core.models.load()
            data = self.window.core.models.items
            updated = True

        # < 2.0.105 <--- patch for llama-index gpt4-turbo
        if old < parse_version("2.0.105"):
            print("Migrating models from < 2.0.105...")
            self.window.core.updater.patch_file('models.json', True)  # force replace file
            self.window.core.models.load()
            data = self.window.core.models.items
            updated = True

        # < 2.0.107 <--- patch for deprecated davinci, replace with gpt-3.5-turbo-instruct
        if old < parse_version("2.0.107"):
            print("Migrating models from < 2.0.107...")
            if "text-davinci-002" in data:
                del data["text-davinci-002"]
            if "text-davinci-003" in data:
                model = data["text-davinci-003"]
                model.id = "gpt-3.5-turbo-instruct"
                model.name = "gpt-3.5-turbo-instruct"
                if "llama_index" in model.mode:
                    model.mode.remove("llama_index")
                if len(model.langchain["args"]) > 0:
                    if model.langchain["args"][0]["name"] == "model_name":
                        model.langchain["args"][0]["value"] = "gpt-3.5-turbo-instruct"
                model.llama_index["args"] = []
                model.llama_index["env"] = []
                model.llama_index["provider"] = None
                # re-key "text-davinci-003" as "gpt-3.5-turbo-instruct"
                if "gpt-3.5-turbo-instruct" not in data:
                    data["gpt-3.5-turbo-instruct"] = model
                    del data["text-davinci-003"]
            updated = True

        # < 2.0.123 <--- update names to models IDs
        if old < parse_version("2.0.123"):
            print("Migrating models from < 2.0.123...")
            if "gpt-4-1106-preview" in data:
                data["gpt-4-1106-preview"].name = "gpt-4-1106-preview"
            if "gpt-4-vision-preview" in data:
                data["gpt-4-vision-preview"].name = "gpt-4-vision-preview"
            updated = True

        # < 2.0.134 <--- add agent mode
        if old < parse_version("2.0.134"):
            print("Migrating models from < 2.0.134...")
            exclude = ["gpt-3.5-turbo-instruct", "gpt-4-vision-preview"]
            for model_id in data:
                model = data[model_id]
                if model.id.startswith("gpt-") and model.id not in exclude:
                    if "agent" not in model.mode:
                        model.mode.append("agent")
            updated = True

        # fix typo in gpt-4 turbo preview for llama
        if old < parse_version("2.1.15"):
            print("Migrating models from < 2.1.15...")
            if "gpt-4-turbo-preview" in data:
                data["gpt-4-turbo-preview"].llama_index["args"] = [
                    {
                        "name": "model",
                        "value": "gpt-4-turbo-preview",
                        "type": "str",
                    }
                ]
            updated = True

        # add API endpoint
        if old < parse_version("2.1.19"):
            print("Migrating models from < 2.1.19...")
            for model_id in data:
                model = data[model_id]
                if model.id.startswith("gpt-"):
                    # ensure OPENAI_API_BASE is present in llama_index env
                    if "env" not in model.llama_index:
                        model.llama_index["env"] = []
                    is_endpoint = False
                    for arg in model.llama_index["env"]:
                        if "OPENAI_API_BASE" in arg["name"]:
                            is_endpoint = True
                            break
                    if not is_endpoint:
                        model.llama_index["env"].append(
                            {
                                "name": "OPENAI_API_BASE",
                                "value": "{api_endpoint}",
                            }
                        )
                    # ensure OPENAI_API_BASE is present in langchain env
                    if "env" not in model.langchain:
                        model.langchain["env"] = []
                    is_endpoint = False
                    for arg in model.langchain["env"]:
                        if "OPENAI_API_BASE" in arg["name"]:
                            is_endpoint = True
                            break
                    if not is_endpoint:
                        model.langchain["env"].append(
                            {
                                "name": "OPENAI_API_BASE",
                                "value": "{api_endpoint}",
                            }
                        )
            updated = True

        if old < parse_version("2.1.45"):
            print("Migrating models from < 2.1.45...")
            # add missing 2024-04-09
            updated = True

        if old < parse_version("2.2.6"):
            print("Migrating models from < 2.2.6...")
            # add missing gpt-4-turbo
            updated = True

        # < 2.2.7 <--- add expert mode
        if old < parse_version("2.2.7"):
            print("Migrating models from < 2.2.7...")
            exclude = ["gpt-3.5-turbo-instruct", "gpt-4-vision-preview"]
            for model_id in data:
                model = data[model_id]
                if model.id.startswith("gpt-") and model.id not in exclude:
                    if "expert" not in model.mode:
                        model.mode.append("expert")
            updated = True

        # < 2.2.19 <--- add gpt-4o
        if old < parse_version("2.2.19"):
            print("Migrating models from < 2.2.19...")
            # add gpt-4o
            updated = True

        # < 2.2.20 <--- add gpt-4o-mini
        if old < parse_version("2.2.20"):
            print("Migrating models from < 2.2.20...")
            # add gpt-4o-mini
            updated = True

        # < 2.2.22 <--- add Llama index models
        if old < parse_version("2.2.22"):
            print("Migrating models from < 2.2.22...")
            # add Gemini, Claude, Llama3, Mistral and etc.
            updated = True

        # < 2.2.28 <--- add Llama index models
        if old < parse_version("2.2.28"):
            print("Migrating models from < 2.2.28...")
            # add Llama3.1 70b and 405b, mistral-large
            updated = True

        # < 2.2.33 <--- add agent and expert modes
        if old < parse_version("2.2.33"):
            print("Migrating models from < 2.2.33...")
            exclude = ["dall-e-2", "dall-e-3", "gpt-3.5-turbo-instruct"]
            for model_id in data:
                model = data[model_id]
                if model.id not in exclude:
                    if "agent" not in model.mode:
                        model.mode.append("agent")
                    if "expert" not in model.mode:
                        model.mode.append("expert")
            # change dalle model names
            if "dall-e-2" in data:
                data["dall-e-2"].name = "dall-e-2"
            if "dall-e-3" in data:
                data["dall-e-3"].name = "dall-e-3"
            updated = True

        # < 2.3.3 <--- add o1-preview, o1-mini, Bielik v2.2
        if old < parse_version("2.3.3"):
            print("Migrating models from < 2.3.3...")
            # add o1-preview, o1-mini, Bielik v2.2
            updated = True

        # < 2.4.0 <--- add langchain
        if old < parse_version("2.4.0"):
            print("Migrating models from < 2.4.0...")
            if 'bielik-11b-v2.2-instruct:Q4_K_M' in data:
                model = data['bielik-11b-v2.2-instruct:Q4_K_M']
                if "langchain" not in model.mode:
                    model.mode.append("langchain")
            updated = True

        # < 2.4.10 <--- add agent_llama mode
        if old < parse_version("2.4.10"):
            print("Migrating models from < 2.4.10...")
            exclude = ["gpt-3.5-turbo-instruct"]
            for model_id in data:
                model = data[model_id]
                if model.id.startswith("gpt-") and model.id not in exclude:
                    if "agent_llama" not in model.mode:
                        model.mode.append("agent_llama")
            updated = True

        # < 2.4.11 <--- add agent_llama mode to rest of models
        if old < parse_version("2.4.11"):
            print("Migrating models from < 2.4.11...")
            exclude = [
                "gpt-3.5-turbo-instruct",
                "dall-e-2",
                "dall-e-3",
                "o1-preview",
                "o1-mini",
            ]
            for model_id in data:
                model = data[model_id]
                if model.id not in exclude:
                    if "agent_llama" not in model.mode:
                        model.mode.append("agent_llama")
            updated = True

        # < 2.4.34 <--- add gpt-4o-audio-preview, gpt-4o-2024-11-20
        if old < parse_version("2.4.34"):
            print("Migrating models from < 2.4.34...")
            # add missing gpt-4o-audio-preview, gpt-4o-2024-11-20
            updated = True

        # < 2.4.46 <--- add separated API keys
        if old < parse_version("2.4.46"):
            print("Migrating models from < 2.4.46...")
            # harvest any per-model custom values so they can be moved to the
            # global config; placeholders ("{...}") are not treated as values
            azure_endpoint = ""
            azure_api_version = ""
            google_key = ""
            anthropic_key = ""
            for model_id in data:
                model = data[model_id]
                # OpenAI / Azure OpenAI
                if model.id.startswith("gpt-") or model.id.startswith("o1-"):
                    is_endpoint = False
                    is_version = False
                    for item in model.llama_index["env"]:
                        if item["name"] == "AZURE_OPENAI_ENDPOINT":
                            is_endpoint = True
                            if (item["value"]
                                    and item["value"] not in ["{api_azure_endpoint}", "{api_endpoint}"]):
                                azure_endpoint = item["value"]
                            item["value"] = "{api_azure_endpoint}"
                        elif item["name"] == "OPENAI_API_VERSION":
                            is_version = True
                            if (item["value"]
                                    and item["value"] not in ["{api_azure_version}"]):
                                azure_api_version = item["value"]
                            item["value"] = "{api_azure_version}"
                    if not is_endpoint:
                        model.llama_index["env"].append(
                            {
                                "name": "AZURE_OPENAI_ENDPOINT",
                                "value": "{api_azure_endpoint}",
                            }
                        )
                    if not is_version:
                        model.llama_index["env"].append(
                            {
                                "name": "OPENAI_API_VERSION",
                                "value": "{api_azure_version}",
                            }
                        )

                # Anthropic
                elif model.id.startswith("claude-"):
                    is_key = False
                    for item in model.llama_index["env"]:
                        if item["name"] == "ANTHROPIC_API_KEY":
                            is_key = True
                            if (item["value"]
                                    and item["value"] not in ["{api_key}"]):
                                anthropic_key = item["value"]
                            item["value"] = "{api_key_anthropic}"
                    if not is_key:
                        model.llama_index["env"].append(
                            {
                                "name": "ANTHROPIC_API_KEY",
                                "value": "{api_key_anthropic}",
                            }
                        )

                # Google
                elif model.id.startswith("gemini-"):
                    is_key = False
                    for item in model.llama_index["env"]:
                        if item["name"] == "GOOGLE_API_KEY":
                            is_key = True
                            if (item["value"]
                                    and item["value"] not in ["{api_key}"]):
                                google_key = item["value"]
                            item["value"] = "{api_key_google}"
                    if not is_key:
                        model.llama_index["env"].append(
                            {
                                "name": "GOOGLE_API_KEY",
                                "value": "{api_key_google}",
                            }
                        )

            # move harvested API keys / endpoints to global config
            config_updated = False
            if azure_endpoint:
                self.window.core.config.set("api_azure_endpoint", azure_endpoint)
                config_updated = True
            if azure_api_version:
                self.window.core.config.set("api_azure_version", azure_api_version)
                config_updated = True
            if google_key:
                self.window.core.config.set("api_key_google", google_key)
                config_updated = True
            if anthropic_key:
                self.window.core.config.set("api_key_anthropic", anthropic_key)
                config_updated = True
            if config_updated:
                self.window.core.config.save()
            updated = True

        # < 2.4.47 <--- add gemini-2.0-flash-exp
        if old < parse_version("2.4.47"):
            print("Migrating models from < 2.4.47...")
            # add gemini-2.0-flash-exp
            updated = True

        # < 2.5.0 <--- add o1, DeepSeek R1, V3
        if old < parse_version("2.5.0"):
            print("Migrating models from < 2.5.0...")
            # add o1, DeepSeek R1, V3
            updated = True

        # < 2.5.2 <--- update names to models IDs
        if old < parse_version("2.5.2"):
            print("Migrating models from < 2.5.2...")
            for model_id in data:
                model = data[model_id]
                if model.name.startswith("DeepSeek Ollama"):
                    model.name = model.id
            updated = True

        # < 2.5.4 <--- add o3-mini, update output tokens in o1, o1-mini, o1-preview
        if old < parse_version("2.5.4"):
            print("Migrating models from < 2.5.4...")
            for model_id in data:
                model = data[model_id]
                if model.id == "o1":
                    model.tokens = 100000
                elif model.id == "o1-mini":
                    model.tokens = 65536
                elif model.id == "o1-preview":
                    model.tokens = 65536
            updated = True

        # < 2.5.8 <--- add gpt-4.5-preview and sonar models (Perplexity)
        if old < parse_version("2.5.8"):
            print("Migrating models from < 2.5.8...")
            # add gpt-4.5-preview, sonar, R1
            updated = True

        # < 2.5.10 <--- add claude-3-7-sonnet-latest
        if old < parse_version("2.5.10"):
            print("Migrating models from < 2.5.10...")
            # add claude-3-7-sonnet-latest
            updated = True

        # < 2.5.11 <--- update Bielik from v2.2 to v2.3
        if old < parse_version("2.5.11"):
            print("Migrating models from < 2.5.11...")
            # update Bielik from v2.2 to v2.3
            updated = True

        # < 2.5.12 <--- add gpt-4.1-mini, qwen2.5-coder
        if old < parse_version("2.5.12"):
            print("Migrating models from < 2.5.12...")
            # add gpt-4.1-mini, qwen2.5-coder
            updated = True

        # < 2.5.15 <--- update deepseek IDs
        if old < parse_version("2.5.15"):
            print("Migrating models from < 2.5.15...")
            replace = [
                ["deepseek_ollama_r1_1.5b", "deepseek-r1:1.5b"],
                ["deepseek_ollama_r1_7b", "deepseek-r1:7b"],
                ["deepseek_ollama_r1_14b", "deepseek-r1:14b"],
                ["deepseek_ollama_r1_32b", "deepseek-r1:32b"],
                ["deepseek_ollama_r1_70b", "deepseek-r1:70b"],
                ["deepseek_ollama_r1_671b", "deepseek-r1:671b"],
                ["deepseek_ollama_v3", "deepseek-v3:671b"]
            ]
            for old_id, new_id in replace:
                if old_id in data:
                    model = data[old_id]
                    model.id = new_id
                    model.name = new_id
                    data[new_id] = model
                    del data[old_id]
            updated = True

        # < 2.5.18 <--- update openai flag
        if old < parse_version("2.5.18"):
            print("Migrating models from < 2.5.18...")
            for model_id in data:
                model = data[model_id]
                if model.is_supported("llama_index"):
                    if "chat" not in model.mode:
                        model.mode.append("chat")
            updated = True

        # < 2.5.19 <--- add Grok models
        if old < parse_version("2.5.19"):
            updated = True

        # < 2.5.20 <--- add provider field
        if old < parse_version("2.5.20"):
            print("Migrating models from < 2.5.20...")
            for model_id in data:
                model = data[model_id]

                # add global providers (later checks intentionally override
                # earlier ones, so order matters here)
                if model.is_ollama():
                    model.provider = "ollama"
                if model.id.startswith(("gpt-", "chatgpt", "o1", "o3", "o4", "o5", "dall-e")):
                    model.provider = "openai"
                if model.id.startswith("claude-"):
                    model.provider = "anthropic"
                if model.id.startswith("gemini-"):
                    model.provider = "google"
                if MODE_RESEARCH in model.mode:
                    model.provider = "perplexity"
                if model.id.startswith("grok-"):
                    model.provider = "x_ai"
                if model_id.startswith("deepseek_api"):
                    model.provider = "deepseek_api"
                if model.provider is None or model.provider == "":
                    model.provider = "local_ai"

                # patch llama_index config
                if model.llama_index:
                    if 'mode' in model.llama_index:
                        del model.llama_index['mode']
                    if 'provider' in model.llama_index:
                        del model.llama_index['provider']

                # add llama_index mode to o1, o3
                if model.id.startswith("o1") or model.id.startswith("o3"):
                    if "llama_index" not in model.mode:
                        model.mode.append("llama_index")

                # del langchain config
                if 'langchain' in model.mode:
                    model.mode.remove("langchain")
            updated = True

        # < 2.5.23 <--- add Perplexity to rest of modes
        if old < parse_version("2.5.23"):
            print("Migrating models from < 2.5.23...")
            for model_id in data:
                model = data[model_id]
                if model.provider == "perplexity":
                    for mode in ("llama_index", "agent", "agent_llama", "expert", "chat"):
                        if mode not in model.mode:
                            model.mode.append(mode)
            updated = True

        # < 2.5.27 <--- add o3, o4 deep research
        if old < parse_version("2.5.27"):
            print("Migrating models from < 2.5.27...")
            updated = True

        # < 2.5.29 <--- add multimodal
        if old < parse_version("2.5.29"):
            print("Migrating models from < 2.5.29...")
            updated = True

        # < 2.5.36 <--- add grok-4
        if old < parse_version("2.5.36"):
            print("Migrating models from < 2.5.36...")
            updated = True

        # < 2.5.40 <--- add tool calls flag
        if old < parse_version("2.5.40"):
            print("Migrating models from < 2.5.40...")
            for model_id in data:
                model = data[model_id]
                if model_id in base_data:
                    model.tool_calls = base_data[model_id].tool_calls
            updated = True

        # < 2.5.48 <--- add LlamaIndex modes to x_ai
        if old < parse_version("2.5.48"):
            print("Migrating models from < 2.5.48...")
            for model_id in data:
                model = data[model_id]
                if model.provider == "x_ai":
                    if model.id != "grok-2-vision":
                        for mode in ("llama_index", "agent", "agent_llama", "expert"):
                            if mode not in model.mode:
                                model.mode.append(mode)
            updated = True

        # < 2.5.70 <--- add mistral-small3.1
        if old < parse_version("2.5.70"):
            print("Migrating models from < 2.5.70...")
            updated = True

        # < 2.5.71 <--- computer-use-preview
        if old < parse_version("2.5.71"):
            print("Migrating models from < 2.5.71...")
            updated = True

        # < 2.5.76 <--- add MODE_AGENT_OPENAI
        if old < parse_version("2.5.76"):
            print("Migrating models from < 2.5.76...")
            for model_id in data:
                model = data[model_id]
                if (MODE_CHAT in model.mode or MODE_COMPUTER in model.mode) \
                        and MODE_AGENT_OPENAI not in model.mode:
                    model.mode.append(MODE_AGENT_OPENAI)
                # FIX: the original guard tested MODE_AGENT_OPENAI here, which is
                # always present after the append above, so MODE_EXPERT was never
                # added to computer-mode models; guard on MODE_EXPERT instead.
                if MODE_COMPUTER in model.mode and MODE_EXPERT not in model.mode:
                    model.mode.append(MODE_EXPERT)
            updated = True

        # < 2.5.81 <--- remove MODE_AGENT_OPENAI from unsupported models
        if old < parse_version("2.5.81"):
            print("Migrating models from < 2.5.81...")
            for model_id in data:
                model = data[model_id]
                if model.provider in ["ollama", "mistral_ai"]:
                    if "agent_openai" in model.mode:
                        model.mode.remove("agent_openai")
            updated = True

        # < 2.5.91 <--- GPT-5
        if old < parse_version("2.5.91"):
            print("Migrating models from < 2.5.91...")
            for model_id in ("gpt-5", "gpt-5-mini", "gpt-5-nano"):
                if model_id not in data:
                    data[model_id] = from_base(model_id)
            updated = True

        # < 2.5.93 <--- GPT-5 low and high
        if old < parse_version("2.5.93"):
            print("Migrating models from < 2.5.93...")
            for model_id in (
                "gpt-5-low", "gpt-5-mini-low", "gpt-5-nano-low",
                "gpt-5-high", "gpt-5-mini-high", "gpt-5-nano-high",
            ):
                if model_id not in data:
                    data[model_id] = from_base(model_id)
            updated = True

        # < 2.5.94 <--- gpt-oss
        if old < parse_version("2.5.94"):
            print("Migrating models from < 2.5.94...")
            for model_id in (
                "gpt-oss-20b", "gpt-oss-120b",
                "gpt-oss-20b-huggingface-router", "gpt-oss-120b-huggingface-router",
                "gpt-4.1-nano",
            ):
                if model_id not in data:
                    data[model_id] = from_base(model_id)
            updated = True

        # < 2.6.21 <-- add OpenAI Agents to Ollama
        if old < parse_version("2.6.21"):
            print("Migrating models from < 2.6.21...")
            for model_id in data:
                model = data[model_id]
                if model.provider in ["ollama"]:
                    if "agent_openai" not in model.mode:
                        model.mode.append(MODE_AGENT_OPENAI)
            updated = True

        # < 2.6.30 <--- add Google Imagen models
        if old < parse_version("2.6.30"):
            print("Migrating models from < 2.6.30...")
            for model_id in ("imagen-3.0-generate-002", "imagen-4.0-generate-001"):
                if model_id not in data:
                    data[model_id] = from_base(model_id)
            updated = True

        # < 2.6.31 <--- add realtime models
        if old < parse_version("2.6.31"):
            print("Migrating models from < 2.6.31...")
            for model_id in (
                "gemini-2.5-flash-preview-native-audio-dialog",
                "gpt-realtime",
                "gpt-4o-realtime-preview",
            ):
                if model_id not in data:
                    data[model_id] = from_base(model_id)
            updated = True

        # add veo-3.0-generate-preview
        if old < parse_version("2.6.32"):
            print("Migrating models from < 2.6.32...")
            for model_id in ("veo-3.0-generate-preview", "veo-3.0-fast-generate-preview"):
                if model_id not in data:
                    data[model_id] = from_base(model_id)
            updated = True

        # add image models
        if old < parse_version("2.6.42"):
            print("Migrating models from < 2.6.42...")
            for model_id in ("grok-2-image-1212", "gemini-2.5-flash-image-preview"):
                if model_id not in data:
                    data[model_id] = from_base(model_id)
            updated = True

        return data, updated