pygpt-net 2.6.42__py3-none-any.whl → 2.6.43__py3-none-any.whl

This diff shows the changes between publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in the respective public registry.
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.09.01 23:00:00 #
9
+ # Updated Date: 2025.09.12 00:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  from packaging.version import parse as parse_version, Version
@@ -19,6 +19,8 @@ from pygpt_net.core.types import (
19
19
  MODE_EXPERT
20
20
  )
21
21
 
22
+ # old patches moved here
23
+ from .patches.patch_before_2_6_42 import Patch as PatchBefore2_6_42
22
24
 
23
25
  class Patch:
24
26
  def __init__(self, window=None):
@@ -33,6 +35,7 @@ class Patch:
33
35
  """
34
36
  data = self.window.core.models.items
35
37
  base_data = self.window.core.models.get_base()
38
+ from_base = self.window.core.models.from_base
36
39
  updated = False
37
40
 
38
41
  # get version of models config
@@ -40,772 +43,17 @@ class Patch:
40
43
  old = parse_version(current)
41
44
 
42
45
  # check if models file is older than current app version
43
- is_old = False
44
46
  if old < version:
45
- is_old = True
46
47
 
47
- # < 0.9.1
48
- if old < parse_version("0.9.1"):
49
- # apply meta only (not attached in 0.9.0)
50
- print("Migrating models from < 0.9.1...")
51
- updated = True
52
-
53
- # < 2.0.1
54
- if old < parse_version("2.0.1"):
55
- print("Migrating models from < 2.0.1...")
56
- self.window.core.updater.patch_file('models.json', True) # force replace file
57
- self.window.core.models.load()
58
- data = self.window.core.models.items
59
- updated = True
60
-
61
- # < 2.0.96 <--- patch for llama-index modes
62
- if old < parse_version("2.0.96"):
63
- print("Migrating models from < 2.0.96...")
64
- self.window.core.updater.patch_file('models.json', True) # force replace file
65
- self.window.core.models.load()
66
- data = self.window.core.models.items
67
- updated = True
68
-
69
- # < 2.0.105 <--- patch for llama-index gpt4-turbo
70
- if old < parse_version("2.0.105"):
71
- print("Migrating models from < 2.0.105...")
72
- self.window.core.updater.patch_file('models.json', True) # force replace file
73
- self.window.core.models.load()
74
- data = self.window.core.models.items
75
- updated = True
76
-
77
- '''
78
- # < 2.0.104 <--- patch to new format
79
- if old < parse_version("2.0.104"):
80
- print("Migrating models from < 2.0.104...")
81
- for id in data:
82
- model = data[id]
83
- dict_name = model.to_dict()
84
- model.from_dict(dict_name)
85
-
86
- # patch missing llama_index provider
87
- if "llama_index" in model.mode:
88
- if model.id.startswith("gpt-") or model.id.startswith("text-davinci-"):
89
- model.llama_index["provider"] = "openai"
90
- if model.id.startswith("gpt-"):
91
- model.llama_index['mode'] = ["chat"]
92
- model.llama_index['args'] = [
93
- {
94
- "name": "model_name",
95
- "value": model.id,
96
- "type": "str",
97
- }
98
- ]
99
- model.llama_index['env'] = [
100
- {
101
- "name": "OPENAI_API_KEY",
102
- "value": "{api_key}",
103
- }
104
- ]
105
- if "langchain" in model.mode:
106
- if model.id.startswith("gpt-") or model.id.startswith("text-davinci-"):
107
- model.langchain['args'] = [
108
- {
109
- "name": "model_name",
110
- "value": model.id,
111
- "type": "str",
112
- }
113
- ]
114
- model.langchain['env'] = [
115
- {
116
- "name": "OPENAI_API_KEY",
117
- "value": "{api_key}",
118
- }
119
- ]
120
- updated = True
121
- '''
122
-
123
- # < 2.0.107 <--- patch for deprecated davinci, replace with gpt-3.5-turbo-instruct
124
- if old < parse_version("2.0.107"):
125
- print("Migrating models from < 2.0.107...")
126
- if "text-davinci-002" in data:
127
- del data["text-davinci-002"]
128
- if "text-davinci-003" in data:
129
- data["text-davinci-003"].id = "gpt-3.5-turbo-instruct"
130
- data["text-davinci-003"].name = "gpt-3.5-turbo-instruct"
131
- if "llama_index" in data["text-davinci-003"].mode:
132
- data["text-davinci-003"].mode.remove("llama_index")
133
- if len(data["text-davinci-003"].langchain["args"]) > 0:
134
- if data["text-davinci-003"].langchain["args"][0]["name"] == "model_name":
135
- data["text-davinci-003"].langchain["args"][0]["value"] = "gpt-3.5-turbo-instruct"
136
- data["text-davinci-003"].llama_index["args"] = []
137
- data["text-davinci-003"].llama_index["env"] = []
138
- data["text-davinci-003"].llama_index["provider"] = None
139
- # replace "text-davinci-003" with "gpt-3.5-turbo-instruct"
140
- if "gpt-3.5-turbo-instruct" not in data:
141
- data["gpt-3.5-turbo-instruct"] = data["text-davinci-003"]
142
- del data["text-davinci-003"]
143
- updated = True
144
-
145
- # < 2.0.123 <--- update names to models IDs
146
- if old < parse_version("2.0.123"):
147
- print("Migrating models from < 2.0.123...")
148
- if "gpt-4-1106-preview" in data:
149
- data["gpt-4-1106-preview"].name = "gpt-4-1106-preview"
150
- if "gpt-4-vision-preview" in data:
151
- data["gpt-4-vision-preview"].name = "gpt-4-vision-preview"
152
- updated = True
153
-
154
- # < 2.0.134 <--- add agent mode
155
- if old < parse_version("2.0.134"):
156
- print("Migrating models from < 2.0.134...")
157
- exclude = ["gpt-3.5-turbo-instruct", "gpt-4-vision-preview"]
158
- for id in data:
159
- model = data[id]
160
- if model.id.startswith("gpt-") and model.id not in exclude:
161
- if "agent" not in model.mode:
162
- model.mode.append("agent")
163
- updated = True
164
-
165
- # fix typo in gpt-4 turbo preview for llama
166
- if old < parse_version("2.1.15"):
167
- print("Migrating models from < 2.1.15...")
168
- if "gpt-4-turbo-preview" in data:
169
- data["gpt-4-turbo-preview"].llama_index["args"] = [
170
- {
171
- "name": "model",
172
- "value": "gpt-4-turbo-preview",
173
- "type": "str",
174
- }
175
- ]
176
- updated = True
177
-
178
- # add API endpoint
179
- if old < parse_version("2.1.19"):
180
- print("Migrating models from < 2.1.19...")
181
- for id in data:
182
- model = data[id]
183
- if model.id.startswith("gpt-"):
184
- if "env" not in model.llama_index:
185
- model.llama_index["env"] = []
186
- is_endpoint = False
187
- for arg in model.llama_index["env"]:
188
- if "OPENAI_API_BASE" in arg["name"]:
189
- is_endpoint = True
190
- break
191
- if not is_endpoint:
192
- model.llama_index["env"].append(
193
- {
194
- "name": "OPENAI_API_BASE",
195
- "value": "{api_endpoint}",
196
- }
197
- )
198
- if "env" not in model.langchain:
199
- model.langchain["env"] = []
200
- is_endpoint = False
201
- for arg in model.langchain["env"]:
202
- if "OPENAI_API_BASE" in arg["name"]:
203
- is_endpoint = True
204
- break
205
- if not is_endpoint:
206
- model.langchain["env"].append(
207
- {
208
- "name": "OPENAI_API_BASE",
209
- "value": "{api_endpoint}",
210
- }
211
- )
212
- updated = True
213
-
214
- if old < parse_version("2.1.45"):
215
- print("Migrating models from < 2.1.45...")
216
- # add missing 2024-04-09
217
- updated = True
218
-
219
- if old < parse_version("2.2.6"):
220
- print("Migrating models from < 2.2.6...")
221
- # add missing gpt-4-turbo
222
- updated = True
223
-
224
- # < 2.2.7 <--- add expert mode
225
- if old < parse_version("2.2.7"):
226
- print("Migrating models from < 2.2.7...")
227
- exclude = ["gpt-3.5-turbo-instruct", "gpt-4-vision-preview"]
228
- for id in data:
229
- model = data[id]
230
- if model.id.startswith("gpt-") and model.id not in exclude:
231
- if "expert" not in model.mode:
232
- model.mode.append("expert")
233
- updated = True
234
-
235
- # < 2.2.19 <--- add gpt-4o
236
- if old < parse_version("2.2.19"):
237
- print("Migrating models from < 2.2.19...")
238
- # add gpt-4o
239
- updated = True
240
-
241
- # < 2.2.20 <--- add gpt-4o-mini
242
- if old < parse_version("2.2.20"):
243
- print("Migrating models from < 2.2.20...")
244
- # add gpt-4o-mini
245
- updated = True
246
-
247
- # < 2.2.22 <--- add Llama index models
248
- if old < parse_version("2.2.22"):
249
- print("Migrating models from < 2.2.22...")
250
- # add Gemini, Claude, Llama3, Mistral and etc.
251
- updated = True
252
-
253
- # < 2.2.28 <--- add Llama index models
254
- if old < parse_version("2.2.28"):
255
- print("Migrating models from < 2.2.28...")
256
- # add Llama3.1 70b and 405b, mistral-large
257
- updated = True
258
-
259
- # < 2.2.33 <--- add agent and expert modes
260
- if old < parse_version("2.2.33"):
261
- print("Migrating models from < 2.2.33...")
262
- exclude = ["dall-e-2", "dall-e-3", "gpt-3.5-turbo-instruct"]
263
- for id in data:
264
- model = data[id]
265
- if model.id not in exclude:
266
- if "agent" not in model.mode:
267
- model.mode.append("agent")
268
- if "expert" not in model.mode:
269
- model.mode.append("expert")
270
- # change dalle model names
271
- if "dall-e-2" in data:
272
- data["dall-e-2"].name = "dall-e-2"
273
- if "dall-e-3" in data:
274
- data["dall-e-3"].name = "dall-e-3"
275
- updated = True
276
-
277
- # < 2.3.3 <--- add o1-preview, o1-mini, Bielik v2.2
278
- if old < parse_version("2.3.3"):
279
- print("Migrating models from < 2.3.3...")
280
- # add o1-preview, o1-mini, Bielik v2.2
281
- updated = True
282
-
283
- # < 2.4.0 <--- add langchain
284
- if old < parse_version("2.4.0"):
285
- print("Migrating models from < 2.4.0...")
286
- if 'bielik-11b-v2.2-instruct:Q4_K_M' in data:
287
- model = data['bielik-11b-v2.2-instruct:Q4_K_M']
288
- if "langchain" not in model.mode:
289
- model.mode.append("langchain")
290
- updated = True
291
-
292
- # < 2.4.10 <--- add agent_llama mode
293
- if old < parse_version("2.4.10"):
294
- print("Migrating models from < 2.4.10...")
295
- exclude = ["gpt-3.5-turbo-instruct"]
296
- for id in data:
297
- model = data[id]
298
- if model.id.startswith("gpt-") and model.id not in exclude:
299
- if "agent_llama" not in model.mode:
300
- model.mode.append("agent_llama")
301
- updated = True
302
-
303
- # < 2.4.11 <--- add agent_llama mode to rest of models
304
- if old < parse_version("2.4.11"):
305
- print("Migrating models from < 2.4.11...")
306
- exclude = [
307
- "gpt-3.5-turbo-instruct",
308
- "dall-e-2",
309
- "dall-e-3",
310
- "o1-preview",
311
- "o1-mini",
312
- ]
313
- for id in data:
314
- model = data[id]
315
- if model.id not in exclude:
316
- if "agent_llama" not in model.mode:
317
- model.mode.append("agent_llama")
318
- updated = True
319
-
320
- # < 2.4.34 <--- add gpt-4o-audio-preview, gpt-4o-2024-11-20
321
- if old < parse_version("2.4.34"):
322
- print("Migrating models from < 2.4.34...")
323
- # add missing gpt-4o-audio-preview, gpt-4o-2024-11-20
324
- updated = True
325
-
326
- # < 2.4.46 <--- add separated API keys
327
- if old < parse_version("2.4.46"):
328
- print("Migrating models from < 2.4.46...")
329
- azure_endpoint = ""
330
- azure_api_version = ""
331
- google_key = ""
332
- anthropic_key = ""
333
- for id in data:
334
- model = data[id]
335
- # OpenAI
336
- if model.id.startswith("gpt-") or model.id.startswith("o1-"):
337
- # langchain
338
- is_endpoint = False
339
- is_version = False
340
- """
341
- for item in model.langchain["env"]:
342
- if item["name"] == "AZURE_OPENAI_ENDPOINT":
343
- is_endpoint = True
344
- if (item["value"]
345
- and item["value"] not in ["{api_azure_endpoint}", "{api_endpoint}"]):
346
- azure_endpoint = item["value"]
347
- item["value"] = "{api_azure_endpoint}"
348
- elif item["name"] == "OPENAI_API_VERSION":
349
- is_version = True
350
- if (item["value"]
351
- and item["value"] not in ["{api_azure_version}"]):
352
- azure_api_version = item["value"]
353
- item["value"] = "{api_azure_version}"
354
- if not is_endpoint:
355
- model.langchain["env"].append(
356
- {
357
- "name": "AZURE_OPENAI_ENDPOINT",
358
- "value": "{api_azure_endpoint}",
359
- }
360
- )
361
- if not is_version:
362
- model.langchain["env"].append(
363
- {
364
- "name": "OPENAI_API_VERSION",
365
- "value": "{api_azure_version}",
366
- }
367
- )
368
- """
369
-
370
- # llama
371
- is_endpoint = False
372
- is_version = False
373
- for item in model.llama_index["env"]:
374
- if item["name"] == "AZURE_OPENAI_ENDPOINT":
375
- is_endpoint = True
376
- if (item["value"]
377
- and item["value"] not in ["{api_azure_endpoint}", "{api_endpoint}"]):
378
- azure_endpoint = item["value"]
379
- item["value"] = "{api_azure_endpoint}"
380
- elif item["name"] == "OPENAI_API_VERSION":
381
- is_version = True
382
- if (item["value"]
383
- and item["value"] not in ["{api_azure_version}"]):
384
- azure_api_version = item["value"]
385
- item["value"] = "{api_azure_version}"
386
- if not is_endpoint:
387
- model.llama_index["env"].append(
388
- {
389
- "name": "AZURE_OPENAI_ENDPOINT",
390
- "value": "{api_azure_endpoint}",
391
- }
392
- )
393
- if not is_version:
394
- model.llama_index["env"].append(
395
- {
396
- "name": "OPENAI_API_VERSION",
397
- "value": "{api_azure_version}",
398
- }
399
- )
400
-
401
- # Anthropic
402
- elif model.id.startswith("claude-"):
403
- is_key = False
404
- """
405
- for item in model.langchain["env"]:
406
- if item["name"] == "ANTHROPIC_API_KEY":
407
- is_key = True
408
- if (item["value"]
409
- and item["value"] not in ["{api_key}"]):
410
- anthropic_key = item["value"]
411
- item["value"] = "{api_key_anthropic}"
412
- if not is_key:
413
- model.langchain["env"].append(
414
- {
415
- "name": "ANTHROPIC_API_KEY",
416
- "value": "{api_key_anthropic}",
417
- }
418
- )
419
- """
420
- is_key = False
421
- for item in model.llama_index["env"]:
422
- if item["name"] == "ANTHROPIC_API_KEY":
423
- is_key = True
424
- if (item["value"]
425
- and item["value"] not in ["{api_key}"]):
426
- anthropic_key = item["value"]
427
- item["value"] = "{api_key_anthropic}"
428
- if not is_key:
429
- model.llama_index["env"].append(
430
- {
431
- "name": "ANTHROPIC_API_KEY",
432
- "value": "{api_key_anthropic}",
433
- }
434
- )
435
- # Google
436
- elif model.id.startswith("gemini-"):
437
- is_key = False
438
- """
439
- for item in model.langchain["env"]:
440
- if item["name"] == "GOOGLE_API_KEY":
441
- is_key = True
442
- if (item["value"]
443
- and item["value"] not in ["{api_key}"]):
444
- google_key = item["value"]
445
- item["value"] = "{api_key_google}"
446
- if not is_key:
447
- model.langchain["env"].append(
448
- {
449
- "name": "GOOGLE_API_KEY",
450
- "value": "{api_key_google}",
451
- }
452
- )
453
- """
454
- is_key = False
455
- for item in model.llama_index["env"]:
456
- if item["name"] == "GOOGLE_API_KEY":
457
- is_key = True
458
- if (item["value"]
459
- and item["value"] not in ["{api_key}"]):
460
- google_key = item["value"]
461
- item["value"] = "{api_key_google}"
462
- if not is_key:
463
- model.llama_index["env"].append(
464
- {
465
- "name": "GOOGLE_API_KEY",
466
- "value": "{api_key_google}",
467
- }
468
- )
469
- # move API keys to config
470
- config_updated = False
471
- if azure_endpoint:
472
- self.window.core.config.set("api_azure_endpoint", azure_endpoint)
473
- config_updated = True
474
- if azure_api_version:
475
- self.window.core.config.set("api_azure_version", azure_api_version)
476
- config_updated = True
477
- if google_key:
478
- self.window.core.config.set("api_key_google", google_key)
479
- config_updated = True
480
- if anthropic_key:
481
- self.window.core.config.set("api_key_anthropic", anthropic_key)
482
- config_updated = True
483
- if config_updated:
484
- self.window.core.config.save()
485
- updated = True
486
-
487
- # < 2.4.47 <--- add gemini-2.0-flash-exp
488
- if old < parse_version("2.4.47"):
489
- print("Migrating models from < 2.4.47...")
490
- # add gemini-2.0-flash-exp
491
- updated = True
492
-
493
- # < 2.5.0 <--- add o1, DeepSeek R1, V3
494
- if old < parse_version("2.5.0"):
495
- print("Migrating models from < 2.5.0...")
496
- # add o1, DeepSeek R1, V3
497
- updated = True
498
-
499
- # < 2.5.2 <--- update names to models IDs
500
- if old < parse_version("2.5.2"):
501
- print("Migrating models from < 2.5.2...")
502
- for id in data:
503
- model = data[id]
504
- if model.name.startswith("DeepSeek Ollama"):
505
- model.name = model.id
506
- updated = True
507
-
508
- # < 2.5.4 <--- add o3-mini, update output tokens in o1, o1-mini, o1-preview
509
- if old < parse_version("2.5.4"):
510
- print("Migrating models from < 2.5.4...")
511
- for id in data:
512
- model = data[id]
513
- if model.id == "o1":
514
- model.tokens = 100000
515
- elif model.id == "o1-mini":
516
- model.tokens = 65536
517
- elif model.id == "o1-preview":
518
- model.tokens = 65536
519
- updated = True
520
-
521
- # < 2.5.8 <--- add gpt-4.5-preview and sonar models (Perplexity)
522
- if old < parse_version("2.5.8"):
523
- print("Migrating models from < 2.5.8...")
524
- # add gpt-4.5-preview, sonar, R1
525
- updated = True
526
-
527
- # < 2.5.10 <--- add claude-3-7-sonnet-latest
528
- if old < parse_version("2.5.10"):
529
- print("Migrating models from < 2.5.10...")
530
- # add claude-3-7-sonnet-latest
531
- updated = True
532
-
533
- # < 2.5.11 <--- update Bielik from v2.2 to v2.3
534
- if old < parse_version("2.5.11"):
535
- print("Migrating models from < 2.5.11...")
536
- # update Bielik from v2.2 to v2.3
537
- updated = True
538
-
539
- # < 2.5.12 <--- add gpt-4.1-mini, qwen2.5-coder
540
- if old < parse_version("2.5.12"):
541
- print("Migrating models from < 2.5.12...")
542
- # add gpt-4.1-mini, qwen2.5-coder
543
- updated = True
544
-
545
- # < 2.5.15 <--- update deepseek IDs
546
- if old < parse_version("2.5.15"):
547
- print("Migrating models from < 2.5.15...")
548
- replace = [
549
- ["deepseek_ollama_r1_1.5b", "deepseek-r1:1.5b"],
550
- ["deepseek_ollama_r1_7b", "deepseek-r1:7b"],
551
- ["deepseek_ollama_r1_14b", "deepseek-r1:14b"],
552
- ["deepseek_ollama_r1_32b", "deepseek-r1:32b"],
553
- ["deepseek_ollama_r1_70b", "deepseek-r1:70b"],
554
- ["deepseek_ollama_r1_671b", "deepseek-r1:671b"],
555
- ["deepseek_ollama_v3", "deepseek-v3:671b"]
556
- ]
557
- for m in replace:
558
- name_to_replace = m[0]
559
- new_name = m[1]
560
- if name_to_replace in data:
561
- model = data[name_to_replace]
562
- model.id = new_name
563
- model.name = new_name
564
- data[new_name] = model
565
- del data[name_to_replace]
566
- updated = True
567
-
568
- # < 2.5.18 <--- update openai flag
569
- if old < parse_version("2.5.18"):
570
- print("Migrating models from < 2.5.18...")
571
- for id in data:
572
- model = data[id]
573
- if model.is_supported("llama_index"):
574
- if "chat" not in model.mode:
575
- model.mode.append("chat")
576
- updated = True
577
-
578
- # < 2.5.19 <--- add Grok models
579
- if old < parse_version("2.5.19"):
580
- updated = True
581
-
582
- # < 2.5.20 <--- add provider field
583
- if old < parse_version("2.5.20"):
584
- print("Migrating models from < 2.5.20...")
585
- for id in data:
586
- model = data[id]
587
-
588
- # add global providers
589
- if model.is_ollama():
590
- model.provider = "ollama"
591
- if (model.id.startswith("gpt-")
592
- or model.id.startswith("chatgpt")
593
- or model.id.startswith("o1")
594
- or model.id.startswith("o3")
595
- or model.id.startswith("o4")
596
- or model.id.startswith("o5")
597
- or model.id.startswith("dall-e")):
598
- model.provider = "openai"
599
- if model.id.startswith("claude-"):
600
- model.provider = "anthropic"
601
- if model.id.startswith("gemini-"):
602
- model.provider = "google"
603
- if MODE_RESEARCH in model.mode:
604
- model.provider = "perplexity"
605
- if model.id.startswith("grok-"):
606
- model.provider = "x_ai"
607
- if id.startswith("deepseek_api"):
608
- model.provider = "deepseek_api"
609
- if model.provider is None or model.provider == "":
610
- model.provider = "local_ai"
611
-
612
- # patch llama_index config
613
- if model.llama_index:
614
- if 'mode' in model.llama_index:
615
- del model.llama_index['mode']
616
- if 'provider' in model.llama_index:
617
- del model.llama_index['provider']
618
-
619
- # add llama_index mode to o1, o3
620
- if model.id.startswith("o1") or model.id.startswith("o3"):
621
- if "llama_index" not in model.mode:
622
- model.mode.append("llama_index")
623
-
624
- # del langchain config
625
- if 'langchain' in model.mode:
626
- model.mode.remove("langchain")
627
- updated = True
628
-
629
- # < 2.5.23 <--- add Perplexity to rest of modes
630
- if old < parse_version("2.5.23"):
631
- print("Migrating models from < 2.5.23...")
632
- for id in data:
633
- model = data[id]
634
- if model.provider == "perplexity":
635
- if "llama_index" not in model.mode:
636
- model.mode.append("llama_index")
637
- if "agent" not in model.mode:
638
- model.mode.append("agent")
639
- if "agent_llama" not in model.mode:
640
- model.mode.append("agent_llama")
641
- if "expert" not in model.mode:
642
- model.mode.append("expert")
643
- if "chat" not in model.mode:
644
- model.mode.append("chat")
645
- updated = True
646
-
647
- # < 2.5.27 <--- add o3, o4 deep research
648
- if old < parse_version("2.5.27"):
649
- print("Migrating models from < 2.5.27...")
650
- updated = True
651
-
652
- # < 2.5.29 <--- add multimodal
653
- if old < parse_version("2.5.29"):
654
- print("Migrating models from < 2.5.29...")
655
- updated = True
656
-
657
- # < 2.5.36 <--- add grok-4
658
- if old < parse_version("2.5.36"):
659
- print("Migrating models from < 2.5.36...")
660
- updated = True
661
-
662
- # < 2.5.40 <--- add tool calls flag
663
- if old < parse_version("2.5.40"):
664
- print("Migrating models from < 2.5.40...")
665
- for id in data:
666
- model = data[id]
667
- if id in base_data:
668
- model.tool_calls = base_data[id].tool_calls
669
- updated = True
670
-
671
- # < 2.5.48 <--- add LlamaIndex modes to x_ai
672
- if old < parse_version("2.5.48"):
673
- print("Migrating models from < 2.5.48...")
674
- for id in data:
675
- model = data[id]
676
- if model.provider == "x_ai":
677
- if model.id != "grok-2-vision":
678
- if "llama_index" not in model.mode:
679
- model.mode.append("llama_index")
680
- if "agent" not in model.mode:
681
- model.mode.append("agent")
682
- if "agent_llama" not in model.mode:
683
- model.mode.append("agent_llama")
684
- if "expert" not in model.mode:
685
- model.mode.append("expert")
686
- updated = True
687
-
688
- # < 2.5.70 <--- add mistral-small3.1
689
- if old < parse_version("2.5.70"):
690
- print("Migrating models from < 2.5.70...")
691
- updated = True
692
-
693
- # < 2.5.71 <--- computer-use-preview
694
- if old < parse_version("2.5.71"):
695
- print("Migrating models from < 2.5.71...")
696
- updated = True
697
-
698
- # < 2.5.76 <--- add MODE_AGENT_OPENAI
699
- if old < parse_version("2.5.76"):
700
- print("Migrating models from < 2.5.76...")
701
- for id in data:
702
- model = data[id]
703
- if (MODE_CHAT in model.mode or MODE_COMPUTER in model.mode) and MODE_AGENT_OPENAI not in model.mode:
704
- model.mode.append(MODE_AGENT_OPENAI)
705
- if MODE_COMPUTER in model.mode and MODE_AGENT_OPENAI not in model.mode:
706
- model.mode.append(MODE_EXPERT)
707
- updated = True
708
-
709
- # < 2.5.81 <--- remove MODE_AGENT_OPENAI from unsupported models
710
- if old < parse_version("2.5.81"):
711
- print("Migrating models from < 2.5.81...")
712
- for id in data:
713
- model = data[id]
714
- if model.provider in ["ollama", "mistral_ai"]:
715
- if "agent_openai" in model.mode:
716
- model.mode.remove("agent_openai")
717
- updated = True
718
-
719
- # < 2.5.91 <--- GPT-5
720
- if old < parse_version("2.5.91"):
721
- print("Migrating models from < 2.5.91...")
722
- if "gpt-5" not in data:
723
- data["gpt-5"] = base_data["gpt-5"]
724
- if "gpt-5-mini" not in data:
725
- data["gpt-5-mini"] = base_data["gpt-5-mini"]
726
- if "gpt-5-nano" not in data:
727
- data["gpt-5-nano"] = base_data["gpt-5-nano"]
728
- updated = True
729
-
730
- # < 2.5.93 <--- GPT-5 low and high
731
- if old < parse_version("2.5.93"):
732
- print("Migrating models from < 2.5.93...")
733
- if "gpt-5-low" not in data:
734
- data["gpt-5-low"] = base_data["gpt-5-low"]
735
- if "gpt-5-mini-low" not in data:
736
- data["gpt-5-mini-low"] = base_data["gpt-5-mini-low"]
737
- if "gpt-5-nano-low" not in data:
738
- data["gpt-5-nano-low"] = base_data["gpt-5-nano-low"]
739
- if "gpt-5-high" not in data:
740
- data["gpt-5-high"] = base_data["gpt-5-high"]
741
- if "gpt-5-mini-high" not in data:
742
- data["gpt-5-mini-high"] = base_data["gpt-5-mini-high"]
743
- if "gpt-5-nano-high" not in data:
744
- data["gpt-5-nano-high"] = base_data["gpt-5-nano-high"]
745
- updated = True
746
-
747
- # < 2.5.94 <--- gpt-oss
748
- if old < parse_version("2.5.94"):
749
- print("Migrating models from < 2.5.94...")
750
- if "gpt-oss-20b" not in data:
751
- data["gpt-oss-20b"] = base_data["gpt-oss-20b"]
752
- if "gpt-oss-120b" not in data:
753
- data["gpt-oss-120b"] = base_data["gpt-oss-120b"]
754
- if "gpt-oss-20b-huggingface-router" not in data:
755
- data["gpt-oss-20b-huggingface-router"] = base_data["gpt-oss-20b-huggingface-router"]
756
- if "gpt-oss-120b-huggingface-router" not in data:
757
- data["gpt-oss-120b-huggingface-router"] = base_data["gpt-oss-120b-huggingface-router"]
758
- if "gpt-4.1-nano" not in data:
759
- data["gpt-4.1-nano"] = base_data["gpt-4.1-nano"]
760
- updated = True
761
-
762
- # < 2.6.21 <-- add OpenAI Agents to Ollama
763
- if old < parse_version("2.6.21"):
764
- print("Migrating models from < 2.6.21...")
765
- for id in data:
766
- model = data[id]
767
- if model.provider in ["ollama"]:
768
- if "agent_openai" not in model.mode:
769
- model.mode.append(MODE_AGENT_OPENAI)
770
- updated = True
771
-
772
- # < 2.6.30 <--- add Google Imagen models
773
- if old < parse_version("2.6.30"):
774
- print("Migrating models from < 2.6.30...")
775
- if "imagen-3.0-generate-002" not in data:
776
- data["imagen-3.0-generate-002"] = base_data["imagen-3.0-generate-002"]
777
- if "imagen-4.0-generate-001" not in data:
778
- data["imagen-4.0-generate-001"] = base_data["imagen-4.0-generate-001"]
779
- updated = True
780
-
781
- # < 2.6.31 <--- add realtime models
782
- if old < parse_version("2.6.31"):
783
- print("Migrating models from < 2.6.31...")
784
- if "gemini-2.5-flash-preview-native-audio-dialog" not in data:
785
- data["gemini-2.5-flash-preview-native-audio-dialog"] = base_data["gemini-2.5-flash-preview-native-audio-dialog"]
786
- if "gpt-realtime" not in data:
787
- data["gpt-realtime"] = base_data["gpt-realtime"]
788
- if "gpt-4o-realtime-preview" not in data:
789
- data["gpt-4o-realtime-preview"] = base_data["gpt-4o-realtime-preview"]
790
- updated = True
791
-
792
- # add veo-3.0-generate-preview
793
- if old < parse_version("2.6.32"):
794
- print("Migrating models from < 2.6.32...")
795
- if "veo-3.0-generate-preview" not in data:
796
- data["veo-3.0-generate-preview"] = base_data["veo-3.0-generate-preview"]
797
- if "veo-3.0-fast-generate-preview" not in data:
798
- data["veo-3.0-fast-generate-preview"] = base_data["veo-3.0-fast-generate-preview"]
799
- updated = True
800
-
801
- # add image models
48
+ # --------------------------------------------
49
+ # previous patches for versions before 2.6.42
802
50
  if old < parse_version("2.6.42"):
803
- print("Migrating models from < 2.6.42...")
804
- if "grok-2-image-1212" not in data:
805
- data["grok-2-image-1212"] = base_data["grok-2-image-1212"]
806
- if "gemini-2.5-flash-image-preview" not in data:
807
- data["gemini-2.5-flash-image-preview"] = base_data["gemini-2.5-flash-image-preview"]
808
- updated = True
51
+ patcher = PatchBefore2_6_42(self.window)
52
+ data, updated = patcher.execute(version)
53
+ # --------------------------------------------
54
+
55
+ # > 2.6.42 below:
56
+ # pass
809
57
 
810
58
  # update file
811
59
  if updated: