pygpt-net 2.6.24__py3-none-any.whl → 2.6.26__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. pygpt_net/CHANGELOG.txt +10 -0
  2. pygpt_net/__init__.py +1 -1
  3. pygpt_net/app.py +3 -1
  4. pygpt_net/controller/config/placeholder.py +3 -1
  5. pygpt_net/controller/model/importer.py +42 -5
  6. pygpt_net/core/agents/runners/loop.py +36 -3
  7. pygpt_net/core/models/models.py +5 -1
  8. pygpt_net/core/types/openai.py +2 -1
  9. pygpt_net/data/config/config.json +34 -3
  10. pygpt_net/data/config/models.json +2 -2
  11. pygpt_net/data/config/settings.json +72 -1
  12. pygpt_net/data/locale/locale.en.ini +10 -0
  13. pygpt_net/provider/core/config/patch.py +46 -1
  14. pygpt_net/provider/llms/anthropic.py +33 -3
  15. pygpt_net/provider/llms/azure_openai.py +9 -4
  16. pygpt_net/provider/llms/deepseek_api.py +36 -3
  17. pygpt_net/provider/llms/google.py +7 -1
  18. pygpt_net/provider/llms/hugging_face_api.py +9 -3
  19. pygpt_net/provider/llms/hugging_face_router.py +17 -3
  20. pygpt_net/provider/llms/llama_index/x_ai/__init__.py +0 -0
  21. pygpt_net/provider/llms/llama_index/x_ai/embedding.py +71 -0
  22. pygpt_net/provider/llms/local.py +31 -1
  23. pygpt_net/provider/llms/mistral.py +29 -1
  24. pygpt_net/provider/llms/ollama.py +3 -1
  25. pygpt_net/provider/llms/open_router.py +104 -0
  26. pygpt_net/provider/llms/openai.py +7 -2
  27. pygpt_net/provider/llms/x_ai.py +19 -3
  28. {pygpt_net-2.6.24.dist-info → pygpt_net-2.6.26.dist-info}/METADATA +51 -28
  29. {pygpt_net-2.6.24.dist-info → pygpt_net-2.6.26.dist-info}/RECORD +32 -29
  30. {pygpt_net-2.6.24.dist-info → pygpt_net-2.6.26.dist-info}/LICENSE +0 -0
  31. {pygpt_net-2.6.24.dist-info → pygpt_net-2.6.26.dist-info}/WHEEL +0 -0
  32. {pygpt_net-2.6.24.dist-info → pygpt_net-2.6.26.dist-info}/entry_points.txt +0 -0
pygpt_net/CHANGELOG.txt CHANGED
@@ -1,3 +1,13 @@
1
+ 2.6.26 (2025-08-26)
2
+
3
+ - Added new provider: OpenRouter (#133).
4
+
5
+ 2.6.25 (2025-08-26)
6
+
7
+ - Fixed the empty agent ID issue in OpenAI Agents evaluation.
8
+ - Added the ability to select a custom model for evaluation.
9
+ - Added embedding providers: Anthropic, Deepseek, MistralAI, xAI, VoyageAI.
10
+
1
11
  2.6.24 (2025-08-26)
2
12
 
3
13
  - Added a new option: LlamaIndex -> Embeddings -> Default embedding providers for attachments.
pygpt_net/__init__.py CHANGED
@@ -13,7 +13,7 @@ __author__ = "Marcin Szczygliński"
13
13
  __copyright__ = "Copyright 2025, Marcin Szczygliński"
14
14
  __credits__ = ["Marcin Szczygliński"]
15
15
  __license__ = "MIT"
16
- __version__ = "2.6.24"
16
+ __version__ = "2.6.26"
17
17
  __build__ = "2025-08-26"
18
18
  __maintainer__ = "Marcin Szczygliński"
19
19
  __github__ = "https://github.com/szczyglis-dev/py-gpt"
pygpt_net/app.py CHANGED
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.24 23:00:00 #
9
+ # Updated Date: 2025.08.26 23:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import os
@@ -124,6 +124,7 @@ from pygpt_net.provider.llms.ollama import OllamaLLM
124
124
  from pygpt_net.provider.llms.openai import OpenAILLM
125
125
  from pygpt_net.provider.llms.perplexity import PerplexityLLM
126
126
  from pygpt_net.provider.llms.x_ai import xAILLM
127
+ from pygpt_net.provider.llms.open_router import OpenRouterLLM
127
128
 
128
129
  # vector store providers (llama-index)
129
130
  from pygpt_net.provider.vector_stores.chroma import ChromaProvider
@@ -430,6 +431,7 @@ def run(**kwargs):
430
431
  launcher.add_llm(DeepseekApiLLM())
431
432
  launcher.add_llm(PerplexityLLM())
432
433
  launcher.add_llm(xAILLM())
434
+ launcher.add_llm(OpenRouterLLM())
433
435
 
434
436
  # register LLMs
435
437
  llms = kwargs.get('llms', None)
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.15 23:00:00 #
9
+ # Updated Date: 2025.08.26 19:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  from typing import Dict, Any, List
@@ -337,6 +337,8 @@ class Placeholder:
337
337
  items[mid] = model.name
338
338
 
339
339
  data: List[Dict[str, str]] = []
340
+ if "allow_empty" in params and params["allow_empty"] is True:
341
+ data.append({'_': '---'})
340
342
  providers = self.window.core.llm.get_choices()
341
343
  if not providers:
342
344
  for mid, name in sorted(items.items(), key=lambda kv: kv[1].lower()):
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.24 23:00:00 #
9
+ # Updated Date: 2025.08.26 23:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import copy
@@ -391,7 +391,7 @@ class Importer:
391
391
  'type': 'str'
392
392
  }
393
393
  ]
394
- elif self.provider == "openai":
394
+ elif self.provider in ["openai", "azure_openai"]:
395
395
  m.tool_calls = True
396
396
  m.llama_index['env'] = [
397
397
  {
@@ -429,12 +429,49 @@ class Importer:
429
429
  'type': 'str'
430
430
  }
431
431
  ]
432
+ elif self.provider == "mistral_ai":
433
+ m.tool_calls = True
434
+ m.llama_index['args'].append(
435
+ {
436
+ 'name': 'api_key',
437
+ 'value': '{api_key_mistral}',
438
+ 'type': 'str'
439
+ }
440
+ )
441
+ elif self.provider == "local_ai":
442
+ m.tool_calls = True
443
+ m.llama_index['env'] = [
444
+ {
445
+ 'name': 'OPENAI_API_KEY',
446
+ 'value': '{api_key}',
447
+ 'type': 'str'
448
+ },
449
+ {
450
+ 'name': 'OPENAI_API_BASE',
451
+ 'value': '{api_endpoint}',
452
+ 'type': 'str'
453
+ }
454
+ ]
455
+ elif self.provider == "open_router":
456
+ m.tool_calls = True
457
+ m.llama_index['env'] = [
458
+ {
459
+ 'name': 'OPENAI_API_KEY',
460
+ 'value': '{api_key_open_router}',
461
+ 'type': 'str'
462
+ },
463
+ {
464
+ 'name': 'OPENAI_API_BASE',
465
+ 'value': '{api_endpoint_open_router}',
466
+ 'type': 'str'
467
+ }
468
+ ]
432
469
  elif self.provider == "x_ai":
433
470
  m.tool_calls = True
434
471
  m.llama_index['env'] = [
435
472
  {
436
473
  'name': 'OPENAI_API_KEY',
437
- 'value': '{api_key_xai',
474
+ 'value': '{api_key_xai}',
438
475
  'type': 'str'
439
476
  },
440
477
  {
@@ -587,9 +624,9 @@ class Importer:
587
624
  :return: Dict with keys and values
588
625
  """
589
626
  excluded = [
590
- "azure_openai",
627
+ #"azure_openai",
591
628
  "huggingface_api",
592
- "mistral_ai",
629
+ #"mistral_ai",
593
630
  "local_ai",
594
631
  "perplexity",
595
632
  ]
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.24 02:00:00 #
9
+ # Updated Date: 2025.08.26 19:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  from typing import Optional, List
@@ -92,9 +92,11 @@ class Loop(BaseRunner):
92
92
 
93
93
  ctx = context.ctx
94
94
  self.send_response(ctx, signals, KernelEvent.APPEND_BEGIN) # lock input, show stop btn
95
+
95
96
  history = context.history
96
97
  tools = self.window.core.agents.observer.evaluation.get_tools()
97
98
  mode = self.window.core.config.get('agent.llama.loop.mode', "score")
99
+
98
100
  prompt = ""
99
101
  if mode == "score":
100
102
  prompt = self.window.core.agents.observer.evaluation.get_prompt_score(history)
@@ -106,8 +108,18 @@ class Loop(BaseRunner):
106
108
  self.next_instruction = "" # reset
107
109
  self.prev_score = -1 # reset
108
110
 
111
+ # select evaluation model
112
+ eval_model = ctx.model
113
+ custom_model = self.window.core.config.get('agent.llama.eval_model', None)
114
+ if custom_model and custom_model != "_":
115
+ eval_model = custom_model
116
+
117
+ if self.is_verbose():
118
+ print("[Evaluation] Prompt:", prompt)
119
+ print("[Evaluation] Running with model:", eval_model)
120
+
109
121
  # run agent once
110
- self.run_once(prompt, tools, ctx.model) # tool will update evaluation
122
+ self.run_once(prompt, tools, eval_model) # tool will update evaluation
111
123
  return self.handle_evaluation(ctx, self.next_instruction, self.prev_score, signals)
112
124
 
113
125
  def handle_evaluation(
@@ -134,11 +146,17 @@ class Loop(BaseRunner):
134
146
  score=str(score)
135
147
  )
136
148
  self.set_status(signals, msg)
149
+
150
+ if self.is_verbose():
151
+ print("[Evaluation] Score:", score)
152
+
137
153
  if score < 0:
138
154
  self.send_response(ctx, signals, KernelEvent.APPEND_END)
139
155
  self.set_idle(signals)
140
156
  return True
141
157
  good_score = self.window.core.config.get("agent.llama.loop.score", 75)
158
+ if self.is_verbose():
159
+ print("[Evaluation] Score needed:", good_score)
142
160
  if score >= good_score != 0:
143
161
  msg = "{status_finished} {score_label}: {score}%".format(
144
162
  status_finished=trans('status.finished'),
@@ -146,6 +164,8 @@ class Loop(BaseRunner):
146
164
  score=str(score)
147
165
  )
148
166
  ctx.extra["agent_eval_finish"] = True
167
+ if self.is_verbose():
168
+ print("[Evaluation] Stopping. Finish with score:", score)
149
169
  self.send_response(ctx, signals, KernelEvent.APPEND_END, msg=msg)
150
170
  self.set_idle(signals)
151
171
  return True
@@ -182,5 +202,18 @@ class Loop(BaseRunner):
182
202
  "agent_idx": preset.idx,
183
203
  "agent_provider": preset.agent_provider,
184
204
  }
205
+ if preset.agent_openai:
206
+ extra["agent_provider"] = preset.agent_provider_openai
207
+ if self.is_verbose():
208
+ print("[Evaluation] Instruction:", instruction)
209
+ print("[Evaluation] Running next step...")
185
210
  context.model = self.window.core.models.get(self.window.core.config.get('model'))
186
- return self.window.core.agents.runner.call(context, extra, signals)
211
+ return self.window.core.agents.runner.call(context, extra, signals)
212
+
213
+ def is_verbose(self) -> bool:
214
+ """
215
+ Check if verbose mode is enabled
216
+
217
+ :return: True if verbose mode is enabled
218
+ """
219
+ return self.window.core.config.get("agent.llama.verbose", False)
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.24 03:00:00 #
9
+ # Updated Date: 2025.08.26 23:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import copy
@@ -498,6 +498,10 @@ class Models:
498
498
  args["api_key"] = cfg.get('api_key_hugging_face', "")
499
499
  args["base_url"] = cfg.get('api_endpoint_hugging_face', "")
500
500
  self.window.core.debug.info("[api] Using client: HuggingFace Router API")
501
+ elif model.provider == "open_router":
502
+ args["api_key"] = cfg.get('api_key_open_router', "")
503
+ args["base_url"] = cfg.get('api_endpoint_open_router', "")
504
+ self.window.core.debug.info("[api] Using client: OpenRouter API")
501
505
  elif model.provider == "ollama":
502
506
  args["api_key"] = "ollama"
503
507
  args["base_url"] = self.window.core.models.ollama.get_base_url() + "/v1"
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.24 03:00:00 #
9
+ # Updated Date: 2025.08.26 23:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  OPENAI_COMPATIBLE_PROVIDERS = [
@@ -21,6 +21,7 @@ OPENAI_COMPATIBLE_PROVIDERS = [
21
21
  "perplexity",
22
22
  "deepseek_api",
23
23
  "x_ai",
24
+ "open_router",
24
25
  ]
25
26
 
26
27
  OPENAI_DISABLE_TOOLS = [
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "__meta__": {
3
- "version": "2.6.24",
4
- "app.version": "2.6.24",
3
+ "version": "2.6.26",
4
+ "app.version": "2.6.26",
5
5
  "updated_at": "2025-08-26T00:00:00"
6
6
  },
7
7
  "access.audio.event.speech": false,
@@ -52,6 +52,8 @@
52
52
  "agent.idx": "base",
53
53
  "agent.idx.auto_retrieve": false,
54
54
  "agent.iterations": 3,
55
+ "agent.llama.append_eval": false,
56
+ "agent.llama.eval_model": "_",
55
57
  "agent.llama.idx": "base",
56
58
  "agent.llama.loop.enabled": false,
57
59
  "agent.llama.loop.score": 75,
@@ -75,6 +77,7 @@
75
77
  "api_endpoint_anthropic": "https://api.anthropic.com/v1",
76
78
  "api_endpoint_mistral": "https://api.mistral.ai/v1",
77
79
  "api_endpoint_hugging_face": "https://router.huggingface.co/v1",
80
+ "api_endpoint_open_router": "https://openrouter.ai/api/v1",
78
81
  "api_key": "",
79
82
  "api_key_google": "",
80
83
  "api_key_anthropic": "",
@@ -83,6 +86,8 @@
83
86
  "api_key_perplexity": "",
84
87
  "api_key_xai": "",
85
88
  "api_key_mistral": "",
89
+ "api_key_voyage": "",
90
+ "api_key_open_router": "",
86
91
  "api_proxy": "",
87
92
  "api_use_responses": true,
88
93
  "api_use_responses_llama": false,
@@ -238,9 +243,19 @@
238
243
  "llama.idx.embeddings.provider": "openai",
239
244
  "llama.idx.embeddings.args": [
240
245
  {
241
- "name": "model",
246
+ "name": "model_name",
242
247
  "value": "text-embedding-3-small",
243
248
  "type": "str"
249
+ },
250
+ {
251
+ "name": "api_base",
252
+ "value": "https://api.openai.com/v1",
253
+ "type": "str"
254
+ },
255
+ {
256
+ "name": "timeout",
257
+ "value": 60,
258
+ "type": "float"
244
259
  }
245
260
  ],
246
261
  "llama.idx.embeddings.env": [
@@ -254,6 +269,14 @@
254
269
  }
255
270
  ],
256
271
  "llama.idx.embeddings.default": [
272
+ {
273
+ "provider": "anthropic",
274
+ "model": "voyage-3.5"
275
+ },
276
+ {
277
+ "provider": "deepseek_api",
278
+ "model": "voyage-3.5"
279
+ },
257
280
  {
258
281
  "provider": "google",
259
282
  "model": "gemini-embedding-001"
@@ -266,9 +289,17 @@
266
289
  "provider": "azure_openai",
267
290
  "model": "text-embedding-3-small"
268
291
  },
292
+ {
293
+ "provider": "mistral_ai",
294
+ "model": "mistral-embed"
295
+ },
269
296
  {
270
297
  "provider": "ollama",
271
298
  "model": ""
299
+ },
300
+ {
301
+ "provider": "x_ai",
302
+ "model": ""
272
303
  }
273
304
  ],
274
305
  "llama.idx.embeddings.limit.rpm": 100,
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "__meta__": {
3
- "version": "2.6.24",
4
- "app.version": "2.6.24",
3
+ "version": "2.6.26",
4
+ "app.version": "2.6.26",
5
5
  "updated_at": "2025-08-26T23:07:35"
6
6
  },
7
7
  "items": {
@@ -369,6 +369,59 @@
369
369
  "advanced": false,
370
370
  "tab": "Mistral AI"
371
371
  },
372
+ "api_key_voyage": {
373
+ "section": "api_keys",
374
+ "type": "text",
375
+ "slider": false,
376
+ "label": "settings.api_key.voyage",
377
+ "description": "settings.api_key.voyage.desc",
378
+ "value": "",
379
+ "min": null,
380
+ "max": null,
381
+ "multiplier": null,
382
+ "step": null,
383
+ "extra": {
384
+ "bold": true
385
+ },
386
+ "secret": true,
387
+ "persist": true,
388
+ "advanced": false,
389
+ "tab": "Voyage"
390
+ },
391
+ "api_key_open_router": {
392
+ "section": "api_keys",
393
+ "type": "text",
394
+ "slider": false,
395
+ "label": "settings.api_key.open_router",
396
+ "description": "settings.api_key.open_router.desc",
397
+ "value": "",
398
+ "min": null,
399
+ "max": null,
400
+ "multiplier": null,
401
+ "step": null,
402
+ "extra": {
403
+ "bold": true
404
+ },
405
+ "secret": true,
406
+ "persist": true,
407
+ "advanced": false,
408
+ "tab": "OpenRouter"
409
+ },
410
+ "api_endpoint_open_router": {
411
+ "section": "api_keys",
412
+ "type": "text",
413
+ "slider": false,
414
+ "label": "settings.api_endpoint_open_router",
415
+ "description": "settings.api_endpoint_open_router.desc",
416
+ "value": "https://openrouter.ai/api/v1",
417
+ "min": null,
418
+ "max": null,
419
+ "multiplier": null,
420
+ "step": null,
421
+ "secret": false,
422
+ "advanced": false,
423
+ "tab": "OpenRouter"
424
+ },
372
425
  "app.env": {
373
426
  "section": "general",
374
427
  "type": "dict",
@@ -1935,6 +1988,24 @@
1935
1988
  "advanced": false,
1936
1989
  "tab": "llama"
1937
1990
  },
1991
+ "agent.llama.eval_model": {
1992
+ "section": "agent",
1993
+ "description": "settings.agent.llama.eval_model.desc",
1994
+ "type": "combo",
1995
+ "use": "models",
1996
+ "use_params": {
1997
+ "allow_empty": true
1998
+ },
1999
+ "slider": true,
2000
+ "label": "settings.agent.llama.eval_model",
2001
+ "value": null,
2002
+ "min": 0,
2003
+ "max": 100,
2004
+ "multiplier": 1,
2005
+ "step": 1,
2006
+ "advanced": false,
2007
+ "tab": "llama"
2008
+ },
1938
2009
  "agent.llama.append_eval": {
1939
2010
  "section": "agent",
1940
2011
  "description": "settings.agent.llama.append_eval.desc",
@@ -1948,7 +2019,7 @@
1948
2019
  "step": 1,
1949
2020
  "advanced": false,
1950
2021
  "tab": "llama"
1951
- },
2022
+ },
1952
2023
  "agent.openai.response.split": {
1953
2024
  "section": "agent",
1954
2025
  "description": "settings.agent.openai.response.split.desc",
@@ -1061,6 +1061,8 @@ settings.agent.idx.auto_retrieve.desc = Auto retrieve additional context from RA
1061
1061
  settings.agent.idx.desc = Only if sub-mode is Chat with Files, choose the index to use in Autonomous and Experts modes
1062
1062
  settings.agent.llama.append_eval = Append and compare previous evaluation prompt in next evaluation
1063
1063
  settings.agent.llama.append_eval.desc = If enabled, previous improvement prompt will be checked in next eval in loop
1064
+ settings.agent.llama.eval_model = Model for evaluation
1065
+ settings.agent.llama.eval_model.desc = Model used for evaluation with score/percentage (loop). If not selected, then current active model will be used.
1064
1066
  settings.agent.llama.iterations = Max run iterations
1065
1067
  settings.agent.llama.iterations.desc = Max run iterations before goal achieved in Always-continue mode
1066
1068
  settings.agent.llama.max_eval = Max evaluation steps in loop
@@ -1090,6 +1092,8 @@ settings.api_endpoint_hugging_face = Router API Endpoint
1090
1092
  settings.api_endpoint_hugging_face.desc = API Endpoint for HuggingFace Router provider (OpenAI compatible ChatCompletions)
1091
1093
  settings.api_endpoint_mistral = API Endpoint
1092
1094
  settings.api_endpoint_mistral.desc = Mistral AI API endpoint URL, default: https://api.mistral.ai/v1
1095
+ settings.api_endpoint_open_router = API Endpoint
1096
+ settings.api_endpoint_open_router.desc = OpenRouter API endpoint URL, default: https://openrouter.ai/api/v1
1093
1097
  settings.api_endpoint_perplexity = API Endpoint
1094
1098
  settings.api_endpoint_perplexity.desc = Perplexity API endpoint URL, default: https://api.perplexity.ai
1095
1099
  settings.api_endpoint_xai = API Endpoint
@@ -1106,8 +1110,12 @@ settings.api_key.hugging_face = HuggingFace API KEY
1106
1110
  settings.api_key.hugging_face.desc = Required for the HuggingFace API.
1107
1111
  settings.api_key.mistral = Mistral AI API KEY
1108
1112
  settings.api_key.mistral.desc = Required for the Mistral AI API.
1113
+ settings.api_key.open_router = OpenRouter API KEY
1114
+ settings.api_key.open_router.desc = Required for the OpenRouter API.
1109
1115
  settings.api_key.perplexity = Perplexity API KEY
1110
1116
  settings.api_key.perplexity.desc = Required for the Perplexity API.
1117
+ settings.api_key.voyage = VoyageAI API KEY
1118
+ settings.api_key.voyage.desc = Required for the Voyage API - embeddings for Anthropic and DeepSeek API.
1111
1119
  settings.api_key.xai = xAI API KEY
1112
1120
  settings.api_key.xai.desc = Required for the xAI API and Grok models.
1113
1121
  settings.api_proxy = Proxy address
@@ -1341,7 +1349,9 @@ settings.section.api_keys.google = Google
1341
1349
  settings.section.api_keys.huggingface = HuggingFace
1342
1350
  settings.section.api_keys.mistral_ai = Mistral AI
1343
1351
  settings.section.api_keys.openai = OpenAI
1352
+ settings.section.api_keys.openrouter = OpenRouter
1344
1353
  settings.section.api_keys.perplexity = Perplexity
1354
+ settings.section.api_keys.voyage = VoyageAI
1345
1355
  settings.section.api_keys.xai = xAI
1346
1356
  settings.section.audio = Audio
1347
1357
  settings.section.audio.cache = Cache
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.25 20:00:00 #
9
+ # Updated Date: 2025.08.26 23:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import copy
@@ -2299,6 +2299,51 @@ class Patch:
2299
2299
  'llama.idx.embeddings.default')
2300
2300
  updated = True
2301
2301
 
2302
+ # < 2.6.25
2303
+ if old < parse_version("2.6.25"):
2304
+ print("Migrating config from < 2.6.25...")
2305
+ if "api_key_voyage" not in data:
2306
+ data["api_key_voyage"] = ""
2307
+ if "agent.llama.eval_model" not in data:
2308
+ data["agent.llama.eval_model"] = "_"
2309
+ if "llama.idx.embeddings.default" in data:
2310
+ providers = []
2311
+ for item in data["llama.idx.embeddings.default"]:
2312
+ p = item.get('provider', '')
2313
+ if p and p not in providers:
2314
+ providers.append(p)
2315
+
2316
+ if "anthropic" not in providers:
2317
+ data["llama.idx.embeddings.default"].append({
2318
+ "provider": "anthropic",
2319
+ "model": "voyage-3.5",
2320
+ })
2321
+ if "deepseek_api" not in providers:
2322
+ data["llama.idx.embeddings.default"].append({
2323
+ "provider": "deepseek_api",
2324
+ "model": "voyage-3.5",
2325
+ })
2326
+ if "mistral_ai" not in providers:
2327
+ data["llama.idx.embeddings.default"].append({
2328
+ "provider": "mistral_ai",
2329
+ "model": "mistral-embed",
2330
+ })
2331
+ if "x_ai" not in providers:
2332
+ data["llama.idx.embeddings.default"].append({
2333
+ "provider": "x_ai",
2334
+ "model": "",
2335
+ })
2336
+ updated = True
2337
+
2338
+ # < 2.6.26
2339
+ if old < parse_version("2.6.26"):
2340
+ print("Migrating config from < 2.6.26...")
2341
+ if "api_key_open_router" not in data:
2342
+ data["api_key_open_router"] = ""
2343
+ if "api_endpoint_open_router" not in data:
2344
+ data["api_endpoint_open_router"] = "https://openrouter.ai/api/v1"
2345
+ updated = True
2346
+
2302
2347
  # update file
2303
2348
  migrated = False
2304
2349
  if updated:
@@ -6,10 +6,12 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.06 01:00:00 #
9
+ # Updated Date: 2025.08.26 19:00:00 #
10
10
  # ================================================== #
11
- from typing import List, Dict
12
11
 
12
+ from typing import List, Dict, Optional
13
+
14
+ from llama_index.core.base.embeddings.base import BaseEmbedding
13
15
  from llama_index.core.llms.llm import BaseLLM as LlamaBaseLLM
14
16
 
15
17
  from pygpt_net.core.types import (
@@ -31,7 +33,7 @@ class AnthropicLLM(BaseLLM):
31
33
  """
32
34
  self.id = "anthropic"
33
35
  self.name = "Anthropic"
34
- self.type = [MODE_LLAMA_INDEX]
36
+ self.type = [MODE_LLAMA_INDEX, "embeddings"]
35
37
 
36
38
  def llama(
37
39
  self,
@@ -51,8 +53,36 @@ class AnthropicLLM(BaseLLM):
51
53
  args = self.parse_args(model.llama_index, window)
52
54
  if "model" not in args:
53
55
  args["model"] = model.id
56
+ if "api_key" not in args or args["api_key"] == "":
57
+ args["api_key"] = window.core.config.get("api_key_anthropic", "")
54
58
  return Anthropic(**args)
55
59
 
60
+ def get_embeddings_model(
61
+ self,
62
+ window,
63
+ config: Optional[List[Dict]] = None
64
+ ) -> BaseEmbedding:
65
+ """
66
+ Return provider instance for embeddings
67
+
68
+ :param window: window instance
69
+ :param config: config keyword arguments list
70
+ :return: Embedding provider instance
71
+ """
72
+ from llama_index.embeddings.voyageai import VoyageEmbedding
73
+ args = {}
74
+ if config is not None:
75
+ args = self.parse_args({
76
+ "args": config,
77
+ }, window)
78
+ if "api_key" in args:
79
+ args["voyage_api_key"] = args.pop("api_key")
80
+ if "voyage_api_key" not in args or args["voyage_api_key"] == "":
81
+ args["voyage_api_key"] = window.core.config.get("api_key_voyage", "")
82
+ if "model" in args and "model_name" not in args:
83
+ args["model_name"] = args.pop("model")
84
+ return VoyageEmbedding(**args)
85
+
56
86
  def get_models(
57
87
  self,
58
88
  window,
@@ -6,14 +6,11 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.06 01:00:00 #
9
+ # Updated Date: 2025.08.26 19:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  from typing import Optional, List, Dict
13
13
 
14
- # from langchain_openai import AzureOpenAI
15
- # from langchain_openai import AzureChatOpenAI
16
-
17
14
  from llama_index.core.llms.llm import BaseLLM as LlamaBaseLLM
18
15
  from llama_index.core.base.embeddings.base import BaseEmbedding
19
16
 
@@ -93,6 +90,10 @@ class AzureOpenAILLM(BaseLLM):
93
90
  """
94
91
  from llama_index.llms.azure_openai import AzureOpenAI as LlamaAzureOpenAI
95
92
  args = self.parse_args(model.llama_index, window)
93
+ if "api_key" not in args:
94
+ args["api_key"] = window.core.config.get("api_key", "")
95
+ if "model" not in args:
96
+ args["model"] = model.id
96
97
  return LlamaAzureOpenAI(**args)
97
98
 
98
99
  def get_embeddings_model(
@@ -113,4 +114,8 @@ class AzureOpenAILLM(BaseLLM):
113
114
  args = self.parse_args({
114
115
  "args": config,
115
116
  }, window)
117
+ if "api_key" not in args:
118
+ args["api_key"] = window.core.config.get("api_key", "")
119
+ if "model" in args and "model_name" not in args:
120
+ args["model_name"] = args.pop("model")
116
121
  return AzureOpenAIEmbedding(**args)