pygpt-net 2.6.23__py3-none-any.whl → 2.6.25__py3-none-any.whl

This diff represents the changes between publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects the differences between package versions as they appear in their respective public registries.
Files changed (51):
  1. pygpt_net/CHANGELOG.txt +14 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/controller/chat/response.py +6 -5
  4. pygpt_net/controller/config/placeholder.py +3 -1
  5. pygpt_net/controller/model/importer.py +28 -5
  6. pygpt_net/core/agents/runners/loop.py +36 -3
  7. pygpt_net/core/attachments/context.py +4 -4
  8. pygpt_net/core/idx/chat.py +1 -1
  9. pygpt_net/core/idx/indexing.py +3 -3
  10. pygpt_net/core/idx/llm.py +61 -2
  11. pygpt_net/data/config/config.json +41 -4
  12. pygpt_net/data/config/models.json +3 -3
  13. pygpt_net/data/config/settings.json +56 -1
  14. pygpt_net/data/locale/locale.de.ini +46 -0
  15. pygpt_net/data/locale/locale.en.ini +53 -1
  16. pygpt_net/data/locale/locale.es.ini +46 -0
  17. pygpt_net/data/locale/locale.fr.ini +46 -0
  18. pygpt_net/data/locale/locale.it.ini +46 -0
  19. pygpt_net/data/locale/locale.pl.ini +47 -1
  20. pygpt_net/data/locale/locale.uk.ini +46 -0
  21. pygpt_net/data/locale/locale.zh.ini +46 -0
  22. pygpt_net/provider/agents/llama_index/codeact_workflow.py +8 -7
  23. pygpt_net/provider/agents/llama_index/planner_workflow.py +11 -10
  24. pygpt_net/provider/agents/llama_index/supervisor_workflow.py +9 -8
  25. pygpt_net/provider/agents/openai/agent_b2b.py +30 -17
  26. pygpt_net/provider/agents/openai/agent_planner.py +29 -29
  27. pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +21 -23
  28. pygpt_net/provider/agents/openai/agent_with_feedback.py +21 -23
  29. pygpt_net/provider/agents/openai/bot_researcher.py +25 -30
  30. pygpt_net/provider/agents/openai/evolve.py +37 -39
  31. pygpt_net/provider/agents/openai/supervisor.py +16 -18
  32. pygpt_net/provider/core/config/patch.py +45 -1
  33. pygpt_net/provider/llms/anthropic.py +38 -7
  34. pygpt_net/provider/llms/azure_openai.py +9 -4
  35. pygpt_net/provider/llms/deepseek_api.py +36 -3
  36. pygpt_net/provider/llms/google.py +9 -3
  37. pygpt_net/provider/llms/hugging_face_api.py +9 -3
  38. pygpt_net/provider/llms/hugging_face_router.py +17 -3
  39. pygpt_net/provider/llms/llama_index/x_ai/__init__.py +0 -0
  40. pygpt_net/provider/llms/llama_index/x_ai/embedding.py +71 -0
  41. pygpt_net/provider/llms/local.py +25 -1
  42. pygpt_net/provider/llms/mistral.py +29 -1
  43. pygpt_net/provider/llms/ollama.py +3 -1
  44. pygpt_net/provider/llms/openai.py +7 -2
  45. pygpt_net/provider/llms/x_ai.py +19 -3
  46. pygpt_net/ui/widget/textarea/input.py +3 -3
  47. {pygpt_net-2.6.23.dist-info → pygpt_net-2.6.25.dist-info}/METADATA +54 -28
  48. {pygpt_net-2.6.23.dist-info → pygpt_net-2.6.25.dist-info}/RECORD +51 -49
  49. {pygpt_net-2.6.23.dist-info → pygpt_net-2.6.25.dist-info}/LICENSE +0 -0
  50. {pygpt_net-2.6.23.dist-info → pygpt_net-2.6.25.dist-info}/WHEEL +0 -0
  51. {pygpt_net-2.6.23.dist-info → pygpt_net-2.6.25.dist-info}/entry_points.txt +0 -0
@@ -6,16 +6,13 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.24 03:00:00 #
9
+ # Updated Date: 2025.08.26 01:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  from typing import Dict, Any, Tuple, Union, Optional
13
13
 
14
14
  from agents import (
15
15
  Agent as OpenAIAgent,
16
- Runner,
17
- RunConfig,
18
- ModelSettings,
19
16
  )
20
17
 
21
18
  from pygpt_net.core.agents.bridge import ConnectionContext
@@ -27,11 +24,9 @@ from pygpt_net.core.types import (
27
24
 
28
25
  from pygpt_net.item.ctx import CtxItem
29
26
  from pygpt_net.item.model import ModelItem
30
- from pygpt_net.item.preset import PresetItem
31
27
 
32
- from pygpt_net.provider.gpt.agents.client import get_custom_model_provider, set_openai_env
33
28
  from pygpt_net.provider.gpt.agents.remote_tools import append_tools
34
- from pygpt_net.provider.gpt.agents.response import StreamHandler
29
+ from pygpt_net.utils import trans
35
30
 
36
31
  from ..base import BaseAgent
37
32
  from .bots.research_bot.manager import ResearchManager
@@ -207,82 +202,82 @@ class Agent(BaseAgent):
207
202
  """
208
203
  return {
209
204
  "writer": {
210
- "label": "Base agent",
205
+ "label": trans("agent.option.section.base"),
211
206
  "options": {
212
207
  "prompt": {
213
208
  "type": "textarea",
214
- "label": "Prompt",
215
- "description": "Prompt for base agent",
209
+ "label": trans("agent.option.prompt"),
210
+ "description": trans("agent.option.prompt.base.desc"),
216
211
  "default": self.PROMPT_WRITER,
217
212
  },
218
213
  "allow_local_tools": {
219
214
  "type": "bool",
220
- "label": "Allow local tools",
221
- "description": "Allow usage of local tools for this agent",
215
+ "label": trans("agent.option.tools.local"),
216
+ "description": trans("agent.option.tools.local.desc"),
222
217
  "default": False,
223
218
  },
224
219
  "allow_remote_tools": {
225
220
  "type": "bool",
226
- "label": "Allow remote tools",
227
- "description": "Allow usage of remote tools for this agent",
221
+ "label": trans("agent.option.tools.remote"),
222
+ "description": trans("agent.option.tools.remote.desc"),
228
223
  "default": False,
229
224
  },
230
225
  }
231
226
  },
232
227
  "planner": {
233
- "label": "Planner",
228
+ "label": trans("agent.option.section.planner"),
234
229
  "options": {
235
230
  "model": {
236
- "label": "Model",
231
+ "label": trans("agent.option.model"),
237
232
  "type": "combo",
238
233
  "use": "models",
239
234
  "default": "gpt-4o",
240
235
  },
241
236
  "prompt": {
242
237
  "type": "textarea",
243
- "label": "Prompt",
244
- "description": "Prompt for planner agent",
238
+ "label": trans("agent.option.prompt"),
239
+ "description": trans("agent.option.prompt.planner.desc"),
245
240
  "default": self.PROMPT_PLANNER,
246
241
  },
247
242
  "allow_local_tools": {
248
243
  "type": "bool",
249
- "label": "Allow local tools",
250
- "description": "Allow usage of local tools for this agent",
244
+ "label": trans("agent.option.tools.local"),
245
+ "description": trans("agent.option.tools.local.desc"),
251
246
  "default": False,
252
247
  },
253
248
  "allow_remote_tools": {
254
249
  "type": "bool",
255
- "label": "Allow remote tools",
256
- "description": "Allow usage of remote tools for this agent",
250
+ "label": trans("agent.option.tools.remote"),
251
+ "description": trans("agent.option.tools.remote.desc"),
257
252
  "default": False,
258
253
  },
259
254
  }
260
255
  },
261
256
  "search": {
262
- "label": "Search",
257
+ "label": trans("agent.option.section.search"),
263
258
  "options": {
264
259
  "model": {
265
- "label": "Model",
260
+ "label": trans("agent.option.model"),
266
261
  "type": "combo",
267
262
  "use": "models",
268
263
  "default": "gpt-4o",
269
264
  },
270
265
  "prompt": {
271
266
  "type": "textarea",
272
- "label": "Prompt",
273
- "description": "Prompt for search agent",
267
+ "label": trans("agent.option.prompt"),
268
+ "description": trans("agent.option.prompt.search.desc"),
274
269
  "default": self.PROMPT_SEARCH,
275
270
  },
276
271
  "allow_local_tools": {
277
272
  "type": "bool",
278
- "label": "Allow local tools",
279
- "description": "Allow usage of local tools for this agent",
273
+ "label": trans("agent.option.tools.local"),
274
+ "description": trans("agent.option.tools.local.desc"),
280
275
  "default": False,
281
276
  },
282
277
  "allow_remote_tools": {
283
278
  "type": "bool",
284
- "label": "Allow remote tools",
285
- "description": "Allow usage of remote tools for this agent",
279
+ "label": trans("agent.option.tools.remote"),
280
+ "description": trans("agent.option.tools.remote.desc"),
286
281
  "default": True,
287
282
  },
288
283
  }
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.24 03:00:00 #
9
+ # Updated Date: 2025.08.26 01:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import copy
@@ -16,8 +16,6 @@ from typing import Dict, Any, Tuple, Literal, Optional
16
16
  from agents import (
17
17
  Agent as OpenAIAgent,
18
18
  Runner,
19
- RunConfig,
20
- ModelSettings,
21
19
  TResponseInputItem,
22
20
  )
23
21
 
@@ -32,9 +30,9 @@ from pygpt_net.item.ctx import CtxItem
32
30
  from pygpt_net.item.model import ModelItem
33
31
  from pygpt_net.item.preset import PresetItem
34
32
 
35
- from pygpt_net.provider.gpt.agents.client import get_custom_model_provider, set_openai_env
36
- from pygpt_net.provider.gpt.agents.remote_tools import get_remote_tools, is_computer_tool, append_tools
33
+ from pygpt_net.provider.gpt.agents.remote_tools import append_tools
37
34
  from pygpt_net.provider.gpt.agents.response import StreamHandler
35
+ from pygpt_net.utils import trans
38
36
 
39
37
  from ..base import BaseAgent
40
38
  from ...gpt.agents.experts import get_experts
@@ -379,7 +377,7 @@ class Agent(BaseAgent):
379
377
  )
380
378
 
381
379
  if num_generation >= max_generations > 0:
382
- info = f"\n\n**Max generations reached ({max_generations}), exiting.**\n"
380
+ info = f"\n\n**{trans('agent.evolve.maxgen_limit')}**\n"
383
381
  ctx.stream = info
384
382
  bridge.on_step(ctx, False)
385
383
  final_output += info
@@ -390,7 +388,7 @@ class Agent(BaseAgent):
390
388
  handler = StreamHandler(window, bridge)
391
389
  begin = True
392
390
  while True:
393
- ctx.stream = f"\n\n\n\n**Generation {num_generation}**\n\n"
391
+ ctx.stream = f"\n\n\n\n**{trans('agent.evolve.generation')} {num_generation}**\n\n"
394
392
  bridge.on_step(ctx, begin)
395
393
  handler.begin = False
396
394
  begin = False
@@ -403,7 +401,7 @@ class Agent(BaseAgent):
403
401
  parents[j],
404
402
  **parent_kwargs
405
403
  )
406
- ctx.stream = f"\n\n**Running agent {j} ...**\n\n"
404
+ ctx.stream = f"\n\n**{trans('agent.evolve.running')} {j} ...**\n\n"
407
405
  bridge.on_step(ctx)
408
406
  handler.reset()
409
407
  async for event in results[j].stream_events():
@@ -432,7 +430,7 @@ class Agent(BaseAgent):
432
430
 
433
431
  handler.to_buffer(results[choose].final_output)
434
432
  final_output = handler.buffer
435
- ctx.stream = f"**Winner: agent {result.answer_number}**\n\n"
433
+ ctx.stream = f"**{trans('agent.evolve.winner')} {result.answer_number}**\n\n"
436
434
  bridge.on_step(ctx)
437
435
 
438
436
  if bridge.stopped():
@@ -445,9 +443,9 @@ class Agent(BaseAgent):
445
443
  evaluator_result = await Runner.run(evaluator, input_items)
446
444
  result: EvaluationFeedback = evaluator_result.final_output
447
445
 
448
- info = f"\n___\n**Evaluator score: {result.score}**\n\n"
446
+ info = f"\n___\n**{trans('agent.eval.score')}: {result.score}**\n\n"
449
447
  if result.score == "pass":
450
- info += "\n\n**Response is good enough, exiting.**\n"
448
+ info += f"\n\n**{trans('agent.eval.score.good')}.**\n"
451
449
  if use_partial_ctx:
452
450
  ctx = bridge.on_next_ctx(
453
451
  ctx=ctx,
@@ -463,9 +461,9 @@ class Agent(BaseAgent):
463
461
  final_output += info
464
462
  break
465
463
  else:
466
- info = f"\n___\n**Evaluator score: {result.score}**\n\n"
464
+ info = f"\n___\n**{trans('agent.eval.score')}: {result.score}**\n\n"
467
465
 
468
- info += "\n\n**Re-running with feedback**\n\n" + f"Feedback: {result.feedback}\n___\n"
466
+ info += f"\n\n**{trans('agent.eval.next')}**\n\nFeedback: {result.feedback}\n___\n"
469
467
  input_items.append({"content": f"Feedback: {result.feedback}", "role": "user"})
470
468
 
471
469
  if use_partial_ctx:
@@ -483,7 +481,7 @@ class Agent(BaseAgent):
483
481
  handler.to_buffer(info)
484
482
 
485
483
  if num_generation >= max_generations > 0:
486
- info = f"\n\n**Max generations reached ({max_generations}), exiting.**\n"
484
+ info = f"\n\n**{trans('agent.evolve.maxgen_limit')}**\n"
487
485
  ctx.stream = info
488
486
  bridge.on_step(ctx, False)
489
487
  final_output += info
@@ -502,94 +500,94 @@ class Agent(BaseAgent):
502
500
  """
503
501
  return {
504
502
  "base": {
505
- "label": "Base agent",
503
+ "label": trans("agent.option.section.base"),
506
504
  "options": {
507
505
  "num_parents": {
508
506
  "type": "int",
509
- "label": "Num of parents",
507
+ "label": trans("agent.evolve.option.num_parents"),
510
508
  "min": 1,
511
509
  "default": 2,
512
510
  },
513
511
  "max_generations": {
514
512
  "type": "int",
515
- "label": "Max generations",
513
+ "label": trans("agent.evolve.option.max_generations"),
516
514
  "min": 1,
517
515
  "default": 10,
518
516
  },
519
517
  "prompt": {
520
518
  "type": "textarea",
521
- "label": "Prompt",
522
- "description": "Prompt for base agent",
519
+ "label": trans("agent.option.prompt"),
520
+ "description": trans("agent.option.prompt.desc"),
523
521
  "default": self.PROMPT,
524
522
  },
525
523
  "allow_local_tools": {
526
524
  "type": "bool",
527
- "label": "Allow local tools",
528
- "description": "Allow usage of local tools for this agent",
525
+ "label": trans("agent.option.tools.local"),
526
+ "description": trans("agent.option.tools.local.desc"),
529
527
  "default": False,
530
528
  },
531
529
  "allow_remote_tools": {
532
530
  "type": "bool",
533
- "label": "Allow remote tools",
534
- "description": "Allow usage of remote tools for this agent",
531
+ "label": trans("agent.option.tools.remote"),
532
+ "description": trans("agent.option.tools.remote.desc"),
535
533
  "default": False,
536
534
  },
537
535
  }
538
536
  },
539
537
  "chooser": {
540
- "label": "Chooser",
538
+ "label": trans("agent.option.section.chooser"),
541
539
  "options": {
542
540
  "model": {
543
- "label": "Model",
541
+ "label": trans("agent.option.model"),
544
542
  "type": "combo",
545
543
  "use": "models",
546
544
  "default": "gpt-4o",
547
545
  },
548
546
  "prompt": {
549
547
  "type": "textarea",
550
- "label": "Prompt",
551
- "description": "Prompt for chooser agent",
548
+ "label": trans("agent.option.prompt"),
549
+ "description": trans("agent.option.prompt.chooser.desc"),
552
550
  "default": self.PROMPT_CHOOSE,
553
551
  },
554
552
  "allow_local_tools": {
555
553
  "type": "bool",
556
- "label": "Allow local tools",
557
- "description": "Allow usage of local tools for this agent",
554
+ "label": trans("agent.option.tools.local"),
555
+ "description": trans("agent.option.tools.local.desc"),
558
556
  "default": False,
559
557
  },
560
558
  "allow_remote_tools": {
561
559
  "type": "bool",
562
- "label": "Allow remote tools",
563
- "description": "Allow usage of remote tools for this agent",
560
+ "label": trans("agent.option.tools.remote"),
561
+ "description": trans("agent.option.tools.remote.desc"),
564
562
  "default": False,
565
563
  },
566
564
  }
567
565
  },
568
566
  "feedback": {
569
- "label": "Feedback",
567
+ "label": trans("agent.option.section.feedback"),
570
568
  "options": {
571
569
  "model": {
572
- "label": "Model",
570
+ "label": trans("agent.option.model"),
573
571
  "type": "combo",
574
572
  "use": "models",
575
573
  "default": "gpt-4o",
576
574
  },
577
575
  "prompt": {
578
576
  "type": "textarea",
579
- "label": "Prompt",
580
- "description": "Prompt for feedback evaluation",
577
+ "label": trans("agent.option.prompt"),
578
+ "description": trans("agent.option.prompt.feedback.desc"),
581
579
  "default": self.PROMPT_FEEDBACK,
582
580
  },
583
581
  "allow_local_tools": {
584
582
  "type": "bool",
585
- "label": "Allow local tools",
586
- "description": "Allow usage of local tools for this agent",
583
+ "label": trans("agent.option.tools.local"),
584
+ "description": trans("agent.option.tools.local.desc"),
587
585
  "default": False,
588
586
  },
589
587
  "allow_remote_tools": {
590
588
  "type": "bool",
591
- "label": "Allow remote tools",
592
- "description": "Allow usage of remote tools for this agent",
589
+ "label": trans("agent.option.tools.remote"),
590
+ "description": trans("agent.option.tools.remote.desc"),
593
591
  "default": False,
594
592
  },
595
593
  }
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.24 03:00:00 #
9
+ # Updated Date: 2025.08.26 01:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import json
@@ -16,10 +16,8 @@ from typing import Dict, Any, Tuple, Optional
16
16
  from agents import (
17
17
  Agent as OpenAIAgent,
18
18
  Runner,
19
- RunConfig,
20
19
  RunContextWrapper,
21
20
  SQLiteSession,
22
- ModelSettings,
23
21
  function_tool,
24
22
  )
25
23
 
@@ -33,10 +31,10 @@ from pygpt_net.core.types import (
33
31
  from pygpt_net.item.ctx import CtxItem
34
32
  from pygpt_net.item.model import ModelItem
35
33
 
36
- from pygpt_net.provider.gpt.agents.client import get_custom_model_provider, set_openai_env
37
34
  from pygpt_net.provider.gpt.agents.remote_tools import append_tools
38
35
  from pygpt_net.provider.gpt.agents.response import StreamHandler
39
36
  from pygpt_net.provider.gpt.agents.experts import get_experts
37
+ from pygpt_net.utils import trans
40
38
 
41
39
  from ..base import BaseAgent
42
40
 
@@ -199,7 +197,7 @@ class Agent(BaseAgent):
199
197
  :param instruction: Instruction for the Worker
200
198
  :return: Output from the Worker
201
199
  """
202
- item_ctx.stream = f"\n\n**Supervisor --> Worker:** {instruction}\n\n"
200
+ item_ctx.stream = f"\n\n**{trans('agent.name.supervisor')} --> {trans('agent.name.worker')}:** {instruction}\n\n"
203
201
  bridge.on_step(item_ctx, True)
204
202
  handler.begin = False
205
203
  result = await Runner.run(
@@ -295,11 +293,11 @@ class Agent(BaseAgent):
295
293
  if action == "ask_user":
296
294
  question = response.get("question", "")
297
295
  reasoning = response.get("reasoning", "")
298
- return f"**Supervisor:** {reasoning}\n\n{question}"
296
+ return f"**{trans('agent.name.supervisor')}:** {reasoning}\n\n{question}"
299
297
  elif action == "final":
300
298
  final_answer = response.get("final_answer", "")
301
299
  reasoning = response.get("reasoning", "")
302
- return f"**Supervisor:** {reasoning}\n\n{final_answer}\n\n"
300
+ return f"**{trans('agent.name.supervisor')}:** {reasoning}\n\n{final_answer}\n\n"
303
301
  else:
304
302
  return response.get("final_answer", "")
305
303
 
@@ -311,41 +309,41 @@ class Agent(BaseAgent):
311
309
  """
312
310
  return {
313
311
  "supervisor": {
314
- "label": "Supervisor",
312
+ "label": trans("agent.option.section.supervisor"),
315
313
  "options": {
316
314
  "prompt": {
317
315
  "type": "textarea",
318
- "label": "Prompt",
319
- "description": "Prompt for supervisor",
316
+ "label": trans("agent.option.prompt"),
317
+ "description": trans("agent.option.prompt.supervisor.desc"),
320
318
  "default": SUPERVISOR_PROMPT,
321
319
  },
322
320
  }
323
321
  },
324
322
  "worker": {
325
- "label": "Worker",
323
+ "label": trans("agent.option.section.worker"),
326
324
  "options": {
327
325
  "model": {
328
- "label": "Model",
326
+ "label": trans("agent.option.model"),
329
327
  "type": "combo",
330
328
  "use": "models",
331
329
  "default": "gpt-4o",
332
330
  },
333
331
  "prompt": {
334
332
  "type": "textarea",
335
- "label": "Prompt",
336
- "description": "Prompt for worker",
333
+ "label": trans("agent.option.prompt"),
334
+ "description": trans("agent.option.prompt.worker.desc"),
337
335
  "default": WORKER_PROMPT,
338
336
  },
339
337
  "allow_local_tools": {
340
338
  "type": "bool",
341
- "label": "Allow local tools",
342
- "description": "Allow usage of local tools for this agent",
339
+ "label": trans("agent.option.tools.local"),
340
+ "description": trans("agent.option.tools.local.desc"),
343
341
  "default": True,
344
342
  },
345
343
  "allow_remote_tools": {
346
344
  "type": "bool",
347
- "label": "Allow remote tools",
348
- "description": "Allow usage of remote tools for this agent",
345
+ "label": trans("agent.option.tools.remote"),
346
+ "description": trans("agent.option.tools.remote.desc"),
349
347
  "default": True,
350
348
  },
351
349
  }
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.25 20:00:00 #
9
+ # Updated Date: 2025.08.26 19:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import copy
@@ -2291,6 +2291,50 @@ class Patch:
2291
2291
  self.window.core.updater.patch_css('web-blocks.light.css', True)
2292
2292
  updated = True
2293
2293
 
2294
+ # < 2.6.24
2295
+ if old < parse_version("2.6.24"):
2296
+ print("Migrating config from < 2.6.24...")
2297
+ if "llama.idx.embeddings.default" not in data:
2298
+ data["llama.idx.embeddings.default"] = self.window.core.config.get_base(
2299
+ 'llama.idx.embeddings.default')
2300
+ updated = True
2301
+
2302
+ # < 2.6.25
2303
+ if old < parse_version("2.6.25"):
2304
+ print("Migrating config from < 2.6.25...")
2305
+ if "api_key_voyage" not in data:
2306
+ data["api_key_voyage"] = ""
2307
+ if "agent.llama.eval_model" not in data:
2308
+ data["agent.llama.eval_model"] = "_"
2309
+ if "llama.idx.embeddings.default" in data:
2310
+ providers = []
2311
+ for item in data["llama.idx.embeddings.default"]:
2312
+ p = item.get('provider', '')
2313
+ if p and p not in providers:
2314
+ providers.append(p)
2315
+
2316
+ if "anthropic" not in providers:
2317
+ data["llama.idx.embeddings.default"].append({
2318
+ "provider": "anthropic",
2319
+ "model": "voyage-3.5",
2320
+ })
2321
+ if "deepseek_api" not in providers:
2322
+ data["llama.idx.embeddings.default"].append({
2323
+ "provider": "deepseek_api",
2324
+ "model": "voyage-3.5",
2325
+ })
2326
+ if "mistral_ai" not in providers:
2327
+ data["llama.idx.embeddings.default"].append({
2328
+ "provider": "mistral_ai",
2329
+ "model": "mistral-embed",
2330
+ })
2331
+ if "x_ai" not in providers:
2332
+ data["llama.idx.embeddings.default"].append({
2333
+ "provider": "x_ai",
2334
+ "model": "",
2335
+ })
2336
+ updated = True
2337
+
2294
2338
  # update file
2295
2339
  migrated = False
2296
2340
  if updated:
@@ -6,10 +6,12 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.06 01:00:00 #
9
+ # Updated Date: 2025.08.26 19:00:00 #
10
10
  # ================================================== #
11
- from typing import List, Dict
12
11
 
12
+ from typing import List, Dict, Optional
13
+
14
+ from llama_index.core.base.embeddings.base import BaseEmbedding
13
15
  from llama_index.core.llms.llm import BaseLLM as LlamaBaseLLM
14
16
 
15
17
  from pygpt_net.core.types import (
@@ -31,7 +33,7 @@ class AnthropicLLM(BaseLLM):
31
33
  """
32
34
  self.id = "anthropic"
33
35
  self.name = "Anthropic"
34
- self.type = [MODE_LLAMA_INDEX]
36
+ self.type = [MODE_LLAMA_INDEX, "embeddings"]
35
37
 
36
38
  def llama(
37
39
  self,
@@ -51,8 +53,36 @@ class AnthropicLLM(BaseLLM):
51
53
  args = self.parse_args(model.llama_index, window)
52
54
  if "model" not in args:
53
55
  args["model"] = model.id
56
+ if "api_key" not in args or args["api_key"] == "":
57
+ args["api_key"] = window.core.config.get("api_key_anthropic", "")
54
58
  return Anthropic(**args)
55
59
 
60
+ def get_embeddings_model(
61
+ self,
62
+ window,
63
+ config: Optional[List[Dict]] = None
64
+ ) -> BaseEmbedding:
65
+ """
66
+ Return provider instance for embeddings
67
+
68
+ :param window: window instance
69
+ :param config: config keyword arguments list
70
+ :return: Embedding provider instance
71
+ """
72
+ from llama_index.embeddings.voyageai import VoyageEmbedding
73
+ args = {}
74
+ if config is not None:
75
+ args = self.parse_args({
76
+ "args": config,
77
+ }, window)
78
+ if "api_key" in args:
79
+ args["voyage_api_key"] = args.pop("api_key")
80
+ if "voyage_api_key" not in args or args["voyage_api_key"] == "":
81
+ args["voyage_api_key"] = window.core.config.get("api_key_voyage", "")
82
+ if "model" in args and "model_name" not in args:
83
+ args["model_name"] = args.pop("model")
84
+ return VoyageEmbedding(**args)
85
+
56
86
  def get_models(
57
87
  self,
58
88
  window,
@@ -63,11 +93,12 @@ class AnthropicLLM(BaseLLM):
63
93
  :param window: window instance
64
94
  :return: list of models
65
95
  """
66
- items = []
67
- from llama_index.llms.anthropic import Anthropic
68
- api_key = window.core.config.get('api_key_anthropic', "")
69
- client = Anthropic(api_key=api_key)
96
+ import anthropic
97
+ client = anthropic.Anthropic(
98
+ api_key=window.core.config.get('api_key_anthropic', "")
99
+ )
70
100
  models_list = client.models.list()
101
+ items = []
71
102
  if models_list.data:
72
103
  for item in models_list.data:
73
104
  items.append({
@@ -6,14 +6,11 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.06 01:00:00 #
9
+ # Updated Date: 2025.08.26 19:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  from typing import Optional, List, Dict
13
13
 
14
- # from langchain_openai import AzureOpenAI
15
- # from langchain_openai import AzureChatOpenAI
16
-
17
14
  from llama_index.core.llms.llm import BaseLLM as LlamaBaseLLM
18
15
  from llama_index.core.base.embeddings.base import BaseEmbedding
19
16
 
@@ -93,6 +90,10 @@ class AzureOpenAILLM(BaseLLM):
93
90
  """
94
91
  from llama_index.llms.azure_openai import AzureOpenAI as LlamaAzureOpenAI
95
92
  args = self.parse_args(model.llama_index, window)
93
+ if "api_key" not in args:
94
+ args["api_key"] = window.core.config.get("api_key", "")
95
+ if "model" not in args:
96
+ args["model"] = model.id
96
97
  return LlamaAzureOpenAI(**args)
97
98
 
98
99
  def get_embeddings_model(
@@ -113,4 +114,8 @@ class AzureOpenAILLM(BaseLLM):
113
114
  args = self.parse_args({
114
115
  "args": config,
115
116
  }, window)
117
+ if "api_key" not in args:
118
+ args["api_key"] = window.core.config.get("api_key", "")
119
+ if "model" in args and "model_name" not in args:
120
+ args["model_name"] = args.pop("model")
116
121
  return AzureOpenAIEmbedding(**args)