pygpt-net 2.6.28__py3-none-any.whl → 2.6.30__py3-none-any.whl

This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (115)
  1. pygpt_net/CHANGELOG.txt +13 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/{container.py → app_core.py} +5 -6
  4. pygpt_net/controller/access/control.py +1 -9
  5. pygpt_net/controller/assistant/assistant.py +4 -4
  6. pygpt_net/controller/assistant/batch.py +7 -7
  7. pygpt_net/controller/assistant/files.py +4 -4
  8. pygpt_net/controller/assistant/threads.py +3 -3
  9. pygpt_net/controller/attachment/attachment.py +4 -7
  10. pygpt_net/controller/chat/common.py +1 -1
  11. pygpt_net/controller/chat/stream.py +961 -294
  12. pygpt_net/controller/chat/vision.py +11 -19
  13. pygpt_net/controller/config/placeholder.py +1 -1
  14. pygpt_net/controller/ctx/ctx.py +1 -1
  15. pygpt_net/controller/ctx/summarizer.py +1 -1
  16. pygpt_net/controller/mode/mode.py +21 -12
  17. pygpt_net/controller/plugins/settings.py +3 -2
  18. pygpt_net/controller/presets/editor.py +112 -99
  19. pygpt_net/controller/theme/common.py +2 -0
  20. pygpt_net/controller/theme/theme.py +6 -2
  21. pygpt_net/controller/ui/vision.py +4 -4
  22. pygpt_net/core/agents/legacy.py +2 -2
  23. pygpt_net/core/agents/runners/openai_workflow.py +2 -2
  24. pygpt_net/core/assistants/files.py +5 -5
  25. pygpt_net/core/assistants/store.py +4 -4
  26. pygpt_net/core/bridge/bridge.py +3 -3
  27. pygpt_net/core/bridge/worker.py +28 -9
  28. pygpt_net/core/debug/console/console.py +2 -2
  29. pygpt_net/core/debug/presets.py +2 -2
  30. pygpt_net/core/experts/experts.py +2 -2
  31. pygpt_net/core/idx/llm.py +21 -3
  32. pygpt_net/core/modes/modes.py +2 -2
  33. pygpt_net/core/presets/presets.py +3 -3
  34. pygpt_net/core/tokens/tokens.py +4 -4
  35. pygpt_net/core/types/mode.py +5 -2
  36. pygpt_net/core/vision/analyzer.py +1 -1
  37. pygpt_net/data/config/config.json +6 -3
  38. pygpt_net/data/config/models.json +75 -3
  39. pygpt_net/data/config/modes.json +3 -9
  40. pygpt_net/data/config/settings.json +112 -55
  41. pygpt_net/data/config/settings_section.json +2 -2
  42. pygpt_net/data/locale/locale.de.ini +2 -2
  43. pygpt_net/data/locale/locale.en.ini +9 -2
  44. pygpt_net/data/locale/locale.es.ini +2 -2
  45. pygpt_net/data/locale/locale.fr.ini +2 -2
  46. pygpt_net/data/locale/locale.it.ini +2 -2
  47. pygpt_net/data/locale/locale.pl.ini +3 -3
  48. pygpt_net/data/locale/locale.uk.ini +2 -2
  49. pygpt_net/data/locale/locale.zh.ini +2 -2
  50. pygpt_net/item/model.py +23 -3
  51. pygpt_net/plugin/openai_dalle/plugin.py +4 -4
  52. pygpt_net/plugin/openai_vision/plugin.py +12 -13
  53. pygpt_net/provider/agents/openai/agent.py +5 -5
  54. pygpt_net/provider/agents/openai/agent_b2b.py +5 -5
  55. pygpt_net/provider/agents/openai/agent_planner.py +5 -6
  56. pygpt_net/provider/agents/openai/agent_with_experts.py +5 -5
  57. pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +4 -4
  58. pygpt_net/provider/agents/openai/agent_with_feedback.py +4 -4
  59. pygpt_net/provider/agents/openai/bot_researcher.py +2 -2
  60. pygpt_net/provider/agents/openai/bots/research_bot/agents/planner_agent.py +1 -1
  61. pygpt_net/provider/agents/openai/bots/research_bot/agents/search_agent.py +1 -1
  62. pygpt_net/provider/agents/openai/bots/research_bot/agents/writer_agent.py +1 -1
  63. pygpt_net/provider/agents/openai/evolve.py +5 -5
  64. pygpt_net/provider/agents/openai/supervisor.py +4 -4
  65. pygpt_net/provider/api/__init__.py +27 -0
  66. pygpt_net/provider/api/anthropic/__init__.py +68 -0
  67. pygpt_net/provider/api/google/__init__.py +262 -0
  68. pygpt_net/provider/api/google/audio.py +114 -0
  69. pygpt_net/provider/api/google/chat.py +552 -0
  70. pygpt_net/provider/api/google/image.py +287 -0
  71. pygpt_net/provider/api/google/tools.py +222 -0
  72. pygpt_net/provider/api/google/vision.py +129 -0
  73. pygpt_net/provider/{gpt → api/openai}/__init__.py +2 -2
  74. pygpt_net/provider/{gpt → api/openai}/agents/computer.py +1 -1
  75. pygpt_net/provider/{gpt → api/openai}/agents/experts.py +1 -1
  76. pygpt_net/provider/{gpt → api/openai}/agents/response.py +1 -1
  77. pygpt_net/provider/{gpt → api/openai}/assistants.py +1 -1
  78. pygpt_net/provider/{gpt → api/openai}/chat.py +15 -8
  79. pygpt_net/provider/{gpt → api/openai}/completion.py +1 -1
  80. pygpt_net/provider/{gpt → api/openai}/image.py +1 -1
  81. pygpt_net/provider/{gpt → api/openai}/remote_tools.py +1 -1
  82. pygpt_net/provider/{gpt → api/openai}/responses.py +34 -20
  83. pygpt_net/provider/{gpt → api/openai}/store.py +2 -2
  84. pygpt_net/provider/{gpt → api/openai}/vision.py +1 -1
  85. pygpt_net/provider/{gpt → api/openai}/worker/assistants.py +4 -4
  86. pygpt_net/provider/{gpt → api/openai}/worker/importer.py +10 -10
  87. pygpt_net/provider/audio_input/openai_whisper.py +1 -1
  88. pygpt_net/provider/audio_output/google_tts.py +12 -0
  89. pygpt_net/provider/audio_output/openai_tts.py +1 -1
  90. pygpt_net/provider/core/config/patch.py +11 -0
  91. pygpt_net/provider/core/model/patch.py +9 -0
  92. pygpt_net/provider/core/preset/json_file.py +2 -4
  93. pygpt_net/provider/llms/anthropic.py +2 -5
  94. pygpt_net/provider/llms/base.py +4 -3
  95. pygpt_net/provider/llms/openai.py +1 -1
  96. pygpt_net/provider/loaders/hub/image_vision/base.py +1 -1
  97. pygpt_net/ui/dialog/preset.py +71 -55
  98. pygpt_net/ui/main.py +6 -4
  99. pygpt_net/utils.py +9 -0
  100. {pygpt_net-2.6.28.dist-info → pygpt_net-2.6.30.dist-info}/METADATA +42 -48
  101. {pygpt_net-2.6.28.dist-info → pygpt_net-2.6.30.dist-info}/RECORD +115 -107
  102. /pygpt_net/provider/{gpt → api/openai}/agents/__init__.py +0 -0
  103. /pygpt_net/provider/{gpt → api/openai}/agents/client.py +0 -0
  104. /pygpt_net/provider/{gpt → api/openai}/agents/remote_tools.py +0 -0
  105. /pygpt_net/provider/{gpt → api/openai}/agents/utils.py +0 -0
  106. /pygpt_net/provider/{gpt → api/openai}/audio.py +0 -0
  107. /pygpt_net/provider/{gpt → api/openai}/computer.py +0 -0
  108. /pygpt_net/provider/{gpt → api/openai}/container.py +0 -0
  109. /pygpt_net/provider/{gpt → api/openai}/summarizer.py +0 -0
  110. /pygpt_net/provider/{gpt → api/openai}/tools.py +0 -0
  111. /pygpt_net/provider/{gpt → api/openai}/utils.py +0 -0
  112. /pygpt_net/provider/{gpt → api/openai}/worker/__init__.py +0 -0
  113. {pygpt_net-2.6.28.dist-info → pygpt_net-2.6.30.dist-info}/LICENSE +0 -0
  114. {pygpt_net-2.6.28.dist-info → pygpt_net-2.6.30.dist-info}/WHEEL +0 -0
  115. {pygpt_net-2.6.28.dist-info → pygpt_net-2.6.30.dist-info}/entry_points.txt +0 -0
pygpt_net/provider/agents/openai/agent.py
@@ -26,12 +26,12 @@ from pygpt_net.core.types import (
 from pygpt_net.item.ctx import CtxItem
 from pygpt_net.item.model import ModelItem
 
-from pygpt_net.provider.gpt.agents.remote_tools import is_computer_tool, append_tools
-from pygpt_net.provider.gpt.agents.computer import Agent as ComputerAgent, LocalComputer
-from pygpt_net.provider.gpt.agents.response import StreamHandler
+from pygpt_net.provider.api.openai.agents.remote_tools import is_computer_tool, append_tools
+from pygpt_net.provider.api.openai.agents.computer import Agent as ComputerAgent, LocalComputer
+from pygpt_net.provider.api.openai.agents.response import StreamHandler
+from pygpt_net.provider.api.openai.agents.experts import get_experts
 
 from ..base import BaseAgent
-from ...gpt.agents.experts import get_experts
 
 class Agent(BaseAgent):
     def __init__(self, *args, **kwargs):
@@ -159,7 +159,7 @@ class Agent(BaseAgent):
             agent,
             **kwargs
         )
-        final_output, last_response_id = window.core.gpt.responses.unpack_agent_response(result, ctx)
+        final_output, last_response_id = window.core.api.openai.responses.unpack_agent_response(result, ctx)
         response_id = result.last_response_id
         if verbose:
            print("Final response:", result)
pygpt_net/provider/agents/openai/agent_b2b.py
@@ -29,12 +29,12 @@ from pygpt_net.item.ctx import CtxItem
 from pygpt_net.item.model import ModelItem
 from pygpt_net.item.preset import PresetItem
 
-from pygpt_net.provider.gpt.agents.remote_tools import append_tools
-from pygpt_net.provider.gpt.agents.response import StreamHandler
+from pygpt_net.provider.api.openai.agents.remote_tools import append_tools
+from pygpt_net.provider.api.openai.agents.response import StreamHandler
+from pygpt_net.provider.api.openai.agents.experts import get_experts
 from pygpt_net.utils import trans
 
 from ..base import BaseAgent
-from ...gpt.agents.experts import get_experts
 
 
 class Agent(BaseAgent):
@@ -274,7 +274,7 @@ class Agent(BaseAgent):
         if verbose:
             print("Final response:", result)
 
-        final_output, last_response_id = window.core.gpt.responses.unpack_agent_response(result, ctx)
+        final_output, last_response_id = window.core.api.openai.responses.unpack_agent_response(result, ctx)
 
         if bridge.stopped():
             bridge.on_stop(ctx)
@@ -305,7 +305,7 @@ class Agent(BaseAgent):
         if verbose:
             print("Final response:", result)
 
-        final_output, last_response_id = window.core.gpt.responses.unpack_agent_response(result, ctx)
+        final_output, last_response_id = window.core.api.openai.responses.unpack_agent_response(result, ctx)
         if bridge.stopped():
             bridge.on_stop(ctx)
             break
pygpt_net/provider/agents/openai/agent_planner.py
@@ -30,14 +30,13 @@ from pygpt_net.item.ctx import CtxItem
 from pygpt_net.item.model import ModelItem
 from pygpt_net.item.preset import PresetItem
 
-from pygpt_net.provider.gpt.agents.client import get_custom_model_provider, set_openai_env
-from pygpt_net.provider.gpt.agents.remote_tools import append_tools
-from pygpt_net.provider.gpt.agents.response import StreamHandler
+from pygpt_net.provider.api.openai.agents.client import get_custom_model_provider, set_openai_env
+from pygpt_net.provider.api.openai.agents.remote_tools import append_tools
+from pygpt_net.provider.api.openai.agents.response import StreamHandler
+from pygpt_net.provider.api.openai.agents.experts import get_experts
 from pygpt_net.utils import trans
 
 from ..base import BaseAgent
-from ...gpt.agents.experts import get_experts
-
 
 @dataclass
 class EvaluationFeedback:
@@ -327,7 +326,7 @@ class Agent(BaseAgent):
         print("Final response:", result)
 
         input_items = result.to_input_list()
-        final_output, last_response_id = window.core.gpt.responses.unpack_agent_response(result, ctx)
+        final_output, last_response_id = window.core.api.openai.responses.unpack_agent_response(result, ctx)
 
         if bridge.stopped():
             bridge.on_stop(ctx)
pygpt_net/provider/agents/openai/agent_with_experts.py
@@ -29,12 +29,12 @@ from pygpt_net.item.ctx import CtxItem
 from pygpt_net.item.model import ModelItem
 from pygpt_net.item.preset import PresetItem
 
-from pygpt_net.provider.gpt.agents.client import get_custom_model_provider, set_openai_env
-from pygpt_net.provider.gpt.agents.remote_tools import append_tools
-from pygpt_net.provider.gpt.agents.response import StreamHandler
+from pygpt_net.provider.api.openai.agents.client import get_custom_model_provider, set_openai_env
+from pygpt_net.provider.api.openai.agents.remote_tools import append_tools
+from pygpt_net.provider.api.openai.agents.response import StreamHandler
+from pygpt_net.provider.api.openai.agents.experts import get_experts
 
 from ..base import BaseAgent
-from ...gpt.agents.experts import get_experts
 
 
 class Agent(BaseAgent):
@@ -137,7 +137,7 @@ class Agent(BaseAgent):
             agent,
             **kwargs
         )
-        final_output, last_response_id = window.core.gpt.responses.unpack_agent_response(result, ctx)
+        final_output, last_response_id = window.core.api.openai.responses.unpack_agent_response(result, ctx)
         response_id = result.last_response_id
         if verbose:
             print("Final response:", result)
pygpt_net/provider/agents/openai/agent_with_experts_feedback.py
@@ -29,12 +29,12 @@ from pygpt_net.item.ctx import CtxItem
 from pygpt_net.item.model import ModelItem
 from pygpt_net.item.preset import PresetItem
 
-from pygpt_net.provider.gpt.agents.remote_tools import append_tools
-from pygpt_net.provider.gpt.agents.response import StreamHandler
+from pygpt_net.provider.api.openai.agents.remote_tools import append_tools
+from pygpt_net.provider.api.openai.agents.response import StreamHandler
+from pygpt_net.provider.api.openai.agents.experts import get_experts
 from pygpt_net.utils import trans
 
 from ..base import BaseAgent
-from ...gpt.agents.experts import get_experts
 
 
 @dataclass
@@ -221,7 +221,7 @@ class Agent(BaseAgent):
         print("Final response:", result)
 
         input_items = result.to_input_list()
-        final_output, last_response_id = window.core.gpt.responses.unpack_agent_response(result, ctx)
+        final_output, last_response_id = window.core.api.openai.responses.unpack_agent_response(result, ctx)
 
         if bridge.stopped():
             bridge.on_stop(ctx)
pygpt_net/provider/agents/openai/agent_with_feedback.py
@@ -29,12 +29,12 @@ from pygpt_net.item.ctx import CtxItem
 from pygpt_net.item.model import ModelItem
 from pygpt_net.item.preset import PresetItem
 
-from pygpt_net.provider.gpt.agents.remote_tools import append_tools
-from pygpt_net.provider.gpt.agents.response import StreamHandler
+from pygpt_net.provider.api.openai.agents.remote_tools import append_tools
+from pygpt_net.provider.api.openai.agents.response import StreamHandler
+from pygpt_net.provider.api.openai.agents.experts import get_experts
 from pygpt_net.utils import trans
 
 from ..base import BaseAgent
-from ...gpt.agents.experts import get_experts
 
 
 @dataclass
@@ -221,7 +221,7 @@ class Agent(BaseAgent):
         print("Final response:", result)
 
         input_items = result.to_input_list()
-        final_output, last_response_id = window.core.gpt.responses.unpack_agent_response(result, ctx)
+        final_output, last_response_id = window.core.api.openai.responses.unpack_agent_response(result, ctx)
 
         if bridge.stopped():
             bridge.on_stop(ctx)
pygpt_net/provider/agents/openai/bot_researcher.py
@@ -25,12 +25,12 @@ from pygpt_net.core.types import (
 from pygpt_net.item.ctx import CtxItem
 from pygpt_net.item.model import ModelItem
 
-from pygpt_net.provider.gpt.agents.remote_tools import append_tools
+from pygpt_net.provider.api.openai.agents.remote_tools import append_tools
+from pygpt_net.provider.api.openai.agents.experts import get_experts
 from pygpt_net.utils import trans
 
 from ..base import BaseAgent
 from .bots.research_bot.manager import ResearchManager
-from ...gpt.agents.experts import get_experts
 
 
 class Agent(BaseAgent):
pygpt_net/provider/agents/openai/bots/research_bot/agents/planner_agent.py
@@ -15,7 +15,7 @@ from pydantic import BaseModel
 from agents import Agent
 
 from pygpt_net.item.preset import PresetItem
-from pygpt_net.provider.gpt.agents.remote_tools import append_tools
+from pygpt_net.provider.api.openai.agents.remote_tools import append_tools
 
 
 class WebSearchItem(BaseModel):
pygpt_net/provider/agents/openai/bots/research_bot/agents/search_agent.py
@@ -14,7 +14,7 @@ from agents.model_settings import ModelSettings
 
 from pygpt_net.core.types import OPENAI_REMOTE_TOOL_DISABLE_WEB_SEARCH
 from pygpt_net.item.preset import PresetItem
-from pygpt_net.provider.gpt.agents.remote_tools import append_tools
+from pygpt_net.provider.api.openai.agents.remote_tools import append_tools
 
 
 def get_search_agent(
pygpt_net/provider/agents/openai/bots/research_bot/agents/writer_agent.py
@@ -15,7 +15,7 @@ from pydantic import BaseModel
 from agents import Agent
 
 from pygpt_net.item.preset import PresetItem
-from pygpt_net.provider.gpt.agents.remote_tools import append_tools
+from pygpt_net.provider.api.openai.agents.remote_tools import append_tools
 
 
 class ReportData(BaseModel):
pygpt_net/provider/agents/openai/evolve.py
@@ -30,12 +30,12 @@ from pygpt_net.item.ctx import CtxItem
 from pygpt_net.item.model import ModelItem
 from pygpt_net.item.preset import PresetItem
 
-from pygpt_net.provider.gpt.agents.remote_tools import append_tools
-from pygpt_net.provider.gpt.agents.response import StreamHandler
+from pygpt_net.provider.api.openai.agents.remote_tools import append_tools
+from pygpt_net.provider.api.openai.agents.response import StreamHandler
+from pygpt_net.provider.api.openai.agents.experts import get_experts
 from pygpt_net.utils import trans
 
 from ..base import BaseAgent
-from ...gpt.agents.experts import get_experts
 
 
 @dataclass
@@ -340,7 +340,7 @@ class Agent(BaseAgent):
 
         print("Winner: agent ", choose)
 
-        final_output, last_response_id = window.core.gpt.responses.unpack_agent_response(results[choose], ctx)
+        final_output, last_response_id = window.core.api.openai.responses.unpack_agent_response(results[choose], ctx)
         input_items = results[choose].to_input_list()
 
         if bridge.stopped():
@@ -437,7 +437,7 @@ class Agent(BaseAgent):
            bridge.on_stop(ctx)
            break
 
        window.core.gpt.responses.unpack_agent_response(results[choose], ctx)
-        window.core.gpt.responses.unpack_agent_response(results[choose], ctx)
+        window.core.api.openai.responses.unpack_agent_response(results[choose], ctx)
        input_items = results[choose].to_input_list()
 
        evaluator_result = await Runner.run(evaluator, input_items)
pygpt_net/provider/agents/openai/supervisor.py
@@ -31,9 +31,9 @@ from pygpt_net.core.types import (
 from pygpt_net.item.ctx import CtxItem
 from pygpt_net.item.model import ModelItem
 
-from pygpt_net.provider.gpt.agents.remote_tools import append_tools
-from pygpt_net.provider.gpt.agents.response import StreamHandler
-from pygpt_net.provider.gpt.agents.experts import get_experts
+from pygpt_net.provider.api.openai.agents.remote_tools import append_tools
+from pygpt_net.provider.api.openai.agents.response import StreamHandler
+from pygpt_net.provider.api.openai.agents.experts import get_experts
 from pygpt_net.utils import trans
 
 from ..base import BaseAgent
@@ -218,7 +218,7 @@ class Agent(BaseAgent):
             agent,
             **kwargs
         )
-        final_output, last_response_id = window.core.gpt.responses.unpack_agent_response(result, ctx)
+        final_output, last_response_id = window.core.api.openai.responses.unpack_agent_response(result, ctx)
         response_id = result.last_response_id
         if verbose:
             print("Final response:", result)
pygpt_net/provider/api/__init__.py (new file)
@@ -0,0 +1,27 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ================================================== #
+# This file is a part of PYGPT package               #
+# Website: https://pygpt.net                         #
+# GitHub:  https://github.com/szczyglis-dev/py-gpt   #
+# MIT License                                        #
+# Created By  : Marcin Szczygliński                  #
+# Updated Date: 2025.08.28 20:00:00                  #
+# ================================================== #
+
+from .anthropic import ApiAnthropic
+from .google import ApiGoogle
+from .openai import ApiOpenAI
+
+class Api:
+
+    def __init__(self, window=None):
+        """
+        API wrappers core
+
+        :param window: Window instance
+        """
+        self.window = window
+        self.anthropic = ApiAnthropic(window)
+        self.google = ApiGoogle(window)
+        self.openai = ApiOpenAI(window)
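Taken together with the renamed call sites above (window.core.gpt.* becomes window.core.api.openai.*), the new aggregator is reached roughly as in this minimal illustrative sketch; it assumes the Api instance is exposed on the window core as window.core.api, which is suggested but not shown in this diff:

    # illustrative sketch only - assumes window.core.api holds an Api instance
    api = Api(window)                       # built once with the app window
    api.openai.responses                    # OpenAI Responses wrapper (was window.core.gpt.responses)
    api.google.get_client(mode=MODE_CHAT)   # google-genai Client
    api.anthropic.get_client()              # anthropic.Anthropic client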
pygpt_net/provider/api/anthropic/__init__.py (new file)
@@ -0,0 +1,68 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ================================================== #
+# This file is a part of PYGPT package               #
+# Website: https://pygpt.net                         #
+# GitHub:  https://github.com/szczyglis-dev/py-gpt   #
+# MIT License                                        #
+# Created By  : Marcin Szczygliński                  #
+# Updated Date: 2025.08.28 09:00:00                  #
+# ================================================== #
+
+from anthropic import Anthropic
+
+from pygpt_net.core.types import (
+    MODE_CHAT,
+)
+from pygpt_net.item.model import ModelItem
+
+class ApiAnthropic:
+
+    def __init__(self, window=None):
+        """
+        Anthropic API wrapper core
+
+        :param window: Window instance
+        """
+        self.window = window
+        self.client = None
+        self.locked = False
+
+    def get_client(
+            self,
+            mode: str = MODE_CHAT,
+            model: ModelItem = None
+    ) -> Anthropic:
+        """
+        Return Anthropic client
+
+        :param mode: Mode
+        :param model: Model
+        :return: Anthropic client
+        """
+        if self.client is not None:
+            try:
+                self.client.close()  # close previous client if exists
+            except Exception as e:
+                self.window.core.debug.log(e)
+                print("Error closing previous Anthropic client:", e)
+        self.client = Anthropic(
+            api_key=self.window.core.config.get('api_key_anthropic', "")
+        )
+        return self.client
+
+    def stop(self):
+        """On global event stop"""
+        pass
+
+    def close(self):
+        """Close Anthropic client"""
+        if self.locked:
+            return
+        if self.client is not None:
+            try:
+                pass
+                # self.client.close()
+            except Exception as e:
+                self.window.core.debug.log(e)
+                print("Error closing Anthropic client:", e)
pygpt_net/provider/api/google/__init__.py (new file)
@@ -0,0 +1,262 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ================================================== #
+# This file is a part of PYGPT package               #
+# Website: https://pygpt.net                         #
+# GitHub:  https://github.com/szczyglis-dev/py-gpt   #
+# MIT License                                        #
+# Created By  : Marcin Szczygliński                  #
+# Updated Date: 2025.08.28 20:00:00                  #
+# ================================================== #
+
+from typing import Optional, Dict, Any
+
+from google.genai import types as gtypes
+from google import genai
+from pygpt_net.core.types import (
+    MODE_ASSISTANT,
+    MODE_AUDIO,
+    MODE_CHAT,
+    MODE_COMPLETION,
+    MODE_IMAGE,
+    MODE_RESEARCH,
+)
+from pygpt_net.core.bridge.context import BridgeContext
+from pygpt_net.item.model import ModelItem
+
+from .chat import Chat
+from .vision import Vision
+from .tools import Tools
+from .audio import Audio
+from .image import Image
+
+
+class ApiGoogle:
+    def __init__(self, window=None):
+        """
+        Google GenAI API SDK wrapper
+
+        :param window: Window instance
+        """
+        self.window = window
+        self.chat = Chat(window)
+        self.vision = Vision(window)
+        self.tools = Tools(window)
+        self.audio = Audio(window)
+        self.image = Image(window)
+        self.client: Optional[genai.Client] = None
+        self.locked = False
+        self.last_client_args: Optional[Dict[str, Any]] = None
+
+    def get_client(
+            self,
+            mode: str = MODE_CHAT,
+            model: ModelItem = None
+    ) -> genai.Client:
+        """
+        Get or create Google GenAI client
+
+        :param mode: Mode (chat, completion, image, etc.)
+        :param model: ModelItem
+        :return: genai.Client instance
+        """
+        if not model:
+            model = ModelItem()
+            model.provider = "google"
+        args = self.window.core.models.prepare_client_args(mode, model)
+        filtered = {}
+        if args.get("api_key"):
+            filtered["api_key"] = args["api_key"]
+        if self.client is None or self.last_client_args != filtered:
+            self.client = genai.Client(**filtered)
+            self.last_client_args = filtered
+        return self.client
+
+    def call(self, context: BridgeContext, extra: dict = None) -> bool:
+        """
+        Make an API call to Google GenAI
+
+        :param context: BridgeContext
+        :param extra: Extra parameters
+        :return: True if successful, False otherwise
+        """
+        mode = context.mode
+        model = context.model
+        stream = context.stream
+        ctx = context.ctx
+        ai_name = ctx.output_name if ctx else "assistant"
+
+        # No Responses API in google-genai
+        if ctx:
+            ctx.use_responses_api = False
+
+        used_tokens = 0
+        response = None
+
+        if mode in [MODE_COMPLETION, MODE_CHAT, MODE_AUDIO, MODE_RESEARCH]:
+            response = self.chat.send(context=context, extra=extra)
+            used_tokens = self.chat.get_used_tokens()
+            if ctx:
+                self.vision.append_images(ctx)
+
+        elif mode == MODE_IMAGE:
+            return self.image.generate(context=context, extra=extra)
+
+        elif mode == MODE_ASSISTANT:
+            return False  # not implemented for Google
+
+        if stream:
+            if ctx:
+                ctx.stream = response
+                ctx.set_output("", ai_name)
+                ctx.input_tokens = used_tokens
+            return True
+
+        if response is None:
+            return False
+
+        if isinstance(response, dict) and "error" in response:
+            return False
+
+        if ctx:
+            ctx.ai_name = ai_name
+            self.chat.unpack_response(mode, response, ctx)
+            try:
+                import json
+                for tc in getattr(ctx, "tool_calls", []) or []:
+                    fn = tc.get("function") or {}
+                    args = fn.get("arguments")
+                    if isinstance(args, str):
+                        try:
+                            fn["arguments"] = json.loads(args)
+                        except Exception:
+                            fn["arguments"] = {}
+            except Exception:
+                pass
+        return True
+
+    def quick_call(self, context: BridgeContext, extra: dict = None) -> str:
+        """
+        Make a quick API call to Google GenAI and return the output text
+
+        :param context: BridgeContext
+        :param extra: Extra parameters
+        :return: Output text
+        """
+        if context.request:
+            context.stream = False
+            context.mode = MODE_CHAT
+            self.locked = True
+            self.call(context, extra)
+            self.locked = False
+            return context.ctx.output
+
+        self.locked = True
+        try:
+            ctx = context.ctx
+            prompt = context.prompt
+            system_prompt = context.system_prompt
+            temperature = context.temperature
+            history = context.history
+            functions = context.external_functions
+            model = context.model or self.window.core.models.from_defaults()
+
+            client = self.get_client(MODE_CHAT, model)
+            tools = self.tools.prepare(model, functions)
+
+            """
+            # with remote tools
+            base_tools = self.tools.prepare(model, functions)
+            remote_tools = self.build_remote_tools(model)
+            tools = (base_tools or []) + (remote_tools or [])
+            """
+
+            inputs = self.chat.build_input(
+                prompt=prompt,
+                system_prompt=system_prompt,
+                model=model,
+                history=history,
+                attachments=context.attachments,
+                multimodal_ctx=context.multimodal_ctx,
+            )
+            cfg = genai.types.GenerateContentConfig(
+                temperature=temperature if temperature is not None else self.window.core.config.get('temperature'),
+                top_p=self.window.core.config.get('top_p'),
+                max_output_tokens=context.max_tokens if context.max_tokens else None,
+                system_instruction=system_prompt if system_prompt else None,
+                tools=tools if tools else None,
+            )
+            resp = client.models.generate_content(
+                model=model.id,
+                contents=inputs,
+                config=cfg,
+            )
+
+            if ctx:
+                calls = self.chat.extract_tool_calls(resp)
+                if calls:
+                    ctx.tool_calls = calls
+            return self.chat.extract_text(resp)
+        except Exception as e:
+            self.window.core.debug.log(e)
+            return ""
+        finally:
+            self.locked = False
+
+    def build_remote_tools(self, model: ModelItem = None) -> list:
+        """
+        Build Google GenAI remote tools based on config flags.
+        - google_tool_search: enables grounding via Google Search (Gemini 2.x)
+          or GoogleSearchRetrieval (Gemini 1.5 fallback).
+        - google_tool_code_execution: enables code execution tool.
+
+        Returns a list of gtypes.Tool objects (can be empty).
+
+        :param model: ModelItem
+        :return: list of gtypes.Tool
+        """
+        tools: list = []
+        cfg = self.window.core.config
+        model_id = (model.id if model and getattr(model, "id", None) else "").lower()
+
+        # Google Search tool
+        if cfg.get("remote_tools.google.web_search") and "image" not in model.id:
+            try:
+                if not model_id.startswith("gemini-1.5") and not model_id.startswith("models/gemini-1.5"):
+                    # Gemini 2.x uses GoogleSearch
+                    tools.append(gtypes.Tool(google_search=gtypes.GoogleSearch()))
+                else:
+                    # Gemini 1.5 fallback uses GoogleSearchRetrieval
+                    # Note: Supported only for 1.5 models.
+                    tools.append(gtypes.Tool(
+                        google_search_retrieval=gtypes.GoogleSearchRetrieval()
+                    ))
+            except Exception as e:
+                # Do not break the request if tool construction fails
+                self.window.core.debug.log(e)
+
+        # Code Execution tool
+        if cfg.get("remote_tools.google.code_interpreter") and "image" not in model.id:
+            try:
+                tools.append(gtypes.Tool(code_execution=gtypes.ToolCodeExecution))
+            except Exception as e:
+                self.window.core.debug.log(e)
+
+        return tools
+
+
+    def stop(self):
+        """On global event stop"""
+        pass
+
+    def close(self):
+        """Close Google client"""
+        if self.locked:
+            return
+        if self.client is not None:
+            try:
+                pass
+                # self.client.close()
+            except Exception as e:
+                self.window.core.debug.log(e)
+                print("Error closing Google client:", e)