pygpt-net 2.6.36__py3-none-any.whl → 2.6.37__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries and is provided for informational purposes only.
Files changed (61)
  1. pygpt_net/CHANGELOG.txt +5 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/controller/chat/handler/anthropic_stream.py +166 -0
  4. pygpt_net/controller/chat/handler/google_stream.py +181 -0
  5. pygpt_net/controller/chat/handler/langchain_stream.py +24 -0
  6. pygpt_net/controller/chat/handler/llamaindex_stream.py +47 -0
  7. pygpt_net/controller/chat/handler/openai_stream.py +260 -0
  8. pygpt_net/controller/chat/handler/utils.py +210 -0
  9. pygpt_net/controller/chat/handler/worker.py +566 -0
  10. pygpt_net/controller/chat/handler/xai_stream.py +135 -0
  11. pygpt_net/controller/chat/stream.py +1 -1
  12. pygpt_net/controller/ctx/ctx.py +1 -1
  13. pygpt_net/controller/model/editor.py +3 -0
  14. pygpt_net/core/bridge/context.py +35 -35
  15. pygpt_net/core/bridge/worker.py +40 -16
  16. pygpt_net/core/render/web/body.py +29 -34
  17. pygpt_net/data/config/config.json +10 -3
  18. pygpt_net/data/config/models.json +3 -3
  19. pygpt_net/data/config/settings.json +105 -0
  20. pygpt_net/data/css/style.dark.css +2 -3
  21. pygpt_net/data/css/style.light.css +2 -3
  22. pygpt_net/data/locale/locale.de.ini +3 -1
  23. pygpt_net/data/locale/locale.en.ini +19 -1
  24. pygpt_net/data/locale/locale.es.ini +3 -1
  25. pygpt_net/data/locale/locale.fr.ini +3 -1
  26. pygpt_net/data/locale/locale.it.ini +3 -1
  27. pygpt_net/data/locale/locale.pl.ini +4 -2
  28. pygpt_net/data/locale/locale.uk.ini +3 -1
  29. pygpt_net/data/locale/locale.zh.ini +3 -1
  30. pygpt_net/provider/api/__init__.py +5 -3
  31. pygpt_net/provider/api/anthropic/__init__.py +190 -29
  32. pygpt_net/provider/api/anthropic/audio.py +30 -0
  33. pygpt_net/provider/api/anthropic/chat.py +341 -0
  34. pygpt_net/provider/api/anthropic/image.py +25 -0
  35. pygpt_net/provider/api/anthropic/tools.py +266 -0
  36. pygpt_net/provider/api/anthropic/vision.py +142 -0
  37. pygpt_net/provider/api/google/chat.py +2 -2
  38. pygpt_net/provider/api/google/tools.py +58 -48
  39. pygpt_net/provider/api/google/vision.py +7 -1
  40. pygpt_net/provider/api/openai/chat.py +1 -0
  41. pygpt_net/provider/api/openai/vision.py +6 -0
  42. pygpt_net/provider/api/x_ai/__init__.py +247 -0
  43. pygpt_net/provider/api/x_ai/audio.py +32 -0
  44. pygpt_net/provider/api/x_ai/chat.py +968 -0
  45. pygpt_net/provider/api/x_ai/image.py +208 -0
  46. pygpt_net/provider/api/x_ai/remote.py +262 -0
  47. pygpt_net/provider/api/x_ai/tools.py +120 -0
  48. pygpt_net/provider/api/x_ai/vision.py +119 -0
  49. pygpt_net/provider/core/config/patch.py +28 -0
  50. pygpt_net/provider/llms/anthropic.py +4 -2
  51. pygpt_net/ui/base/config_dialog.py +5 -11
  52. pygpt_net/ui/dialog/models.py +2 -4
  53. pygpt_net/ui/dialog/plugins.py +40 -43
  54. pygpt_net/ui/widget/element/labels.py +19 -3
  55. pygpt_net/ui/widget/textarea/web.py +1 -1
  56. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.37.dist-info}/METADATA +11 -6
  57. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.37.dist-info}/RECORD +60 -41
  58. pygpt_net/controller/chat/handler/stream_worker.py +0 -1136
  59. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.37.dist-info}/LICENSE +0 -0
  60. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.37.dist-info}/WHEEL +0 -0
  61. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.37.dist-info}/entry_points.txt +0 -0
pygpt_net/data/locale/locale.es.ini
@@ -866,6 +866,7 @@ model.default = Predeterminado en modo
 model.extra = Parámetros adicionales (JSON)
 model.extra.desc = Un objeto JSON que contiene parámetros adicionales para el modelo (como el esfuerzo de razonamiento, etc.).
 model.id = ID del modelo
+model.id.desc = Introduzca el ID del modelo exacto proporcionado por el proveedor
 model.input = Entrada
 mode.llama_index = Chat con archivos
 mode.llama_index.tooltip = Chatear con contexto adicional proporcionado por LlamaIndex
@@ -889,11 +890,12 @@ model.mode = Modo(s)
 model.mode.desc = Modos disponibles: chat, llama_index, audio, research, completion, img, vision, assistant, agent_llama, agent, expert
 model.multimodal = Multimodal
 model.name = Nombre
+model.name.desc = Nombre para mostrar en la lista, puede ser cualquiera
 model.openai = API de OpenAI
 model.openai.desc = Soporta OpenAI API (o compatible)
 model.output = Salida
 model.provider = Proveedor
-model.provider.desc = Proveedor LLM
+model.provider.desc = Elija el proveedor para el modelo
 models.importer.all = Mostrar todo
 models.importer.available.label = Modelos disponibles
 models.importer.current.default = Por favor, seleccione un proveedor de la lista.
pygpt_net/data/locale/locale.fr.ini
@@ -865,6 +865,7 @@ model.default = Par défaut dans le mode
 model.extra = Paramètres supplémentaires (JSON)
 model.extra.desc = Un objet JSON contenant des paramètres supplémentaires pour le modèle (tel que l'effort de raisonnement, etc.).
 model.id = ID du modèle
+model.id.desc = Entrez l'ID de modèle exact fourni par le fournisseur
 model.input = Entrée
 mode.llama_index = Chat avec fichiers
 mode.llama_index.tooltip = Discussion avec un contexte supplémentaire fourni par LlamaIndex
@@ -888,11 +889,12 @@ model.mode = Mode(s)
 model.mode.desc = Modes disponibles : chat, llama_index, audio, recherche, complétion, img, vision, assistant, agent_llama, agent, expert
 model.multimodal = Multimodal
 model.name = Nom
+model.name.desc = Nom d'affichage dans la liste, peut être n'importe quoi
 model.openai = API OpenAI
 model.openai.desc = Supporte l'API OpenAI (ou compatible)
 model.output = Sortie
 model.provider = Fournisseur
-model.provider.desc = Fournisseur LLM
+model.provider.desc = Choisissez le fournisseur pour le modèle
 models.importer.all = Tout afficher
 models.importer.available.label = Modèles disponibles
 models.importer.current.default = Veuillez sélectionner un fournisseur dans la liste.
pygpt_net/data/locale/locale.it.ini
@@ -865,6 +865,7 @@ model.default = Predefinito in modalità
 model.extra = Parametri aggiuntivi (JSON)
 model.extra.desc = Un oggetto JSON contenente parametri aggiuntivi per il modello (come lo sforzo di ragionamento, ecc.).
 model.id = ID modello
+model.id.desc = Inserisci l'ID del modello esatto fornito dal fornitore
 model.input = Ingresso
 mode.llama_index = Chat con file
 mode.llama_index.tooltip = Chattare con contesto aggiuntivo fornito da LlamaIndex
@@ -888,11 +889,12 @@ model.mode = Modalità disponibili
 model.mode.desc = Modalità disponibili: chat, llama_index, audio, research, completion, img, vision, assistant, agent_llama, agent, expert
 model.multimodal = Multimodale
 model.name = Nome
+model.name.desc = Nome visualizzato nell'elenco, può essere qualsiasi cosa
 model.openai = OpenAI API
 model.openai.desc = Supporta l'API OpenAI (o compatibile)
 model.output = Uscita
 model.provider = Fornitore
-model.provider.desc = Fornitore LLM
+model.provider.desc = Scegli il fornitore per il modello
 models.importer.all = Mostra tutto
 models.importer.available.label = Modelli disponibili
 models.importer.current.default = Seleziona un fornitore dall'elenco.
pygpt_net/data/locale/locale.pl.ini
@@ -869,6 +869,7 @@ model.default = Domyślnie w trybie
 model.extra = Dodatkowe parametry (JSON)
 model.extra.desc = Obiekt JSON zawierający dodatkowe parametry dla modelu (takie jak wysiłek intelektualny itp.).
 model.id = ID modelu
+model.id.desc = Wprowadź dokładny identyfikator modelu podany przez dostawcę
 model.input = Wejście
 mode.llama_index = Czat z plikami
 mode.llama_index.tooltip = Czat z dodatkowym kontekstem dostarczonym przez LlamaIndex
@@ -892,11 +893,12 @@ model.mode = Tryb(y)
 model.mode.desc = Dostępne tryby: chat, llama_index, audio, research, completion, img, vision, assistant, agent_llama, agent, expert
 model.multimodal = Multimodalny
 model.name = Nazwa
+model.name.desc = Nazwa wyświetlana na liście, może być dowolna
 model.openai = OpenAI API
 model.openai.desc = Wspiera OpenAI API (lub kompatybilny)
 model.output = Wyjście
 model.provider = Dostawca
-model.provider.desc = Dostawca LLM
+model.provider.desc = Wybierz dostawcę dla modelu
 models.importer.all = Pokaż wszystkie
 models.importer.available.label = Dostępne modele
 models.importer.current.default = Wybierz dostawcę z listy.
@@ -965,7 +967,7 @@ painter.btn.camera.capture = Z kamery
 painter.btn.capture = Użyj obrazu
 painter.btn.clear = Wyczyść
 painter.btn.crop = Przytnij
-painter.btn.fit = Dopasuj
+painter.btn.fit = Dopasuj
 painter.capture.manual.captured.success = Image captured:
 painter.capture.name.prefix = Drawing from
 painter.mode.erase = Gumka
pygpt_net/data/locale/locale.uk.ini
@@ -865,6 +865,7 @@ model.default = За замовчуванням у режимі
 model.extra = Додаткові параметри (JSON)
 model.extra.desc = Об'єкт JSON, що містить додаткові параметри для моделі (такі як розумові зусилля тощо).
 model.id = ID моделі
+model.id.desc = Введіть точний ідентифікатор моделі, наданий постачальником
 model.input = Вхід
 mode.llama_index = Чат з файлами
 mode.llama_index.tooltip = Чат з додатковим контекстом, наданим LlamaIndex
@@ -888,11 +889,12 @@ model.mode = Режим(и)
 model.mode.desc = Доступні режими: chat, llama_index, audio, research, completion, img, vision, assistant, agent_llama, agent, expert
 model.multimodal = Мультимодальний
 model.name = Назва
+model.name.desc = Відображуване ім'я в списку, може бути будь-яким
 model.openai = OpenAI API
 model.openai.desc = Підтримує OpenAI API (або сумісний)
 model.output = Вихід
 model.provider = Постачальник
-model.provider.desc = Постачальник LLM
+model.provider.desc = Виберіть постачальника для моделі
 models.importer.all = Показати всі
 models.importer.available.label = Доступні моделі
 models.importer.current.default = Виберіть постачальника зі списку.
pygpt_net/data/locale/locale.zh.ini
@@ -865,6 +865,7 @@ model.default = 預設模式
 model.extra = 额外参数 (JSON)
 model.extra.desc = 包含模型附加参数的JSON对象(例如推理努力等)。
 model.id = 模型ID
+model.id.desc = 输入提供商提供的精确模型ID
 model.input = 输入
 mode.llama_index = 文件聊天模式
 mode.llama_index.tooltip = 使用LlamaIndex提供的額外上下文進行聊天
@@ -888,11 +889,12 @@ model.mode = 模式
 model.mode.desc = 可用模式:聊天、完成、圖像、視覺、助手、langchain、llama_index、代理
 model.multimodal = 多模态
 model.name = 名稱
+model.name.desc = 列表上显示的名称,可以是任何东西
 model.openai = OpenAI API
 model.openai.desc = 支持 OpenAI API (或兼容)
 model.output = 输出
 model.provider = 提供者
-model.provider.desc = LLM 提供者
+model.provider.desc = 选择模型的提供商
 models.importer.all = 显示全部
 models.importer.available.label = 可用模型
 models.importer.current.default = 请从列表中选择一个提供商。
pygpt_net/provider/api/__init__.py
@@ -6,22 +6,24 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.28 20:00:00 #
+# Updated Date: 2025.09.05 01:00:00 #
 # ================================================== #
 
 from .anthropic import ApiAnthropic
 from .google import ApiGoogle
 from .openai import ApiOpenAI
+from .x_ai import ApiXAI
 
 class Api:
 
     def __init__(self, window=None):
         """
-        API wrappers core
+        API wrappers
 
         :param window: Window instance
         """
         self.window = window
         self.anthropic = ApiAnthropic(window)
         self.google = ApiGoogle(window)
-        self.openai = ApiOpenAI(window)
+        self.openai = ApiOpenAI(window)
+        self.xai = ApiXAI(window)
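For orientation: the Api container above simply creates one wrapper per backend and exposes each as an attribute. A minimal sketch of selecting a wrapper by provider id follows; the get_provider helper and the id strings are illustrative and not part of the package:

from pygpt_net.provider.api import Api


def get_provider(api: Api, name: str):
    # Illustrative helper: map a provider id to the matching wrapper attribute.
    providers = {
        "anthropic": api.anthropic,
        "google": api.google,
        "openai": api.openai,
        "x_ai": api.xai,  # xAI wrapper added in 2.6.37
    }
    return providers[name]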
pygpt_net/provider/api/anthropic/__init__.py
@@ -6,63 +6,224 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.28 09:00:00 #
+# Updated Date: 2025.09.05 01:00:00 #
 # ================================================== #
 
-from anthropic import Anthropic
+from typing import Optional, Dict, Any
 
+import anthropic
 from pygpt_net.core.types import (
+    MODE_ASSISTANT,
+    MODE_AUDIO,
     MODE_CHAT,
+    MODE_COMPLETION,
+    MODE_IMAGE,
+    MODE_RESEARCH,
 )
+from pygpt_net.core.bridge.context import BridgeContext
 from pygpt_net.item.model import ModelItem
 
-class ApiAnthropic:
+from .chat import Chat
+from .tools import Tools
+from .vision import Vision
+from .audio import Audio
+from .image import Image
+
 
+class ApiAnthropic:
     def __init__(self, window=None):
         """
-        Anthropic API wrapper core
+        Anthropic Messages API SDK wrapper
 
        :param window: Window instance
        """
        self.window = window
-        self.client = None
+        self.chat = Chat(window)
+        self.tools = Tools(window)
+        self.vision = Vision(window)
+        self.audio = Audio(window)  # stub helpers (no official audio out/in in SDK as of now)
+        self.image = Image(window)  # stub: no image generation in Anthropic
+        self.client: Optional[anthropic.Anthropic] = None
        self.locked = False
+        self.last_client_args: Optional[Dict[str, Any]] = None
 
    def get_client(
        self,
        mode: str = MODE_CHAT,
-        model: ModelItem = None
-    ) -> Anthropic:
+        model: ModelItem = None,
+    ) -> anthropic.Anthropic:
+        """
+        Get or create Anthropic client
+
+        :param mode: Mode (chat, completion, image, etc.)
+        :param model: ModelItem
+        :return: anthropic.Anthropic instance
        """
-        Return Anthropic client
+        # Build minimal args from app config
+        args = self.window.core.models.prepare_client_args(mode, model)
+        filtered = {}
+        if args.get("api_key"):
+            filtered["api_key"] = args["api_key"]
+
+        # Optionally honor custom base_url if present in config (advanced)
+        # base_url = self.window.core.config.get("api_native_anthropic.base_url", "").strip()
+        # if base_url:
+        #     filtered["base_url"] = base_url
+
+        # Keep a fresh client per call; Anthropic client is lightweight
+        return anthropic.Anthropic(**filtered)
 
-        :param mode: Mode
-        :param model: Model
-        :return: Anthropic client
+    def call(
+        self,
+        context: BridgeContext,
+        extra: dict = None,
+        rt_signals=None,  # unused for Anthropic
+    ) -> bool:
        """
-        if self.client is not None:
+        Make an API call to Anthropic Messages API
+
+        :param context: BridgeContext
+        :param extra: Extra parameters
+        :param rt_signals: Not used (no realtime Voice API)
+        :return: True if successful, False otherwise
+        """
+        mode = context.mode
+        model = context.model
+        stream = context.stream
+        ctx = context.ctx
+        ai_name = ctx.output_name if ctx else "assistant"
+
+        # Anthropic: no Responses API; stream events are custom to Anthropic
+        if ctx:
+            ctx.use_responses_api = False
+
+        used_tokens = 0
+        response = None
+
+        if mode in (MODE_COMPLETION, MODE_CHAT, MODE_AUDIO, MODE_RESEARCH):
+            # MODE_AUDIO fallback: treat as normal chat (no native audio API)
+            response = self.chat.send(context=context, extra=extra)
+            used_tokens = self.chat.get_used_tokens()
+            if ctx:
+                self.vision.append_images(ctx)
+
+        elif mode == MODE_IMAGE:
+            # Anthropic does not support image generation – only vision (image input in chat)
+            return self.image.generate(context=context, extra=extra)  # always returns False
+
+        elif mode == MODE_ASSISTANT:
+            return False  # not implemented for Anthropic
+
+        if stream:
+            if ctx:
+                ctx.stream = response
+                ctx.set_output("", ai_name)
+                ctx.input_tokens = used_tokens
+            return True
+
+        if response is None:
+            return False
+
+        if isinstance(response, dict) and "error" in response:
+            return False
+
+        if ctx:
+            ctx.ai_name = ai_name
+            self.chat.unpack_response(mode, response, ctx)
            try:
-                self.client.close()  # close previous client if exists
-            except Exception as e:
-                self.window.core.debug.log(e)
-                print("Error closing previous Anthropic client:", e)
-        self.client = Anthropic(
-            api_key=self.window.core.config.get('api_key_anthropic', "")
-        )
-        return self.client
+                import json
+                for tc in getattr(ctx, "tool_calls", []) or []:
+                    fn = tc.get("function") or {}
+                    args = fn.get("arguments")
+                    if isinstance(args, str):
+                        try:
+                            fn["arguments"] = json.loads(args)
+                        except Exception:
+                            fn["arguments"] = {}
+            except Exception:
+                pass
+        return True
+
+    def quick_call(
+        self,
+        context: BridgeContext,
+        extra: dict = None
+    ) -> str:
+        """
+        Make a quick API call to Anthropic and return the output text
+
+        :param context: BridgeContext
+        :param extra: Extra parameters
+        :return: Output text
+        """
+        if context.request:
+            context.stream = False
+            context.mode = MODE_CHAT
+            self.locked = True
+            self.call(context, extra)
+            self.locked = False
+            return context.ctx.output
+
+        self.locked = True
+        try:
+            ctx = context.ctx
+            prompt = context.prompt
+            system_prompt = context.system_prompt
+            temperature = context.temperature
+            history = context.history
+            functions = context.external_functions
+            model = context.model or self.window.core.models.from_defaults()
+
+            client = self.get_client(MODE_CHAT, model)
+            tools = self.tools.get_all_tools(model, functions)
+
+            inputs = self.chat.build_input(
+                prompt=prompt,
+                system_prompt=system_prompt,
+                model=model,
+                history=history,
+                attachments=context.attachments,
+                multimodal_ctx=context.multimodal_ctx,
+            )
+
+            # Anthropic params
+            params: Dict[str, Any] = {
+                "model": model.id,
+                "max_tokens": context.max_tokens if context.max_tokens else 1024,
+                "messages": inputs,
+            }
+            if system_prompt:
+                params["system"] = system_prompt
+            if temperature is not None:
+                params["temperature"] = temperature
+            if tools:  # only include when non-empty list
+                params["tools"] = tools
+
+            resp = client.messages.create(**params)
+
+            if ctx:
+                calls = self.chat.extract_tool_calls(resp)
+                if calls:
+                    ctx.tool_calls = calls
+            return self.chat.extract_text(resp)
+        except Exception as e:
+            self.window.core.debug.log(e)
+            return ""
+        finally:
+            self.locked = False
 
    def stop(self):
-        """On global event stop"""
+        """On global event stop (no-op for Anthropic)"""
        pass
 
    def close(self):
-        """Close Anthropic client"""
+        """Close client (no persistent resources to close)"""
        if self.locked:
            return
-        if self.client is not None:
-            try:
-                pass
-                # self.client.close()
-            except Exception as e:
-                self.window.core.debug.log(e)
-                print("Error closing Anthropic client:", e)
+        self.client = None
+
+    def safe_close(self):
+        """Close client (safe)"""
+        if self.locked:
+            return
+        self.client = None
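A rough usage sketch of the new quick_call() path: it builds the message list, calls client.messages.create(), and returns the extracted text (empty string on error). The sketch assumes the wrapper is reachable as window.core.api.anthropic, that CtxItem lives in pygpt_net.item.ctx, and that BridgeContext accepts these fields as keyword arguments; none of that is shown in this diff:

from pygpt_net.core.bridge.context import BridgeContext
from pygpt_net.item.ctx import CtxItem  # assumed import path for the context item


def ask_anthropic(window, prompt: str) -> str:
    # Sketch only: in the app this normally runs inside the bridge worker.
    bridge_ctx = BridgeContext(
        ctx=CtxItem(),
        prompt=prompt,
        system_prompt="You are a helpful assistant.",
        stream=False,
    )
    return window.core.api.anthropic.quick_call(context=bridge_ctx)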
pygpt_net/provider/api/anthropic/audio.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ================================================== #
+# This file is a part of PYGPT package #
+# Website: https://pygpt.net #
+# GitHub: https://github.com/szczyglis-dev/py-gpt #
+# MIT License #
+# Created By : Marcin Szczygliński #
+# Updated Date: 2025.09.05 01:00:00 #
+# ================================================== #
+
+from typing import Optional
+from pygpt_net.core.bridge.context import MultimodalContext
+
+
+class Audio:
+    def __init__(self, window=None):
+        """
+        Audio helpers for Anthropic (currently no official input/output audio in Python SDK).
+
+        :param window: Window instance
+        """
+        self.window = window
+
+    def build_input_block(self, multimodal_ctx: Optional[MultimodalContext]) -> Optional[dict]:
+        """
+        Future hook: build input_audio block if Anthropic exposes it publicly.
+        Currently returns None to avoid 400 errors.
+        """
+        return None
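The stub's contract is simply "return a content block dict or None"; a hypothetical consumer (not taken from the package) would append a block only when one is produced:

def append_audio_block(content: list, audio_helper, multimodal_ctx) -> None:
    # Append an input_audio content block only if the helper can build one.
    block = audio_helper.build_input_block(multimodal_ctx)
    if block is not None:  # currently always None for Anthropic
        content.append(block)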