pygpt-net 2.6.36__py3-none-any.whl → 2.6.37__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pygpt_net/CHANGELOG.txt +5 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/controller/chat/handler/anthropic_stream.py +166 -0
- pygpt_net/controller/chat/handler/google_stream.py +181 -0
- pygpt_net/controller/chat/handler/langchain_stream.py +24 -0
- pygpt_net/controller/chat/handler/llamaindex_stream.py +47 -0
- pygpt_net/controller/chat/handler/openai_stream.py +260 -0
- pygpt_net/controller/chat/handler/utils.py +210 -0
- pygpt_net/controller/chat/handler/worker.py +566 -0
- pygpt_net/controller/chat/handler/xai_stream.py +135 -0
- pygpt_net/controller/chat/stream.py +1 -1
- pygpt_net/controller/ctx/ctx.py +1 -1
- pygpt_net/controller/model/editor.py +3 -0
- pygpt_net/core/bridge/context.py +35 -35
- pygpt_net/core/bridge/worker.py +40 -16
- pygpt_net/core/render/web/body.py +29 -34
- pygpt_net/data/config/config.json +10 -3
- pygpt_net/data/config/models.json +3 -3
- pygpt_net/data/config/settings.json +105 -0
- pygpt_net/data/css/style.dark.css +2 -3
- pygpt_net/data/css/style.light.css +2 -3
- pygpt_net/data/locale/locale.de.ini +3 -1
- pygpt_net/data/locale/locale.en.ini +19 -1
- pygpt_net/data/locale/locale.es.ini +3 -1
- pygpt_net/data/locale/locale.fr.ini +3 -1
- pygpt_net/data/locale/locale.it.ini +3 -1
- pygpt_net/data/locale/locale.pl.ini +4 -2
- pygpt_net/data/locale/locale.uk.ini +3 -1
- pygpt_net/data/locale/locale.zh.ini +3 -1
- pygpt_net/provider/api/__init__.py +5 -3
- pygpt_net/provider/api/anthropic/__init__.py +190 -29
- pygpt_net/provider/api/anthropic/audio.py +30 -0
- pygpt_net/provider/api/anthropic/chat.py +341 -0
- pygpt_net/provider/api/anthropic/image.py +25 -0
- pygpt_net/provider/api/anthropic/tools.py +266 -0
- pygpt_net/provider/api/anthropic/vision.py +142 -0
- pygpt_net/provider/api/google/chat.py +2 -2
- pygpt_net/provider/api/google/tools.py +58 -48
- pygpt_net/provider/api/google/vision.py +7 -1
- pygpt_net/provider/api/openai/chat.py +1 -0
- pygpt_net/provider/api/openai/vision.py +6 -0
- pygpt_net/provider/api/x_ai/__init__.py +247 -0
- pygpt_net/provider/api/x_ai/audio.py +32 -0
- pygpt_net/provider/api/x_ai/chat.py +968 -0
- pygpt_net/provider/api/x_ai/image.py +208 -0
- pygpt_net/provider/api/x_ai/remote.py +262 -0
- pygpt_net/provider/api/x_ai/tools.py +120 -0
- pygpt_net/provider/api/x_ai/vision.py +119 -0
- pygpt_net/provider/core/config/patch.py +28 -0
- pygpt_net/provider/llms/anthropic.py +4 -2
- pygpt_net/ui/base/config_dialog.py +5 -11
- pygpt_net/ui/dialog/models.py +2 -4
- pygpt_net/ui/dialog/plugins.py +40 -43
- pygpt_net/ui/widget/element/labels.py +19 -3
- pygpt_net/ui/widget/textarea/web.py +1 -1
- {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.37.dist-info}/METADATA +11 -6
- {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.37.dist-info}/RECORD +60 -41
- pygpt_net/controller/chat/handler/stream_worker.py +0 -1136
- {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.37.dist-info}/LICENSE +0 -0
- {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.37.dist-info}/WHEEL +0 -0
- {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.37.dist-info}/entry_points.txt +0 -0
pygpt_net/data/locale/locale.es.ini
@@ -866,6 +866,7 @@ model.default = Predeterminado en modo
 model.extra = Parámetros adicionales (JSON)
 model.extra.desc = Un objeto JSON que contiene parámetros adicionales para el modelo (como el esfuerzo de razonamiento, etc.).
 model.id = ID del modelo
+model.id.desc = Introduzca el ID del modelo exacto proporcionado por el proveedor
 model.input = Entrada
 mode.llama_index = Chat con archivos
 mode.llama_index.tooltip = Chatear con contexto adicional proporcionado por LlamaIndex
@@ -889,11 +890,12 @@ model.mode = Modo(s)
 model.mode.desc = Modos disponibles: chat, llama_index, audio, research, completion, img, vision, assistant, agent_llama, agent, expert
 model.multimodal = Multimodal
 model.name = Nombre
+model.name.desc = Nombre para mostrar en la lista, puede ser cualquiera
 model.openai = API de OpenAI
 model.openai.desc = Soporta OpenAI API (o compatible)
 model.output = Salida
 model.provider = Proveedor
-model.provider.desc =
+model.provider.desc = Elija el proveedor para el modelo
 models.importer.all = Mostrar todo
 models.importer.available.label = Modelos disponibles
 models.importer.current.default = Por favor, seleccione un proveedor de la lista.

pygpt_net/data/locale/locale.fr.ini
@@ -865,6 +865,7 @@ model.default = Par défaut dans le mode
 model.extra = Paramètres supplémentaires (JSON)
 model.extra.desc = Un objet JSON contenant des paramètres supplémentaires pour le modèle (tel que l'effort de raisonnement, etc.).
 model.id = ID du modèle
+model.id.desc = Entrez l'ID de modèle exact fourni par le fournisseur
 model.input = Entrée
 mode.llama_index = Chat avec fichiers
 mode.llama_index.tooltip = Discussion avec un contexte supplémentaire fourni par LlamaIndex
@@ -888,11 +889,12 @@ model.mode = Mode(s)
 model.mode.desc = Modes disponibles : chat, llama_index, audio, recherche, complétion, img, vision, assistant, agent_llama, agent, expert
 model.multimodal = Multimodal
 model.name = Nom
+model.name.desc = Nom d'affichage dans la liste, peut être n'importe quoi
 model.openai = API OpenAI
 model.openai.desc = Supporte l'API OpenAI (ou compatible)
 model.output = Sortie
 model.provider = Fournisseur
-model.provider.desc =
+model.provider.desc = Choisissez le fournisseur pour le modèle
 models.importer.all = Tout afficher
 models.importer.available.label = Modèles disponibles
 models.importer.current.default = Veuillez sélectionner un fournisseur dans la liste.

pygpt_net/data/locale/locale.it.ini
@@ -865,6 +865,7 @@ model.default = Predefinito in modalità
 model.extra = Parametri aggiuntivi (JSON)
 model.extra.desc = Un oggetto JSON contenente parametri aggiuntivi per il modello (come lo sforzo di ragionamento, ecc.).
 model.id = ID modello
+model.id.desc = Inserisci l'ID del modello esatto fornito dal fornitore
 model.input = Ingresso
 mode.llama_index = Chat con file
 mode.llama_index.tooltip = Chattare con contesto aggiuntivo fornito da LlamaIndex
@@ -888,11 +889,12 @@ model.mode = Modalità disponibili
 model.mode.desc = Modalità disponibili: chat, llama_index, audio, research, completion, img, vision, assistant, agent_llama, agent, expert
 model.multimodal = Multimodale
 model.name = Nome
+model.name.desc = Nome visualizzato nell'elenco, può essere qualsiasi cosa
 model.openai = OpenAI API
 model.openai.desc = Supporta l'API OpenAI (o compatibile)
 model.output = Uscita
 model.provider = Fornitore
-model.provider.desc =
+model.provider.desc = Scegli il fornitore per il modello
 models.importer.all = Mostra tutto
 models.importer.available.label = Modelli disponibili
 models.importer.current.default = Seleziona un fornitore dall'elenco.

pygpt_net/data/locale/locale.pl.ini
@@ -869,6 +869,7 @@ model.default = Domyślnie w trybie
 model.extra = Dodatkowe parametry (JSON)
 model.extra.desc = Obiekt JSON zawierający dodatkowe parametry dla modelu (takie jak wysiłek intelektualny itp.).
 model.id = ID modelu
+model.id.desc = Wprowadź dokładny identyfikator modelu podany przez dostawcę
 model.input = Wejście
 mode.llama_index = Czat z plikami
 mode.llama_index.tooltip = Czat z dodatkowym kontekstem dostarczonym przez LlamaIndex
@@ -892,11 +893,12 @@ model.mode = Tryb(y)
 model.mode.desc = Dostępne tryby: chat, llama_index, audio, research, completion, img, vision, assistant, agent_llama, agent, expert
 model.multimodal = Multimodalny
 model.name = Nazwa
+model.name.desc = Nazwa wyświetlana na liście, może być dowolna
 model.openai = OpenAI API
 model.openai.desc = Wspiera OpenAI API (lub kompatybilny)
 model.output = Wyjście
 model.provider = Dostawca
-model.provider.desc =
+model.provider.desc = Wybierz dostawcę dla modelu
 models.importer.all = Pokaż wszystkie
 models.importer.available.label = Dostępne modele
 models.importer.current.default = Wybierz dostawcę z listy.
@@ -965,7 +967,7 @@ painter.btn.camera.capture = Z kamery
 painter.btn.capture = Użyj obrazu
 painter.btn.clear = Wyczyść
 painter.btn.crop = Przytnij
-painter.btn.fit = Dopasuj
+painter.btn.fit = Dopasuj
 painter.capture.manual.captured.success = Image captured:
 painter.capture.name.prefix = Drawing from
 painter.mode.erase = Gumka

pygpt_net/data/locale/locale.uk.ini
@@ -865,6 +865,7 @@ model.default = За замовчуванням у режимі
 model.extra = Додаткові параметри (JSON)
 model.extra.desc = Об'єкт JSON, що містить додаткові параметри для моделі (такі як розумові зусилля тощо).
 model.id = ID моделі
+model.id.desc = Введіть точний ідентифікатор моделі, наданий постачальником
 model.input = Вхід
 mode.llama_index = Чат з файлами
 mode.llama_index.tooltip = Чат з додатковим контекстом, наданим LlamaIndex
@@ -888,11 +889,12 @@ model.mode = Режим(и)
 model.mode.desc = Доступні режими: chat, llama_index, audio, research, completion, img, vision, assistant, agent_llama, agent, expert
 model.multimodal = Мультимодальний
 model.name = Назва
+model.name.desc = Відображуване ім'я в списку, може бути будь-яким
 model.openai = OpenAI API
 model.openai.desc = Підтримує OpenAI API (або сумісний)
 model.output = Вихід
 model.provider = Постачальник
-model.provider.desc =
+model.provider.desc = Виберіть постачальника для моделі
 models.importer.all = Показати всі
 models.importer.available.label = Доступні моделі
 models.importer.current.default = Виберіть постачальника зі списку.

pygpt_net/data/locale/locale.zh.ini
@@ -865,6 +865,7 @@ model.default = 預設模式
 model.extra = 额外参数 (JSON)
 model.extra.desc = 包含模型附加参数的JSON对象(例如推理努力等)。
 model.id = 模型ID
+model.id.desc = 输入提供商提供的精确模型ID
 model.input = 输入
 mode.llama_index = 文件聊天模式
 mode.llama_index.tooltip = 使用LlamaIndex提供的額外上下文進行聊天
@@ -888,11 +889,12 @@ model.mode = 模式
 model.mode.desc = 可用模式:聊天、完成、圖像、視覺、助手、langchain、llama_index、代理
 model.multimodal = 多模态
 model.name = 名稱
+model.name.desc = 列表上显示的名称,可以是任何东西
 model.openai = OpenAI API
 model.openai.desc = 支持 OpenAI API (或兼容)
 model.output = 输出
 model.provider = 提供者
-model.provider.desc =
+model.provider.desc = 选择模型的提供商
 models.importer.all = 显示全部
 models.importer.available.label = 可用模型
 models.importer.current.default = 请从列表中选择一个提供商。

pygpt_net/provider/api/__init__.py
@@ -6,22 +6,24 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.09.05 01:00:00 #
 # ================================================== #
 
 from .anthropic import ApiAnthropic
 from .google import ApiGoogle
 from .openai import ApiOpenAI
+from .x_ai import ApiXAI
 
 class Api:
 
     def __init__(self, window=None):
         """
-        API wrappers
+        API wrappers
 
         :param window: Window instance
         """
         self.window = window
         self.anthropic = ApiAnthropic(window)
         self.google = ApiGoogle(window)
-        self.openai = ApiOpenAI(window)
+        self.openai = ApiOpenAI(window)
+        self.xai = ApiXAI(window)

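For orientation, a minimal usage sketch of what the updated wiring exposes; only the class and attribute names come from the hunk above, everything else (the `window` object, the usage site) is illustrative.

    # Hypothetical sketch, assuming an application Window instance `window`
    from pygpt_net.provider.api import Api

    api = Api(window)
    api.openai     # ApiOpenAI, unchanged
    api.anthropic  # ApiAnthropic, now a full Messages API wrapper (see next file)
    api.google     # ApiGoogle
    api.xai        # ApiXAI, new provider wrapper in 2.6.37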
pygpt_net/provider/api/anthropic/__init__.py
@@ -6,63 +6,224 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.09.05 01:00:00 #
 # ================================================== #
 
-from
+from typing import Optional, Dict, Any
 
+import anthropic
 from pygpt_net.core.types import (
+    MODE_ASSISTANT,
+    MODE_AUDIO,
     MODE_CHAT,
+    MODE_COMPLETION,
+    MODE_IMAGE,
+    MODE_RESEARCH,
 )
+from pygpt_net.core.bridge.context import BridgeContext
 from pygpt_net.item.model import ModelItem
 
-
+from .chat import Chat
+from .tools import Tools
+from .vision import Vision
+from .audio import Audio
+from .image import Image
+
 
+class ApiAnthropic:
     def __init__(self, window=None):
         """
-        Anthropic API wrapper
+        Anthropic Messages API SDK wrapper
 
         :param window: Window instance
         """
         self.window = window
-        self.
+        self.chat = Chat(window)
+        self.tools = Tools(window)
+        self.vision = Vision(window)
+        self.audio = Audio(window)  # stub helpers (no official audio out/in in SDK as of now)
+        self.image = Image(window)  # stub: no image generation in Anthropic
+        self.client: Optional[anthropic.Anthropic] = None
         self.locked = False
+        self.last_client_args: Optional[Dict[str, Any]] = None
 
     def get_client(
             self,
             mode: str = MODE_CHAT,
-            model: ModelItem = None
-    ) -> Anthropic:
+            model: ModelItem = None,
+    ) -> anthropic.Anthropic:
+        """
+        Get or create Anthropic client
+
+        :param mode: Mode (chat, completion, image, etc.)
+        :param model: ModelItem
+        :return: anthropic.Anthropic instance
         """
-
+        # Build minimal args from app config
+        args = self.window.core.models.prepare_client_args(mode, model)
+        filtered = {}
+        if args.get("api_key"):
+            filtered["api_key"] = args["api_key"]
+
+        # Optionally honor custom base_url if present in config (advanced)
+        # base_url = self.window.core.config.get("api_native_anthropic.base_url", "").strip()
+        # if base_url:
+        #     filtered["base_url"] = base_url
+
+        # Keep a fresh client per call; Anthropic client is lightweight
+        return anthropic.Anthropic(**filtered)
 
-
-
-
+    def call(
+            self,
+            context: BridgeContext,
+            extra: dict = None,
+            rt_signals=None,  # unused for Anthropic
+    ) -> bool:
         """
-
+        Make an API call to Anthropic Messages API
+
+        :param context: BridgeContext
+        :param extra: Extra parameters
+        :param rt_signals: Not used (no realtime Voice API)
+        :return: True if successful, False otherwise
+        """
+        mode = context.mode
+        model = context.model
+        stream = context.stream
+        ctx = context.ctx
+        ai_name = ctx.output_name if ctx else "assistant"
+
+        # Anthropic: no Responses API; stream events are custom to Anthropic
+        if ctx:
+            ctx.use_responses_api = False
+
+        used_tokens = 0
+        response = None
+
+        if mode in (MODE_COMPLETION, MODE_CHAT, MODE_AUDIO, MODE_RESEARCH):
+            # MODE_AUDIO fallback: treat as normal chat (no native audio API)
+            response = self.chat.send(context=context, extra=extra)
+            used_tokens = self.chat.get_used_tokens()
+            if ctx:
+                self.vision.append_images(ctx)
+
+        elif mode == MODE_IMAGE:
+            # Anthropic does not support image generation – only vision (image input in chat)
+            return self.image.generate(context=context, extra=extra)  # always returns False
+
+        elif mode == MODE_ASSISTANT:
+            return False  # not implemented for Anthropic
+
+        if stream:
+            if ctx:
+                ctx.stream = response
+                ctx.set_output("", ai_name)
+                ctx.input_tokens = used_tokens
+            return True
+
+        if response is None:
+            return False
+
+        if isinstance(response, dict) and "error" in response:
+            return False
+
+        if ctx:
+            ctx.ai_name = ai_name
+            self.chat.unpack_response(mode, response, ctx)
         try:
-
-
-
-
-
-
-
-
+            import json
+            for tc in getattr(ctx, "tool_calls", []) or []:
+                fn = tc.get("function") or {}
+                args = fn.get("arguments")
+                if isinstance(args, str):
+                    try:
+                        fn["arguments"] = json.loads(args)
+                    except Exception:
+                        fn["arguments"] = {}
+        except Exception:
+            pass
+        return True
+
+    def quick_call(
+            self,
+            context: BridgeContext,
+            extra: dict = None
+    ) -> str:
+        """
+        Make a quick API call to Anthropic and return the output text
+
+        :param context: BridgeContext
+        :param extra: Extra parameters
+        :return: Output text
+        """
+        if context.request:
+            context.stream = False
+            context.mode = MODE_CHAT
+            self.locked = True
+            self.call(context, extra)
+            self.locked = False
+            return context.ctx.output
+
+        self.locked = True
+        try:
+            ctx = context.ctx
+            prompt = context.prompt
+            system_prompt = context.system_prompt
+            temperature = context.temperature
+            history = context.history
+            functions = context.external_functions
+            model = context.model or self.window.core.models.from_defaults()
+
+            client = self.get_client(MODE_CHAT, model)
+            tools = self.tools.get_all_tools(model, functions)
+
+            inputs = self.chat.build_input(
+                prompt=prompt,
+                system_prompt=system_prompt,
+                model=model,
+                history=history,
+                attachments=context.attachments,
+                multimodal_ctx=context.multimodal_ctx,
+            )
+
+            # Anthropic params
+            params: Dict[str, Any] = {
+                "model": model.id,
+                "max_tokens": context.max_tokens if context.max_tokens else 1024,
+                "messages": inputs,
+            }
+            if system_prompt:
+                params["system"] = system_prompt
+            if temperature is not None:
+                params["temperature"] = temperature
+            if tools:  # only include when non-empty list
+                params["tools"] = tools
+
+            resp = client.messages.create(**params)
+
+            if ctx:
+                calls = self.chat.extract_tool_calls(resp)
+                if calls:
+                    ctx.tool_calls = calls
+            return self.chat.extract_text(resp)
+        except Exception as e:
+            self.window.core.debug.log(e)
+            return ""
+        finally:
+            self.locked = False
 
     def stop(self):
-        """On global event stop"""
+        """On global event stop (no-op for Anthropic)"""
         pass
 
     def close(self):
-        """Close
+        """Close client (no persistent resources to close)"""
         if self.locked:
             return
-
-
-
-
-
-
-
+        self.client = None
+
+    def safe_close(self):
+        """Close client (safe)"""
+        if self.locked:
+            return
+        self.client = None

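A standalone sketch of the Messages API request that quick_call() assembles above, written against the official anthropic SDK directly; the API key, model id and prompt are examples, not values taken from the package.

    import anthropic

    client = anthropic.Anthropic(api_key="sk-ant-...")  # normally built by get_client() from app config
    resp = client.messages.create(
        model="claude-sonnet-4-20250514",                # example model id
        max_tokens=1024,                                  # same default quick_call() falls back to
        system="You are a helpful assistant.",
        messages=[{"role": "user", "content": "Hello"}],
    )
    text = "".join(b.text for b in resp.content if b.type == "text")  # concatenate the text blocks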
pygpt_net/provider/api/anthropic/audio.py (new file)
@@ -0,0 +1,30 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ================================================== #
+# This file is a part of PYGPT package #
+# Website: https://pygpt.net #
+# GitHub: https://github.com/szczyglis-dev/py-gpt #
+# MIT License #
+# Created By : Marcin Szczygliński #
+# Updated Date: 2025.09.05 01:00:00 #
+# ================================================== #
+
+from typing import Optional
+from pygpt_net.core.bridge.context import MultimodalContext
+
+
+class Audio:
+    def __init__(self, window=None):
+        """
+        Audio helpers for Anthropic (currently no official input/output audio in Python SDK).
+
+        :param window: Window instance
+        """
+        self.window = window
+
+    def build_input_block(self, multimodal_ctx: Optional[MultimodalContext]) -> Optional[dict]:
+        """
+        Future hook: build input_audio block if Anthropic exposes it publicly.
+        Currently returns None to avoid 400 errors.
+        """
+        return None