pygpt-net 2.6.29__py3-none-any.whl → 2.6.30__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (113)
  1. pygpt_net/CHANGELOG.txt +7 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/{container.py → app_core.py} +5 -6
  4. pygpt_net/controller/access/control.py +1 -9
  5. pygpt_net/controller/assistant/assistant.py +4 -4
  6. pygpt_net/controller/assistant/batch.py +7 -7
  7. pygpt_net/controller/assistant/files.py +4 -4
  8. pygpt_net/controller/assistant/threads.py +3 -3
  9. pygpt_net/controller/attachment/attachment.py +4 -7
  10. pygpt_net/controller/chat/common.py +1 -1
  11. pygpt_net/controller/chat/stream.py +961 -294
  12. pygpt_net/controller/chat/vision.py +11 -19
  13. pygpt_net/controller/config/placeholder.py +1 -1
  14. pygpt_net/controller/ctx/ctx.py +1 -1
  15. pygpt_net/controller/ctx/summarizer.py +1 -1
  16. pygpt_net/controller/mode/mode.py +21 -12
  17. pygpt_net/controller/plugins/settings.py +3 -2
  18. pygpt_net/controller/presets/editor.py +112 -99
  19. pygpt_net/controller/theme/theme.py +3 -2
  20. pygpt_net/controller/ui/vision.py +4 -4
  21. pygpt_net/core/agents/legacy.py +2 -2
  22. pygpt_net/core/agents/runners/openai_workflow.py +2 -2
  23. pygpt_net/core/assistants/files.py +5 -5
  24. pygpt_net/core/assistants/store.py +4 -4
  25. pygpt_net/core/bridge/bridge.py +3 -3
  26. pygpt_net/core/bridge/worker.py +28 -9
  27. pygpt_net/core/debug/console/console.py +2 -2
  28. pygpt_net/core/debug/presets.py +2 -2
  29. pygpt_net/core/experts/experts.py +2 -2
  30. pygpt_net/core/modes/modes.py +2 -2
  31. pygpt_net/core/presets/presets.py +3 -3
  32. pygpt_net/core/tokens/tokens.py +4 -4
  33. pygpt_net/core/types/mode.py +5 -2
  34. pygpt_net/core/vision/analyzer.py +1 -1
  35. pygpt_net/data/config/config.json +6 -3
  36. pygpt_net/data/config/models.json +75 -3
  37. pygpt_net/data/config/modes.json +3 -9
  38. pygpt_net/data/config/settings.json +89 -31
  39. pygpt_net/data/config/settings_section.json +2 -2
  40. pygpt_net/data/locale/locale.de.ini +2 -2
  41. pygpt_net/data/locale/locale.en.ini +9 -2
  42. pygpt_net/data/locale/locale.es.ini +2 -2
  43. pygpt_net/data/locale/locale.fr.ini +2 -2
  44. pygpt_net/data/locale/locale.it.ini +2 -2
  45. pygpt_net/data/locale/locale.pl.ini +3 -3
  46. pygpt_net/data/locale/locale.uk.ini +2 -2
  47. pygpt_net/data/locale/locale.zh.ini +2 -2
  48. pygpt_net/item/model.py +23 -3
  49. pygpt_net/plugin/openai_dalle/plugin.py +4 -4
  50. pygpt_net/plugin/openai_vision/plugin.py +12 -13
  51. pygpt_net/provider/agents/openai/agent.py +5 -5
  52. pygpt_net/provider/agents/openai/agent_b2b.py +5 -5
  53. pygpt_net/provider/agents/openai/agent_planner.py +5 -6
  54. pygpt_net/provider/agents/openai/agent_with_experts.py +5 -5
  55. pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +4 -4
  56. pygpt_net/provider/agents/openai/agent_with_feedback.py +4 -4
  57. pygpt_net/provider/agents/openai/bot_researcher.py +2 -2
  58. pygpt_net/provider/agents/openai/bots/research_bot/agents/planner_agent.py +1 -1
  59. pygpt_net/provider/agents/openai/bots/research_bot/agents/search_agent.py +1 -1
  60. pygpt_net/provider/agents/openai/bots/research_bot/agents/writer_agent.py +1 -1
  61. pygpt_net/provider/agents/openai/evolve.py +5 -5
  62. pygpt_net/provider/agents/openai/supervisor.py +4 -4
  63. pygpt_net/provider/api/__init__.py +27 -0
  64. pygpt_net/provider/api/anthropic/__init__.py +68 -0
  65. pygpt_net/provider/api/google/__init__.py +262 -0
  66. pygpt_net/provider/api/google/audio.py +114 -0
  67. pygpt_net/provider/api/google/chat.py +552 -0
  68. pygpt_net/provider/api/google/image.py +287 -0
  69. pygpt_net/provider/api/google/tools.py +222 -0
  70. pygpt_net/provider/api/google/vision.py +129 -0
  71. pygpt_net/provider/{gpt → api/openai}/__init__.py +2 -2
  72. pygpt_net/provider/{gpt → api/openai}/agents/computer.py +1 -1
  73. pygpt_net/provider/{gpt → api/openai}/agents/experts.py +1 -1
  74. pygpt_net/provider/{gpt → api/openai}/agents/response.py +1 -1
  75. pygpt_net/provider/{gpt → api/openai}/assistants.py +1 -1
  76. pygpt_net/provider/{gpt → api/openai}/chat.py +15 -8
  77. pygpt_net/provider/{gpt → api/openai}/completion.py +1 -1
  78. pygpt_net/provider/{gpt → api/openai}/image.py +1 -1
  79. pygpt_net/provider/{gpt → api/openai}/remote_tools.py +1 -1
  80. pygpt_net/provider/{gpt → api/openai}/responses.py +34 -20
  81. pygpt_net/provider/{gpt → api/openai}/store.py +2 -2
  82. pygpt_net/provider/{gpt → api/openai}/vision.py +1 -1
  83. pygpt_net/provider/{gpt → api/openai}/worker/assistants.py +4 -4
  84. pygpt_net/provider/{gpt → api/openai}/worker/importer.py +10 -10
  85. pygpt_net/provider/audio_input/openai_whisper.py +1 -1
  86. pygpt_net/provider/audio_output/google_tts.py +12 -0
  87. pygpt_net/provider/audio_output/openai_tts.py +1 -1
  88. pygpt_net/provider/core/config/patch.py +11 -0
  89. pygpt_net/provider/core/model/patch.py +9 -0
  90. pygpt_net/provider/core/preset/json_file.py +2 -4
  91. pygpt_net/provider/llms/anthropic.py +2 -5
  92. pygpt_net/provider/llms/base.py +4 -3
  93. pygpt_net/provider/llms/openai.py +1 -1
  94. pygpt_net/provider/loaders/hub/image_vision/base.py +1 -1
  95. pygpt_net/ui/dialog/preset.py +71 -55
  96. pygpt_net/ui/main.py +6 -4
  97. pygpt_net/utils.py +9 -0
  98. {pygpt_net-2.6.29.dist-info → pygpt_net-2.6.30.dist-info}/METADATA +32 -44
  99. {pygpt_net-2.6.29.dist-info → pygpt_net-2.6.30.dist-info}/RECORD +113 -105
  100. /pygpt_net/provider/{gpt → api/openai}/agents/__init__.py +0 -0
  101. /pygpt_net/provider/{gpt → api/openai}/agents/client.py +0 -0
  102. /pygpt_net/provider/{gpt → api/openai}/agents/remote_tools.py +0 -0
  103. /pygpt_net/provider/{gpt → api/openai}/agents/utils.py +0 -0
  104. /pygpt_net/provider/{gpt → api/openai}/audio.py +0 -0
  105. /pygpt_net/provider/{gpt → api/openai}/computer.py +0 -0
  106. /pygpt_net/provider/{gpt → api/openai}/container.py +0 -0
  107. /pygpt_net/provider/{gpt → api/openai}/summarizer.py +0 -0
  108. /pygpt_net/provider/{gpt → api/openai}/tools.py +0 -0
  109. /pygpt_net/provider/{gpt → api/openai}/utils.py +0 -0
  110. /pygpt_net/provider/{gpt → api/openai}/worker/__init__.py +0 -0
  111. {pygpt_net-2.6.29.dist-info → pygpt_net-2.6.30.dist-info}/LICENSE +0 -0
  112. {pygpt_net-2.6.29.dist-info → pygpt_net-2.6.30.dist-info}/WHEEL +0 -0
  113. {pygpt_net-2.6.29.dist-info → pygpt_net-2.6.30.dist-info}/entry_points.txt +0 -0
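The bulk of this release is a provider-namespace move: everything under pygpt_net/provider/gpt now lives at pygpt_net/provider/api/openai, with new sibling packages for the native Google (api/google) and Anthropic (api/anthropic) clients, and the runtime handle renamed from window.core.gpt to window.core.api.openai. A minimal before/after sketch of a call site, using paths taken from the hunks below (window is the application container object):

    # 2.6.29: OpenAI-specific code lived under provider/gpt
    # from pygpt_net.provider.gpt.agents.remote_tools import append_tools
    # window.core.gpt.image.generate(bridge_context, extra, sync)

    # 2.6.30: provider clients are grouped per vendor under provider/api/
    from pygpt_net.provider.api.openai.agents.remote_tools import append_tools
    window.core.api.openai.image.generate(bridge_context, extra, sync)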
pygpt_net/data/locale/locale.de.ini CHANGED
@@ -1174,9 +1174,9 @@ settings.ctx.sources = Zeige Llama-Indexquellen
  settings.ctx.sources.desc = Falls aktiviert, werden die genutzten Quellen in der Antwort angezeigt (falls verfügbar, funktioniert nicht im gestreamten Chat)
  settings.ctx.use_extra = Verwenden Sie zusätzlichen Kontextoutput
  settings.ctx.use_extra.desc = Wenn aktiviert, wird die einfache Textausgabe (falls verfügbar) aus den Befehlsausgaben zusätzlich zur JSON-Ausgabe angezeigt.
+ settings.debug.show_menu = Debug-Menü anzeigen
  settings.defaults.app.confirm = Werksseitige App-Einstellungen laden?
  settings.defaults.user.confirm = Aktuelle Änderungen rückgängig machen?
- settings.developer.debug = Debug-Menü anzeigen
  settings.dict.delete.confirm = Eintrag aus der Liste entfernen?
  settings.download.dir = Verzeichnis für Dateidownloads
  settings.download.dir.desc = Unterordner für heruntergeladene Dateien, z.B. im Assistentenmodus, innerhalb von "data"
@@ -1336,7 +1336,7 @@ settings.section.audio.cache = Cache
  settings.section.audio.device = Geräte
  settings.section.audio.options = Optionen
  settings.section.ctx = Kontext
- settings.section.developer = Entwickler
+ settings.section.debug = Fehlerbehebung
  settings.section.files = Dateien und Anhänge
  settings.section.general = Allgemein
  settings.section.images = Bilder
pygpt_net/data/locale/locale.en.ini CHANGED
@@ -1118,6 +1118,8 @@ settings.api_key.voyage = VoyageAI API KEY
  settings.api_key.voyage.desc = Required for the Voyage API - embeddings for Anthropic and DeepSeek API.
  settings.api_key.xai = xAI API KEY
  settings.api_key.xai.desc = Required for the xAI API and Grok models.
+ settings.api_native_google = Use native API SDK
+ settings.api_native_google.desc = Use native GenAI SDK instead of compatible OpenAI client
  settings.api_proxy = Proxy address
  settings.api_proxy.desc = Optional, proxy for OpenAI API, e.g. http://proxy.example.com or socks5://user:pass@host:port
  settings.api_use_responses = Use Responses API in Chat mode
@@ -1192,9 +1194,9 @@ settings.ctx.sources = Show LlamaIndex sources
  settings.ctx.sources.desc = If enabled, sources used will be displayed in the response (if available, it will not work in streamed chat)
  settings.ctx.use_extra = Use extra context output
  settings.ctx.use_extra.desc = If enabled, plain text output (if available) from command results will be displayed alongside the JSON output.
+ settings.debug.show_menu = Show debug menu
  settings.defaults.app.confirm = Load factory app settings?
  settings.defaults.user.confirm = Undo current changes?
- settings.developer.debug = Show debug menu
  settings.dict.delete.confirm = Remove item from list?
  settings.download.dir = Directory for file downloads
  settings.download.dir.desc = Subdirectory for downloaded files, e.g. in Assistants mode, inside "data"
@@ -1324,6 +1326,10 @@ settings.remote_tools.file_search = File search
  settings.remote_tools.file_search.args = File search vector store IDs
  settings.remote_tools.file_search.args.desc = Vector store IDs, separated by comma (,)
  settings.remote_tools.file_search.desc = Enable `file_search` remote tool in Chat mode / via OpenAI Responses API.
+ settings.remote_tools.google.code_interpreter = Code Interpreter
+ settings.remote_tools.google.code_interpreter.desc = Enable Code Interpreter remote tool in Chat mode.
+ settings.remote_tools.google.web_search = Google Web Search
+ settings.remote_tools.google.web_search.desc = Enable Google Search remote tool in Chat mode.
  settings.remote_tools.image = Image generation
  settings.remote_tools.image.desc = Enable `image_generation` remote tool in Chat mode / via OpenAI Responses API.
  settings.remote_tools.mcp = Remote MCP
@@ -1360,7 +1366,7 @@ settings.section.audio.cache = Cache
  settings.section.audio.device = Devices
  settings.section.audio.options = Options
  settings.section.ctx = Context
- settings.section.developer = Developer
+ settings.section.debug = Debug
  settings.section.files = Files and attachments
  settings.section.general = General
  settings.section.images = Images
@@ -1376,6 +1382,7 @@ settings.section.model = Models
  settings.section.personalize = Personalize
  settings.section.prompts = Prompts
  settings.section.remote_tools = Remote tools
+ settings.section.remote_tools.google = Google
  settings.section.remote_tools.openai = OpenAI
  settings.section.tab.general = General
  settings.section.updates = Updates
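The new settings.api_native_google option toggles how Gemini is called: through Google's OpenAI-compatible endpoint, or through the native GenAI SDK that the new pygpt_net/provider/api/google package wraps. A hedged sketch of the two client styles (not the app's actual wiring; the model name and key handling are illustrative):

    # option disabled: Gemini via the OpenAI-compatible endpoint
    from openai import OpenAI
    compat = OpenAI(
        api_key="<GOOGLE_API_KEY>",
        base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
    )
    reply = compat.chat.completions.create(
        model="gemini-2.0-flash",
        messages=[{"role": "user", "content": "Hello"}],
    )

    # option enabled: the native google-genai SDK
    from google import genai
    native = genai.Client(api_key="<GOOGLE_API_KEY>")
    reply = native.models.generate_content(
        model="gemini-2.0-flash",
        contents="Hello",
    )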
pygpt_net/data/locale/locale.es.ini CHANGED
@@ -1175,9 +1175,9 @@ settings.ctx.sources = Mostrar fuentes del índice Llama
  settings.ctx.sources.desc = Si está habilitado, las fuentes utilizadas se mostrarán en la respuesta (si están disponibles, no funcionará en el chat en vivo)
  settings.ctx.use_extra = Usar salida de contexto extra
  settings.ctx.use_extra.desc = Si está habilitado, la salida de texto sin formato (si está disponible) de los resultados de los comandos se mostrará junto con la salida JSON.
+ settings.debug.show_menu = Mostrar menú de depuración
  settings.defaults.app.confirm = ¿Cargar ajustes predeterminados de la aplicación?
  settings.defaults.user.confirm = ¿Deshacer cambios actuales?
- settings.developer.debug = Mostrar menú de depuración
  settings.dict.delete.confirm = ¿Eliminar elemento de la lista?
  settings.download.dir = Directorio para descargas de archivos
  settings.download.dir.desc = Subdirectorio para archivos descargados, por ejemplo, en modo Asistentes, dentro de "data"
@@ -1337,7 +1337,7 @@ settings.section.audio.cache = Caché
  settings.section.audio.device = Dispositivos
  settings.section.audio.options = Opciones
  settings.section.ctx = Contexto
- settings.section.developer = Desarrollador
+ settings.section.debug = Depuración
  settings.section.files = Archivos y adjuntos
  settings.section.general = General
  settings.section.images = Imágenes
pygpt_net/data/locale/locale.fr.ini CHANGED
@@ -1174,9 +1174,9 @@ settings.ctx.sources = Afficher les sources de l'index Llama
  settings.ctx.sources.desc = Si activé, les sources utilisées seront affichées dans la réponse (si disponibles, cela ne fonctionnera pas dans le chat en continu)
  settings.ctx.use_extra = Utiliser une sortie de contexte supplémentaire
  settings.ctx.use_extra.desc = Si activé, la sortie en texte brut (si disponible) des résultats de commande sera affichée aux côtés de la sortie JSON.
+ settings.debug.show_menu = Afficher le menu de débogage
  settings.defaults.app.confirm = Charger les réglages par défaut de l'application ?
  settings.defaults.user.confirm = Annuler les modifications actuelles ?
- settings.developer.debug = Afficher le menu de débogage
  settings.dict.delete.confirm = Supprimer l'élément de la liste ?
  settings.download.dir = Répertoire pour les téléchargements de fichiers
  settings.download.dir.desc = Sous-répertoire pour les fichiers téléchargés, par exemple en mode Assistants, à l'intérieur de "data"
@@ -1336,7 +1336,7 @@ settings.section.audio.cache = Cache
  settings.section.audio.device = Appareils
  settings.section.audio.options = Options
  settings.section.ctx = Contexte
- settings.section.developer = Développeur
+ settings.section.debug = Débogage
  settings.section.files = Fichiers et pièces jointes
  settings.section.general = Général
  settings.section.images = Images
pygpt_net/data/locale/locale.it.ini CHANGED
@@ -1174,9 +1174,9 @@ settings.ctx.sources = Mostra le fonti dell'indice Llama
  settings.ctx.sources.desc = Se abilitato, le fonti utilizzate saranno mostrate nella risposta (se disponibili, non funzionerà nella chat in streaming)
  settings.ctx.use_extra = Usa output di contesto extra
  settings.ctx.use_extra.desc = Se abilitato, l'output di testo normale (se disponibile) dai risultati dei comandi sarà visualizzato accanto all'output JSON.
+ settings.debug.show_menu = Mostra menu debug
  settings.defaults.app.confirm = Caricare le impostazioni predefinite dell'applicazione?
  settings.defaults.user.confirm = Annullare le modifiche correnti?
- settings.developer.debug = Mostra menu debug
  settings.dict.delete.confirm = Rimuovere l'elemento dall'elenco?
  settings.download.dir = Directory per il download dei file
  settings.download.dir.desc = Sottodirectory per i file scaricati, ad esempio in modalità Assistenti, all'interno di "data"
@@ -1336,7 +1336,7 @@ settings.section.audio.cache = Cache
  settings.section.audio.device = Dispositivi
  settings.section.audio.options = Opzioni
  settings.section.ctx = Contesto
- settings.section.developer = Sviluppatore
+ settings.section.debug = Debug
  settings.section.files = File e allegati
  settings.section.general = Generale
  settings.section.images = Immagini
pygpt_net/data/locale/locale.pl.ini CHANGED
@@ -1118,7 +1118,7 @@ settings.audio.input.backend.desc = Wybierz backend dla wejścia audio.
  settings.audio.input.channels = Kanały
  settings.audio.input.channels.desc = Kanały wejściowe, domyślnie: 1
  settings.audio.input.continuous = Ciągłe Nagrywanie Dźwięku (Kawałki)
- settings.audio.input.continuous.desc = Włącz nagrywanie w kawałkach dla długich nagrań audio w notatniku (notatki głosowe).
+ settings.audio.input.continuous.desc = Włącz nagrywanie w kawałkach dla długich nagrań audio w notatniku (notatki głosowe).
  settings.audio.input.device = Urządzenie do wejścia audio
  settings.audio.input.device.desc = Wybierz urządzenie do wejścia mikrofonu.
  settings.audio.input.rate = Częstotliwość próbkowania
@@ -1175,9 +1175,9 @@ settings.ctx.sources = Pokaż źródła LlamaIndex
  settings.ctx.sources.desc = Jeśli opcja jest włączona, wykorzystane źródła będą wyświetlane w odpowiedzi (jeśli dostępne, nie zadziała w czacie z wł. opcją stream)
  settings.ctx.use_extra = Używaj dodatkowego kontekstu outputu
  settings.ctx.use_extra.desc = Jeśli włączone, zwykły tekst outputu (jeśli dostępny) z wyników poleceń będzie wyświetlany obok outputu JSON.
+ settings.debug.show_menu = Pokaż menu debugowania
  settings.defaults.app.confirm = Wczytać fabryczne ustawienia aplikacji?
  settings.defaults.user.confirm = Przywrócić dokonane zmiany?
- settings.developer.debug = Pokaż menu debugowania
  settings.dict.delete.confirm = Usunąć pozycję z listy?
  settings.download.dir = Katalog na pliki do pobrania
  settings.download.dir.desc = Podkatalog na pobrane pliki, np. w trybie Asystentów, wewnątrz "data"
@@ -1337,7 +1337,7 @@ settings.section.audio.cache = Pamięć podręczna
  settings.section.audio.device = Urządzenia
  settings.section.audio.options = Opcje
  settings.section.ctx = Kontekst
- settings.section.developer = Deweloper
+ settings.section.debug = Debugowanie
  settings.section.files = Pliki i załączniki
  settings.section.general = Ogólne
  settings.section.images = Obrazy
pygpt_net/data/locale/locale.uk.ini CHANGED
@@ -1174,9 +1174,9 @@ settings.ctx.sources = Показати джерела індексу Llama
  settings.ctx.sources.desc = Якщо включено, використані джерела будуть відображатися в відповіді (якщо доступно, не працюватиме в потоковому чаті)
  settings.ctx.use_extra = Використовувати додатковий контекст виводу
  settings.ctx.use_extra.desc = Якщо увімкнено, звичайний текстовий вивід (якщо доступний) з результатів команд буде відображений поруч з JSON виводом.
+ settings.debug.show_menu = Показати меню налагодження
  settings.defaults.app.confirm = Завантажити заводські налаштування додатку?
  settings.defaults.user.confirm = Відмінити поточні зміни?
- settings.developer.debug = Показати меню налагодження
  settings.dict.delete.confirm = Видалити елемент зі списку?
  settings.download.dir = Директорія для завантаження файлів
  settings.download.dir.desc = Піддиректорія для завантажених файлів, наприклад, у режимі помічників, всередині "data"
@@ -1336,7 +1336,7 @@ settings.section.audio.cache = Кеш
  settings.section.audio.device = Пристрої
  settings.section.audio.options = Параметри
  settings.section.ctx = Контекст
- settings.section.developer = Розробник
+ settings.section.debug = Налагодження
  settings.section.files = Файли та вкладення
  settings.section.general = Загальні
  settings.section.images = Зображення
pygpt_net/data/locale/locale.zh.ini CHANGED
@@ -1174,9 +1174,9 @@ settings.ctx.sources = 显示Llama索引源
  settings.ctx.sources.desc = 如果启用,使用的源将在回应中显示(如果可用,不适用于流式聊天)
  settings.ctx.use_extra = 使用额外的上下文输出
  settings.ctx.use_extra.desc = 如果启用,将在命令结果的 JSON 输出旁边显示纯文本输出(如果有)。
+ settings.debug.show_menu = 显示调试菜单
  settings.defaults.app.confirm = 加载出厂应用程序设置?
  settings.defaults.user.confirm = 撤销当前更改?
- settings.developer.debug = 显示调试菜单
  settings.dict.delete.confirm = 从列表中移除项目?
  settings.download.dir = 文件下载目录
  settings.download.dir.desc = 下载文件的子目录,例如在助手模式下,位于 "data" 内部
@@ -1336,7 +1336,7 @@ settings.section.audio.cache = 缓存
  settings.section.audio.device = 设备
  settings.section.audio.options = 选项
  settings.section.ctx = 上下文
- settings.section.developer = 開發者
+ settings.section.debug = 调试
  settings.section.files = 文件和附件
  settings.section.general = 一般
  settings.section.images = 圖像
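Every locale swaps settings.developer.debug for settings.debug.show_menu and renames the Developer section to Debug. A rename like this usually needs a one-time config migration on upgrade, which is presumably part of what pygpt_net/provider/core/config/patch.py (+11) handles; a purely hypothetical sketch of such a step (key names invented for illustration):

    def patch_2_6_30(config: dict) -> dict:
        # hypothetical: carry the old developer-menu flag over to the new key
        if "developer.debug" in config and "debug.show_menu" not in config:
            config["debug.show_menu"] = config.pop("developer.debug")
        return config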
pygpt_net/item/model.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.08.23 15:00:00 #
+ # Updated Date: 2025.08.28 09:00:00 #
  # ================================================== #
 
  import json
@@ -253,7 +253,17 @@ class ModelItem:
 
          :return: True if supports image input
          """
-         if MODE_VISION in self.mode or MULTIMODAL_IMAGE in self.input:
+         if MULTIMODAL_IMAGE in self.input:
+             return True
+         return False
+
+     def is_image_output(self) -> bool:
+         """
+         Check if model supports image output
+
+         :return: True if supports image output
+         """
+         if "image" in self.output or MODE_VISION in self.mode:
              return True
          return False
@@ -263,7 +273,17 @@ class ModelItem:
 
          :return: True if supports audio input
          """
-         if MODE_AUDIO in self.mode or MULTIMODAL_AUDIO in self.input:
+         if MULTIMODAL_AUDIO in self.input:
+             return True
+         return False
+
+     def is_audio_output(self) -> bool:
+         """
+         Check if model supports audio output
+
+         :return: True if supports audio output
+         """
+         if MULTIMODAL_AUDIO in self.output:
              return True
          return False
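ModelItem now separates input from output capabilities: is_image_input()/is_audio_input() consult only the input modality list, while the new is_image_output()/is_audio_output() consult output (with a legacy MODE_VISION fallback for images). A short usage sketch, assuming the window.core.models registry seen elsewhere in this diff; the model ID and the two helper calls are hypothetical:

    model = window.core.models.get("gpt-4o")  # returns a ModelItem
    if model.is_image_input():
        allow_image_attachments()   # hypothetical UI hook
    if model.is_audio_output():
        enable_spoken_replies()     # hypothetical UI hook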
pygpt_net/plugin/openai_dalle/plugin.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.07.30 00:00:00 #
+ # Updated Date: 2025.08.28 09:00:00 #
  # ================================================== #
 
  from pygpt_net.core.types import (
@@ -42,8 +42,8 @@ class Plugin(BasePlugin):
          ]
          self.allowed_modes = [
              MODE_CHAT,
-             MODE_LANGCHAIN,
-             MODE_VISION,
+             # MODE_LANGCHAIN,
+             # MODE_VISION,
              MODE_LLAMA_INDEX,
              MODE_ASSISTANT,
              MODE_AGENT,
@@ -166,7 +166,7 @@ class Plugin(BasePlugin):
              sync = False
              if self.window.core.config.get("mode") in [MODE_AGENT_LLAMA, MODE_AGENT_OPENAI]:
                  sync = True
-             self.window.core.gpt.image.generate(bridge_context, extra, sync) # force inline mode, async call
+             self.window.core.api.openai.image.generate(bridge_context, extra, sync) # force inline mode, async call
          except Exception as e:
              self.log("Error: " + str(e))
              return
pygpt_net/plugin/openai_vision/plugin.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.08.15 23:00:00 #
+ # Updated Date: 2025.08.28 09:00:00 #
  # ================================================== #
 
  from pygpt_net.core.types import (
@@ -102,7 +102,7 @@
              ) # mode change
 
          elif name == Event.MODEL_BEFORE:
-             if "mode" in data and data["mode"] == MODE_VISION:
+             if data.get("mode") == MODE_CHAT:
                  key = self.get_option_value("model")
                  if self.window.core.models.has(key):
                      data['model'] = self.window.core.models.get(key)
@@ -119,7 +119,7 @@
              data['value'] = self.on_system_prompt(data['value'])
 
          elif name == Event.UI_ATTACHMENTS:
-             mode = data["mode"]
+             mode = data.get("mode")
              if mode in [MODE_AGENT, MODE_AGENT_LLAMA, MODE_AGENT_OPENAI] and not self.window.core.config.get("cmd"):
                  pass
              else:
@@ -263,8 +263,7 @@
          # append vision prompt only if vision is provided or enabled
          if not self.is_vision_provided():
              return prompt
-         prompt = "Image attachment has been already sent.\n\n" + prompt
-         return prompt
+         return "Image attachment has been already sent.\n\n" + prompt
 
      def on_pre_prompt(self, prompt: str) -> str:
          """
@@ -294,12 +293,12 @@
          """
          mode = self.window.core.config.get('mode')
          attachments = self.window.core.attachments.get_all(mode)
-         self.window.core.gpt.vision.build_content(
+         self.window.core.api.openai.vision.build_content(
              str(self.prompt),
              attachments,
          ) # tmp build content, provide attachments from global mode
 
-         built_attachments = self.window.core.gpt.vision.attachments
+         built_attachments = self.window.core.api.openai.vision.attachments
          if len(built_attachments) > 0:
              return True
          return False
@@ -313,13 +312,13 @@
          result = False
          mode = self.window.core.config.get('mode')
          attachments = self.window.core.attachments.get_all(mode) # from global mode
-         self.window.core.gpt.vision.build_content(
+         self.window.core.api.openai.vision.build_content(
              str(self.prompt),
              attachments,
          ) # tmp build content, provide attachments from global mode
 
-         built_attachments = self.window.core.gpt.vision.attachments
-         built_urls = self.window.core.gpt.vision.urls
+         built_attachments = self.window.core.api.openai.vision.attachments
+         built_urls = self.window.core.api.openai.vision.urls
 
          # check for images in URLs found in prompt
          img_urls = []
@@ -343,13 +342,13 @@
          :return: updated mode
          """
          # abort if already in vision mode or command enabled
-         if mode == MODE_VISION or mode in self.disabled_mode_switch:
+         if mode in self.disabled_mode_switch:
              return mode # keep current mode
 
-         # if already used in this ctx then keep vision mode
+         # if already used in this ctx then keep vision (in CHAT) mode
          if self.is_vision_provided():
              ctx.is_vision = True
-             return MODE_VISION
+             return MODE_CHAT
 
          return mode # keep current mode
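With the standalone vision mode retired, the plugin resolves everything to MODE_CHAT: event payloads are read via data.get("mode"), so a missing key yields None instead of raising KeyError, and on_mode_before() returns MODE_CHAT when an image attachment is present. Condensed, the post-change switch logic reads roughly as follows (a sketch assembled from the hunks above, not a verbatim copy):

    def on_mode_before(self, ctx, mode: str) -> str:
        if mode in self.disabled_mode_switch:
            return mode  # caller opted out of auto-switching
        if self.is_vision_provided():
            ctx.is_vision = True
            return MODE_CHAT  # multimodal chat replaces the old vision mode
        return mode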
pygpt_net/provider/agents/openai/agent.py CHANGED
@@ -26,12 +26,12 @@ from pygpt_net.core.types import (
  from pygpt_net.item.ctx import CtxItem
  from pygpt_net.item.model import ModelItem
 
- from pygpt_net.provider.gpt.agents.remote_tools import is_computer_tool, append_tools
- from pygpt_net.provider.gpt.agents.computer import Agent as ComputerAgent, LocalComputer
- from pygpt_net.provider.gpt.agents.response import StreamHandler
+ from pygpt_net.provider.api.openai.agents.remote_tools import is_computer_tool, append_tools
+ from pygpt_net.provider.api.openai.agents.computer import Agent as ComputerAgent, LocalComputer
+ from pygpt_net.provider.api.openai.agents.response import StreamHandler
+ from pygpt_net.provider.api.openai.agents.experts import get_experts
 
  from ..base import BaseAgent
- from ...gpt.agents.experts import get_experts
 
  class Agent(BaseAgent):
      def __init__(self, *args, **kwargs):
@@ -159,7 +159,7 @@ class Agent(BaseAgent):
              agent,
              **kwargs
          )
-         final_output, last_response_id = window.core.gpt.responses.unpack_agent_response(result, ctx)
+         final_output, last_response_id = window.core.api.openai.responses.unpack_agent_response(result, ctx)
          response_id = result.last_response_id
          if verbose:
              print("Final response:", result)
pygpt_net/provider/agents/openai/agent_b2b.py CHANGED
@@ -29,12 +29,12 @@ from pygpt_net.item.ctx import CtxItem
  from pygpt_net.item.model import ModelItem
  from pygpt_net.item.preset import PresetItem
 
- from pygpt_net.provider.gpt.agents.remote_tools import append_tools
- from pygpt_net.provider.gpt.agents.response import StreamHandler
+ from pygpt_net.provider.api.openai.agents.remote_tools import append_tools
+ from pygpt_net.provider.api.openai.agents.response import StreamHandler
+ from pygpt_net.provider.api.openai.agents.experts import get_experts
  from pygpt_net.utils import trans
 
  from ..base import BaseAgent
- from ...gpt.agents.experts import get_experts
 
 
  class Agent(BaseAgent):
@@ -274,7 +274,7 @@ class Agent(BaseAgent):
          if verbose:
              print("Final response:", result)
 
-         final_output, last_response_id = window.core.gpt.responses.unpack_agent_response(result, ctx)
+         final_output, last_response_id = window.core.api.openai.responses.unpack_agent_response(result, ctx)
 
          if bridge.stopped():
              bridge.on_stop(ctx)
@@ -305,7 +305,7 @@ class Agent(BaseAgent):
          if verbose:
              print("Final response:", result)
 
-         final_output, last_response_id = window.core.gpt.responses.unpack_agent_response(result, ctx)
+         final_output, last_response_id = window.core.api.openai.responses.unpack_agent_response(result, ctx)
          if bridge.stopped():
              bridge.on_stop(ctx)
              break
pygpt_net/provider/agents/openai/agent_planner.py CHANGED
@@ -30,14 +30,13 @@ from pygpt_net.item.ctx import CtxItem
  from pygpt_net.item.model import ModelItem
  from pygpt_net.item.preset import PresetItem
 
- from pygpt_net.provider.gpt.agents.client import get_custom_model_provider, set_openai_env
- from pygpt_net.provider.gpt.agents.remote_tools import append_tools
- from pygpt_net.provider.gpt.agents.response import StreamHandler
+ from pygpt_net.provider.api.openai.agents.client import get_custom_model_provider, set_openai_env
+ from pygpt_net.provider.api.openai.agents.remote_tools import append_tools
+ from pygpt_net.provider.api.openai.agents.response import StreamHandler
+ from pygpt_net.provider.api.openai.agents.experts import get_experts
  from pygpt_net.utils import trans
 
  from ..base import BaseAgent
- from ...gpt.agents.experts import get_experts
-
 
  @dataclass
  class EvaluationFeedback:
@@ -327,7 +326,7 @@ class Agent(BaseAgent):
              print("Final response:", result)
 
          input_items = result.to_input_list()
-         final_output, last_response_id = window.core.gpt.responses.unpack_agent_response(result, ctx)
+         final_output, last_response_id = window.core.api.openai.responses.unpack_agent_response(result, ctx)
 
          if bridge.stopped():
             bridge.on_stop(ctx)
pygpt_net/provider/agents/openai/agent_with_experts.py CHANGED
@@ -29,12 +29,12 @@ from pygpt_net.item.ctx import CtxItem
  from pygpt_net.item.model import ModelItem
  from pygpt_net.item.preset import PresetItem
 
- from pygpt_net.provider.gpt.agents.client import get_custom_model_provider, set_openai_env
- from pygpt_net.provider.gpt.agents.remote_tools import append_tools
- from pygpt_net.provider.gpt.agents.response import StreamHandler
+ from pygpt_net.provider.api.openai.agents.client import get_custom_model_provider, set_openai_env
+ from pygpt_net.provider.api.openai.agents.remote_tools import append_tools
+ from pygpt_net.provider.api.openai.agents.response import StreamHandler
+ from pygpt_net.provider.api.openai.agents.experts import get_experts
 
  from ..base import BaseAgent
- from ...gpt.agents.experts import get_experts
 
 
  class Agent(BaseAgent):
@@ -137,7 +137,7 @@ class Agent(BaseAgent):
              agent,
              **kwargs
          )
-         final_output, last_response_id = window.core.gpt.responses.unpack_agent_response(result, ctx)
+         final_output, last_response_id = window.core.api.openai.responses.unpack_agent_response(result, ctx)
          response_id = result.last_response_id
          if verbose:
              print("Final response:", result)
pygpt_net/provider/agents/openai/agent_with_experts_feedback.py CHANGED
@@ -29,12 +29,12 @@ from pygpt_net.item.ctx import CtxItem
  from pygpt_net.item.model import ModelItem
  from pygpt_net.item.preset import PresetItem
 
- from pygpt_net.provider.gpt.agents.remote_tools import append_tools
- from pygpt_net.provider.gpt.agents.response import StreamHandler
+ from pygpt_net.provider.api.openai.agents.remote_tools import append_tools
+ from pygpt_net.provider.api.openai.agents.response import StreamHandler
+ from pygpt_net.provider.api.openai.agents.experts import get_experts
  from pygpt_net.utils import trans
 
  from ..base import BaseAgent
- from ...gpt.agents.experts import get_experts
 
 
  @dataclass
@@ -221,7 +221,7 @@ class Agent(BaseAgent):
              print("Final response:", result)
 
          input_items = result.to_input_list()
-         final_output, last_response_id = window.core.gpt.responses.unpack_agent_response(result, ctx)
+         final_output, last_response_id = window.core.api.openai.responses.unpack_agent_response(result, ctx)
 
          if bridge.stopped():
              bridge.on_stop(ctx)
pygpt_net/provider/agents/openai/agent_with_feedback.py CHANGED
@@ -29,12 +29,12 @@ from pygpt_net.item.ctx import CtxItem
  from pygpt_net.item.model import ModelItem
  from pygpt_net.item.preset import PresetItem
 
- from pygpt_net.provider.gpt.agents.remote_tools import append_tools
- from pygpt_net.provider.gpt.agents.response import StreamHandler
+ from pygpt_net.provider.api.openai.agents.remote_tools import append_tools
+ from pygpt_net.provider.api.openai.agents.response import StreamHandler
+ from pygpt_net.provider.api.openai.agents.experts import get_experts
  from pygpt_net.utils import trans
 
  from ..base import BaseAgent
- from ...gpt.agents.experts import get_experts
 
 
  @dataclass
@@ -221,7 +221,7 @@ class Agent(BaseAgent):
              print("Final response:", result)
 
          input_items = result.to_input_list()
-         final_output, last_response_id = window.core.gpt.responses.unpack_agent_response(result, ctx)
+         final_output, last_response_id = window.core.api.openai.responses.unpack_agent_response(result, ctx)
 
          if bridge.stopped():
              bridge.on_stop(ctx)
pygpt_net/provider/agents/openai/bot_researcher.py CHANGED
@@ -25,12 +25,12 @@ from pygpt_net.core.types import (
  from pygpt_net.item.ctx import CtxItem
  from pygpt_net.item.model import ModelItem
 
- from pygpt_net.provider.gpt.agents.remote_tools import append_tools
+ from pygpt_net.provider.api.openai.agents.remote_tools import append_tools
+ from pygpt_net.provider.api.openai.agents.experts import get_experts
  from pygpt_net.utils import trans
 
  from ..base import BaseAgent
  from .bots.research_bot.manager import ResearchManager
- from ...gpt.agents.experts import get_experts
 
 
  class Agent(BaseAgent):
pygpt_net/provider/agents/openai/bots/research_bot/agents/planner_agent.py CHANGED
@@ -15,7 +15,7 @@ from pydantic import BaseModel
  from agents import Agent
 
  from pygpt_net.item.preset import PresetItem
- from pygpt_net.provider.gpt.agents.remote_tools import append_tools
+ from pygpt_net.provider.api.openai.agents.remote_tools import append_tools
 
 
  class WebSearchItem(BaseModel):
pygpt_net/provider/agents/openai/bots/research_bot/agents/search_agent.py CHANGED
@@ -14,7 +14,7 @@ from agents.model_settings import ModelSettings
 
  from pygpt_net.core.types import OPENAI_REMOTE_TOOL_DISABLE_WEB_SEARCH
  from pygpt_net.item.preset import PresetItem
- from pygpt_net.provider.gpt.agents.remote_tools import append_tools
+ from pygpt_net.provider.api.openai.agents.remote_tools import append_tools
 
 
  def get_search_agent(
pygpt_net/provider/agents/openai/bots/research_bot/agents/writer_agent.py CHANGED
@@ -15,7 +15,7 @@ from pydantic import BaseModel
  from agents import Agent
 
  from pygpt_net.item.preset import PresetItem
- from pygpt_net.provider.gpt.agents.remote_tools import append_tools
+ from pygpt_net.provider.api.openai.agents.remote_tools import append_tools
 
 
  class ReportData(BaseModel):
pygpt_net/provider/agents/openai/evolve.py CHANGED
@@ -30,12 +30,12 @@ from pygpt_net.item.ctx import CtxItem
  from pygpt_net.item.model import ModelItem
  from pygpt_net.item.preset import PresetItem
 
- from pygpt_net.provider.gpt.agents.remote_tools import append_tools
- from pygpt_net.provider.gpt.agents.response import StreamHandler
+ from pygpt_net.provider.api.openai.agents.remote_tools import append_tools
+ from pygpt_net.provider.api.openai.agents.response import StreamHandler
+ from pygpt_net.provider.api.openai.agents.experts import get_experts
  from pygpt_net.utils import trans
 
  from ..base import BaseAgent
- from ...gpt.agents.experts import get_experts
 
 
  @dataclass
@@ -340,7 +340,7 @@ class Agent(BaseAgent):
 
          print("Winner: agent ", choose)
 
-         final_output, last_response_id = window.core.gpt.responses.unpack_agent_response(results[choose], ctx)
+         final_output, last_response_id = window.core.api.openai.responses.unpack_agent_response(results[choose], ctx)
          input_items = results[choose].to_input_list()
 
          if bridge.stopped():
@@ -437,7 +437,7 @@ class Agent(BaseAgent):
              bridge.on_stop(ctx)
              break
 
-         window.core.gpt.responses.unpack_agent_response(results[choose], ctx)
+         window.core.api.openai.responses.unpack_agent_response(results[choose], ctx)
          input_items = results[choose].to_input_list()
 
          evaluator_result = await Runner.run(evaluator, input_items)
pygpt_net/provider/agents/openai/supervisor.py CHANGED
@@ -31,9 +31,9 @@ from pygpt_net.core.types import (
  from pygpt_net.item.ctx import CtxItem
  from pygpt_net.item.model import ModelItem
 
- from pygpt_net.provider.gpt.agents.remote_tools import append_tools
- from pygpt_net.provider.gpt.agents.response import StreamHandler
- from pygpt_net.provider.gpt.agents.experts import get_experts
+ from pygpt_net.provider.api.openai.agents.remote_tools import append_tools
+ from pygpt_net.provider.api.openai.agents.response import StreamHandler
+ from pygpt_net.provider.api.openai.agents.experts import get_experts
  from pygpt_net.utils import trans
 
  from ..base import BaseAgent
@@ -218,7 +218,7 @@ class Agent(BaseAgent):
              agent,
              **kwargs
          )
-         final_output, last_response_id = window.core.gpt.responses.unpack_agent_response(result, ctx)
+         final_output, last_response_id = window.core.api.openai.responses.unpack_agent_response(result, ctx)
          response_id = result.last_response_id
          if verbose:
              print("Final response:", result)