pygpt-net 2.5.15__py3-none-any.whl → 2.5.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pygpt_net/CHANGELOG.txt CHANGED
@@ -1,3 +1,14 @@
+ 2.5.17 (2025-06-25)
+
+ - Added settings to enable/disable Remote Tools via the Responses API in Chat mode: Config -> Settings -> Remote tools. Currently only the web-search-preview tool is available; the rest are coming soon.
+ - Fixed context summarization in the Ollama provider.
+
+ 2.5.16 (2025-06-25)
+
+ - OpenAI API upgraded to 1.91.0.
+ - Chat mode migrated to the Responses API with a native built-in web search tool. (beta)
+ - Fixed the file_read tool in the I/O plugin.
+
  2.5.15 (2025-06-24)

  - Added Ollama models importer in "Settings -> Models -> Import from Ollama".
pygpt_net/__init__.py CHANGED
@@ -6,15 +6,15 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025-06-24 19:00:00 #
+ # Updated Date: 2025-06-25 02:00:00 #
  # ================================================== #

  __author__ = "Marcin Szczygliński"
  __copyright__ = "Copyright 2025, Marcin Szczygliński"
  __credits__ = ["Marcin Szczygliński"]
  __license__ = "MIT"
- __version__ = "2.5.15"
- __build__ = "2025-06-24"
+ __version__ = "2.5.17"
+ __build__ = "2025-06-25"
  __maintainer__ = "Marcin Szczygliński"
  __github__ = "https://github.com/szczyglis-dev/py-gpt"
  __report__ = "https://github.com/szczyglis-dev/py-gpt/issues"
pygpt_net/controller/chat/stream.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.03.02 19:00:00 #
+ # Updated Date: 2025.06.25 02:00:00 #
  # ================================================== #

  from typing import Any
@@ -36,6 +36,9 @@ class Stream:
  output_tokens = 0
  begin = True
  error = None
+ tool_calls = []
+ fn_args_buffers = {}
+ citations = []

  # chunks: stream begin
  data = {
@@ -60,24 +63,32 @@ class Stream:
  if error is not None:
  break # break if error

+ etype = None
  response = None
  chunk_type = "raw"
- if (hasattr(chunk, 'choices')
- and chunk.choices[0] is not None
- and hasattr(chunk.choices[0], 'delta')
- and chunk.choices[0].delta is not None):
- chunk_type = "api_chat"
- elif (hasattr(chunk, 'choices')
- and chunk.choices[0] is not None
- and hasattr(chunk.choices[0], 'text')
- and chunk.choices[0].text is not None):
- chunk_type = "api_completion"
- elif (hasattr(chunk, 'content')
- and chunk.content is not None):
- chunk_type = "langchain_chat"
- elif (hasattr(chunk, 'delta')
- and chunk.delta is not None):
- chunk_type = "llama_chat"
+ if ctx.use_responses_api:
+ if hasattr(chunk, 'type'): # streaming event type
+ etype = chunk.type
+ chunk_type = "api_chat_responses" # responses API
+ else:
+ continue
+ else:
+ if (hasattr(chunk, 'choices')
+ and chunk.choices[0] is not None
+ and hasattr(chunk.choices[0], 'delta')
+ and chunk.choices[0].delta is not None):
+ chunk_type = "api_chat" # chat completions API
+ elif (hasattr(chunk, 'choices')
+ and chunk.choices[0] is not None
+ and hasattr(chunk.choices[0], 'text')
+ and chunk.choices[0].text is not None):
+ chunk_type = "api_completion"
+ elif (hasattr(chunk, 'content')
+ and chunk.content is not None):
+ chunk_type = "langchain_chat"
+ elif (hasattr(chunk, 'delta')
+ and chunk.delta is not None):
+ chunk_type = "llama_chat"

  # OpenAI chat completion
  if chunk_type == "api_chat":
@@ -110,6 +121,43 @@
  if tool_chunk.function.arguments:
  tool_call["function"]["arguments"] += tool_chunk.function.arguments

+ elif chunk_type == "api_chat_responses":
+
+ if etype == "response.output_text.delta":
+ response = chunk.delta
+
+ # ---------- function_call ----------
+ elif etype == "response.output_item.added" and chunk.item.type == "function_call":
+ tool_calls.append({
+ "id": chunk.item.id,
+ "type": "function",
+ "function": {"name": chunk.item.name, "arguments": ""}
+ })
+ fn_args_buffers[chunk.item.id] = ""
+
+ elif etype == "response.function_call_arguments.delta":
+ fn_args_buffers[chunk.item_id] += chunk.delta
+
+ elif etype == "response.function_call_arguments.done":
+ for tc in tool_calls:
+ if tc["id"] == chunk.item_id:
+ tc["function"]["arguments"] = fn_args_buffers[chunk.item_id]
+ break
+ fn_args_buffers.pop(chunk.item_id, None)
+
+ # ---------- annotations ----------
+ elif etype == "response.output_text.annotation.added":
+ if chunk.annotation['type'] == "url_citation":
+ if citations is None:
+ citations = []
+ url_citation = chunk.annotation['url']
+ citations.append(url_citation)
+ ctx.urls = citations
+
+ # ---------- end / error ----------
+ elif etype in {"response.done", "response.failed", "error"}:
+ pass
+

  # OpenAI completion
  elif chunk_type == "api_completion":
  if chunk.choices[0].text is not None:
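Note: the new `api_chat_responses` branch above dispatches on typed streaming events instead of probing chunk attributes. A minimal, self-contained sketch of the same event loop, assuming `openai>=1.91.0` and `OPENAI_API_KEY` in the environment (the model id is illustrative; the event names mirror the branch above):

```python
# Minimal sketch: consuming OpenAI Responses API streaming events.
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

stream = client.responses.create(
    model="gpt-4o",                      # illustrative model id
    input="Write one haiku about diffs.",
    stream=True,
)

text_parts = []
for event in stream:
    etype = getattr(event, "type", None)
    if etype == "response.output_text.delta":
        text_parts.append(event.delta)   # incremental text chunk
    elif etype in ("response.done", "response.failed", "error"):
        break                            # terminal events, as handled above

print("".join(text_parts))
```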
pygpt_net/core/bridge/worker.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2024.11.29 23:00:00 #
+ # Updated Date: 2025.06.25 02:00:00 #
  # ================================================== #

  from PySide6.QtCore import QObject, Signal, QRunnable, Slot
@@ -15,6 +15,7 @@ from pygpt_net.core.types import (
  MODE_AGENT_LLAMA,
  MODE_LANGCHAIN,
  MODE_LLAMA_INDEX,
+ MODE_ASSISTANT,
  )
  from pygpt_net.core.events import KernelEvent, Event

@@ -48,7 +49,8 @@ class BridgeWorker(QObject, QRunnable):
  self.handle_post_prompt_async()

  # ADDITIONAL CONTEXT: append additional context from attachments
- self.handle_additional_context()
+ if self.mode != MODE_ASSISTANT:
+ self.handle_additional_context()

  # POST PROMPT END: handle post prompt end event
  self.handle_post_prompt_end()
pygpt_net/core/command/__init__.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.02.02 02:00:00 #
+ # Updated Date: 2025.06.25 02:00:00 #
  # ================================================== #

  import copy
@@ -228,6 +228,34 @@ class Command:
  print("Error parsing tool call: " + str(e))
  return parsed

+ def unpack_tool_calls_responses(
+ self,
+ tool_calls: List
+ ) -> List[Dict[str, Any]]:
+ """
+ Unpack tool calls from OpenAI response
+
+ :param tool_calls: tool calls list
+ :return: parsed tool calls list
+ """
+ parsed = []
+ for tool_call in tool_calls:
+ try:
+ parsed.append(
+ {
+ "id": tool_call.id,
+ "type": "function",
+ "function": {
+ "name": tool_call.name,
+ "arguments": json.loads(tool_call.arguments)
+ }
+ }
+ )
+ except Exception as e:
+ self.window.core.debug.log(e)
+ print("Error parsing tool call: " + str(e))
+ return parsed
+
  def unpack_tool_calls_chunks(
  self,
  ctx: CtxItem,
@@ -503,6 +531,9 @@
  if "required" in param and param["required"]:
  required.append(param["name"])

+ if len(required) > 0:
+ params["required"] = required
+
  # extract params and convert to JSON schema format
  for param in cmd["params"]:
  try:
@@ -570,7 +601,7 @@
  elif params["properties"][key]["type"] == "list":
  params["properties"][key]["type"] = "array"
  params["properties"][key]["items"] = {
- "$ref": "#"
+ "type": "string"
  }
  except Exception as e:
  print(e)
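Note: the `"$ref": "#"` → `{"type": "string"}` change above replaces a recursive schema self-reference with a concrete item type, and the new guard only emits `required` when it is non-empty. A hedged sketch of this param-spec-to-JSON-Schema conversion (the helper name and param format are illustrative, not PyGPT's exact code):

```python
# Illustrative sketch: converting command param specs to JSON Schema.
from typing import Any, Dict, List

TYPE_MAP = {"str": "string", "int": "integer", "bool": "boolean", "float": "number"}

def params_to_schema(params: List[Dict[str, Any]]) -> Dict[str, Any]:
    schema: Dict[str, Any] = {"type": "object", "properties": {}, "required": []}
    for param in params:
        prop: Dict[str, Any] = {"description": param.get("description", "")}
        ptype = param.get("type", "str")
        if ptype == "list":
            prop["type"] = "array"
            prop["items"] = {"type": "string"}  # concrete item type, not {"$ref": "#"}
        else:
            prop["type"] = TYPE_MAP.get(ptype, "string")
        schema["properties"][param["name"]] = prop
        if param.get("required"):
            schema["required"].append(param["name"])
    if not schema["required"]:
        schema.pop("required")  # mirrors the len(required) > 0 guard added above
    return schema

# Example:
# params_to_schema([{"name": "paths", "type": "list", "required": True}])
```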
pygpt_net/data/config/config.json CHANGED
@@ -1,8 +1,8 @@
  {
  "__meta__": {
- "version": "2.5.15",
- "app.version": "2.5.15",
- "updated_at": "2025-06-24T00:00:00"
+ "version": "2.5.17",
+ "app.version": "2.5.17",
+ "updated_at": "2025-06-25T00:00:00"
  },
  "access.audio.event.speech": false,
  "access.audio.event.speech.disabled": [],
@@ -198,7 +198,7 @@
  "llama.idx.auto.index": "base",
  "llama.idx.auto.modes": "chat,completion,vision,assistant,langchain,llama_index,agent",
  "llama.idx.chat.mode": "context",
- "llama.idx.current": "base",
+ "llama.idx.current": null,
  "llama.idx.custom_meta": [
  {
  "extensions": "*",
@@ -307,6 +307,7 @@
  "render.open_gl": false,
  "render.plain": false,
  "render.code_syntax": "github-dark",
+ "remote_tools.web_search": true,
  "send_clear": true,
  "send_mode": 2,
  "store_history": true,
pygpt_net/data/config/models.json CHANGED
@@ -1,8 +1,8 @@
  {
  "__meta__": {
- "version": "2.5.15",
- "app.version": "2.5.15",
- "updated_at": "2025-06-24T00:00:00"
+ "version": "2.5.17",
+ "app.version": "2.5.17",
+ "updated_at": "2025-06-25T00:00:00"
  },
  "items": {
  "claude-3-5-sonnet-20240620": {
pygpt_net/data/config/settings.json CHANGED
@@ -1198,6 +1198,19 @@
  "step": null,
  "advanced": false
  },
+ "remote_tools.web_search": {
+ "section": "remote_tools",
+ "type": "bool",
+ "slider": false,
+ "label": "settings.remote_tools.web_search",
+ "description": "settings.remote_tools.web_search.desc",
+ "value": true,
+ "min": null,
+ "max": null,
+ "multiplier": null,
+ "step": null,
+ "advanced": false
+ },
  "llama.idx.list": {
  "section": "llama-index",
  "type": "dict",
pygpt_net/data/config/settings_section.json CHANGED
@@ -14,6 +14,9 @@
  "ctx": {
  "label": "settings.section.ctx"
  },
+ "remote_tools": {
+ "label": "settings.section.remote_tools"
+ },
  "model": {
  "label": "settings.section.model"
  },
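Note: together, the config entries above add a `remote_tools` settings section with one boolean. A small sketch of how such a flag typically gates the tool list at request time (the plain `config` dict stands in for PyGPT's config store):

```python
# Illustrative sketch: gating a built-in remote tool on a config flag.
config = {"remote_tools.web_search": True}  # stand-in for the app config store

tools = []  # function tools would be appended here first
if config.get("remote_tools.web_search", False):
    # Responses API built-in tool; the preview tool needs no parameters
    tools.append({"type": "web_search_preview"})

print(tools)  # [{'type': 'web_search_preview'}]
```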
pygpt_net/data/locale/locale.de.ini CHANGED
@@ -899,6 +899,8 @@ settings.prompt.expert = Experte: Masteraufforderung
  settings.prompt.expert.desc = Anweisung (Systemaufforderung) für den Master-Experten, wie man Sklavenexperten handhabt. Anweisungen für Sklavenexperten werden aus ihren Voreinstellungen gegeben.
  settings.prompt.img = DALL-E: Bildgenerierung
  settings.prompt.img.desc = Aufforderung zur Erzeugung von Anweisungen für DALL-E (falls Rohmodus deaktiviert ist). Nur im Bildmodus.
+ settings.remote_tools.web_search = Web Search
+ settings.remote_tools.web_search.desc = Aktiviert das `web_search` Remote-Tool im Chat-Modus / über die OpenAI Responses API.
  settings.render.code_syntax = Stil der Syntaxhervorhebung für Code
  settings.render.plain = Markdown-Formatierung in der Ausgabe deaktivieren (RAW-Textmodus)
  settings.restart.required = Ein Neustart der Anwendung ist notwendig, damit die Änderungen für diese Option übernommen werden.
@@ -918,6 +920,7 @@ settings.section.images = Bilder
  settings.section.layout = Layout
  settings.section.llama_index = Indizes (LlamaIndex)
  settings.section.model = Modelle
+ settings.section.remote_tools = Remote tools
  settings.section.updates = Aktualisierungen
  settings.section.vision = Vision
  settings.store_history = Verlauf speichern
pygpt_net/data/locale/locale.en.ini CHANGED
@@ -1127,6 +1127,8 @@ settings.prompt.expert = Expert: Master prompt
  settings.prompt.expert.desc = Instruction (system prompt) for Master expert on how to handle slave experts. Instructions for slave experts are given from their presets.
  settings.prompt.img = DALL-E: image generation
  settings.prompt.img.desc = Prompt for generating prompts for DALL-E (if raw-mode is disabled). Image mode only.
+ settings.remote_tools.web_search = Web Search
+ settings.remote_tools.web_search.desc = Enable `web_search` remote tool in Chat mode / via OpenAI Responses API.
  settings.render.code_syntax = Code syntax highlight
  settings.render.engine = Rendering engine
  settings.render.open_gl = OpenGL hardware acceleration
@@ -1162,6 +1164,7 @@ settings.section.llama-index.store = Vector Store
  settings.section.llama-index.update = Update
  settings.section.model = Models
  settings.section.prompts = Prompts
+ settings.section.remote_tools = Remote tools
  settings.section.tab.general = General
  settings.section.updates = Updates
  settings.section.vision = Vision
pygpt_net/data/locale/locale.es.ini CHANGED
@@ -899,6 +899,8 @@ settings.prompt.expert = Experto: Master prompt
  settings.prompt.expert.desc = Instrucción (prompt del sistema) para el experto Master cómo manejar a los expertos subordinados. Las instrucciones para los expertos subordinados se dan desde sus presets.
  settings.prompt.img = DALL-E: generación de imagen
  settings.prompt.img.desc = Mensaje para generar comandos para DALL-E (si el modo crudo está desactivado). Solo modo de imagen.
+ settings.remote_tools.web_search = Web Search
+ settings.remote_tools.web_search.desc = Habilitar la herramienta remota `web_search` en modo Chat / a través de la API de OpenAI Responses.
  settings.render.code_syntax = Estilo de resaltado de sintaxis de código
  settings.render.plain = Desactivar el formato markdown en la salida (modo de texto plano RAW)
  settings.restart.required = Es necesario reiniciar la aplicación para que los cambios en esta opción se apliquen.
@@ -918,6 +920,7 @@ settings.section.images = Imágenes
  settings.section.layout = Diseño
  settings.section.llama_index = Índices (LlamaIndex)
  settings.section.model = Modelos
+ settings.section.remote_tools = Remote tools
  settings.section.updates = Actualizaciones
  settings.section.vision = Visión
  settings.store_history = Almacenar historial
pygpt_net/data/locale/locale.fr.ini CHANGED
@@ -899,6 +899,8 @@ settings.prompt.expert = Expert : Master prompt
  settings.prompt.expert.desc = Instruction (prompt système) pour l'expert Master sur comment gérer les experts esclaves. Les instructions pour les experts esclaves sont données à partir de leurs presets.
  settings.prompt.img = DALL-E: génération d'image
  settings.prompt.img.desc = Prompt pour générer des commandes pour DALL-E (si le mode brut est désactivé). Mode image uniquement.
+ settings.remote_tools.web_search = Web Search
+ settings.remote_tools.web_search.desc = Activer l'outil distant `web_search` en mode Chat / via l'API OpenAI Responses.
  settings.render.code_syntax = Style de mise en évidence de la syntaxe du code
  settings.render.plain = Désactiver le formatage markdown dans la sortie (mode texte brut RAW)
  settings.restart.required = Un redémarrage de l'application est requis pour que les modifications de cette option soient appliquées.
@@ -918,6 +920,7 @@ settings.section.images = Images
  settings.section.layout = Mise en page
  settings.section.llama_index = Indexes (LlamaIndex)
  settings.section.model = Modèles
+ settings.section.remote_tools = Remote tools
  settings.section.updates = Mises à jour
  settings.section.vision = Vision
  settings.store_history = Stocker l'historique
pygpt_net/data/locale/locale.it.ini CHANGED
@@ -900,6 +900,8 @@ settings.prompt.expert = Esperto: Master prompt
  settings.prompt.expert.desc = Istruzione (prompt del sistema) per l'esperto Master su come gestire gli esperti subalterni. Le istruzioni per gli esperti subalterni sono date dalle loro preimpostazioni.
  settings.prompt.img = DALL-E: generazione immagine
  settings.prompt.img.desc = Prompt per generare comandi per DALL-E (se la modalità grezza è disabilitata). Solo modalità immagine.
+ settings.remote_tools.web_search = Web Search
+ settings.remote_tools.web_search.desc = Abilita lo strumento remoto `web_search` in modalità Chat / tramite OpenAI Responses API.
  settings.render.code_syntax = Stile di evidenziazione della sintassi del codice
  settings.restart.required = È necessario riavviare l'applicazione affinché le modifiche a questa opzione siano applicate.
  settings.section.access = Accessibilità
@@ -918,6 +920,7 @@ settings.section.images = Immagini
  settings.section.layout = Layout
  settings.section.llama_index = Indici (LlamaIndex)
  settings.section.model = Modelli
+ settings.section.remote_tools = Remote tools
  settings.section.updates = Aggiornamenti
  settings.section.vision = Visione
  settings.store_history = Conserva la cronologia
pygpt_net/data/locale/locale.pl.ini CHANGED
@@ -900,6 +900,8 @@ settings.prompt.expert = Ekspert: Główna wskazówka
  settings.prompt.expert.desc = Instrukcja (systemowa wskazówka) dla głównego eksperta, jak obsługiwać ekspertów pomocniczych. Instrukcje dla ekspertów pomocniczych są podawane z ich ustawień.
  settings.prompt.img = DALL-E: generowanie obrazu
  settings.prompt.img.desc = Prompt do generowania poleceń dla DALL-E (jeśli surowy tryb jest wyłączony). Tylko tryb obrazu.
+ settings.remote_tools.web_search = Web Search
+ settings.remote_tools.web_search.desc = Włącz zdalne narzędzie `web_search` w trybie czatu / używając OpenAI Responses API.
  settings.render.code_syntax = Styl podświetlenia składni kodu
  settings.render.plain = Wyłącz formatowanie markdown w wyjściu (tryb plain-text)
  settings.restart.required = Restart aplikacji jest wymagany, aby zmiany dla tej opcji zostały wprowadzone.
@@ -919,6 +921,7 @@ settings.section.images = Obrazy
  settings.section.layout = Wygląd
  settings.section.llama_index = Indeksy (LlamaIndex)
  settings.section.model = Modele
+ settings.section.remote_tools = Zdalne narzędzia
  settings.section.updates = Aktualizacje
  settings.section.vision = Wizja
  settings.store_history = Zapisuj historię
pygpt_net/data/locale/locale.uk.ini CHANGED
@@ -899,6 +899,8 @@ settings.prompt.expert = Експерт: Основний запит
  settings.prompt.expert.desc = Інструкція (системний запит) для ведучого експерта, як керувати підеекспертами. Інструкції для підеекспертів даються з їхніх налаштувань.
  settings.prompt.img = DALL-E: генерація зображення
  settings.prompt.img.desc = Підказка для генерації команддля DALL-E (якщо вимкнено сирівний режим). Тільки режим зображення.
+ settings.remote_tools.web_search = Web Search
+ settings.remote_tools.web_search.desc = Увімкнути віддалений інструмент `web_search` у режимі чату / через API відповідей OpenAI.
  settings.render.code_syntax = Стиль підсвічування синтаксису коду
  settings.render.plain = Вимкнути форматування markdown у виводі (режим простого тексту RAW)
  settings.restart.required = Для внесення змін у цю опцію необхідно перезапустити програму.
@@ -918,6 +920,7 @@ settings.section.images = Зображення
  settings.section.layout = Макет
  settings.section.llama_index = Індекси (LlamaIndex)
  settings.section.model = Моделі
+ settings.section.remote_tools = Remote tools
  settings.section.updates = Оновлення
  settings.section.vision = Візія
  settings.store_history = Зберігати історію
pygpt_net/data/locale/locale.zh.ini CHANGED
@@ -1008,6 +1008,8 @@ settings.prompt.expert = 专家:主提示
  settings.prompt.expert.desc = 对主专家如何处理奴隶专家的指令(系统提示)。奴隶专家的指令根据他们的预设给出。
  settings.prompt.img = DALL-E:生成图像
  settings.prompt.img.desc = 提示用于生成DALL-E的命令(如果原始模式被禁用)。仅图像模式。
+ settings.remote_tools.web_search = Web Search
+ settings.remote_tools.web_search.desc = 在聊天模式/通过 OpenAI Responses API 启用 `web_search` 远程工具。
  settings.render.code_syntax = 代码语法高亮样式
  settings.render.plain = 在輸出中禁用markdown格式化(RAW純文本模式)
  settings.restart.required = 此選項生效需要重新啟動應用程序。
@@ -1032,6 +1034,7 @@ settings.section.llama-index.indexing = 索引
  settings.section.llama-index.store = 向量存儲
  settings.section.llama-index.update = 更新
  settings.section.model = 模型
+ settings.section.remote_tools = Remote tools
  settings.section.tab.general = 一般設置
  settings.section.updates = 更新
  settings.section.vision = 視覺
pygpt_net/item/ctx.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.06.23 19:00:00 #
+ # Updated Date: 2025.06.25 02:00:00 #
  # ================================================== #

  import copy
@@ -75,6 +75,7 @@ class CtxItem:
  self.sub_call = False # is sub call
  self.sub_reply = False # sub call reply
  self.hidden = False # hidden context
+ self.use_responses_api = False # use responses API format
  self.pid = 0
  self.audio_id = None
  self.audio_output = None
pygpt_net/plugin/cmd_files/__init__.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2024.12.16 01:00:00 #
+ # Updated Date: 2025.06.25 02:00:00 #
  # ================================================== #

  import os
@@ -141,7 +141,7 @@ class Plugin(BasePlugin):
  """
  # use_loaders = False
  if use_loaders:
- content, docs = str(self.window.core.idx.indexing.read_text_content(path))
+ content, docs = self.window.core.idx.indexing.read_text_content(path)
  return content
  else:
  data = ""
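Note: the fix above removes a stray `str()` wrapped around a call that returns a `(content, docs)` tuple; stringifying the tuple before unpacking makes Python iterate the characters of the repr instead of the two values. A short reproduction:

```python
# Reproduction of the bug fixed above: str() collapses the tuple into a
# single string, so unpacking splits characters instead of (content, docs).
ok_content, ok_docs = ("file text", [])      # fine: two values
try:
    content, docs = str(("file text", []))   # "('file text', [])"
except ValueError as e:
    print(e)                                 # too many values to unpack
```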
pygpt_net/plugin/cmd_files/worker.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.01.17 13:00:00 #
+ # Updated Date: 2025.06.25 02:00:00 #
  # ================================================== #

  import fnmatch
@@ -847,7 +847,7 @@ class Worker(BaseWorker):
  path,
  )

- def read_files(self, paths: list) -> (dict, str):
+ def read_files(self, paths: list) -> (list, list):
  """
  Read files from directory

pygpt_net/provider/core/config/patch.py CHANGED
@@ -1855,6 +1855,13 @@ class Patch:
  data["api_endpoint_perplexity"] = "https://api.perplexity.ai"
  updated = True

+ # < 2.5.17
+ if old < parse_version("2.5.17"):
+ print("Migrating config from < 2.5.17...")
+ if 'remote_tools.web_search' not in data:
+ data["remote_tools.web_search"] = True
+ updated = True
+
  # update file
  migrated = False
  if updated:
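Note: the migration above follows the file's usual pattern: compare the stored config version against a threshold and backfill missing keys with defaults. A self-contained sketch of the same pattern (the `data` dict stands in for the loaded config):

```python
# Illustrative sketch of the version-gated config migration pattern above.
from packaging.version import parse as parse_version

def migrate(data: dict, old_version: str) -> bool:
    """Backfill new keys when upgrading from a pre-2.5.17 config."""
    updated = False
    if parse_version(old_version) < parse_version("2.5.17"):
        if "remote_tools.web_search" not in data:
            data["remote_tools.web_search"] = True  # default: tool enabled
            updated = True
    return updated

cfg = {}
print(migrate(cfg, "2.5.15"), cfg)  # True {'remote_tools.web_search': True}
```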
pygpt_net/provider/gpt/__init__.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.03.02 19:00:00 #
+ # Updated Date: 2025.06.25 02:00:00 #
  # ================================================== #

  from httpx_socks import SyncProxyTransport
@@ -29,6 +29,7 @@ from .assistants import Assistants
  from .chat import Chat
  from .completion import Completion
  from .image import Image
+ from .responses import Responses
  from .store import Store
  from .summarizer import Summarizer
  from .vision import Vision
@@ -47,6 +48,7 @@ class Gpt:
  self.chat = Chat(window)
  self.completion = Completion(window)
  self.image = Image(window)
+ self.responses = Responses(window)
  self.store = Store(window)
  self.summarizer = Summarizer(window)
  self.vision = Vision(window)
@@ -108,6 +110,12 @@ class Gpt:
  ai_name = ctx.output_name
  thread_id = ctx.thread # from ctx

+ # --- Responses API ---- /beta/
+ use_responses_api = False
+ if mode == MODE_CHAT:
+ use_responses_api = True # use responses API for Chat mode only (for now)
+ ctx.use_responses_api = use_responses_api # set in context
+
  # get model id
  model_id = None
  if model is not None:
@@ -128,20 +136,30 @@ class Gpt:
  )
  used_tokens = self.completion.get_used_tokens()

- # chat (OpenAI) | research (Perplexity)
+ # chat, audio (OpenAI) | research (Perplexity)
  elif mode in [
  MODE_CHAT,
  MODE_AUDIO,
  MODE_RESEARCH
  ]:
- response = self.chat.send(
- context=context,
- extra=extra,
- )
- if hasattr(response, "citations"):
- if response.citations:
- ctx.urls = response.citations
- used_tokens = self.chat.get_used_tokens()
+ # responses API
+ if use_responses_api:
+ response = self.responses.send(
+ context=context,
+ extra=extra,
+ )
+ used_tokens = self.responses.get_used_tokens()
+ else:
+ # chat completion API
+ response = self.chat.send(
+ context=context,
+ extra=extra,
+ )
+ if hasattr(response, "citations"):
+ if response.citations:
+ ctx.urls = response.citations
+ used_tokens = self.chat.get_used_tokens()
+
  self.vision.append_images(ctx) # append images to ctx if provided

  # image
@@ -184,7 +202,7 @@

  # if stream
  if stream:
- ctx.stream = response
+ ctx.stream = response # generator
  ctx.set_output("", ai_name) # set empty output
  ctx.input_tokens = used_tokens # get from input tokens calculation
  return True
@@ -206,13 +224,21 @@
  MODE_VISION,
  MODE_RESEARCH
  ]:
- if response.choices[0]:
- if response.choices[0].message.content:
- output = response.choices[0].message.content.strip()
- elif response.choices[0].message.tool_calls:
- ctx.tool_calls = self.window.core.command.unpack_tool_calls(
- response.choices[0].message.tool_calls,
+ if use_responses_api:
+ if response.output_text:
+ output = response.output_text.strip()
+ if response.output:
+ ctx.tool_calls = self.window.core.command.unpack_tool_calls_responses(
+ response.output,
  )
+ else:
+ if response.choices[0]:
+ if response.choices[0].message.content:
+ output = response.choices[0].message.content.strip()
+ elif response.choices[0].message.tool_calls:
+ ctx.tool_calls = self.window.core.command.unpack_tool_calls(
+ response.choices[0].message.tool_calls,
+ )
  # audio
  elif mode in [MODE_AUDIO]:
  if response.choices[0]:
@@ -234,10 +260,17 @@ class Gpt:
  )

  ctx.set_output(output, ai_name)
- ctx.set_tokens(
- response.usage.prompt_tokens,
- response.usage.completion_tokens,
- )
+
+ if not use_responses_api:
+ ctx.set_tokens(
+ response.usage.prompt_tokens,
+ response.usage.completion_tokens,
+ )
+ else:
+ ctx.set_tokens(
+ response.usage.input_tokens,
+ response.usage.output_tokens,
+ )
  return True

  def quick_call(self, context: BridgeContext, extra: dict = None) -> str:
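Note: the token-accounting branch above exists because the two APIs report usage under different field names: Chat Completions uses `usage.prompt_tokens`/`usage.completion_tokens`, while the Responses API uses `usage.input_tokens`/`usage.output_tokens`. A tiny illustrative helper (not PyGPT's code) that normalizes both shapes:

```python
# Illustrative helper: normalize token usage across both OpenAI APIs.
def extract_usage(response, use_responses_api: bool) -> tuple:
    if use_responses_api:
        return response.usage.input_tokens, response.usage.output_tokens
    return response.usage.prompt_tokens, response.usage.completion_tokens

# in_tokens, out_tokens = extract_usage(response, ctx.use_responses_api)
```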
pygpt_net/provider/gpt/responses.py ADDED
@@ -0,0 +1,280 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ # ================================================== #
+ # This file is a part of PYGPT package #
+ # Website: https://pygpt.net #
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
+ # MIT License #
+ # Created By : Marcin Szczygliński #
+ # Updated Date: 2025.06.25 02:00:00 #
+ # ================================================== #
+
+ import json
+ import time
+ from typing import Optional, Dict, Any, List
+
+ from pygpt_net.core.types import (
+ MODE_CHAT,
+ MODE_VISION,
+ MODE_AUDIO,
+ MODE_RESEARCH,
+ )
+ from pygpt_net.core.bridge.context import BridgeContext, MultimodalContext
+ from pygpt_net.item.ctx import CtxItem
+ from pygpt_net.item.model import ModelItem
+
+ from .utils import sanitize_name
+ from pygpt_net.item.attachment import AttachmentItem
+
+
+ class Responses:
+ def __init__(self, window=None):
+ """
+ Responses API wrapper
+
+ :param window: Window instance
+ """
+ self.window = window
+ self.input_tokens = 0
+ self.audio_prev_id = None
+ self.audio_prev_expires_ts = None
+
+ def send(
+ self,
+ context: BridgeContext,
+ extra: Optional[Dict[str, Any]] = None
+ ):
+ """
+ Call OpenAI API for chat
+
+ :param context: Bridge context
+ :param extra: Extra arguments
+ :return: response or stream chunks
+ """
+ prompt = context.prompt
+ stream = context.stream
+ max_tokens = int(context.max_tokens or 0)
+ system_prompt = context.system_prompt
+ mode = context.mode
+ model = context.model
+ functions = context.external_functions
+ attachments = context.attachments
+ multimodal_ctx = context.multimodal_ctx
+
+ ctx = context.ctx
+ if ctx is None:
+ ctx = CtxItem() # create empty context
+ user_name = ctx.input_name # from ctx
+ ai_name = ctx.output_name # from ctx
+
+ client = self.window.core.gpt.get_client(mode)
+
+ # build chat messages
+ messages = self.build(
+ prompt=prompt,
+ system_prompt=system_prompt,
+ model=model,
+ history=context.history,
+ attachments=attachments,
+ ai_name=ai_name,
+ user_name=user_name,
+ multimodal_ctx=multimodal_ctx,
+ )
+ msg_tokens = self.window.core.tokens.from_messages(
+ messages,
+ model.id,
+ )
+ # check if max tokens not exceeded
+ if max_tokens > 0:
+ if msg_tokens + int(max_tokens) > model.ctx:
+ max_tokens = model.ctx - msg_tokens - 1
+ if max_tokens < 0:
+ max_tokens = 0
+
+ # extra API kwargs
+ response_kwargs = {}
+
+ # tools / functions
+ tools = []
+ if functions is not None and isinstance(functions, list):
+ for function in functions:
+ if str(function['name']).strip() == '' or function['name'] is None:
+ continue
+ params = {}
+ if function['params'] is not None and function['params'] != "":
+ params = json.loads(function['params']) # unpack JSON from string
+ tools.append({
+ "type": "function",
+ "name": function['name'],
+ "parameters": params,
+ "description": function['desc'],
+ })
+
+ # extra arguments, o3 only
+ if model.extra and "reasoning_effort" in model.extra:
+ response_kwargs['reasoning'] = {}
+ response_kwargs['reasoning']['effort'] = model.extra["reasoning_effort"]
+
+ # extend tools with external tools
+ if not model.id.startswith("o1") and not model.id.startswith("o3"):
+ if self.window.core.config.get("remote_tools.web_search", False):
+ tools.append({"type": "web_search_preview"})
+
+ # tool calls are not supported for o1-mini and o1-preview
+ if (model.id is not None
+ and model.id not in ["o1-mini", "o1-preview"]):
+ if len(tools) > 0:
+ response_kwargs['tools'] = tools
+
+ # audio mode
+ if mode in [MODE_AUDIO]:
+ stream = False
+ voice_id = "alloy"
+ tmp_voice = self.window.core.plugins.get_option("audio_output", "openai_voice")
+ if tmp_voice:
+ voice_id = tmp_voice
+ response_kwargs["modalities"] = ["text", "audio"]
+ response_kwargs["audio"] = {
+ "voice": voice_id,
+ "format": "wav"
+ }
+
+ response = client.responses.create(
+ input=messages,
+ model=model.id,
+ stream=stream,
+ **response_kwargs,
+ )
+ return response
+
+ def build(
+ self,
+ prompt: str,
+ system_prompt: str,
+ model: ModelItem,
+ history: Optional[List[CtxItem]] = None,
+ attachments: Optional[Dict[str, AttachmentItem]] = None,
+ ai_name: Optional[str] = None,
+ user_name: Optional[str] = None,
+ multimodal_ctx: Optional[MultimodalContext] = None,
+ ) -> list:
+ """
+ Build list of chat messages
+
+ :param prompt: user prompt
+ :param system_prompt: system prompt
+ :param history: history
+ :param model: model item
+ :param attachments: attachments
+ :param ai_name: AI name
+ :param user_name: username
+ :param multimodal_ctx: Multimodal context
+ :return: messages list
+ """
+ messages = []
+
+ # tokens config
+ mode = MODE_CHAT
+ allowed_system = True
+ if (model.id is not None
+ and model.id in ["o1-mini", "o1-preview"]):
+ allowed_system = False
+
+ used_tokens = self.window.core.tokens.from_user(
+ prompt,
+ system_prompt,
+ ) # threshold and extra included
+ max_ctx_tokens = self.window.core.config.get('max_total_tokens') # max context window
+
+ # fit to max model tokens
+ if max_ctx_tokens > model.ctx:
+ max_ctx_tokens = model.ctx
+
+ # input tokens: reset
+ self.reset_tokens()
+
+ # append system prompt
+ if allowed_system:
+ if system_prompt is not None and system_prompt != "":
+ messages.append({"role": "developer", "content": system_prompt})
+
+ # append messages from context (memory)
+ if self.window.core.config.get('use_context'):
+ items = self.window.core.ctx.get_history(
+ history,
+ model.id,
+ mode,
+ used_tokens,
+ max_ctx_tokens,
+ )
+ for item in items:
+ # input
+ if item.final_input is not None and item.final_input != "":
+ messages.append({
+ "role": "user",
+ "content": item.final_input,
+ })
+
+ # output
+ if item.final_output is not None and item.final_output != "":
+ msg = {
+ "role": "assistant",
+ "content": item.final_output,
+ }
+ # append previous audio ID
+ if MODE_AUDIO in model.mode:
+ if item.audio_id:
+ # at first check expires_at - expired audio throws error in API
+ current_timestamp = time.time()
+ audio_timestamp = int(item.audio_expires_ts) if item.audio_expires_ts else 0
+ if audio_timestamp and audio_timestamp > current_timestamp:
+ msg["audio"] = {
+ "id": item.audio_id
+ }
+ elif self.audio_prev_id:
+ current_timestamp = time.time()
+ audio_timestamp = int(self.audio_prev_expires_ts) if self.audio_prev_expires_ts else 0
+ if audio_timestamp and audio_timestamp > current_timestamp:
+ msg["audio"] = {
+ "id": self.audio_prev_id
+ }
+ messages.append(msg)
+
+ # use vision and audio if available in current model
+ content = str(prompt)
+ if MODE_VISION in model.mode:
+ content = self.window.core.gpt.vision.build_content(
+ content=content,
+ attachments=attachments,
+ responses_api=True,
+ )
+ if MODE_AUDIO in model.mode:
+ content = self.window.core.gpt.audio.build_content(
+ content=content,
+ multimodal_ctx=multimodal_ctx,
+ )
+
+ # append current prompt
+ messages.append({
+ "role": "user",
+ "content": content,
+ })
+
+ # input tokens: update
+ self.input_tokens += self.window.core.tokens.from_messages(
+ messages,
+ model.id,
+ )
+ return messages
+
+ def reset_tokens(self):
+ """Reset input tokens counter"""
+ self.input_tokens = 0
+
+ def get_used_tokens(self) -> int:
+ """
+ Get input tokens counter
+
+ :return: input tokens
+ """
+ return self.input_tokens
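Note: a minimal end-to-end sketch of the request shape that `Responses.send()` assembles above, stripped of PyGPT's history windowing and token accounting (assumes `openai>=1.91.0`; the model id and function tool are illustrative):

```python
# Minimal sketch of the request built by Responses.send() above.
from openai import OpenAI

client = OpenAI()

messages = [
    {"role": "developer", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What changed in pygpt-net 2.5.17?"},
]

tools = [
    {   # flat function-tool format used by the Responses API (name at top level)
        "type": "function",
        "name": "get_version",                 # illustrative function
        "description": "Return the app version",
        "parameters": {"type": "object", "properties": {}},
    },
    {"type": "web_search_preview"},            # built-in remote tool
]

response = client.responses.create(
    model="gpt-4o",
    input=messages,
    tools=tools,
    stream=False,
)
print(response.output_text)   # aggregated text output
```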
pygpt_net/provider/gpt/vision.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2024.12.14 22:00:00 #
+ # Updated Date: 2025.06.25 02:00:00 #
  # ================================================== #

  import base64
@@ -168,18 +168,26 @@ class Vision:
  self,
  content: Union[str, list],
  attachments: Optional[Dict[str, AttachmentItem]] = None,
+ responses_api: Optional[bool] = False,
  ) -> List[dict]:
  """
  Build vision content

  :param content: content (str or list)
  :param attachments: attachments (dict, optional)
+ :param responses_api: if True, use responses API format
  :return: List of contents
  """
+ type_text = "text"
+ type_image = "image_url"
+ if responses_api:
+ type_text = "input_text"
+ type_image = "input_image"
+
  if not isinstance(content, list):
  content = [
  {
- "type": "text",
+ "type": type_text,
  "text": str(content)
  }
  ]
@@ -193,14 +201,22 @@
  urls = self.extract_urls(prompt)
  if len(urls) > 0:
  for url in urls:
- content.append(
- {
- "type": "image_url",
- "image_url": {
- "url": url,
+ if not responses_api:
+ content.append(
+ {
+ "type": type_image,
+ "image_url": {
+ "url": url,
+ }
+ }
+ )
+ else:
+ content.append(
+ {
+ "type": type_image,
+ "image_url": url,
  }
- }
- )
+ )
  self.urls.append(url)

  # local images (attachments)
@@ -211,14 +227,22 @@
  # check if it's an image
  if self.is_image(attachment.path):
  base64_image = self.encode_image(attachment.path)
- content.append(
- {
- "type": "image_url",
- "image_url": {
- "url": f"data:image/jpeg;base64,{base64_image}",
+ if not responses_api:
+ content.append(
+ {
+ "type": type_image,
+ "image_url": {
+ "url": f"data:image/jpeg;base64,{base64_image}",
+ }
  }
- }
- )
+ )
+ else:
+ content.append(
+ {
+ "type": type_image,
+ "image_url": f"data:image/jpeg;base64,{base64_image}",
+ }
+ )
  self.attachments[id] = attachment.path
  attachment.consumed = True

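Note: the net effect of the new `responses_api` flag in `build_content()` above is that the same image is emitted in two different content shapes. A side-by-side sketch (the base64 payload is a placeholder):

```python
# Illustrative: the two content shapes produced by build_content() above.
b64 = "<base64-encoded image>"   # placeholder for the encoded attachment

chat_completions_item = {        # responses_api=False
    "type": "image_url",
    "image_url": {"url": f"data:image/jpeg;base64,{b64}"},
}

responses_api_item = {           # responses_api=True
    "type": "input_image",
    "image_url": f"data:image/jpeg;base64,{b64}",
}
```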
pygpt_net/provider/llms/ollama_custom.py CHANGED
@@ -299,6 +299,8 @@ class Ollama(FunctionCallingLLM):
  error_on_no_tool_call: bool = True,
  ) -> List[ToolSelection]:
  """Predict and call the tool."""
+ if response.message.additional_kwargs.get("tool_calls", []) is None:
+ response.message.additional_kwargs["tool_calls"] = []
  tool_calls = response.message.additional_kwargs.get("tool_calls", [])
  if len(tool_calls) < 1:
  if error_on_no_tool_call:
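Note: the guard above works around a classic `dict.get()` pitfall: the default applies only when the key is absent, so a key explicitly stored as `None` (which Ollama can emit for `tool_calls`) passes through and later breaks `len()`. A two-line illustration:

```python
# dict.get() only falls back to the default when the key is MISSING;
# a key stored with value None is returned as-is - the case guarded above.
kwargs = {"tool_calls": None}
print(kwargs.get("tool_calls", []))  # None -> len(...) would raise TypeError
```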
pygpt_net-2.5.17.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: pygpt-net
- Version: 2.5.15
+ Version: 2.5.17
  Summary: Desktop AI Assistant powered by models: OpenAI o1, GPT-4o, GPT-4, GPT-4 Vision, GPT-3.5, DALL-E 3, Llama 3, Mistral, Gemini, Claude, DeepSeek, Bielik, and other models supported by Langchain, Llama Index, and Ollama. Features include chatbot, text completion, image generation, vision analysis, speech-to-text, internet access, file handling, command execution and more.
  License: MIT
  Keywords: py_gpt,py-gpt,pygpt,desktop,app,o1,gpt,gpt4,gpt-4o,gpt-4v,gpt3.5,gpt-4,gpt-4-vision,gpt-3.5,llama3,mistral,gemini,deepseek,bielik,claude,tts,whisper,vision,chatgpt,dall-e,chat,chatbot,assistant,text completion,image generation,ai,api,openai,api key,langchain,llama-index,ollama,presets,ui,qt,pyside
@@ -71,7 +71,7 @@ Requires-Dist: llama-index-vector-stores-redis (>=0.4.0,<0.5.0)
  Requires-Dist: mss (>=9.0.2,<10.0.0)
  Requires-Dist: nbconvert (>=7.16.6,<8.0.0)
  Requires-Dist: onnxruntime (==1.20.1)
- Requires-Dist: openai (>=1.59.9,<1.60.0)
+ Requires-Dist: openai (>=1.91.0,<2.0.0)
  Requires-Dist: opencv-python (>=4.11.0.86,<5.0.0.0)
  Requires-Dist: packaging (>=24.2,<25.0)
  Requires-Dist: pandas (>=2.2.3,<3.0.0)
@@ -100,7 +100,7 @@ Description-Content-Type: text/markdown

  [![pygpt](https://snapcraft.io/pygpt/badge.svg)](https://snapcraft.io/pygpt)

- Release: **2.5.15** | build: **2025-06-23** | Python: **>=3.10, <3.13**
+ Release: **2.5.17** | build: **2025-06-25** | Python: **>=3.10, <3.13**

  > Official website: https://pygpt.net | Documentation: https://pygpt.readthedocs.io
  >
@@ -4124,6 +4124,17 @@ may consume additional tokens that are not displayed in the main window.

  ## Recent changes:

+ **2.5.17 (2025-06-25)**
+
+ - Added settings to enable/disable Remote Tools via the Responses API in Chat mode: Config -> Settings -> Remote tools. Currently only the web-search-preview tool is available; the rest are coming soon.
+ - Fixed context summarization in the Ollama provider.
+
+ **2.5.16 (2025-06-25)**
+
+ - OpenAI API upgraded to 1.91.0.
+ - Chat mode migrated to the Responses API with a native built-in web search tool. (beta)
+ - Fixed the file_read tool in the I/O plugin.
+
  **2.5.15 (2025-06-24)**

  - Added Ollama models importer in "Settings -> Models -> Import from Ollama".
pygpt_net-2.5.17.dist-info/RECORD CHANGED
@@ -1,6 +1,6 @@
- pygpt_net/CHANGELOG.txt,sha256=Imz1HnV0a_kN67bUws1G523zwe2jA-ChvPsGBEm_4fM,83588
+ pygpt_net/CHANGELOG.txt,sha256=DeRVhaz-U56XcrMvkwVKFIBaBTOowSxRI8zlJ4LaqBA,84033
  pygpt_net/LICENSE,sha256=dz9sfFgYahvu2NZbx4C1xCsVn9GVer2wXcMkFRBvqzY,1146
- pygpt_net/__init__.py,sha256=sMHfKGrSwrMnY9ERiLfK_atiOtbygFCZ_ahbU6jWPM8,1373
+ pygpt_net/__init__.py,sha256=jlnpVlIhAHZhMti33ojHl73UPhBNI1XiD8QFILXDz58,1373
  pygpt_net/app.py,sha256=XXjn9XaKHGRcsHN8mMuqbRHAg8_Da0GLmACUU9ddjBc,16217
  pygpt_net/config.py,sha256=Qc1FOBtTf3O6A6-6KoqUGtoJ0u8hXQeowvCVbZFwtik,16405
  pygpt_net/container.py,sha256=BemiVZPpPNIzfB-ZvnZeeBPFu-AcX2c30OqYFylEjJc,4023
@@ -35,7 +35,7 @@ pygpt_net/controller/chat/input.py,sha256=1r-zIWwFIdc0IOuC1rHGZYCZm9tsSeLEsrrlcW
  pygpt_net/controller/chat/output.py,sha256=VuziVuI9Lj_4kZmTWvXg8t2tq4w9uD7J1g2MqlMCV6s,9272
  pygpt_net/controller/chat/render.py,sha256=h23QCvMDIAaCpInqwwADa4G43sSpSn-CE5celnk1LSc,17206
  pygpt_net/controller/chat/response.py,sha256=UnTnnn2on-Qg2_T_QcQcklTCcuq6XhyLLxs1fn-D9Tg,9450
- pygpt_net/controller/chat/stream.py,sha256=FSGDWC1OR3lWQ7sKyiU0oQy4D-Qv_3zWpVvbQ6bTqNc,8033
+ pygpt_net/controller/chat/stream.py,sha256=vgjZYruBIMJ7V0udhVOf_pnlIuNDmzXRimToQ2i95WM,10496
  pygpt_net/controller/chat/text.py,sha256=nDiHuKyuRmnDWK0YCsdMhd2k_5zvSSrNWNc9y6FWi2g,10316
  pygpt_net/controller/chat/vision.py,sha256=OFodxDRleFqY-DVfEfgNn1mpa60-ZWEBwUlu25oJwmw,2884
  pygpt_net/controller/command/__init__.py,sha256=sUvnvsKISkHTrbv7woQQ8r4SAGDR8Gy85H42q8eAg78,5671
@@ -128,13 +128,13 @@ pygpt_net/core/audio/context.py,sha256=2XpXWhDC09iUvc0FRMq9BF2_rnQ60ZG4Js6LbO5Mo
  pygpt_net/core/audio/whisper.py,sha256=WZ_fNQ06s1NBxyoYB-lTFqDO6ARcnq9MZFekRaTNxTo,993
  pygpt_net/core/bridge/__init__.py,sha256=4qEZJkMIe2o861ukwAlFy0ba_ri8sqx4nwLhUZXld0g,10007
  pygpt_net/core/bridge/context.py,sha256=zIqbbFyZYsU5JEJGvwBg07u9QeeMUKsdTnURyp8tR4Y,4351
- pygpt_net/core/bridge/worker.py,sha256=aq0xA6LCvEK7BHTx3I1vziZpFtE29IHl-19yzsEB7mE,5817
+ pygpt_net/core/bridge/worker.py,sha256=8o8HmnjtoImHFFPOfzymePPgmVUPZoFNHFd0BYUHV3c,5885
  pygpt_net/core/calendar/__init__.py,sha256=ao9kQk6Xjse95m1TbL1Mlbo1k1Q8D9eGc10L-71G9TY,7227
  pygpt_net/core/camera/__init__.py,sha256=iJ7ZIQPi3nFb5FtvH8Rig4v9pjRgccrHzSlY_ua0B_g,4077
  pygpt_net/core/chain/__init__.py,sha256=C7Xm88bRblcyM4e0wZMFG-6SQCdw_frXN9kqnWzce60,3541
  pygpt_net/core/chain/chat.py,sha256=5LxPWHkocjrIAAwrdDH1ss6knAnh4_owfbHPsOQYSws,5238
  pygpt_net/core/chain/completion.py,sha256=GGRA-q6sQgPnSibiwHBwk7jgT0MgOkka1_jK2-IiBPg,5698
- pygpt_net/core/command/__init__.py,sha256=Yj0nL_gtHjSTBbw6A-jpgTDz6JgE86FvfH82eqtM40g,24279
+ pygpt_net/core/command/__init__.py,sha256=MIA8xR1opjL61Be8IyIgunoeLfgB7JYryDN00Tul15M,25243
  pygpt_net/core/ctx/__init__.py,sha256=WUV7OuQ7GXJ4GN75WfqV9v_VtKJhmgQ8uh8tfc2GPLc,43400
  pygpt_net/core/ctx/bag.py,sha256=-LRhttDRiQkw1Msl3kbGQYaY9w8zqn1o0miNRdqjHtQ,1286
  pygpt_net/core/ctx/container.py,sha256=tdPHPRfTi8yGY1MZGgFtYtx2lvc5K9OTqhjde16wivY,4232
@@ -248,8 +248,8 @@ pygpt_net/css_rc.py,sha256=i13kX7irhbYCWZ5yJbcMmnkFp_UfS4PYnvRFSPF7XXo,11349
  pygpt_net/data/audio/click_off.mp3,sha256=aNiRDP1pt-Jy7ija4YKCNFBwvGWbzU460F4pZWZDS90,65201
  pygpt_net/data/audio/click_on.mp3,sha256=qfdsSnthAEHVXzeyN4LlC0OvXuyW8p7stb7VXtlvZ1k,65201
  pygpt_net/data/audio/ok.mp3,sha256=LTiV32pEBkpUGBkKkcOdOFB7Eyt_QoP2Nv6c5AaXftk,32256
- pygpt_net/data/config/config.json,sha256=P3i9t6mZraxK1paZYhrgfTqFvXKg_oQTNLwH5E780jI,20083
- pygpt_net/data/config/models.json,sha256=jaOQ9iwCmEbqAVimJRNObJhXY1xiE4AJ4zdNM52pQt8,124691
+ pygpt_net/data/config/config.json,sha256=LDAZNmri0dzoRdc8DYQq0OF1NufvMiHfxbcbsOk4KzA,20116
+ pygpt_net/data/config/models.json,sha256=n6ZSt8jz7az-3beeJw6VxV-MemHmrqE1gKEIWBSH90I,124691
  pygpt_net/data/config/modes.json,sha256=-q4Q4RsyoF2rLgvS0On59zXK0m0ml_kx6I0hNfLZRDY,2085
  pygpt_net/data/config/presets/agent_openai.json,sha256=vMTR-soRBiEZrpJJHuFLWyx8a3Ez_BqtqjyXgxCAM_Q,733
  pygpt_net/data/config/presets/agent_openai_assistant.json,sha256=awJw9lNTGpKML6SJUShVn7lv8AXh0oic7wBeyoN7AYs,798
@@ -271,8 +271,8 @@ pygpt_net/data/config/presets/current.vision.json,sha256=x1ll5B3ROSKYQA6l27PRGXU
  pygpt_net/data/config/presets/dalle_white_cat.json,sha256=esqUb43cqY8dAo7B5u99tRC0MBV5lmlrVLnJhTSkL8w,552
  pygpt_net/data/config/presets/joke_agent.json,sha256=R6n9P7KRb0s-vZWZE7kHdlOfXAx1yYrPmUw8uLyw8OE,474
  pygpt_net/data/config/presets/joke_expert.json,sha256=aFBFCY97Uba71rRq0MSeakXaOj8yuaUqekQ842YHv64,683
- pygpt_net/data/config/settings.json,sha256=HdSmnEEKqtxq22OInEZzeKP9MXP85HsKtTx-y7Kd8kE,50810
- pygpt_net/data/config/settings_section.json,sha256=M22jrZvly6KliNr_fhkS6rk_bdzlFK4OyoWZ6TiyRnY,1004
+ pygpt_net/data/config/settings.json,sha256=CcJXMZJcptyLbD1keW7-DMRMuDJR1Ixf5jXUepXCalI,51192
+ pygpt_net/data/config/settings_section.json,sha256=Ng6kgmgxVmvt-KYFIqZvIDAEK4DfISNjNVF55DFWNjs,1082
  pygpt_net/data/css/fix_windows.css,sha256=Mks14Vg25ncbMqZJfAMStrhvZmgHF6kU75ohTWRZeI8,664
  pygpt_net/data/css/markdown.css,sha256=yaoJPogZZ_ghbqP8vTXTycwVyD61Ik5_033NpzuUzC0,1122
  pygpt_net/data/css/markdown.dark.css,sha256=ixAwuT69QLesZttKhO4RAy-QukplZwwfXCZsWLN9TP4,730
@@ -1487,14 +1487,14 @@ pygpt_net/data/js/katex/fonts/KaTeX_Typewriter-Regular.woff,sha256=4U_tArGrp86fW
  pygpt_net/data/js/katex/fonts/KaTeX_Typewriter-Regular.woff2,sha256=cdUX1ngneHz6vfGGkUzDNY7aU543kxlB8rL9SiH2jAs,13568
  pygpt_net/data/js/katex/katex.min.css,sha256=lVaKnUaQNG4pI71WHffQZVALLQF4LMZEk4nOia8U9ow,23532
  pygpt_net/data/js/katex/katex.min.js,sha256=KLASOtKS2x8pUxWVzCDmlWJ4jhuLb0vtrgakbD6gDDo,276757
- pygpt_net/data/locale/locale.de.ini,sha256=AUcujqpl2NkMgMnvyKMpBN-DfVS-1_bofImMnRB-nR4,65877
- pygpt_net/data/locale/locale.en.ini,sha256=fnYI-O6deNJCzMzK805ydPXfd58IniPmhxYtrbMc9tU,78559
- pygpt_net/data/locale/locale.es.ini,sha256=DixNe-NMlNFxnOHsxsXcfmVQpMhlmVWkd-GIox1wypo,66021
- pygpt_net/data/locale/locale.fr.ini,sha256=loEqR2eY9rmn_-vY-qLKEECbVPwEGwSknztuGtq034M,68100
- pygpt_net/data/locale/locale.it.ini,sha256=i4b6nhklhop6KZhIO_tLIeCP_okUce5D2fTx_OJ16fc,64729
- pygpt_net/data/locale/locale.pl.ini,sha256=w2yQe5Vt97OdvZLqP_6qLv1S0fF0OKjojdPsAQseRAg,64776
- pygpt_net/data/locale/locale.uk.ini,sha256=tSH_0wLoGwM-JFRfqmymwrfCBndyumPlsBABkyWl7qY,90156
- pygpt_net/data/locale/locale.zh.ini,sha256=UUOLVTBjp4RY5G6Hh8T6IXVklVIZUcHvPpMjCyVSb9U,66247
+ pygpt_net/data/locale/locale.de.ini,sha256=MCv-wuxxnSwN4Jwxy_6EtcqgCD4Q2fFdhzA7KbKWfLo,66095
+ pygpt_net/data/locale/locale.en.ini,sha256=jx0MF-Y3xtJKJdq8dXq0NY5CcOkFYWrMdgXHjitX1Tc,78763
+ pygpt_net/data/locale/locale.es.ini,sha256=OVNmCMA6Xww3dFlNGbl4z9BSO5SzoxTFB_RvpUOEq6o,66253
+ pygpt_net/data/locale/locale.fr.ini,sha256=I0XkyYpE9gF6il5OJlDWA6dN5MG0H3wqP9SMJBttds0,68311
+ pygpt_net/data/locale/locale.it.ini,sha256=PILGaGTCumfULasxW-2eJ0d3-06QJJJWC8_KEvjuuAs,64951
+ pygpt_net/data/locale/locale.pl.ini,sha256=mc49HhxN6C1feB0W5mBFZzXAKQoHQPLKhIuh6JFwPDE,65004
+ pygpt_net/data/locale/locale.uk.ini,sha256=rmD9S3iTxuOZRAj65RDxzMC6do-IM4X4zsXhGT_Gg-0,90432
+ pygpt_net/data/locale/locale.zh.ini,sha256=Vf8pOvpa20yq-dAfgmxJgvNDIN-gvb6csvGHrhSUpQ0,66458
  pygpt_net/data/locale/plugin.agent.de.ini,sha256=BY28KpfFvgfVYJzcw2o5ScWnR4uuErIYGyc3NVHlmTw,1714
  pygpt_net/data/locale/plugin.agent.en.ini,sha256=88LkZUpilbV9l4QDbMyIdq_K9sbWt-CQPpavEttPjJU,1489
  pygpt_net/data/locale/plugin.agent.es.ini,sha256=bqaJQne8HPKFVtZ8Ukzo1TSqVW41yhYbGUqW3j2x1p8,1680
@@ -1672,7 +1672,7 @@ pygpt_net/item/__init__.py,sha256=jQQgG9u_ZLsZWXustoc1uvC-abUvj4RBKPAM30-f2Kc,48
  pygpt_net/item/assistant.py,sha256=AjbpL-EnolBc-esGkBSAAaNPrgFqMSbevvtIulhu9b4,9587
  pygpt_net/item/attachment.py,sha256=DttKEdCuWa_0O1CLoDiBLoST73bXPIbdqryeui00bD4,2818
  pygpt_net/item/calendar_note.py,sha256=Y9rfMmTbWwcFrHNra62aUww-NGPIE6O03wHRrF5TyAg,2113
- pygpt_net/item/ctx.py,sha256=i4lKRQ9xx_SaOVuiRJlKnsA5sBjl5pRby8MtJvsp3lc,18904
+ pygpt_net/item/ctx.py,sha256=XZ4aRlpISA9_f63VNAG8BmmqFxx01m33TPa1CCFX_xA,18971
  pygpt_net/item/index.py,sha256=gDQYPlhwHF0QVGwX4TFGxHyO7pt5tqHcuyc3DPgPCA0,1681
  pygpt_net/item/mode.py,sha256=bhX6ZOvTKsiLI6-N-7cuJ_9izlAqq6bsXF1FjufJvfw,600
  pygpt_net/item/model.py,sha256=ZtyhQbMxOikaW4EQsAMDeW9X1ksn_Yw6eCF1tZ7AvAY,8313
@@ -1729,10 +1729,10 @@ pygpt_net/plugin/cmd_code_interpreter/worker.py,sha256=U5ahn_LV7xGm_S05AfQlLM_de
  pygpt_net/plugin/cmd_custom/__init__.py,sha256=L8hD1WkbRBdNsbR4iHbgSXHWszF6DxNRbgEIEi2Q_og,3911
  pygpt_net/plugin/cmd_custom/config.py,sha256=PuD18kxtfBr2iag4WYnpqmO29ZMThktVT859KRmBnNA,2026
  pygpt_net/plugin/cmd_custom/worker.py,sha256=iXMZK24rNYupvOQt-wB70gJsLUvi7Zpo7TgPWBvwe4s,4091
- pygpt_net/plugin/cmd_files/__init__.py,sha256=r8dWyiWBKkchSKRvaKtg_-qv0bxC4olyfS-vV-em8QE,4215
+ pygpt_net/plugin/cmd_files/__init__.py,sha256=6P8brcvQDd05Yno8zF1V9tgiAQr_JV9yvkn-EAgRE_U,4210
  pygpt_net/plugin/cmd_files/config.py,sha256=q3W8Ur6prnzRMg8hg1bbdwhUrfPiUKA3E-9hDw547cI,13293
  pygpt_net/plugin/cmd_files/output.py,sha256=PiSslXXc7TeTzeuXh_MBEh3I4PuIDStr_1i2ARuPXFM,1987
- pygpt_net/plugin/cmd_files/worker.py,sha256=cnbMQjxQaRpVshaKl_CvjduNJBmP6ThX5IorAA3Ct0Y,34061
+ pygpt_net/plugin/cmd_files/worker.py,sha256=PiXtTW3LzTk88eyZpxENEWJbkLd7-BkmtDUDMSeCPRQ,34062
  pygpt_net/plugin/cmd_history/__init__.py,sha256=uR7Wyly2StrLaazvkFSiPYxLb5EN4ua5mBlMhQHY4HM,11023
  pygpt_net/plugin/cmd_history/config.py,sha256=bp3-MDNd5tckiSs1mV-GWb5Y7_ZO_LHaerM9-4Yx7U4,9659
  pygpt_net/plugin/cmd_history/worker.py,sha256=-jGdiR94QHKOMISWIPtNLmCLFDG5UzibItvkbQx9KZo,6119
@@ -1822,7 +1822,7 @@ pygpt_net/provider/core/calendar/db_sqlite/storage.py,sha256=QDclQCQdr4QyRIqjgGX
  pygpt_net/provider/core/config/__init__.py,sha256=jQQgG9u_ZLsZWXustoc1uvC-abUvj4RBKPAM30-f2Kc,488
  pygpt_net/provider/core/config/base.py,sha256=cbvzbMNqL2XgC-36gGubnU37t94AX7LEw0lecb2Nm80,1365
  pygpt_net/provider/core/config/json_file.py,sha256=P78SRQpNr_nF7TYftYLnHl_DVo7GLPNs4_lvw97sqq8,5122
- pygpt_net/provider/core/config/patch.py,sha256=JYNOz-_7ePQRBWfcc1SYHaeCZAZKox8VoArrygfcegQ,97421
+ pygpt_net/provider/core/config/patch.py,sha256=5xaSuG6x9Dtgpr_JBQR41qrj3pIJkwhZhxsgAc09waM,97698
  pygpt_net/provider/core/ctx/__init__.py,sha256=jQQgG9u_ZLsZWXustoc1uvC-abUvj4RBKPAM30-f2Kc,488
  pygpt_net/provider/core/ctx/base.py,sha256=Tfb4MDNe9BXXPU3lbzpdYwJF9S1oa2-mzgu5XT4It9g,3003
  pygpt_net/provider/core/ctx/db_sqlite/__init__.py,sha256=G2pB7kZfREJRLJZmfv3DKTslXC-K7EhNN2sn56q6BFA,11753
@@ -1867,16 +1867,17 @@ pygpt_net/provider/core/preset/patch.py,sha256=uGeOqz-JnFVXHAjnlto5I79O-HNXMLRSJ
  pygpt_net/provider/core/prompt/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  pygpt_net/provider/core/prompt/base.py,sha256=EYUA30T1QwJ9RSD0uW5x6VEstgIXNwgutmaXI64BWhw,1304
  pygpt_net/provider/core/prompt/json_file.py,sha256=5yfW1RgEa36tX4-ntze4PavWLry0YG43D2LO23_MrzE,4838
- pygpt_net/provider/gpt/__init__.py,sha256=d2UJRKfZQfC9GkA-Ln0effgDIpvmY3n7bVQVXs0HXUE,10120
+ pygpt_net/provider/gpt/__init__.py,sha256=707FMcQfP-NgTZIXEUq6fr6Gv0zkTzy4RpvIYgRnAB8,11396
  pygpt_net/provider/gpt/assistants.py,sha256=DSw1YB_J9n2rFD5CPDWZy59I38VSG6uLpYydGLTUPMQ,14083
  pygpt_net/provider/gpt/audio.py,sha256=frHElxYVaHYkNDCMJ9tQMoGqxSaZ-s5oPlAEHUAckkc,2032
  pygpt_net/provider/gpt/chat.py,sha256=W-p6njN843JyExMcyqD_ClzmWv8de9F4-LdLwjS_4Pg,10406
  pygpt_net/provider/gpt/completion.py,sha256=OusKOb4G11aYRJUjRWcMsf80cRQQvee9DzRe99ubLmc,6164
  pygpt_net/provider/gpt/image.py,sha256=ZqYrtVTcfPa8Kf08pWLKy1Zhvi6pu61GBlslRBauoK0,8967
+ pygpt_net/provider/gpt/responses.py,sha256=qhgp-6aasIrvseOhW0FSYggajeeBWfoisGUPWJ7gnGk,9639
  pygpt_net/provider/gpt/store.py,sha256=FaVd7SBC_QQ0W26_odJwcrLH54CSq0UZXZnuwIhRm54,17315
  pygpt_net/provider/gpt/summarizer.py,sha256=449yUqxwshSqeVoO7WIZasTpYlopG1Z_1ShPE5rAnvc,2260
  pygpt_net/provider/gpt/utils.py,sha256=O0H0EPb4lXUMfE1bFdWB56yuWLv7M5owVIGWRyDDv-E,855
- pygpt_net/provider/gpt/vision.py,sha256=01fZLGo5HuD1LpWQAunsQyii83TPg132EPV9-aSpBHo,9029
+ pygpt_net/provider/gpt/vision.py,sha256=1Imlr6U4xUE_N90UvAaLR60KGB_39xl5aGZbsiJ7JoQ,10001
  pygpt_net/provider/gpt/worker/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  pygpt_net/provider/gpt/worker/assistants.py,sha256=MFUlFJ9Xe4VTJFOz5OtFHvOHkJnTr2wbeKDavCCDn00,21088
  pygpt_net/provider/gpt/worker/importer.py,sha256=zmu55TAWbSlRrI4Vk5llVhbiR6s7dskx3iaBgTrQ_js,15467
@@ -1890,7 +1891,7 @@ pygpt_net/provider/llms/hugging_face.py,sha256=HLw0x8O0HuFNI-7yeI4m-ksl2KPpyENqT
  pygpt_net/provider/llms/hugging_face_api.py,sha256=EmMQL4QJnE-2SZwHg102ZqSZzi8WMIo84inG2bRiaw8,2892
  pygpt_net/provider/llms/local.py,sha256=s6Myi1dZ2fTCCno6UHT-gbffe0g5b_sYxnvMj5P8LlI,1393
  pygpt_net/provider/llms/ollama.py,sha256=bA5m_IDSOyHPAOeCnsmxf1jaaTW8hiV4HgTT-Au7R0s,3985
- pygpt_net/provider/llms/ollama_custom.py,sha256=FCbT95_ILfUwxQGD4aw8rBEohCynHN5H0Jap0koutBg,24024
+ pygpt_net/provider/llms/ollama_custom.py,sha256=WVbLiEEwnz5loKiLy7EYmpuWz0Tp5Vhd1vOUB2051kI,24167
  pygpt_net/provider/llms/openai.py,sha256=8HUn-YAVM4YQ10fBbsnGvv0eAOFlyKURVPlv9aL8d7U,3730
  pygpt_net/provider/loaders/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  pygpt_net/provider/loaders/base.py,sha256=3-qzzGAF2jxhriNHjE3Y2GtDXxs1_2_BIloaVJS4qzQ,3101
@@ -2183,8 +2184,8 @@ pygpt_net/ui/widget/textarea/web.py,sha256=2LebPHa_e5lvBqnIVzjwsLcFMoc11BonXgAUs
  pygpt_net/ui/widget/vision/__init__.py,sha256=8HT4tQFqQogEEpGYTv2RplKBthlsFKcl5egnv4lzzEw,488
  pygpt_net/ui/widget/vision/camera.py,sha256=T8b5cmK6uhf_WSSxzPt_Qod8JgMnst6q8sQqRvgQiSA,2584
  pygpt_net/utils.py,sha256=WtrdagJ-BlCjxGEEVq2rhsyAZMcU6JqltCXzOs823po,6707
- pygpt_net-2.5.15.dist-info/LICENSE,sha256=rbPqNB_xxANH8hKayJyIcTwD4bj4Y2G-Mcm85r1OImM,1126
- pygpt_net-2.5.15.dist-info/METADATA,sha256=uzXawCTEO0xxxSDARADtKKuS6ieUJH9yrzRiUM6Dsc8,171436
- pygpt_net-2.5.15.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
- pygpt_net-2.5.15.dist-info/entry_points.txt,sha256=qvpII6UHIt8XfokmQWnCYQrTgty8FeJ9hJvOuUFCN-8,43
- pygpt_net-2.5.15.dist-info/RECORD,,
+ pygpt_net-2.5.17.dist-info/LICENSE,sha256=rbPqNB_xxANH8hKayJyIcTwD4bj4Y2G-Mcm85r1OImM,1126
+ pygpt_net-2.5.17.dist-info/METADATA,sha256=wlBPjxYTbqKADLNv3Njh_Qob-UWrQkJK2pD6Bg455YY,171888
+ pygpt_net-2.5.17.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+ pygpt_net-2.5.17.dist-info/entry_points.txt,sha256=qvpII6UHIt8XfokmQWnCYQrTgty8FeJ9hJvOuUFCN-8,43
+ pygpt_net-2.5.17.dist-info/RECORD,,