pygpt-net 2.5.15__py3-none-any.whl → 2.5.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pygpt_net/CHANGELOG.txt CHANGED
@@ -1,3 +1,9 @@
+2.5.16 (2025-06-25)
+
+- OpenAI API upgraded to 1.91.0.
+- Chat mode migrated to Responses API with native built-in web search tool. (beta)
+- Fixed file_read tool in I/O plugin.
+
 2.5.15 (2025-06-24)
 
 - Added Ollama models importer in "Settings -> Models -> Import from Ollama".
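
The headline change above is the switch from the Chat Completions endpoint to the Responses API. A minimal sketch of what that migration looks like at the SDK level (assuming openai>=1.91.0; the model name and prompt are placeholders, not taken from the package):

    from openai import OpenAI

    client = OpenAI()

    # Before: Chat Completions API
    chat = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(chat.choices[0].message.content)

    # After: Responses API, with the built-in web search tool enabled
    resp = client.responses.create(
        model="gpt-4o",
        input=[{"role": "user", "content": "Hello"}],
        tools=[{"type": "web_search_preview"}],
    )
    print(resp.output_text)  # aggregated text output
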
pygpt_net/__init__.py CHANGED
@@ -6,15 +6,15 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025-06-24 19:00:00 #
+# Updated Date: 2025-06-25 02:00:00 #
 # ================================================== #
 
 __author__ = "Marcin Szczygliński"
 __copyright__ = "Copyright 2025, Marcin Szczygliński"
 __credits__ = ["Marcin Szczygliński"]
 __license__ = "MIT"
-__version__ = "2.5.15"
-__build__ = "2025-06-24"
+__version__ = "2.5.16"
+__build__ = "2025-06-25"
 __maintainer__ = "Marcin Szczygliński"
 __github__ = "https://github.com/szczyglis-dev/py-gpt"
 __report__ = "https://github.com/szczyglis-dev/py-gpt/issues"
pygpt_net/controller/chat/stream.py CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.03.02 19:00:00 #
+# Updated Date: 2025.06.25 02:00:00 #
 # ================================================== #
 
 from typing import Any
@@ -36,6 +36,9 @@ class Stream:
         output_tokens = 0
         begin = True
         error = None
+        tool_calls = []
+        fn_args_buffers = {}
+        citations = []
 
         # chunks: stream begin
         data = {
@@ -60,24 +63,32 @@ class Stream:
                 if error is not None:
                     break  # break if error
 
+                etype = None
                 response = None
                 chunk_type = "raw"
-                if (hasattr(chunk, 'choices')
-                        and chunk.choices[0] is not None
-                        and hasattr(chunk.choices[0], 'delta')
-                        and chunk.choices[0].delta is not None):
-                    chunk_type = "api_chat"
-                elif (hasattr(chunk, 'choices')
-                        and chunk.choices[0] is not None
-                        and hasattr(chunk.choices[0], 'text')
-                        and chunk.choices[0].text is not None):
-                    chunk_type = "api_completion"
-                elif (hasattr(chunk, 'content')
-                        and chunk.content is not None):
-                    chunk_type = "langchain_chat"
-                elif (hasattr(chunk, 'delta')
-                        and chunk.delta is not None):
-                    chunk_type = "llama_chat"
+                if ctx.use_responses_api:
+                    if hasattr(chunk, 'type'):  # streaming event type
+                        etype = chunk.type
+                        chunk_type = "api_chat_responses"  # responses API
+                    else:
+                        continue
+                else:
+                    if (hasattr(chunk, 'choices')
+                            and chunk.choices[0] is not None
+                            and hasattr(chunk.choices[0], 'delta')
+                            and chunk.choices[0].delta is not None):
+                        chunk_type = "api_chat"  # chat completions API
+                    elif (hasattr(chunk, 'choices')
+                            and chunk.choices[0] is not None
+                            and hasattr(chunk.choices[0], 'text')
+                            and chunk.choices[0].text is not None):
+                        chunk_type = "api_completion"
+                    elif (hasattr(chunk, 'content')
+                            and chunk.content is not None):
+                        chunk_type = "langchain_chat"
+                    elif (hasattr(chunk, 'delta')
+                            and chunk.delta is not None):
+                        chunk_type = "llama_chat"
 
                 # OpenAI chat completion
                 if chunk_type == "api_chat":
@@ -110,6 +121,43 @@ class Stream:
                         if tool_chunk.function.arguments:
                             tool_call["function"]["arguments"] += tool_chunk.function.arguments
 
+                elif chunk_type == "api_chat_responses":
+
+                    if etype == "response.output_text.delta":
+                        response = chunk.delta
+
+                    # ---------- function_call ----------
+                    elif etype == "response.output_item.added" and chunk.item.type == "function_call":
+                        tool_calls.append({
+                            "id": chunk.item.id,
+                            "type": "function",
+                            "function": {"name": chunk.item.name, "arguments": ""}
+                        })
+                        fn_args_buffers[chunk.item.id] = ""
+
+                    elif etype == "response.function_call_arguments.delta":
+                        fn_args_buffers[chunk.item_id] += chunk.delta
+
+                    elif etype == "response.function_call_arguments.done":
+                        for tc in tool_calls:
+                            if tc["id"] == chunk.item_id:
+                                tc["function"]["arguments"] = fn_args_buffers[chunk.item_id]
+                                break
+                        fn_args_buffers.pop(chunk.item_id, None)
+
+                    # ---------- annotations ----------
+                    elif etype == "response.output_text.annotation.added":
+                        if chunk.annotation['type'] == "url_citation":
+                            if citations is None:
+                                citations = []
+                            url_citation = chunk.annotation['url']
+                            citations.append(url_citation)
+                            ctx.urls = citations
+
+                    # ---------- end / error ----------
+                    elif etype in {"response.done", "response.failed", "error"}:
+                        pass
+
                 # OpenAI completion
                 elif chunk_type == "api_completion":
                     if chunk.choices[0].text is not None:
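
For reference, the new `api_chat_responses` branch above consumes typed server events rather than bare deltas. A standalone sketch of the same consumption loop, reduced to the event types the controller handles (assumes openai>=1.91.0; model and prompt are placeholders):

    from openai import OpenAI

    client = OpenAI()
    stream = client.responses.create(
        model="gpt-4o",
        input="Hello",
        stream=True,
    )
    for event in stream:
        if event.type == "response.output_text.delta":
            print(event.delta, end="", flush=True)  # incremental text chunk
        elif event.type in {"response.done", "response.failed", "error"}:
            break  # terminal events, as in the branch above
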
pygpt_net/core/bridge/worker.py CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.29 23:00:00 #
+# Updated Date: 2025.06.25 02:00:00 #
 # ================================================== #
 
 from PySide6.QtCore import QObject, Signal, QRunnable, Slot
@@ -15,6 +15,7 @@ from pygpt_net.core.types import (
     MODE_AGENT_LLAMA,
     MODE_LANGCHAIN,
     MODE_LLAMA_INDEX,
+    MODE_ASSISTANT,
 )
 from pygpt_net.core.events import KernelEvent, Event
 
@@ -48,7 +49,8 @@ class BridgeWorker(QObject, QRunnable):
         self.handle_post_prompt_async()
 
         # ADDITIONAL CONTEXT: append additional context from attachments
-        self.handle_additional_context()
+        if self.mode != MODE_ASSISTANT:
+            self.handle_additional_context()
 
         # POST PROMPT END: handle post prompt end event
         self.handle_post_prompt_end()
pygpt_net/core/command/__init__.py CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.02.02 02:00:00 #
+# Updated Date: 2025.06.25 02:00:00 #
 # ================================================== #
 
 import copy
@@ -228,6 +228,34 @@ class Command:
             print("Error parsing tool call: " + str(e))
         return parsed
 
+    def unpack_tool_calls_responses(
+            self,
+            tool_calls: List
+    ) -> List[Dict[str, Any]]:
+        """
+        Unpack tool calls from OpenAI response
+
+        :param tool_calls: tool calls list
+        :return: parsed tool calls list
+        """
+        parsed = []
+        for tool_call in tool_calls:
+            try:
+                parsed.append(
+                    {
+                        "id": tool_call.id,
+                        "type": "function",
+                        "function": {
+                            "name": tool_call.name,
+                            "arguments": json.loads(tool_call.arguments)
+                        }
+                    }
+                )
+            except Exception as e:
+                self.window.core.debug.log(e)
+                print("Error parsing tool call: " + str(e))
+        return parsed
+
     def unpack_tool_calls_chunks(
             self,
             ctx: CtxItem,
@@ -503,6 +531,9 @@ class Command:
             if "required" in param and param["required"]:
                 required.append(param["name"])
 
+        if len(required) > 0:
+            params["required"] = required
+
         # extract params and convert to JSON schema format
         for param in cmd["params"]:
             try:
@@ -570,7 +601,7 @@ class Command:
                 elif params["properties"][key]["type"] == "list":
                     params["properties"][key]["type"] = "array"
                     params["properties"][key]["items"] = {
-                        "$ref": "#"
+                        "type": "string"
                     }
             except Exception as e:
                 print(e)
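
Two schema fixes land here: commands with required params now emit a JSON-schema `required` list, and `list`-typed params become plain string arrays instead of the recursive `{"$ref": "#"}` items. A hypothetical `parameters` object after the change (the param name is illustrative only):

    parameters = {
        "type": "object",
        "properties": {
            "paths": {
                "type": "array",              # was "list" in the command spec
                "items": {"type": "string"},  # was {"$ref": "#"}
            },
        },
        "required": ["paths"],                # now emitted when any param is required
    }
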
pygpt_net/data/config/config.json CHANGED
@@ -1,8 +1,8 @@
 {
     "__meta__": {
-        "version": "2.5.15",
-        "app.version": "2.5.15",
-        "updated_at": "2025-06-24T00:00:00"
+        "version": "2.5.16",
+        "app.version": "2.5.16",
+        "updated_at": "2025-06-25T00:00:00"
     },
     "access.audio.event.speech": false,
     "access.audio.event.speech.disabled": [],
pygpt_net/data/config/models.json CHANGED
@@ -1,8 +1,8 @@
 {
     "__meta__": {
-        "version": "2.5.15",
-        "app.version": "2.5.15",
-        "updated_at": "2025-06-24T00:00:00"
+        "version": "2.5.16",
+        "app.version": "2.5.16",
+        "updated_at": "2025-06-25T00:00:00"
     },
     "items": {
         "claude-3-5-sonnet-20240620": {
pygpt_net/item/ctx.py CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.23 19:00:00 #
+# Updated Date: 2025.06.25 02:00:00 #
 # ================================================== #
 
 import copy
@@ -75,6 +75,7 @@ class CtxItem:
         self.sub_call = False  # is sub call
         self.sub_reply = False  # sub call reply
         self.hidden = False  # hidden context
+        self.use_responses_api = False  # use responses API format
         self.pid = 0
         self.audio_id = None
         self.audio_output = None
pygpt_net/plugin/cmd_files/__init__.py CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.12.16 01:00:00 #
+# Updated Date: 2025.06.25 02:00:00 #
 # ================================================== #
 
 import os
@@ -141,7 +141,7 @@ class Plugin(BasePlugin):
         """
         # use_loaders = False
        if use_loaders:
-            content, docs = str(self.window.core.idx.indexing.read_text_content(path))
+            content, docs = self.window.core.idx.indexing.read_text_content(path)
             return content
         else:
             data = ""
pygpt_net/plugin/cmd_files/worker.py CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.01.17 13:00:00 #
+# Updated Date: 2025.06.25 02:00:00 #
 # ================================================== #
 
 import fnmatch
@@ -847,7 +847,7 @@ class Worker(BaseWorker):
             path,
         )
 
-    def read_files(self, paths: list) -> (dict, str):
+    def read_files(self, paths: list) -> (list, list):
         """
         Read files from directory
 
pygpt_net/provider/gpt/__init__.py CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.03.02 19:00:00 #
+# Updated Date: 2025.06.25 02:00:00 #
 # ================================================== #
 
 from httpx_socks import SyncProxyTransport
@@ -29,6 +29,7 @@ from .assistants import Assistants
 from .chat import Chat
 from .completion import Completion
 from .image import Image
+from .responses import Responses
 from .store import Store
 from .summarizer import Summarizer
 from .vision import Vision
@@ -47,6 +48,7 @@ class Gpt:
         self.chat = Chat(window)
         self.completion = Completion(window)
         self.image = Image(window)
+        self.responses = Responses(window)
         self.store = Store(window)
         self.summarizer = Summarizer(window)
         self.vision = Vision(window)
@@ -108,6 +110,12 @@ class Gpt:
         ai_name = ctx.output_name
         thread_id = ctx.thread  # from ctx
 
+        # --- Responses API ---- /beta/
+        use_responses_api = False
+        if mode == MODE_CHAT:
+            use_responses_api = True  # use responses API for chat, audio, research modes
+        ctx.use_responses_api = use_responses_api  # set in context
+
         # get model id
         model_id = None
         if model is not None:
@@ -128,20 +136,30 @@ class Gpt:
             )
             used_tokens = self.completion.get_used_tokens()
 
-        # chat (OpenAI) | research (Perplexity)
+        # chat, audio (OpenAI) | research (Perplexity)
         elif mode in [
             MODE_CHAT,
             MODE_AUDIO,
             MODE_RESEARCH
         ]:
-            response = self.chat.send(
-                context=context,
-                extra=extra,
-            )
-            if hasattr(response, "citations"):
-                if response.citations:
-                    ctx.urls = response.citations
-            used_tokens = self.chat.get_used_tokens()
+            # responses API
+            if use_responses_api:
+                response = self.responses.send(
+                    context=context,
+                    extra=extra,
+                )
+                used_tokens = self.responses.get_used_tokens()
+            else:
+                # chat completion API
+                response = self.chat.send(
+                    context=context,
+                    extra=extra,
+                )
+                if hasattr(response, "citations"):
+                    if response.citations:
+                        ctx.urls = response.citations
+                used_tokens = self.chat.get_used_tokens()
 
         self.vision.append_images(ctx)  # append images to ctx if provided
 
         # image
@@ -184,7 +202,7 @@ class Gpt:
 
         # if stream
         if stream:
-            ctx.stream = response
+            ctx.stream = response  # generator
             ctx.set_output("", ai_name)  # set empty output
             ctx.input_tokens = used_tokens  # get from input tokens calculation
             return True
@@ -206,13 +224,21 @@ class Gpt:
             MODE_VISION,
             MODE_RESEARCH
         ]:
-            if response.choices[0]:
-                if response.choices[0].message.content:
-                    output = response.choices[0].message.content.strip()
-                elif response.choices[0].message.tool_calls:
-                    ctx.tool_calls = self.window.core.command.unpack_tool_calls(
-                        response.choices[0].message.tool_calls,
+            if use_responses_api:
+                if response.output_text:
+                    output = response.output_text.strip()
+                if response.output:
+                    ctx.tool_calls = self.window.core.command.unpack_tool_calls_responses(
+                        response.output,
                     )
+            else:
+                if response.choices[0]:
+                    if response.choices[0].message.content:
+                        output = response.choices[0].message.content.strip()
+                    elif response.choices[0].message.tool_calls:
+                        ctx.tool_calls = self.window.core.command.unpack_tool_calls(
+                            response.choices[0].message.tool_calls,
+                        )
         # audio
         elif mode in [MODE_AUDIO]:
             if response.choices[0]:
@@ -234,10 +260,17 @@ class Gpt:
         )
 
         ctx.set_output(output, ai_name)
-        ctx.set_tokens(
-            response.usage.prompt_tokens,
-            response.usage.completion_tokens,
-        )
+
+        if not use_responses_api:
+            ctx.set_tokens(
+                response.usage.prompt_tokens,
+                response.usage.completion_tokens,
+            )
+        else:
+            ctx.set_tokens(
+                response.usage.input_tokens,
+                response.usage.output_tokens,
+            )
         return True
 
     def quick_call(self, context: BridgeContext, extra: dict = None) -> str:
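
The non-streaming paths now read different response shapes per API. Roughly, the split above boils down to the following sketch (attribute names mirror the diff; nothing else is implied):

    def extract_output_and_usage(response, use_responses_api: bool):
        """Sketch of the per-API response handling shown above."""
        if use_responses_api:
            output = (response.output_text or "").strip()
            usage = (response.usage.input_tokens, response.usage.output_tokens)
        else:
            output = (response.choices[0].message.content or "").strip()
            usage = (response.usage.prompt_tokens, response.usage.completion_tokens)
        return output, usage
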
pygpt_net/provider/gpt/responses.py ADDED
@@ -0,0 +1,279 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ================================================== #
+# This file is a part of PYGPT package #
+# Website: https://pygpt.net #
+# GitHub: https://github.com/szczyglis-dev/py-gpt #
+# MIT License #
+# Created By : Marcin Szczygliński #
+# Updated Date: 2025.06.25 02:00:00 #
+# ================================================== #
+
+import json
+import time
+from typing import Optional, Dict, Any, List
+
+from pygpt_net.core.types import (
+    MODE_CHAT,
+    MODE_VISION,
+    MODE_AUDIO,
+    MODE_RESEARCH,
+)
+from pygpt_net.core.bridge.context import BridgeContext, MultimodalContext
+from pygpt_net.item.ctx import CtxItem
+from pygpt_net.item.model import ModelItem
+
+from .utils import sanitize_name
+from pygpt_net.item.attachment import AttachmentItem
+
+
+class Responses:
+    def __init__(self, window=None):
+        """
+        Responses API wrapper
+
+        :param window: Window instance
+        """
+        self.window = window
+        self.input_tokens = 0
+        self.audio_prev_id = None
+        self.audio_prev_expires_ts = None
+
+    def send(
+            self,
+            context: BridgeContext,
+            extra: Optional[Dict[str, Any]] = None
+    ):
+        """
+        Call OpenAI API for chat
+
+        :param context: Bridge context
+        :param extra: Extra arguments
+        :return: response or stream chunks
+        """
+        prompt = context.prompt
+        stream = context.stream
+        max_tokens = int(context.max_tokens or 0)
+        system_prompt = context.system_prompt
+        mode = context.mode
+        model = context.model
+        functions = context.external_functions
+        attachments = context.attachments
+        multimodal_ctx = context.multimodal_ctx
+
+        ctx = context.ctx
+        if ctx is None:
+            ctx = CtxItem()  # create empty context
+        user_name = ctx.input_name  # from ctx
+        ai_name = ctx.output_name  # from ctx
+
+        client = self.window.core.gpt.get_client(mode)
+
+        # build chat messages
+        messages = self.build(
+            prompt=prompt,
+            system_prompt=system_prompt,
+            model=model,
+            history=context.history,
+            attachments=attachments,
+            ai_name=ai_name,
+            user_name=user_name,
+            multimodal_ctx=multimodal_ctx,
+        )
+        msg_tokens = self.window.core.tokens.from_messages(
+            messages,
+            model.id,
+        )
+        # check if max tokens not exceeded
+        if max_tokens > 0:
+            if msg_tokens + int(max_tokens) > model.ctx:
+                max_tokens = model.ctx - msg_tokens - 1
+                if max_tokens < 0:
+                    max_tokens = 0
+
+        # extra API kwargs
+        response_kwargs = {}
+
+        # tools / functions
+        tools = []
+        if functions is not None and isinstance(functions, list):
+            for function in functions:
+                if str(function['name']).strip() == '' or function['name'] is None:
+                    continue
+                params = {}
+                if function['params'] is not None and function['params'] != "":
+                    params = json.loads(function['params'])  # unpack JSON from string
+                tools.append({
+                    "type": "function",
+                    "name": function['name'],
+                    "parameters": params,
+                    "description": function['desc'],
+                })
+
+        # extra arguments, o3 only
+        if model.extra and "reasoning_effort" in model.extra:
+            response_kwargs['reasoning'] = {}
+            response_kwargs['reasoning']['effort'] = model.extra["reasoning_effort"]
+
+        # extend tools with external tools
+        if not model.id.startswith("o1") and not model.id.startswith("o3"):
+            tools.append({"type": "web_search_preview"})
+
+        # tool calls are not supported for o1-mini and o1-preview
+        if (model.id is not None
+                and model.id not in ["o1-mini", "o1-preview"]):
+            if len(tools) > 0:
+                response_kwargs['tools'] = tools
+
+        # audio mode
+        if mode in [MODE_AUDIO]:
+            stream = False
+            voice_id = "alloy"
+            tmp_voice = self.window.core.plugins.get_option("audio_output", "openai_voice")
+            if tmp_voice:
+                voice_id = tmp_voice
+            response_kwargs["modalities"] = ["text", "audio"]
+            response_kwargs["audio"] = {
+                "voice": voice_id,
+                "format": "wav"
+            }
+
+        response = client.responses.create(
+            input=messages,
+            model=model.id,
+            stream=stream,
+            **response_kwargs,
+        )
+        return response
+
+    def build(
+            self,
+            prompt: str,
+            system_prompt: str,
+            model: ModelItem,
+            history: Optional[List[CtxItem]] = None,
+            attachments: Optional[Dict[str, AttachmentItem]] = None,
+            ai_name: Optional[str] = None,
+            user_name: Optional[str] = None,
+            multimodal_ctx: Optional[MultimodalContext] = None,
+    ) -> list:
+        """
+        Build list of chat messages
+
+        :param prompt: user prompt
+        :param system_prompt: system prompt
+        :param history: history
+        :param model: model item
+        :param attachments: attachments
+        :param ai_name: AI name
+        :param user_name: username
+        :param multimodal_ctx: Multimodal context
+        :return: messages list
+        """
+        messages = []
+
+        # tokens config
+        mode = MODE_CHAT
+        allowed_system = True
+        if (model.id is not None
+                and model.id in ["o1-mini", "o1-preview"]):
+            allowed_system = False
+
+        used_tokens = self.window.core.tokens.from_user(
+            prompt,
+            system_prompt,
+        )  # threshold and extra included
+        max_ctx_tokens = self.window.core.config.get('max_total_tokens')  # max context window
+
+        # fit to max model tokens
+        if max_ctx_tokens > model.ctx:
+            max_ctx_tokens = model.ctx
+
+        # input tokens: reset
+        self.reset_tokens()
+
+        # append system prompt
+        if allowed_system:
+            if system_prompt is not None and system_prompt != "":
+                messages.append({"role": "developer", "content": system_prompt})
+
+        # append messages from context (memory)
+        if self.window.core.config.get('use_context'):
+            items = self.window.core.ctx.get_history(
+                history,
+                model.id,
+                mode,
+                used_tokens,
+                max_ctx_tokens,
+            )
+            for item in items:
+                # input
+                if item.final_input is not None and item.final_input != "":
+                    messages.append({
+                        "role": "user",
+                        "content": item.final_input,
+                    })
+
+                # output
+                if item.final_output is not None and item.final_output != "":
+                    msg = {
+                        "role": "assistant",
+                        "content": item.final_output,
+                    }
+                    # append previous audio ID
+                    if MODE_AUDIO in model.mode:
+                        if item.audio_id:
+                            # at first check expires_at - expired audio throws error in API
+                            current_timestamp = time.time()
+                            audio_timestamp = int(item.audio_expires_ts) if item.audio_expires_ts else 0
+                            if audio_timestamp and audio_timestamp > current_timestamp:
+                                msg["audio"] = {
+                                    "id": item.audio_id
+                                }
+                        elif self.audio_prev_id:
+                            current_timestamp = time.time()
+                            audio_timestamp = int(self.audio_prev_expires_ts) if self.audio_prev_expires_ts else 0
+                            if audio_timestamp and audio_timestamp > current_timestamp:
+                                msg["audio"] = {
+                                    "id": self.audio_prev_id
+                                }
+                    messages.append(msg)
+
+        # use vision and audio if available in current model
+        content = str(prompt)
+        if MODE_VISION in model.mode:
+            content = self.window.core.gpt.vision.build_content(
+                content=content,
+                attachments=attachments,
+                responses_api=True,
+            )
+        if MODE_AUDIO in model.mode:
+            content = self.window.core.gpt.audio.build_content(
+                content=content,
+                multimodal_ctx=multimodal_ctx,
+            )
+
+        # append current prompt
+        messages.append({
+            "role": "user",
+            "content": content,
+        })
+
+        # input tokens: update
+        self.input_tokens += self.window.core.tokens.from_messages(
+            messages,
+            model.id,
+        )
+        return messages
+
+    def reset_tokens(self):
+        """Reset input tokens counter"""
+        self.input_tokens = 0
+
+    def get_used_tokens(self) -> int:
+        """
+        Get input tokens counter
+
+        :return: input tokens
+        """
+        return self.input_tokens
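
Note the tool format visible in `send()` above: the Responses API takes a flat function spec (`name`, `parameters`, `description` at the top level) and lets built-in tools sit in the same list, whereas Chat Completions nests the spec under a `"function"` key. A hypothetical tools list in the new shape (function name and schema are illustrative only):

    tools = [
        {
            # user-defined function, flat Responses API shape
            "type": "function",
            "name": "get_weather",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
            },
            "description": "Get the weather for a city",
        },
        # built-in tool, mixed into the same list as in send() above
        {"type": "web_search_preview"},
    ]
    # For comparison, Chat Completions would nest the same function as:
    # {"type": "function", "function": {"name": ..., "parameters": ..., "description": ...}}
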
pygpt_net/provider/gpt/vision.py CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.12.14 22:00:00 #
+# Updated Date: 2025.06.25 02:00:00 #
 # ================================================== #
 
 import base64
@@ -168,18 +168,26 @@ class Vision:
             self,
             content: Union[str, list],
             attachments: Optional[Dict[str, AttachmentItem]] = None,
+            responses_api: Optional[bool] = False,
     ) -> List[dict]:
         """
         Build vision content
 
         :param content: content (str or list)
         :param attachments: attachments (dict, optional)
+        :param responses_api: if True, use responses API format
         :return: List of contents
         """
+        type_text = "text"
+        type_image = "image_url"
+        if responses_api:
+            type_text = "input_text"
+            type_image = "input_image"
+
         if not isinstance(content, list):
             content = [
                 {
-                    "type": "text",
+                    "type": type_text,
                     "text": str(content)
                 }
             ]
@@ -193,14 +201,22 @@ class Vision:
         urls = self.extract_urls(prompt)
         if len(urls) > 0:
             for url in urls:
-                content.append(
-                    {
-                        "type": "image_url",
-                        "image_url": {
-                            "url": url,
+                if not responses_api:
+                    content.append(
+                        {
+                            "type": type_image,
+                            "image_url": {
+                                "url": url,
+                            }
+                        }
+                    )
+                else:
+                    content.append(
+                        {
+                            "type": type_image,
+                            "image_url": url,
                         }
-                    }
-                )
+                    )
                 self.urls.append(url)
 
         # local images (attachments)
@@ -211,14 +227,22 @@ class Vision:
                 # check if it's an image
                 if self.is_image(attachment.path):
                     base64_image = self.encode_image(attachment.path)
-                    content.append(
-                        {
-                            "type": "image_url",
-                            "image_url": {
-                                "url": f"data:image/jpeg;base64,{base64_image}",
+                    if not responses_api:
+                        content.append(
+                            {
+                                "type": type_image,
+                                "image_url": {
+                                    "url": f"data:image/jpeg;base64,{base64_image}",
+                                }
                             }
-                        }
-                    )
+                        )
+                    else:
+                        content.append(
+                            {
+                                "type": type_image,
+                                "image_url": f"data:image/jpeg;base64,{base64_image}",
+                            }
+                        )
                     self.attachments[id] = attachment.path
                     attachment.consumed = True
 
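For comparison, the two content shapes `build_content()` can now emit for the same prompt plus one image URL (values are hypothetical):

    # Chat Completions format (responses_api=False):
    content_chat = [
        {"type": "text", "text": "Describe this"},
        {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
    ]

    # Responses API format (responses_api=True):
    content_responses = [
        {"type": "input_text", "text": "Describe this"},
        {"type": "input_image", "image_url": "https://example.com/cat.png"},
    ]
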
pygpt_net-2.5.15.dist-info/METADATA → pygpt_net-2.5.16.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: pygpt-net
-Version: 2.5.15
+Version: 2.5.16
 Summary: Desktop AI Assistant powered by models: OpenAI o1, GPT-4o, GPT-4, GPT-4 Vision, GPT-3.5, DALL-E 3, Llama 3, Mistral, Gemini, Claude, DeepSeek, Bielik, and other models supported by Langchain, Llama Index, and Ollama. Features include chatbot, text completion, image generation, vision analysis, speech-to-text, internet access, file handling, command execution and more.
 License: MIT
 Keywords: py_gpt,py-gpt,pygpt,desktop,app,o1,gpt,gpt4,gpt-4o,gpt-4v,gpt3.5,gpt-4,gpt-4-vision,gpt-3.5,llama3,mistral,gemini,deepseek,bielik,claude,tts,whisper,vision,chatgpt,dall-e,chat,chatbot,assistant,text completion,image generation,ai,api,openai,api key,langchain,llama-index,ollama,presets,ui,qt,pyside
@@ -71,7 +71,7 @@ Requires-Dist: llama-index-vector-stores-redis (>=0.4.0,<0.5.0)
 Requires-Dist: mss (>=9.0.2,<10.0.0)
 Requires-Dist: nbconvert (>=7.16.6,<8.0.0)
 Requires-Dist: onnxruntime (==1.20.1)
-Requires-Dist: openai (>=1.59.9,<1.60.0)
+Requires-Dist: openai (>=1.91.0,<2.0.0)
 Requires-Dist: opencv-python (>=4.11.0.86,<5.0.0.0)
 Requires-Dist: packaging (>=24.2,<25.0)
 Requires-Dist: pandas (>=2.2.3,<3.0.0)
@@ -100,7 +100,7 @@ Description-Content-Type: text/markdown
 
 [![pygpt](https://snapcraft.io/pygpt/badge.svg)](https://snapcraft.io/pygpt)
 
-Release: **2.5.15** | build: **2025-06-23** | Python: **>=3.10, <3.13**
+Release: **2.5.16** | build: **2025-06-25** | Python: **>=3.10, <3.13**
 
 > Official website: https://pygpt.net | Documentation: https://pygpt.readthedocs.io
 >
@@ -4124,6 +4124,12 @@ may consume additional tokens that are not displayed in the main window.
 
 ## Recent changes:
 
+**2.5.16 (2025-06-25)**
+
+- OpenAI API upgraded to 1.91.0.
+- Chat mode migrated to Responses API with native built-in web search tool. (beta)
+- Fixed file_read tool in I/O plugin.
+
 **2.5.15 (2025-06-24)**
 
 - Added Ollama models importer in "Settings -> Models -> Import from Ollama".
pygpt_net-2.5.15.dist-info/RECORD → pygpt_net-2.5.16.dist-info/RECORD RENAMED
@@ -1,6 +1,6 @@
-pygpt_net/CHANGELOG.txt,sha256=Imz1HnV0a_kN67bUws1G523zwe2jA-ChvPsGBEm_4fM,83588
+pygpt_net/CHANGELOG.txt,sha256=hFu7m9ErUfgHOMKcUo-CEWFdbWEdPugD8t3fldesfPE,83764
 pygpt_net/LICENSE,sha256=dz9sfFgYahvu2NZbx4C1xCsVn9GVer2wXcMkFRBvqzY,1146
-pygpt_net/__init__.py,sha256=sMHfKGrSwrMnY9ERiLfK_atiOtbygFCZ_ahbU6jWPM8,1373
+pygpt_net/__init__.py,sha256=hxCVQq3kr3OQl8lw5kFVjHtdoZGowTS0RBw5ezfmPro,1373
 pygpt_net/app.py,sha256=XXjn9XaKHGRcsHN8mMuqbRHAg8_Da0GLmACUU9ddjBc,16217
 pygpt_net/config.py,sha256=Qc1FOBtTf3O6A6-6KoqUGtoJ0u8hXQeowvCVbZFwtik,16405
 pygpt_net/container.py,sha256=BemiVZPpPNIzfB-ZvnZeeBPFu-AcX2c30OqYFylEjJc,4023
@@ -35,7 +35,7 @@ pygpt_net/controller/chat/input.py,sha256=1r-zIWwFIdc0IOuC1rHGZYCZm9tsSeLEsrrlcW
 pygpt_net/controller/chat/output.py,sha256=VuziVuI9Lj_4kZmTWvXg8t2tq4w9uD7J1g2MqlMCV6s,9272
 pygpt_net/controller/chat/render.py,sha256=h23QCvMDIAaCpInqwwADa4G43sSpSn-CE5celnk1LSc,17206
 pygpt_net/controller/chat/response.py,sha256=UnTnnn2on-Qg2_T_QcQcklTCcuq6XhyLLxs1fn-D9Tg,9450
-pygpt_net/controller/chat/stream.py,sha256=FSGDWC1OR3lWQ7sKyiU0oQy4D-Qv_3zWpVvbQ6bTqNc,8033
+pygpt_net/controller/chat/stream.py,sha256=vgjZYruBIMJ7V0udhVOf_pnlIuNDmzXRimToQ2i95WM,10496
 pygpt_net/controller/chat/text.py,sha256=nDiHuKyuRmnDWK0YCsdMhd2k_5zvSSrNWNc9y6FWi2g,10316
 pygpt_net/controller/chat/vision.py,sha256=OFodxDRleFqY-DVfEfgNn1mpa60-ZWEBwUlu25oJwmw,2884
 pygpt_net/controller/command/__init__.py,sha256=sUvnvsKISkHTrbv7woQQ8r4SAGDR8Gy85H42q8eAg78,5671
@@ -128,13 +128,13 @@ pygpt_net/core/audio/context.py,sha256=2XpXWhDC09iUvc0FRMq9BF2_rnQ60ZG4Js6LbO5Mo
 pygpt_net/core/audio/whisper.py,sha256=WZ_fNQ06s1NBxyoYB-lTFqDO6ARcnq9MZFekRaTNxTo,993
 pygpt_net/core/bridge/__init__.py,sha256=4qEZJkMIe2o861ukwAlFy0ba_ri8sqx4nwLhUZXld0g,10007
 pygpt_net/core/bridge/context.py,sha256=zIqbbFyZYsU5JEJGvwBg07u9QeeMUKsdTnURyp8tR4Y,4351
-pygpt_net/core/bridge/worker.py,sha256=aq0xA6LCvEK7BHTx3I1vziZpFtE29IHl-19yzsEB7mE,5817
+pygpt_net/core/bridge/worker.py,sha256=8o8HmnjtoImHFFPOfzymePPgmVUPZoFNHFd0BYUHV3c,5885
 pygpt_net/core/calendar/__init__.py,sha256=ao9kQk6Xjse95m1TbL1Mlbo1k1Q8D9eGc10L-71G9TY,7227
 pygpt_net/core/camera/__init__.py,sha256=iJ7ZIQPi3nFb5FtvH8Rig4v9pjRgccrHzSlY_ua0B_g,4077
 pygpt_net/core/chain/__init__.py,sha256=C7Xm88bRblcyM4e0wZMFG-6SQCdw_frXN9kqnWzce60,3541
 pygpt_net/core/chain/chat.py,sha256=5LxPWHkocjrIAAwrdDH1ss6knAnh4_owfbHPsOQYSws,5238
 pygpt_net/core/chain/completion.py,sha256=GGRA-q6sQgPnSibiwHBwk7jgT0MgOkka1_jK2-IiBPg,5698
-pygpt_net/core/command/__init__.py,sha256=Yj0nL_gtHjSTBbw6A-jpgTDz6JgE86FvfH82eqtM40g,24279
+pygpt_net/core/command/__init__.py,sha256=MIA8xR1opjL61Be8IyIgunoeLfgB7JYryDN00Tul15M,25243
 pygpt_net/core/ctx/__init__.py,sha256=WUV7OuQ7GXJ4GN75WfqV9v_VtKJhmgQ8uh8tfc2GPLc,43400
 pygpt_net/core/ctx/bag.py,sha256=-LRhttDRiQkw1Msl3kbGQYaY9w8zqn1o0miNRdqjHtQ,1286
 pygpt_net/core/ctx/container.py,sha256=tdPHPRfTi8yGY1MZGgFtYtx2lvc5K9OTqhjde16wivY,4232
@@ -248,8 +248,8 @@ pygpt_net/css_rc.py,sha256=i13kX7irhbYCWZ5yJbcMmnkFp_UfS4PYnvRFSPF7XXo,11349
 pygpt_net/data/audio/click_off.mp3,sha256=aNiRDP1pt-Jy7ija4YKCNFBwvGWbzU460F4pZWZDS90,65201
 pygpt_net/data/audio/click_on.mp3,sha256=qfdsSnthAEHVXzeyN4LlC0OvXuyW8p7stb7VXtlvZ1k,65201
 pygpt_net/data/audio/ok.mp3,sha256=LTiV32pEBkpUGBkKkcOdOFB7Eyt_QoP2Nv6c5AaXftk,32256
-pygpt_net/data/config/config.json,sha256=P3i9t6mZraxK1paZYhrgfTqFvXKg_oQTNLwH5E780jI,20083
-pygpt_net/data/config/models.json,sha256=jaOQ9iwCmEbqAVimJRNObJhXY1xiE4AJ4zdNM52pQt8,124691
+pygpt_net/data/config/config.json,sha256=saGOqexv27VNKNV_wS8dlGb6Dl2dCaTEAo7ubl48hY0,20083
+pygpt_net/data/config/models.json,sha256=42iGUxIPlrrN1YMWXZtw3X820I-2W69vCpT4KRefgqw,124691
 pygpt_net/data/config/modes.json,sha256=-q4Q4RsyoF2rLgvS0On59zXK0m0ml_kx6I0hNfLZRDY,2085
 pygpt_net/data/config/presets/agent_openai.json,sha256=vMTR-soRBiEZrpJJHuFLWyx8a3Ez_BqtqjyXgxCAM_Q,733
 pygpt_net/data/config/presets/agent_openai_assistant.json,sha256=awJw9lNTGpKML6SJUShVn7lv8AXh0oic7wBeyoN7AYs,798
@@ -1672,7 +1672,7 @@ pygpt_net/item/__init__.py,sha256=jQQgG9u_ZLsZWXustoc1uvC-abUvj4RBKPAM30-f2Kc,48
 pygpt_net/item/assistant.py,sha256=AjbpL-EnolBc-esGkBSAAaNPrgFqMSbevvtIulhu9b4,9587
 pygpt_net/item/attachment.py,sha256=DttKEdCuWa_0O1CLoDiBLoST73bXPIbdqryeui00bD4,2818
 pygpt_net/item/calendar_note.py,sha256=Y9rfMmTbWwcFrHNra62aUww-NGPIE6O03wHRrF5TyAg,2113
-pygpt_net/item/ctx.py,sha256=i4lKRQ9xx_SaOVuiRJlKnsA5sBjl5pRby8MtJvsp3lc,18904
+pygpt_net/item/ctx.py,sha256=XZ4aRlpISA9_f63VNAG8BmmqFxx01m33TPa1CCFX_xA,18971
 pygpt_net/item/index.py,sha256=gDQYPlhwHF0QVGwX4TFGxHyO7pt5tqHcuyc3DPgPCA0,1681
 pygpt_net/item/mode.py,sha256=bhX6ZOvTKsiLI6-N-7cuJ_9izlAqq6bsXF1FjufJvfw,600
 pygpt_net/item/model.py,sha256=ZtyhQbMxOikaW4EQsAMDeW9X1ksn_Yw6eCF1tZ7AvAY,8313
@@ -1729,10 +1729,10 @@ pygpt_net/plugin/cmd_code_interpreter/worker.py,sha256=U5ahn_LV7xGm_S05AfQlLM_de
 pygpt_net/plugin/cmd_custom/__init__.py,sha256=L8hD1WkbRBdNsbR4iHbgSXHWszF6DxNRbgEIEi2Q_og,3911
 pygpt_net/plugin/cmd_custom/config.py,sha256=PuD18kxtfBr2iag4WYnpqmO29ZMThktVT859KRmBnNA,2026
 pygpt_net/plugin/cmd_custom/worker.py,sha256=iXMZK24rNYupvOQt-wB70gJsLUvi7Zpo7TgPWBvwe4s,4091
-pygpt_net/plugin/cmd_files/__init__.py,sha256=r8dWyiWBKkchSKRvaKtg_-qv0bxC4olyfS-vV-em8QE,4215
+pygpt_net/plugin/cmd_files/__init__.py,sha256=6P8brcvQDd05Yno8zF1V9tgiAQr_JV9yvkn-EAgRE_U,4210
 pygpt_net/plugin/cmd_files/config.py,sha256=q3W8Ur6prnzRMg8hg1bbdwhUrfPiUKA3E-9hDw547cI,13293
 pygpt_net/plugin/cmd_files/output.py,sha256=PiSslXXc7TeTzeuXh_MBEh3I4PuIDStr_1i2ARuPXFM,1987
-pygpt_net/plugin/cmd_files/worker.py,sha256=cnbMQjxQaRpVshaKl_CvjduNJBmP6ThX5IorAA3Ct0Y,34061
+pygpt_net/plugin/cmd_files/worker.py,sha256=PiXtTW3LzTk88eyZpxENEWJbkLd7-BkmtDUDMSeCPRQ,34062
 pygpt_net/plugin/cmd_history/__init__.py,sha256=uR7Wyly2StrLaazvkFSiPYxLb5EN4ua5mBlMhQHY4HM,11023
 pygpt_net/plugin/cmd_history/config.py,sha256=bp3-MDNd5tckiSs1mV-GWb5Y7_ZO_LHaerM9-4Yx7U4,9659
 pygpt_net/plugin/cmd_history/worker.py,sha256=-jGdiR94QHKOMISWIPtNLmCLFDG5UzibItvkbQx9KZo,6119
@@ -1867,16 +1867,17 @@ pygpt_net/provider/core/preset/patch.py,sha256=uGeOqz-JnFVXHAjnlto5I79O-HNXMLRSJ
 pygpt_net/provider/core/prompt/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pygpt_net/provider/core/prompt/base.py,sha256=EYUA30T1QwJ9RSD0uW5x6VEstgIXNwgutmaXI64BWhw,1304
 pygpt_net/provider/core/prompt/json_file.py,sha256=5yfW1RgEa36tX4-ntze4PavWLry0YG43D2LO23_MrzE,4838
-pygpt_net/provider/gpt/__init__.py,sha256=d2UJRKfZQfC9GkA-Ln0effgDIpvmY3n7bVQVXs0HXUE,10120
+pygpt_net/provider/gpt/__init__.py,sha256=707FMcQfP-NgTZIXEUq6fr6Gv0zkTzy4RpvIYgRnAB8,11396
 pygpt_net/provider/gpt/assistants.py,sha256=DSw1YB_J9n2rFD5CPDWZy59I38VSG6uLpYydGLTUPMQ,14083
 pygpt_net/provider/gpt/audio.py,sha256=frHElxYVaHYkNDCMJ9tQMoGqxSaZ-s5oPlAEHUAckkc,2032
 pygpt_net/provider/gpt/chat.py,sha256=W-p6njN843JyExMcyqD_ClzmWv8de9F4-LdLwjS_4Pg,10406
 pygpt_net/provider/gpt/completion.py,sha256=OusKOb4G11aYRJUjRWcMsf80cRQQvee9DzRe99ubLmc,6164
 pygpt_net/provider/gpt/image.py,sha256=ZqYrtVTcfPa8Kf08pWLKy1Zhvi6pu61GBlslRBauoK0,8967
+pygpt_net/provider/gpt/responses.py,sha256=HUgFXDoxOcO9B1ZlHD4yIbWu9AUbbLiiUyeS5JGti3c,9557
 pygpt_net/provider/gpt/store.py,sha256=FaVd7SBC_QQ0W26_odJwcrLH54CSq0UZXZnuwIhRm54,17315
 pygpt_net/provider/gpt/summarizer.py,sha256=449yUqxwshSqeVoO7WIZasTpYlopG1Z_1ShPE5rAnvc,2260
 pygpt_net/provider/gpt/utils.py,sha256=O0H0EPb4lXUMfE1bFdWB56yuWLv7M5owVIGWRyDDv-E,855
-pygpt_net/provider/gpt/vision.py,sha256=01fZLGo5HuD1LpWQAunsQyii83TPg132EPV9-aSpBHo,9029
+pygpt_net/provider/gpt/vision.py,sha256=1Imlr6U4xUE_N90UvAaLR60KGB_39xl5aGZbsiJ7JoQ,10001
 pygpt_net/provider/gpt/worker/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pygpt_net/provider/gpt/worker/assistants.py,sha256=MFUlFJ9Xe4VTJFOz5OtFHvOHkJnTr2wbeKDavCCDn00,21088
 pygpt_net/provider/gpt/worker/importer.py,sha256=zmu55TAWbSlRrI4Vk5llVhbiR6s7dskx3iaBgTrQ_js,15467
@@ -2183,8 +2184,8 @@ pygpt_net/ui/widget/textarea/web.py,sha256=2LebPHa_e5lvBqnIVzjwsLcFMoc11BonXgAUs
 pygpt_net/ui/widget/vision/__init__.py,sha256=8HT4tQFqQogEEpGYTv2RplKBthlsFKcl5egnv4lzzEw,488
 pygpt_net/ui/widget/vision/camera.py,sha256=T8b5cmK6uhf_WSSxzPt_Qod8JgMnst6q8sQqRvgQiSA,2584
 pygpt_net/utils.py,sha256=WtrdagJ-BlCjxGEEVq2rhsyAZMcU6JqltCXzOs823po,6707
-pygpt_net-2.5.15.dist-info/LICENSE,sha256=rbPqNB_xxANH8hKayJyIcTwD4bj4Y2G-Mcm85r1OImM,1126
-pygpt_net-2.5.15.dist-info/METADATA,sha256=uzXawCTEO0xxxSDARADtKKuS6ieUJH9yrzRiUM6Dsc8,171436
-pygpt_net-2.5.15.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
-pygpt_net-2.5.15.dist-info/entry_points.txt,sha256=qvpII6UHIt8XfokmQWnCYQrTgty8FeJ9hJvOuUFCN-8,43
-pygpt_net-2.5.15.dist-info/RECORD,,
+pygpt_net-2.5.16.dist-info/LICENSE,sha256=rbPqNB_xxANH8hKayJyIcTwD4bj4Y2G-Mcm85r1OImM,1126
+pygpt_net-2.5.16.dist-info/METADATA,sha256=Zc5Bu31TysIpS2JbdYADOyQIsLXWlgWDnd9pq0O1-b0,171615
+pygpt_net-2.5.16.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+pygpt_net-2.5.16.dist-info/entry_points.txt,sha256=qvpII6UHIt8XfokmQWnCYQrTgty8FeJ9hJvOuUFCN-8,43
+pygpt_net-2.5.16.dist-info/RECORD,,