pygpt-net 2.6.36__py3-none-any.whl → 2.6.38__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (96) hide show
  1. pygpt_net/CHANGELOG.txt +12 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/controller/chat/handler/anthropic_stream.py +164 -0
  4. pygpt_net/controller/chat/handler/google_stream.py +181 -0
  5. pygpt_net/controller/chat/handler/langchain_stream.py +24 -0
  6. pygpt_net/controller/chat/handler/llamaindex_stream.py +47 -0
  7. pygpt_net/controller/chat/handler/openai_stream.py +260 -0
  8. pygpt_net/controller/chat/handler/utils.py +210 -0
  9. pygpt_net/controller/chat/handler/worker.py +570 -0
  10. pygpt_net/controller/chat/handler/xai_stream.py +135 -0
  11. pygpt_net/controller/chat/stream.py +1 -1
  12. pygpt_net/controller/ctx/ctx.py +1 -1
  13. pygpt_net/controller/debug/debug.py +6 -6
  14. pygpt_net/controller/model/editor.py +3 -0
  15. pygpt_net/controller/model/importer.py +9 -2
  16. pygpt_net/controller/plugins/plugins.py +11 -3
  17. pygpt_net/controller/presets/presets.py +2 -2
  18. pygpt_net/core/bridge/context.py +35 -35
  19. pygpt_net/core/bridge/worker.py +40 -16
  20. pygpt_net/core/ctx/bag.py +7 -2
  21. pygpt_net/core/ctx/reply.py +17 -2
  22. pygpt_net/core/db/viewer.py +19 -34
  23. pygpt_net/core/render/plain/pid.py +12 -1
  24. pygpt_net/core/render/web/body.py +30 -39
  25. pygpt_net/core/tabs/tab.py +24 -1
  26. pygpt_net/data/config/config.json +10 -3
  27. pygpt_net/data/config/models.json +3 -3
  28. pygpt_net/data/config/settings.json +105 -0
  29. pygpt_net/data/css/style.dark.css +2 -3
  30. pygpt_net/data/css/style.light.css +2 -3
  31. pygpt_net/data/locale/locale.de.ini +3 -1
  32. pygpt_net/data/locale/locale.en.ini +19 -1
  33. pygpt_net/data/locale/locale.es.ini +3 -1
  34. pygpt_net/data/locale/locale.fr.ini +3 -1
  35. pygpt_net/data/locale/locale.it.ini +3 -1
  36. pygpt_net/data/locale/locale.pl.ini +4 -2
  37. pygpt_net/data/locale/locale.uk.ini +3 -1
  38. pygpt_net/data/locale/locale.zh.ini +3 -1
  39. pygpt_net/item/assistant.py +51 -2
  40. pygpt_net/item/attachment.py +21 -20
  41. pygpt_net/item/calendar_note.py +19 -2
  42. pygpt_net/item/ctx.py +115 -2
  43. pygpt_net/item/index.py +9 -2
  44. pygpt_net/item/mode.py +9 -6
  45. pygpt_net/item/model.py +20 -3
  46. pygpt_net/item/notepad.py +14 -2
  47. pygpt_net/item/preset.py +42 -2
  48. pygpt_net/item/prompt.py +8 -2
  49. pygpt_net/plugin/cmd_files/plugin.py +2 -2
  50. pygpt_net/provider/api/__init__.py +5 -3
  51. pygpt_net/provider/api/anthropic/__init__.py +190 -29
  52. pygpt_net/provider/api/anthropic/audio.py +30 -0
  53. pygpt_net/provider/api/anthropic/chat.py +341 -0
  54. pygpt_net/provider/api/anthropic/image.py +25 -0
  55. pygpt_net/provider/api/anthropic/tools.py +266 -0
  56. pygpt_net/provider/api/anthropic/vision.py +142 -0
  57. pygpt_net/provider/api/google/chat.py +2 -2
  58. pygpt_net/provider/api/google/realtime/client.py +2 -2
  59. pygpt_net/provider/api/google/tools.py +58 -48
  60. pygpt_net/provider/api/google/vision.py +7 -1
  61. pygpt_net/provider/api/openai/chat.py +1 -0
  62. pygpt_net/provider/api/openai/vision.py +6 -0
  63. pygpt_net/provider/api/x_ai/__init__.py +247 -0
  64. pygpt_net/provider/api/x_ai/audio.py +32 -0
  65. pygpt_net/provider/api/x_ai/chat.py +968 -0
  66. pygpt_net/provider/api/x_ai/image.py +208 -0
  67. pygpt_net/provider/api/x_ai/remote.py +262 -0
  68. pygpt_net/provider/api/x_ai/tools.py +120 -0
  69. pygpt_net/provider/api/x_ai/vision.py +119 -0
  70. pygpt_net/provider/core/attachment/json_file.py +2 -2
  71. pygpt_net/provider/core/config/patch.py +28 -0
  72. pygpt_net/provider/llms/anthropic.py +4 -2
  73. pygpt_net/tools/text_editor/tool.py +4 -1
  74. pygpt_net/tools/text_editor/ui/dialogs.py +1 -1
  75. pygpt_net/ui/base/config_dialog.py +5 -11
  76. pygpt_net/ui/dialog/db.py +177 -59
  77. pygpt_net/ui/dialog/dictionary.py +57 -59
  78. pygpt_net/ui/dialog/editor.py +3 -2
  79. pygpt_net/ui/dialog/image.py +1 -1
  80. pygpt_net/ui/dialog/logger.py +3 -2
  81. pygpt_net/ui/dialog/models.py +16 -16
  82. pygpt_net/ui/dialog/plugins.py +63 -60
  83. pygpt_net/ui/layout/ctx/ctx_list.py +3 -4
  84. pygpt_net/ui/layout/toolbox/__init__.py +2 -2
  85. pygpt_net/ui/layout/toolbox/assistants.py +8 -9
  86. pygpt_net/ui/layout/toolbox/presets.py +2 -2
  87. pygpt_net/ui/main.py +9 -4
  88. pygpt_net/ui/widget/element/labels.py +20 -4
  89. pygpt_net/ui/widget/textarea/editor.py +0 -4
  90. pygpt_net/ui/widget/textarea/web.py +1 -1
  91. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.38.dist-info}/METADATA +18 -6
  92. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.38.dist-info}/RECORD +95 -76
  93. pygpt_net/controller/chat/handler/stream_worker.py +0 -1136
  94. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.38.dist-info}/LICENSE +0 -0
  95. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.38.dist-info}/WHEEL +0 -0
  96. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.38.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,260 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # ================================================== #
4
+ # This file is a part of PYGPT package #
5
+ # Website: https://pygpt.net #
6
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
+ # MIT License #
8
+ # Created By : Marcin Szczygliński #
9
+ # Updated Date: 2025.09.05 00:00:00 #
10
+ # ================================================== #
11
+
12
+ import base64
13
+ import io
14
+ import json
15
+ from typing import Optional, Any
16
+
17
+ from .utils import capture_openai_usage
18
+
19
+
20
def process_api_chat(ctx, state, chunk) -> Optional[str]:
    """
    Handle one OpenAI-compatible Chat Completions streaming chunk.

    Returns the text delta (if any) and, as a side effect, accumulates
    streamed tool-call fragments into state.tool_calls and records
    top-level citations/usage when the chunk carries them. Tool calls may
    arrive as OpenAI objects or as plain dicts (xAI).

    :param ctx: Chat context
    :param state: Chat state
    :param chunk: Incoming streaming chunk
    :return: Extracted text delta or None
    """
    choices = getattr(chunk, "choices", None)
    delta = choices[0].delta if choices else None

    # Top-level citations (best effort; chunk shape varies by vendor).
    try:
        citations = getattr(chunk, "citations", None)
        if citations:
            state.citations = citations
            ctx.urls = citations
    except Exception:
        pass

    # Top-level usage (best effort).
    try:
        usage = getattr(chunk, "usage", None)
        if usage:
            capture_openai_usage(state, usage)
    except Exception:
        pass

    text = None
    if delta is not None and getattr(delta, "content", None) is not None:
        text = delta.content

    if delta is not None and getattr(delta, "tool_calls", None):
        state.force_func_call = True
        for part in delta.tool_calls:
            _merge_tool_call_fragment(state, part)

    return text


def _merge_tool_call_fragment(state, part) -> None:
    """Fold one streamed tool-call fragment (dict or object) into state.tool_calls."""
    # Normalize the two wire shapes into plain locals.
    if isinstance(part, dict):
        idx = part.get("index")
        call_id = part.get("id")
        fn = part.get("function") or {}
        name = fn.get("name")
        args = fn.get("arguments")
    else:
        idx = getattr(part, "index", None)
        call_id = getattr(part, "id", None)
        fn = getattr(part, "function", None)
        name = getattr(fn, "name", None) if fn else None
        args = getattr(fn, "arguments", None) if fn else None

    # Fall back to appending at the end when the index is missing/invalid.
    if idx is None or not isinstance(idx, int):
        idx = len(state.tool_calls)

    # Grow the accumulator so state.tool_calls[idx] exists.
    while len(state.tool_calls) <= idx:
        state.tool_calls.append({
            "id": "",
            "type": "function",
            "function": {"name": "", "arguments": ""}
        })
    entry = state.tool_calls[idx]

    # Some backends re-send the full id/name each chunk; the endswith check
    # avoids duplicating it while still concatenating true fragments.
    if call_id:
        frag = str(call_id)
        if not entry["id"]:
            entry["id"] = frag
        elif not entry["id"].endswith(frag):
            entry["id"] += frag

    if name:
        frag = str(name)
        fn_entry = entry["function"]
        if not fn_entry["name"]:
            fn_entry["name"] = frag
        elif not fn_entry["name"].endswith(frag):
            fn_entry["name"] += frag

    # Arguments are always appended; structured values are serialized first.
    if args is not None:
        if isinstance(args, (dict, list)):
            frag = json.dumps(args, ensure_ascii=False)
        else:
            frag = str(args)
        entry["function"]["arguments"] += frag
111
+
112
+
113
def process_api_chat_responses(ctx, core, state, chunk, etype: Optional[str]) -> Optional[str]:
    """
    OpenAI Responses API stream events.

    Dispatches on the event type string and mutates ctx/state as a side
    effect: tool calls accumulate in state.tool_calls, citations and
    container files in state.citations/state.files, usage is captured on
    completion, and partial images are written to state.img_path. Only
    text-like deltas are returned to the caller.

    :param ctx: Chat context
    :param core: Core controller
    :param state: Chat state
    :param chunk: Incoming streaming chunk (event object)
    :param etype: Event type, e.g. 'response.output_text.delta'
    :return: Extracted text delta or None
    """
    response = None

    if etype == "response.completed":
        # usage on final response (best effort)
        try:
            u = getattr(chunk.response, "usage", None)
            if u:
                capture_openai_usage(state, u)
        except Exception:
            pass

        # Final output may carry MCP metadata: tool listings, executed
        # calls and approval requests are persisted on the context item.
        for item in chunk.response.output:
            if item.type == "mcp_list_tools":
                core.api.openai.responses.mcp_tools = item.tools
            elif item.type == "mcp_call":
                call = {
                    "id": item.id,
                    "type": "mcp_call",
                    "approval_request_id": item.approval_request_id,
                    "arguments": item.arguments,
                    "error": item.error,
                    "name": item.name,
                    "output": item.output,
                    "server_label": item.server_label,
                }
                # Mirror the MCP call as a regular function tool call.
                state.tool_calls.append({
                    "id": item.id,
                    "call_id": "",
                    "type": "function",
                    "function": {"name": item.name, "arguments": item.arguments}
                })
                ctx.extra["mcp_call"] = call
                core.ctx.update_item(ctx)
            elif item.type == "mcp_approval_request":
                call = {
                    "id": item.id,
                    "type": "mcp_call",
                    "arguments": item.arguments,
                    "name": item.name,
                    "server_label": item.server_label,
                }
                ctx.extra["mcp_approval_request"] = call
                core.ctx.update_item(ctx)

    elif etype == "response.output_text.delta":
        response = chunk.delta

    elif etype == "response.output_item.added" and chunk.item.type == "function_call":
        # New function call: register it and open a buffer that collects
        # the incrementally streamed argument fragments.
        state.tool_calls.append({
            "id": chunk.item.id,
            "call_id": chunk.item.call_id,
            "type": "function",
            "function": {"name": chunk.item.name, "arguments": ""}
        })
        state.fn_args_buffers[chunk.item.id] = io.StringIO()

    elif etype == "response.function_call_arguments.delta":
        buf = state.fn_args_buffers.get(chunk.item_id)
        if buf is not None:
            buf.write(chunk.delta)

    elif etype == "response.function_call_arguments.done":
        # Arguments complete: flush the buffer into the matching tool call.
        buf = state.fn_args_buffers.pop(chunk.item_id, None)
        if buf is not None:
            try:
                args_val = buf.getvalue()
            finally:
                buf.close()
            for tc in state.tool_calls:
                if tc["id"] == chunk.item_id:
                    tc["function"]["arguments"] = args_val
                    break

    elif etype == "response.output_text.annotation.added":
        ann = chunk.annotation
        if ann['type'] == "url_citation":
            if state.citations is None:
                state.citations = []
            url_citation = ann['url']
            state.citations.append(url_citation)
            ctx.urls = state.citations
        elif ann['type'] == "container_file_citation":
            # File produced inside a code-interpreter container.
            state.files.append({
                "container_id": ann['container_id'],
                "file_id": ann['file_id'],
            })

    elif etype == "response.reasoning_summary_text.delta":
        response = chunk.delta

    elif etype == "response.output_item.done":
        # Delegate computer-use output items; they may add tool calls.
        tool_calls, has_calls = core.api.openai.computer.handle_stream_chunk(ctx, chunk, state.tool_calls)
        state.tool_calls = tool_calls
        if has_calls:
            state.force_func_call = True

    elif etype == "response.code_interpreter_call_code.delta":
        # Open a fenced block on the first code delta, then stream raw code.
        if not state.is_code:
            response = "\n\n**Code interpreter**\n```python\n" + chunk.delta
            state.is_code = True
        else:
            response = chunk.delta

    elif etype == "response.code_interpreter_call_code.done":
        response = "\n\n```\n-----------\n"

    elif etype == "response.image_generation_call.partial_image":
        # Each partial image overwrites the same target file, if configured.
        image_base64 = chunk.partial_image_b64
        image_bytes = base64.b64decode(image_base64)
        if state.img_path:
            with open(state.img_path, "wb") as f:
                f.write(image_bytes)
        del image_bytes
        state.is_image = True

    elif etype == "response.created":
        # Remember the server-side response id for follow-up requests.
        ctx.msg_id = str(chunk.response.id)
        core.ctx.update_item(ctx)

    elif etype in {"response.done", "response.failed", "error"}:
        # Terminal/no-op events; nothing to extract.
        pass

    return response
247
+
248
+
249
def process_api_completion(chunk) -> Optional[str]:
    """
    OpenAI (legacy) Completions stream text delta.

    :param chunk: Incoming streaming chunk
    :return: Extracted text delta or None
    """
    choices = getattr(chunk, "choices", None)
    if not choices:
        return None
    # A missing or None `text` attribute both map to None.
    return getattr(choices[0], "text", None)
@@ -0,0 +1,210 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # ================================================== #
4
+ # This file is a part of PYGPT package #
5
+ # Website: https://pygpt.net #
6
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
+ # MIT License #
8
+ # Created By : Marcin Szczygliński #
9
+ # Updated Date: 2025.09.05 00:00:00 #
10
+ # ================================================== #
11
+
12
+ import base64
13
+ from typing import Any, Optional
14
+
15
+
16
def safe_get(obj: Any, path: str) -> Any:
    """
    Resolve a dot-separated path against nested dicts, sequences and objects.

    Each segment is looked up with dict.get on mappings, as a positional
    index on lists/tuples when the segment is all digits, and via getattr
    otherwise. Any miss short-circuits to None.

    :param obj: Source object or dict
    :param path: Dot-separated path, e.g. 'a.b.0.c'
    :return: Value at path or None
    """
    node = obj
    for part in path.split("."):
        if node is None:
            return None
        if isinstance(node, dict):
            node = node.get(part)
        elif part.isdigit() and isinstance(node, (list, tuple)):
            pos = int(part)
            if pos >= len(node):
                return None
            node = node[pos]
        else:
            node = getattr(node, part, None)
    return node
40
+
41
+
42
def as_int(val: Any) -> Optional[int]:
    """
    Best-effort integer coercion.

    Tries int() first, then a float round-trip (handles strings such as
    '3.5'); returns None when the value is None or cannot be coerced.

    :param val: Input value
    :return: int or None
    """
    if val is None:
        return None
    for convert in (int, lambda v: int(float(v))):
        try:
            return convert(val)
        except Exception:
            continue
    return None
58
+
59
+
60
def capture_openai_usage(state, u_obj: Any):
    """
    Extract token usage from an OpenAI/xAI-compatible usage object/dict.

    Populates state.usage_vendor and state.usage_payload. Field names
    differ between the Responses API (input_tokens/output_tokens) and Chat
    Completions (prompt_tokens/completion_tokens); the first field that is
    actually present wins. Reasoning tokens are folded into the output
    count so the payload reflects total generated tokens.

    :param state: Chat state
    :param u_obj: Usage object/dict
    """
    if not u_obj:
        return

    def _first_int(*paths: str) -> Optional[int]:
        # First field that coerces to an int. An explicit None check is
        # required: `or`-chaining would wrongly skip a legitimate 0 count
        # and fall through to a field that may not exist at all.
        for p in paths:
            v = as_int(safe_get(u_obj, p))
            if v is not None:
                return v
        return None

    state.usage_vendor = "openai"
    in_tok = _first_int("input_tokens", "prompt_tokens")
    out_tok = _first_int("output_tokens", "completion_tokens")
    total = as_int(safe_get(u_obj, "total_tokens"))
    reasoning = _first_int(
        "output_tokens_details.reasoning_tokens",
        "completion_tokens_details.reasoning_tokens",
        "reasoning_tokens",
    ) or 0
    state.usage_payload = {
        "in": in_tok,
        "out": (out_tok or 0) + reasoning,
        "reasoning": reasoning,
        "total": total,
    }
81
+
82
+
83
def capture_google_usage(state, um_obj: Any):
    """
    Extract token usage from Google python-genai usage metadata.

    Output tokens are preferably derived as total - prompt so reasoning
    tokens are included; falls back to the candidates count when totals
    are missing. Populates state.usage_vendor and state.usage_payload.

    :param state: Chat state
    :param um_obj: Usage metadata object/dict
    """
    if not um_obj:
        return

    def _first_int(*paths: str) -> Optional[int]:
        # First field that coerces to an int. Explicit None check keeps a
        # legitimate 0 count from being skipped by `or`-chaining onto a
        # field that may be absent.
        for p in paths:
            v = as_int(safe_get(um_obj, p))
            if v is not None:
                return v
        return None

    state.usage_vendor = "google"
    prompt = _first_int("prompt_token_count", "prompt_tokens", "input_tokens")
    total = _first_int("total_token_count", "total_tokens")
    candidates = _first_int("candidates_token_count", "output_tokens")
    reasoning = _first_int("candidates_reasoning_token_count", "reasoning_tokens") or 0
    if total is not None and prompt is not None:
        # Derived output includes reasoning tokens; clamp against negatives.
        out_total = max(0, total - prompt)
    else:
        out_total = candidates
    state.usage_payload = {"in": prompt, "out": out_total, "reasoning": reasoning, "total": total}
115
+
116
+
117
def collect_google_citations(ctx, state, chunk: Any):
    """
    Collect web citations (URLs) from a Google GenAI stream chunk.

    Walks each candidate's grounding metadata, citation metadata and
    content parts, probing both snake_case and camelCase field names
    (the SDK surface varies by version), and appends every http(s) URL
    found — deduplicated — to state.citations and ctx.urls.

    :param ctx: Chat context
    :param state: Chat state
    :param chunk: Incoming streaming chunk
    """
    try:
        cands = getattr(chunk, "candidates", None) or []
    except Exception:
        cands = []

    # Normalize the accumulator so membership tests below are safe.
    if not isinstance(state.citations, list):
        state.citations = []

    def _add_url(url: Optional[str]):
        # Accept only non-empty http(s) strings; dedupe into both sinks.
        if not url or not isinstance(url, str):
            return
        url = url.strip()
        if not (url.startswith("http://") or url.startswith("https://")):
            return
        if ctx.urls is None:
            ctx.urls = []
        if url not in state.citations:
            state.citations.append(url)
        if url not in ctx.urls:
            ctx.urls.append(url)

    for cand in cands:
        # 1) Grounding metadata: attribution sources and search entry point.
        gm = safe_get(cand, "grounding_metadata") or safe_get(cand, "groundingMetadata")
        if gm:
            atts = safe_get(gm, "grounding_attributions") or safe_get(gm, "groundingAttributions") or []
            try:
                for att in atts or []:
                    for path in (
                        "web.uri",
                        "web.url",
                        "source.web.uri",
                        "source.web.url",
                        "source.uri",
                        "source.url",
                        "uri",
                        "url",
                    ):
                        _add_url(safe_get(att, path))
            except Exception:
                pass
            for path in (
                "search_entry_point.uri",
                "search_entry_point.url",
                "searchEntryPoint.uri",
                "searchEntryPoint.url",
                "search_entry_point.rendered_content_uri",
                "searchEntryPoint.rendered_content_uri",
            ):
                _add_url(safe_get(gm, path))

        # 2) Candidate-level citation metadata.
        cm = safe_get(cand, "citation_metadata") or safe_get(cand, "citationMetadata")
        if cm:
            cit_arrays = (
                safe_get(cm, "citation_sources") or
                safe_get(cm, "citationSources") or
                safe_get(cm, "citations") or []
            )
            try:
                for cit in cit_arrays or []:
                    for path in ("uri", "url", "source.uri", "source.url", "web.uri", "web.url"):
                        _add_url(safe_get(cit, path))
            except Exception:
                pass

        # 3) Per-part citation metadata and grounding attributions.
        try:
            parts = safe_get(cand, "content.parts") or []
            for p in parts:
                pcm = safe_get(p, "citation_metadata") or safe_get(p, "citationMetadata")
                if pcm:
                    arr = (
                        safe_get(pcm, "citation_sources") or
                        safe_get(pcm, "citationSources") or
                        safe_get(pcm, "citations") or []
                    )
                    for cit in arr or []:
                        for path in ("uri", "url", "source.uri", "source.url", "web.uri", "web.url"):
                            _add_url(safe_get(cit, path))
                gpa = safe_get(p, "grounding_attributions") or safe_get(p, "groundingAttributions") or []
                for att in gpa or []:
                    for path in ("web.uri", "web.url", "source.web.uri", "source.web.url", "uri", "url"):
                        _add_url(safe_get(att, path))
        except Exception:
            pass

    # If nothing reached ctx.urls (e.g. it was never initialized), mirror
    # the collected citations onto the context.
    if state.citations and (ctx.urls is None or not ctx.urls):
        ctx.urls = list(state.citations)