pygpt-net 2.6.30__py3-none-any.whl → 2.6.31__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pygpt_net/CHANGELOG.txt +8 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/app.py +4 -0
- pygpt_net/controller/__init__.py +5 -2
- pygpt_net/controller/audio/audio.py +25 -1
- pygpt_net/controller/audio/ui.py +2 -2
- pygpt_net/controller/chat/audio.py +1 -8
- pygpt_net/controller/chat/common.py +29 -3
- pygpt_net/controller/chat/handler/__init__.py +0 -0
- pygpt_net/controller/chat/handler/stream_worker.py +1124 -0
- pygpt_net/controller/chat/output.py +8 -3
- pygpt_net/controller/chat/stream.py +3 -1071
- pygpt_net/controller/chat/text.py +3 -2
- pygpt_net/controller/kernel/kernel.py +11 -3
- pygpt_net/controller/kernel/reply.py +5 -1
- pygpt_net/controller/realtime/__init__.py +12 -0
- pygpt_net/controller/realtime/manager.py +53 -0
- pygpt_net/controller/realtime/realtime.py +268 -0
- pygpt_net/controller/ui/mode.py +7 -0
- pygpt_net/controller/ui/ui.py +19 -1
- pygpt_net/core/audio/audio.py +6 -1
- pygpt_net/core/audio/backend/native/__init__.py +12 -0
- pygpt_net/core/audio/backend/{native.py → native/native.py} +426 -127
- pygpt_net/core/audio/backend/native/player.py +139 -0
- pygpt_net/core/audio/backend/native/realtime.py +250 -0
- pygpt_net/core/audio/backend/pyaudio/__init__.py +12 -0
- pygpt_net/core/audio/backend/pyaudio/playback.py +194 -0
- pygpt_net/core/audio/backend/pyaudio/pyaudio.py +923 -0
- pygpt_net/core/audio/backend/pyaudio/realtime.py +275 -0
- pygpt_net/core/audio/backend/pygame/__init__.py +12 -0
- pygpt_net/core/audio/backend/{pygame.py → pygame/pygame.py} +130 -19
- pygpt_net/core/audio/backend/shared/__init__.py +38 -0
- pygpt_net/core/audio/backend/shared/conversions.py +211 -0
- pygpt_net/core/audio/backend/shared/envelope.py +38 -0
- pygpt_net/core/audio/backend/shared/player.py +137 -0
- pygpt_net/core/audio/backend/shared/rt.py +52 -0
- pygpt_net/core/audio/capture.py +5 -0
- pygpt_net/core/audio/output.py +13 -2
- pygpt_net/core/audio/whisper.py +6 -2
- pygpt_net/core/bridge/bridge.py +2 -1
- pygpt_net/core/bridge/worker.py +4 -1
- pygpt_net/core/dispatcher/dispatcher.py +37 -1
- pygpt_net/core/events/__init__.py +2 -1
- pygpt_net/core/events/realtime.py +55 -0
- pygpt_net/core/image/image.py +51 -1
- pygpt_net/core/realtime/__init__.py +0 -0
- pygpt_net/core/realtime/options.py +87 -0
- pygpt_net/core/realtime/shared/__init__.py +0 -0
- pygpt_net/core/realtime/shared/audio.py +213 -0
- pygpt_net/core/realtime/shared/loop.py +64 -0
- pygpt_net/core/realtime/shared/session.py +59 -0
- pygpt_net/core/realtime/shared/text.py +37 -0
- pygpt_net/core/realtime/shared/tools.py +276 -0
- pygpt_net/core/realtime/shared/turn.py +38 -0
- pygpt_net/core/realtime/shared/types.py +16 -0
- pygpt_net/core/realtime/worker.py +164 -0
- pygpt_net/core/types/__init__.py +1 -0
- pygpt_net/core/types/image.py +48 -0
- pygpt_net/data/config/config.json +10 -4
- pygpt_net/data/config/models.json +149 -103
- pygpt_net/data/config/settings.json +50 -0
- pygpt_net/data/locale/locale.de.ini +5 -5
- pygpt_net/data/locale/locale.en.ini +19 -13
- pygpt_net/data/locale/locale.es.ini +5 -5
- pygpt_net/data/locale/locale.fr.ini +5 -5
- pygpt_net/data/locale/locale.it.ini +5 -5
- pygpt_net/data/locale/locale.pl.ini +5 -5
- pygpt_net/data/locale/locale.uk.ini +5 -5
- pygpt_net/data/locale/locale.zh.ini +1 -1
- pygpt_net/data/locale/plugin.audio_input.en.ini +4 -0
- pygpt_net/data/locale/plugin.audio_output.en.ini +4 -0
- pygpt_net/plugin/audio_input/plugin.py +37 -4
- pygpt_net/plugin/audio_input/simple.py +57 -8
- pygpt_net/plugin/cmd_files/worker.py +3 -0
- pygpt_net/provider/api/google/__init__.py +39 -6
- pygpt_net/provider/api/google/audio.py +8 -1
- pygpt_net/provider/api/google/chat.py +45 -6
- pygpt_net/provider/api/google/image.py +226 -86
- pygpt_net/provider/api/google/realtime/__init__.py +12 -0
- pygpt_net/provider/api/google/realtime/client.py +1945 -0
- pygpt_net/provider/api/google/realtime/realtime.py +186 -0
- pygpt_net/provider/api/openai/__init__.py +22 -2
- pygpt_net/provider/api/openai/realtime/__init__.py +12 -0
- pygpt_net/provider/api/openai/realtime/client.py +1828 -0
- pygpt_net/provider/api/openai/realtime/realtime.py +194 -0
- pygpt_net/provider/audio_input/google_genai.py +103 -0
- pygpt_net/provider/audio_output/google_genai_tts.py +229 -0
- pygpt_net/provider/audio_output/google_tts.py +0 -12
- pygpt_net/provider/audio_output/openai_tts.py +8 -5
- pygpt_net/provider/core/config/patch.py +15 -0
- pygpt_net/provider/core/model/patch.py +11 -0
- pygpt_net/provider/llms/google.py +8 -9
- pygpt_net/ui/layout/toolbox/footer.py +16 -0
- pygpt_net/ui/layout/toolbox/image.py +5 -0
- pygpt_net/ui/widget/option/combo.py +15 -1
- {pygpt_net-2.6.30.dist-info → pygpt_net-2.6.31.dist-info}/METADATA +26 -14
- {pygpt_net-2.6.30.dist-info → pygpt_net-2.6.31.dist-info}/RECORD +100 -62
- pygpt_net/core/audio/backend/pyaudio.py +0 -554
- {pygpt_net-2.6.30.dist-info → pygpt_net-2.6.31.dist-info}/LICENSE +0 -0
- {pygpt_net-2.6.30.dist-info → pygpt_net-2.6.31.dist-info}/WHEEL +0 -0
- {pygpt_net-2.6.30.dist-info → pygpt_net-2.6.31.dist-info}/entry_points.txt +0 -0
|
@@ -0,0 +1,276 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
# -*- coding: utf-8 -*-
|
|
3
|
+
# ================================================== #
|
|
4
|
+
# This file is a part of PYGPT package #
|
|
5
|
+
# Website: https://pygpt.net #
|
|
6
|
+
# GitHub: https://github.com/szczyglis-dev/py-gpt #
|
|
7
|
+
# MIT License #
|
|
8
|
+
# Created By : Marcin Szczygliński #
|
|
9
|
+
# Updated Date: 2025.08.31 23:00:00 #
|
|
10
|
+
# ================================================== #
|
|
11
|
+
|
|
12
|
+
import json
|
|
13
|
+
from typing import Any, Optional, List, Dict
|
|
14
|
+
|
|
15
|
+
def sanitize_function_tools(tools) -> list:
    """
    OpenAI: Normalize function tools into a flat dict shape:
        {"type":"function","name","description","parameters", ...}
    Accepts legacy {"type":"function","function":{...}} and flattens it.
    """
    def _flatten(entry: dict) -> Optional[dict]:
        """Flatten one tool entry; return None when it must be skipped."""
        kind = (entry.get("type") or "function").lower()
        if kind != "function":
            return None  # non-function tools are not handled here
        legacy = entry.get("function")
        if isinstance(legacy, dict):
            flat = {"type": "function"}
            for key in ("name", "description", "parameters", "strict", "strict_schema"):
                if key in legacy and legacy[key] is not None:
                    flat[key] = legacy[key]
            # fall back to the outer description when the nested one is absent
            if "description" not in flat and entry.get("description"):
                flat["description"] = entry["description"]
        else:
            flat = {
                "type": "function",
                "name": entry.get("name"),
                "description": entry.get("description"),
                "parameters": entry.get("parameters"),
            }
            for key in ("strict", "strict_schema"):
                if key in entry:
                    flat[key] = entry[key]
        if not flat.get("name"):
            return None  # unusable without a function name
        if not isinstance(flat.get("parameters"), dict):
            flat["parameters"] = {"type": "object", "properties": {}}
        return flat

    sanitized = []
    for item in (tools or []):
        if not isinstance(item, dict):
            continue
        flat = _flatten(dict(item))
        if flat is not None:
            sanitized.append(flat)
    return sanitized
|
|
55
|
+
|
|
56
|
+
def sanitize_remote_tools(remote_tools) -> list:
    """
    OpenAI: Pass-through for non-function tools (ensure lowercased 'type').

    Fix: the allowed-set filter previously ran on the raw 'type' value
    before normalization, so case variants ("MCP", "Function") were
    silently dropped even though the type was then lowercased anyway.
    Normalize first, then filter.
    """
    allowed = {"function", "mcp"}  # Realtime accepts only these
    out = []
    if not remote_tools:
        return out
    for t in remote_tools:
        if not isinstance(t, dict):
            continue
        tt = dict(t)
        ttype = tt.get("type")
        if not ttype:
            continue
        # lowercase before the membership test so case variants pass
        ttype = str(ttype).lower()
        if ttype not in allowed:
            continue
        tt["type"] = ttype
        out.append(tt)
    return out
|
|
74
|
+
|
|
75
|
+
def tools_signature(tools_list: list) -> str:
    """
    Order-insensitive stable signature for a tools list.

    Each tool is canonicalized (dict keys sorted recursively, list order
    preserved), JSON-encoded compactly, then the encodings are sorted and
    joined — so two lists with the same tools in any order compare equal.
    """
    def _canonical(value):
        if isinstance(value, dict):
            return {key: _canonical(val) for key, val in sorted(value.items())}
        if isinstance(value, list):
            return [_canonical(item) for item in value]
        return value

    try:
        encoded = sorted(
            json.dumps(_canonical(tool), ensure_ascii=False, sort_keys=True, separators=(",", ":"))
            for tool in (tools_list or [])
        )
        return "|".join(encoded)
    except Exception:
        # best effort: fall back to repr when something is not JSON-serializable
        return str(tools_list)
|
|
90
|
+
|
|
91
|
+
def prepare_tools_for_session(opts) -> list:
    """Compose session.tools: sanitized opts.remote_tools followed by sanitized opts.tools."""
    remote = sanitize_remote_tools(getattr(opts, "remote_tools", None))
    functions = sanitize_function_tools(getattr(opts, "tools", None))
    return list(remote) + list(functions)
|
|
96
|
+
|
|
97
|
+
def prepare_tools_for_response(opts) -> tuple[list, Optional[str]]:
    """Compose per-response function tools and the configured tool_choice."""
    return (
        sanitize_function_tools(getattr(opts, "tools", None)),
        getattr(opts, "tool_choice", None),
    )
|
|
102
|
+
|
|
103
|
+
def build_tool_outputs_payload(results, last_tool_calls: List[Dict]) -> List[Dict]:
    """
    Normalize 'results' into:
        [{"call_id": str, "previous_item_id": str|None, "output": str}]
    Matching priority: call_id -> item.id -> function name -> first unused.

    Fix: the wrapped-dict ({"function_responses"/"tool_outputs": [...]})
    branch and the bare-list branch previously duplicated an identical
    per-item loop body; both now share one normalization pass.
    """
    calls = list(last_tool_calls or [])
    by_id = {c.get("id") or "": c for c in calls if c.get("id")}
    by_call = {c.get("call_id") or "": c for c in calls if c.get("call_id")}
    by_name: dict[str, list] = {}
    for c in calls:
        nm = ((c.get("function") or {}).get("name") or "").strip()
        if nm:
            by_name.setdefault(nm, []).append(c)

    used: set[str] = set()
    out: list[dict] = []

    def to_str(val) -> str:
        """Render any result value as a string (JSON for containers)."""
        if val is None:
            return ""
        if isinstance(val, (dict, list)):
            try:
                return json.dumps(val, ensure_ascii=False)
            except Exception:
                return str(val)
        return str(val)

    def pick_name(name: str):
        """First unused call registered under this function name, or None."""
        for cand in (by_name.get(name) or []):
            cid = cand.get("call_id") or ""
            if cid and cid not in used:
                used.add(cid)
                return cand
        return None

    def pick_first():
        """First unused call overall, or None."""
        for cand in calls:
            cid = cand.get("call_id") or ""
            if cid and cid not in used:
                used.add(cid)
                return cand
        return None

    def append_match(call: dict, output) -> None:
        """Append one normalized output entry for a matched call."""
        out.append({
            "call_id": call.get("call_id"),
            "previous_item_id": call.get("id"),
            "output": to_str(output),
        })

    # Unwrap {"function_responses"/"tool_outputs": [...]} or accept a bare list;
    # both shapes share the same per-item matching below.
    items = None
    if isinstance(results, dict) and ("function_responses" in results or "tool_outputs" in results):
        items = results.get("function_responses") or results.get("tool_outputs") or []
    elif isinstance(results, list):
        items = results

    if items is not None:
        for it in items:
            if not isinstance(it, dict):
                # opaque value: attach it to the first unused call
                c = pick_first()
                if c:
                    append_match(c, it)
                continue
            cid = it.get("call_id") or it.get("id") or it.get("tool_call_id") or ""
            nm = it.get("name") or ""
            resp = it.get("response")
            if resp is None:
                resp = it.get("result") or it.get("output") or it.get("content")
            c = by_call.get(cid) or by_id.get(cid) or (pick_name(nm) if nm else pick_first())
            if c:
                append_match(c, resp)
        return out

    # Mapping of call_id / item id / function name -> result value.
    if isinstance(results, dict):
        for k, v in results.items():
            if not isinstance(k, str):
                continue
            c = by_call.get(k) or by_id.get(k) or pick_name(k)
            if c:
                append_match(c, v)
        return out

    # Scalar fallback: attach the whole result to the first unused call.
    c = pick_first()
    if c:
        append_match(c, results)
    return out
|
|
196
|
+
|
|
197
|
+
def build_function_responses_payload(results, last_tool_calls: List[Dict]) -> List[Dict]:
    """
    Produce neutral list of dicts for Google:
        [{"id": "...", "name": "...", "response": {...}}]
    Provider converts to gtypes.FunctionResponse downstream.
    """
    pending = list(last_tool_calls or [])
    calls_by_id = {c.get("id") or "": c for c in pending if c.get("id")}
    calls_by_name: dict[str, list] = {}
    for call in pending:
        fn_name = (call.get("function") or {}).get("name") or ""
        if fn_name:
            calls_by_name.setdefault(fn_name, []).append(call)

    consumed: set[str] = set()

    def claim_id(name: str) -> str:
        """First unused call id registered under this name (or "")."""
        for cand in (calls_by_name.get(name) or []):
            cand_id = cand.get("id") or ""
            if cand_id and cand_id not in consumed:
                consumed.add(cand_id)
                return cand_id
        return ""

    def as_response(val):
        """Dicts pass through; anything else is wrapped as {"result": str}."""
        return val if isinstance(val, dict) else {"result": str(val)}

    payload: list = []

    # Case 1: already wrapped as {"function_responses": [...]}
    if isinstance(results, dict) and "function_responses" in results:
        for entry in (results.get("function_responses") or []):
            body = entry.get("response")
            if body is None:
                body = entry.get("result") or entry.get("output") or entry.get("content") or {}
            payload.append({
                "id": entry.get("id") or "",
                "name": entry.get("name") or "",
                "response": as_response(body),
            })
        return payload

    # Case 2: bare list of result items
    if isinstance(results, list):
        for entry in results:
            if not isinstance(entry, dict):
                # opaque value: attribute it to the first recorded call
                if pending:
                    first = pending[0]
                    first_id = first.get("id") or ""
                    consumed.add(first_id)
                    payload.append({
                        "id": first_id,
                        "name": (first.get("function") or {}).get("name") or "",
                        "response": as_response(entry),
                    })
                continue
            entry_id = entry.get("id") or entry.get("call_id") or entry.get("tool_call_id") or ""
            entry_name = entry.get("name") or ""
            body = entry.get("response")
            if body is None:
                body = entry.get("result") or entry.get("output") or entry.get("content") or {}
            if not entry_id and entry_name:
                entry_id = claim_id(entry_name)
            if entry_id:
                consumed.add(entry_id)
            payload.append({"id": entry_id, "name": entry_name, "response": as_response(body)})
        return payload

    # Case 3: mapping of id-or-name -> result value
    if isinstance(results, dict):
        for key, val in results.items():
            if not isinstance(key, str):
                continue
            if key in calls_by_id:
                consumed.add(key)
                payload.append({
                    "id": key,
                    "name": (calls_by_id[key].get("function") or {}).get("name") or "",
                    "response": as_response(val),
                })
            else:
                # treat the key as a function name and claim a matching id
                payload.append({"id": claim_id(key), "name": key, "response": as_response(val)})
        return payload

    return payload
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
# -*- coding: utf-8 -*-
|
|
3
|
+
# ================================================== #
|
|
4
|
+
# This file is a part of PYGPT package #
|
|
5
|
+
# Website: https://pygpt.net #
|
|
6
|
+
# GitHub: https://github.com/szczyglis-dev/py-gpt #
|
|
7
|
+
# MIT License #
|
|
8
|
+
# Created By : Marcin Szczygliński #
|
|
9
|
+
# Updated Date: 2025.08.31 23:00:00 #
|
|
10
|
+
# ================================================== #
|
|
11
|
+
|
|
12
|
+
from enum import Enum
|
|
13
|
+
|
|
14
|
+
class TurnMode(str, Enum):
    """Turn-taking strategy for a realtime session."""
    # caller explicitly commits each turn
    MANUAL = "manual"
    # future (server VAD / automatic activity detection)
    AUTO = "auto"
|
|
17
|
+
|
|
18
|
+
def apply_turn_mode_openai(session_payload: dict, mode: TurnMode):
    """
    Mutate OpenAI session.update payload to reflect turn mode.
    Manual: turn_detection=None (default).
    Auto: enable server VAD if available.
    """
    session = session_payload.setdefault("session", {})
    session["turn_detection"] = {"type": "server_vad"} if mode == TurnMode.AUTO else None
|
|
29
|
+
|
|
30
|
+
def apply_turn_mode_google(live_cfg: dict, mode: TurnMode):
    """
    Mutate Google Live connect config to reflect turn mode.
    Manual: automatic_activity_detection.disabled=True
    Auto: disabled=False (server handles VAD).
    """
    detection = live_cfg.setdefault(
        "realtime_input_config", {}
    ).setdefault("automatic_activity_detection", {})
    detection["disabled"] = mode != TurnMode.AUTO
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
# -*- coding: utf-8 -*-
|
|
3
|
+
# ================================================== #
|
|
4
|
+
# This file is a part of PYGPT package #
|
|
5
|
+
# Website: https://pygpt.net #
|
|
6
|
+
# GitHub: https://github.com/szczyglis-dev/py-gpt #
|
|
7
|
+
# MIT License #
|
|
8
|
+
# Created By : Marcin Szczygliński #
|
|
9
|
+
# Updated Date: 2025.08.31 23:00:00 #
|
|
10
|
+
# ================================================== #
|
|
11
|
+
|
|
12
|
+
from typing import Optional, Callable, Awaitable
|
|
13
|
+
|
|
14
|
+
# Awaitable callback invoked with each text delta.
TextCallback = Callable[[str], Awaitable[None]]
# Awaitable callback invoked with (data, mime, rate, channels, final) per audio chunk.
AudioCallback = Callable[[bytes, str, Optional[int], Optional[int], bool], Awaitable[None]]
# Synchronous predicate; returning True requests the session to stop.
StopCallback = Callable[[], bool]
|
|
@@ -0,0 +1,164 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
# -*- coding: utf-8 -*-
|
|
3
|
+
# ================================================== #
|
|
4
|
+
# This file is a part of PYGPT package #
|
|
5
|
+
# Website: https://pygpt.net #
|
|
6
|
+
# GitHub: https://github.com/szczyglis-dev/py-gpt #
|
|
7
|
+
# MIT License #
|
|
8
|
+
# Created By : Marcin Szczygliński #
|
|
9
|
+
# Updated Date: 2025.08.30 06:00:00 #
|
|
10
|
+
# ================================================== #
|
|
11
|
+
|
|
12
|
+
import asyncio
|
|
13
|
+
from typing import Optional
|
|
14
|
+
|
|
15
|
+
from PySide6.QtCore import Slot, QRunnable, QObject, Signal
|
|
16
|
+
|
|
17
|
+
from pygpt_net.core.events import RealtimeEvent
|
|
18
|
+
from pygpt_net.item.ctx import CtxItem
|
|
19
|
+
|
|
20
|
+
from .options import RealtimeOptions
|
|
21
|
+
|
|
22
|
+
class RealtimeSignals(QObject):
    """Qt signal bridge carrying realtime events to the main thread."""
    # payload is a RealtimeEvent instance
    response = Signal(object)
|
|
25
|
+
|
|
26
|
+
class RealtimeWorker(QRunnable):
    """
    QRunnable worker that runs a provider-specific realtime session (websocket).

    - RT_OUTPUT_READY is emitted when the audio output is ready (STREAM_BEGIN).
    - RT_OUTPUT_TEXT_DELTA is emitted for text deltas.
    - RT_OUTPUT_AUDIO_DELTA is emitted for audio chunks to be handled by the main-thread AudioDispatcher.
    - RT_OUTPUT_AUDIO_END is emitted when the session ends with an error.
    - RT_OUTPUT_AUDIO_ERROR is emitted on error.

    Fixes: the `emit(...) if rt_signals else None` expression-statements are
    replaced with a guarded `_emit()` helper; the error-path emit is now also
    exception-safe, so a failing `.emit()` can no longer escape `run()` into
    the Qt thread pool.
    """
    def __init__(
            self,
            window,
            ctx: CtxItem,
            opts: RealtimeOptions
    ):
        """
        Initialize the worker.

        :param window: Window instance
        :param ctx: CtxItem
        :param opts: RealtimeOptions
        """
        super().__init__()
        self.window = window
        self.ctx = ctx
        self.opts = opts

    def _emit(self, event) -> None:
        """Emit a RealtimeEvent on the options' signal bridge; never raises."""
        try:
            if self.opts.rt_signals:
                self.opts.rt_signals.response.emit(event)
        except Exception:
            pass

    def get_client(self, provider: str):
        """
        Get the appropriate client based on the provider

        :param provider: Provider name
        :return: Client instance
        """
        provider = (provider or "openai").lower()
        if provider == "google":
            return self.window.core.api.google.realtime.handler
        if provider == "openai":
            return self.window.core.api.openai.realtime.handler
        raise RuntimeError(f"Unsupported realtime provider: {provider}")

    @Slot()
    def run(self):
        loop = None  # ensure defined for cleanup

        # STREAM_BEGIN -> UI
        self._emit(RealtimeEvent(RealtimeEvent.RT_OUTPUT_READY, {
            "ctx": self.ctx,
        }))

        try:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)

            async def _amain():
                # Text deltas -> UI
                async def on_text(delta: str):
                    if not delta:
                        return
                    self._emit(RealtimeEvent(RealtimeEvent.RT_OUTPUT_TEXT_DELTA, {
                        "ctx": self.ctx,
                        "chunk": delta,
                    }))

                # Audio -> enqueue to main-thread
                async def on_audio(
                        data: bytes,
                        mime: str,
                        rate: Optional[int],
                        channels: Optional[int],
                        final: bool = False
                ):
                    self._emit(RealtimeEvent(RealtimeEvent.RT_OUTPUT_AUDIO_DELTA, {
                        "payload": {
                            "ctx": self.ctx,
                            "data": data or b"",
                            "mime": mime or "audio/pcm",
                            "rate": int(rate) if rate is not None else None,
                            "channels": int(channels) if channels is not None else None,
                            "final": bool(final),
                            "provider": self.opts.provider,
                            "model": self.opts.model,
                        }
                    }))

                def _should_stop() -> bool:
                    # best effort: a broken kernel lookup must not kill the session
                    try:
                        return bool(self.window.controller.kernel.stopped())
                    except Exception:
                        return False

                # run the client
                client = self.get_client(self.opts.provider)
                await client.run(self.ctx, self.opts, on_text, on_audio, _should_stop)

            loop.run_until_complete(_amain())
            # print("[rt] STREAM_END")

        except Exception as e:
            # report the error, then always signal stream end (both emits are
            # guarded, so neither can raise out of this handler)
            try:
                self._emit(RealtimeEvent(RealtimeEvent.RT_OUTPUT_AUDIO_ERROR, {"error": e}))
            finally:
                self._emit(RealtimeEvent(RealtimeEvent.RT_OUTPUT_AUDIO_END, {"ctx": self.ctx}))
        finally:
            # Robust asyncio teardown to avoid hangs on subsequent runs
            if loop is not None:
                try:
                    pending = [t for t in asyncio.all_tasks(loop) if not t.done()]
                    for t in pending:
                        t.cancel()
                    if pending:
                        loop.run_until_complete(asyncio.gather(*pending, return_exceptions=True))
                except Exception:
                    pass
                try:
                    loop.run_until_complete(loop.shutdown_asyncgens())
                except Exception:
                    pass
                try:
                    loop.close()
                except Exception:
                    pass
                try:
                    asyncio.set_event_loop(None)
                except Exception:
                    pass
|
pygpt_net/core/types/__init__.py
CHANGED
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
# -*- coding: utf-8 -*-
|
|
3
|
+
# ================================================== #
|
|
4
|
+
# This file is a part of PYGPT package #
|
|
5
|
+
# Website: https://pygpt.net #
|
|
6
|
+
# GitHub: https://github.com/szczyglis-dev/py-gpt #
|
|
7
|
+
# MIT License #
|
|
8
|
+
# Created By : Marcin Szczygliński #
|
|
9
|
+
# Updated Date: 2025.07.13 01:00:00 #
|
|
10
|
+
# ================================================== #
|
|
11
|
+
|
|
12
|
+
# Supported output resolutions per image-model family.
# Keys are model-id prefixes; each maps display label -> API value.
IMAGE_AVAILABLE_RESOLUTIONS = {
    # OpenAI gpt-image: square + landscape/portrait 3:2, or auto-select
    "gpt-image": {
        "auto": "auto",
        "1024x1024": "1024x1024",
        "1536x1024": "1536x1024",
        "1024x1536": "1024x1536"
    },
    # DALL-E 3: square + wide/tall 16:9-ish
    "dall-e-3": {
        "1792x1024": "1792x1024",
        "1024x1792": "1024x1792",
        "1024x1024": "1024x1024"
    },
    # DALL-E 2: squares only
    "dall-e-2": {
        "1024x1024": "1024x1024",
        "512x512": "512x512",
        "256x256": "256x256"
    },
    # Google Imagen 3: 1K tier
    "imagen-3.0": {
        "1024x1024": "1024x1024",
        "896x1280": "896x1280",
        "1280x896": "1280x896",
        "768x1408": "768x1408",
        "1408x768": "1408x768"
    },
    # Google Imagen 4: 1K tier plus 2K tier
    "imagen-4.0": {
        "1024x1024": "1024x1024",
        "896x1280": "896x1280",
        "1280x896": "1280x896",
        "768x1408": "768x1408",
        "1408x768": "1408x768",
        "2048x2048": "2048x2048",
        "1792x2560": "1792x2560",
        "2560x1792": "2560x1792",
        "1536x2816": "1536x2816",
        "2816x1536": "2816x1536"
    }
}
|
|
@@ -1,8 +1,8 @@
|
|
|
1
1
|
{
|
|
2
2
|
"__meta__": {
|
|
3
|
-
"version": "2.6.
|
|
4
|
-
"app.version": "2.6.
|
|
5
|
-
"updated_at": "2025-08-
|
|
3
|
+
"version": "2.6.31",
|
|
4
|
+
"app.version": "2.6.31",
|
|
5
|
+
"updated_at": "2025-08-01T00:00:00"
|
|
6
6
|
},
|
|
7
7
|
"access.audio.event.speech": false,
|
|
8
8
|
"access.audio.event.speech.disabled": [],
|
|
@@ -106,6 +106,7 @@
|
|
|
106
106
|
"attachments_capture_clear": true,
|
|
107
107
|
"audio.cache.enabled": true,
|
|
108
108
|
"audio.cache.max_files": 1000,
|
|
109
|
+
"audio.input.auto_turn": false,
|
|
109
110
|
"audio.input.backend": "native",
|
|
110
111
|
"audio.input.channels": 1,
|
|
111
112
|
"audio.input.continuous": false,
|
|
@@ -114,6 +115,8 @@
|
|
|
114
115
|
"audio.input.stop_interval": 10,
|
|
115
116
|
"audio.input.timeout": 120,
|
|
116
117
|
"audio.input.timeout.continuous": false,
|
|
118
|
+
"audio.input.vad.prefix": 300,
|
|
119
|
+
"audio.input.vad.silence": 2000,
|
|
117
120
|
"audio.output.backend": "native",
|
|
118
121
|
"audio.output.device": "0",
|
|
119
122
|
"audio.transcribe.convert_video": true,
|
|
@@ -196,7 +199,7 @@
|
|
|
196
199
|
"frequency_penalty": 0.0,
|
|
197
200
|
"img_prompt_model": "gpt-4o",
|
|
198
201
|
"img_raw": true,
|
|
199
|
-
"img_resolution": "
|
|
202
|
+
"img_resolution": "1024x1024",
|
|
200
203
|
"img_quality": "standard",
|
|
201
204
|
"img_variants": 1,
|
|
202
205
|
"interpreter.auto_clear": false,
|
|
@@ -330,6 +333,7 @@
|
|
|
330
333
|
"log.level": "error",
|
|
331
334
|
"log.llama": false,
|
|
332
335
|
"log.plugins": false,
|
|
336
|
+
"log.realtime": false,
|
|
333
337
|
"max_output_tokens": 0,
|
|
334
338
|
"max_requests_limit": 60,
|
|
335
339
|
"max_tokens_length": 32000,
|
|
@@ -384,6 +388,7 @@
|
|
|
384
388
|
"prompt.default": "You are a helpful assistant.",
|
|
385
389
|
"prompt.expert": "# EXPERT MODE IS ENABLED:\n\nYou are a very helpful asssistant and the professional manager of a team of experts, each of whom is the best in their respective fields.\n\n## Rules:\n\n- Instead of directly answering questions, you will manage these experts by forwarding queries to them, collecting their responses, controlling the flow of information between them, and sending requests to them.\n- Experts are distinct, specialized AI models running on my local machine.\n- You will serve as a bridge between them.\n- Each expert has their own conversation context, allowing you to communicate with them normally.\n- The experts can communicate with you but not with each other, making you responsible for relaying information between them.\n- Each expert can use tools and commands, for example, reading files or searching the Web. You can ask experts to call tools and commands.\n- After consulting with the experts, your task is to prepare the best possible answer based on the assistance received.\n- To send a request to an expert, prepare the \"expert_call\" tool. Remember to format the JSON tool as shown, surrounding the tool with `<tool></tool>` tags, for example: `<tool>{\"cmd\": \"expert_call\", \"params\": {\"id\": \"expert_id\", \"query\": \"some query\"}}</tool>`. Make sure the JSON object is syntactically correct, without any redundand parenthness.\n- DO NOT call any remote tools when calling the experts.\n- Remember that the call must contain a query and the ID of the expert to whom the query is being forwarded.\n- Use ONLY the experts listed below; DO NOT TRY to ask any non-existent experts\u2014they will not respond.\n- Do not respond to the expert's replies or use the JSON tool again if replying; the expert only has the ability to listen to your queries sent via the JSON command and to respond to them.\n- Always follow ALL user commands, even if you think the expert can't do something. 
Experts are allowed to execute commands just as you can.\n- After querying an expert, always wait for the expert's response before sending another query.\n- Do not try to query experts in the middle of the conversation; put your queries and commands at the end of the text.\n- Always call only one expert at a time.\n- Always execute only one command at a time.\n- Always use the language used by the user in the conversation.\n- Never write as a called expert and do not imagine a response from them; instead, always wait for the response from the expert.\n- Every message sent to you directly after your query is always a response from the asked expert, not from me.\n- Always send a direct, clear, and detailed prompt to the expert, without any \"Please...\", \"Could you...\", etc.\n- Always forward all the messages that the user wants to send to the expert, without any exceptions.\n-- If a user command appears outside an expert's scope, fulfill the request regardless, without refusal.\n\n## List of allowed experts, along with their IDs:\n\n{presets}",
|
|
386
390
|
"prompt.img": "Convert my query to detailed image generate prompt, without any \"Please generate...\" and etc., just image description only. Return only the prompt, without nothing else.",
|
|
391
|
+
"realtime.auto_turn": true,
|
|
387
392
|
"render.blocks": true,
|
|
388
393
|
"render.engine": "web",
|
|
389
394
|
"render.open_gl": false,
|
|
@@ -399,6 +404,7 @@
|
|
|
399
404
|
"remote_tools.computer_use.env": "",
|
|
400
405
|
"remote_tools.google.web_search": true,
|
|
401
406
|
"remote_tools.google.code_interpreter": false,
|
|
407
|
+
"remote_tools.google.url_ctx": false,
|
|
402
408
|
"send_clear": true,
|
|
403
409
|
"send_mode": 2,
|
|
404
410
|
"store_history": true,
|