pygpt-net 2.6.8__py3-none-any.whl → 2.6.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pygpt_net/CHANGELOG.txt +12 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/app.py +4 -0
- pygpt_net/controller/ctx/common.py +9 -3
- pygpt_net/controller/ctx/ctx.py +19 -17
- pygpt_net/controller/kernel/kernel.py +1 -2
- pygpt_net/core/agents/runner.py +19 -0
- pygpt_net/core/agents/tools.py +93 -52
- pygpt_net/core/render/web/body.py +11 -33
- pygpt_net/core/render/web/renderer.py +52 -79
- pygpt_net/data/config/config.json +4 -3
- pygpt_net/data/config/models.json +3 -3
- pygpt_net/data/config/presets/agent_openai_supervisor.json +54 -0
- pygpt_net/data/config/presets/agent_supervisor.json +52 -0
- pygpt_net/data/config/settings.json +14 -0
- pygpt_net/data/locale/locale.de.ini +2 -0
- pygpt_net/data/locale/locale.en.ini +2 -0
- pygpt_net/data/locale/locale.es.ini +2 -0
- pygpt_net/data/locale/locale.fr.ini +2 -0
- pygpt_net/data/locale/locale.it.ini +2 -0
- pygpt_net/data/locale/locale.pl.ini +3 -1
- pygpt_net/data/locale/locale.uk.ini +2 -0
- pygpt_net/data/locale/locale.zh.ini +2 -0
- pygpt_net/plugin/google/config.py +306 -1
- pygpt_net/plugin/google/plugin.py +22 -0
- pygpt_net/plugin/google/worker.py +579 -3
- pygpt_net/provider/agents/llama_index/supervisor_workflow.py +116 -0
- pygpt_net/provider/agents/llama_index/workflow/supervisor.py +303 -0
- pygpt_net/provider/agents/openai/supervisor.py +361 -0
- pygpt_net/provider/core/config/patch.py +11 -0
- pygpt_net/provider/core/preset/patch.py +18 -0
- pygpt_net/ui/main.py +1 -1
- pygpt_net/ui/widget/lists/context.py +10 -1
- pygpt_net/ui/widget/textarea/web.py +47 -4
- {pygpt_net-2.6.8.dist-info → pygpt_net-2.6.10.dist-info}/METADATA +93 -29
- {pygpt_net-2.6.8.dist-info → pygpt_net-2.6.10.dist-info}/RECORD +39 -34
- {pygpt_net-2.6.8.dist-info → pygpt_net-2.6.10.dist-info}/LICENSE +0 -0
- {pygpt_net-2.6.8.dist-info → pygpt_net-2.6.10.dist-info}/WHEEL +0 -0
- {pygpt_net-2.6.8.dist-info → pygpt_net-2.6.10.dist-info}/entry_points.txt +0 -0
pygpt_net/provider/agents/openai/supervisor.py
ADDED
@@ -0,0 +1,361 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ================================================== #
+# This file is a part of PYGPT package               #
+# Website: https://pygpt.net                         #
+# GitHub:  https://github.com/szczyglis-dev/py-gpt   #
+# MIT License                                        #
+# Created By  : Marcin Szczygliński                  #
+# Updated Date: 2025.08.17 02:00:00                  #
+# ================================================== #
+
+import json
+import re
+from typing import Dict, Any, Tuple, Optional
+
+from agents import (
+    Agent as OpenAIAgent,
+    Runner,
+    RunConfig,
+    RunContextWrapper,
+    SQLiteSession,
+    ModelSettings,
+    function_tool,
+)
+
+from pygpt_net.core.agents.bridge import ConnectionContext
+from pygpt_net.core.bridge import BridgeContext
+from pygpt_net.core.types import (
+    AGENT_MODE_OPENAI,
+    AGENT_TYPE_OPENAI,
+)
+
+from pygpt_net.item.ctx import CtxItem
+from pygpt_net.item.model import ModelItem
+
+from pygpt_net.provider.gpt.agents.client import get_custom_model_provider, set_openai_env
+from pygpt_net.provider.gpt.agents.remote_tools import append_tools
+from pygpt_net.provider.gpt.agents.response import StreamHandler
+from pygpt_net.provider.gpt.agents.experts import get_experts
+
+from ..base import BaseAgent
+
+JSON_RE = re.compile(r"\{[\s\S]*\}$", re.MULTILINE)
+
+SUPERVISOR_PROMPT = """
+You are the “Supervisor” (orchestrator). You never use tools directly except the tool that runs the Worker.
+Process:
+- Decompose the user's task into actionable instructions for the Worker.
+- Do NOT pass your conversation history to the Worker. Pass ONLY a concise, self-contained instruction.
+- After each Worker result, evaluate against a clear Definition of Done (DoD). If not met, call the Worker again with a refined instruction.
+- Ask the user only if absolutely necessary. If you must, STOP and output a single JSON with:
+  {"action":"ask_user","question":"...","reasoning":"..."}
+- When done, output a single JSON:
+  {"action":"final","final_answer":"...","reasoning":"..."}
+- Otherwise, to run the Worker, call the run_worker tool with a short instruction.
+Respond in the user's language. Keep outputs short and precise.
+"""
+
+WORKER_PROMPT = """
+You are the “Worker”. You execute Supervisor instructions strictly, using your tools.
+- Keep your own memory across calls (Worker session).
+- Return a concise result with key evidence/extracts from tools when applicable.
+- Do not ask the user questions directly; if instruction is underspecified, clearly state what is missing.
+Respond in the user's language.
+"""
+
+class Agent(BaseAgent):
+
+    def __init__(self, *args, **kwargs):
+        super(Agent, self).__init__(*args, **kwargs)
+        self.id = "openai_agent_supervisor"
+        self.type = AGENT_TYPE_OPENAI
+        self.mode = AGENT_MODE_OPENAI
+        self.name = "Supervisor + worker"
+
+    def get_agent(self, window, kwargs: Dict[str, Any]):
+        """
+        Return Agent supervisor instance
+
+        :param window: window instance
+        :param kwargs: keyword arguments
+        :return: Agent instance
+        """
+        context = kwargs.get("context", BridgeContext())
+        preset = context.preset
+        agent_name = preset.name if preset else "Supervisor"
+        model = kwargs.get("model", ModelItem())
+        worker_tool = kwargs.get("worker_tool", None)
+        kwargs = {
+            "name": agent_name,
+            "instructions": self.get_option(preset, "supervisor", "prompt"),
+            "model": model.id,
+        }
+        if worker_tool:
+            kwargs["tools"] = [worker_tool]
+        return OpenAIAgent(**kwargs)
+
+    def get_worker(self, window, kwargs: Dict[str, Any]):
+        """
+        Return Agent worker instance
+
+        :param window: window instance
+        :param kwargs: keyword arguments
+        :return: Agent instance
+        """
+        context = kwargs.get("context", BridgeContext())
+        preset = context.preset
+        agent_name = "Worker"  # Default worker name
+        tools = kwargs.get("function_tools", [])
+        model = window.core.models.get(
+            self.get_option(preset, "worker", "model")
+        )
+        handoffs = kwargs.get("handoffs", [])
+        kwargs = {
+            "name": agent_name,
+            "instructions": self.get_option(preset, "worker", "prompt"),
+            "model": model.id,
+        }
+        if handoffs:
+            kwargs["handoffs"] = handoffs
+
+        tool_kwargs = append_tools(
+            tools=tools,
+            window=window,
+            model=model,
+            preset=preset,
+            allow_local_tools=self.get_option(preset, "worker", "allow_local_tools"),
+            allow_remote_tools=self.get_option(preset, "worker", "allow_remote_tools"),
+        )
+        kwargs.update(tool_kwargs)  # update kwargs with tools
+        return OpenAIAgent(**kwargs)
+
+    async def run(
+            self,
+            window: Any = None,
+            agent_kwargs: Dict[str, Any] = None,
+            previous_response_id: str = None,
+            messages: list = None,
+            ctx: CtxItem = None,
+            stream: bool = False,
+            bridge: ConnectionContext = None,
+            use_partial_ctx: Optional[bool] = False,
+    ) -> Tuple[CtxItem, str, str]:
+        """
+        Run agent (async)
+
+        :param window: Window instance
+        :param agent_kwargs: Additional agent parameters
+        :param previous_response_id: ID of the previous response (if any)
+        :param messages: Conversation messages
+        :param ctx: Context item
+        :param stream: Whether to stream output
+        :param bridge: Connection context for agent operations
+        :param use_partial_ctx: Use partial ctx per cycle
+        :return: Current ctx, final output, last response ID
+        """
+        final_output = ""
+        response_id = None
+        model = agent_kwargs.get("model", ModelItem())
+        verbose = agent_kwargs.get("verbose", False)
+        tools = agent_kwargs.get("function_tools", [])
+        context = agent_kwargs.get("context", BridgeContext())
+        preset = context.preset if context else None
+
+        # add experts
+        experts = get_experts(
+            window=window,
+            preset=preset,
+            verbose=verbose,
+            tools=tools,
+        )
+        if experts:
+            agent_kwargs["handoffs"] = experts
+
+        worker = self.get_worker(window, agent_kwargs)
+        max_steps = agent_kwargs.get("max_iterations", 10)
+        kwargs = {
+            "input": messages,
+            "max_turns": int(max_steps),
+        }
+        if model.provider != "openai":
+            custom_provider = get_custom_model_provider(window, model)
+            kwargs["run_config"] = RunConfig(model_provider=custom_provider)
+        else:
+            set_openai_env(window)
+        if previous_response_id:
+            kwargs["previous_response_id"] = previous_response_id
+
+        # temp worker memory
+        worker_session_id = f"worker_session_{ctx.meta.id}" if ctx.meta else "worker_session_default"
+        worker_session = SQLiteSession(worker_session_id)
+
+        handler = StreamHandler(window, bridge)
+        item_ctx = ctx
+
+        # tool to run Worker
+        @function_tool(name_override="run_worker")
+        async def run_worker(ctx: RunContextWrapper[Any], instruction: str) -> str:
+            """
+            Run the Worker with an instruction from the Supervisor and return its output.
+
+            :param ctx: Run context wrapper
+            :param instruction: Instruction for the Worker
+            :return: Output from the Worker
+            """
+            item_ctx.stream = f"\n\n**Supervisor --> Worker:** {instruction}\n\n"
+            bridge.on_step(item_ctx, True)
+            handler.begin = False
+            result = await Runner.run(
+                worker,
+                input=instruction,
+                session=worker_session,
+                max_turns=max_steps,
+            )
+            item_ctx.stream = f"\n\n{result.final_output}\n\n"
+            bridge.on_step(item_ctx, False)
+            return str(result.final_output)
+
+        agent_kwargs["worker_tool"] = run_worker
+        agent = self.get_agent(window, agent_kwargs)
+
+        if not stream:
+            result = await Runner.run(
+                agent,
+                **kwargs
+            )
+            final_output, last_response_id = window.core.gpt.responses.unpack_agent_response(result, ctx)
+            response_id = result.last_response_id
+            if verbose:
+                print("Final response:", result)
+        else:
+            result = Runner.run_streamed(
+                agent,
+                **kwargs
+            )
+            async for event in result.stream_events():
+                if bridge.stopped():
+                    result.cancel()
+                    bridge.on_stop(ctx)
+                    break
+                final_output, response_id = handler.handle(event, ctx)
+
+        # extract final output from JSON
+        if final_output:
+            final_output = self.extract_final_response(final_output)
+            if verbose:
+                print("Final output after extraction:", final_output)
+
+        return ctx, final_output, response_id
+
+    def extract_final_response(self, output: str) -> str:
+        """
+        Extract final response from the output string.
+
+        :param output: Output string from the agent
+        :return: Final response string
+        """
+        if not output:
+            return ""
+
+        fence = re.search(r"```json\s*([\s\S]*?)\s*```", output, re.IGNORECASE)
+        if fence:
+            try:
+                # Try to parse the fenced JSON
+                json_text = fence.group(1).strip()
+                json_response = json.loads(json_text)
+                return self.response_from_json(json_response)
+            except Exception:
+                pass
+
+        tail = JSON_RE.findall(output)
+        for candidate in tail[::-1]:
+            try:
+                # Try to parse the JSON from the tail
+                json_response = json.loads(candidate)
+                return self.response_from_json(json_response)
+            except Exception:
+                continue
+
+        if output.startswith("{") and output.endswith("}"):
+            try:
+                # Try to parse the entire output as JSON
+                response = json.loads(output)
+                return self.response_from_json(response)
+            except Exception as e:
+                print(f"Error parsing JSON: {e}")
+                return output
+
+        return output.strip()
+
+    def response_from_json(self, response: dict) -> str:
+        """
+        Extract response from JSON format
+
+        :param response: JSON response from the agent
+        :return: str: Formatted response string
+        """
+        action = response.get("action", "")
+        if action == "ask_user":
+            question = response.get("question", "")
+            reasoning = response.get("reasoning", "")
+            return f"**Supervisor:** {reasoning}\n\n{question}"
+        elif action == "final":
+            final_answer = response.get("final_answer", "")
+            reasoning = response.get("reasoning", "")
+            return f"**Supervisor:** {reasoning}\n\n{final_answer}\n\n"
+        else:
+            return response.get("final_answer", "")
+
+    def get_options(self) -> Dict[str, Any]:
+        """
+        Return Agent options
+
+        :return: dict of options
+        """
+        return {
+            "supervisor": {
+                "label": "Supervisor",
+                "options": {
+                    "prompt": {
+                        "type": "textarea",
+                        "label": "Prompt",
+                        "description": "Prompt for supervisor",
+                        "default": SUPERVISOR_PROMPT,
+                    },
+                }
+            },
+            "worker": {
+                "label": "Worker",
+                "options": {
+                    "model": {
+                        "label": "Model",
+                        "type": "combo",
+                        "use": "models",
+                        "default": "gpt-4o",
+                    },
+                    "prompt": {
+                        "type": "textarea",
+                        "label": "Prompt",
+                        "description": "Prompt for worker",
+                        "default": WORKER_PROMPT,
+                    },
+                    "allow_local_tools": {
+                        "type": "bool",
+                        "label": "Allow local tools",
+                        "description": "Allow usage of local tools for this agent",
+                        "default": True,
+                    },
+                    "allow_remote_tools": {
+                        "type": "bool",
+                        "label": "Allow remote tools",
+                        "description": "Allow usage of remote tools for this agent",
+                        "default": True,
+                    },
+                }
+            },
+        }
+
+
+
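The new agent exposes the Worker to the Supervisor as a single run_worker function tool and gives the Worker its own SQLiteSession memory, so only concise instructions (never the Supervisor's history) cross the boundary. Below is a minimal, self-contained sketch of that pattern, assuming the openai-agents SDK and an OPENAI_API_KEY in the environment; the prompts and model are abbreviated stand-ins for the preset options above, not pygpt-net's actual wiring:

    import asyncio
    from agents import Agent, Runner, SQLiteSession, function_tool

    # Worker: executes instructions, keeps its own memory across calls
    worker = Agent(name="Worker", instructions="Execute instructions strictly.", model="gpt-4o")
    worker_session = SQLiteSession("worker_session_demo")

    @function_tool(name_override="run_worker")
    async def run_worker(instruction: str) -> str:
        """Run the Worker with a concise, self-contained instruction."""
        # the Worker sees only the instruction, never the Supervisor's history
        result = await Runner.run(worker, input=instruction, session=worker_session)
        return str(result.final_output)

    # Supervisor: orchestrates via the single tool, emits JSON when done
    supervisor = Agent(
        name="Supervisor",
        instructions='Delegate via run_worker; when done, output {"action":"final","final_answer":"...","reasoning":"..."}.',
        model="gpt-4o",
        tools=[run_worker],
    )

    async def main():
        result = await Runner.run(supervisor, input="Summarize the key changes in this release.")
        print(result.final_output)

    asyncio.run(main())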
pygpt_net/provider/core/config/patch.py
CHANGED
@@ -2261,6 +2261,17 @@ class Patch:
             self.window.core.updater.patch_css('style.dark.css', True)  # tree
             updated = True
 
+        # < 2.6.10
+        if old < parse_version("2.6.10"):
+            print("Migrating config from < 2.6.10...")
+            if "agent.idx.auto_retrieve" not in data:
+                data["agent.idx.auto_retrieve"] = True
+            if 'google' in data['plugins'] \
+                    and 'oauth_scopes' in data['plugins']['google']:
+                # add documents scope
+                if "https://www.googleapis.com/auth/documents" not in data['plugins']['google']['oauth_scopes']:
+                    data['plugins']['google']['oauth_scopes'] += " https://www.googleapis.com/auth/documents"
+            updated = True
 
         # update file
         migrated = False
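This hunk follows the project's version-gated migration pattern: compare the stored version against a threshold, mutate the config dict, and flag it for saving. A hedged sketch of the same logic in isolation (the migrate helper and plain-dict handling here are illustrative, not pygpt-net's actual Patch API):

    from packaging.version import parse as parse_version

    DOCS_SCOPE = "https://www.googleapis.com/auth/documents"

    def migrate(data: dict, old_version: str) -> bool:
        """Apply config migrations for versions older than 2.6.10 (illustrative)."""
        updated = False
        if parse_version(old_version) < parse_version("2.6.10"):
            # add the new option only if absent, so existing user values survive
            if "agent.idx.auto_retrieve" not in data:
                data["agent.idx.auto_retrieve"] = True
            google = data.get("plugins", {}).get("google", {})
            scopes = google.get("oauth_scopes")
            if scopes is not None and DOCS_SCOPE not in scopes:
                # scopes are stored as one space-separated string
                google["oauth_scopes"] = scopes + " " + DOCS_SCOPE
            updated = True
        return updated

    # example: upgrading a 2.6.8 config gains the documents scope
    cfg = {"plugins": {"google": {"oauth_scopes": "https://www.googleapis.com/auth/drive"}}}
    assert migrate(cfg, "2.6.8") is True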
pygpt_net/provider/core/preset/patch.py
CHANGED
@@ -39,6 +39,7 @@ class Patch:
         is_evolve = False
         is_b2b = False
         is_workflow = False
+        is_supervisor = False
 
         for k in self.window.core.presets.items:
             data = self.window.core.presets.items[k]
@@ -241,6 +242,23 @@ class Patch:
                 updated = True
                 save = True
 
+            # < 2.6.9
+            if old < parse_version("2.6.9"):
+                if 'agent_openai_supervisor' not in self.window.core.presets.items and not is_supervisor:
+                    print("Migrating preset file from < 2.6.9...")
+                    files = [
+                        'agent_openai_supervisor.json',
+                        'agent_supervisor.json',
+                    ]
+                    for file in files:
+                        dst = os.path.join(self.window.core.config.get_user_dir('presets'), file)
+                        src = os.path.join(self.window.core.config.get_app_path(), 'data', 'config',
+                                           'presets', file)
+                        shutil.copyfile(src, dst)
+                        print("Patched file: {}.".format(dst))
+                        updated = True
+                        is_supervisor = True  # prevent multiple copies
+
             # update file
             if updated:
                 if save:
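The preset patch ships the two new supervisor presets (also listed above under data/config/presets/) by copying the bundled JSON files into the user's presets directory, using a one-shot flag so the copy runs once per migration. A minimal sketch of that bootstrap step, with illustrative paths standing in for pygpt-net's config helpers:

    import os
    import shutil

    def install_default_presets(app_path: str, user_presets_dir: str) -> None:
        """Copy bundled preset files into the user dir once (paths illustrative)."""
        for name in ("agent_openai_supervisor.json", "agent_supervisor.json"):
            src = os.path.join(app_path, "data", "config", "presets", name)
            dst = os.path.join(user_presets_dir, name)
            if not os.path.exists(dst):  # variation: skip if the user already has one
                shutil.copyfile(src, dst)
                print("Patched file: {}.".format(dst))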
pygpt_net/ui/widget/lists/context.py
CHANGED
@@ -197,7 +197,7 @@ class ContextList(BaseList):
                 icon = self._color_icon(status_info['color'])
                 status_action = set_label_menu.addAction(icon, name)
                 status_action.triggered.connect(
-                    functools.partial(self.
+                    functools.partial(self.action_set_label, ctx_id, status_id)
                 )
 
         idx_menu = QMenu(trans('action.idx'), self)
@@ -387,6 +387,15 @@ class ContextList(BaseList):
             self.restore_after_ctx_menu = False
             self.window.controller.ctx.common.reset(id)
 
+    def action_set_label(self, id: int, label: int):
+        """
+        Set label action handler
+
+        :param id: context id
+        :param label: label id
+        """
+        self.window.controller.ctx.set_label(id, label)
+
     def selectionCommand(self, index, event=None):
         """
         Selection command
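The first hunk rewires the menu action's connect() call to bind the new action_set_label handler with functools.partial. The point of partial here is that each action freezes its own arguments at connect time, instead of late-binding loop variables the way a bare lambda would. A short sketch of the pattern with illustrative names (assumes PySide6, as used by pygpt-net):

    import functools
    from PySide6.QtWidgets import QApplication, QMenu

    def set_label(ctx_id: int, label_id: int) -> None:
        print(f"ctx {ctx_id} -> label {label_id}")

    app = QApplication([])
    menu = QMenu("Set label")
    ctx_id = 42
    for label_id in range(3):
        action = menu.addAction(f"Label {label_id}")
        # partial captures the current loop values; `lambda: set_label(ctx_id, label_id)`
        # would late-bind label_id and fire with the last loop value for every action
        action.triggered.connect(functools.partial(set_label, ctx_id, label_id))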
pygpt_net/ui/widget/textarea/web.py
CHANGED
@@ -54,9 +54,7 @@ class ChatWebOutput(QWebEngineView):
         self.setPage(CustomWebEnginePage(self.window, self, profile=self._profile))
 
     def _detach_gl_event_filter(self):
-        """
-        Detach OpenGL widget event filter if installed
-        """
+        """Detach OpenGL widget event filter if installed"""
         if self._glwidget and self._glwidget_filter_installed:
             try:
                 self._glwidget.removeEventFilter(self)
@@ -95,8 +93,9 @@ class ChatWebOutput(QWebEngineView):
             return
         try:
            # detach the channel from the page to break JS<->Python references
+            page.cleanup()
             page.setWebChannel(None)
-        except Exception:
+        except Exception as e:
             pass
 
         # bridge, channel, and signals have parent=page, so deleteLater of the page will clean them up
@@ -500,6 +499,39 @@ class CustomWebEnginePage(QWebEnginePage):
     def javaScriptConsoleMessage(self, level, message, line_number, source_id):
         self.signals.js_message.emit(line_number, message, source_id)  # handled in debug controller
 
+    def cleanup(self):
+        """Cleanup method to release resources"""
+        try:
+            self.findTextFinished.disconnect()
+            self.zoomFactorChanged.disconnect()
+            self.selectionChanged.disconnect()
+        except Exception:
+            pass
+
+        if self.bridge:
+            try:
+                self.bridge.cleanup()
+            except Exception:
+                pass
+            self.bridge = None
+
+        if self.channel:
+            try:
+                self.channel.unregisterObject("bridge")
+            except Exception:
+                pass
+            self.channel = None
+
+        if self.signals:
+            try:
+                self.signals.deleteLater()
+            except Exception:
+                pass
+            self.signals = None
+
+        # delete the page object
+        self.deleteLater()
+
 
 class Bridge(QObject):
     """Bridge between Python and JavaScript"""
@@ -534,6 +566,17 @@ class Bridge(QObject):
     def update_scroll_position(self, pos: int):
         self.window.controller.chat.render.scroll = pos
 
+    def cleanup(self):
+        """Cleanup method to release resources"""
+        if self.window:
+            try:
+                self.window = None
+            except Exception:
+                pass
+
+        # delete the bridge object
+        self.deleteLater()
+
 
 class WebEngineSignals(QObject):
     save_as = Signal(str, str)