pygpt-net 2.6.62__py3-none-any.whl → 2.6.63__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. pygpt_net/CHANGELOG.txt +5 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/controller/presets/editor.py +65 -1
  4. pygpt_net/core/agents/custom/llama_index/runner.py +15 -52
  5. pygpt_net/core/agents/custom/runner.py +194 -76
  6. pygpt_net/core/agents/runners/llama_workflow.py +60 -10
  7. pygpt_net/data/config/config.json +3 -3
  8. pygpt_net/data/config/models.json +3 -3
  9. pygpt_net/data/config/presets/agent_openai_b2b.json +1 -15
  10. pygpt_net/data/config/presets/agent_openai_coder.json +1 -15
  11. pygpt_net/data/config/presets/agent_openai_evolve.json +1 -23
  12. pygpt_net/data/config/presets/agent_openai_planner.json +1 -21
  13. pygpt_net/data/config/presets/agent_openai_researcher.json +1 -21
  14. pygpt_net/data/config/presets/agent_openai_supervisor.json +1 -13
  15. pygpt_net/data/config/presets/agent_openai_writer.json +1 -15
  16. pygpt_net/data/config/presets/agent_supervisor.json +1 -11
  17. pygpt_net/data/js/app/runtime.js +4 -1
  18. pygpt_net/data/js/app.min.js +3 -2
  19. pygpt_net/data/locale/locale.en.ini +5 -0
  20. pygpt_net/js_rc.py +13 -10
  21. pygpt_net/provider/agents/base.py +0 -0
  22. pygpt_net/provider/agents/llama_index/flow_from_schema.py +0 -0
  23. pygpt_net/provider/agents/llama_index/workflow/codeact.py +0 -0
  24. pygpt_net/provider/agents/llama_index/workflow/planner.py +229 -29
  25. pygpt_net/provider/agents/llama_index/workflow/supervisor.py +0 -0
  26. pygpt_net/provider/agents/openai/agent.py +0 -0
  27. pygpt_net/provider/agents/openai/agent_b2b.py +4 -4
  28. pygpt_net/provider/agents/openai/agent_planner.py +617 -262
  29. pygpt_net/provider/agents/openai/agent_with_experts.py +0 -0
  30. pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +4 -4
  31. pygpt_net/provider/agents/openai/agent_with_feedback.py +4 -4
  32. pygpt_net/provider/agents/openai/evolve.py +6 -6
  33. pygpt_net/provider/agents/openai/flow_from_schema.py +0 -0
  34. pygpt_net/provider/agents/openai/supervisor.py +290 -37
  35. pygpt_net/provider/api/x_ai/__init__.py +0 -0
  36. pygpt_net/provider/core/agent/__init__.py +0 -0
  37. pygpt_net/provider/core/agent/base.py +0 -0
  38. pygpt_net/provider/core/agent/json_file.py +0 -0
  39. pygpt_net/provider/core/config/patches/patch_before_2_6_42.py +0 -0
  40. pygpt_net/provider/llms/base.py +0 -0
  41. pygpt_net/provider/llms/deepseek_api.py +0 -0
  42. pygpt_net/provider/llms/google.py +0 -0
  43. pygpt_net/provider/llms/hugging_face_api.py +0 -0
  44. pygpt_net/provider/llms/hugging_face_router.py +0 -0
  45. pygpt_net/provider/llms/mistral.py +0 -0
  46. pygpt_net/provider/llms/perplexity.py +0 -0
  47. pygpt_net/provider/llms/x_ai.py +0 -0
  48. pygpt_net/ui/widget/dialog/confirm.py +34 -8
  49. pygpt_net/ui/widget/textarea/input.py +1 -1
  50. {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.63.dist-info}/METADATA +7 -2
  51. {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.63.dist-info}/RECORD +34 -34
  52. {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.63.dist-info}/LICENSE +0 -0
  53. {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.63.dist-info}/WHEEL +0 -0
  54. {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.63.dist-info}/entry_points.txt +0 -0
pygpt_net/core/agents/runners/llama_workflow.py CHANGED
@@ -1,3 +1,5 @@
+# core/agents/runners/llama_workflow.py
+
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 # ================================================== #
@@ -6,7 +8,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.26 17:00:00 #
+# Updated Date: 2025.09.27 06:00:00 #
 # ================================================== #
 
 import re
@@ -260,6 +262,11 @@ class LlamaWorkflow(BaseRunner):
         # Keep last known agent name to avoid redundant ctx updates.
         last_agent_name: Optional[str] = None
 
+        # Track whether current block has already produced user-visible tokens.
+        # This prevents creating empty DB items and preserves order.
+        content_written: bool = False
+        block_open: bool = False  # logical "block" opened after first StepEvent
+
         async for event in handler.stream_events():
             if self.is_stopped():
                 # persist current output on stop
@@ -269,6 +276,7 @@
                 self.end_stream(item_ctx, signals)
                 await handler.cancel_run()  # cancel, will raise WorkflowCancelledByUser
                 break
+
             if isinstance(event, ToolCallResult):
                 output = f"\n-----------\nExecution result:\n{event.tool_output}"
                 if verbose:
@@ -276,8 +284,11 @@
                 formatted = "\n```output\n" + str(event.tool_output) + "\n```\n"
                 item_ctx.live_output += formatted
                 item_ctx.stream = formatted
+                content_written = True
                 if item_ctx.stream_agent_output and flush:
                     self.send_stream(item_ctx, signals, begin)
+                    begin = False
+
             elif isinstance(event, ToolCall):
                 if "code" in event.tool_kwargs:
                     output = f"\n-----------\nTool call code:\n{event.tool_kwargs['code']}"
@@ -286,23 +297,43 @@
                     formatted = "\n```python\n" + str(event.tool_kwargs['code']) + "\n```\n"
                     item_ctx.live_output += formatted
                     item_ctx.stream = formatted
+                    content_written = True
                     if item_ctx.stream_agent_output and flush:
                         self.send_stream(item_ctx, signals, begin)
+                        begin = False
+
             elif isinstance(event, StepEvent):
+                # UI splitting strategy aligned with OpenAI flow:
+                # - do NOT start a new DB item at the first StepEvent
+                # - only finalize the previous item if it already produced content
+                #   (prevents empty items and ordering glitches)
                 self.set_busy(signals)
                 if not use_partials:
+                    # We still want to propagate the name early if provided.
+                    try:
+                        meta = getattr(event, "meta", {}) or {}
+                        next_name = meta.get("agent_name")
+                        if next_name:
+                            last_agent_name = self._apply_agent_name_to_ctx(item_ctx, next_name, last_agent_name)
+                    except Exception:
+                        pass
+                    begin = True
                     continue
+
                 if verbose:
                     print("\n\n-----STEP-----\n\n")
                     print(f"[{event.name}] {event.index}/{event.total} meta={event.meta}")
-                if flush:
-                    item_ctx = self.on_next_ctx(
-                        item_ctx,
-                        signals=signals,
-                        begin=begin,
-                        stream=True,
-                    )
-                # Propagate agent name early based on StepEvent meta, if available.
+
+                # If there was an open block with content -> finalize it to a new DB item.
+                if block_open and content_written:
+                    if flush:
+                        item_ctx = self.on_next_ctx(
+                            item_ctx,
+                            signals=signals,
+                            begin=begin,
+                            stream=True,
+                        )
+                    # Apply next agent name on the fresh ctx (so UI header is correct from token #1).
                     try:
                         meta = getattr(event, "meta", {}) or {}
                         next_name = meta.get("agent_name")
@@ -310,8 +341,22 @@
                             last_agent_name = self._apply_agent_name_to_ctx(item_ctx, next_name, last_agent_name)
                     except Exception:
                         pass
-                # Optional: mark start of a new stream block
+                else:
+                    # First step or previous step had no visible content: just propagate the name.
+                    try:
+                        meta = getattr(event, "meta", {}) or {}
+                        next_name = meta.get("agent_name")
+                        if next_name:
+                            last_agent_name = self._apply_agent_name_to_ctx(item_ctx, next_name, last_agent_name)
+                    except Exception:
+                        pass
+
+                # Prepare for the upcoming tokens (new block begins).
+                block_open = True
+                content_written = False
                 begin = True
+                continue
+
             elif isinstance(event, AgentStream):
                 # Update agent name from event if present; fallback to header parsing.
                 name = getattr(event, "current_agent_name", None)
@@ -325,9 +370,11 @@
                 if event.delta:
                     item_ctx.live_output += event.delta
                     item_ctx.stream = event.delta
+                    content_written = True
                     if item_ctx.stream_agent_output and flush:
                         self.send_stream(item_ctx, signals, begin)  # send stream to webview
                         begin = False
+
             elif isinstance(event, AgentOutput):
                 # Ensure final agent name is applied as well.
                 name = getattr(event, "current_agent_name", None)
@@ -338,6 +385,9 @@
                 item_ctx.set_agent_final_response(answer)
                 if verbose:
                     print(f"\nFinal response: {answer}")
+                # Do not split the block here – we will either:
+                #  - split on the next StepEvent, or
+                #  - finalize once at the end (make_response), just like OpenAI flow does.
 
         return item_ctx
 
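
Taken together, these hunks implement a small splitting state machine: a logical block opens at the first StepEvent, streamed deltas and tool output mark the block as having produced visible content, and a later StepEvent finalizes the block into a new DB item only if it actually produced content. A minimal, runnable sketch of that rule follows; Splitter, Item, on_step and on_delta are hypothetical stand-ins for the runner's ctx handling, not pygpt-net's actual classes.

from dataclasses import dataclass, field
from typing import List, Optional


@dataclass
class Item:
    agent_name: Optional[str] = None
    text: str = ""


@dataclass
class Splitter:
    items: List[Item] = field(default_factory=list)
    block_open: bool = False       # a logical block opens at the first step event
    content_written: bool = False  # True once the block produced visible tokens

    def current(self) -> Item:
        if not self.items:
            self.items.append(Item())
        return self.items[-1]

    def on_step(self, agent_name: Optional[str] = None) -> None:
        # Finalize the previous block into a new item only if it produced
        # content; otherwise just propagate the agent name (no empty items).
        if self.block_open and self.content_written:
            self.items.append(Item())
        if agent_name:
            self.current().agent_name = agent_name
        self.block_open = True
        self.content_written = False

    def on_delta(self, delta: str) -> None:
        if delta:
            self.current().text += delta
            self.content_written = True


s = Splitter()
s.on_step("planner")   # first step: no split, just names the block
s.on_step("planner")   # no content yet -> still no new item
s.on_delta("plan...")
s.on_step("worker")    # previous block has content -> finalize into a new item
s.on_delta("result")
print([(i.agent_name, i.text) for i in s.items])
# [('planner', 'plan...'), ('worker', 'result')]

The content_written guard is what prevents the empty DB items and ordering glitches called out in the diff comments.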

pygpt_net/data/config/config.json CHANGED
@@ -1,8 +1,8 @@
 {
     "__meta__": {
-        "version": "2.6.62",
-        "app.version": "2.6.62",
-        "updated_at": "2025-09-26T00:00:00"
+        "version": "2.6.63",
+        "app.version": "2.6.63",
+        "updated_at": "2025-09-27T00:00:00"
     },
     "access.audio.event.speech": false,
     "access.audio.event.speech.disabled": [],

pygpt_net/data/config/models.json CHANGED
@@ -1,8 +1,8 @@
 {
     "__meta__": {
-        "version": "2.6.62",
-        "app.version": "2.6.62",
-        "updated_at": "2025-09-26T00:00:00"
+        "version": "2.6.63",
+        "app.version": "2.6.63",
+        "updated_at": "2025-09-27T00:00:00"
     },
     "items": {
         "SpeakLeash/bielik-11b-v2.3-instruct:Q4_K_M": {

pygpt_net/data/config/presets/agent_openai_b2b.json CHANGED
@@ -31,21 +31,7 @@
     "enabled": true,
     "description": "",
     "remote_tools": "",
-    "extra": {
-        "openai_agent_b2b": {
-            "bot_1": {
-                "prompt": "You're an advanced AI assistant and an expert in every field. Imagine that I am also such an AI assistant and converse with me in an expert manner. As two assistants, let's brainstorm and arrive at some advanced solutions.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            },
-            "bot_2": {
-                "model": "gpt-4o",
-                "prompt": "You're an advanced AI assistant and an expert in every field. Imagine that I am also such an AI assistant and converse with me in an expert manner. As two assistants, let's brainstorm and arrive at some advanced solutions.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            }
-        }
-    },
+    "extra": {},
     "__meta__": {
         "version": "2.5.94",
         "app.version": "2.5.94",

pygpt_net/data/config/presets/agent_openai_coder.json CHANGED
@@ -31,21 +31,7 @@
     "enabled": true,
     "description": "",
     "remote_tools": "",
-    "extra": {
-        "openai_agent_feedback": {
-            "base": {
-                "prompt": "You are senior programmer and expert in coding. Use markdown for code blocks. If there is any feedback provided, use it to improve the code.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            },
-            "feedback": {
-                "model": "o3-mini-low",
-                "prompt": "You evaluate a code and decide if it's correct. If it's not correct, you provide feedback on what needs to be fixed and improved. Never give it a pass on the first try. After 5 attempts, you can give it a pass if the code is good enough. You can use tools for checking the code, running tests, etc.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            }
-        }
-    },
+    "extra": {},
     "__meta__": {
         "version": "2.5.81",
         "app.version": "2.5.81",

pygpt_net/data/config/presets/agent_openai_evolve.json CHANGED
@@ -31,29 +31,7 @@
     "enabled": true,
     "description": "",
     "remote_tools": "",
-    "extra": {
-        "openai_agent_evolve": {
-            "base": {
-                "num_parents": 2,
-                "max_generations": 10,
-                "prompt": "You generate a response based on the user's input. If there is any feedback provided, use it to improve the response.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            },
-            "chooser": {
-                "model": "gpt-4o",
-                "prompt": "I will give you a list of different answers to the given question. From the provided list, choose the best and most accurate answer and return the number of that answer to me, without any explanation, just the number of the answer.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            },
-            "feedback": {
-                "model": "gpt-4o",
-                "prompt": "You evaluate a result and decide if it's good enough. If it's not good enough, you provide feedback on what needs to be improved. Never give it a pass on the first try. After 5 attempts, you can give it a pass if the result is good enough - do not go for perfection.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            }
-        }
-    },
+    "extra": {},
     "__meta__": {
         "version": "2.5.85",
         "app.version": "2.5.85",

pygpt_net/data/config/presets/agent_openai_planner.json CHANGED
@@ -31,27 +31,7 @@
     "enabled": true,
     "description": "",
     "remote_tools": "",
-    "extra": {
-        "openai_agent_planner": {
-            "base": {
-                "prompt": "Prepare a comprehensive and detailed response to the question based on the action plan. Follow each step outlined in the plan. If any feedback is provided, use it to improve the response.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            },
-            "planner": {
-                "model": "o3-mini-high",
-                "prompt": "Make a plan of task execution for the query by dividing a task into smaller steps. Do not provide any solutions here. The plan should only contain a list of steps as instructions for someone else to follow. Prepare a plan in the language in which the query was made. Format the plan using markdown.\n\nExample:\n\n----------------\n\n**Sub-task 1: <name>**\n\n- Description: <subtask description>\n- Expected output: <expected output>\n- Dependencies: []\n- Required Tools: []\n\n**Sub-task 2: <name>**\n\n- Description: <subtask description>\n- Expected output: <expected output>\n- Dependencies: [<subtask's 1 name>]\n- Required Tools: [WebSearch]\n\n[...]",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            },
-            "feedback": {
-                "model": "gpt-4o",
-                "prompt": "You evaluate a result and decide if it's good enough. If it's not good enough, you provide feedback on what needs to be improved. Never give it a pass on the first try. After 5 attempts, you can give it a pass if the result is good enough - do not go for perfection, but ensure all tasks are completed.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            }
-        }
-    },
+    "extra": {},
     "__meta__": {
         "version": "2.5.81",
         "app.version": "2.5.81",

pygpt_net/data/config/presets/agent_openai_researcher.json CHANGED
@@ -31,27 +31,7 @@
     "enabled": true,
     "description": "",
     "remote_tools": "",
-    "extra": {
-        "openai_agent_bot_researcher": {
-            "writer": {
-                "prompt": "You are a senior researcher tasked with writing a cohesive report for a research query. You will be provided with the original query, and some initial research done by a research assistant.\nYou should first come up with an outline for the report that describes the structure and flow of the report. Then, generate the report and return that as your final output.\nThe final output should be in markdown format, and it should be lengthy and detailed. Aim for 5-10 pages of content, at least 1000 words.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            },
-            "planner": {
-                "model": "gpt-4o",
-                "prompt": "You are a helpful research assistant. Given a query, come up with a set of web searches to perform to best answer the query. Output between 5 and 20 terms to query for.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            },
-            "search": {
-                "model": "gpt-4o",
-                "prompt": "You are a research assistant. Given a search term, you search the web for that term and produce a concise summary of the results. The summary must be 2-3 paragraphs and less than 300 words. Capture the main points. Write succinctly, no need to have complete sentences or good grammar. This will be consumed by someone synthesizing a report, so its vital you capture the essence and ignore any fluff. Do not include any additional commentary other than the summary itself.",
-                "allow_local_tools": false,
-                "allow_remote_tools": true
-            }
-        }
-    },
+    "extra": {},
     "__meta__": {
         "version": "2.5.81",
         "app.version": "2.5.81",

pygpt_net/data/config/presets/agent_openai_supervisor.json CHANGED
@@ -33,19 +33,7 @@
     "enabled": true,
     "description": "",
     "remote_tools": "",
-    "extra": {
-        "openai_agent_supervisor": {
-            "supervisor": {
-                "prompt": "\n You are the \u201cSupervisor\u201d (orchestrator). You never use tools directly except the tool that runs the Worker.\n Process:\n - Decompose the user's task into actionable instructions for the Worker.\n - Do NOT pass your conversation history to the Worker. Pass ONLY a concise, self-contained instruction.\n - After each Worker result, evaluate against a clear Definition of Done (DoD). If not met, call the Worker again with a refined instruction.\n - Ask the user only if absolutely necessary. If you must, STOP and output a single JSON with:\n {\"action\":\"ask_user\",\"question\":\"...\",\"reasoning\":\"...\"}\n - When done, output a single JSON:\n {\"action\":\"final\",\"final_answer\":\"...\",\"reasoning\":\"...\"}\n - Otherwise, to run the Worker, call the run_worker tool with a short instruction.\n Respond in the user's language. Keep outputs short and precise.\n "
-            },
-            "worker": {
-                "model": "gpt-4o",
-                "prompt": "\n You are the \u201cWorker\u201d. You execute Supervisor instructions strictly, using your tools.\n - Keep your own memory across calls (Worker session).\n - Return a concise result with key evidence/extracts from tools when applicable.\n - Do not ask the user questions directly; if instruction is underspecified, clearly state what is missing.\n Respond in the user's language.\n ",
-                "allow_local_tools": true,
-                "allow_remote_tools": true
-            }
-        }
-    },
+    "extra": {},
     "__meta__": {
         "version": "2.6.8",
         "app.version": "2.6.8",

pygpt_net/data/config/presets/agent_openai_writer.json CHANGED
@@ -31,21 +31,7 @@
     "enabled": true,
     "description": "",
     "remote_tools": "",
-    "extra": {
-        "openai_agent_feedback": {
-            "base": {
-                "prompt": "You generate a very short story outline based on the user's input. If there is any feedback provided, use it to improve the outline.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            },
-            "feedback": {
-                "model": "gpt-4o",
-                "prompt": "You evaluate a story outline and decide if it's good enough. \nIf it's not good enough, you provide feedback on what needs to be improved. \nNever give it a pass on the first try. After 5 attempts, you can give it a pass if the story outline is good enough - do not go for perfection.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            }
-        }
-    },
+    "extra": {},
     "__meta__": {
         "version": "2.5.81",
         "app.version": "2.5.81",

pygpt_net/data/config/presets/agent_supervisor.json CHANGED
@@ -33,17 +33,7 @@
     "enabled": true,
     "description": "",
     "remote_tools": "",
-    "extra": {
-        "supervisor": {
-            "supervisor": {
-                "prompt": "\nYou are the \u201cSupervisor\u201d \u2013 the main orchestrator. Do not use tools directly.\nYour tasks:\n- Break down the user's task into steps and create precise instructions for the \u201cWorker\u201d agent.\n- Do not pass your history/memory to the Worker. Only pass minimal, self-sufficient instructions.\n- After each Worker response, assess progress towards the Definition of Done (DoD). If not met \u2013 generate a better instruction.\n- Ask the user only when absolutely necessary. Then stop and return the question.\n- When the task is complete \u2013 return the final answer to the user.\nAlways return only ONE JSON object:\n{\n  \"action\": \"task\" | \"final\" | \"ask_user\",\n  \"instruction\": \"<Worker's instruction or ''>\",\n  \"final_answer\": \"<final answer or ''>\",\n  \"question\": \"<user question or ''>\",\n  \"reasoning\": \"<brief reasoning and quality control>\",\n  \"done_criteria\": \"<list/text of DoD criteria>\"\n}\nEnsure proper JSON (no comments, no trailing commas). Respond in the user's language.\n"
-            },
-            "worker": {
-                "model": "gpt-4o",
-                "prompt": "\nYou are the \u201cWorker\u201d \u2013 executor of the Supervisor's instructions. You have your own memory and tools.\n- Execute the Supervisor's instructions precisely and concisely.\n- Use the available tools and return a brief result + relevant data/reasoning.\n- Maintain the working context in your memory (only Worker).\n- Return plain text (not JSON) unless instructed otherwise by the Supervisor.\n- Respond in the user's language.\n"
-            }
-        }
-    },
+    "extra": {},
     "__meta__": {
         "version": "2.6.8",
         "app.version": "2.6.8",
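
All eight preset files above reduce their inlined per-agent option defaults to an empty "extra" object; presumably those defaults are now supplied at runtime by the agent providers (the preset editor changes and the generic agent.option.* locale keys added in this release point the same way). A hedged sketch of how an empty "extra" can stay safe under such a scheme; load_agent_options and PROVIDER_DEFAULTS are illustrative names, not pygpt-net's actual API.

from copy import deepcopy

# Hypothetical provider-side defaults (illustrative, not the real registry).
PROVIDER_DEFAULTS = {
    "openai_agent_b2b": {
        "bot_1": {"prompt": "...", "allow_local_tools": False},
        "bot_2": {"model": "gpt-4o", "prompt": "...", "allow_local_tools": False},
    },
}

def load_agent_options(agent_id: str, preset_extra: dict) -> dict:
    """Return provider defaults overridden by whatever the preset supplies."""
    options = deepcopy(PROVIDER_DEFAULTS.get(agent_id, {}))
    for section, values in (preset_extra.get(agent_id) or {}).items():
        options.setdefault(section, {}).update(values)
    return options

# An empty preset ("extra": {}) simply yields the provider defaults:
print(load_agent_options("openai_agent_b2b", {})["bot_2"]["model"])  # gpt-4o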

pygpt_net/data/js/app/runtime.js CHANGED
@@ -267,7 +267,10 @@ class Runtime {
     api_updateToolOutput = (c) => this.toolOutput.update(c);
     api_clearToolOutput = () => this.toolOutput.clear();
     api_beginToolOutput = () => this.toolOutput.begin();
-    api_endToolOutput = () => this.toolOutput.end();
+    api_endToolOutput = () => {
+        this.toolOutput.end();
+        this.scrollMgr.scheduleScroll();
+    }
     api_enableToolOutput = () => this.toolOutput.enable();
     api_disableToolOutput = () => this.toolOutput.disable();
     api_toggleToolOutput = (id) => this.toolOutput.toggle(id);
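
This change wraps the tool-output finalizer so that closing the block also schedules a scroll, keeping the view pinned to the newest content once the output area is finalized. The same wrap-the-finalizer idea, sketched in Python for illustration; schedule_scroll and end_tool_output are stand-ins for the JS methods, and the queue emulates the deferred next-frame scroll.

from typing import Callable, List

deferred: List[Callable[[], None]] = []

def schedule_scroll() -> None:
    # The real UI defers the scroll to the next frame; here we just queue it.
    deferred.append(lambda: print("scrolled to bottom"))

def end_tool_output() -> None:
    print("tool output finalized")
    schedule_scroll()  # new in 2.6.63: follow the content after finalizing

end_tool_output()
for task in deferred:  # "next frame"
    task()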

pygpt_net/data/js/app.min.js CHANGED
@@ -1,4 +1,4 @@
-/* app.min.js — generated on 2025-09-22 22:18:13 by bin/minify_js.py using rjsmin */
+/* app.min.js — generated on 2025-09-27 09:10:08 by bin/minify_js.py using rjsmin */
 
 /* data/js/app/async.js */
 class AsyncRunner{constructor(cfg,raf){this.cfg=cfg||{};this.raf=raf||null;const A=this.cfg.ASYNC||{};this.SLICE_MS=Utils.g('ASYNC_SLICE_MS',A.SLICE_MS??12);this.SLICE_HIDDEN_MS=Utils.g('ASYNC_SLICE_HIDDEN_MS',A.SLICE_HIDDEN_MS??Math.min(this.SLICE_MS,6));this.MIN_YIELD_MS=Utils.g('ASYNC_MIN_YIELD_MS',A.MIN_YIELD_MS??0);this._opGen=new Map();}
@@ -867,7 +867,8 @@ try{this.tips&&this.tips.hide();}catch(_){}}
 api_onChunk=(name,chunk,type)=>{const t=String(type||'text_delta');if(t==='text_delta'){this.api_appendStream(name,chunk);return;}
 this.logger.debug('STREAM','IGNORED_NON_TEXT_CHUNK',{type:t,len:(chunk?String(chunk).length:0)});};api_beginStream=(chunk=false)=>{this.tips&&this.tips.hide();this.resetStreamState('beginStream',{clearMsg:true,finalizeActive:false,forceHeavy:true});this.stream.beginStream(chunk);};api_endStream=()=>{this.stream.endStream();};api_applyStream=(name,chunk)=>{this.stream.applyStream(name,chunk);};api_appendStream=(name,chunk)=>{this.streamQ.enqueue(name,chunk);};api_nextStream=()=>{this.tips&&this.tips.hide();const element=this.dom.get('_append_output_');const before=this.dom.get('_append_output_before_');if(element&&before){const frag=document.createDocumentFragment();while(element.firstChild)frag.appendChild(element.firstChild);before.appendChild(frag);}
 this.resetStreamState('nextStream',{clearMsg:true,finalizeActive:false,forceHeavy:true});this.scrollMgr.scheduleScroll();};api_clearStream=()=>{this.tips&&this.tips.hide();this.resetStreamState('clearStream',{clearMsg:true,forceHeavy:true});const el=this.dom.getStreamContainer();if(!el)return;el.replaceChildren();};api_appendNode=(payload)=>{this.resetStreamState('appendNode');this.data.append(payload);};api_replaceNodes=(payload)=>{this.resetStreamState('replaceNodes',{clearMsg:true,forceHeavy:true});this.dom.clearNodes();this.data.replace(payload);};api_appendToInput=(payload)=>{this.nodes.appendToInput(payload);this.scrollMgr.autoFollow=true;this.scrollMgr.userInteracted=false;try{this.scrollMgr.lastScrollTop=Utils.SE.scrollTop|0;}catch(_){}
-this.scrollMgr.scheduleScroll();};api_clearNodes=()=>{this.dom.clearNodes();this.resetStreamState('clearNodes',{clearMsg:true,forceHeavy:true});};api_clearInput=()=>{this.resetStreamState('clearInput',{forceHeavy:true});this.dom.clearInput();};api_clearOutput=()=>{this.dom.clearOutput();this.resetStreamState('clearOutput',{clearMsg:true,forceHeavy:true});};api_clearLive=()=>{this.dom.clearLive();this.resetStreamState('clearLive',{forceHeavy:true});};api_appendToolOutput=(c)=>this.toolOutput.append(c);api_updateToolOutput=(c)=>this.toolOutput.update(c);api_clearToolOutput=()=>this.toolOutput.clear();api_beginToolOutput=()=>this.toolOutput.begin();api_endToolOutput=()=>this.toolOutput.end();api_enableToolOutput=()=>this.toolOutput.enable();api_disableToolOutput=()=>this.toolOutput.disable();api_toggleToolOutput=(id)=>this.toolOutput.toggle(id);api_appendExtra=(id,c)=>this.nodes.appendExtra(id,c,this.scrollMgr);api_removeNode=(id)=>this.nodes.removeNode(id,this.scrollMgr);api_removeNodesFromId=(id)=>this.nodes.removeNodesFromId(id,this.scrollMgr);api_replaceLive=(content)=>{const el=this.dom.get('_append_live_');if(!el)return;if(el.classList.contains('hidden')){el.classList.remove('hidden');el.classList.add('visible');}
+this.scrollMgr.scheduleScroll();};api_clearNodes=()=>{this.dom.clearNodes();this.resetStreamState('clearNodes',{clearMsg:true,forceHeavy:true});};api_clearInput=()=>{this.resetStreamState('clearInput',{forceHeavy:true});this.dom.clearInput();};api_clearOutput=()=>{this.dom.clearOutput();this.resetStreamState('clearOutput',{clearMsg:true,forceHeavy:true});};api_clearLive=()=>{this.dom.clearLive();this.resetStreamState('clearLive',{forceHeavy:true});};api_appendToolOutput=(c)=>this.toolOutput.append(c);api_updateToolOutput=(c)=>this.toolOutput.update(c);api_clearToolOutput=()=>this.toolOutput.clear();api_beginToolOutput=()=>this.toolOutput.begin();api_endToolOutput=()=>{this.toolOutput.end();this.scrollMgr.scheduleScroll();}
+api_enableToolOutput=()=>this.toolOutput.enable();api_disableToolOutput=()=>this.toolOutput.disable();api_toggleToolOutput=(id)=>this.toolOutput.toggle(id);api_appendExtra=(id,c)=>this.nodes.appendExtra(id,c,this.scrollMgr);api_removeNode=(id)=>this.nodes.removeNode(id,this.scrollMgr);api_removeNodesFromId=(id)=>this.nodes.removeNodesFromId(id,this.scrollMgr);api_replaceLive=(content)=>{const el=this.dom.get('_append_live_');if(!el)return;if(el.classList.contains('hidden')){el.classList.remove('hidden');el.classList.add('visible');}
 el.innerHTML=content;try{const maybePromise=this.renderer.renderPendingMarkdown(el);const post=()=>{try{this.highlighter.observeNewCode(el,{deferLastIfStreaming:true,minLinesForLast:this.cfg.PROFILE_CODE.minLinesForHL,minCharsForLast:this.cfg.PROFILE_CODE.minCharsForHL},this.stream.activeCode);this.highlighter.observeMsgBoxes(el,(box)=>{this.highlighter.observeNewCode(box,{deferLastIfStreaming:true,minLinesForLast:this.cfg.PROFILE_CODE.minLinesForHL,minCharsForLast:this.cfg.PROFILE_CODE.minCharsForHL},this.stream.activeCode);this.codeScroll.initScrollableBlocks(box);});}catch(_){}
 try{const mm=getMathMode();if(mm==='finalize-only')this.math.schedule(el,0,true);else this.math.schedule(el);}catch(_){}
 this.scrollMgr.scheduleScroll();};if(maybePromise&&typeof maybePromise.then==='function'){maybePromise.then(post);}else{post();}}catch(_){this.scrollMgr.scheduleScroll();}};api_updateFooter=(html)=>{const el=this.dom.get('_footer_');if(el)el.innerHTML=html;};api_enableEditIcons=()=>this.ui.enableEditIcons();api_disableEditIcons=()=>this.ui.disableEditIcons();api_enableTimestamp=()=>this.ui.enableTimestamp();api_disableTimestamp=()=>this.ui.disableTimestamp();api_enableBlocks=()=>this.ui.enableBlocks();api_disableBlocks=()=>this.ui.disableBlocks();api_updateCSS=(styles)=>this.ui.updateCSS(styles);api_getScrollPosition=()=>{this.bridge.updateScrollPosition(window.scrollY);};api_setScrollPosition=(pos)=>{try{window.scrollTo(0,pos);this.scrollMgr.prevScroll=parseInt(pos);}catch(_){}};api_showLoading=()=>this.loading.show();api_hideLoading=()=>this.loading.hide();api_restoreCollapsedCode=(root)=>this.renderer.restoreCollapsedCode(root);api_scrollToTopUser=()=>this.scrollMgr.scrollToTopUser();api_scrollToBottomUser=()=>this.scrollMgr.scrollToBottomUser();api_showTips=()=>this.tips.show();api_hideTips=()=>this.tips.hide();api_getCustomMarkupRules=()=>this.customMarkup.getRules();api_setCustomMarkupRules=(rules)=>{this.customMarkup.setRules(rules);try{this.stream.setCustomFenceSpecs(this.customMarkup.getSourceFenceSpecs());}catch(_){}};init(){this.highlighter.initHLJS();this.dom.init();this.ui.ensureStickyHeaderStyle();this.tips=new TipsManager(this.dom);this.events.install();this.bridge.initQWebChannel(this.cfg.PID,(bridge)=>{const onChunk=(name,chunk,type)=>this.api_onChunk(name,chunk,type);const onNode=(payload)=>this.api_appendNode(payload);const onNodeReplace=(payload)=>this.api_replaceNodes(payload);const onNodeInput=(html)=>this.api_appendToInput(html);this.bridge.connect(onChunk,onNode,onNodeReplace,onNodeInput);try{this.logger.bindBridge(this.bridge.bridge||this.bridge);}catch(_){}});this.renderer.init();try{this.renderer.renderPendingMarkdown(document);}catch(_){}

pygpt_net/data/locale/locale.en.ini CHANGED
@@ -98,6 +98,7 @@ agent.name.worker = Worker
 agent.option.model = Model
 agent.option.name = Name
 agent.option.prompt = Prompt
+agent.option.prompt.refine.desc = Prompt for plan refining
 agent.option.prompt.b1.desc = Prompt for bot 1
 agent.option.prompt.b2.desc = Prompt for bot 2
 agent.option.prompt.base.desc = Prompt for Base Agent
@@ -107,11 +108,14 @@ agent.option.prompt.planner.desc = Prompt for Planner agent
 agent.option.prompt.search.desc = Prompt for search agent
 agent.option.prompt.supervisor.desc = Prompt for Supervisor
 agent.option.prompt.worker.desc = Prompt for Worker
+agent.option.refine.after_each = After each step
+agent.option.refine.after_each.desc = Refine plan after each step
 agent.option.role = Short description of the agent's operation for instructing the model (optional)
 agent.option.section.base = Base agent
 agent.option.section.chooser = Chooser
 agent.option.section.feedback = Feedback
 agent.option.section.planner = Planner
+agent.option.section.refine = Refine plan
 agent.option.section.search = Search
 agent.option.section.supervisor = Supervisor
 agent.option.section.worker = Worker
@@ -1725,3 +1729,4 @@ vision.capture.manual.captured.success = Image captured from the camera:
 vision.capture.name.prefix = Camera capture:
 vision.capture.options.title = Video capture
 vision.checkbox.tooltip = If checked, the vision model is active. It will be automatically activated upon image upload. You can deactivate it in real-time.
+agent.option.prompt.desc = Prompt for agent
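
The new refine keys label a plan-refinement option for the planner workflows (planner.py and agent_planner.py grow substantially in this release): when enabled, the remaining plan is revised after each executed step. A hypothetical illustration of what such a flag gates in a plan-execute loop; run_planner and refine are illustrative names, not the actual workflow API.

from typing import Callable, List

def run_planner(
    steps: List[str],
    execute: Callable[[str], str],
    refine: Callable[[List[str], str], List[str]],
    refine_after_each: bool = False,
) -> List[str]:
    results: List[str] = []
    remaining = list(steps)
    while remaining:
        step = remaining.pop(0)
        results.append(execute(step))
        if refine_after_each and remaining:
            # Let the refiner rewrite the rest of the plan using the latest result.
            remaining = refine(remaining, results[-1])
    return results

out = run_planner(
    ["draft outline", "write section"],
    execute=lambda s: f"done: {s}",
    refine=lambda rest, last: rest,  # identity refiner for the demo
    refine_after_each=True,
)
print(out)  # ['done: draft outline', 'done: write section']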
pygpt_net/js_rc.py CHANGED
@@ -111344,7 +111344,7 @@ toggleEl.classLi\
 st.toggle('toggl\
 e-expanded');\x0a\x09}\
 \x0a}\
-\x00\x00J]\
+\x00\x00J\x8d\
 /\
 / ==============\
 ================\
@@ -111867,8 +111867,11 @@ utput = () => th\
 is.toolOutput.be\
 gin();\x0a\x09api_endT\
 oolOutput = () =\
-> this.toolOutpu\
-t.end();\x0a\x09api_en\
+> {\x0a\x09    this.to\
+olOutput.end();\x0a\
+\x09    this.scroll\
+Mgr.scheduleScro\
+ll();\x0a\x09}\x0a\x09api_en\
 ableToolOutput =\
  () => this.tool\
 Output.enable();\
@@ -113507,8 +113510,8 @@ ros||{},d(e,r)}}\
 /\
 * app.min.js \xe2\x80\x94\
  generated on 20\
-25-09-22 09:05:0\
-8 by bin/minify_\
+25-09-22 22:18:1\
+3 by bin/minify_\
 js.py using rjsm\
 in */\x0a\x0a/* data/j\
 s/app/async.js *\
@@ -125325,8 +125328,8 @@ order: 1px solid\
  transparent; ba\
 ckground: transp\
 arent; }','.msg-\
-box.msg-user:hov\
-er .msg .msg-cop\
+box.msg-user .ms\
+g:hover .msg-cop\
 y-btn, .msg-box.\
 msg-user .msg:fo\
 cus-within .msg-\
@@ -126940,14 +126943,14 @@ qt_resource_struct = b"\
 \x00\x00\x02\x88\x00\x00\x00\x00\x00\x01\x00\x18\x02\xdd\
 \x00\x00\x030\x00\x00\x00\x00\x00\x01\x00\x1b+\x19\
 \x00\x00\x02\x0e\x00\x00\x00\x00\x00\x01\x00\x13kD\
-\x00\x00\x03\x9c\x00\x00\x00\x00\x00\x01\x00\x1b\xb1\x9d\
+\x00\x00\x03\x9c\x00\x00\x00\x00\x00\x01\x00\x1b\xb1\xcd\
 \x00\x00\x01\xb2\x00\x00\x00\x00\x00\x01\x00\x13\x0dY\
 \x00\x00\x00\xf8\x00\x00\x00\x00\x00\x01\x00\x11H\xf7\
 \x00\x00\x01\xf0\x00\x00\x00\x00\x00\x01\x00\x13d>\
 \x00\x00\x00J\x00\x00\x00\x00\x00\x01\x00\x10\x8c-\
 \x00\x00\x016\x00\x00\x00\x00\x00\x01\x00\x11j\xda\
 \x00\x00\x02\xb2\x00\x00\x00\x00\x00\x01\x00\x19\xe5\xc3\
-\x00\x00\x03R\x00\x00\x00\x00\x00\x01\x00\x1buz\
+\x00\x00\x03R\x00\x00\x00\x00\x00\x01\x00\x1bu\xaa\
 \x00\x00\x02\xd0\x00\x00\x00\x00\x00\x01\x00\x1a\x15/\
 \x00\x00\x01\x96\x00\x00\x00\x00\x00\x01\x00\x13\x04\x0b\
 \x00\x00\x02.\x00\x00\x00\x00\x00\x01\x00\x13\x82Z\
@@ -126958,7 +126961,7 @@ qt_resource_struct = b"\
 \x00\x00\x00\x8a\x00\x00\x00\x00\x00\x01\x00\x11\x07\xcf\
 \x00\x00\x02\xf0\x00\x00\x00\x00\x00\x01\x00\x1a\x8d\x01\
 \x00\x00\x00\xc2\x00\x00\x00\x00\x00\x01\x00\x11Bm\
-\x00\x00\x03r\x00\x00\x00\x00\x00\x01\x00\x1b\xa4\x03\
+\x00\x00\x03r\x00\x00\x00\x00\x00\x01\x00\x1b\xa43\
 \x00\x00\x01\x16\x00\x00\x00\x00\x00\x01\x00\x11a&\
 "
 