pygpt-net 2.6.58__py3-none-any.whl → 2.6.60__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pygpt_net/CHANGELOG.txt +10 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/app.py +9 -5
- pygpt_net/controller/__init__.py +1 -0
- pygpt_net/controller/presets/editor.py +442 -39
- pygpt_net/core/agents/custom/__init__.py +275 -0
- pygpt_net/core/agents/custom/debug.py +64 -0
- pygpt_net/core/agents/custom/factory.py +109 -0
- pygpt_net/core/agents/custom/graph.py +71 -0
- pygpt_net/core/agents/custom/llama_index/__init__.py +10 -0
- pygpt_net/core/agents/custom/llama_index/factory.py +89 -0
- pygpt_net/core/agents/custom/llama_index/router_streamer.py +106 -0
- pygpt_net/core/agents/custom/llama_index/runner.py +529 -0
- pygpt_net/core/agents/custom/llama_index/stream.py +56 -0
- pygpt_net/core/agents/custom/llama_index/utils.py +242 -0
- pygpt_net/core/agents/custom/logging.py +50 -0
- pygpt_net/core/agents/custom/memory.py +51 -0
- pygpt_net/core/agents/custom/router.py +116 -0
- pygpt_net/core/agents/custom/router_streamer.py +187 -0
- pygpt_net/core/agents/custom/runner.py +454 -0
- pygpt_net/core/agents/custom/schema.py +125 -0
- pygpt_net/core/agents/custom/utils.py +181 -0
- pygpt_net/core/agents/provider.py +72 -7
- pygpt_net/core/agents/runner.py +7 -4
- pygpt_net/core/agents/runners/helpers.py +1 -1
- pygpt_net/core/agents/runners/llama_workflow.py +3 -0
- pygpt_net/core/agents/runners/openai_workflow.py +8 -1
- pygpt_net/core/filesystem/parser.py +37 -24
- pygpt_net/{ui/widget/builder → core/node_editor}/__init__.py +2 -2
- pygpt_net/core/{builder → node_editor}/graph.py +11 -218
- pygpt_net/core/node_editor/models.py +111 -0
- pygpt_net/core/node_editor/types.py +76 -0
- pygpt_net/core/node_editor/utils.py +17 -0
- pygpt_net/core/render/web/renderer.py +10 -8
- pygpt_net/data/config/config.json +3 -3
- pygpt_net/data/config/models.json +3 -3
- pygpt_net/data/locale/locale.en.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_system.en.ini +68 -0
- pygpt_net/item/agent.py +5 -1
- pygpt_net/item/preset.py +19 -1
- pygpt_net/plugin/cmd_system/config.py +377 -1
- pygpt_net/plugin/cmd_system/plugin.py +52 -8
- pygpt_net/plugin/cmd_system/runner.py +508 -32
- pygpt_net/plugin/cmd_system/winapi.py +481 -0
- pygpt_net/plugin/cmd_system/worker.py +88 -15
- pygpt_net/provider/agents/base.py +33 -2
- pygpt_net/provider/agents/llama_index/flow_from_schema.py +92 -0
- pygpt_net/provider/agents/llama_index/workflow/supervisor.py +0 -0
- pygpt_net/provider/agents/openai/flow_from_schema.py +96 -0
- pygpt_net/provider/core/agent/json_file.py +11 -5
- pygpt_net/provider/llms/openai.py +6 -4
- pygpt_net/tools/agent_builder/tool.py +217 -52
- pygpt_net/tools/agent_builder/ui/dialogs.py +119 -24
- pygpt_net/tools/agent_builder/ui/list.py +37 -10
- pygpt_net/tools/code_interpreter/ui/html.py +2 -1
- pygpt_net/ui/dialog/preset.py +16 -1
- pygpt_net/ui/main.py +1 -1
- pygpt_net/{core/builder → ui/widget/node_editor}/__init__.py +2 -2
- pygpt_net/ui/widget/node_editor/command.py +373 -0
- pygpt_net/ui/widget/node_editor/editor.py +2038 -0
- pygpt_net/ui/widget/node_editor/item.py +492 -0
- pygpt_net/ui/widget/node_editor/node.py +1205 -0
- pygpt_net/ui/widget/node_editor/utils.py +17 -0
- pygpt_net/ui/widget/node_editor/view.py +247 -0
- pygpt_net/ui/widget/textarea/web.py +1 -1
- {pygpt_net-2.6.58.dist-info → pygpt_net-2.6.60.dist-info}/METADATA +135 -61
- {pygpt_net-2.6.58.dist-info → pygpt_net-2.6.60.dist-info}/RECORD +69 -42
- pygpt_net/core/agents/custom.py +0 -150
- pygpt_net/ui/widget/builder/editor.py +0 -2001
- {pygpt_net-2.6.58.dist-info → pygpt_net-2.6.60.dist-info}/LICENSE +0 -0
- {pygpt_net-2.6.58.dist-info → pygpt_net-2.6.60.dist-info}/WHEEL +0 -0
- {pygpt_net-2.6.58.dist-info → pygpt_net-2.6.60.dist-info}/entry_points.txt +0 -0
pygpt_net/core/agents/custom/utils.py
ADDED

@@ -0,0 +1,181 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ================================================== #
+# This file is a part of PYGPT package #
+# Website: https://pygpt.net #
+# GitHub: https://github.com/szczyglis-dev/py-gpt #
+# MIT License #
+# Created By : Marcin Szczygliński #
+# Updated Date: 2025.09.24 23:00:00 #
+# ================================================== #
+
+from __future__ import annotations
+from dataclasses import dataclass
+from typing import Any, Callable, Dict, List, Optional
+
+from agents import TResponseInputItem
+from pygpt_net.item.model import ModelItem
+from pygpt_net.item.preset import PresetItem
+
+
+# ---------- IO sanitization / output helpers ----------
+
+def sanitize_input_items(items: List[TResponseInputItem]) -> List[TResponseInputItem]:
+    """Remove server-assigned ids from items and content parts to avoid duplication errors."""
+    sanitized: List[TResponseInputItem] = []
+    for it in items or []:
+        if isinstance(it, dict):
+            new_it: Dict[str, Any] = dict(it)
+            # top-level ids that might reappear
+            new_it.pop("id", None)
+            new_it.pop("message_id", None)
+            # sanitize content list parts
+            if "content" in new_it and isinstance(new_it["content"], list):
+                new_content = []
+                for part in new_it["content"]:
+                    if isinstance(part, dict):
+                        p = dict(part)
+                        p.pop("id", None)
+                        new_content.append(p)
+                    else:
+                        new_content.append(part)
+                new_it["content"] = new_content
+            sanitized.append(new_it)
+        else:
+            sanitized.append(it)
+    return sanitized
+
+
+def extract_text_output(result: Any) -> str:
+    """
+    Best-effort to get a human-facing text from openai-agents Runner result
+    without relying on app-specific helpers.
+    """
+    out = getattr(result, "final_output", None)
+    if out is None:
+        return ""
+    try:
+        return str(out)
+    except Exception:
+        return ""
+
+
+def patch_last_assistant_output(items: List[TResponseInputItem], text: str) -> List[TResponseInputItem]:
+    """
+    Replace the content of the last assistant message with plain text content.
+    This prevents leaking router JSON to subsequent agents.
+    """
+    if not items:
+        return items
+    patched = list(items)
+    # find last assistant
+    idx: Optional[int] = None
+    for i in range(len(patched) - 1, -1, -1):
+        it = patched[i]
+        if isinstance(it, dict) and it.get("role") == "assistant":
+            idx = i
+            break
+    if idx is None:
+        return patched
+
+    # set standard output_text content
+    patched[idx] = {
+        "role": "assistant",
+        "content": [
+            {
+                "type": "output_text",
+                "text": text or "",
+            }
+        ],
+    }
+    return sanitize_input_items(patched)
+
+
+# ---------- Per-agent options resolution ----------
+
+OptionGetter = Callable[[str, str, Any], Any]
+
+
+def make_option_getter(base_agent, preset: Optional[PresetItem]) -> OptionGetter:
+    """
+    Returns option_get(section, key, default) bound to your BaseAgent.get_option semantics.
+    section == node.id (e.g. "agent_1"), key in {"model","prompt","allow_local_tools","allow_remote_tools"}.
+    """
+    def option_get(section: str, key: str, default: Any = None) -> Any:
+        if preset is None:
+            return default
+        try:
+            val = base_agent.get_option(preset, section, key)
+            return default if val in (None, "") else val
+        except Exception:
+            return default
+    return option_get
+
+
+@dataclass
+class NodeRuntime:
+    model: ModelItem
+    instructions: str
+    allow_local_tools: bool
+    allow_remote_tools: bool
+
+
+def resolve_node_runtime(
+    *,
+    window,
+    node,
+    option_get: OptionGetter,
+    default_model: ModelItem,
+    base_prompt: Optional[str],
+    schema_allow_local: Optional[bool],
+    schema_allow_remote: Optional[bool],
+    default_allow_local: bool,
+    default_allow_remote: bool,
+) -> NodeRuntime:
+    """
+    Resolve per-node runtime using get_option() overrides, schema slots and defaults.
+
+    Priority:
+    - model: get_option(node.id, "model") -> window.core.models.get(name) -> default_model
+    - prompt: get_option(node.id, "prompt") -> node.instruction -> base_prompt -> ""
+    - allow_*: get_option(node.id, "allow_local_tools"/"allow_remote_tools")
+      -> schema flags -> defaults
+    """
+    # Model resolve
+    model_name = option_get(node.id, "model", None)
+    model_item: ModelItem = default_model
+    try:
+        if model_name:
+            cand = window.core.models.get(model_name)
+            if cand:
+                model_item = cand
+    except Exception:
+        # fallback to default_model
+        model_item = default_model
+
+    # Instructions resolve
+    prompt_opt = option_get(node.id, "prompt", None)
+    instructions = (prompt_opt or getattr(node, "instruction", None) or base_prompt or "").strip()
+
+    # Tools flags resolve
+    allow_local_tools = bool(
+        option_get(
+            node.id,
+            "allow_local_tools",
+            schema_allow_local if schema_allow_local is not None else default_allow_local,
+        )
+    )
+    allow_remote_tools = bool(
+        option_get(
+            node.id,
+            "allow_remote_tools",
+            schema_allow_remote if schema_allow_remote is not None else default_allow_remote,
+        )
+    )
+
+    return NodeRuntime(
+        model=model_item,
+        instructions=instructions,
+        allow_local_tools=allow_local_tools,
+        allow_remote_tools=allow_remote_tools,
+    )
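
Taken together, these helpers keep multi-agent hand-offs clean. A minimal sketch of how they compose (the item dicts below are hypothetical examples, not values from the package):

    from pygpt_net.core.agents.custom.utils import (
        patch_last_assistant_output,
        sanitize_input_items,
    )

    # Hypothetical conversation items in the dict shape the helpers expect.
    items = [
        {"id": "msg_1", "role": "user",
         "content": [{"id": "part_1", "type": "input_text", "text": "Hi"}]},
        {"id": "msg_2", "role": "assistant",
         "content": [{"type": "output_text", "text": '{"route": "agent_2"}'}]},
    ]

    # Server-assigned ids are stripped from items and their content parts.
    clean = sanitize_input_items(items)
    assert "id" not in clean[0] and "id" not in clean[0]["content"][0]

    # The router's JSON answer is replaced with plain text, so it does not
    # leak into the input of the next agent in the flow.
    patched = patch_last_assistant_output(clean, "Routed to agent_2.")
    assert patched[-1]["content"][0]["text"] == "Routed to agent_2."
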
pygpt_net/core/agents/provider.py
CHANGED

@@ -6,12 +6,13 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.
+# Updated Date: 2025.09.24 00:00:00 #
 # ================================================== #
 
-
+import copy
+from typing import List, Dict, Any, Optional
 
-from pygpt_net.core.types import MODE_CHAT
+from pygpt_net.core.types import MODE_CHAT, MODE_AGENT_LLAMA
 from pygpt_net.item.model import ModelItem
 from pygpt_net.provider.agents.base import BaseAgent
 
@@ -25,6 +26,7 @@ class Provider:
         """
         self.window = window
         self.agents = {}
+        self.hidden = ["openai_custom", "llama_custom"]  # builder hidden agents (hide provider on list)
 
     def get_ids(self) -> List[str]:
         """
@@ -34,32 +36,90 @@ class Provider:
         """
         return list(self.agents.keys())
 
-    def has(self, id: str) -> bool:
+    def has(self, id: str, mode: str = MODE_AGENT_LLAMA) -> bool:
         """
         Check if agent exists
 
         :param id: agent id
+        :param mode: agent mode, used for custom agents (optional)
         :return: True if exists
         """
+        custom = self.get_custom(id, mode)  # shared instance
+        if custom is not None:
+            return True
         return id in self.agents
 
-    def get(self, id: str) -> BaseAgent:
+    def get(self, id: str, mode: str = MODE_AGENT_LLAMA) -> BaseAgent:
        """
         Get agent provider
 
         :param id: agent id
+        :param mode: agent mode, used for custom agents (optional)
         :return: agent provider
         """
+        # custom agents
+        custom = self.get_custom(id, mode)  # shared instance
+        if custom is not None:
+            return custom
+
+        # predefined agents
         if id in self.agents:
             return self.agents[id]
 
+    def get_custom(
+            self,
+            id: str,
+            mode: str = MODE_AGENT_LLAMA,
+            as_copy: bool = True
+    ) -> Optional[BaseAgent]:
+        """
+        Get custom agent provider by id
+
+        :param id: agent id
+        :param mode: agent mode, used for custom agents (optional)
+        :param as_copy: return a copy of the agent (default: True)
+        :return: custom agent provider
+        """
+        custom = None
+        if self.window and self.window.core.agents.custom.is_custom(id):
+            try:
+                if mode == MODE_AGENT_LLAMA:
+                    if "llama_custom" in self.agents:
+                        custom = copy.deepcopy(self.agents["llama_custom"]) if as_copy else self.agents["llama_custom"]
+                else:
+                    if "openai_custom" in self.agents:
+                        custom = copy.deepcopy(self.agents["openai_custom"]) if as_copy else self.agents["openai_custom"]
+            except Exception as e:
+                self.window.core.debug.log(f"Failed to get custom agent '{id}': {e}")
+                return None
+
+        # append custom id and build options
+        if custom is not None:
+            options = self.window.core.agents.custom.build_options(id)
+            custom.set_id(id)
+            custom.set_options(options)
+            return custom
+
     def all(self) -> Dict[str, BaseAgent]:
         """
-        Get all agents
+        Get all agents (predefined + custom, for build options)
 
         :return: dict of agent providers
         """
-
+        all_agents = {}
+
+        # predefined agents
+        for id in self.get_ids():
+            if id in self.hidden:
+                continue
+            all_agents[id] = self.agents[id]
+
+        # custom agents
+        if self.window:
+            for id in self.window.core.agents.custom.get_ids():
+                all_agents[id] = self.get_custom(id, as_copy=True)  # copy to avoid overwriting options
+        return all_agents
+
 
     def register(self, id: str, agent):
         """
@@ -87,6 +147,8 @@ class Provider:
         """
         choices = []
         for id in self.get_ids():
+            if id in self.hidden:
+                continue
             agent = self.get(id)
             if type is not None:
                 if agent.type != type:
@@ -94,6 +156,9 @@ class Provider:
             choices.append({id: agent.name})
 
         # sort by name
+        if self.window:
+            custom = self.window.core.agents.custom.get_choices()
+            choices.extend(custom)
         choices.sort(key=lambda x: list(x.values())[0].lower())
         return choices
 
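
The net effect of the provider changes, as a rough usage sketch (assumes a wired `window`; the agent id is hypothetical):

    from pygpt_net.core.types import MODE_AGENT_LLAMA

    def resolve_agent(window, agent_id: str, mode: str = MODE_AGENT_LLAMA):
        """Sketch: mode-aware agent lookup per this diff."""
        provider = window.core.agents.provider
        if not provider.has(agent_id, mode):
            raise RuntimeError(f"Agent not found: {agent_id}")
        # Predefined ids return the shared registered instance; builder-made
        # ids resolve through the hidden "llama_custom"/"openai_custom"
        # providers as a deep copy carrying set_id(id) + set_options(...),
        # so per-flow options never overwrite the shared instance.
        return provider.get(agent_id, mode)
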
pygpt_net/core/agents/runner.py
CHANGED

@@ -81,7 +81,7 @@ class Runner:
 
         try:
             # first, check if agent exists
-            if not self.window.core.agents.provider.has(agent_id):
+            if not self.window.core.agents.provider.has(agent_id, context.mode):
                 raise Exception(f"Agent not found: {agent_id}")
 
             # prepare input ctx
@@ -155,6 +155,8 @@ class Runner:
             )
             history.insert(0, msg)
 
+            # append custom schema if available
+            schema = self.window.core.agents.custom.get_schema(agent_id)
             agent_kwargs = {
                 "context": context,
                 "tools": tools,
@@ -171,9 +173,9 @@
                 "are_commands": is_cmd,
                 "workdir": workdir,
                 "preset": context.preset if context else None,
+                "schema": schema,
             }
-
-            provider = self.window.core.agents.provider.get(agent_id)
+            provider = self.window.core.agents.provider.get(agent_id, context.mode)
             agent = provider.get_agent(self.window, agent_kwargs)
             agent_run = provider.run
             if verbose:
@@ -188,6 +190,8 @@
                 "signals": signals,
                 "verbose": verbose,
             }
+            if schema:
+                kwargs["schema"] = schema
 
             if mode == AGENT_MODE_PLAN:
                 return self.llama_plan.run(**kwargs)
@@ -291,7 +295,6 @@
                 "workdir": workdir,
                 "preset": context.preset if context else None,
             }
-
             provider = self.window.core.agents.provider.get(agent_id)
             agent = provider.get_agent(self.window, agent_kwargs)
             if verbose:
pygpt_net/core/agents/runners/helpers.py
CHANGED

@@ -100,7 +100,7 @@ class Helpers:
         """
         if signals is None:
             return
-        chunk = ctx.stream.replace("<execute>", "\n```python\n").replace("</execute>", "\n```\n")
+        chunk = ctx.stream.replace("<execute>", "\n```python\n").replace("</execute>", "\n```\n") if ctx.stream else ""
         data = {
            "meta": ctx.meta,
            "ctx": ctx,
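
The one-line fix above guards against `ctx.stream` still being None when the `<execute>`-tag rewrite runs; the fixed expression is easy to verify in isolation (standalone sketch):

    def to_chunk(stream):
        # Mirrors the fixed line: tolerate a stream that is still None.
        return stream.replace("<execute>", "\n```python\n").replace("</execute>", "\n```\n") if stream else ""

    assert to_chunk(None) == ""
    assert to_chunk("<execute>print(1)</execute>") == "\n```python\nprint(1)\n```\n"
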
pygpt_net/core/agents/runners/llama_workflow.py
CHANGED

@@ -48,6 +48,7 @@ class LlamaWorkflow(BaseRunner):
         verbose: bool = False,
         history: List[CtxItem] = None,
         llm: Any = None,
+        schema: Optional[List] = None,
     ) -> bool:
         """
         Run agent workflow
@@ -59,6 +60,7 @@ class LlamaWorkflow(BaseRunner):
         :param verbose: verbose mode
         :param history: chat history
         :param llm: LLM instance
+        :param schema: custom agent flow schema
         :return: True if success
         """
         if self.is_stopped():
@@ -253,6 +255,7 @@ class LlamaWorkflow(BaseRunner):
             item_ctx.stream = ""  # for stream
 
             async for event in handler.stream_events():
+                print(event)
                 if self.is_stopped():
                     # persist current output on stop
                     item_ctx.output = item_ctx.live_output
pygpt_net/core/agents/runners/openai_workflow.py
CHANGED

@@ -9,7 +9,7 @@
 # Updated Date: 2025.08.24 03:00:00 #
 # ================================================== #
 
-from typing import Dict, Any, List
+from typing import Dict, Any, List, Optional
 
 from pygpt_net.core.bridge.context import BridgeContext
 from pygpt_net.core.bridge.worker import BridgeSignals
@@ -18,6 +18,7 @@ from pygpt_net.item.ctx import CtxItem
 
 from ..bridge import ConnectionContext
 from .base import BaseRunner
+from ..custom.logging import StdLogger
 
 
 class OpenAIWorkflow(BaseRunner):
@@ -41,6 +42,7 @@ class OpenAIWorkflow(BaseRunner):
         verbose: bool = False,
         history: List[CtxItem] = None,
         stream: bool = False,
+        schema: Optional[List] = None,
     ) -> bool:
         """
         Run OpenAI agents
@@ -54,6 +56,7 @@ class OpenAIWorkflow(BaseRunner):
         :param verbose: verbose mode
         :param history: chat history
         :param stream: use streaming
+        :param schema: custom agent flow schema
         :return: True if success
         """
         if self.is_stopped():
@@ -193,6 +196,10 @@ class OpenAIWorkflow(BaseRunner):
         if previous_response_id:
             run_kwargs["previous_response_id"] = previous_response_id
 
+        # custom agent schema
+        if schema:
+            run_kwargs["schema"] = schema
+
         # split response messages to separated context items
         run_kwargs["use_partial_ctx"] = self.window.core.config.get("agent.openai.response.split", True)
 
pygpt_net/core/filesystem/parser.py
CHANGED

@@ -26,28 +26,44 @@ class Parser:
 
     def extract_data_paths(self, text: str) -> list:
         """
-        Extract file paths from text that contain 'data' segment.
-
-        :param text: input text
-        :return: list of file paths containing 'data' segment
+        Extract file paths from text that contain 'data' segment (case-insensitive).
+        Supports quoted and unquoted paths, POSIX/Windows, and ignores URLs.
         """
-        if text
+        if not text:
             return []
-
-
-
-
-
-
-
-
-
+
+        def is_data_path(p: str) -> bool:
+            # 'data' (case-insensitive)
+            return re.search(r"(?i)(?:^|[\\/])data(?:[\\/]|$)", p) is not None
+
+        def is_url(p: str) -> bool:
+            return re.match(r"^[a-z][a-z0-9+.-]*://", p, re.I) is not None
+
+        results = []
+
+        quoted_pat = re.compile(r"(?P<q>['\"])(?P<p>(?:[A-Za-z]:)?[\\/](?:(?!\1).)+?)\1")
+        for m in quoted_pat.finditer(text):
+            p = m.group("p").strip()
+            if not is_url(p) and is_data_path(p):
+                results.append(p)
+
+        unquoted_pat = re.compile(r"(?P<p>(?:[A-Za-z]:)?(?:[\\/][^\s'\"),;]+)+)")
+        for m in unquoted_pat.finditer(text):
+            p = m.group("p").strip()
+            if not is_url(p) and is_data_path(p):
+                results.append(p)
+
+        seen = set()
+        out = []
+        for p in results:
+            if p not in seen:
+                seen.add(p)
+                out.append(p)
+        return out
+
+    def extract_data_files(self, ctx: "CtxItem", response: str) -> list:
         """
         Extract files from tool outputs and return list of file paths.
-
-        :param ctx: CtxItem
-        :param response: response text containing file paths
-        :return: list of file paths
         """
         if response is None:
             return []
@@ -58,14 +74,11 @@ class Parser:
         def replace_with_local(path):
             """
             Replace the path with local data directory path.
-
-            :param path: original path
-            :return: modified path
             """
             segments = re.split(r"[\\/]+", path)
-
-
-
+            # case-insensitive find of 'data'
+            data_index = next((i for i, s in enumerate(segments) if s.lower() == "data"), None)
+            if data_index is None:
                 return path
             tail = segments[data_index + 1:]
             new_path = os.path.join(local_data_dir, *tail) if tail else local_data_dir
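
The new path detection is easiest to see in isolation; the two predicates behave like this (standalone sketch, regexes copied verbatim from the hunk above; the sample paths are hypothetical):

    import re

    def is_data_path(p: str) -> bool:
        return re.search(r"(?i)(?:^|[\\/])data(?:[\\/]|$)", p) is not None

    def is_url(p: str) -> bool:
        return re.match(r"^[a-z][a-z0-9+.-]*://", p, re.I) is not None

    assert is_data_path("/home/user/.config/pygpt-net/data/plot.png")
    assert is_data_path(r"C:\Users\me\DATA\out.csv")      # case-insensitive, Windows separators
    assert not is_data_path("/home/user/database/x.txt")  # 'data' must be a whole path segment
    assert is_url("https://pygpt.net/data/file.png")      # URLs are filtered out by the caller
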
pygpt_net/{ui/widget/builder → core/node_editor}/__init__.py
RENAMED

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.
+# Updated Date: 2025.09.24 00:00:00 #
 # ================================================== #
 
-from .
+from .graph import NodeGraph