abstractagent 0.2.0__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- abstractagent/adapters/__init__.py +2 -1
- abstractagent/adapters/codeact_runtime.py +823 -57
- abstractagent/adapters/memact_runtime.py +721 -0
- abstractagent/adapters/react_runtime.py +1114 -67
- abstractagent/agents/__init__.py +4 -0
- abstractagent/agents/base.py +58 -1
- abstractagent/agents/codeact.py +89 -18
- abstractagent/agents/memact.py +244 -0
- abstractagent/agents/react.py +91 -18
- abstractagent/logic/__init__.py +2 -0
- abstractagent/logic/builtins.py +212 -5
- abstractagent/logic/codeact.py +87 -80
- abstractagent/logic/memact.py +127 -0
- abstractagent/logic/react.py +108 -48
- abstractagent/repl.py +24 -447
- abstractagent/scripts/__init__.py +5 -0
- abstractagent/scripts/lmstudio_tool_eval.py +426 -0
- abstractagent/tools/__init__.py +3 -0
- {abstractagent-0.2.0.dist-info → abstractagent-0.3.0.dist-info}/METADATA +10 -11
- abstractagent-0.3.0.dist-info/RECORD +31 -0
- abstractagent/ui/__init__.py +0 -5
- abstractagent/ui/question.py +0 -197
- abstractagent-0.2.0.dist-info/RECORD +0 -28
- {abstractagent-0.2.0.dist-info → abstractagent-0.3.0.dist-info}/WHEEL +0 -0
- {abstractagent-0.2.0.dist-info → abstractagent-0.3.0.dist-info}/entry_points.txt +0 -0
- {abstractagent-0.2.0.dist-info → abstractagent-0.3.0.dist-info}/licenses/LICENSE +0 -0
- {abstractagent-0.2.0.dist-info → abstractagent-0.3.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,721 @@
|
|
|
1
|
+
"""AbstractRuntime adapter for MemAct (memory-enhanced agent)."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import hashlib
|
|
6
|
+
import json
|
|
7
|
+
from typing import Any, Callable, Dict, List, Optional
|
|
8
|
+
|
|
9
|
+
from abstractcore.tools import ToolCall
|
|
10
|
+
from abstractruntime import Effect, EffectType, RunState, StepPlan, WorkflowSpec
|
|
11
|
+
from abstractruntime.core.vars import ensure_limits, ensure_namespaces
|
|
12
|
+
from abstractruntime.memory.active_context import ActiveContextPolicy
|
|
13
|
+
|
|
14
|
+
from ..logic.memact import MemActLogic
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def _new_message(
    ctx: Any,
    *,
    role: str,
    content: str,
    metadata: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    """Build a conversation-history message dict.

    Uses ``ctx.now_iso()`` for the timestamp when the context provides a
    callable of that name, otherwise falls back to UTC now. A unique
    ``message_id`` is added to the metadata unless one is already present.
    """
    stamp: Optional[str] = None
    clock = getattr(ctx, "now_iso", None)
    if callable(clock):
        stamp = str(clock())
    if not stamp:
        # Fallback: runtime context has no usable clock, use wall-clock UTC.
        from datetime import datetime, timezone

        stamp = datetime.now(timezone.utc).isoformat()

    import uuid

    merged_meta = dict(metadata or {})
    if "message_id" not in merged_meta:
        merged_meta["message_id"] = f"msg_{uuid.uuid4().hex}"

    return {
        "role": role,
        "content": content,
        "timestamp": stamp,
        "metadata": merged_meta,
    }
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def ensure_memact_vars(run: RunState) -> tuple[Dict[str, Any], Dict[str, Any], Dict[str, Any], Dict[str, Any], Dict[str, Any]]:
    """Normalize ``run.vars`` for MemAct and return its working namespaces.

    Returns ``(context, scratchpad, _runtime, _temp, limits)`` after
    migrating legacy top-level keys, coercing counters to sane integers,
    and guaranteeing list-typed ``context.messages`` / ``_runtime.inbox``.
    """
    ensure_namespaces(run.vars)
    limits = ensure_limits(run.vars)
    context = run.vars["context"]
    scratchpad = run.vars["scratchpad"]
    runtime_ns = run.vars["_runtime"]
    temp = run.vars["_temp"]

    # Migrate legacy top-level keys into the context namespace.
    for legacy_key in ("task", "messages"):
        if legacy_key in run.vars and legacy_key not in context:
            context[legacy_key] = run.vars.pop(legacy_key)

    if not isinstance(context.get("messages"), list):
        context["messages"] = []
    if not isinstance(runtime_ns.get("inbox"), list):
        runtime_ns["inbox"] = []

    def _coerce_int(value: Any, fallback: int) -> int:
        # Mirror int(value or fallback) but never raise.
        try:
            return int(value or fallback)
        except Exception:
            return fallback

    iteration = scratchpad.get("iteration")
    if not isinstance(iteration, int):
        scratchpad["iteration"] = _coerce_int(iteration, 0)

    max_iterations = scratchpad.get("max_iterations")
    if not isinstance(max_iterations, int):
        scratchpad["max_iterations"] = _coerce_int(max_iterations, 25)
    if scratchpad["max_iterations"] < 1:
        scratchpad["max_iterations"] = 1

    used_tools = scratchpad.get("used_tools")
    if not isinstance(used_tools, bool):
        scratchpad["used_tools"] = False if used_tools is None else bool(used_tools)

    return context, scratchpad, runtime_ns, temp, limits
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
def _compute_toolset_id(tool_specs: List[Dict[str, Any]]) -> str:
    """Return a stable ``ts_<sha256>`` identifier for a list of tool specs.

    Specs are sorted by name and serialized with sorted keys and compact
    separators, so the id is independent of spec ordering and dict key order.
    """
    ordered = sorted((dict(spec) for spec in tool_specs), key=lambda spec: str(spec.get("name", "")))
    blob = json.dumps(ordered, sort_keys=True, ensure_ascii=False, separators=(",", ":"))
    return "ts_" + hashlib.sha256(blob.encode("utf-8")).hexdigest()
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
def create_memact_workflow(
    *,
    logic: MemActLogic,
    on_step: Optional[Callable[[str, Dict[str, Any]], None]] = None,
    workflow_id: str = "memact_agent",
    provider: Optional[str] = None,
    model: Optional[str] = None,
    allowed_tools: Optional[List[str]] = None,
) -> WorkflowSpec:
    """Adapt MemActLogic to an AbstractRuntime workflow.

    Builds the node graph init -> reason -> parse -> (act/observe loops |
    finalize -> finalize_parse) -> done, with max_iterations as the bail-out
    terminal node. ``provider``/``model`` override any values stored in the
    run's ``_runtime`` namespace; ``allowed_tools`` (when given) restricts
    the tool allowlist, otherwise all named tools on ``logic.tools`` are
    allowed.
    """

    def emit(step: str, data: Dict[str, Any]) -> None:
        # Forward step events to the optional on_step observer.
        if on_step:
            on_step(step, data)

    def _current_tool_defs() -> list[Any]:
        # Snapshot logic.tools, tolerating non-list iterables; keep only named tools.
        defs = getattr(logic, "tools", None)
        if not isinstance(defs, list):
            try:
                defs = list(defs)  # type: ignore[arg-type]
            except Exception:
                defs = []
        return [t for t in defs if getattr(t, "name", None)]

    def _tool_by_name() -> dict[str, Any]:
        # Index current tool definitions by their (non-empty) name.
        out: dict[str, Any] = {}
        for t in _current_tool_defs():
            name = getattr(t, "name", None)
            if isinstance(name, str) and name.strip():
                out[name] = t
        return out

    def _default_allowlist() -> list[str]:
        # Explicit allowed_tools wins; otherwise every named tool, de-duplicated in order.
        if isinstance(allowed_tools, list):
            allow = [str(t).strip() for t in allowed_tools if isinstance(t, str) and t.strip()]
            return allow if allow else []
        out: list[str] = []
        seen: set[str] = set()
        for t in _current_tool_defs():
            name = getattr(t, "name", None)
            if not isinstance(name, str) or not name.strip() or name in seen:
                continue
            seen.add(name)
            out.append(name)
        return out

    def _normalize_allowlist(raw: Any) -> list[str]:
        # Coerce raw (list/tuple/str/other) into an ordered, de-duplicated list of
        # names that actually exist in the current toolset.
        items: list[Any]
        if isinstance(raw, list):
            items = raw
        elif isinstance(raw, tuple):
            items = list(raw)
        elif isinstance(raw, str):
            items = [raw]
        else:
            items = []

        out: list[str] = []
        seen: set[str] = set()
        current = _tool_by_name()
        for t in items:
            if not isinstance(t, str):
                continue
            name = t.strip()
            if not name or name in seen:
                continue
            if name not in current:
                continue
            seen.add(name)
            out.append(name)
        return out

    def _effective_allowlist(runtime_ns: Dict[str, Any]) -> list[str]:
        # A run-level allowlist in _runtime takes precedence over the factory default;
        # it is normalized in place so stale names are dropped.
        if isinstance(runtime_ns, dict) and "allowed_tools" in runtime_ns:
            normalized = _normalize_allowlist(runtime_ns.get("allowed_tools"))
            runtime_ns["allowed_tools"] = normalized
            return normalized
        return _normalize_allowlist(list(_default_allowlist()))

    def _allowed_tool_defs(allow: list[str]) -> list[Any]:
        # Resolve allowlist names back to tool definition objects (allowlist order).
        out: list[Any] = []
        current = _tool_by_name()
        for name in allow:
            tool = current.get(name)
            if tool is not None:
                out.append(tool)
        return out

    def _system_prompt_override(runtime_ns: Dict[str, Any]) -> Optional[str]:
        # Non-empty _runtime.system_prompt overrides the logic-provided prompt.
        raw = runtime_ns.get("system_prompt") if isinstance(runtime_ns, dict) else None
        if isinstance(raw, str) and raw.strip():
            return raw
        return None

    def _sanitize_llm_messages(messages: Any) -> List[Dict[str, str]]:
        # Reduce history entries to the {role, content[, tool_call_id]} shape the
        # LLM effect expects, dropping malformed or empty entries.
        if not isinstance(messages, list) or not messages:
            return []
        out: List[Dict[str, str]] = []
        for m in messages:
            if not isinstance(m, dict):
                continue
            role = str(m.get("role") or "").strip()
            content = m.get("content")
            if not role or content is None:
                continue
            content_str = str(content)
            if not content_str.strip():
                continue
            entry: Dict[str, str] = {"role": role, "content": content_str}
            if role == "tool":
                # Tool messages carry their originating call_id (stored in metadata
                # by observe_node) so providers can correlate results.
                meta = m.get("metadata") if isinstance(m.get("metadata"), dict) else {}
                call_id = meta.get("call_id") if isinstance(meta, dict) else None
                if call_id is not None and str(call_id).strip():
                    entry["tool_call_id"] = str(call_id).strip()
            out.append(entry)
        return out

    def init_node(run: RunState, ctx) -> StepPlan:
        """Reset counters, seed memory/task/messages, publish the toolset."""
        context, scratchpad, runtime_ns, _, limits = ensure_memact_vars(run)
        scratchpad["iteration"] = 0
        limits["current_iteration"] = 0

        # Ensure MemAct Active Memory exists (seeded by agent.start when available).
        from abstractruntime.memory.active_memory import ensure_memact_memory

        ensure_memact_memory(run.vars)

        task = str(context.get("task", "") or "")
        context["task"] = task
        messages = context["messages"]
        # Append the task as a user message unless it is already the last entry.
        if task and (not messages or messages[-1].get("role") != "user" or messages[-1].get("content") != task):
            messages.append(_new_message(ctx, role="user", content=task))

        allow = _effective_allowlist(runtime_ns)
        allowed_defs = _allowed_tool_defs(allow)
        tool_specs = [t.to_dict() for t in allowed_defs]
        runtime_ns["tool_specs"] = tool_specs
        runtime_ns["toolset_id"] = _compute_toolset_id(tool_specs)
        runtime_ns.setdefault("allowed_tools", allow)
        runtime_ns.setdefault("inbox", [])

        emit("init", {"task": task})
        return StepPlan(node_id="init", next_node="reason")

    def reason_node(run: RunState, ctx) -> StepPlan:
        """Advance the iteration counter and issue the next LLM_CALL effect."""
        context, scratchpad, runtime_ns, _, limits = ensure_memact_vars(run)

        iteration = int(limits.get("current_iteration", 0) or 0)
        max_iterations = int(limits.get("max_iterations", 25) or scratchpad.get("max_iterations", 25) or 25)
        if max_iterations < 1:
            max_iterations = 1

        if iteration >= max_iterations:
            return StepPlan(node_id="reason", next_node="max_iterations")

        scratchpad["iteration"] = iteration + 1
        limits["current_iteration"] = iteration + 1

        task = str(context.get("task", "") or "")
        messages_view = ActiveContextPolicy.select_active_messages_for_llm_from_run(run)

        # Recompute the toolset every step: the allowlist may have changed at runtime.
        allow = _effective_allowlist(runtime_ns)
        allowed_defs = _allowed_tool_defs(allow)
        tool_specs = [t.to_dict() for t in allowed_defs]
        runtime_ns["tool_specs"] = tool_specs
        runtime_ns["toolset_id"] = _compute_toolset_id(tool_specs)
        runtime_ns.setdefault("allowed_tools", allow)

        # Inbox is a small, host/agent-controlled injection channel.
        guidance = ""
        inbox = runtime_ns.get("inbox", [])
        if isinstance(inbox, list) and inbox:
            inbox_messages = [str(m.get("content", "") or "") for m in inbox if isinstance(m, dict)]
            guidance = " | ".join([m for m in inbox_messages if m])
            runtime_ns["inbox"] = []

        req = logic.build_request(
            task=task,
            messages=messages_view,
            guidance=guidance,
            iteration=iteration + 1,
            max_iterations=max_iterations,
            vars=run.vars,
        )

        from abstractruntime.memory.active_memory import render_memact_system_prompt

        # Memory prompt is always prepended; the base prompt may be overridden per run.
        memory_prompt = render_memact_system_prompt(run.vars)
        base_sys = _system_prompt_override(runtime_ns) or req.system_prompt
        system_prompt = (memory_prompt + "\n\n" + str(base_sys or "")).strip()

        emit("reason", {"iteration": iteration + 1, "max_iterations": max_iterations, "has_guidance": bool(guidance)})

        payload: Dict[str, Any] = {"prompt": ""}
        payload["messages"] = _sanitize_llm_messages(messages_view)
        if tool_specs:
            payload["tools"] = list(tool_specs)
        if system_prompt:
            payload["system_prompt"] = system_prompt
        # Factory-level provider/model take precedence over _runtime values.
        eff_provider = provider if isinstance(provider, str) and provider.strip() else runtime_ns.get("provider")
        eff_model = model if isinstance(model, str) and model.strip() else runtime_ns.get("model")
        if isinstance(eff_provider, str) and eff_provider.strip():
            payload["provider"] = eff_provider.strip()
        if isinstance(eff_model, str) and eff_model.strip():
            payload["model"] = eff_model.strip()

        # Lower temperature when tools are available (tool-call precision);
        # freer sampling for plain text generation.
        params: Dict[str, Any] = {"temperature": 0.2 if tool_specs else 0.7}
        if req.max_tokens is not None:
            params["max_tokens"] = req.max_tokens
        payload["params"] = params

        return StepPlan(
            node_id="reason",
            effect=Effect(type=EffectType.LLM_CALL, payload=payload, result_key="_temp.llm_response"),
            next_node="parse",
        )

    def parse_node(run: RunState, ctx) -> StepPlan:
        """Split the LLM response into tool calls (-> act) or a draft answer (-> finalize)."""
        context, scratchpad, runtime_ns, temp, _ = ensure_memact_vars(run)
        response = temp.get("llm_response", {})
        content, tool_calls = logic.parse_response(response)
        temp.pop("llm_response", None)

        emit(
            "parse",
            {
                "has_tool_calls": bool(tool_calls),
                "tool_calls": [{"name": tc.name, "arguments": tc.arguments, "call_id": tc.call_id} for tc in tool_calls],
            },
        )

        if tool_calls:
            # Keep any user-facing prelude content (optional) in history.
            clean = str(content or "").strip()
            if clean:
                context["messages"].append(_new_message(ctx, role="assistant", content=clean))
            temp["pending_tool_calls"] = [tc.__dict__ for tc in tool_calls]
            return StepPlan(node_id="parse", next_node="act")

        # Tool-free: draft answer becomes input to the envelope finalization call.
        temp["draft_answer"] = str(content or "").strip()
        scratchpad["tool_retry_count"] = 0
        return StepPlan(node_id="parse", next_node="finalize")

    def act_node(run: RunState, ctx) -> StepPlan:
        """Dispatch pending tool calls: builtins become runtime effects one at a
        time; contiguous regular tools are batched into one TOOL_CALLS effect."""
        # Queue semantics: preserve ordering and avoid dropping calls when schema-only tools
        # (ask_user/memory/etc.) are interleaved with normal tools.
        context, _, runtime_ns, temp, _ = ensure_memact_vars(run)
        raw_queue = temp.get("pending_tool_calls", [])
        if not isinstance(raw_queue, list) or not raw_queue:
            temp["pending_tool_calls"] = []
            return StepPlan(node_id="act", next_node="reason")

        allow = _effective_allowlist(runtime_ns)
        # Tools handled by dedicated runtime effects rather than the tool executor.
        builtin_effect_tools = {
            "ask_user",
            "recall_memory",
            "inspect_vars",
            "remember",
            "remember_note",
            "compact_memory",
        }

        # Copy entries and backfill missing call_ids with 1-based queue positions.
        tool_queue: List[Dict[str, Any]] = []
        for idx, item in enumerate(raw_queue, start=1):
            if not isinstance(item, dict):
                continue
            d = dict(item)
            call_id = str(d.get("call_id") or "").strip()
            if not call_id:
                d["call_id"] = str(idx)
            tool_queue.append(d)

        if not tool_queue:
            temp["pending_tool_calls"] = []
            return StepPlan(node_id="act", next_node="reason")

        def _is_builtin(tc: Dict[str, Any]) -> bool:
            name = tc.get("name")
            return isinstance(name, str) and name in builtin_effect_tools

        if _is_builtin(tool_queue[0]):
            tc = tool_queue[0]
            name = str(tc.get("name") or "").strip()
            args = tc.get("arguments") or {}
            if not isinstance(args, dict):
                args = {}

            # Pop the head; the rest of the queue is retried on the next visit.
            temp["pending_tool_calls"] = list(tool_queue[1:])

            if name and name not in allow:
                # Synthesize a failed result so observe_node records the denial.
                temp["tool_results"] = {
                    "results": [
                        {
                            "call_id": str(tc.get("call_id") or ""),
                            "name": name,
                            "success": False,
                            "output": None,
                            "error": f"Tool '{name}' is not allowed for this agent",
                        }
                    ]
                }
                emit("act_blocked", {"tool": name})
                return StepPlan(node_id="act", next_node="observe")

            if name == "ask_user":
                question = str(args.get("question") or "Please provide input:")
                choices = args.get("choices")
                choices = list(choices) if isinstance(choices, list) else None

                # Record the question in history unless it is already the last entry
                # (guards against duplicates on effect replay).
                msgs = context.get("messages")
                if isinstance(msgs, list):
                    content = f"[Agent question]: {question}"
                    last = msgs[-1] if msgs else None
                    last_role = last.get("role") if isinstance(last, dict) else None
                    last_meta = last.get("metadata") if isinstance(last, dict) else None
                    last_kind = last_meta.get("kind") if isinstance(last_meta, dict) else None
                    last_content = last.get("content") if isinstance(last, dict) else None
                    if not (last_role == "assistant" and last_kind == "ask_user_prompt" and str(last_content or "") == content):
                        msgs.append(_new_message(ctx, role="assistant", content=content, metadata={"kind": "ask_user_prompt"}))

                emit("ask_user", {"question": question, "choices": choices or []})
                return StepPlan(
                    node_id="act",
                    effect=Effect(
                        type=EffectType.ASK_USER,
                        payload={"prompt": question, "choices": choices, "allow_free_text": True},
                        result_key="_temp.user_response",
                    ),
                    next_node="handle_user_response",
                )

            if name == "recall_memory":
                payload = dict(args)
                payload.setdefault("tool_name", "recall_memory")
                payload.setdefault("call_id", tc.get("call_id") or "memory")
                emit("memory_query", {"query": payload.get("query"), "span_id": payload.get("span_id")})
                return StepPlan(
                    node_id="act",
                    effect=Effect(type=EffectType.MEMORY_QUERY, payload=payload, result_key="_temp.tool_results"),
                    next_node="observe",
                )

            if name == "inspect_vars":
                payload = dict(args)
                payload.setdefault("tool_name", "inspect_vars")
                payload.setdefault("call_id", tc.get("call_id") or "vars")
                emit("vars_query", {"path": payload.get("path")})
                return StepPlan(
                    node_id="act",
                    effect=Effect(type=EffectType.VARS_QUERY, payload=payload, result_key="_temp.tool_results"),
                    next_node="observe",
                )

            if name == "remember":
                payload = dict(args)
                payload.setdefault("tool_name", "remember")
                payload.setdefault("call_id", tc.get("call_id") or "memory")
                emit("memory_tag", {"span_id": payload.get("span_id"), "tags": payload.get("tags")})
                return StepPlan(
                    node_id="act",
                    effect=Effect(type=EffectType.MEMORY_TAG, payload=payload, result_key="_temp.tool_results"),
                    next_node="observe",
                )

            if name == "remember_note":
                payload = dict(args)
                payload.setdefault("tool_name", "remember_note")
                payload.setdefault("call_id", tc.get("call_id") or "memory")
                emit("memory_note", {"note": payload.get("note"), "tags": payload.get("tags")})
                return StepPlan(
                    node_id="act",
                    effect=Effect(type=EffectType.MEMORY_NOTE, payload=payload, result_key="_temp.tool_results"),
                    next_node="observe",
                )

            if name == "compact_memory":
                payload = dict(args)
                payload.setdefault("tool_name", "compact_memory")
                payload.setdefault("call_id", tc.get("call_id") or "compact")
                emit(
                    "memory_compact",
                    {
                        "preserve_recent": payload.get("preserve_recent"),
                        "mode": payload.get("compression_mode"),
                        "focus": payload.get("focus"),
                    },
                )
                return StepPlan(
                    node_id="act",
                    effect=Effect(type=EffectType.MEMORY_COMPACT, payload=payload, result_key="_temp.tool_results"),
                    next_node="observe",
                )

            # Defensive fallback: head was classified builtin but matched no branch.
            if temp.get("pending_tool_calls"):
                return StepPlan(node_id="act", next_node="act")
            return StepPlan(node_id="act", next_node="reason")

        # Head is a regular tool: batch the contiguous run of non-builtin calls.
        batch: List[Dict[str, Any]] = []
        for tc in tool_queue:
            if _is_builtin(tc):
                break
            batch.append(tc)

        remaining = tool_queue[len(batch) :]
        temp["pending_tool_calls"] = list(remaining)

        for tc in batch:
            emit("act", {"tool": tc.get("name", ""), "args": tc.get("arguments", {}), "call_id": str(tc.get("call_id") or "")})

        formatted_calls: List[Dict[str, Any]] = []
        for tc in batch:
            formatted_calls.append(
                {"name": tc.get("name", ""), "arguments": tc.get("arguments", {}), "call_id": str(tc.get("call_id") or "")}
            )

        return StepPlan(
            node_id="act",
            effect=Effect(
                type=EffectType.TOOL_CALLS,
                payload={"tool_calls": formatted_calls, "allowed_tools": list(allow)},
                result_key="_temp.tool_results",
            ),
            next_node="observe",
        )

    def observe_node(run: RunState, ctx) -> StepPlan:
        """Append tool results to history, then resume act (if queued) or reason."""
        context, scratchpad, _, temp, _ = ensure_memact_vars(run)
        tool_results = temp.get("tool_results", {})
        if not isinstance(tool_results, dict):
            tool_results = {}

        results = tool_results.get("results", [])
        if not isinstance(results, list):
            results = []
        if results:
            scratchpad["used_tools"] = True

        def _display(v: Any) -> str:
            # Prefer a non-empty pre-rendered string when the output dict carries one.
            if isinstance(v, dict):
                rendered = v.get("rendered")
                if isinstance(rendered, str) and rendered.strip():
                    return rendered.strip()
            return "" if v is None else str(v)

        for r in results:
            if not isinstance(r, dict):
                continue
            name = str(r.get("name", "tool") or "tool")
            success = bool(r.get("success"))
            output = r.get("output", "")
            error = r.get("error", "")
            display = _display(output)
            if not success:
                # Failures show the error text unless the output dict renders itself.
                display = _display(output) if isinstance(output, dict) else str(error or output)
            rendered = logic.format_observation(name=name, output=display, success=success)
            emit("observe", {"tool": name, "success": success})

            context["messages"].append(
                _new_message(
                    ctx,
                    role="tool",
                    content=rendered,
                    metadata={"name": name, "call_id": r.get("call_id"), "success": success},
                )
            )

        temp.pop("tool_results", None)
        pending = temp.get("pending_tool_calls", [])
        if isinstance(pending, list) and pending:
            return StepPlan(node_id="observe", next_node="act")
        temp["pending_tool_calls"] = []
        return StepPlan(node_id="observe", next_node="reason")

    def handle_user_response_node(run: RunState, ctx) -> StepPlan:
        """Record the ASK_USER reply in history and resume act or reason."""
        context, _, _, temp, _ = ensure_memact_vars(run)
        user_response = temp.get("user_response", {})
        if not isinstance(user_response, dict):
            user_response = {}
        response_text = str(user_response.get("response", "") or "")
        emit("user_response", {"response": response_text})

        context["messages"].append(_new_message(ctx, role="user", content=f"[User response]: {response_text}"))
        temp.pop("user_response", None)

        if temp.get("pending_tool_calls"):
            return StepPlan(node_id="handle_user_response", next_node="act")
        return StepPlan(node_id="handle_user_response", next_node="reason")

    def finalize_node(run: RunState, ctx) -> StepPlan:
        """Issue the schema-constrained finalization LLM_CALL (MemAct envelope)."""
        context, scratchpad, runtime_ns, temp, limits = ensure_memact_vars(run)
        _ = scratchpad

        task = str(context.get("task", "") or "")
        messages_view = ActiveContextPolicy.select_active_messages_for_llm_from_run(run)
        payload_messages = _sanitize_llm_messages(messages_view)

        # The tool-free draft from parse_node (if any) is shown as the model's
        # own last assistant turn before the finalize instruction.
        draft = str(temp.get("draft_answer") or "").strip()
        if draft:
            payload_messages = list(payload_messages) + [{"role": "assistant", "content": draft}]

        from abstractruntime.memory.active_memory import MEMACT_ENVELOPE_SCHEMA_V1, render_memact_system_prompt

        memory_prompt = render_memact_system_prompt(run.vars)
        base_sys = _system_prompt_override(runtime_ns) or ""

        finalize_rules = (
            "Finalize by returning a single JSON object that matches the required schema.\n"
            "Rules:\n"
            "- Put your normal user-facing answer in `content`.\n"
            "- For each memory module, decide what unitary statements to add/remove.\n"
            "- Do NOT include timestamps in added statements; the runtime will add timestamps.\n"
            "- HISTORY is append-only and must be experiential; do NOT include raw commands or tool-call syntax.\n"
            "- If you have no changes for a module, use empty lists.\n"
        )

        system_prompt = (memory_prompt + "\n\n" + str(base_sys or "")).strip()
        prompt = (
            "Finalize now.\n\n"
            f"User request:\n{task}\n\n"
            f"{finalize_rules}\n"
            "Return ONLY the JSON object.\n"
        ).strip()
        payload_messages = list(payload_messages) + [{"role": "user", "content": prompt}]

        payload: Dict[str, Any] = {
            "prompt": "",
            "messages": payload_messages,
            "system_prompt": system_prompt,
            "response_schema": MEMACT_ENVELOPE_SCHEMA_V1,
            "response_schema_name": "MemActEnvelopeV1",
            "params": {"temperature": 0.2},
        }

        # Same provider/model resolution as reason_node.
        eff_provider = provider if isinstance(provider, str) and provider.strip() else runtime_ns.get("provider")
        eff_model = model if isinstance(model, str) and model.strip() else runtime_ns.get("model")
        if isinstance(eff_provider, str) and eff_provider.strip():
            payload["provider"] = eff_provider.strip()
        if isinstance(eff_model, str) and eff_model.strip():
            payload["model"] = eff_model.strip()

        emit("finalize_request", {"has_draft": bool(draft)})

        return StepPlan(
            node_id="finalize",
            effect=Effect(type=EffectType.LLM_CALL, payload=payload, result_key="_temp.finalize_llm_response"),
            next_node="finalize_parse",
        )

    def finalize_parse_node(run: RunState, ctx) -> StepPlan:
        """Extract the envelope JSON, apply memory updates, stash the final answer."""
        _, scratchpad, _, temp, _ = ensure_memact_vars(run)
        resp = temp.get("finalize_llm_response", {})
        if not isinstance(resp, dict):
            resp = {}

        # Prefer structured data; fall back to parsing raw content as JSON.
        data = resp.get("data")
        if data is None and isinstance(resp.get("content"), str):
            try:
                data = json.loads(resp["content"])
            except Exception:
                data = None
        if not isinstance(data, dict):
            data = {}

        content = data.get("content")
        final_answer = str(content or "").strip()

        from abstractruntime.memory.active_memory import apply_memact_envelope

        # Envelope memory mutations are applied even when content is empty.
        apply_memact_envelope(run.vars, envelope=data)

        temp.pop("finalize_llm_response", None)
        temp.pop("draft_answer", None)
        temp["final_answer"] = final_answer
        scratchpad["used_tools"] = bool(scratchpad.get("used_tools"))

        emit("finalize", {"has_answer": bool(final_answer)})
        return StepPlan(node_id="finalize_parse", next_node="done")

    def done_node(run: RunState, ctx) -> StepPlan:
        """Terminal node: record the final answer and complete the run."""
        context, scratchpad, _, temp, limits = ensure_memact_vars(run)
        answer = str(temp.get("final_answer") or "No answer provided")
        emit("done", {"answer": answer})

        iterations = int(limits.get("current_iteration", 0) or scratchpad.get("iteration", 0) or 0)

        # Append the answer to history unless it is already the last assistant turn.
        messages = context.get("messages")
        if isinstance(messages, list):
            last = messages[-1] if messages else None
            last_role = last.get("role") if isinstance(last, dict) else None
            last_content = last.get("content") if isinstance(last, dict) else None
            if last_role != "assistant" or str(last_content or "") != answer:
                messages.append(_new_message(ctx, role="assistant", content=answer, metadata={"kind": "final_answer"}))

        return StepPlan(
            node_id="done",
            complete_output={"answer": answer, "iterations": iterations, "messages": list(context.get("messages") or [])},
        )

    def max_iterations_node(run: RunState, ctx) -> StepPlan:
        """Terminal node for iteration exhaustion: last message becomes the answer."""
        context, scratchpad, _, _, limits = ensure_memact_vars(run)
        max_iterations = int(limits.get("max_iterations", 0) or scratchpad.get("max_iterations", 25) or 25)
        if max_iterations < 1:
            max_iterations = 1
        emit("max_iterations", {"iterations": max_iterations})

        messages = list(context.get("messages") or [])
        last_content = messages[-1]["content"] if messages else "Max iterations reached"
        return StepPlan(
            node_id="max_iterations",
            complete_output={"answer": last_content, "iterations": max_iterations, "messages": messages},
        )

    return WorkflowSpec(
        workflow_id=str(workflow_id or "memact_agent"),
        entry_node="init",
        nodes={
            "init": init_node,
            "reason": reason_node,
            "parse": parse_node,
            "act": act_node,
            "observe": observe_node,
            "handle_user_response": handle_user_response_node,
            "finalize": finalize_node,
            "finalize_parse": finalize_parse_node,
            "done": done_node,
            "max_iterations": max_iterations_node,
        },
    )
|