abstractagent-0.2.0-py3-none-any.whl → abstractagent-0.3.1-py3-none-any.whl
- abstractagent/adapters/__init__.py +2 -1
- abstractagent/adapters/codeact_runtime.py +907 -60
- abstractagent/adapters/generation_params.py +82 -0
- abstractagent/adapters/media.py +45 -0
- abstractagent/adapters/memact_runtime.py +959 -0
- abstractagent/adapters/react_runtime.py +1357 -135
- abstractagent/agents/__init__.py +4 -0
- abstractagent/agents/base.py +89 -1
- abstractagent/agents/codeact.py +125 -18
- abstractagent/agents/memact.py +280 -0
- abstractagent/agents/react.py +129 -18
- abstractagent/logic/__init__.py +2 -0
- abstractagent/logic/builtins.py +270 -5
- abstractagent/logic/codeact.py +91 -81
- abstractagent/logic/memact.py +128 -0
- abstractagent/logic/react.py +91 -50
- abstractagent/repl.py +24 -447
- abstractagent/scripts/__init__.py +5 -0
- abstractagent/scripts/lmstudio_tool_eval.py +426 -0
- abstractagent/tools/__init__.py +9 -0
- abstractagent-0.3.1.dist-info/METADATA +112 -0
- abstractagent-0.3.1.dist-info/RECORD +33 -0
- {abstractagent-0.2.0.dist-info → abstractagent-0.3.1.dist-info}/WHEEL +1 -1
- abstractagent/ui/__init__.py +0 -5
- abstractagent/ui/question.py +0 -197
- abstractagent-0.2.0.dist-info/METADATA +0 -134
- abstractagent-0.2.0.dist-info/RECORD +0 -28
- {abstractagent-0.2.0.dist-info → abstractagent-0.3.1.dist-info}/entry_points.txt +0 -0
- {abstractagent-0.2.0.dist-info → abstractagent-0.3.1.dist-info}/licenses/LICENSE +0 -0
- {abstractagent-0.2.0.dist-info → abstractagent-0.3.1.dist-info}/top_level.txt +0 -0
abstractagent/adapters/memact_runtime.py (new file)

@@ -0,0 +1,959 @@
"""AbstractRuntime adapter for MemAct (memory-enhanced agent)."""

from __future__ import annotations

import hashlib
import json
from typing import Any, Callable, Dict, List, Optional

from abstractcore.tools import ToolCall
from abstractruntime import Effect, EffectType, RunState, StepPlan, WorkflowSpec
from abstractruntime.core.vars import ensure_limits, ensure_namespaces
from abstractruntime.memory.active_context import ActiveContextPolicy

from .generation_params import runtime_llm_params
from .media import extract_media_from_context
from ..logic.memact import MemActLogic

def _new_message(
    ctx: Any,
    *,
    role: str,
    content: str,
    metadata: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    timestamp: Optional[str] = None
    now_iso = getattr(ctx, "now_iso", None)
    if callable(now_iso):
        timestamp = str(now_iso())
    if not timestamp:
        from datetime import datetime, timezone

        timestamp = datetime.now(timezone.utc).isoformat()

    import uuid

    meta = dict(metadata or {})
    meta.setdefault("message_id", f"msg_{uuid.uuid4().hex}")

    return {
        "role": role,
        "content": content,
        "timestamp": timestamp,
        "metadata": meta,
    }

def ensure_memact_vars(run: RunState) -> tuple[Dict[str, Any], Dict[str, Any], Dict[str, Any], Dict[str, Any], Dict[str, Any]]:
    ensure_namespaces(run.vars)
    limits = ensure_limits(run.vars)
    context = run.vars["context"]
    scratchpad = run.vars["scratchpad"]
    runtime_ns = run.vars["_runtime"]
    temp = run.vars["_temp"]

    if "task" in run.vars and "task" not in context:
        context["task"] = run.vars.pop("task")
    if "messages" in run.vars and "messages" not in context:
        context["messages"] = run.vars.pop("messages")

    if not isinstance(context.get("messages"), list):
        context["messages"] = []
    if not isinstance(runtime_ns.get("inbox"), list):
        runtime_ns["inbox"] = []

    iteration = scratchpad.get("iteration")
    if not isinstance(iteration, int):
        try:
            scratchpad["iteration"] = int(iteration or 0)
        except Exception:
            scratchpad["iteration"] = 0
    max_iterations = scratchpad.get("max_iterations")
    if not isinstance(max_iterations, int):
        try:
            scratchpad["max_iterations"] = int(max_iterations or 25)
        except Exception:
            scratchpad["max_iterations"] = 25
    if scratchpad["max_iterations"] < 1:
        scratchpad["max_iterations"] = 1

    used_tools = scratchpad.get("used_tools")
    if not isinstance(used_tools, bool):
        scratchpad["used_tools"] = bool(used_tools) if used_tools is not None else False

    return context, scratchpad, runtime_ns, temp, limits

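# The five namespaces returned by `ensure_memact_vars`, in order: `context`
# (task + message history), `scratchpad` (iteration bookkeeping), `_runtime`
# (host configuration: allowed_tools/provider/model/inbox), `_temp` (transient
# effect results), and `_limits` (the iteration budget).
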
def _compute_toolset_id(tool_specs: List[Dict[str, Any]]) -> str:
    normalized = sorted((dict(s) for s in tool_specs), key=lambda s: str(s.get("name", "")))
    payload = json.dumps(normalized, sort_keys=True, ensure_ascii=False, separators=(",", ":")).encode("utf-8")
    digest = hashlib.sha256(payload).hexdigest()
    return f"ts_{digest}"

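# Illustrative: the id is stable across tool ordering because specs are sorted
# by name before hashing; for a hypothetical spec list [{"name": "read_file"}]
# this yields "ts_" followed by the SHA-256 hex digest of the normalized JSON.
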
def create_memact_workflow(
    *,
    logic: MemActLogic,
    on_step: Optional[Callable[[str, Dict[str, Any]], None]] = None,
    workflow_id: str = "memact_agent",
    provider: Optional[str] = None,
    model: Optional[str] = None,
    allowed_tools: Optional[List[str]] = None,
) -> WorkflowSpec:
    """Adapt MemActLogic to an AbstractRuntime workflow."""

    def emit(step: str, data: Dict[str, Any]) -> None:
        if on_step:
            on_step(step, data)

    def _current_tool_defs() -> list[Any]:
        defs = getattr(logic, "tools", None)
        if not isinstance(defs, list):
            try:
                defs = list(defs)  # type: ignore[arg-type]
            except Exception:
                defs = []
        return [t for t in defs if getattr(t, "name", None)]

    def _tool_by_name() -> dict[str, Any]:
        out: dict[str, Any] = {}
        for t in _current_tool_defs():
            name = getattr(t, "name", None)
            if isinstance(name, str) and name.strip():
                out[name] = t
        return out

    def _default_allowlist() -> list[str]:
        if isinstance(allowed_tools, list):
            allow = [str(t).strip() for t in allowed_tools if isinstance(t, str) and t.strip()]
            return allow if allow else []
        out: list[str] = []
        seen: set[str] = set()
        for t in _current_tool_defs():
            name = getattr(t, "name", None)
            if not isinstance(name, str) or not name.strip() or name in seen:
                continue
            seen.add(name)
            out.append(name)
        return out

    def _normalize_allowlist(raw: Any) -> list[str]:
        items: list[Any]
        if isinstance(raw, list):
            items = raw
        elif isinstance(raw, tuple):
            items = list(raw)
        elif isinstance(raw, str):
            items = [raw]
        else:
            items = []

        out: list[str] = []
        seen: set[str] = set()
        current = _tool_by_name()
        for t in items:
            if not isinstance(t, str):
                continue
            name = t.strip()
            if not name or name in seen:
                continue
            if name not in current:
                continue
            seen.add(name)
            out.append(name)
        return out

    def _effective_allowlist(runtime_ns: Dict[str, Any]) -> list[str]:
        if isinstance(runtime_ns, dict) and "allowed_tools" in runtime_ns:
            normalized = _normalize_allowlist(runtime_ns.get("allowed_tools"))
            runtime_ns["allowed_tools"] = normalized
            return normalized
        return _normalize_allowlist(list(_default_allowlist()))

    def _allowed_tool_defs(allow: list[str]) -> list[Any]:
        out: list[Any] = []
        current = _tool_by_name()
        for name in allow:
            tool = current.get(name)
            if tool is not None:
                out.append(tool)
        return out

    def _system_prompt_override(runtime_ns: Dict[str, Any]) -> Optional[str]:
        raw = runtime_ns.get("system_prompt") if isinstance(runtime_ns, dict) else None
        if isinstance(raw, str) and raw.strip():
            return raw
        return None

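    # Illustrative: if the logic exposes tools named "web_search" and "read_file"
    # (hypothetical names), _normalize_allowlist(["read_file", "nope", "read_file"])
    # returns ["read_file"]; unknown and duplicate names are dropped, order kept.
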
    def _sanitize_llm_messages(messages: Any) -> List[Dict[str, str]]:
        if not isinstance(messages, list) or not messages:
            return []
        out: List[Dict[str, str]] = []
        for m in messages:
            if not isinstance(m, dict):
                continue
            role = str(m.get("role") or "").strip()
            content = m.get("content")
            if not role or content is None:
                continue
            content_str = str(content)
            if not content_str.strip():
                continue
            entry: Dict[str, str] = {"role": role, "content": content_str}
            if role == "tool":
                meta = m.get("metadata") if isinstance(m.get("metadata"), dict) else {}
                call_id = meta.get("call_id") if isinstance(meta, dict) else None
                if call_id is not None and str(call_id).strip():
                    entry["tool_call_id"] = str(call_id).strip()
            out.append(entry)
        return out

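    # Illustrative: a history entry {"role": "tool", "content": "42",
    # "metadata": {"call_id": "c1"}} sanitizes to
    # {"role": "tool", "content": "42", "tool_call_id": "c1"}; entries with a
    # missing role or empty content are skipped.
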
    def init_node(run: RunState, ctx) -> StepPlan:
        context, scratchpad, runtime_ns, _, limits = ensure_memact_vars(run)
        scratchpad["iteration"] = 0
        limits["current_iteration"] = 0

        # Ensure MemAct Active Memory exists (seeded by agent.start when available).
        from abstractruntime.memory.active_memory import ensure_memact_memory

        ensure_memact_memory(run.vars)

        task = str(context.get("task", "") or "")
        context["task"] = task
        messages = context["messages"]
        if task and (not messages or messages[-1].get("role") != "user" or messages[-1].get("content") != task):
            messages.append(_new_message(ctx, role="user", content=task))

        allow = _effective_allowlist(runtime_ns)
        allowed_defs = _allowed_tool_defs(allow)
        tool_specs = [t.to_dict() for t in allowed_defs]
        runtime_ns["tool_specs"] = tool_specs
        runtime_ns["toolset_id"] = _compute_toolset_id(tool_specs)
        runtime_ns.setdefault("allowed_tools", allow)
        runtime_ns.setdefault("inbox", [])

        emit("init", {"task": task})
        return StepPlan(node_id="init", next_node="compose")

    def compose_node(run: RunState, ctx) -> StepPlan:
        """Optional, runtime-owned memory composition step (v0).

        When enabled via `_runtime.memact_composer.enabled`, this node queries the
        temporal KG (`MEMORY_KG_QUERY`) using the latest user message as stimulus
        and maps the selected packets into MemAct CURRENT CONTEXT entries.

        This runs *before* `reason` so it does not consume agent iterations.
        """
        context, _, runtime_ns, temp, _ = ensure_memact_vars(run)

        cfg_raw = runtime_ns.get("memact_composer") if isinstance(runtime_ns, dict) else None
        cfg = cfg_raw if isinstance(cfg_raw, dict) else {}
        enabled = bool(cfg.get("enabled"))
        if not enabled:
            return StepPlan(node_id="compose", next_node="reason")

        # Derive stimulus from the latest user message, falling back to `context.task`.
        stimulus = ""
        stimulus_message_id: Optional[str] = None
        messages = context.get("messages")
        if isinstance(messages, list):
            for m in reversed(messages):
                if not isinstance(m, dict):
                    continue
                if str(m.get("role") or "") != "user":
                    continue
                raw = m.get("content")
                if raw is None:
                    continue
                text = str(raw).strip()
                if not text:
                    continue
                stimulus = text
                meta = m.get("metadata") if isinstance(m.get("metadata"), dict) else {}
                mid = meta.get("message_id") if isinstance(meta, dict) else None
                if isinstance(mid, str) and mid.strip():
                    stimulus_message_id = mid.strip()
                break
        if not stimulus:
            stimulus = str(context.get("task", "") or "").strip()

        if not stimulus:
            return StepPlan(node_id="compose", next_node="reason")

        recall_level = str(cfg.get("recall_level") or "urgent").strip().lower() or "urgent"
        scope = str(cfg.get("scope") or "session").strip().lower() or "session"
        marker = str(cfg.get("marker") or "KG:").strip() or "KG:"
        max_items = cfg.get("max_items")
        max_items_int: Optional[int] = None
        if max_items is not None and not isinstance(max_items, bool):
            try:
                mi = int(float(max_items))
            except Exception:
                mi = None
            if isinstance(mi, int) and mi > 0:
                max_items_int = mi

        # Build a stable "composition key" so we don't re-query on tool iterations.
        compose_key_parts = [
            stimulus_message_id or stimulus,
            recall_level,
            scope,
            str(cfg.get("limit") or ""),
            str(cfg.get("min_score") or ""),
            str(cfg.get("max_input_tokens") or cfg.get("max_in_tokens") or ""),
        ]
        compose_key = "|".join([p for p in compose_key_parts if p is not None])

        bucket_raw = temp.get("memact_composer")
        bucket: Dict[str, Any] = bucket_raw if isinstance(bucket_raw, dict) else {}
        temp["memact_composer"] = bucket

        # If we already applied the composer for this key and have no pending results, skip.
        if bucket.get("applied_key") == compose_key and "kg_result" not in bucket:
            return StepPlan(node_id="compose", next_node="reason")

        kg_result = bucket.get("kg_result")
        if isinstance(kg_result, dict):
            try:
                from abstractruntime.memory.memact_composer import compose_memact_current_context_from_kg_result

                out = compose_memact_current_context_from_kg_result(
                    run.vars,
                    kg_result=kg_result,
                    stimulus=stimulus,
                    marker=marker,
                    max_items=max_items_int,
                )
            except Exception as e:
                out = {"ok": False, "error": str(e), "delta": {}, "trace": {}}

            bucket.pop("kg_result", None)
            bucket["applied_key"] = compose_key
            bucket["last_stimulus"] = stimulus

            # Persist a small trace for UI/debuggers (bounded list).
            try:
                from abstractruntime.memory.active_memory import ensure_memact_memory

                mem = ensure_memact_memory(run.vars)
                traces = mem.get("composer_traces")
                if not isinstance(traces, list):
                    traces = []
                mem["composer_traces"] = traces

                timestamp: Optional[str] = None
                now_iso = getattr(ctx, "now_iso", None)
                if callable(now_iso):
                    timestamp = str(now_iso())
                if not timestamp:
                    from datetime import datetime, timezone

                    timestamp = datetime.now(timezone.utc).isoformat()

                trace_entry = {
                    "at": timestamp,
                    "compose_key": compose_key,
                    "ok": bool(out.get("ok")),
                    "trace": out.get("trace"),
                }
                traces.insert(0, trace_entry)
                del traces[25:]
            except Exception:
                pass

            emit("compose", {"ok": bool(out.get("ok")), "stimulus": stimulus, "recall_level": recall_level, "scope": scope})
            return StepPlan(node_id="compose", next_node="reason")

        # No result yet: schedule KG query.
        payload: Dict[str, Any] = {
            "query_text": stimulus,
            "recall_level": recall_level,
            "scope": scope,
        }
        for src_key, dst_key in (
            ("limit", "limit"),
            ("min_score", "min_score"),
            ("max_input_tokens", "max_input_tokens"),
            ("max_in_tokens", "max_input_tokens"),
            ("model", "model"),
        ):
            if src_key in cfg:
                payload[dst_key] = cfg.get(src_key)

        # Default packing model: re-use the configured LLM model when available.
        if "model" not in payload:
            model_name = runtime_ns.get("model")
            if isinstance(model_name, str) and model_name.strip():
                payload["model"] = model_name.strip()

        # Store key so we can attribute the result even if the stimulus changes later.
        bucket["pending_key"] = compose_key

        emit("compose_query", {"stimulus": stimulus, "recall_level": recall_level, "scope": scope})
        return StepPlan(
            node_id="compose",
            effect=Effect(type=EffectType.MEMORY_KG_QUERY, payload=payload, result_key="_temp.memact_composer.kg_result"),
            next_node="compose",
        )

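    # Illustrative composer configuration; the keys mirror the reads above,
    # the values here are hypothetical:
    #   run.vars["_runtime"]["memact_composer"] = {
    #       "enabled": True,
    #       "recall_level": "urgent",   # default "urgent"
    #       "scope": "session",         # default "session"
    #       "marker": "KG:",            # prefix for composed CURRENT CONTEXT entries
    #       "max_items": 5,
    #       "limit": 20,
    #       "min_score": 0.4,
    #       "max_input_tokens": 2000,
    #   }
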
    def reason_node(run: RunState, ctx) -> StepPlan:
        context, scratchpad, runtime_ns, _, limits = ensure_memact_vars(run)

        iteration = int(limits.get("current_iteration", 0) or 0)
        max_iterations = int(limits.get("max_iterations", 25) or scratchpad.get("max_iterations", 25) or 25)
        if max_iterations < 1:
            max_iterations = 1

        if iteration >= max_iterations:
            return StepPlan(node_id="reason", next_node="max_iterations")

        scratchpad["iteration"] = iteration + 1
        limits["current_iteration"] = iteration + 1

        task = str(context.get("task", "") or "")
        messages_view = ActiveContextPolicy.select_active_messages_for_llm_from_run(run)

        allow = _effective_allowlist(runtime_ns)
        allowed_defs = _allowed_tool_defs(allow)
        tool_specs = [t.to_dict() for t in allowed_defs]
        runtime_ns["tool_specs"] = tool_specs
        runtime_ns["toolset_id"] = _compute_toolset_id(tool_specs)
        runtime_ns.setdefault("allowed_tools", allow)

        # Inbox is a small, host/agent-controlled injection channel.
        guidance = ""
        inbox = runtime_ns.get("inbox", [])
        if isinstance(inbox, list) and inbox:
            inbox_messages = [str(m.get("content", "") or "") for m in inbox if isinstance(m, dict)]
            guidance = " | ".join([m for m in inbox_messages if m])
            runtime_ns["inbox"] = []

        req = logic.build_request(
            task=task,
            messages=messages_view,
            guidance=guidance,
            iteration=iteration + 1,
            max_iterations=max_iterations,
            vars=run.vars,
        )

        from abstractruntime.memory.active_memory import render_memact_system_prompt

        memory_prompt = render_memact_system_prompt(run.vars)
        base_sys = _system_prompt_override(runtime_ns) or req.system_prompt
        system_prompt = (memory_prompt + "\n\n" + str(base_sys or "")).strip()

        emit("reason", {"iteration": iteration + 1, "max_iterations": max_iterations, "has_guidance": bool(guidance)})

        payload: Dict[str, Any] = {"prompt": ""}
        sanitized_messages = _sanitize_llm_messages(messages_view)
        if sanitized_messages:
            payload["messages"] = sanitized_messages
        else:
            # Ensure LLM_CALL contract is satisfied even when callers provide only `context.task`
            # and the active message view is empty.
            task_text = str(task or "").strip()
            if task_text:
                payload["prompt"] = task_text
        media = extract_media_from_context(context)
        if media:
            payload["media"] = media
        if tool_specs:
            payload["tools"] = list(tool_specs)
        if system_prompt:
            payload["system_prompt"] = system_prompt
        eff_provider = provider if isinstance(provider, str) and provider.strip() else runtime_ns.get("provider")
        eff_model = model if isinstance(model, str) and model.strip() else runtime_ns.get("model")
        if isinstance(eff_provider, str) and eff_provider.strip():
            payload["provider"] = eff_provider.strip()
        if isinstance(eff_model, str) and eff_model.strip():
            payload["model"] = eff_model.strip()

        params: Dict[str, Any] = {"temperature": 0.2 if tool_specs else 0.7}
        if req.max_tokens is not None:
            params["max_tokens"] = req.max_tokens
        payload["params"] = runtime_llm_params(runtime_ns, extra=params)

        return StepPlan(
            node_id="reason",
            effect=Effect(type=EffectType.LLM_CALL, payload=payload, result_key="_temp.llm_response"),
            next_node="parse",
        )

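    # Illustrative LLM_CALL payload assembled above (provider/model values are
    # hypothetical): {"prompt": "", "messages": [...], "tools": [...],
    # "system_prompt": "<memory prompt>\n\n<base prompt>", "provider": "openai",
    # "model": "gpt-4o-mini", "params": {"temperature": 0.2}}; temperature is
    # 0.2 when tools are offered, 0.7 for tool-free turns.
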
    def parse_node(run: RunState, ctx) -> StepPlan:
        context, scratchpad, runtime_ns, temp, _ = ensure_memact_vars(run)
        response = temp.get("llm_response", {})
        content, tool_calls = logic.parse_response(response)
        temp.pop("llm_response", None)

        emit(
            "parse",
            {
                "has_tool_calls": bool(tool_calls),
                "tool_calls": [{"name": tc.name, "arguments": tc.arguments, "call_id": tc.call_id} for tc in tool_calls],
            },
        )

        if tool_calls:
            # Keep any user-facing prelude content (optional) in history.
            clean = str(content or "").strip()
            if clean:
                context["messages"].append(_new_message(ctx, role="assistant", content=clean))
            temp["pending_tool_calls"] = [tc.__dict__ for tc in tool_calls]
            return StepPlan(node_id="parse", next_node="act")

        # Tool-free: draft answer becomes input to the envelope finalization call.
        temp["draft_answer"] = str(content or "").strip()
        scratchpad["tool_retry_count"] = 0
        return StepPlan(node_id="parse", next_node="finalize")

    def act_node(run: RunState, ctx) -> StepPlan:
        # Queue semantics: preserve ordering and avoid dropping calls when schema-only tools
        # (ask_user/memory/etc.) are interleaved with normal tools.
        context, _, runtime_ns, temp, _ = ensure_memact_vars(run)
        raw_queue = temp.get("pending_tool_calls", [])
        if not isinstance(raw_queue, list) or not raw_queue:
            temp["pending_tool_calls"] = []
            return StepPlan(node_id="act", next_node="compose")

        allow = _effective_allowlist(runtime_ns)
        builtin_effect_tools = {
            "ask_user",
            "recall_memory",
            "inspect_vars",
            "remember",
            "remember_note",
            "compact_memory",
            "delegate_agent",
        }

        tool_queue: List[Dict[str, Any]] = []
        for idx, item in enumerate(raw_queue, start=1):
            if not isinstance(item, dict):
                continue
            d = dict(item)
            call_id = str(d.get("call_id") or "").strip()
            if not call_id:
                d["call_id"] = str(idx)
            tool_queue.append(d)

        if not tool_queue:
            temp["pending_tool_calls"] = []
            return StepPlan(node_id="act", next_node="compose")

        def _is_builtin(tc: Dict[str, Any]) -> bool:
            name = tc.get("name")
            return isinstance(name, str) and name in builtin_effect_tools

        if _is_builtin(tool_queue[0]):
            tc = tool_queue[0]
            name = str(tc.get("name") or "").strip()
            args = tc.get("arguments") or {}
            if not isinstance(args, dict):
                args = {}

            temp["pending_tool_calls"] = list(tool_queue[1:])

            if name and name not in allow:
                temp["tool_results"] = {
                    "results": [
                        {
                            "call_id": str(tc.get("call_id") or ""),
                            "name": name,
                            "success": False,
                            "output": None,
                            "error": f"Tool '{name}' is not allowed for this agent",
                        }
                    ]
                }
                emit("act_blocked", {"tool": name})
                return StepPlan(node_id="act", next_node="observe")

            if name == "ask_user":
                question = str(args.get("question") or "Please provide input:")
                choices = args.get("choices")
                choices = list(choices) if isinstance(choices, list) else None

                msgs = context.get("messages")
                if isinstance(msgs, list):
                    content = f"[Agent question]: {question}"
                    last = msgs[-1] if msgs else None
                    last_role = last.get("role") if isinstance(last, dict) else None
                    last_meta = last.get("metadata") if isinstance(last, dict) else None
                    last_kind = last_meta.get("kind") if isinstance(last_meta, dict) else None
                    last_content = last.get("content") if isinstance(last, dict) else None
                    if not (last_role == "assistant" and last_kind == "ask_user_prompt" and str(last_content or "") == content):
                        msgs.append(_new_message(ctx, role="assistant", content=content, metadata={"kind": "ask_user_prompt"}))

                emit("ask_user", {"question": question, "choices": choices or []})
                return StepPlan(
                    node_id="act",
                    effect=Effect(
                        type=EffectType.ASK_USER,
                        payload={"prompt": question, "choices": choices, "allow_free_text": True},
                        result_key="_temp.user_response",
                    ),
                    next_node="handle_user_response",
                )

            if name == "delegate_agent":
                delegated_task = str(args.get("task") or "").strip()
                delegated_context = str(args.get("context") or "").strip()

                tools_raw = args.get("tools")
                if tools_raw is None:
                    # Inherit the current allowlist, but avoid recursive delegation and avoid waiting on ask_user
                    # unless explicitly enabled.
                    child_allow = [t for t in allow if t not in {"delegate_agent", "ask_user"}]
                else:
                    child_allow = _normalize_allowlist(tools_raw)

                if not delegated_task:
                    temp["tool_results"] = {
                        "results": [
                            {
                                "call_id": str(tc.get("call_id") or ""),
                                "name": "delegate_agent",
                                "success": False,
                                "output": None,
                                "error": "delegate_agent requires a non-empty task",
                            }
                        ]
                    }
                    return StepPlan(node_id="act", next_node="observe")

                combined_task = delegated_task
                if delegated_context:
                    combined_task = f"{delegated_task}\n\nContext:\n{delegated_context}"

                sub_vars: Dict[str, Any] = {
                    "context": {"task": combined_task, "messages": []},
                    "_runtime": {
                        "allowed_tools": list(child_allow),
                        "system_prompt_extra": (
                            "You are a delegated sub-agent.\n"
                            "- Focus ONLY on the delegated task.\n"
                            "- Use ONLY the allowed tools when needed.\n"
                            "- Do not ask the user questions; if blocked, state assumptions and proceed.\n"
                            "- Return a concise result suitable for the parent agent to act on.\n"
                        ),
                    },
                    "_limits": {"max_iterations": 10},
                }

                payload = {
                    "workflow_id": str(getattr(run, "workflow_id", "") or "memact_agent"),
                    "vars": sub_vars,
                    "async": False,
                    "include_traces": False,
                    # Tool-mode wrapper so the parent receives a normal tool observation (no run failure on child failure).
                    "wrap_as_tool_result": True,
                    "tool_name": "delegate_agent",
                    "call_id": str(tc.get("call_id") or ""),
                }
                emit("delegate_agent", {"tools": list(child_allow), "call_id": payload.get("call_id")})
                return StepPlan(
                    node_id="act",
                    effect=Effect(type=EffectType.START_SUBWORKFLOW, payload=payload, result_key="_temp.tool_results"),
                    next_node="observe",
                )

            if name == "recall_memory":
                payload = dict(args)
                payload.setdefault("tool_name", "recall_memory")
                payload.setdefault("call_id", tc.get("call_id") or "memory")
                emit("memory_query", {"query": payload.get("query"), "span_id": payload.get("span_id")})
                return StepPlan(
                    node_id="act",
                    effect=Effect(type=EffectType.MEMORY_QUERY, payload=payload, result_key="_temp.tool_results"),
                    next_node="observe",
                )

            if name == "inspect_vars":
                payload = dict(args)
                payload.setdefault("tool_name", "inspect_vars")
                payload.setdefault("call_id", tc.get("call_id") or "vars")
                emit("vars_query", {"path": payload.get("path")})
                return StepPlan(
                    node_id="act",
                    effect=Effect(type=EffectType.VARS_QUERY, payload=payload, result_key="_temp.tool_results"),
                    next_node="observe",
                )

            if name == "remember":
                payload = dict(args)
                payload.setdefault("tool_name", "remember")
                payload.setdefault("call_id", tc.get("call_id") or "memory")
                emit("memory_tag", {"span_id": payload.get("span_id"), "tags": payload.get("tags")})
                return StepPlan(
                    node_id="act",
                    effect=Effect(type=EffectType.MEMORY_TAG, payload=payload, result_key="_temp.tool_results"),
                    next_node="observe",
                )

            if name == "remember_note":
                payload = dict(args)
                payload.setdefault("tool_name", "remember_note")
                payload.setdefault("call_id", tc.get("call_id") or "memory")
                emit("memory_note", {"note": payload.get("note"), "tags": payload.get("tags")})
                return StepPlan(
                    node_id="act",
                    effect=Effect(type=EffectType.MEMORY_NOTE, payload=payload, result_key="_temp.tool_results"),
                    next_node="observe",
                )

            if name == "compact_memory":
                payload = dict(args)
                payload.setdefault("tool_name", "compact_memory")
                payload.setdefault("call_id", tc.get("call_id") or "compact")
                emit(
                    "memory_compact",
                    {
                        "preserve_recent": payload.get("preserve_recent"),
                        "mode": payload.get("compression_mode"),
                        "focus": payload.get("focus"),
                    },
                )
                return StepPlan(
                    node_id="act",
                    effect=Effect(type=EffectType.MEMORY_COMPACT, payload=payload, result_key="_temp.tool_results"),
                    next_node="observe",
                )

            if temp.get("pending_tool_calls"):
                return StepPlan(node_id="act", next_node="act")
            return StepPlan(node_id="act", next_node="compose")

        batch: List[Dict[str, Any]] = []
        for tc in tool_queue:
            if _is_builtin(tc):
                break
            batch.append(tc)

        remaining = tool_queue[len(batch) :]
        temp["pending_tool_calls"] = list(remaining)

        for tc in batch:
            emit("act", {"tool": tc.get("name", ""), "args": tc.get("arguments", {}), "call_id": str(tc.get("call_id") or "")})

        formatted_calls: List[Dict[str, Any]] = []
        for tc in batch:
            formatted_calls.append(
                {"name": tc.get("name", ""), "arguments": tc.get("arguments", {}), "call_id": str(tc.get("call_id") or "")}
            )

        return StepPlan(
            node_id="act",
            effect=Effect(
                type=EffectType.TOOL_CALLS,
                payload={"tool_calls": formatted_calls, "allowed_tools": list(allow)},
                result_key="_temp.tool_results",
            ),
            next_node="observe",
        )

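    # Illustrative queue semantics with hypothetical tool names: a pending queue
    # [shell, ask_user, shell] dispatches one TOOL_CALLS batch [shell], then the
    # ASK_USER effect, then the trailing shell on a later pass; builtin effect
    # tools act as batch boundaries so ordering is never reshuffled.
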
    def observe_node(run: RunState, ctx) -> StepPlan:
        context, scratchpad, _, temp, _ = ensure_memact_vars(run)
        tool_results = temp.get("tool_results", {})
        if not isinstance(tool_results, dict):
            tool_results = {}

        results = tool_results.get("results", [])
        if not isinstance(results, list):
            results = []
        if results:
            scratchpad["used_tools"] = True

        def _display(v: Any) -> str:
            if isinstance(v, dict):
                rendered = v.get("rendered")
                if isinstance(rendered, str) and rendered.strip():
                    return rendered.strip()
            return "" if v is None else str(v)

        for r in results:
            if not isinstance(r, dict):
                continue
            name = str(r.get("name", "tool") or "tool")
            success = bool(r.get("success"))
            output = r.get("output", "")
            error = r.get("error", "")
            display = _display(output)
            if not success:
                display = _display(output) if isinstance(output, dict) else str(error or output)
            rendered = logic.format_observation(name=name, output=display, success=success)
            emit("observe", {"tool": name, "success": success, "result": rendered})

            context["messages"].append(
                _new_message(
                    ctx,
                    role="tool",
                    content=rendered,
                    metadata={"name": name, "call_id": r.get("call_id"), "success": success},
                )
            )

        temp.pop("tool_results", None)
        pending = temp.get("pending_tool_calls", [])
        if isinstance(pending, list) and pending:
            return StepPlan(node_id="observe", next_node="act")
        temp["pending_tool_calls"] = []
        return StepPlan(node_id="observe", next_node="compose")

    def handle_user_response_node(run: RunState, ctx) -> StepPlan:
        context, _, _, temp, _ = ensure_memact_vars(run)
        user_response = temp.get("user_response", {})
        if not isinstance(user_response, dict):
            user_response = {}
        response_text = str(user_response.get("response", "") or "")
        emit("user_response", {"response": response_text})

        context["messages"].append(_new_message(ctx, role="user", content=f"[User response]: {response_text}"))
        temp.pop("user_response", None)

        if temp.get("pending_tool_calls"):
            return StepPlan(node_id="handle_user_response", next_node="act")
        return StepPlan(node_id="handle_user_response", next_node="compose")

    def finalize_node(run: RunState, ctx) -> StepPlan:
        context, scratchpad, runtime_ns, temp, limits = ensure_memact_vars(run)
        _ = scratchpad

        task = str(context.get("task", "") or "")
        messages_view = ActiveContextPolicy.select_active_messages_for_llm_from_run(run)
        payload_messages = _sanitize_llm_messages(messages_view)

        draft = str(temp.get("draft_answer") or "").strip()
        if draft:
            payload_messages = list(payload_messages) + [{"role": "assistant", "content": draft}]

        from abstractruntime.memory.active_memory import MEMACT_ENVELOPE_SCHEMA_V1, render_memact_system_prompt

        memory_prompt = render_memact_system_prompt(run.vars)
        base_sys = _system_prompt_override(runtime_ns) or ""

        finalize_rules = (
            "Finalize by returning a single JSON object that matches the required schema.\n"
            "Rules:\n"
            "- Put your normal user-facing answer in `content`.\n"
            "- For each memory module, decide what unitary statements to add/remove.\n"
            "- Do NOT include timestamps in added statements; the runtime will add timestamps.\n"
            "- HISTORY is append-only and must be experiential; do NOT include raw commands or tool-call syntax.\n"
            "- If you have no changes for a module, use empty lists.\n"
        )

        system_prompt = (memory_prompt + "\n\n" + str(base_sys or "")).strip()
        prompt = (
            "Finalize now.\n\n"
            f"User request:\n{task}\n\n"
            f"{finalize_rules}\n"
            "Return ONLY the JSON object.\n"
        ).strip()
        payload_messages = list(payload_messages) + [{"role": "user", "content": prompt}]

        payload: Dict[str, Any] = {
            "prompt": "",
            "messages": payload_messages,
            "system_prompt": system_prompt,
            "response_schema": MEMACT_ENVELOPE_SCHEMA_V1,
            "response_schema_name": "MemActEnvelopeV1",
            "params": runtime_llm_params(runtime_ns, extra={"temperature": 0.2}),
        }

        eff_provider = provider if isinstance(provider, str) and provider.strip() else runtime_ns.get("provider")
        eff_model = model if isinstance(model, str) and model.strip() else runtime_ns.get("model")
        if isinstance(eff_provider, str) and eff_provider.strip():
            payload["provider"] = eff_provider.strip()
        if isinstance(eff_model, str) and eff_model.strip():
            payload["model"] = eff_model.strip()

        emit("finalize_request", {"has_draft": bool(draft)})

        return StepPlan(
            node_id="finalize",
            effect=Effect(type=EffectType.LLM_CALL, payload=payload, result_key="_temp.finalize_llm_response"),
            next_node="finalize_parse",
        )

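    # Illustrative finalize response; the authoritative shape is
    # MEMACT_ENVELOPE_SCHEMA_V1, and everything besides `content` is applied
    # via apply_memact_envelope below:
    #   {"content": "user-facing answer", ...per-module add/remove statement lists...}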
    def finalize_parse_node(run: RunState, ctx) -> StepPlan:
        _, scratchpad, _, temp, _ = ensure_memact_vars(run)
        resp = temp.get("finalize_llm_response", {})
        if not isinstance(resp, dict):
            resp = {}

        data = resp.get("data")
        if data is None and isinstance(resp.get("content"), str):
            try:
                data = json.loads(resp["content"])
            except Exception:
                data = None
        if not isinstance(data, dict):
            data = {}

        content = data.get("content")
        final_answer = str(content or "").strip()

        from abstractruntime.memory.active_memory import apply_memact_envelope

        apply_memact_envelope(run.vars, envelope=data)

        temp.pop("finalize_llm_response", None)
        temp.pop("draft_answer", None)
        temp["final_answer"] = final_answer
        scratchpad["used_tools"] = bool(scratchpad.get("used_tools"))

        emit("finalize", {"has_answer": bool(final_answer)})
        return StepPlan(node_id="finalize_parse", next_node="done")

    def done_node(run: RunState, ctx) -> StepPlan:
        context, scratchpad, _, temp, limits = ensure_memact_vars(run)
        answer = str(temp.get("final_answer") or "No answer provided")
        emit("done", {"answer": answer})

        iterations = int(limits.get("current_iteration", 0) or scratchpad.get("iteration", 0) or 0)

        messages = context.get("messages")
        if isinstance(messages, list):
            last = messages[-1] if messages else None
            last_role = last.get("role") if isinstance(last, dict) else None
            last_content = last.get("content") if isinstance(last, dict) else None
            if last_role != "assistant" or str(last_content or "") != answer:
                messages.append(_new_message(ctx, role="assistant", content=answer, metadata={"kind": "final_answer"}))

        return StepPlan(
            node_id="done",
            complete_output={"answer": answer, "iterations": iterations, "messages": list(context.get("messages") or [])},
        )

    def max_iterations_node(run: RunState, ctx) -> StepPlan:
        context, scratchpad, _, _, limits = ensure_memact_vars(run)
        max_iterations = int(limits.get("max_iterations", 0) or scratchpad.get("max_iterations", 25) or 25)
        if max_iterations < 1:
            max_iterations = 1
        emit("max_iterations", {"iterations": max_iterations})

        messages = list(context.get("messages") or [])
        last_content = messages[-1]["content"] if messages else "Max iterations reached"
        return StepPlan(
            node_id="max_iterations",
            complete_output={"answer": last_content, "iterations": max_iterations, "messages": messages},
        )

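    # Node graph: init -> compose -> reason -> parse, then either
    # act -> observe -> compose (looping until the tool queue drains, with
    # ask_user detouring through handle_user_response) or finalize ->
    # finalize_parse -> done; reason jumps to max_iterations when the budget
    # is exhausted.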
    return WorkflowSpec(
        workflow_id=str(workflow_id or "memact_agent"),
        entry_node="init",
        nodes={
            "init": init_node,
            "compose": compose_node,
            "reason": reason_node,
            "parse": parse_node,
            "act": act_node,
            "observe": observe_node,
            "handle_user_response": handle_user_response_node,
            "finalize": finalize_node,
            "finalize_parse": finalize_parse_node,
            "done": done_node,
            "max_iterations": max_iterations_node,
        },
    )
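
A minimal usage sketch (not part of the diff; it assumes MemActLogic can be
constructed directly and that the host hands the spec to an AbstractRuntime
engine, whose API is not shown in this package):

    from abstractagent.logic.memact import MemActLogic
    from abstractagent.adapters.memact_runtime import create_memact_workflow

    logic = MemActLogic()  # assumption: constructor arguments, if any, are not shown here
    spec = create_memact_workflow(
        logic=logic,
        on_step=lambda step, data: print(f"[{step}] {data}"),  # observe node events
        allowed_tools=["read_file"],  # hypothetical tool name
    )
    # spec is a WorkflowSpec with entry node "init"; an AbstractRuntime engine
    # that honors the Effect/StepPlan contract drives it to completion.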