abstractagent 0.3.0__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -11,6 +11,8 @@ from abstractruntime import Effect, EffectType, RunState, StepPlan, WorkflowSpec
11
11
  from abstractruntime.core.vars import ensure_limits, ensure_namespaces
12
12
  from abstractruntime.memory.active_context import ActiveContextPolicy
13
13
 
14
+ from .generation_params import runtime_llm_params
15
+ from .media import extract_media_from_context
14
16
  from ..logic.codeact import CodeActLogic
15
17
 
16
18
 
@@ -215,6 +217,7 @@ def create_codeact_workflow(
215
217
  if keep < 200:
216
218
  keep = max_chars
217
219
  suffix = ""
220
+ #[WARNING:TRUNCATION] bounded message content for LLM payload
218
221
  return text[:keep].rstrip() + suffix
219
222
 
220
223
  out: List[Dict[str, str]] = []
@@ -335,7 +338,10 @@ def create_codeact_workflow(
335
338
 
336
339
  emit("plan_request", {"tools": allow})
337
340
 
338
- payload: Dict[str, Any] = {"prompt": prompt, "params": {"temperature": 0.2}}
341
+ payload: Dict[str, Any] = {"prompt": prompt, "params": runtime_llm_params(runtime_ns, extra={"temperature": 0.2})}
342
+ media = extract_media_from_context(context)
343
+ if media:
344
+ payload["media"] = media
339
345
  sys = _system_prompt(runtime_ns)
340
346
  if isinstance(sys, str) and sys.strip():
341
347
  payload["system_prompt"] = sys
@@ -431,11 +437,16 @@ def create_codeact_workflow(
431
437
  "messages": _sanitize_llm_messages(messages_view, limits=limits),
432
438
  "tools": list(tool_specs),
433
439
  }
440
+ media = extract_media_from_context(context)
441
+ if media:
442
+ payload["media"] = media
434
443
  sys = _system_prompt(runtime_ns) or req.system_prompt
435
444
  if isinstance(sys, str) and sys.strip():
436
445
  payload["system_prompt"] = sys
446
+ params: Dict[str, Any] = {}
437
447
  if req.max_tokens is not None:
438
- payload["params"] = {"max_tokens": req.max_tokens}
448
+ params["max_tokens"] = req.max_tokens
449
+ payload["params"] = runtime_llm_params(runtime_ns, extra=params)
439
450
 
440
451
  return StepPlan(
441
452
  node_id="reason",
@@ -560,6 +571,7 @@ def create_codeact_workflow(
560
571
  "remember",
561
572
  "remember_note",
562
573
  "compact_memory",
574
+ "delegate_agent",
563
575
  }
564
576
 
565
577
  tool_queue: List[Dict[str, Any]] = []
@@ -635,6 +647,68 @@ def create_codeact_workflow(
635
647
  next_node="handle_user_response",
636
648
  )
637
649
 
650
+ if name == "delegate_agent":
651
+ delegated_task = str(args.get("task") or "").strip()
652
+ delegated_context = str(args.get("context") or "").strip()
653
+
654
+ tools_raw = args.get("tools")
655
+ if tools_raw is None:
656
+ # Inherit the current allowlist, but avoid recursive delegation and avoid waiting on ask_user
657
+ # unless explicitly enabled.
658
+ child_allow = [t for t in allow if t not in {"delegate_agent", "ask_user"}]
659
+ else:
660
+ child_allow = _normalize_allowlist(tools_raw)
661
+
662
+ if not delegated_task:
663
+ temp["tool_results"] = {
664
+ "results": [
665
+ {
666
+ "call_id": str(tc.get("call_id") or ""),
667
+ "name": "delegate_agent",
668
+ "success": False,
669
+ "output": None,
670
+ "error": "delegate_agent requires a non-empty task",
671
+ }
672
+ ]
673
+ }
674
+ return StepPlan(node_id="act", next_node="observe")
675
+
676
+ combined_task = delegated_task
677
+ if delegated_context:
678
+ combined_task = f"{delegated_task}\n\nContext:\n{delegated_context}"
679
+
680
+ sub_vars: Dict[str, Any] = {
681
+ "context": {"task": combined_task, "messages": []},
682
+ "_runtime": {
683
+ "allowed_tools": list(child_allow),
684
+ "system_prompt_extra": (
685
+ "You are a delegated sub-agent.\n"
686
+ "- Focus ONLY on the delegated task.\n"
687
+ "- Use ONLY the allowed tools when needed.\n"
688
+ "- Do not ask the user questions; if blocked, state assumptions and proceed.\n"
689
+ "- Return a concise result suitable for the parent agent to act on.\n"
690
+ ),
691
+ },
692
+ "_limits": {"max_iterations": 10},
693
+ }
694
+
695
+ payload = {
696
+ "workflow_id": str(getattr(run, "workflow_id", "") or "codeact_agent"),
697
+ "vars": sub_vars,
698
+ "async": False,
699
+ "include_traces": False,
700
+ # Tool-mode wrapper so the parent receives a normal tool observation (no run failure on child failure).
701
+ "wrap_as_tool_result": True,
702
+ "tool_name": "delegate_agent",
703
+ "call_id": str(tc.get("call_id") or ""),
704
+ }
705
+ emit("delegate_agent", {"tools": list(child_allow), "call_id": payload.get("call_id")})
706
+ return StepPlan(
707
+ node_id="act",
708
+ effect=Effect(type=EffectType.START_SUBWORKFLOW, payload=payload, result_key="_temp.tool_results"),
709
+ next_node="observe",
710
+ )
711
+
638
712
  if name == "recall_memory":
639
713
  payload = dict(args)
640
714
  payload.setdefault("tool_name", "recall_memory")
@@ -730,13 +804,14 @@ def create_codeact_workflow(
730
804
  )
731
805
 
732
806
  def execute_code_node(run: RunState, ctx) -> StepPlan:
733
- _, _, _, temp, _ = ensure_codeact_vars(run)
807
+ _, _, runtime_ns, temp, _ = ensure_codeact_vars(run)
734
808
  code = temp.get("pending_code")
735
809
  if not isinstance(code, str) or not code.strip():
736
810
  return StepPlan(node_id="execute_code", next_node="reason")
737
811
 
738
812
  temp.pop("pending_code", None)
739
813
  emit("act", {"tool": "execute_python", "args": {"code": "(inline)", "timeout_s": 10.0}})
814
+ allow = _effective_allowlist(runtime_ns)
740
815
 
741
816
  return StepPlan(
742
817
  node_id="execute_code",
@@ -749,7 +824,8 @@ def create_codeact_workflow(
749
824
  "arguments": {"code": code, "timeout_s": 10.0},
750
825
  "call_id": "code",
751
826
  }
752
- ]
827
+ ],
828
+ "allowed_tools": list(allow),
753
829
  },
754
830
  result_key="_temp.tool_results",
755
831
  ),
@@ -794,6 +870,7 @@ def create_codeact_workflow(
794
870
  # Keep a bounded preview for huge tool outputs to avoid bloating traces/ledgers.
795
871
  preview = rendered
796
872
  if len(preview) > 1000:
873
+ #[WARNING:TRUNCATION] bounded preview for observability payloads
797
874
  preview = preview[:1000] + f"\n… (truncated, {len(rendered):,} chars total)"
798
875
  emit("observe", {"tool": name, "success": success, "result": preview})
799
876
  context["messages"].append(
@@ -870,6 +947,7 @@ def create_codeact_workflow(
870
947
  if keep < 200:
871
948
  keep = max_chars
872
949
  suffix = ""
950
+ #[WARNING:TRUNCATION] bounded transcript blocks for prompt reconstruction
873
951
  return s[:keep].rstrip() + suffix
874
952
 
875
953
  def _format_allowed_tools() -> str:
@@ -1002,8 +1080,11 @@ def create_codeact_workflow(
1002
1080
  "prompt": prompt,
1003
1081
  "response_schema": schema,
1004
1082
  "response_schema_name": "CodeActVerifier",
1005
- "params": {"temperature": 0.2},
1083
+ "params": runtime_llm_params(runtime_ns, extra={"temperature": 0.2}),
1006
1084
  }
1085
+ media = extract_media_from_context(context)
1086
+ if media:
1087
+ payload["media"] = media
1007
1088
  sys = _system_prompt(runtime_ns)
1008
1089
  if sys is not None:
1009
1090
  payload["system_prompt"] = sys
@@ -0,0 +1,82 @@
1
+ """Helpers for consistent generation params in AbstractAgent adapters.
2
+
3
+ These adapters build `EffectType.LLM_CALL` payloads for AbstractRuntime. We want
4
+ to expose a uniform `(temperature, seed)` interface across agents while keeping
5
+ backward compatibility with older runs that may not have these keys in
6
+ `vars["_runtime"]`.
7
+ """
8
+
9
+ from __future__ import annotations
10
+
11
+ from typing import Any, Dict, Optional
12
+
13
+
14
def normalize_seed(seed: Any) -> Optional[int]:
    """Coerce *seed* into a provider-ready non-negative int, or ``None``.

    Rules:
    - ``None`` and booleans are rejected (JSON booleans are ints in Python,
      so they must be filtered explicitly).
    - Values that cannot be converted to ``int`` are rejected.
    - Negative values mean "random": do not send a seed at all.
    """
    if seed is None or isinstance(seed, bool):
        return None
    try:
        value = int(seed)
    except Exception:
        # Non-numeric input (e.g. arbitrary strings/objects): treat as unset.
        return None
    if value < 0:
        return None
    return value
29
+
30
+
31
def runtime_llm_params(
    runtime_ns: Dict[str, Any],
    *,
    extra: Optional[Dict[str, Any]] = None,
    default_temperature: float = 0.7,
) -> Dict[str, Any]:
    """Build the ``params`` dict for an ``LLM_CALL`` effect payload.

    Resolution order for each sampling control:
    1) ``runtime_ns["temperature"]`` / ``runtime_ns["seed"]`` when present
    2) ``extra["temperature"]`` / ``extra["seed"]`` (step-specific defaults)
    3) ``default_temperature`` (temperature only; seed is simply omitted)
    """
    params: Dict[str, Any] = dict(extra or {})
    ns_is_dict = isinstance(runtime_ns, dict)

    # Temperature is always emitted as a float so providers see a uniform type.
    raw_temp = runtime_ns.get("temperature") if ns_is_dict else None
    if raw_temp is None:
        raw_temp = params.get("temperature")
    if raw_temp is None:
        raw_temp = default_temperature
    try:
        params["temperature"] = float(raw_temp)
    except Exception:
        params["temperature"] = float(default_temperature)

    # Seed is opt-in: forwarded only when it normalizes to a non-negative int.
    raw_seed = runtime_ns.get("seed") if ns_is_dict else None
    if raw_seed is None:
        raw_seed = params.get("seed")
    seed = normalize_seed(raw_seed)
    if seed is None:
        params.pop("seed", None)
    else:
        params["seed"] = seed

    # Media policies are runtime-owned defaults: thin clients set
    # `_runtime.audio_policy` (and optional language hints) once at run start,
    # and every LLM_CALL step inherits them unless the step overrides them.
    if ns_is_dict:
        policy = runtime_ns.get("audio_policy")
        if "audio_policy" not in params and isinstance(policy, str) and policy.strip():
            params["audio_policy"] = policy.strip()

        lang = runtime_ns.get("stt_language")
        if lang is None:
            lang = runtime_ns.get("audio_language")
        if "stt_language" not in params and isinstance(lang, str) and lang.strip():
            params["stt_language"] = lang.strip()

    return params
@@ -0,0 +1,45 @@
1
+ """Helpers for attachment/media plumbing in runtime-backed agents."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Any, Dict, List, Optional
6
+
7
+
8
def extract_media_from_context(context: Dict[str, Any]) -> Optional[List[Any]]:
    """Pull a normalized ``media`` list out of a runtime ``context`` dict.

    ``context["attachments"]`` (preferred; artifact refs) wins over the
    legacy/alternate ``context["media"]`` key. Returns ``None`` when nothing
    usable is found (best-effort contract: malformed entries are dropped).
    """
    raw = context.get("attachments")
    if raw is None:
        raw = context.get("media")

    # Tuples are accepted for convenience; anything else must already be a list.
    items = list(raw) if isinstance(raw, tuple) else raw
    if not isinstance(items, list) or not items:
        return None

    normalized: List[Any] = []
    for entry in items:
        if isinstance(entry, str):
            # Bare strings: keep non-empty values, stripped.
            text = entry.strip()
            if text:
                normalized.append(text)
        elif isinstance(entry, dict):
            # Dicts must carry an artifact reference; accept both
            # {"$artifact": "..."} and {"artifact_id": "..."} spellings.
            ref = entry.get("$artifact")
            if not (isinstance(ref, str) and ref.strip()):
                ref = entry.get("artifact_id")
            if isinstance(ref, str) and ref.strip():
                normalized.append(dict(entry))
        # Anything else (ints, None, ...) is silently dropped.

    return normalized or None
45
+
@@ -11,6 +11,8 @@ from abstractruntime import Effect, EffectType, RunState, StepPlan, WorkflowSpec
11
11
  from abstractruntime.core.vars import ensure_limits, ensure_namespaces
12
12
  from abstractruntime.memory.active_context import ActiveContextPolicy
13
13
 
14
+ from .generation_params import runtime_llm_params
15
+ from .media import extract_media_from_context
14
16
  from ..logic.memact import MemActLogic
15
17
 
16
18
 
@@ -232,7 +234,168 @@ def create_memact_workflow(
232
234
  runtime_ns.setdefault("inbox", [])
233
235
 
234
236
  emit("init", {"task": task})
235
- return StepPlan(node_id="init", next_node="reason")
237
+ return StepPlan(node_id="init", next_node="compose")
238
+
239
def compose_node(run: RunState, ctx) -> StepPlan:
    """Optional, runtime-owned memory composition step (v0).

    When enabled via `_runtime.memact_composer.enabled`, this node queries the
    temporal KG (`MEMORY_KG_QUERY`) using the latest user message as stimulus
    and maps the selected packets into MemAct CURRENT CONTEXT entries.

    This runs *before* `reason` so it does not consume agent iterations.

    Two-phase protocol: on the first visit it schedules the KG query effect
    (result lands in `_temp.memact_composer.kg_result`) and loops back to
    itself; on the second visit it applies the result and proceeds to `reason`.
    """
    context, _, runtime_ns, temp, _ = ensure_memact_vars(run)

    # Composer config lives under `_runtime.memact_composer`; treat any
    # non-dict (or missing) value as "disabled".
    cfg_raw = runtime_ns.get("memact_composer") if isinstance(runtime_ns, dict) else None
    cfg = cfg_raw if isinstance(cfg_raw, dict) else {}
    enabled = bool(cfg.get("enabled"))
    if not enabled:
        return StepPlan(node_id="compose", next_node="reason")

    # Derive stimulus from the latest user message, falling back to `context.task`.
    stimulus = ""
    stimulus_message_id: Optional[str] = None
    messages = context.get("messages")
    if isinstance(messages, list):
        # Walk newest-to-oldest; stop at the first non-empty user message.
        for m in reversed(messages):
            if not isinstance(m, dict):
                continue
            if str(m.get("role") or "") != "user":
                continue
            raw = m.get("content")
            if raw is None:
                continue
            text = str(raw).strip()
            if not text:
                continue
            stimulus = text
            # Capture the message id (when present) so the composition key is
            # stable even if the message text is edited/duplicated.
            meta = m.get("metadata") if isinstance(m.get("metadata"), dict) else {}
            mid = meta.get("message_id") if isinstance(meta, dict) else None
            if isinstance(mid, str) and mid.strip():
                stimulus_message_id = mid.strip()
            break
    if not stimulus:
        stimulus = str(context.get("task", "") or "").strip()

    # Nothing to query against: skip composition entirely.
    if not stimulus:
        return StepPlan(node_id="compose", next_node="reason")

    # Normalize config knobs; each falls back to a documented default when
    # blank/whitespace-only.
    recall_level = str(cfg.get("recall_level") or "urgent").strip().lower() or "urgent"
    scope = str(cfg.get("scope") or "session").strip().lower() or "session"
    marker = str(cfg.get("marker") or "KG:").strip() or "KG:"
    max_items = cfg.get("max_items")
    max_items_int: Optional[int] = None
    if max_items is not None and not isinstance(max_items, bool):
        # Accept numeric-ish values (e.g. "5", 5.0); only positive ints count.
        try:
            mi = int(float(max_items))
        except Exception:
            mi = None
        if isinstance(mi, int) and mi > 0:
            max_items_int = mi

    # Build a stable "composition key" so we don't re-query on tool iterations.
    compose_key_parts = [
        stimulus_message_id or stimulus,
        recall_level,
        scope,
        str(cfg.get("limit") or ""),
        str(cfg.get("min_score") or ""),
        str(cfg.get("max_input_tokens") or cfg.get("max_in_tokens") or ""),
    ]
    compose_key = "|".join([p for p in compose_key_parts if p is not None])

    # Scratch bucket in `_temp` holding composer state across node visits.
    bucket_raw = temp.get("memact_composer")
    bucket: Dict[str, Any] = bucket_raw if isinstance(bucket_raw, dict) else {}
    temp["memact_composer"] = bucket

    # If we already applied the composer for this key and have no pending results, skip.
    if bucket.get("applied_key") == compose_key and "kg_result" not in bucket:
        return StepPlan(node_id="compose", next_node="reason")

    kg_result = bucket.get("kg_result")
    if isinstance(kg_result, dict):
        # Phase 2: a KG query result is waiting — fold it into CURRENT CONTEXT.
        try:
            # Imported lazily so the adapter loads even on older runtimes
            # without the composer module. -- TODO confirm intent
            from abstractruntime.memory.memact_composer import compose_memact_current_context_from_kg_result

            out = compose_memact_current_context_from_kg_result(
                run.vars,
                kg_result=kg_result,
                stimulus=stimulus,
                marker=marker,
                max_items=max_items_int,
            )
        except Exception as e:
            # Composition is best-effort: record the failure, never break the run.
            out = {"ok": False, "error": str(e), "delta": {}, "trace": {}}

        # Consume the result and mark this key as applied.
        bucket.pop("kg_result", None)
        bucket["applied_key"] = compose_key
        bucket["last_stimulus"] = stimulus

        # Persist a small trace for UI/debuggers (bounded list).
        try:
            from abstractruntime.memory.active_memory import ensure_memact_memory

            mem = ensure_memact_memory(run.vars)
            traces = mem.get("composer_traces")
            if not isinstance(traces, list):
                traces = []
            mem["composer_traces"] = traces

            # Prefer the runtime clock (deterministic replays) over wall time.
            timestamp: Optional[str] = None
            now_iso = getattr(ctx, "now_iso", None)
            if callable(now_iso):
                timestamp = str(now_iso())
            if not timestamp:
                from datetime import datetime, timezone

                timestamp = datetime.now(timezone.utc).isoformat()

            trace_entry = {
                "at": timestamp,
                "compose_key": compose_key,
                "ok": bool(out.get("ok")),
                "trace": out.get("trace"),
            }
            # Newest first; keep at most 25 entries.
            traces.insert(0, trace_entry)
            del traces[25:]
        except Exception:
            # Tracing is purely observational — never fail the step over it.
            pass

        emit("compose", {"ok": bool(out.get("ok")), "stimulus": stimulus, "recall_level": recall_level, "scope": scope})
        return StepPlan(node_id="compose", next_node="reason")

    # No result yet: schedule KG query.
    payload: Dict[str, Any] = {
        "query_text": stimulus,
        "recall_level": recall_level,
        "scope": scope,
    }
    # Forward optional config knobs; `max_in_tokens` is a legacy alias for
    # `max_input_tokens` (later entries overwrite earlier ones when both set).
    for src_key, dst_key in (
        ("limit", "limit"),
        ("min_score", "min_score"),
        ("max_input_tokens", "max_input_tokens"),
        ("max_in_tokens", "max_input_tokens"),
        ("model", "model"),
    ):
        if src_key in cfg:
            payload[dst_key] = cfg.get(src_key)

    # Default packing model: re-use the configured LLM model when available.
    if "model" not in payload:
        model_name = runtime_ns.get("model")
        if isinstance(model_name, str) and model_name.strip():
            payload["model"] = model_name.strip()

    # Store key so we can attribute the result even if the stimulus changes later.
    bucket["pending_key"] = compose_key

    emit("compose_query", {"stimulus": stimulus, "recall_level": recall_level, "scope": scope})
    # Loop back to this node: the effect result lands in
    # `_temp.memact_composer.kg_result` and is applied on the next visit.
    return StepPlan(
        node_id="compose",
        effect=Effect(type=EffectType.MEMORY_KG_QUERY, payload=payload, result_key="_temp.memact_composer.kg_result"),
        next_node="compose",
    )
236
399
 
237
400
  def reason_node(run: RunState, ctx) -> StepPlan:
238
401
  context, scratchpad, runtime_ns, _, limits = ensure_memact_vars(run)
@@ -284,7 +447,18 @@ def create_memact_workflow(
284
447
  emit("reason", {"iteration": iteration + 1, "max_iterations": max_iterations, "has_guidance": bool(guidance)})
285
448
 
286
449
  payload: Dict[str, Any] = {"prompt": ""}
287
- payload["messages"] = _sanitize_llm_messages(messages_view)
450
+ sanitized_messages = _sanitize_llm_messages(messages_view)
451
+ if sanitized_messages:
452
+ payload["messages"] = sanitized_messages
453
+ else:
454
+ # Ensure LLM_CALL contract is satisfied even when callers provide only `context.task`
455
+ # and the active message view is empty.
456
+ task_text = str(task or "").strip()
457
+ if task_text:
458
+ payload["prompt"] = task_text
459
+ media = extract_media_from_context(context)
460
+ if media:
461
+ payload["media"] = media
288
462
  if tool_specs:
289
463
  payload["tools"] = list(tool_specs)
290
464
  if system_prompt:
@@ -299,7 +473,7 @@ def create_memact_workflow(
299
473
  params: Dict[str, Any] = {"temperature": 0.2 if tool_specs else 0.7}
300
474
  if req.max_tokens is not None:
301
475
  params["max_tokens"] = req.max_tokens
302
- payload["params"] = params
476
+ payload["params"] = runtime_llm_params(runtime_ns, extra=params)
303
477
 
304
478
  return StepPlan(
305
479
  node_id="reason",
@@ -341,7 +515,7 @@ def create_memact_workflow(
341
515
  raw_queue = temp.get("pending_tool_calls", [])
342
516
  if not isinstance(raw_queue, list) or not raw_queue:
343
517
  temp["pending_tool_calls"] = []
344
- return StepPlan(node_id="act", next_node="reason")
518
+ return StepPlan(node_id="act", next_node="compose")
345
519
 
346
520
  allow = _effective_allowlist(runtime_ns)
347
521
  builtin_effect_tools = {
@@ -351,6 +525,7 @@ def create_memact_workflow(
351
525
  "remember",
352
526
  "remember_note",
353
527
  "compact_memory",
528
+ "delegate_agent",
354
529
  }
355
530
 
356
531
  tool_queue: List[Dict[str, Any]] = []
@@ -365,7 +540,7 @@ def create_memact_workflow(
365
540
 
366
541
  if not tool_queue:
367
542
  temp["pending_tool_calls"] = []
368
- return StepPlan(node_id="act", next_node="reason")
543
+ return StepPlan(node_id="act", next_node="compose")
369
544
 
370
545
  def _is_builtin(tc: Dict[str, Any]) -> bool:
371
546
  name = tc.get("name")
@@ -422,6 +597,68 @@ def create_memact_workflow(
422
597
  next_node="handle_user_response",
423
598
  )
424
599
 
600
+ if name == "delegate_agent":
601
+ delegated_task = str(args.get("task") or "").strip()
602
+ delegated_context = str(args.get("context") or "").strip()
603
+
604
+ tools_raw = args.get("tools")
605
+ if tools_raw is None:
606
+ # Inherit the current allowlist, but avoid recursive delegation and avoid waiting on ask_user
607
+ # unless explicitly enabled.
608
+ child_allow = [t for t in allow if t not in {"delegate_agent", "ask_user"}]
609
+ else:
610
+ child_allow = _normalize_allowlist(tools_raw)
611
+
612
+ if not delegated_task:
613
+ temp["tool_results"] = {
614
+ "results": [
615
+ {
616
+ "call_id": str(tc.get("call_id") or ""),
617
+ "name": "delegate_agent",
618
+ "success": False,
619
+ "output": None,
620
+ "error": "delegate_agent requires a non-empty task",
621
+ }
622
+ ]
623
+ }
624
+ return StepPlan(node_id="act", next_node="observe")
625
+
626
+ combined_task = delegated_task
627
+ if delegated_context:
628
+ combined_task = f"{delegated_task}\n\nContext:\n{delegated_context}"
629
+
630
+ sub_vars: Dict[str, Any] = {
631
+ "context": {"task": combined_task, "messages": []},
632
+ "_runtime": {
633
+ "allowed_tools": list(child_allow),
634
+ "system_prompt_extra": (
635
+ "You are a delegated sub-agent.\n"
636
+ "- Focus ONLY on the delegated task.\n"
637
+ "- Use ONLY the allowed tools when needed.\n"
638
+ "- Do not ask the user questions; if blocked, state assumptions and proceed.\n"
639
+ "- Return a concise result suitable for the parent agent to act on.\n"
640
+ ),
641
+ },
642
+ "_limits": {"max_iterations": 10},
643
+ }
644
+
645
+ payload = {
646
+ "workflow_id": str(getattr(run, "workflow_id", "") or "memact_agent"),
647
+ "vars": sub_vars,
648
+ "async": False,
649
+ "include_traces": False,
650
+ # Tool-mode wrapper so the parent receives a normal tool observation (no run failure on child failure).
651
+ "wrap_as_tool_result": True,
652
+ "tool_name": "delegate_agent",
653
+ "call_id": str(tc.get("call_id") or ""),
654
+ }
655
+ emit("delegate_agent", {"tools": list(child_allow), "call_id": payload.get("call_id")})
656
+ return StepPlan(
657
+ node_id="act",
658
+ effect=Effect(type=EffectType.START_SUBWORKFLOW, payload=payload, result_key="_temp.tool_results"),
659
+ next_node="observe",
660
+ )
661
+
425
662
  if name == "recall_memory":
426
663
  payload = dict(args)
427
664
  payload.setdefault("tool_name", "recall_memory")
@@ -486,7 +723,7 @@ def create_memact_workflow(
486
723
 
487
724
  if temp.get("pending_tool_calls"):
488
725
  return StepPlan(node_id="act", next_node="act")
489
- return StepPlan(node_id="act", next_node="reason")
726
+ return StepPlan(node_id="act", next_node="compose")
490
727
 
491
728
  batch: List[Dict[str, Any]] = []
492
729
  for tc in tool_queue:
@@ -546,7 +783,7 @@ def create_memact_workflow(
546
783
  if not success:
547
784
  display = _display(output) if isinstance(output, dict) else str(error or output)
548
785
  rendered = logic.format_observation(name=name, output=display, success=success)
549
- emit("observe", {"tool": name, "success": success})
786
+ emit("observe", {"tool": name, "success": success, "result": rendered})
550
787
 
551
788
  context["messages"].append(
552
789
  _new_message(
@@ -562,7 +799,7 @@ def create_memact_workflow(
562
799
  if isinstance(pending, list) and pending:
563
800
  return StepPlan(node_id="observe", next_node="act")
564
801
  temp["pending_tool_calls"] = []
565
- return StepPlan(node_id="observe", next_node="reason")
802
+ return StepPlan(node_id="observe", next_node="compose")
566
803
 
567
804
  def handle_user_response_node(run: RunState, ctx) -> StepPlan:
568
805
  context, _, _, temp, _ = ensure_memact_vars(run)
@@ -577,7 +814,7 @@ def create_memact_workflow(
577
814
 
578
815
  if temp.get("pending_tool_calls"):
579
816
  return StepPlan(node_id="handle_user_response", next_node="act")
580
- return StepPlan(node_id="handle_user_response", next_node="reason")
817
+ return StepPlan(node_id="handle_user_response", next_node="compose")
581
818
 
582
819
  def finalize_node(run: RunState, ctx) -> StepPlan:
583
820
  context, scratchpad, runtime_ns, temp, limits = ensure_memact_vars(run)
@@ -621,7 +858,7 @@ def create_memact_workflow(
621
858
  "system_prompt": system_prompt,
622
859
  "response_schema": MEMACT_ENVELOPE_SCHEMA_V1,
623
860
  "response_schema_name": "MemActEnvelopeV1",
624
- "params": {"temperature": 0.2},
861
+ "params": runtime_llm_params(runtime_ns, extra={"temperature": 0.2}),
625
862
  }
626
863
 
627
864
  eff_provider = provider if isinstance(provider, str) and provider.strip() else runtime_ns.get("provider")
@@ -708,6 +945,7 @@ def create_memact_workflow(
708
945
  entry_node="init",
709
946
  nodes={
710
947
  "init": init_node,
948
+ "compose": compose_node,
711
949
  "reason": reason_node,
712
950
  "parse": parse_node,
713
951
  "act": act_node,