abstractagent 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,390 @@
1
+ """AbstractRuntime adapter for ReAct-like agents."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import hashlib
6
+ import json
7
+ from typing import Any, Callable, Dict, List, Optional
8
+
9
+ from abstractcore.tools import ToolCall
10
+ from abstractruntime import Effect, EffectType, RunState, StepPlan, WorkflowSpec
11
+ from abstractruntime.core.vars import ensure_limits, ensure_namespaces
12
+
13
+ from ..logic.react import ReActLogic
14
+
15
+
16
+ def _new_message(
17
+ ctx: Any,
18
+ *,
19
+ role: str,
20
+ content: str,
21
+ metadata: Optional[Dict[str, Any]] = None,
22
+ ) -> Dict[str, Any]:
23
+ timestamp: Optional[str] = None
24
+ now_iso = getattr(ctx, "now_iso", None)
25
+ if callable(now_iso):
26
+ timestamp = str(now_iso())
27
+ if not timestamp:
28
+ from datetime import datetime, timezone
29
+
30
+ timestamp = datetime.now(timezone.utc).isoformat()
31
+
32
+ return {
33
+ "role": role,
34
+ "content": content,
35
+ "timestamp": timestamp,
36
+ "metadata": metadata or {},
37
+ }
38
+
39
+
40
def ensure_react_vars(run: RunState) -> tuple[Dict[str, Any], Dict[str, Any], Dict[str, Any], Dict[str, Any], Dict[str, Any]]:
    """Ensure namespaced vars exist and migrate legacy flat keys in-place.

    Returns:
        Tuple of (context, scratchpad, runtime_ns, temp, limits) dicts.
    """
    store = run.vars
    ensure_namespaces(store)
    limits = ensure_limits(store)
    context = store["context"]
    scratchpad = store["scratchpad"]
    runtime_ns = store["_runtime"]
    temp = store["_temp"]

    # Migrate legacy flat keys into their namespaced homes; an existing
    # namespaced value always wins over the legacy one.
    legacy_moves = (
        ("task", context, "task"),
        ("messages", context, "messages"),
        ("iteration", scratchpad, "iteration"),
        ("max_iterations", scratchpad, "max_iterations"),
        ("_inbox", runtime_ns, "inbox"),
    )
    for old_key, target, new_key in legacy_moves:
        if old_key in store and new_key not in target:
            target[new_key] = store.pop(old_key)

    # Transient per-step values all move into the _temp namespace.
    for key in ("llm_response", "tool_results", "pending_tool_calls", "user_response", "final_answer"):
        if key in store and key not in temp:
            temp[key] = store.pop(key)

    # Repair list-typed slots that were lost or corrupted.
    if not isinstance(context.get("messages"), list):
        context["messages"] = []
    if not isinstance(runtime_ns.get("inbox"), list):
        runtime_ns["inbox"] = []

    # Coerce iteration to an int, defaulting to 0 on any bad value.
    raw_iteration = scratchpad.get("iteration")
    if not isinstance(raw_iteration, int):
        try:
            scratchpad["iteration"] = int(raw_iteration or 0)
        except (TypeError, ValueError):
            scratchpad["iteration"] = 0

    # Coerce max_iterations to an int, defaulting to 25 on any bad value.
    raw_max = scratchpad.get("max_iterations")
    if raw_max is None:
        scratchpad["max_iterations"] = 25
    elif not isinstance(raw_max, int):
        try:
            scratchpad["max_iterations"] = int(raw_max)
        except (TypeError, ValueError):
            scratchpad["max_iterations"] = 25

    # Enforce a sane lower bound so the loop always runs at least once.
    if scratchpad["max_iterations"] < 1:
        scratchpad["max_iterations"] = 1

    return context, scratchpad, runtime_ns, temp, limits
93
+
94
+
95
+ def _compute_toolset_id(tool_specs: List[Dict[str, Any]]) -> str:
96
+ normalized = sorted((dict(s) for s in tool_specs), key=lambda s: str(s.get("name", "")))
97
+ payload = json.dumps(normalized, sort_keys=True, ensure_ascii=False, separators=(",", ":")).encode("utf-8")
98
+ digest = hashlib.sha256(payload).hexdigest()
99
+ return f"ts_{digest}"
100
+
101
+
102
def create_react_workflow(
    *,
    logic: ReActLogic,
    on_step: Optional[Callable[[str, Dict[str, Any]], None]] = None,
) -> WorkflowSpec:
    """Adapt ReActLogic to an AbstractRuntime workflow.

    Builds a WorkflowSpec whose nodes implement the ReAct loop:
    init -> reason -> parse -> (act -> observe -> reason | done), with
    ask_user handled via an ASK_USER effect and the iteration budget
    routed to the max_iterations terminal node.

    Args:
        logic: ReAct logic supplying tools, prompt building, response
            parsing, and observation formatting.
        on_step: Optional observability callback invoked as
            on_step(step_name, data); it never affects control flow.
    """

    def emit(step: str, data: Dict[str, Any]) -> None:
        # Best-effort observability hook; no-op when no callback was given.
        if on_step:
            on_step(step, data)

    # Snapshot the tool specs once at build time; toolset_id is a content
    # hash recorded for audit/debug metadata.
    tool_defs = logic.tools
    tool_specs = [t.to_dict() for t in tool_defs]
    toolset_id = _compute_toolset_id(tool_specs)

    def init_node(run: RunState, ctx) -> StepPlan:
        """Reset counters, seed the transcript with the task, then go to reason."""
        context, scratchpad, runtime_ns, _, limits = ensure_react_vars(run)
        scratchpad["iteration"] = 0
        limits["current_iteration"] = 0

        task = str(context.get("task", "") or "")
        context["task"] = task
        messages = context["messages"]

        # Avoid appending a duplicate user message on re-entry/resume.
        if task and (not messages or messages[-1].get("role") != "user" or messages[-1].get("content") != task):
            messages.append(_new_message(ctx, role="user", content=task))

        # Ensure toolset metadata is present for audit/debug.
        runtime_ns.setdefault("tool_specs", tool_specs)
        runtime_ns.setdefault("toolset_id", toolset_id)
        runtime_ns.setdefault("inbox", [])

        emit("init", {"task": task})
        return StepPlan(node_id="init", next_node="reason")

    def reason_node(run: RunState, ctx) -> StepPlan:
        """Check the iteration budget, drain inbox guidance, and issue an LLM_CALL effect."""
        context, scratchpad, runtime_ns, _, limits = ensure_react_vars(run)

        # Read from _limits (canonical) with fallback to scratchpad (backward compat)
        if "current_iteration" in limits:
            iteration = int(limits.get("current_iteration", 0) or 0)
            max_iterations = int(limits.get("max_iterations", 25) or 25)
        else:
            # Backward compatibility: use scratchpad
            iteration = int(scratchpad.get("iteration", 0) or 0)
            max_iterations = int(scratchpad.get("max_iterations") or 25)

        if max_iterations < 1:
            max_iterations = 1

        # Budget exhausted: route to the max_iterations terminal node.
        if iteration >= max_iterations:
            return StepPlan(node_id="reason", next_node="max_iterations")

        # Update both for transition period
        scratchpad["iteration"] = iteration + 1
        limits["current_iteration"] = iteration + 1

        task = str(context.get("task", "") or "")
        messages = context["messages"]

        # Drain queued user guidance from the runtime inbox; it is consumed
        # exactly once (the inbox is cleared after reading).
        inbox = runtime_ns.get("inbox", [])
        guidance = ""
        if isinstance(inbox, list) and inbox:
            inbox_messages = [str(m.get("content", "") or "") for m in inbox if isinstance(m, dict)]
            guidance = " | ".join([m for m in inbox_messages if m])
            runtime_ns["inbox"] = []

        req = logic.build_request(
            task=task,
            messages=messages,
            guidance=guidance,
            iteration=iteration + 1,
            max_iterations=max_iterations,
            vars=run.vars,  # Pass vars for _limits access
        )

        emit("reason", {"iteration": iteration + 1, "max_iterations": max_iterations, "has_guidance": bool(guidance)})

        payload = {"prompt": req.prompt, "tools": [t.to_dict() for t in req.tools]}
        if req.max_tokens is not None:
            payload["params"] = {"max_tokens": req.max_tokens}

        # The LLM reply lands in _temp.llm_response for parse_node to read.
        return StepPlan(
            node_id="reason",
            effect=Effect(
                type=EffectType.LLM_CALL,
                payload=payload,
                result_key="_temp.llm_response",
            ),
            next_node="parse",
        )

    def parse_node(run: RunState, ctx) -> StepPlan:
        """Record the assistant reply; branch to act (tool calls) or done (final answer)."""
        context, _, _, temp, _ = ensure_react_vars(run)
        response = temp.get("llm_response", {})
        content, tool_calls = logic.parse_response(response)

        context["messages"].append(_new_message(ctx, role="assistant", content=content))

        emit(
            "parse",
            {
                "has_tool_calls": bool(tool_calls),
                "content_preview": content[:100] if content else "(no content)",
            },
        )
        # The raw LLM payload is no longer needed once parsed.
        temp.pop("llm_response", None)

        if tool_calls:
            # Store plain dicts (via __dict__) so pending calls survive
            # serialization of the run state.
            temp["pending_tool_calls"] = [tc.__dict__ for tc in tool_calls]
            return StepPlan(node_id="parse", next_node="act")

        temp["final_answer"] = content
        return StepPlan(node_id="parse", next_node="done")

    def act_node(run: RunState, ctx) -> StepPlan:
        """Dispatch pending tool calls as a TOOL_CALLS effect; ask_user becomes ASK_USER."""
        _, _, _, temp, _ = ensure_react_vars(run)
        tool_calls = temp.get("pending_tool_calls", [])
        if not isinstance(tool_calls, list):
            tool_calls = []

        if not tool_calls:
            return StepPlan(node_id="act", next_node="reason")

        # Handle ask_user specially with ASK_USER effect.
        # NOTE(review): when an ask_user entry is found, only the calls AFTER
        # it are kept pending (tool_calls[i + 1:]); any non-ask_user calls
        # before it are discarded without executing — confirm this is intended.
        for i, tc in enumerate(tool_calls):
            if not isinstance(tc, dict):
                continue
            if tc.get("name") != "ask_user":
                continue
            args = tc.get("arguments") or {}
            question = str(args.get("question") or "Please provide input:")
            choices = args.get("choices")
            choices = list(choices) if isinstance(choices, list) else None

            temp["pending_tool_calls"] = tool_calls[i + 1 :]
            emit("ask_user", {"question": question, "choices": choices or []})
            return StepPlan(
                node_id="act",
                effect=Effect(
                    type=EffectType.ASK_USER,
                    payload={"prompt": question, "choices": choices, "allow_free_text": True},
                    result_key="_temp.user_response",
                ),
                next_node="handle_user_response",
            )

        for tc in tool_calls:
            if isinstance(tc, dict):
                emit("act", {"tool": tc.get("name", ""), "args": tc.get("arguments", {})})

        # Normalize both dict-shaped and ToolCall-shaped entries into the
        # payload format the runtime's TOOL_CALLS effect expects.
        formatted_calls: List[Dict[str, Any]] = []
        for tc in tool_calls:
            if isinstance(tc, dict):
                formatted_calls.append(
                    {
                        "name": tc.get("name", ""),
                        "arguments": tc.get("arguments", {}),
                        "call_id": tc.get("call_id", "1"),
                    }
                )
            elif isinstance(tc, ToolCall):
                formatted_calls.append(
                    {
                        "name": tc.name,
                        "arguments": tc.arguments,
                        "call_id": tc.call_id or "1",
                    }
                )

        return StepPlan(
            node_id="act",
            effect=Effect(
                type=EffectType.TOOL_CALLS,
                payload={"tool_calls": formatted_calls},
                result_key="_temp.tool_results",
            ),
            next_node="observe",
        )

    def observe_node(run: RunState, ctx) -> StepPlan:
        """Append tool results to the transcript as 'tool' messages, then loop to reason."""
        context, _, _, temp, _ = ensure_react_vars(run)
        tool_results = temp.get("tool_results", {})
        if not isinstance(tool_results, dict):
            tool_results = {}

        results = tool_results.get("results", [])
        if not isinstance(results, list):
            results = []

        for r in results:
            if not isinstance(r, dict):
                continue
            name = str(r.get("name", "tool") or "tool")
            success = bool(r.get("success"))
            output = r.get("output", "")
            error = r.get("error", "")
            # On failure prefer the error text, falling back to output.
            rendered = logic.format_observation(
                name=name,
                output=str(output if success else (error or output)),
                success=success,
            )
            emit("observe", {"tool": name, "result": rendered[:150]})
            context["messages"].append(
                _new_message(
                    ctx,
                    role="tool",
                    content=rendered,
                    metadata={
                        "name": name,
                        "call_id": r.get("call_id"),
                        "success": success,
                    },
                )
            )

        # Clear consumed effect output and the pending queue before looping.
        temp.pop("tool_results", None)
        temp["pending_tool_calls"] = []
        return StepPlan(node_id="observe", next_node="reason")

    def handle_user_response_node(run: RunState, ctx) -> StepPlan:
        """Fold the ASK_USER answer into the transcript; resume act or reason."""
        context, _, _, temp, _ = ensure_react_vars(run)
        user_response = temp.get("user_response", {})
        if not isinstance(user_response, dict):
            user_response = {}
        response_text = str(user_response.get("response", "") or "")
        emit("user_response", {"response": response_text})

        context["messages"].append(
            _new_message(ctx, role="user", content=f"[User response]: {response_text}")
        )
        temp.pop("user_response", None)

        # If more tool calls were queued behind the ask_user, run them next.
        if temp.get("pending_tool_calls"):
            return StepPlan(node_id="handle_user_response", next_node="act")
        return StepPlan(node_id="handle_user_response", next_node="reason")

    def done_node(run: RunState, ctx) -> StepPlan:
        """Terminal node: package the final answer, iteration count, and transcript."""
        context, scratchpad, _, temp, limits = ensure_react_vars(run)
        answer = str(temp.get("final_answer") or "No answer provided")
        emit("done", {"answer": answer})

        # Prefer _limits.current_iteration, fall back to scratchpad
        iterations = int(limits.get("current_iteration", 0) or scratchpad.get("iteration", 0) or 0)

        return StepPlan(
            node_id="done",
            complete_output={
                "answer": answer,
                "iterations": iterations,
                "messages": list(context.get("messages") or []),
            },
        )

    def max_iterations_node(run: RunState, ctx) -> StepPlan:
        """Terminal node when the budget is exhausted; last message becomes the answer."""
        context, scratchpad, _, _, limits = ensure_react_vars(run)

        # Prefer _limits, fall back to scratchpad
        max_iterations = int(limits.get("max_iterations", 0) or scratchpad.get("max_iterations", 25) or 25)
        if max_iterations < 1:
            max_iterations = 1
        emit("max_iterations", {"iterations": max_iterations})

        messages = list(context.get("messages") or [])
        last_content = messages[-1]["content"] if messages else "Max iterations reached"
        return StepPlan(
            node_id="max_iterations",
            complete_output={
                "answer": last_content,
                "iterations": max_iterations,
                "messages": messages,
            },
        )

    return WorkflowSpec(
        workflow_id="react_agent",
        entry_node="init",
        nodes={
            "init": init_node,
            "reason": reason_node,
            "parse": parse_node,
            "act": act_node,
            "observe": observe_node,
            "handle_user_response": handle_user_response_node,
            "done": done_node,
            "max_iterations": max_iterations_node,
        },
    )
390
+
@@ -0,0 +1,15 @@
1
"""AbstractAgent agents."""

from .base import BaseAgent
from .react import ReactAgent, create_react_workflow, create_react_agent
from .codeact import CodeActAgent, create_codeact_workflow, create_codeact_agent

# Public API of the agents package; keep in sync with the imports above.
__all__ = [
    "BaseAgent",
    "ReactAgent",
    "create_react_workflow",
    "create_react_agent",
    "CodeActAgent",
    "create_codeact_workflow",
    "create_codeact_agent",
]