abstractagent 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,210 @@
1
+ """ReAct agent implementation.
2
+
3
+ This module wires:
4
+ - Pure ReAct reasoning logic (abstractagent.logic)
5
+ - To an AbstractRuntime workflow (abstractagent.adapters)
6
+
7
+ The public API is intentionally stable:
8
+ - ReactAgent
9
+ - create_react_workflow
10
+ - create_react_agent
11
+ """
12
+
13
+ from __future__ import annotations
14
+
15
+ from typing import Any, Callable, Dict, List, Optional
16
+
17
+ from abstractcore.tools import ToolDefinition
18
+ from abstractruntime import RunState, Runtime, WorkflowSpec
19
+
20
+ from .base import BaseAgent
21
+ from ..adapters.react_runtime import create_react_workflow
22
+ from ..logic.builtins import ASK_USER_TOOL
23
+ from ..logic.react import ReActLogic
24
+
25
+
26
def _tool_definitions_from_callables(tools: List[Callable[..., Any]]) -> List[ToolDefinition]:
    """Resolve a ToolDefinition for each tool callable.

    A callable may expose a pre-built schema via its ``_tool_definition``
    attribute; when absent, a definition is derived from the function
    signature with ``ToolDefinition.from_function``.
    """

    def _resolve(fn: Callable[..., Any]) -> ToolDefinition:
        existing = getattr(fn, "_tool_definition", None)
        return existing if existing is not None else ToolDefinition.from_function(fn)

    return [_resolve(fn) for fn in tools]
34
+
35
+
36
+ def _copy_messages(messages: Any) -> List[Dict[str, Any]]:
37
+ if not isinstance(messages, list):
38
+ return []
39
+ out: List[Dict[str, Any]] = []
40
+ for m in messages:
41
+ if isinstance(m, dict):
42
+ out.append(dict(m))
43
+ return out
44
+
45
+
46
class ReactAgent(BaseAgent):
    """Reason-Act-Observe agent with tool calling.

    Wires the pure ReAct reasoning logic (``ReActLogic``) to an
    AbstractRuntime workflow via ``create_react_workflow``.

    Args:
        runtime: AbstractRuntime instance that executes the workflow.
        tools: Optional tool callables exposed to the LLM.
        on_step: Optional callback invoked with (step_name, payload) per step.
        max_iterations: Upper bound on reason/act cycles (clamped to >= 1).
        max_history_messages: History messages sent to the LLM; -1 means
            unlimited, any other value is clamped to >= 1.
        max_tokens: Optional token budget forwarded to the LLM.
        actor_id: Optional actor identifier for the runtime.
        session_id: Optional session identifier for the runtime.
    """

    def __init__(
        self,
        *,
        runtime: Runtime,
        tools: Optional[List[Callable[..., Any]]] = None,
        on_step: Optional[Callable[[str, Dict[str, Any]], None]] = None,
        max_iterations: int = 25,
        max_history_messages: int = -1,
        max_tokens: Optional[int] = 32768,
        actor_id: Optional[str] = None,
        session_id: Optional[str] = None,
    ):
        # Clamp to at least one iteration.
        self._max_iterations = max(1, int(max_iterations))
        # -1 means unlimited (send all messages), otherwise must be >= 1
        self._max_history_messages = int(max_history_messages)
        if self._max_history_messages != -1 and self._max_history_messages < 1:
            self._max_history_messages = 1
        self._max_tokens = max_tokens

        # Populated by _create_workflow(), which BaseAgent.__init__ triggers.
        self.logic: Optional[ReActLogic] = None
        super().__init__(
            runtime=runtime,
            tools=tools,
            on_step=on_step,
            actor_id=actor_id,
            session_id=session_id,
        )

    def _create_workflow(self) -> WorkflowSpec:
        """Build the ReAct workflow spec from the configured tools."""
        tool_defs = _tool_definitions_from_callables(self.tools)
        # Built-in ask_user is a schema-only tool (handled via ASK_USER effect in the adapter).
        tool_defs = [ASK_USER_TOOL, *tool_defs]

        logic = ReActLogic(
            tools=tool_defs,
            max_history_messages=self._max_history_messages,
            max_tokens=self._max_tokens,
        )
        self.logic = logic
        return create_react_workflow(logic=logic, on_step=self.on_step)

    def start(self, task: str) -> str:
        """Start a new run for *task* and return the run id.

        Raises:
            ValueError: If task is empty or whitespace-only.
        """
        task = str(task or "").strip()
        if not task:
            raise ValueError("task must be a non-empty string")

        # Named run_vars (not "vars") to avoid shadowing the builtin.
        run_vars: Dict[str, Any] = {
            "context": {"task": task, "messages": _copy_messages(self.session_messages)},
            "scratchpad": {"iteration": 0, "max_iterations": int(self._max_iterations)},
            "_runtime": {"inbox": []},
            "_temp": {},
            # Canonical _limits namespace for runtime awareness
            "_limits": {
                "max_iterations": int(self._max_iterations),
                "current_iteration": 0,
                "max_tokens": self._max_tokens,
                "max_history_messages": int(self._max_history_messages),
                "estimated_tokens_used": 0,
                "warn_iterations_pct": 80,
                "warn_tokens_pct": 80,
            },
        }

        run_id = self.runtime.start(
            workflow=self.workflow,
            vars=run_vars,
            actor_id=self._ensure_actor_id(),
            session_id=self._ensure_session_id(),
        )
        self._current_run_id = run_id
        return run_id

    def get_limit_status(self) -> Dict[str, Any]:
        """Get current limit status for the active run.

        Returns a structured dict with information about iterations, tokens,
        and history limits, including whether warning thresholds are reached.

        Returns:
            Dict with "iterations", "tokens", and "history" status info,
            or empty dict if no active run.
        """
        if self._current_run_id is None:
            return {}
        return self.runtime.get_limit_status(self._current_run_id)

    def update_limits(self, **updates: Any) -> None:
        """Update limits mid-session.

        Only allowed limit keys are updated; unknown keys are ignored.
        Allowed keys: max_iterations, max_tokens, max_output_tokens,
        max_history_messages, warn_iterations_pct, warn_tokens_pct.

        Args:
            **updates: Limit key-value pairs to update

        Raises:
            RuntimeError: If no active run
        """
        if self._current_run_id is None:
            raise RuntimeError("No active run. Call start() first.")
        self.runtime.update_limits(self._current_run_id, updates)

    def step(self) -> RunState:
        """Advance the active run by exactly one workflow step.

        Raises:
            RuntimeError: If no active run
        """
        # "is None" for consistency with get_limit_status/update_limits.
        if self._current_run_id is None:
            raise RuntimeError("No active run. Call start() first.")
        return self.runtime.tick(workflow=self.workflow, run_id=self._current_run_id, max_steps=1)
158
+
159
+
160
def create_react_agent(
    *,
    provider: str = "ollama",
    model: str = "qwen3:1.7b-q4_K_M",
    tools: Optional[List[Callable[..., Any]]] = None,
    on_step: Optional[Callable[[str, Dict[str, Any]], None]] = None,
    max_iterations: int = 25,
    max_history_messages: int = -1,
    max_tokens: Optional[int] = 32768,
    llm_kwargs: Optional[Dict[str, Any]] = None,
    run_store: Optional[Any] = None,
    ledger_store: Optional[Any] = None,
    actor_id: Optional[str] = None,
    session_id: Optional[str] = None,
) -> ReactAgent:
    """Factory: create a ReactAgent with a local AbstractCore-backed runtime."""

    # Imported lazily so merely importing this module does not require the
    # AbstractCore integration to be available.
    from abstractruntime.integrations.abstractcore import (
        MappingToolExecutor,
        create_local_runtime,
    )

    if tools is None:
        from ..tools import ALL_TOOLS as _default_tools

        tools = list(_default_tools)

    local_runtime = create_local_runtime(
        provider=provider,
        model=model,
        llm_kwargs=llm_kwargs,
        run_store=run_store,
        ledger_store=ledger_store,
        tool_executor=MappingToolExecutor.from_tools(list(tools)),
    )

    return ReactAgent(
        runtime=local_runtime,
        tools=list(tools),
        on_step=on_step,
        max_iterations=max_iterations,
        max_history_messages=max_history_messages,
        max_tokens=max_tokens,
        actor_id=actor_id,
        session_id=session_id,
    )
203
+
204
+
205
# Public, stable API of this module (see module docstring).
__all__ = [
    "ReactAgent",
    "create_react_workflow",
    "create_react_agent",
]
210
+
@@ -0,0 +1,19 @@
1
+ """Pure agent logic (no runtime imports).
2
+
3
+ This package contains portable, unit-testable logic for agents. Runtime-specific
4
+ workflow wiring lives under `abstractagent.adapters`.
5
+ """
6
+
7
+ from .builtins import ASK_USER_TOOL
8
+ from .codeact import CodeActLogic
9
+ from .react import ReActLogic
10
+ from .types import AskUserAction, FinalAnswer, LLMRequest
11
+
12
# Public names re-exported by abstractagent.logic.
__all__ = [
    "ASK_USER_TOOL",
    "LLMRequest",
    "AskUserAction",
    "FinalAnswer",
    "ReActLogic",
    "CodeActLogic",
]
@@ -0,0 +1,29 @@
1
+ """Built-in tool specs used by agents.
2
+
3
+ These are tool *definitions* (schemas), not executable tool callables.
4
+ """
5
+
6
+ from __future__ import annotations
7
+
8
+ from abstractcore.tools import ToolDefinition
9
+
10
# Schema-only built-in tool: there is no executable callable behind it.
# Agents advertise it to the LLM; calls to it are handled via the ASK_USER
# effect in the runtime adapter (see ReactAgent._create_workflow).
ASK_USER_TOOL = ToolDefinition(
    name="ask_user",
    description=(
        "Ask the user a question when you need clarification or input. "
        "Use this when the task is ambiguous or you need the user to make a choice."
    ),
    # Parameter spec: per the descriptions, "question" is required and
    # "choices" is an optional list of options to present.
    parameters={
        "question": {
            "type": "string",
            "description": "The question to ask the user (required)",
        },
        "choices": {
            "type": "array",
            "items": {"type": "string"},
            "description": "Optional list of choices for the user to pick from",
        },
    },
    when_to_use="When the task is ambiguous or you need user input to proceed",
)
29
+
@@ -0,0 +1,166 @@
1
+ """CodeAct logic (pure; no runtime imports).
2
+
3
+ CodeAct is a ReAct-like loop where the main action is executing Python code
4
+ instead of calling many specialized tools.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ import re
10
+ from typing import Any, Dict, List, Optional, Tuple
11
+
12
+ from abstractcore.tools import ToolCall, ToolDefinition
13
+
14
+ from .types import LLMRequest
15
+
16
# Matches the first fenced code block: ```python / ```py / bare ``` fences.
# DOTALL lets the non-greedy capture span multiple lines of code.
_CODE_BLOCK_RE = re.compile(r"```(?:python|py)?\s*\n(.*?)\n```", re.IGNORECASE | re.DOTALL)
17
+
18
+
19
class CodeActLogic:
    """Pure CodeAct reasoning logic (no runtime imports).

    Builds LLM prompts, parses responses into tool calls, extracts fenced
    code blocks, and formats tool observations for the next prompt.
    """

    def __init__(
        self,
        *,
        tools: List[ToolDefinition],
        max_history_messages: int = -1,
        max_tokens: Optional[int] = None,
    ):
        # Copy so later mutation of the caller's list cannot affect us.
        self._tools = list(tools)
        self._max_history_messages = int(max_history_messages)
        # -1 means unlimited (send all messages), otherwise must be >= 1
        if self._max_history_messages != -1 and self._max_history_messages < 1:
            self._max_history_messages = 1
        self._max_tokens = max_tokens

    @property
    def tools(self) -> List[ToolDefinition]:
        # Defensive copy: callers cannot mutate internal state.
        return list(self._tools)

    def build_request(
        self,
        *,
        task: str,
        messages: List[Dict[str, Any]],
        guidance: str = "",
        iteration: int = 1,
        max_iterations: int = 20,
        vars: Optional[Dict[str, Any]] = None,
    ) -> LLMRequest:
        """Build an LLM request for the CodeAct agent.

        Args:
            task: The task to perform
            messages: Conversation history
            guidance: Optional guidance text to inject
            iteration: Current iteration number
            max_iterations: Maximum allowed iterations
            vars: Optional run.vars dict. If provided, limits are read from
            vars["_limits"] (canonical) with fallback to instance defaults.
        """
        task = str(task or "")
        guidance = str(guidance or "").strip()

        # Get limits from vars if available, else use instance defaults
        limits = (vars or {}).get("_limits", {})
        # NOTE(review): the "or" fallback also triggers for falsy limit
        # values (e.g. 0) in vars["_limits"] -- presumably intentional.
        max_history = int(limits.get("max_history_messages", self._max_history_messages) or self._max_history_messages)
        max_tokens = limits.get("max_tokens", self._max_tokens)
        if max_tokens is not None:
            max_tokens = int(max_tokens)

        # -1 means unlimited (use all messages)
        if max_history == -1:
            history = messages if messages else []
        else:
            history = messages[-max_history:] if messages else []
        history_text = "\n".join(
            [f"{m.get('role', 'unknown')}: {m.get('content', '')}" for m in history]
        )

        prompt = (
            "You are CodeAct: you can solve tasks by writing and executing Python code.\n"
            "Use the tool `execute_python` to run Python snippets. Prefer small, focused scripts.\n"
            "Print any intermediate results you need.\n"
            "When you are confident, provide the final answer without calling tools.\n\n"
            f"Iteration: {int(iteration)}/{int(max_iterations)}\n\n"
            f"Task: {task}\n\n"
        )
        if history_text:
            prompt += f"History:\n{history_text}\n\n"

        if guidance:
            prompt += f"[User guidance]: {guidance}\n\n"

        prompt += (
            "If you need to run code, either:\n"
            "- Call `execute_python` with the Python code, or\n"
            "- If tool calling is unavailable, include a fenced ```python code block.\n"
        )

        return LLMRequest(prompt=prompt, tools=self.tools, max_tokens=max_tokens)

    def parse_response(self, response: Any) -> Tuple[str, List[ToolCall]]:
        """Extract (content, tool_calls) from a raw LLM response dict.

        Non-dict responses yield ("", []). Tool calls may arrive natively
        as ToolCall objects or dicts; otherwise they are parsed out of the
        text content as a fallback.
        """
        if not isinstance(response, dict):
            return "", []

        content = response.get("content")
        content = "" if content is None else str(content)

        tool_calls_raw = response.get("tool_calls") or []
        tool_calls: List[ToolCall] = []
        if isinstance(tool_calls_raw, list):
            for tc in tool_calls_raw:
                if isinstance(tc, ToolCall):
                    tool_calls.append(tc)
                    continue
                if isinstance(tc, dict):
                    name = str(tc.get("name", "") or "")
                    args = tc.get("arguments", {})
                    call_id = tc.get("call_id")
                    # Dict-shaped calls are only accepted with dict arguments.
                    if isinstance(args, dict):
                        tool_calls.append(ToolCall(name=name, arguments=dict(args), call_id=call_id))

        # FALLBACK: Parse from content if no native tool calls
        # Handles <|tool_call|>, <function_call>, ```tool_code, etc.
        if not tool_calls and content:
            from abstractcore.tools.parser import parse_tool_calls, detect_tool_calls
            if detect_tool_calls(content):
                # Pass model name for architecture-specific parsing
                model_name = response.get("model")
                tool_calls = parse_tool_calls(content, model_name=model_name)

        return content, tool_calls

    def extract_code(self, text: str) -> str | None:
        """Return the first fenced code block (```python / ```py / bare ```)
        in *text*, stripped; None when absent or effectively empty."""
        text = str(text or "")
        m = _CODE_BLOCK_RE.search(text)
        if not m:
            return None
        code = m.group(1).strip("\n")
        # Whitespace-only blocks collapse to None.
        return code.strip() or None

    def format_observation(self, *, name: str, output: Any, success: bool) -> str:
        """Render a tool result as an observation string.

        `execute_python` results shaped as dicts (stdout/stderr/exit_code/
        error keys) get a structured rendering; everything else is
        stringified, with an "Error:" prefix on failure.
        """
        if name != "execute_python":
            out = "" if output is None else str(output)
            return f"[{name}]: {out}" if success else f"[{name}]: Error: {out}"

        if not isinstance(output, dict):
            out = "" if output is None else str(output)
            return f"[execute_python]: {out}" if success else f"[execute_python]: Error: {out}"

        stdout = str(output.get("stdout") or "")
        stderr = str(output.get("stderr") or "")
        exit_code = output.get("exit_code")
        error = output.get("error")

        parts: List[str] = []
        if error:
            parts.append(f"error={error}")
        if exit_code is not None:
            parts.append(f"exit_code={exit_code}")
        if stdout:
            parts.append("stdout:\n" + stdout)
        if stderr:
            parts.append("stderr:\n" + stderr)

        rendered = "\n".join(parts).strip() or "(no output)"
        return f"[execute_python]: {rendered}"
166
+
@@ -0,0 +1,126 @@
1
+ """ReAct logic (pure; no runtime imports)."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Any, Dict, List, Optional, Tuple
6
+
7
+ from abstractcore.tools import ToolCall, ToolDefinition
8
+
9
+ from .types import LLMRequest
10
+
11
+
12
class ReActLogic:
    """Pure ReAct reasoning logic (no runtime imports).

    Builds LLM prompts from a task plus conversation history, parses
    responses into tool calls, and formats tool observations.
    """

    def __init__(
        self,
        *,
        tools: List[ToolDefinition],
        max_history_messages: int = -1,
        max_tokens: Optional[int] = None,
    ):
        # Copy so later mutation of the caller's list cannot affect us.
        self._tools = list(tools)
        self._max_history_messages = int(max_history_messages)
        # -1 means unlimited (send all messages), otherwise must be >= 1
        if self._max_history_messages != -1 and self._max_history_messages < 1:
            self._max_history_messages = 1
        self._max_tokens = max_tokens

    @property
    def tools(self) -> List[ToolDefinition]:
        # Defensive copy: callers cannot mutate internal state.
        return list(self._tools)

    def build_request(
        self,
        *,
        task: str,
        messages: List[Dict[str, Any]],
        guidance: str = "",
        iteration: int = 1,
        max_iterations: int = 20,
        vars: Optional[Dict[str, Any]] = None,
    ) -> LLMRequest:
        """Build an LLM request for the ReAct agent.

        Args:
            task: The task to perform
            messages: Conversation history
            guidance: Optional guidance text to inject
            iteration: Current iteration number
            max_iterations: Maximum allowed iterations
            vars: Optional run.vars dict. If provided, limits are read from
            vars["_limits"] (canonical) with fallback to instance defaults.
        """
        task = str(task or "")
        guidance = str(guidance or "").strip()

        # Get limits from vars if available, else use instance defaults
        limits = (vars or {}).get("_limits", {})
        # NOTE(review): the "or" fallback also triggers for falsy limit
        # values (e.g. 0) in vars["_limits"] -- presumably intentional.
        max_history = int(limits.get("max_history_messages", self._max_history_messages) or self._max_history_messages)
        max_tokens = limits.get("max_tokens", self._max_tokens)
        if max_tokens is not None:
            max_tokens = int(max_tokens)

        # Fresh conversation (at most one message): plain task prompt.
        if len(messages) <= 1:
            prompt = (
                f"Task: {task}\n\n"
                "Use the available tools to complete this task. When done, provide your final answer."
            )
        else:
            # -1 means unlimited (use all messages)
            if max_history == -1:
                history = messages
            else:
                history = messages[-max_history:]
            history_text = "\n".join(
                [f"{m.get('role', 'unknown')}: {m.get('content', '')}" for m in history]
            )
            prompt = (
                "You have access to the conversation history below as context.\n"
                "Do not claim you have no memory of it; it is provided to you here.\n\n"
                f"Iteration: {int(iteration)}/{int(max_iterations)}\n\n"
                f"History:\n{history_text}\n\n"
                "Continue the conversation and work on the user's latest request.\n"
                "Use tools when needed, or provide a final answer."
            )

        if guidance:
            prompt += "\n\n[User guidance]: " + guidance

        return LLMRequest(prompt=prompt, tools=self.tools, max_tokens=max_tokens)

    def parse_response(self, response: Any) -> Tuple[str, List[ToolCall]]:
        """Extract (content, tool_calls) from a raw LLM response dict.

        Non-dict responses yield ("", []). Tool calls may arrive natively
        as ToolCall objects or dicts; otherwise they are parsed out of the
        text content as a fallback.
        """
        if not isinstance(response, dict):
            return "", []

        content = response.get("content")
        content = "" if content is None else str(content)

        tool_calls_raw = response.get("tool_calls") or []
        tool_calls: List[ToolCall] = []
        if isinstance(tool_calls_raw, list):
            for tc in tool_calls_raw:
                if isinstance(tc, ToolCall):
                    tool_calls.append(tc)
                    continue
                if isinstance(tc, dict):
                    name = str(tc.get("name", "") or "")
                    args = tc.get("arguments", {})
                    call_id = tc.get("call_id")
                    # Dict-shaped calls are only accepted with dict arguments.
                    if isinstance(args, dict):
                        tool_calls.append(ToolCall(name=name, arguments=dict(args), call_id=call_id))

        # FALLBACK: Parse from content if no native tool calls
        # Handles <|tool_call|>, <function_call>, ```tool_code, etc.
        if not tool_calls and content:
            from abstractcore.tools.parser import parse_tool_calls, detect_tool_calls
            if detect_tool_calls(content):
                # Pass model name for architecture-specific parsing
                model_name = response.get("model")
                tool_calls = parse_tool_calls(content, model_name=model_name)

        return content, tool_calls

    def format_observation(self, *, name: str, output: str, success: bool) -> str:
        """Render a tool result as "[name]: output", with an "Error:" prefix
        when the call failed."""
        if success:
            return f"[{name}]: {output}"
        return f"[{name}]: Error: {output}"
126
+
@@ -0,0 +1,30 @@
1
+ """Pure logic types shared across agents."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from dataclasses import dataclass
6
+ from typing import List, Optional
7
+
8
+ from abstractcore.tools import ToolDefinition
9
+
10
+
11
@dataclass(frozen=True)
class LLMRequest:
    """What to ask the LLM (portable; runtime-agnostic)."""

    # Fully rendered prompt text.
    prompt: str
    # Tool schemas to advertise to the model.
    tools: List[ToolDefinition]
    # Optional system prompt; None leaves it to the caller/provider.
    system_prompt: Optional[str] = None
    # Optional token budget; None means no explicit limit is passed.
    max_tokens: Optional[int] = None
19
+
20
+
21
@dataclass(frozen=True)
class AskUserAction:
    """A request to ask the human user a question (built-in ask_user tool)."""

    # The question to present to the user.
    question: str
    # Optional fixed choices for the user to pick from.
    choices: Optional[List[str]] = None
25
+
26
+
27
@dataclass(frozen=True)
class FinalAnswer:
    """The agent's final answer text."""

    # Final answer content returned to the caller.
    content: str
30
+