abstractagent 0.2.0__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff shows the changes between publicly available package versions as published to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- abstractagent/adapters/__init__.py +2 -1
- abstractagent/adapters/codeact_runtime.py +823 -57
- abstractagent/adapters/memact_runtime.py +721 -0
- abstractagent/adapters/react_runtime.py +1114 -67
- abstractagent/agents/__init__.py +4 -0
- abstractagent/agents/base.py +58 -1
- abstractagent/agents/codeact.py +89 -18
- abstractagent/agents/memact.py +244 -0
- abstractagent/agents/react.py +91 -18
- abstractagent/logic/__init__.py +2 -0
- abstractagent/logic/builtins.py +212 -5
- abstractagent/logic/codeact.py +87 -80
- abstractagent/logic/memact.py +127 -0
- abstractagent/logic/react.py +108 -48
- abstractagent/repl.py +24 -447
- abstractagent/scripts/__init__.py +5 -0
- abstractagent/scripts/lmstudio_tool_eval.py +426 -0
- abstractagent/tools/__init__.py +3 -0
- {abstractagent-0.2.0.dist-info → abstractagent-0.3.0.dist-info}/METADATA +10 -11
- abstractagent-0.3.0.dist-info/RECORD +31 -0
- abstractagent/ui/__init__.py +0 -5
- abstractagent/ui/question.py +0 -197
- abstractagent-0.2.0.dist-info/RECORD +0 -28
- {abstractagent-0.2.0.dist-info → abstractagent-0.3.0.dist-info}/WHEEL +0 -0
- {abstractagent-0.2.0.dist-info → abstractagent-0.3.0.dist-info}/entry_points.txt +0 -0
- {abstractagent-0.2.0.dist-info → abstractagent-0.3.0.dist-info}/licenses/LICENSE +0 -0
- {abstractagent-0.2.0.dist-info → abstractagent-0.3.0.dist-info}/top_level.txt +0 -0
abstractagent/logic/memact.py
ADDED
@@ -0,0 +1,127 @@
+"""MemAct logic (pure; no runtime imports).
+
+MemAct is a memory-enhanced agent (Letta-like) that relies on a separate, runtime-owned
+Active Memory system. This logic layer stays conventional:
+- tool calling is the only way to have an effect
+- tool results are appended to chat history by the runtime adapter
+
+The memory system is injected by the MemAct runtime adapter via the system prompt and
+updated via a structured JSON envelope at finalization.
+"""
+
+from __future__ import annotations
+
+from typing import Any, Dict, List, Optional, Tuple
+
+from abstractcore.tools import ToolCall, ToolDefinition
+
+from .types import LLMRequest
+
+
+class MemActLogic:
+    def __init__(
+        self,
+        *,
+        tools: List[ToolDefinition],
+        max_history_messages: int = -1,
+        max_tokens: Optional[int] = None,
+    ):
+        self._tools = list(tools)
+        self._max_history_messages = int(max_history_messages)
+        if self._max_history_messages != -1 and self._max_history_messages < 1:
+            self._max_history_messages = 1
+        self._max_tokens = max_tokens
+
+    @property
+    def tools(self) -> List[ToolDefinition]:
+        return list(self._tools)
+
+    def build_request(
+        self,
+        *,
+        task: str,
+        messages: List[Dict[str, Any]],
+        guidance: str = "",
+        iteration: int = 1,
+        max_iterations: int = 20,
+        vars: Optional[Dict[str, Any]] = None,
+    ) -> LLMRequest:
+        """Build a base LLM request (adapter injects memory blocks separately)."""
+        _ = messages  # history is carried via chat messages by the adapter
+
+        task = str(task or "").strip()
+        guidance = str(guidance or "").strip()
+
+        limits = (vars or {}).get("_limits", {})
+        max_output_tokens = limits.get("max_output_tokens", None)
+        if max_output_tokens is not None:
+            try:
+                max_output_tokens = int(max_output_tokens)
+            except Exception:
+                max_output_tokens = None
+
+        output_budget_line = ""
+        if isinstance(max_output_tokens, int) and max_output_tokens > 0:
+            output_budget_line = f"- Output token limit for this response: {max_output_tokens}.\n"
+
+        system_prompt = (
+            f"Iteration: {int(iteration)}/{int(max_iterations)}\n\n"
+            "You are an autonomous MemAct agent.\n"
+            "Taking action / having an effect means calling a tool.\n\n"
+            "Rules:\n"
+            "- Be truthful: only claim actions supported by tool outputs.\n"
+            "- Be autonomous: do not ask the user for confirmation to proceed; keep going until the task is done.\n"
+            "- If you need to create/edit files, run commands, fetch URLs, or search, you MUST call an appropriate tool.\n"
+            "- Never fabricate tool outputs.\n"
+            "- Only ask the user a question when required information is missing.\n"
+            f"{output_budget_line}"
+        ).strip()
+
+        if guidance:
+            system_prompt = (system_prompt + "\n\nGuidance:\n" + guidance).strip()
+
+        return LLMRequest(
+            prompt=task,
+            system_prompt=system_prompt,
+            tools=self.tools,
+            max_tokens=max_output_tokens,
+        )
+
+    def parse_response(self, response: Any) -> Tuple[str, List[ToolCall]]:
+        if not isinstance(response, dict):
+            return "", []
+
+        content = response.get("content")
+        content = "" if content is None else str(content)
+        content = content.lstrip()
+        for prefix in ("assistant:", "assistant:"):
+            if content.lower().startswith(prefix):
+                content = content[len(prefix) :].lstrip()
+                break
+
+        if not content.strip():
+            reasoning = response.get("reasoning")
+            if isinstance(reasoning, str) and reasoning.strip():
+                content = reasoning.strip()
+
+        tool_calls_raw = response.get("tool_calls") or []
+        tool_calls: List[ToolCall] = []
+        if isinstance(tool_calls_raw, list):
+            for tc in tool_calls_raw:
+                if isinstance(tc, ToolCall):
+                    tool_calls.append(tc)
+                    continue
+                if isinstance(tc, dict):
+                    name = str(tc.get("name", "") or "")
+                    args = tc.get("arguments", {})
+                    call_id = tc.get("call_id")
+                    if isinstance(args, dict):
+                        tool_calls.append(ToolCall(name=name, arguments=dict(args), call_id=call_id))
+
+        return content, tool_calls
+
+    def format_observation(self, *, name: str, output: str, success: bool) -> str:
+        if success:
+            return f"[{name}]: {output}"
+        return f"[{name}]: Error: {output}"
+
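For orientation, here is a minimal usage sketch of the new MemActLogic surface, based only on the signatures visible in this diff. The response dict is a hypothetical stand-in for a provider payload; real calls go through the MemAct runtime adapter.

from abstractagent.logic.memact import MemActLogic

logic = MemActLogic(tools=[])  # no ToolDefinition instances needed for this sketch

# The adapter injects memory blocks; this builds only the base request.
request = logic.build_request(
    task="Summarize the open issues.",
    messages=[],  # history travels via chat messages in the adapter
    vars={"_limits": {"max_output_tokens": 512}},  # becomes request.max_tokens
)

# parse_response accepts a plain provider-style dict.
content, tool_calls = logic.parse_response({
    "content": "Assistant: working on it.",
    "tool_calls": [{"name": "list_issues", "arguments": {}, "call_id": "c1"}],
})
# content == "working on it."  (role-label prefix stripped)
# tool_calls[0].name == "list_issues"

# Observations are formatted for the chat history like so:
logic.format_observation(name="list_issues", output="3 open issues", success=True)
# -> "[list_issues]: 3 open issues"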
abstractagent/logic/react.py
CHANGED
@@ -1,4 +1,13 @@
-"""ReAct logic (pure; no runtime imports)."""
+"""ReAct logic (pure; no runtime imports).
+
+This module implements the classic ReAct loop:
+- the model decides whether to call tools
+- tool results are appended to chat history
+- the model iterates until it can answer directly
+
+ReAct is intentionally *not* a memory-enhanced agent. Long-term memory and
+structured memory blocks belong in a separate agent (MemAct).
+"""
 
 from __future__ import annotations
 
@@ -28,6 +37,24 @@ class ReActLogic:
     def tools(self) -> List[ToolDefinition]:
         return list(self._tools)
 
+    def add_tools(self, tools: List[ToolDefinition]) -> int:
+        """Add tool definitions to this logic instance (deduped by name)."""
+        if not isinstance(tools, list) or not tools:
+            return 0
+
+        existing = {str(t.name) for t in self._tools if getattr(t, "name", None)}
+        added = 0
+        for t in tools:
+            name = getattr(t, "name", None)
+            if not isinstance(name, str) or not name.strip():
+                continue
+            if name in existing:
+                continue
+            self._tools.append(t)
+            existing.add(name)
+            added += 1
+        return added
+
     def build_request(
         self,
         *,
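A sketch of the dedupe-by-name behavior of add_tools, using hypothetical stand-in objects: the ToolDefinition constructor is not shown in this diff, and ReActLogic is assumed here to accept a keyword-only tools list the way MemActLogic does.

from types import SimpleNamespace

from abstractagent.logic.react import ReActLogic

def fake_tool(name):  # stand-in exposing only the .name attribute add_tools reads
    return SimpleNamespace(name=name)

logic = ReActLogic(tools=[fake_tool("web_search")])  # assumed constructor shape
added = logic.add_tools([fake_tool("web_search"), fake_tool("read_file")])
# added == 1: "web_search" is already registered, only "read_file" is new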
@@ -40,52 +67,81 @@
     ) -> LLMRequest:
         """Build an LLM request for the ReAct agent.
 
-
-
-
-            guidance: Optional guidance text to inject
-            iteration: Current iteration number
-            max_iterations: Maximum allowed iterations
-            vars: Optional run.vars dict. If provided, limits are read from
-                vars["_limits"] (canonical) with fallback to instance defaults.
+        Notes:
+        - The user request belongs in the user-role message (prompt), not in the system prompt.
+        - Conversation + tool history is provided via `messages` by the runtime adapter.
         """
+        _ = messages  # history is carried out-of-band via chat messages
+
         task = str(task or "")
         guidance = str(guidance or "").strip()
 
-        #
+        # Output token cap (provider max_tokens) comes from `_limits.max_output_tokens`.
         limits = (vars or {}).get("_limits", {})
-
-
-
-
-
-
-
-
-
-
-        else
-
-
-
-
-
-
-
-
-        prompt = (
-            "You have access to the conversation history below as context.\n"
-            "Do not claim you have no memory of it; it is provided to you here.\n\n"
-            f"Iteration: {int(iteration)}/{int(max_iterations)}\n\n"
-            f"History:\n{history_text}\n\n"
-            "Continue the conversation and work on the user's latest request.\n"
-            "Use tools when needed, or provide a final answer."
+        max_output_tokens = limits.get("max_output_tokens", None)
+        if max_output_tokens is not None:
+            try:
+                max_output_tokens = int(max_output_tokens)
+            except Exception:
+                max_output_tokens = None
+
+        runtime_ns = (vars or {}).get("_runtime", {})
+        scratchpad = (vars or {}).get("scratchpad", {})
+        plan_mode = bool(runtime_ns.get("plan_mode")) if isinstance(runtime_ns, dict) else False
+        plan_text = scratchpad.get("plan") if isinstance(scratchpad, dict) else None
+        plan = str(plan_text).strip() if isinstance(plan_text, str) and plan_text.strip() else ""
+
+        prompt = task.strip()
+
+        output_budget_line = ""
+        if isinstance(max_output_tokens, int) and max_output_tokens > 0:
+            output_budget_line = (
+                f"- Output token limit for this response: {max_output_tokens}.\n"
         )
 
-
-
+        system_prompt = (
+            f"Iteration: {int(iteration)}/{int(max_iterations)}\n\n"
+            """## MY PERSONA
+I am a truthful and collaborative autonomous ReAct agent powered by the AbstractFramework. I am a creative critical thinker who balances ideas with constructive skepticism, always thinking of longer term consequences. I strive to be ethical and successful in all my actions and decisions. I am precise, clear, concise and direct in my responses, I avoid unnecessary verbosity.
+
+## AGENCY / AUTONOMY
+- You always analyze the intent behind every request to identify what is expected of you
+- If the answer is straightforward and do not need you to take action, you answer directly
+- If you need to take actions, it means you need to request the execution of one or more of the tools provided to you
+- Remember that you are NOT the one executing the tools, you are REQUESTING their execution to your host and you have to wait for them to return the results so you can continue
+- after each tool call, you must determine if the tools were successful and produced the effect you expected or if they failed to determine your next step
+- if the tools were NOT successful, request again the execution of those tools with new parameters, based on the feedback given by your host
+- if the tools were successful and you still have actions to take, then request a next series of tool executions
+- if the tools were successful but you have enough information and don’t have any other actions to take, then provide your final answer
+- The goal of autonomy is to define, at each loop, which are the set of independent tools you could run concurrently without affecting the end result. Try to request as many tool executions as you can, as long as you don’t need the result of one of them to plan the other
+
+## EVIDENCE & ACTION (IMPORTANT)
+- Be truthful: only claim actions that are supported by tool outputs.
+- If the task requires reading/editing/running anything, call the relevant tools. Do not “announce” actions without doing them.
+""").strip()
 
-
+        if guidance:
+            system_prompt = (system_prompt + "\n\nGuidance:\n" + guidance).strip()
+
+        if plan_mode and plan:
+            system_prompt = (system_prompt + "\n\nCurrent plan:\n" + plan).strip()
+
+        if plan_mode:
+            system_prompt = (
+                system_prompt
+                + "\n\nPlan mode:\n"
+                "- Maintain and update the plan as you work.\n"
+                "- If the plan changes, include a final section at the END of your message:\n"
+                "  Plan Update:\n"
+                "  <markdown checklist>\n"
+            ).strip()
+
+        return LLMRequest(
+            prompt=prompt,
+            system_prompt=system_prompt,
+            tools=self.tools,
+            max_tokens=max_output_tokens,
+        )
 
     def parse_response(self, response: Any) -> Tuple[str, List[ToolCall]]:
         if not isinstance(response, dict):
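The new plan-mode and output-budget behavior is driven entirely through vars; a sketch of the wiring, continuing the logic instance from the previous sketch:

request = logic.build_request(
    task="Refactor the config loader.",
    messages=[],  # history is carried out-of-band via chat messages
    iteration=2,
    max_iterations=20,
    vars={
        "_limits": {"max_output_tokens": 1024},  # capped output -> request.max_tokens
        "_runtime": {"plan_mode": True},         # appends the "Plan mode:" rules
        "scratchpad": {"plan": "- [x] read config\n- [ ] extract loader"},
    },
)
# request.system_prompt now carries the current plan followed by the Plan mode rules.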
@@ -93,6 +149,19 @@ class ReActLogic:
 
         content = response.get("content")
         content = "" if content is None else str(content)
+        # Some OSS models echo role labels; strip common prefixes to keep UI/history clean.
+        content = content.lstrip()
+        for prefix in ("assistant:", "assistant:"):
+            if content.lower().startswith(prefix):
+                content = content[len(prefix) :].lstrip()
+                break
+
+        # Some providers return a separate `reasoning` field. If content is empty, fall back
+        # to reasoning so iterative loops don't lose context.
+        if not content.strip():
+            reasoning = response.get("reasoning")
+            if isinstance(reasoning, str) and reasoning.strip():
+                content = reasoning.strip()
 
         tool_calls_raw = response.get("tool_calls") or []
         tool_calls: List[ToolCall] = []
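The two normalizations added to parse_response, illustrated on stand-in payloads:

content, calls = logic.parse_response({"content": "Assistant: done."})
# content == "done."  -- role-label prefix stripped case-insensitively

content, calls = logic.parse_response({"content": "", "reasoning": "thinking..."})
# content == "thinking..."  -- falls back to `reasoning` when content is empty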
@@ -108,15 +177,6 @@
                     if isinstance(args, dict):
                         tool_calls.append(ToolCall(name=name, arguments=dict(args), call_id=call_id))
 
-        # FALLBACK: Parse from content if no native tool calls
-        # Handles <|tool_call|>, <function_call>, ```tool_code, etc.
-        if not tool_calls and content:
-            from abstractcore.tools.parser import parse_tool_calls, detect_tool_calls
-            if detect_tool_calls(content):
-                # Pass model name for architecture-specific parsing
-                model_name = response.get("model")
-                tool_calls = parse_tool_calls(content, model_name=model_name)
-
         return content, tool_calls
 
     def format_observation(self, *, name: str, output: str, success: bool) -> str:
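Finally, the loop shape the new module docstring describes, as an illustrative driver. The real driver lives in abstractagent/adapters/react_runtime.py, which is not shown in this section; call_llm and run_tool are hypothetical stand-ins for the provider call and the tool executor.

def run_react(logic, task, call_llm, run_tool, max_iterations=20):
    messages = []
    content = ""
    for i in range(1, max_iterations + 1):
        request = logic.build_request(
            task=task, messages=messages, iteration=i, max_iterations=max_iterations
        )
        content, tool_calls = logic.parse_response(call_llm(request))
        if not tool_calls:
            break  # no further actions requested: content is the final answer
        for tc in tool_calls:
            ok, out = run_tool(tc)  # hypothetical executor -> (success, output)
            messages.append({
                "role": "tool",
                "content": logic.format_observation(name=tc.name, output=out, success=ok),
            })
    return content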