sentarc-agent 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sentarc_agent-0.1.0/PKG-INFO +6 -0
- sentarc_agent-0.1.0/README.md +383 -0
- sentarc_agent-0.1.0/pyproject.toml +17 -0
- sentarc_agent-0.1.0/sentarc_agent.egg-info/PKG-INFO +6 -0
- sentarc_agent-0.1.0/sentarc_agent.egg-info/SOURCES.txt +12 -0
- sentarc_agent-0.1.0/sentarc_agent.egg-info/dependency_links.txt +1 -0
- sentarc_agent-0.1.0/sentarc_agent.egg-info/requires.txt +1 -0
- sentarc_agent-0.1.0/sentarc_agent.egg-info/top_level.txt +1 -0
- sentarc_agent-0.1.0/setup.cfg +4 -0
- sentarc_agent-0.1.0/src/__init__.py +49 -0
- sentarc_agent-0.1.0/src/agent.py +346 -0
- sentarc_agent-0.1.0/src/agent_loop.py +387 -0
- sentarc_agent-0.1.0/src/types.py +185 -0
- sentarc_agent-0.1.0/tests/test_agent.py +155 -0
|
@@ -0,0 +1,383 @@
|
|
|
1
|
+
# sentarc-agent
|
|
2
|
+
|
|
3
|
+
Stateful agent with tool execution and event streaming. Built on `sentarc-ai`.
|
|
4
|
+
|
|
5
|
+
## Installation
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
pip install -e packages/agent
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
## Quick Start
|
|
12
|
+
|
|
13
|
+
```python
|
|
14
|
+
import asyncio
|
|
15
|
+
from sentarc_agent import Agent, AgentOptions
|
|
16
|
+
from sentarc_ai.models import get_model
|
|
17
|
+
|
|
18
|
+
async def main():
|
|
19
|
+
agent = Agent(AgentOptions(
|
|
20
|
+
initial_state={
|
|
21
|
+
"system_prompt": "You are a helpful assistant.",
|
|
22
|
+
"model": get_model("anthropic", "claude-3-5-sonnet-20240620"),
|
|
23
|
+
}
|
|
24
|
+
))
|
|
25
|
+
|
|
26
|
+
events = []
|
|
27
|
+
|
|
28
|
+
# Subscribe to the event loop directly
|
|
29
|
+
def on_event(event):
|
|
30
|
+
if getattr(event, "type", None) == "message_update":
|
|
31
|
+
if getattr(event.assistant_message_event, "type", None) == "text_delta":
|
|
32
|
+
print(event.assistant_message_event.text, end="", flush=True)
|
|
33
|
+
|
|
34
|
+
agent.subscribe(on_event)
|
|
35
|
+
|
|
36
|
+
# Prompt the agent
|
|
37
|
+
await agent.prompt("Hello!")
|
|
38
|
+
|
|
39
|
+
if __name__ == "__main__":
|
|
40
|
+
asyncio.run(main())
|
|
41
|
+
```
|
|
42
|
+
|
|
43
|
+
## Core Concepts
|
|
44
|
+
|
|
45
|
+
### AgentMessage vs LLM Message
|
|
46
|
+
|
|
47
|
+
The agent works with `AgentMessage`, a flexible type that can include:
|
|
48
|
+
- Standard LLM messages (dictionaries or `Message` objects with `user`, `assistant`, `toolResult` roles).
|
|
49
|
+
- Custom app-specific message types via mapping.
|
|
50
|
+
|
|
51
|
+
LLMs only understand `user`, `assistant`, and `tool`. The `convert_to_llm` function bridges this gap by filtering and transforming messages before each LLM call.
|
|
52
|
+
|
|
53
|
+
### Message Flow
|
|
54
|
+
|
|
55
|
+
```
|
|
56
|
+
AgentMessage[] → transform_context() → AgentMessage[] → convert_to_llm() → Message[] → LLM
|
|
57
|
+
(optional) (required)
|
|
58
|
+
```
|
|
59
|
+
|
|
60
|
+
1. **`transform_context`**: Prune old messages, inject external context.
|
|
61
|
+
2. **`convert_to_llm`**: Filter out UI-only messages, convert custom types to LLM format.
|
|
62
|
+
|
|
63
|
+
## Event Flow
|
|
64
|
+
|
|
65
|
+
The agent emits events for UI updates. Understanding the event sequence helps build responsive interfaces.
|
|
66
|
+
|
|
67
|
+
### `prompt()` Event Sequence
|
|
68
|
+
|
|
69
|
+
When you call `await agent.prompt("Hello")`:
|
|
70
|
+
|
|
71
|
+
```
|
|
72
|
+
prompt("Hello")
|
|
73
|
+
├─ agent_start
|
|
74
|
+
├─ turn_start
|
|
75
|
+
├─ message_start { message: userMessage } # Your prompt
|
|
76
|
+
├─ message_end { message: userMessage }
|
|
77
|
+
├─ message_start { message: assistantMessage } # LLM starts responding
|
|
78
|
+
├─ message_update { message: partial... } # Streaming chunks
|
|
79
|
+
├─ message_update { message: partial... }
|
|
80
|
+
├─ message_end { message: assistantMessage } # Complete response
|
|
81
|
+
├─ turn_end { message, tool_results: [] }
|
|
82
|
+
└─ agent_end { messages: [...] }
|
|
83
|
+
```
|
|
84
|
+
|
|
85
|
+
### With Tool Calls
|
|
86
|
+
|
|
87
|
+
If the assistant calls tools, the loop continues automatically:
|
|
88
|
+
|
|
89
|
+
```
|
|
90
|
+
prompt("Read config.json")
|
|
91
|
+
├─ agent_start
|
|
92
|
+
├─ turn_start
|
|
93
|
+
├─ message_start/end { userMessage }
|
|
94
|
+
├─ message_start { assistantMessage with toolCall }
|
|
95
|
+
├─ message_update...
|
|
96
|
+
├─ message_end { assistantMessage }
|
|
97
|
+
├─ tool_execution_start { toolCallId, toolName, args }
|
|
98
|
+
├─ tool_execution_update { partialResult } # If tool streams
|
|
99
|
+
├─ tool_execution_end { toolCallId, result }
|
|
100
|
+
├─ message_start/end { toolResultMessage }
|
|
101
|
+
├─ turn_end { message, tool_results: [toolResult] }
|
|
102
|
+
│
|
|
103
|
+
├─ turn_start # Next turn
|
|
104
|
+
├─ message_start { assistantMessage } # LLM responds to tool result
|
|
105
|
+
├─ message_update...
|
|
106
|
+
├─ message_end
|
|
107
|
+
├─ turn_end
|
|
108
|
+
└─ agent_end
|
|
109
|
+
```
|
|
110
|
+
|
|
111
|
+
### `continue_session()` Event Sequence
|
|
112
|
+
|
|
113
|
+
`continue_session()` resumes from existing context without adding a new message. Use it for retries after errors.
|
|
114
|
+
|
|
115
|
+
```python
|
|
116
|
+
# After an error, retry from current state
|
|
117
|
+
await agent.continue_session()
|
|
118
|
+
```
|
|
119
|
+
|
|
120
|
+
The last message in context must be `user` or `toolResult` (not `assistant`).
|
|
121
|
+
|
|
122
|
+
### Event Types
|
|
123
|
+
|
|
124
|
+
| Event | Description |
|
|
125
|
+
|-------|-------------|
|
|
126
|
+
| `agent_start` | Agent begins processing |
|
|
127
|
+
| `agent_end` | Agent completes with all new messages |
|
|
128
|
+
| `turn_start` | New turn begins (one LLM call + tool executions) |
|
|
129
|
+
| `turn_end` | Turn completes with assistant message and tool results |
|
|
130
|
+
| `message_start` | Any message begins (user, assistant, tool) |
|
|
131
|
+
| `message_update` | **Assistant only.** Includes `assistant_message_event` with delta |
|
|
132
|
+
| `message_end` | Message completes |
|
|
133
|
+
| `tool_execution_start` | Tool begins |
|
|
134
|
+
| `tool_execution_update` | Tool streams progress |
|
|
135
|
+
| `tool_execution_end` | Tool completes |
|
|
136
|
+
|
|
137
|
+
## Agent Options
|
|
138
|
+
|
|
139
|
+
```python
|
|
140
|
+
from sentarc_agent import AgentOptions
|
|
141
|
+
|
|
142
|
+
agent = Agent(AgentOptions(
|
|
143
|
+
# Initial state configurations
|
|
144
|
+
initial_state={
|
|
145
|
+
"system_prompt": "You are a helpful assistant.",
|
|
146
|
+
"model": get_model("openai", "gpt-4o"),
|
|
147
|
+
"thinking_level": "off", # or "minimal", "low", "medium", "high", "xhigh"
|
|
148
|
+
"tools": [my_tool],
|
|
149
|
+
"messages": [],
|
|
150
|
+
},
|
|
151
|
+
|
|
152
|
+
# Convert AgentMessage[] to LLM Message[] (required for custom message types)
|
|
153
|
+
convert_to_llm=lambda messages: default_convert_to_llm(messages),
|
|
154
|
+
|
|
155
|
+
# Transform context before convert_to_llm (for pruning, compaction)
|
|
156
|
+
transform_context=lambda messages, signal: prune_old_messages(messages),
|
|
157
|
+
|
|
158
|
+
# Steering mode: "one-at-a-time" (default) or "all"
|
|
159
|
+
steering_mode="one-at-a-time",
|
|
160
|
+
|
|
161
|
+
# Follow-up mode: "one-at-a-time" (default) or "all"
|
|
162
|
+
follow_up_mode="one-at-a-time",
|
|
163
|
+
|
|
164
|
+
# Custom stream function wrapper (if needed)
|
|
165
|
+
stream_fn=stream_simple,
|
|
166
|
+
|
|
167
|
+
# Session ID for provider caching
|
|
168
|
+
session_id="session-123",
|
|
169
|
+
|
|
170
|
+
# Dynamic API key resolution (for expiring tokens)
|
|
171
|
+
get_api_key=get_api_key_async,
|
|
172
|
+
|
|
173
|
+
# Optional map of token budgets per thinking level
|
|
174
|
+
thinking_budgets={
|
|
175
|
+
"low": 1024,
|
|
176
|
+
"medium": 2048,
|
|
177
|
+
"high": 4096
|
|
178
|
+
}
|
|
179
|
+
))
|
|
180
|
+
```
|
|
181
|
+
|
|
182
|
+
## Agent State
|
|
183
|
+
|
|
184
|
+
```python
|
|
185
|
+
from dataclasses import dataclass
|
|
186
|
+
from sentarc_agent import AgentState
|
|
187
|
+
|
|
188
|
+
# Agent State shape
|
|
189
|
+
@dataclass
|
|
190
|
+
class AgentState:
|
|
191
|
+
system_prompt: str
|
|
192
|
+
model: Optional[ModelDef]
|
|
193
|
+
thinking_level: str
|
|
194
|
+
tools: List[AgentTool]
|
|
195
|
+
messages: List[AgentMessage]
|
|
196
|
+
is_streaming: bool
|
|
197
|
+
stream_message: Optional[AgentMessage]
|
|
198
|
+
pending_tool_calls: Set[str]
|
|
199
|
+
error: Optional[str] = None
|
|
200
|
+
```
|
|
201
|
+
|
|
202
|
+
Access via `agent.state`. During streaming, `stream_message` contains the partial assistant message as it is being built.
|
|
203
|
+
|
|
204
|
+
## Methods
|
|
205
|
+
|
|
206
|
+
### Prompting
|
|
207
|
+
|
|
208
|
+
```python
|
|
209
|
+
# Text prompt
|
|
210
|
+
await agent.prompt("Hello")
|
|
211
|
+
|
|
212
|
+
# With images
|
|
213
|
+
from sentarc_ai.types import ImageContent
|
|
214
|
+
await agent.prompt("What's in this image?", [
|
|
215
|
+
ImageContent(type="image", data=b"...", mimeType="image/jpeg", source_type="base64")
|
|
216
|
+
])
|
|
217
|
+
|
|
218
|
+
# AgentMessage dictionary directly
|
|
219
|
+
import time
|
|
220
|
+
await agent.prompt({"role": "user", "content": "Hello", "timestamp": int(time.time() * 1000)})
|
|
221
|
+
|
|
222
|
+
# Continue from current context (last message must be user or toolResult)
|
|
223
|
+
await agent.continue_session()
|
|
224
|
+
```
|
|
225
|
+
|
|
226
|
+
### State Management
|
|
227
|
+
|
|
228
|
+
```python
|
|
229
|
+
agent.set_system_prompt("New prompt")
|
|
230
|
+
agent.set_model(get_model("openai", "gpt-4o"))
|
|
231
|
+
agent.set_thinking_level("medium")
|
|
232
|
+
agent.set_tools([my_tool])
|
|
233
|
+
agent.replace_messages(new_messages)
|
|
234
|
+
agent.append_message(message)
|
|
235
|
+
agent.clear_messages()
|
|
236
|
+
agent.reset() # Clear everything
|
|
237
|
+
```
|
|
238
|
+
|
|
239
|
+
### Session and Thinking Budgets
|
|
240
|
+
|
|
241
|
+
```python
|
|
242
|
+
agent.session_id = "session-123"
|
|
243
|
+
|
|
244
|
+
agent.thinking_budgets = {
|
|
245
|
+
"minimal": 128,
|
|
246
|
+
"low": 512,
|
|
247
|
+
"medium": 1024,
|
|
248
|
+
"high": 2048
|
|
249
|
+
}
|
|
250
|
+
```
|
|
251
|
+
|
|
252
|
+
### Control
|
|
253
|
+
|
|
254
|
+
```python
|
|
255
|
+
agent.abort() # Cancel current operation
|
|
256
|
+
await agent.wait_for_idle() # Wait for the current prompt/run to complete
|
|
257
|
+
```
|
|
258
|
+
|
|
259
|
+
### Events
|
|
260
|
+
|
|
261
|
+
```python
|
|
262
|
+
unsubscribe = agent.subscribe(lambda event: print(getattr(event, "type", "Unknown Event!")))
|
|
263
|
+
unsubscribe()
|
|
264
|
+
```
|
|
265
|
+
|
|
266
|
+
## Tools
|
|
267
|
+
|
|
268
|
+
Define tools using `AgentTool`:
|
|
269
|
+
|
|
270
|
+
```python
|
|
271
|
+
from sentarc_agent import AgentTool, AgentToolResult
|
|
272
|
+
from sentarc_ai.types import TextContent
|
|
273
|
+
|
|
274
|
+
async def execute_read_file(tool_call_id, params, signal, on_update):
|
|
275
|
+
import os
|
|
276
|
+
if not os.path.exists(params["path"]):
|
|
277
|
+
raise FileNotFoundError(f"File not found: {params['path']}")
|
|
278
|
+
|
|
279
|
+
with open(params["path"], "r") as f:
|
|
280
|
+
content = f.read()
|
|
281
|
+
|
|
282
|
+
return AgentToolResult(
|
|
283
|
+
content=[TextContent(type="text", text=content)],
|
|
284
|
+
details={"path": params["path"], "size": len(content)}
|
|
285
|
+
)
|
|
286
|
+
|
|
287
|
+
read_file_tool = AgentTool(
|
|
288
|
+
name="read_file",
|
|
289
|
+
label="Read File", # For UI display
|
|
290
|
+
description="Read a file's contents",
|
|
291
|
+
parameters={"type": "object", "properties": {"path": {"type": "string"}}},
|
|
292
|
+
execute=execute_read_file
|
|
293
|
+
)
|
|
294
|
+
|
|
295
|
+
agent.set_tools([read_file_tool])
|
|
296
|
+
```
|
|
297
|
+
|
|
298
|
+
### Error Handling
|
|
299
|
+
|
|
300
|
+
**Throw an error** when a tool fails. Do not return error messages directly inside content blocks. Thrown exceptions are caught by the agent and automatically mapped into the LLM context with `is_error=True`.
|
|
301
|
+
|
|
302
|
+
## Steering and Follow-up (Queues)
|
|
303
|
+
|
|
304
|
+
Interrupt the agent while tools are running (`steer()`), or queue messages to respond once execution finishes (`follow_up()`).
|
|
305
|
+
|
|
306
|
+
```python
|
|
307
|
+
agent.set_steering_mode("one-at-a-time")
|
|
308
|
+
agent.set_follow_up_mode("one-at-a-time")
|
|
309
|
+
|
|
310
|
+
# While agent is running tools:
|
|
311
|
+
agent.steer({
|
|
312
|
+
"role": "user",
|
|
313
|
+
"content": "Stop! Do this instead.",
|
|
314
|
+
"timestamp": int(time.time() * 1000)
|
|
315
|
+
})
|
|
316
|
+
|
|
317
|
+
# Or follow up cleanly after the agent finishes its current work:
|
|
318
|
+
agent.follow_up({
|
|
319
|
+
"role": "user",
|
|
320
|
+
"content": "Also summarize the result.",
|
|
321
|
+
"timestamp": int(time.time() * 1000)
|
|
322
|
+
})
|
|
323
|
+
|
|
324
|
+
agent.clear_steering_queue()
|
|
325
|
+
agent.clear_follow_up_queue()
|
|
326
|
+
agent.clear_all_queues()
|
|
327
|
+
```
|
|
328
|
+
|
|
329
|
+
Use `clear_steering_queue`, `clear_follow_up_queue`, or `clear_all_queues` to drop queued messages dynamically.
|
|
330
|
+
|
|
331
|
+
When steering messages are detected after a tool completes:
|
|
332
|
+
1. Remaining tools are skipped with error results
|
|
333
|
+
2. Steering messages are injected
|
|
334
|
+
3. LLM responds to the interruption
|
|
335
|
+
|
|
336
|
+
Follow-up messages are checked only when there are no more tool calls and no steering messages. If any are queued, they are injected and another turn runs.
|
|
337
|
+
|
|
338
|
+
## Custom Message Types
|
|
339
|
+
|
|
340
|
+
Unlike TypeScript, which requires declaration merging to extend message types, Python's `AgentMessage` union naturally accepts arbitrary `Dict[str, Any]` payloads.
|
|
341
|
+
|
|
342
|
+
You can define custom message schemas:
|
|
343
|
+
|
|
344
|
+
```python
|
|
345
|
+
# Valid custom AgentMessage (since the typing union supports 'User', 'Assistant', 'Tool', and 'Any dict')
|
|
346
|
+
notification_msg = {
|
|
347
|
+
"role": "notification",
|
|
348
|
+
"text": "File updated successfully",
|
|
349
|
+
"timestamp": int(time.time() * 1000)
|
|
350
|
+
}
|
|
351
|
+
```
|
|
352
|
+
|
|
353
|
+
Then, you only need to ensure they are filtered out in your `convert_to_llm` middleware function before the actual provider call:
|
|
354
|
+
|
|
355
|
+
```python
|
|
356
|
+
def my_custom_converter(messages):
|
|
357
|
+
llm_messages = []
|
|
358
|
+
for msg in messages:
|
|
359
|
+
if isinstance(msg, dict) and msg.get("role") == "notification":
|
|
360
|
+
continue # Filter out custom UI roles
|
|
361
|
+
llm_messages.append(msg)
|
|
362
|
+
return llm_messages
|
|
363
|
+
|
|
364
|
+
agent = Agent(AgentOptions(convert_to_llm=my_custom_converter))
|
|
365
|
+
```
|
|
366
|
+
|
|
367
|
+
## Low-Level API
|
|
368
|
+
|
|
369
|
+
For direct control without the stateful `Agent` wrapper, you can interact directly with the asynchronous generator functions defined in `agent_loop.py`:
|
|
370
|
+
|
|
371
|
+
```python
|
|
372
|
+
from sentarc_agent import agent_loop, AgentContext, AgentLoopConfig
|
|
373
|
+
|
|
374
|
+
context = AgentContext(system_prompt="...", messages=[], tools=[])
|
|
375
|
+
config = AgentLoopConfig(...)
|
|
376
|
+
|
|
377
|
+
async for event in agent_loop(messages, context, config):
|
|
378
|
+
print(getattr(event, "type", None))
|
|
379
|
+
```
|
|
380
|
+
|
|
381
|
+
## License
|
|
382
|
+
MIT
|
|
383
|
+
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
[project]
|
|
2
|
+
name = "sentarc-agent"
|
|
3
|
+
version = "0.1.0"
|
|
4
|
+
description = "Stateful agent loop with tool execution and event streaming"
|
|
5
|
+
requires-python = ">=3.11"
|
|
6
|
+
dependencies = [
|
|
7
|
+
"sentarc-ai",
|
|
8
|
+
]
|
|
9
|
+
|
|
10
|
+
[build-system]
|
|
11
|
+
requires = ["setuptools>=68"]
|
|
12
|
+
build-backend = "setuptools.build_meta"
|
|
13
|
+
|
|
14
|
+
[tool.setuptools]
|
|
15
|
+
# sentarc-mono: files live directly in src/, mapped to sentarc_agent package
|
|
16
|
+
package-dir = {"sentarc_agent" = "src"}
|
|
17
|
+
packages = ["sentarc_agent"]
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
README.md
|
|
2
|
+
pyproject.toml
|
|
3
|
+
sentarc_agent.egg-info/PKG-INFO
|
|
4
|
+
sentarc_agent.egg-info/SOURCES.txt
|
|
5
|
+
sentarc_agent.egg-info/dependency_links.txt
|
|
6
|
+
sentarc_agent.egg-info/requires.txt
|
|
7
|
+
sentarc_agent.egg-info/top_level.txt
|
|
8
|
+
src/__init__.py
|
|
9
|
+
src/agent.py
|
|
10
|
+
src/agent_loop.py
|
|
11
|
+
src/types.py
|
|
12
|
+
tests/test_agent.py
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
sentarc-ai
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
sentarc_agent
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
from .types import (
|
|
2
|
+
AgentMessage,
|
|
3
|
+
AgentTool,
|
|
4
|
+
AgentToolResult,
|
|
5
|
+
AgentContext,
|
|
6
|
+
AgentState,
|
|
7
|
+
AgentLoopConfig,
|
|
8
|
+
AgentEvent,
|
|
9
|
+
AgentStartEvent,
|
|
10
|
+
AgentEndEvent,
|
|
11
|
+
TurnStartEvent,
|
|
12
|
+
TurnEndEvent,
|
|
13
|
+
MessageStartEvent,
|
|
14
|
+
MessageUpdateEvent,
|
|
15
|
+
MessageEndEvent,
|
|
16
|
+
ToolExecutionStartEvent,
|
|
17
|
+
ToolExecutionUpdateEvent,
|
|
18
|
+
ToolExecutionEndEvent,
|
|
19
|
+
ThinkingLevel,
|
|
20
|
+
StreamFn
|
|
21
|
+
)
|
|
22
|
+
from .agent_loop import agent_loop, agent_loop_continue
|
|
23
|
+
from .agent import Agent, AgentOptions
|
|
24
|
+
|
|
25
|
+
__all__ = [
|
|
26
|
+
"AgentMessage",
|
|
27
|
+
"AgentTool",
|
|
28
|
+
"AgentToolResult",
|
|
29
|
+
"AgentContext",
|
|
30
|
+
"AgentState",
|
|
31
|
+
"AgentLoopConfig",
|
|
32
|
+
"AgentEvent",
|
|
33
|
+
"AgentStartEvent",
|
|
34
|
+
"AgentEndEvent",
|
|
35
|
+
"TurnStartEvent",
|
|
36
|
+
"TurnEndEvent",
|
|
37
|
+
"MessageStartEvent",
|
|
38
|
+
"MessageUpdateEvent",
|
|
39
|
+
"MessageEndEvent",
|
|
40
|
+
"ToolExecutionStartEvent",
|
|
41
|
+
"ToolExecutionUpdateEvent",
|
|
42
|
+
"ToolExecutionEndEvent",
|
|
43
|
+
"ThinkingLevel",
|
|
44
|
+
"StreamFn",
|
|
45
|
+
"agent_loop",
|
|
46
|
+
"agent_loop_continue",
|
|
47
|
+
"Agent",
|
|
48
|
+
"AgentOptions"
|
|
49
|
+
]
|