pi-agents-py 0.58.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pi_agents_py-0.58.3/PKG-INFO +43 -0
- pi_agents_py-0.58.3/pyproject.toml +67 -0
- pi_agents_py-0.58.3/setup.cfg +4 -0
- pi_agents_py-0.58.3/src/pi_agents/__init__.py +1 -0
- pi_agents_py-0.58.3/src/pi_agents/agent/__init__.py +1 -0
- pi_agents_py-0.58.3/src/pi_agents/agent/agent.py +303 -0
- pi_agents_py-0.58.3/src/pi_agents/agent/agent_loop.py +318 -0
- pi_agents_py-0.58.3/src/pi_agents/agent/proxy.py +10 -0
- pi_agents_py-0.58.3/src/pi_agents/agent/types.py +105 -0
- pi_agents_py-0.58.3/src/pi_agents/ai/__init__.py +59 -0
- pi_agents_py-0.58.3/src/pi_agents/ai/api_registry.py +69 -0
- pi_agents_py-0.58.3/src/pi_agents/ai/env_api_keys.py +90 -0
- pi_agents_py-0.58.3/src/pi_agents/ai/models.py +102 -0
- pi_agents_py-0.58.3/src/pi_agents/ai/models_data.json +15905 -0
- pi_agents_py-0.58.3/src/pi_agents/ai/oauth.py +27 -0
- pi_agents_py-0.58.3/src/pi_agents/ai/providers/__init__.py +1 -0
- pi_agents_py-0.58.3/src/pi_agents/ai/providers/anthropic.py +361 -0
- pi_agents_py-0.58.3/src/pi_agents/ai/providers/base.py +33 -0
- pi_agents_py-0.58.3/src/pi_agents/ai/providers/google.py +166 -0
- pi_agents_py-0.58.3/src/pi_agents/ai/providers/mistral.py +159 -0
- pi_agents_py-0.58.3/src/pi_agents/ai/providers/openai_completions.py +313 -0
- pi_agents_py-0.58.3/src/pi_agents/ai/providers/openai_responses.py +180 -0
- pi_agents_py-0.58.3/src/pi_agents/ai/providers/register_builtins.py +83 -0
- pi_agents_py-0.58.3/src/pi_agents/ai/stream.py +62 -0
- pi_agents_py-0.58.3/src/pi_agents/ai/types.py +275 -0
- pi_agents_py-0.58.3/src/pi_agents/ai/utils/__init__.py +1 -0
- pi_agents_py-0.58.3/src/pi_agents/ai/utils/event_stream.py +78 -0
- pi_agents_py-0.58.3/src/pi_agents/ai/utils/json_parse.py +71 -0
- pi_agents_py-0.58.3/src/pi_agents/ai/utils/overflow.py +61 -0
- pi_agents_py-0.58.3/src/pi_agents/ai/utils/serialization.py +44 -0
- pi_agents_py-0.58.3/src/pi_agents/ai/utils/typebox_helpers.py +22 -0
- pi_agents_py-0.58.3/src/pi_agents/ai/utils/validation.py +42 -0
- pi_agents_py-0.58.3/src/pi_agents/coding_agent/__init__.py +1 -0
- pi_agents_py-0.58.3/src/pi_agents/coding_agent/compaction.py +141 -0
- pi_agents_py-0.58.3/src/pi_agents/coding_agent/config.py +67 -0
- pi_agents_py-0.58.3/src/pi_agents/coding_agent/skills.py +234 -0
- pi_agents_py-0.58.3/src/pi_agents/coding_agent/tools/__init__.py +1 -0
- pi_agents_py-0.58.3/src/pi_agents/coding_agent/tools/bash_tool.py +88 -0
- pi_agents_py-0.58.3/src/pi_agents/coding_agent/tools/edit_tool.py +130 -0
- pi_agents_py-0.58.3/src/pi_agents/coding_agent/tools/find_tool.py +75 -0
- pi_agents_py-0.58.3/src/pi_agents/coding_agent/tools/grep_tool.py +122 -0
- pi_agents_py-0.58.3/src/pi_agents/coding_agent/tools/ls_tool.py +85 -0
- pi_agents_py-0.58.3/src/pi_agents/coding_agent/tools/read_tool.py +121 -0
- pi_agents_py-0.58.3/src/pi_agents/coding_agent/tools/tools.py +35 -0
- pi_agents_py-0.58.3/src/pi_agents/coding_agent/tools/truncate.py +178 -0
- pi_agents_py-0.58.3/src/pi_agents/coding_agent/tools/write_tool.py +63 -0
- pi_agents_py-0.58.3/src/pi_agents/mom/__init__.py +1 -0
- pi_agents_py-0.58.3/src/pi_agents/mom/sandbox.py +100 -0
- pi_agents_py-0.58.3/src/pi_agents/mom/slack.py +30 -0
- pi_agents_py-0.58.3/src/pi_agents/pods/__init__.py +1 -0
- pi_agents_py-0.58.3/src/pi_agents/pods/config.py +114 -0
- pi_agents_py-0.58.3/src/pi_agents/pods/model_configs.py +128 -0
- pi_agents_py-0.58.3/src/pi_agents/pods/ssh.py +36 -0
- pi_agents_py-0.58.3/src/pi_agents/pods/types.py +44 -0
- pi_agents_py-0.58.3/src/pi_agents/tui/__init__.py +1 -0
- pi_agents_py-0.58.3/src/pi_agents/tui/fuzzy.py +105 -0
- pi_agents_py-0.58.3/src/pi_agents/tui/keys.py +409 -0
- pi_agents_py-0.58.3/src/pi_agents/tui/terminal.py +232 -0
- pi_agents_py-0.58.3/src/pi_agents/tui/text.py +159 -0
- pi_agents_py-0.58.3/src/pi_agents/web_ui/__init__.py +1 -0
- pi_agents_py-0.58.3/src/pi_agents/web_ui/format.py +95 -0
- pi_agents_py-0.58.3/src/pi_agents/web_ui/messages.py +26 -0
- pi_agents_py-0.58.3/src/pi_agents/web_ui/tools.py +55 -0
- pi_agents_py-0.58.3/src/pi_agents_py.egg-info/PKG-INFO +43 -0
- pi_agents_py-0.58.3/src/pi_agents_py.egg-info/SOURCES.txt +66 -0
- pi_agents_py-0.58.3/src/pi_agents_py.egg-info/dependency_links.txt +1 -0
- pi_agents_py-0.58.3/src/pi_agents_py.egg-info/requires.txt +35 -0
- pi_agents_py-0.58.3/src/pi_agents_py.egg-info/top_level.txt +1 -0
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: pi-agents-py
|
|
3
|
+
Version: 0.58.3
|
|
4
|
+
Summary: Python clone of pi-mono — multi-provider LLM agent framework
|
|
5
|
+
Author: Agentsable
|
|
6
|
+
License-Expression: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/Agentsable/pi-agents-py
|
|
8
|
+
Project-URL: Repository, https://github.com/Agentsable/pi-agents-py
|
|
9
|
+
Keywords: llm,agents,ai,anthropic,openai,multi-provider
|
|
10
|
+
Classifier: Development Status :: 4 - Beta
|
|
11
|
+
Classifier: Intended Audience :: Developers
|
|
12
|
+
Classifier: Programming Language :: Python :: 3
|
|
13
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
16
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
17
|
+
Requires-Python: >=3.11
|
|
18
|
+
Requires-Dist: httpx[http2]>=0.27
|
|
19
|
+
Requires-Dist: jsonschema>=4.20
|
|
20
|
+
Requires-Dist: pyyaml>=6.0
|
|
21
|
+
Provides-Extra: anthropic
|
|
22
|
+
Requires-Dist: anthropic>=0.40; extra == "anthropic"
|
|
23
|
+
Provides-Extra: openai
|
|
24
|
+
Requires-Dist: openai>=1.50; extra == "openai"
|
|
25
|
+
Provides-Extra: google
|
|
26
|
+
Requires-Dist: google-genai>=1.0; extra == "google"
|
|
27
|
+
Provides-Extra: mistral
|
|
28
|
+
Requires-Dist: mistralai>=1.0; extra == "mistral"
|
|
29
|
+
Provides-Extra: bedrock
|
|
30
|
+
Requires-Dist: boto3>=1.34; extra == "bedrock"
|
|
31
|
+
Provides-Extra: slack
|
|
32
|
+
Requires-Dist: slack-sdk>=3.30; extra == "slack"
|
|
33
|
+
Provides-Extra: tui
|
|
34
|
+
Requires-Dist: rich>=13.0; extra == "tui"
|
|
35
|
+
Provides-Extra: dev
|
|
36
|
+
Requires-Dist: pytest>=8.0; extra == "dev"
|
|
37
|
+
Requires-Dist: pytest-asyncio>=0.23; extra == "dev"
|
|
38
|
+
Requires-Dist: ruff>=0.5; extra == "dev"
|
|
39
|
+
Requires-Dist: fastapi>=0.115; extra == "dev"
|
|
40
|
+
Requires-Dist: uvicorn[standard]>=0.30; extra == "dev"
|
|
41
|
+
Requires-Dist: sse-starlette>=2.0; extra == "dev"
|
|
42
|
+
Provides-Extra: all
|
|
43
|
+
Requires-Dist: pi-agents-py[anthropic,bedrock,dev,google,mistral,openai,slack,tui]; extra == "all"
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools>=68.0", "wheel"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "pi-agents-py"
|
|
7
|
+
version = "0.58.3"
|
|
8
|
+
description = "Python clone of pi-mono — multi-provider LLM agent framework"
|
|
9
|
+
requires-python = ">=3.11"
|
|
10
|
+
license = "MIT"
|
|
11
|
+
authors = [{name = "Agentsable"}]
|
|
12
|
+
keywords = ["llm", "agents", "ai", "anthropic", "openai", "multi-provider"]
|
|
13
|
+
classifiers = [
|
|
14
|
+
"Development Status :: 4 - Beta",
|
|
15
|
+
"Intended Audience :: Developers",
|
|
16
|
+
"Programming Language :: Python :: 3",
|
|
17
|
+
"Programming Language :: Python :: 3.11",
|
|
18
|
+
"Programming Language :: Python :: 3.12",
|
|
19
|
+
"Programming Language :: Python :: 3.13",
|
|
20
|
+
"Topic :: Scientific/Engineering :: Artificial Intelligence",
|
|
21
|
+
]
|
|
22
|
+
dependencies = [
|
|
23
|
+
"httpx[http2]>=0.27",
|
|
24
|
+
"jsonschema>=4.20",
|
|
25
|
+
"pyyaml>=6.0",
|
|
26
|
+
]
|
|
27
|
+
|
|
28
|
+
[project.urls]
|
|
29
|
+
Homepage = "https://github.com/Agentsable/pi-agents-py"
|
|
30
|
+
Repository = "https://github.com/Agentsable/pi-agents-py"
|
|
31
|
+
|
|
32
|
+
[project.optional-dependencies]
|
|
33
|
+
anthropic = ["anthropic>=0.40"]
|
|
34
|
+
openai = ["openai>=1.50"]
|
|
35
|
+
google = ["google-genai>=1.0"]
|
|
36
|
+
mistral = ["mistralai>=1.0"]
|
|
37
|
+
bedrock = ["boto3>=1.34"]
|
|
38
|
+
slack = ["slack-sdk>=3.30"]
|
|
39
|
+
tui = ["rich>=13.0"]
|
|
40
|
+
dev = [
|
|
41
|
+
"pytest>=8.0",
|
|
42
|
+
"pytest-asyncio>=0.23",
|
|
43
|
+
"ruff>=0.5",
|
|
44
|
+
"fastapi>=0.115",
|
|
45
|
+
"uvicorn[standard]>=0.30",
|
|
46
|
+
"sse-starlette>=2.0",
|
|
47
|
+
]
|
|
48
|
+
all = [
|
|
49
|
+
"pi-agents-py[anthropic,openai,google,mistral,bedrock,slack,tui,dev]",
|
|
50
|
+
]
|
|
51
|
+
|
|
52
|
+
[tool.setuptools.packages.find]
|
|
53
|
+
where = ["src"]
|
|
54
|
+
|
|
55
|
+
[tool.setuptools.package-data]
|
|
56
|
+
"pi_agents.ai" = ["models_data.json"]
|
|
57
|
+
|
|
58
|
+
[tool.ruff]
|
|
59
|
+
line-length = 120
|
|
60
|
+
target-version = "py311"
|
|
61
|
+
|
|
62
|
+
[tool.ruff.format]
|
|
63
|
+
quote-style = "double"
|
|
64
|
+
|
|
65
|
+
[tool.pytest.ini_options]
|
|
66
|
+
asyncio_mode = "auto"
|
|
67
|
+
testpaths = ["tests"]
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""pi-agents-py — Python clone of pi-mono multi-provider LLM agent framework."""
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Agent runtime — stateful agent with tool execution and event streaming."""
|
|
@@ -0,0 +1,303 @@
|
|
|
1
|
+
"""Agent class — stateful agent with tool execution and event streaming."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import asyncio
|
|
6
|
+
from typing import Any, Callable
|
|
7
|
+
|
|
8
|
+
from pi_agents.ai.types import (
|
|
9
|
+
AssistantMessage,
|
|
10
|
+
ImageContent,
|
|
11
|
+
Message,
|
|
12
|
+
Model,
|
|
13
|
+
TextContent,
|
|
14
|
+
UserMessage,
|
|
15
|
+
)
|
|
16
|
+
|
|
17
|
+
from pi_agents.agent.agent_loop import run_agent_loop, run_agent_loop_continue
|
|
18
|
+
from pi_agents.agent.types import (
|
|
19
|
+
AgentEvent,
|
|
20
|
+
AgentMessage,
|
|
21
|
+
AgentState,
|
|
22
|
+
AgentTool,
|
|
23
|
+
BeforeToolCallResult,
|
|
24
|
+
AfterToolCallResult,
|
|
25
|
+
ToolExecutionMode,
|
|
26
|
+
)
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class Agent:
    """Stateful agent that orchestrates LLM interactions and tool execution.

    Holds conversation state (:class:`AgentState`), dispatches the agent loop
    (``run_agent_loop`` / ``run_agent_loop_continue``), and fans events out to
    subscribers. The constructor accepts a loosely-typed ``opts`` dict with
    both ``snake_case`` and ``camelCase`` keys (the camelCase aliases mirror
    the original TypeScript API this package is a clone of).
    """

    def __init__(self, opts: dict[str, Any] | None = None):
        """Create an agent.

        Args:
            opts: Optional configuration dict. Recognized keys (snake_case or
                camelCase): ``initial_state``, ``convert_to_llm``,
                ``transform_context``, ``steering_mode``, ``follow_up_mode``,
                ``stream_fn``, ``session_id``, ``get_api_key``,
                ``thinking_budgets``, ``transport``, ``max_retry_delay_ms``,
                ``tool_execution``, ``before_tool_call``, ``after_tool_call``.
        """
        opts = opts or {}
        initial_state = opts.get("initial_state", opts.get("initialState", {}))

        self._state = AgentState(
            system_prompt=initial_state.get("systemPrompt", initial_state.get("system_prompt", "")),
            model=initial_state.get("model"),
            thinking_level=initial_state.get("thinkingLevel", initial_state.get("thinking_level", "off")),
            # Copy caller-supplied lists so later external mutation cannot
            # silently corrupt agent state (consistent with replace_messages,
            # which also copies).
            tools=list(initial_state.get("tools", [])),
            messages=list(initial_state.get("messages", [])),
        )

        self._convert_to_llm = opts.get("convert_to_llm", opts.get("convertToLlm", self._default_convert))
        self._transform_context = opts.get("transform_context", opts.get("transformContext"))
        self._steering_mode = opts.get("steering_mode", opts.get("steeringMode", "one-at-a-time"))
        self._follow_up_mode = opts.get("follow_up_mode", opts.get("followUpMode", "one-at-a-time"))
        self._stream_fn = opts.get("stream_fn", opts.get("streamFn"))
        self._session_id = opts.get("session_id", opts.get("sessionId"))
        self._get_api_key = opts.get("get_api_key", opts.get("getApiKey"))
        self._thinking_budgets = opts.get("thinking_budgets", opts.get("thinkingBudgets"))
        self._transport = opts.get("transport")
        self._max_retry_delay_ms = opts.get("max_retry_delay_ms", opts.get("maxRetryDelayMs"))
        self._tool_execution: ToolExecutionMode = opts.get("tool_execution", opts.get("toolExecution", "parallel"))
        self._before_tool_call = opts.get("before_tool_call", opts.get("beforeToolCall"))
        self._after_tool_call = opts.get("after_tool_call", opts.get("afterToolCall"))

        self._subscribers: list[Callable[[AgentEvent], None]] = []
        self._steering_queue: list[AgentMessage] = []
        self._follow_up_queue: list[AgentMessage] = []
        # NOTE(review): _abort_controller is never assigned anywhere in this
        # file, so abort() is currently a no-op — confirm the loop is supposed
        # to install an asyncio.Event here before relying on abort().
        self._abort_controller: asyncio.Event | None = None
        # Set (idle) when no run is in progress; cleared for the duration of
        # prompt()/continue_() so wait_for_idle() can block on it.
        self._idle_event = asyncio.Event()
        self._idle_event.set()

    @staticmethod
    def _default_convert(messages: list[AgentMessage]) -> list[Message]:
        """Default message converter — keeps user/assistant/toolResult roles.

        Accepts both attribute-style message objects and plain dicts; anything
        with any other role (e.g. custom app-level messages) is dropped before
        the history is handed to the LLM.
        """
        allowed = ("user", "assistant", "toolResult")
        kept: list[Message] = []
        for msg in messages:
            role = getattr(msg, "role", None)
            if role is None and isinstance(msg, dict):
                role = msg.get("role")
            if role in allowed:
                kept.append(msg)
        return kept

    @property
    def state(self) -> AgentState:
        """The live (mutable) agent state."""
        return self._state

    @property
    def session_id(self) -> str | None:
        """Opaque session identifier, if any."""
        return self._session_id

    @session_id.setter
    def session_id(self, value: str | None):
        self._session_id = value

    @property
    def tool_execution(self) -> ToolExecutionMode:
        """Current tool execution mode (e.g. "parallel")."""
        return self._tool_execution

    async def prompt(
        self,
        message: str | AgentMessage | list[AgentMessage],
        images: list[ImageContent] | None = None,
    ) -> None:
        """Send a prompt and run the agent loop until completion.

        Args:
            message: A plain string, a single message, or a list of messages.
            images: Optional image attachments; only used when ``message`` is
                a string (combined with the text into multi-part content).

        Errors from the loop are captured into ``state.error`` rather than
        raised, matching the event-driven consumption model.
        """
        prompts = self._build_prompts(message, images)

        self._begin_run()
        try:
            result = await run_agent_loop(
                prompts=prompts,
                context=self._snapshot_context(),
                config=self._build_config(),
                emit=self._emit,
                stream_fn=self._stream_fn,
            )
            # The loop returns the full updated history; adopt it wholesale.
            # (A defensive copy avoids aliasing the loop's internal list.)
            if result:
                self._state.messages = list(result)
        except Exception as e:
            self._state.error = str(e)
        finally:
            self._finish_run()

    async def continue_(self) -> None:
        """Resume the agent loop from the current context.

        Unlike :meth:`prompt`, the loop result here is *appended* to the
        existing history rather than replacing it.
        """
        self._begin_run()
        try:
            result = await run_agent_loop_continue(
                context=self._snapshot_context(),
                config=self._build_config(),
                emit=self._emit,
                stream_fn=self._stream_fn,
            )
            if result:
                self._state.messages.extend(result)
        except Exception as e:
            self._state.error = str(e)
        finally:
            self._finish_run()

    def _build_prompts(
        self,
        message: str | AgentMessage | list[AgentMessage],
        images: list[ImageContent] | None,
    ) -> list[AgentMessage]:
        """Normalize a prompt argument into a list of AgentMessage."""
        if isinstance(message, list):
            return message
        if isinstance(message, str):
            content: Any = message
            if images:
                content = [TextContent(type="text", text=message)] + list(images)
            # NOTE(review): timestamp=0 looks like a placeholder — confirm
            # whether callers expect a real epoch timestamp here.
            return [UserMessage(role="user", content=content, timestamp=0)]
        return [message]

    def _snapshot_context(self) -> dict[str, Any]:
        """Build the context dict handed to the agent loop.

        Messages are copied so the loop can mutate its own list freely; tools
        are shared by reference (the loop does not mutate them here).
        """
        return {
            "system_prompt": self._state.system_prompt,
            "messages": list(self._state.messages),
            "tools": self._state.tools,
        }

    def _begin_run(self) -> None:
        """Mark the agent as streaming and clear any prior error."""
        self._state.is_streaming = True
        self._state.error = None
        self._idle_event.clear()

    def _finish_run(self) -> None:
        """Reset transient streaming state and signal idleness (always runs)."""
        self._state.is_streaming = False
        self._state.stream_message = None
        self._state.pending_tool_calls.clear()
        self._idle_event.set()

    def abort(self) -> None:
        """Abort the current agent loop (no-op if no abort event is wired)."""
        if self._abort_controller:
            self._abort_controller.set()

    async def wait_for_idle(self) -> None:
        """Block until the agent finishes any in-flight run."""
        await self._idle_event.wait()

    def reset(self) -> None:
        """Clear all conversation state and queued messages."""
        self._state.messages.clear()
        self._state.is_streaming = False
        self._state.stream_message = None
        self._state.pending_tool_calls.clear()
        self._state.error = None
        self._steering_queue.clear()
        self._follow_up_queue.clear()

    def subscribe(self, fn: Callable[[AgentEvent], None]) -> Callable[[], None]:
        """Subscribe to agent events.

        Returns:
            An unsubscribe callable; safe to invoke more than once.
        """
        self._subscribers.append(fn)

        def unsubscribe() -> None:
            if fn in self._subscribers:
                self._subscribers.remove(fn)

        return unsubscribe

    # --- State setters -------------------------------------------------

    def set_system_prompt(self, v: str) -> None:
        self._state.system_prompt = v

    def set_model(self, m: Model) -> None:
        self._state.model = m

    def set_thinking_level(self, level: str) -> None:
        self._state.thinking_level = level

    def set_tools(self, tools: list[AgentTool]) -> None:
        self._state.tools = tools

    def set_tool_execution(self, mode: ToolExecutionMode) -> None:
        self._tool_execution = mode

    def set_steering_mode(self, mode: str) -> None:
        self._steering_mode = mode

    def set_follow_up_mode(self, mode: str) -> None:
        self._follow_up_mode = mode

    def get_steering_mode(self) -> str:
        return self._steering_mode

    def get_follow_up_mode(self) -> str:
        return self._follow_up_mode

    # --- Message management --------------------------------------------

    def replace_messages(self, messages: list[AgentMessage]) -> None:
        """Replace the entire history (a copy of *messages* is stored)."""
        self._state.messages = list(messages)

    def append_message(self, message: AgentMessage) -> None:
        self._state.messages.append(message)

    def clear_messages(self) -> None:
        self._state.messages.clear()

    # --- Queue management ----------------------------------------------

    def steer(self, message: AgentMessage) -> None:
        """Queue a message to be injected mid-run (steering)."""
        self._steering_queue.append(message)

    def follow_up(self, message: AgentMessage) -> None:
        """Queue a message to run after the current run completes."""
        self._follow_up_queue.append(message)

    def clear_steering_queue(self) -> None:
        self._steering_queue.clear()

    def clear_follow_up_queue(self) -> None:
        self._follow_up_queue.clear()

    def clear_all_queues(self) -> None:
        self._steering_queue.clear()
        self._follow_up_queue.clear()

    def has_queued_messages(self) -> bool:
        """True if any steering or follow-up message is pending."""
        return bool(self._steering_queue) or bool(self._follow_up_queue)

    def _build_config(self) -> dict[str, Any]:
        """Assemble the agent-loop config from current agent settings."""
        return {
            "model": self._state.model,
            "convert_to_llm": self._convert_to_llm,
            "transform_context": self._transform_context,
            "get_api_key": self._get_api_key,
            "get_steering_messages": self._get_steering_messages,
            "get_follow_up_messages": self._get_follow_up_messages,
            "tool_execution": self._tool_execution,
            "before_tool_call": self._before_tool_call,
            "after_tool_call": self._after_tool_call,
            "thinking_level": self._state.thinking_level,
        }

    async def _get_steering_messages(self) -> list[AgentMessage]:
        """Drain the steering queue per the configured steering mode.

        "all" drains everything at once; any other mode pops one at a time.
        """
        return self._drain(self._steering_queue, self._steering_mode)

    async def _get_follow_up_messages(self) -> list[AgentMessage]:
        """Drain the follow-up queue per the configured follow-up mode."""
        return self._drain(self._follow_up_queue, self._follow_up_mode)

    @staticmethod
    def _drain(queue: list[AgentMessage], mode: str) -> list[AgentMessage]:
        """Pop messages from *queue*: all of them when mode == "all", else one."""
        if not queue:
            return []
        if mode == "all":
            msgs = list(queue)
            queue.clear()
            return msgs
        return [queue.pop(0)]

    def _emit(self, event: AgentEvent) -> None:
        """Emit an event to all subscribers; subscriber errors are swallowed
        so one faulty listener cannot break the agent loop."""
        for sub in self._subscribers:
            try:
                sub(event)
            except Exception:
                pass
|