supyagent 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of supyagent might be problematic. Click here for more details.

@@ -0,0 +1,21 @@
1
"""Core module for supyagent."""

# Re-export the core public classes so callers can write
# ``from supyagent.core import Agent`` instead of reaching into submodules.
from supyagent.core.agent import Agent
from supyagent.core.context import DelegationContext
from supyagent.core.credentials import CredentialManager
from supyagent.core.delegation import DelegationManager
from supyagent.core.executor import ExecutionRunner
from supyagent.core.llm import LLMClient
from supyagent.core.registry import AgentRegistry
from supyagent.core.session_manager import SessionManager

# Explicit public API for ``from supyagent.core import *`` (alphabetical).
__all__ = [
    "Agent",
    "AgentRegistry",
    "CredentialManager",
    "DelegationContext",
    "DelegationManager",
    "ExecutionRunner",
    "LLMClient",
    "SessionManager",
]
@@ -0,0 +1,379 @@
1
+ """
2
+ Core Agent class.
3
+
4
+ The Agent handles the conversation loop, tool execution, and message management.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ import json
10
+ from typing import TYPE_CHECKING, Any
11
+
12
+ from supyagent.core.credentials import CredentialManager
13
+ from supyagent.core.llm import LLMClient
14
+ from supyagent.core.session_manager import SessionManager
15
+ from supyagent.core.tools import (
16
+ REQUEST_CREDENTIAL_TOOL,
17
+ discover_tools,
18
+ execute_tool,
19
+ filter_tools,
20
+ is_credential_request,
21
+ supypowers_to_openai_tools,
22
+ )
23
+ from supyagent.models.agent_config import AgentConfig
24
+ from supyagent.models.session import Message, Session
25
+
26
+ if TYPE_CHECKING:
27
+ from supyagent.core.delegation import DelegationManager
28
+ from supyagent.core.registry import AgentRegistry
29
+
30
+
31
class Agent:
    """
    An LLM agent that can use supypowers tools.

    The agent maintains a conversation history and handles the loop of:
    1. Send user message to LLM
    2. If LLM wants to call tools, execute them
    3. Feed tool results back to LLM
    4. Repeat until LLM gives final response

    Agents can also delegate tasks to other agents if configured with delegates.
    """

    def __init__(
        self,
        config: AgentConfig,
        session: Session | None = None,
        session_manager: SessionManager | None = None,
        credential_manager: CredentialManager | None = None,
        registry: "AgentRegistry | None" = None,
        parent_instance_id: str | None = None,
    ):
        """
        Initialize an agent from configuration.

        Args:
            config: Agent configuration
            session: Optional existing session to resume
            session_manager: Optional session manager (creates one if not provided)
            credential_manager: Optional credential manager (creates one if not provided)
            registry: Optional agent registry for multi-agent support
            parent_instance_id: Instance ID of parent agent (if this is a sub-agent)
        """
        self.config = config

        # Initialize LLM client.
        # NOTE(review): ``config.model.provider`` is passed as the *model*
        # identifier — presumably it holds a full "provider/model" string;
        # confirm against AgentConfig before relying on this.
        self.llm = LLMClient(
            model=config.model.provider,
            temperature=config.model.temperature,
            max_tokens=config.model.max_tokens,
        )

        # Session persistence and credential storage (defaults created on demand).
        self.session_manager = session_manager or SessionManager()
        self.credential_manager = credential_manager or CredentialManager()

        # Multi-agent support is only wired up when this agent has delegates.
        self.instance_id: str | None = None
        self.delegation_mgr: "DelegationManager | None" = None

        if config.delegates:
            # Lazy import to avoid circular dependency
            from supyagent.core.delegation import DelegationManager
            from supyagent.core.registry import AgentRegistry

            self.registry = registry or AgentRegistry()
            self.delegation_mgr = DelegationManager(
                self.registry,
                self,
                grandparent_instance_id=parent_instance_id,
            )
            self.instance_id = self.delegation_mgr.parent_id
        else:
            self.registry = registry

        # Load available tools (including delegation tools)
        self.tools = self._load_tools()

        # Initialize session: resume the provided one or start fresh.
        # (Annotation declared once so both branches share the same type.)
        self.messages: list[dict[str, Any]]
        if session:
            self.session = session
            self.messages = self._reconstruct_messages(session)
        else:
            self.session = self.session_manager.create_session(
                config.name, config.model.provider
            )
            self.messages = [
                {"role": "system", "content": config.system_prompt}
            ]

    def _load_tools(self) -> list[dict[str, Any]]:
        """
        Discover and filter available tools.

        Returns:
            List of tools in OpenAI function calling format
        """
        tools: list[dict[str, Any]] = []

        # Discover tools from supypowers and convert to OpenAI format.
        sp_tools = discover_tools()
        openai_tools = supypowers_to_openai_tools(sp_tools)

        # Filter by the permissions declared in this agent's config.
        filtered = filter_tools(openai_tools, self.config.tools)
        tools.extend(filtered)

        # Always add the credential request tool
        tools.append(REQUEST_CREDENTIAL_TOOL)

        # Add delegation tools if this agent can delegate
        if self.delegation_mgr:
            tools.extend(self.delegation_mgr.get_delegation_tools())

        return tools

    def _reconstruct_messages(self, session: Session) -> list[dict[str, Any]]:
        """
        Reconstruct LLM message format from session history.

        Args:
            session: Session with message history

        Returns:
            List of messages in OpenAI format
        """
        messages: list[dict[str, Any]] = [
            {"role": "system", "content": self.config.system_prompt}
        ]

        for msg in session.messages:
            if msg.type == "user":
                messages.append({"role": "user", "content": msg.content})
            elif msg.type == "assistant":
                m: dict[str, Any] = {"role": "assistant", "content": msg.content}
                if msg.tool_calls:
                    m["tool_calls"] = msg.tool_calls
                messages.append(m)
            elif msg.type == "tool_result":
                messages.append({
                    "role": "tool",
                    "tool_call_id": msg.tool_call_id,
                    "content": msg.content,
                })

        return messages

    def send_message(self, content: str) -> str:
        """
        Send a user message and get the agent's response.

        This handles the full tool-use loop:
        1. Add user message
        2. Get LLM response
        3. If tools requested, execute them and continue
        4. Return final text response

        Args:
            content: User's message

        Returns:
            Agent's final text response
        """
        # Record user message in both the persisted session and LLM history.
        user_msg = Message(type="user", content=content)
        self.session_manager.append_message(self.session, user_msg)
        self.messages.append({"role": "user", "content": content})

        # Maximum iterations to prevent infinite tool-call loops.
        max_iterations = self.config.limits.get("max_tool_calls_per_turn", 20)

        # Track the most recent assistant text so we can return something
        # sensible if the tool-call budget is exhausted.
        last_assistant_text = ""

        for _ in range(max_iterations):
            # Call LLM
            response = self.llm.chat(
                messages=self.messages,
                tools=self.tools if self.tools else None,
            )

            assistant_message = response.choices[0].message
            last_assistant_text = assistant_message.content or ""

            # Build message dict for history
            msg_dict: dict[str, Any] = {
                "role": "assistant",
                "content": assistant_message.content,
            }

            # Build tool_calls list for session storage
            tool_calls_for_session: list[dict[str, Any]] = []

            if assistant_message.tool_calls:
                msg_dict["tool_calls"] = [
                    {
                        "id": tc.id,
                        "type": "function",
                        "function": {
                            "name": tc.function.name,
                            "arguments": tc.function.arguments,
                        },
                    }
                    for tc in assistant_message.tool_calls
                ]
                tool_calls_for_session = msg_dict["tool_calls"]

            # Record assistant message to session
            asst_record = Message(
                type="assistant",
                content=assistant_message.content,
                tool_calls=tool_calls_for_session if tool_calls_for_session else None,
            )
            self.session_manager.append_message(self.session, asst_record)

            # Add to LLM message history
            self.messages.append(msg_dict)

            # If no tool calls, we're done
            if not assistant_message.tool_calls:
                return assistant_message.content or ""

            # Execute each tool call
            for tool_call in assistant_message.tool_calls:
                # Handle credential requests specially
                if is_credential_request(tool_call):
                    result = self._handle_credential_request(tool_call)
                else:
                    result = self._execute_tool_call(tool_call)

                # Serialize once; the same payload goes to the session
                # record and the LLM message history.
                result_json = json.dumps(result)

                # Record tool result to session
                tool_msg = Message(
                    type="tool_result",
                    tool_call_id=tool_call.id,
                    name=tool_call.function.name,
                    content=result_json,
                )
                self.session_manager.append_message(self.session, tool_msg)

                # Add to LLM message history
                self.messages.append({
                    "role": "tool",
                    "tool_call_id": tool_call.id,
                    "content": result_json,
                })

        # Tool-call budget exhausted: return the last assistant text.
        # (Previously this returned self.messages[-1]["content"], which at
        # this point is the raw JSON of the last tool result, not a reply.)
        return last_assistant_text

    def _handle_credential_request(self, tool_call: Any) -> dict[str, Any]:
        """
        Handle a credential request from the LLM.

        Args:
            tool_call: The tool call requesting credentials

        Returns:
            Result dict indicating success or failure
        """
        try:
            args = json.loads(tool_call.function.arguments)
        except json.JSONDecodeError:
            return {"ok": False, "error": "Invalid credential request arguments"}

        name = args.get("name", "")
        description = args.get("description", "")
        service = args.get("service")

        if not name:
            return {"ok": False, "error": "Credential name is required"}

        # Check if we already have it
        existing = self.credential_manager.get(self.config.name, name)
        if existing:
            return {
                "ok": True,
                "message": f"Credential {name} is already available",
            }

        # Prompt user for the credential
        result = self.credential_manager.prompt_for_credential(
            name=name,
            description=description,
            service=service,
        )

        if result is None:
            return {
                "ok": False,
                "error": f"User declined to provide credential {name}",
            }

        value, persist = result
        self.credential_manager.set(self.config.name, name, value, persist=persist)

        return {
            "ok": True,
            "message": f"Credential {name} has been provided and is now available",
        }

    def _execute_tool_call(self, tool_call: Any) -> dict[str, Any]:
        """
        Execute a single tool call.

        Args:
            tool_call: Tool call from LLM response

        Returns:
            Result dict from tool execution
        """
        name = tool_call.function.name

        # Check if this is a delegation tool
        if self.delegation_mgr and self.delegation_mgr.is_delegation_tool(name):
            return self.delegation_mgr.execute_delegation(tool_call)

        # Parse arguments
        arguments_str = tool_call.function.arguments
        try:
            args = json.loads(arguments_str)
        except json.JSONDecodeError:
            return {"ok": False, "error": f"Invalid JSON arguments: {arguments_str}"}

        # Tool names are encoded as "script__function"; reject anything else.
        if "__" not in name:
            return {"ok": False, "error": f"Invalid tool name format: {name}"}

        script, func = name.split("__", 1)

        # Get credentials for tool execution
        secrets = self.credential_manager.get_all_for_tools(self.config.name)

        # Execute via supypowers
        return execute_tool(script, func, args, secrets=secrets)

    def get_available_tools(self) -> list[str]:
        """
        Get list of available tool names.

        Returns:
            List of tool names
        """
        return [
            tool.get("function", {}).get("name", "unknown")
            for tool in self.tools
        ]

    def clear_history(self) -> None:
        """Clear conversation history and start a new session."""
        # Create a new session
        self.session = self.session_manager.create_session(
            self.config.name, self.config.model.provider
        )
        self.messages = [
            {"role": "system", "content": self.config.system_prompt}
        ]
@@ -0,0 +1,158 @@
1
+ """
2
+ Context passing for multi-agent delegation.
3
+
4
+ Enables rich context to be passed from parent to child agents,
5
+ including conversation summaries and relevant facts.
6
+ """
7
+
8
+ from dataclasses import dataclass, field
9
+ from typing import Any
10
+
11
+ from supyagent.core.llm import LLMClient
12
+
13
+
14
@dataclass
class DelegationContext:
    """Context handed from a parent agent to a child agent during delegation.

    Attributes:
        parent_agent: Name of the delegating (parent) agent.
        parent_task: The task the parent is currently working on.
        conversation_summary: Optional summary of relevant history.
        relevant_facts: Key facts the child should know about.
        shared_data: Arbitrary key/value data forwarded to the child.
    """

    parent_agent: str
    parent_task: str
    conversation_summary: str | None = None
    relevant_facts: list[str] = field(default_factory=list)
    shared_data: dict[str, Any] = field(default_factory=dict)

    def to_prompt(self) -> str:
        """Render this context as a text prefix for the child agent's task."""
        lines: list[str] = [
            f"You are being called by the '{self.parent_agent}' agent.",
            f"Parent's current task: {self.parent_task}",
        ]

        if self.conversation_summary:
            lines.append(f"\nConversation context:\n{self.conversation_summary}")

        if self.relevant_facts:
            lines.append("\nRelevant information:")
            lines.extend(f"- {fact}" for fact in self.relevant_facts)

        if self.shared_data:
            lines.append("\nShared data:")
            lines.extend(f"- {key}: {value}" for key, value in self.shared_data.items())

        return "\n".join(lines)
59
+
60
+
61
def summarize_conversation(
    messages: list[dict[str, Any]],
    llm: LLMClient,
    max_messages: int = 10,
) -> str | None:
    """Produce a short LLM-generated summary of recent conversation turns.

    Args:
        messages: Conversation messages in OpenAI chat format.
        llm: LLM client used to generate the summary.
        max_messages: How many trailing messages to consider.

    Returns:
        A concise summary string, or None when there is nothing to
        summarize or the LLM call fails.
    """
    role_labels = {"user": "User", "assistant": "Assistant"}
    transcript: list[str] = []

    # Keep only user/assistant turns with non-empty content.
    for entry in messages[-max_messages:]:
        label = role_labels.get(entry.get("role"))
        if label is None:
            continue
        text = entry.get("content", "")
        if not text:
            continue
        # Cap very long messages so the summary prompt stays small.
        if len(text) > 300:
            text = text[:300] + "..."
        transcript.append(f"{label}: {text}")

    if not transcript:
        return None

    summary_prompt = (
        "Summarize this conversation in 2-3 sentences, "
        "focusing on the main task and key decisions:\n\n"
        + "\n".join(transcript)
        + "\n\nSummary:"
    )

    # Summarization is best-effort: any failure simply yields no summary.
    try:
        response = llm.chat([{"role": "user", "content": summary_prompt}])
        return response.choices[0].message.content
    except Exception:
        return None
105
+
106
+
107
def extract_relevant_facts(
    messages: list[dict[str, Any]],
    task: str,
    llm: LLMClient,
) -> list[str]:
    """Pull out conversation facts relevant to an upcoming delegated task.

    Args:
        messages: Conversation messages in OpenAI chat format.
        task: The task the facts should be relevant to.
        llm: LLM client used to do the extraction.

    Returns:
        Up to four relevant facts; empty when there is no usable context
        or the LLM call fails.
    """
    # Gather non-empty content from the last few messages.
    snippets = [entry["content"] for entry in messages[-5:] if entry.get("content")]
    if not snippets:
        return []

    context = "\n---\n".join(snippets)

    prompt = (
        "Given this conversation context and the upcoming task, "
        "list 2-4 key facts that would be relevant.\n\n"
        f"Conversation context:\n{context}\n\n"
        f"Upcoming task: {task}\n\n"
        'List the relevant facts (one per line, start each with "- "):'
    )

    # Best-effort extraction: any failure yields an empty fact list.
    try:
        reply = llm.chat([{"role": "user", "content": prompt}])
        text = reply.choices[0].message.content or ""

        # Collect "- " / "• " bullet lines from the reply.
        facts: list[str] = []
        for raw_line in text.split("\n"):
            stripped = raw_line.strip()
            if stripped.startswith("- ") or stripped.startswith("• "):
                facts.append(stripped[2:])

        return facts[:4]  # Limit to 4 facts
    except Exception:
        return []