agnt5-0.1.3-cp39-abi3-manylinux_2_34_aarch64.whl → agnt5-0.2.0-cp39-abi3-manylinux_2_34_aarch64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this release has been flagged as potentially problematic. See the registry's advisory page for details about this version of agnt5.

agnt5/__init__.py CHANGED
@@ -2,22 +2,59 @@
2
2
  AGNT5 Python SDK - Build durable, resilient agent-first applications.
3
3
 
4
4
  This SDK provides high-level components for building agents, tools, and workflows
5
- with built-in durability guarantees and state management, backed by a high-performance
6
- Rust core.
5
+ with built-in durability guarantees and state management.
7
6
  """
8
7
 
8
+ from ._compat import _import_error, _rust_available
9
+ from .agent import Agent, AgentResult
10
+ from .context import Context
11
+ from .entity import EntityInstance, EntityType, entity
12
+ from .exceptions import (
13
+ AGNT5Error,
14
+ CheckpointError,
15
+ ConfigurationError,
16
+ ExecutionError,
17
+ RetryError,
18
+ StateError,
19
+ )
20
+ from .function import FunctionRegistry, function
21
+ from .tool import Tool, ToolRegistry, tool
22
+ from .types import BackoffPolicy, BackoffType, FunctionConfig, RetryPolicy, WorkflowConfig
9
23
  from .version import _get_version
10
- from ._compat import _rust_available, _import_error
11
- from .decorators import function
12
24
  from .worker import Worker
13
- from .logging import install_opentelemetry_logging, remove_opentelemetry_logging
25
+ from .workflow import WorkflowRegistry, workflow
14
26
 
15
27
  __version__ = _get_version()
16
28
 
17
29
  __all__ = [
18
- 'function',
19
- 'Worker',
20
- 'install_opentelemetry_logging',
21
- 'remove_opentelemetry_logging',
22
- '__version__',
30
+ # Version
31
+ "__version__",
32
+ # Core components
33
+ "Context",
34
+ "function",
35
+ "FunctionRegistry",
36
+ "entity",
37
+ "EntityType",
38
+ "EntityInstance",
39
+ "workflow",
40
+ "WorkflowRegistry",
41
+ "tool",
42
+ "Tool",
43
+ "ToolRegistry",
44
+ "Agent",
45
+ "AgentResult",
46
+ "Worker",
47
+ # Types
48
+ "RetryPolicy",
49
+ "BackoffPolicy",
50
+ "BackoffType",
51
+ "FunctionConfig",
52
+ "WorkflowConfig",
53
+ # Exceptions
54
+ "AGNT5Error",
55
+ "ConfigurationError",
56
+ "ExecutionError",
57
+ "RetryError",
58
+ "StateError",
59
+ "CheckpointError",
23
60
  ]
agnt5/_compat.py CHANGED
@@ -8,8 +8,9 @@ for cross-referencing throughout the project.
8
8
  # Check if Rust core is available
9
9
  try:
10
10
  from . import _core
11
+
11
12
  _rust_available = True
12
13
  _import_error = None
13
14
  except ImportError as e:
14
15
  _rust_available = False
15
- _import_error = e
16
+ _import_error = e
agnt5/_core.abi3.so CHANGED
Binary file
agnt5/agent.py ADDED
@@ -0,0 +1,303 @@
1
+ """Agent component implementation for AGNT5 SDK.
2
+
3
+ Phase 1: Simple agent with external LLM integration and tool orchestration.
4
+ Phase 2: Platform-backed agents with durable execution and multi-agent coordination.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ import json
10
+ import logging
11
+ from typing import Any, Dict, List, Optional
12
+
13
+ from .context import Context
14
+ from .lm import GenerateRequest, GenerateResponse, LanguageModel, Message, ToolDefinition
15
+ from .tool import Tool, ToolRegistry
16
+
17
+ logger = logging.getLogger(__name__)
18
+
19
+
20
+ class AgentResult:
21
+ """Result from agent execution."""
22
+
23
+ def __init__(self, output: str, tool_calls: List[Dict[str, Any]], context: Context):
24
+ self.output = output
25
+ self.tool_calls = tool_calls
26
+ self.context = context
27
+
28
+
29
+ class Agent:
30
+ """Autonomous LLM-driven agent with tool orchestration.
31
+
32
+ Phase 1: Simple agent with:
33
+ - LLM integration (OpenAI, Anthropic, etc.)
34
+ - Tool selection and execution
35
+ - Multi-turn reasoning
36
+ - Context and state management
37
+
38
+ Phase 2 will add:
39
+ - Durable execution with checkpointing
40
+ - Multi-agent coordination
41
+ - Platform-backed tool execution
42
+ - Streaming responses
43
+
44
+ Example:
45
+ ```python
46
+ from agnt5 import Agent, tool
47
+ from agnt5.lm import OpenAILanguageModel
48
+
49
+ @tool(auto_schema=True)
50
+ async def search_web(ctx: Context, query: str) -> List[Dict]:
51
+ # Search implementation
52
+ return [{"title": "Result", "url": "..."}]
53
+
54
+ lm = OpenAILanguageModel()
55
+ agent = Agent(
56
+ name="researcher",
57
+ model=lm,
58
+ instructions="You are a research assistant.",
59
+ tools=[search_web]
60
+ )
61
+
62
+ result = await agent.run("What are the latest AI trends?")
63
+ print(result.output)
64
+ ```
65
+ """
66
+
67
+ def __init__(
68
+ self,
69
+ name: str,
70
+ model: LanguageModel,
71
+ instructions: str,
72
+ tools: Optional[List[Any]] = None,
73
+ model_name: str = "gpt-4o-mini",
74
+ temperature: float = 0.7,
75
+ max_iterations: int = 10,
76
+ ):
77
+ """Initialize agent.
78
+
79
+ Args:
80
+ name: Agent name/identifier
81
+ model: Language model instance
82
+ instructions: System instructions for the agent
83
+ tools: List of tools available to the agent (functions with @tool decorator)
84
+ model_name: Model name to use (e.g., "gpt-4", "claude-3-opus")
85
+ temperature: LLM temperature (0.0 to 1.0)
86
+ max_iterations: Maximum reasoning iterations
87
+ """
88
+ self.name = name
89
+ self.model = model
90
+ self.instructions = instructions
91
+ self.model_name = model_name
92
+ self.temperature = temperature
93
+ self.max_iterations = max_iterations
94
+
95
+ # Build tool registry
96
+ self.tools: Dict[str, Tool] = {}
97
+ if tools:
98
+ for tool_item in tools:
99
+ # Check if it's a Tool instance
100
+ if isinstance(tool_item, Tool):
101
+ self.tools[tool_item.name] = tool_item
102
+ # Check if it's a decorated function with config
103
+ elif hasattr(tool_item, "_agnt5_config"):
104
+ # Try to get from ToolRegistry first
105
+ tool_config = tool_item._agnt5_config
106
+ tool_instance = ToolRegistry.get(tool_config.name)
107
+ if tool_instance:
108
+ self.tools[tool_instance.name] = tool_instance
109
+ # Otherwise try to look up by function name
110
+ elif callable(tool_item):
111
+ # Try to find in registry by function name
112
+ tool_name = tool_item.__name__
113
+ tool_instance = ToolRegistry.get(tool_name)
114
+ if tool_instance:
115
+ self.tools[tool_instance.name] = tool_instance
116
+
117
+ self.logger = logging.getLogger(f"agnt5.agent.{name}")
118
+
119
+ async def run(
120
+ self,
121
+ user_message: str,
122
+ context: Optional[Context] = None,
123
+ ) -> AgentResult:
124
+ """Run agent to completion.
125
+
126
+ Args:
127
+ user_message: User's input message
128
+ context: Optional context (auto-created if not provided)
129
+
130
+ Returns:
131
+ AgentResult with output and execution details
132
+
133
+ Example:
134
+ ```python
135
+ result = await agent.run("Analyze recent tech news")
136
+ print(result.output)
137
+ ```
138
+ """
139
+ # Create context if not provided
140
+ if context is None:
141
+ import uuid
142
+
143
+ context = Context(
144
+ run_id=f"agent-{self.name}-{uuid.uuid4().hex[:8]}",
145
+ component_type="agent",
146
+ )
147
+
148
+ # Initialize conversation
149
+ messages: List[Message] = [Message.user(user_message)]
150
+ all_tool_calls: List[Dict[str, Any]] = []
151
+
152
+ # Reasoning loop
153
+ for iteration in range(self.max_iterations):
154
+ self.logger.info(f"Agent iteration {iteration + 1}/{self.max_iterations}")
155
+
156
+ # Build tool definitions for LLM
157
+ tool_defs = [
158
+ ToolDefinition(
159
+ name=tool.name,
160
+ description=tool.description,
161
+ parameters=tool.input_schema,
162
+ )
163
+ for tool in self.tools.values()
164
+ ]
165
+
166
+ # Create LLM request
167
+ request = GenerateRequest(
168
+ model=self.model_name,
169
+ system_prompt=self.instructions,
170
+ messages=messages,
171
+ tools=tool_defs if tool_defs else [],
172
+ )
173
+ request.config.temperature = self.temperature
174
+
175
+ # Call LLM
176
+ response = await self.model.generate(request)
177
+
178
+ # Add assistant response to messages
179
+ messages.append(Message.assistant(response.text))
180
+
181
+ # Check if LLM wants to use tools
182
+ if response.tool_calls:
183
+ self.logger.info(f"Agent calling {len(response.tool_calls)} tool(s)")
184
+
185
+ # Execute tool calls
186
+ tool_results = []
187
+ for tool_call in response.tool_calls:
188
+ tool_name = tool_call["name"]
189
+ tool_args_str = tool_call["arguments"]
190
+
191
+ # Track tool call
192
+ all_tool_calls.append(
193
+ {
194
+ "name": tool_name,
195
+ "arguments": tool_args_str,
196
+ "iteration": iteration + 1,
197
+ }
198
+ )
199
+
200
+ # Execute tool
201
+ try:
202
+ # Parse arguments
203
+ tool_args = json.loads(tool_args_str)
204
+
205
+ # Get tool
206
+ tool = self.tools.get(tool_name)
207
+ if not tool:
208
+ result_text = f"Error: Tool '{tool_name}' not found"
209
+ else:
210
+ # Execute tool
211
+ result = await tool.invoke(context, **tool_args)
212
+ result_text = json.dumps(result) if result else "null"
213
+
214
+ tool_results.append(
215
+ {"tool": tool_name, "result": result_text, "error": None}
216
+ )
217
+
218
+ except Exception as e:
219
+ self.logger.error(f"Tool execution error: {e}")
220
+ tool_results.append(
221
+ {"tool": tool_name, "result": None, "error": str(e)}
222
+ )
223
+
224
+ # Add tool results to conversation
225
+ results_text = "\n".join(
226
+ [
227
+ f"Tool: {tr['tool']}\nResult: {tr['result']}"
228
+ if tr["error"] is None
229
+ else f"Tool: {tr['tool']}\nError: {tr['error']}"
230
+ for tr in tool_results
231
+ ]
232
+ )
233
+ messages.append(Message.user(f"Tool results:\n{results_text}"))
234
+
235
+ # Continue loop for agent to process results
236
+
237
+ else:
238
+ # No tool calls - agent is done
239
+ self.logger.info(f"Agent completed after {iteration + 1} iterations")
240
+ return AgentResult(
241
+ output=response.text,
242
+ tool_calls=all_tool_calls,
243
+ context=context,
244
+ )
245
+
246
+ # Max iterations reached
247
+ self.logger.warning(f"Agent reached max iterations ({self.max_iterations})")
248
+ final_output = messages[-1].content if messages else "No output generated"
249
+ return AgentResult(
250
+ output=final_output,
251
+ tool_calls=all_tool_calls,
252
+ context=context,
253
+ )
254
+
255
+ async def chat(
256
+ self,
257
+ user_message: str,
258
+ messages: List[Message],
259
+ context: Optional[Context] = None,
260
+ ) -> tuple[str, List[Message]]:
261
+ """Continue multi-turn conversation.
262
+
263
+ Args:
264
+ user_message: New user message
265
+ messages: Previous conversation messages
266
+ context: Optional context
267
+
268
+ Returns:
269
+ Tuple of (assistant_response, updated_messages)
270
+
271
+ Example:
272
+ ```python
273
+ messages = []
274
+ response, messages = await agent.chat("Hello", messages)
275
+ response, messages = await agent.chat("Tell me more", messages)
276
+ ```
277
+ """
278
+ if context is None:
279
+ import uuid
280
+
281
+ context = Context(
282
+ run_id=f"agent-chat-{self.name}-{uuid.uuid4().hex[:8]}",
283
+ component_type="agent",
284
+ )
285
+
286
+ # Add user message
287
+ conversation = messages + [Message.user(user_message)]
288
+
289
+ # Build request (no tools for simple chat)
290
+ request = GenerateRequest(
291
+ model=self.model_name,
292
+ system_prompt=self.instructions,
293
+ messages=conversation,
294
+ )
295
+ request.config.temperature = self.temperature
296
+
297
+ # Call LLM
298
+ response = await self.model.generate(request)
299
+
300
+ # Add assistant response
301
+ conversation.append(Message.assistant(response.text))
302
+
303
+ return response.text, conversation