agnt5 0.2.0__cp39-abi3-manylinux_2_34_aarch64.whl → 0.2.1__cp39-abi3-manylinux_2_34_aarch64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of agnt5 might be problematic; see the registry's advisory page for more details.

agnt5/__init__.py CHANGED
@@ -6,9 +6,19 @@ with built-in durability guarantees and state management.
6
6
  """
7
7
 
8
8
  from ._compat import _import_error, _rust_available
9
- from .agent import Agent, AgentResult
9
+ from .agent import Agent, AgentRegistry, AgentResult, agent
10
+ from .client import Client, RunError
10
11
  from .context import Context
11
- from .entity import EntityInstance, EntityType, entity
12
+ from .entity import (
13
+ DurableEntity,
14
+ SessionEntity,
15
+ MemoryEntity,
16
+ WorkflowEntity,
17
+ EntityInstance,
18
+ EntityRegistry,
19
+ EntityType,
20
+ entity,
21
+ )
12
22
  from .exceptions import (
13
23
  AGNT5Error,
14
24
  CheckpointError,
@@ -24,6 +34,9 @@ from .version import _get_version
24
34
  from .worker import Worker
25
35
  from .workflow import WorkflowRegistry, workflow
26
36
 
37
+ # Expose simplified language model API (recommended)
38
+ from . import lm
39
+
27
40
  __version__ = _get_version()
28
41
 
29
42
  __all__ = [
@@ -31,19 +44,27 @@ __all__ = [
31
44
  "__version__",
32
45
  # Core components
33
46
  "Context",
47
+ "Client",
48
+ "Worker",
34
49
  "function",
35
50
  "FunctionRegistry",
36
51
  "entity",
37
52
  "EntityType",
38
53
  "EntityInstance",
54
+ "EntityRegistry",
55
+ "DurableEntity",
56
+ "SessionEntity",
57
+ "MemoryEntity",
58
+ "WorkflowEntity",
39
59
  "workflow",
40
60
  "WorkflowRegistry",
41
61
  "tool",
42
62
  "Tool",
43
63
  "ToolRegistry",
64
+ "agent",
44
65
  "Agent",
66
+ "AgentRegistry",
45
67
  "AgentResult",
46
- "Worker",
47
68
  # Types
48
69
  "RetryPolicy",
49
70
  "BackoffPolicy",
@@ -57,4 +78,7 @@ __all__ = [
57
78
  "RetryError",
58
79
  "StateError",
59
80
  "CheckpointError",
81
+ "RunError",
82
+ # Language Model (Simplified API)
83
+ "lm",
60
84
  ]
agnt5/_core.abi3.so CHANGED
Binary file
agnt5/_telemetry.py ADDED
@@ -0,0 +1,141 @@
1
+ """
2
+ OpenTelemetry integration for Python logging.
3
+
4
+ This module bridges Python's standard logging to Rust's tracing/OpenTelemetry system,
5
+ ensuring all logs from ctx.logger are sent to both the console and OTLP exporters.
6
+ """
7
+
8
+ import logging
9
+ from typing import Optional
10
+
11
+
12
class OpenTelemetryHandler(logging.Handler):
    """Logging handler that bridges Python log records into Rust's
    tracing/OpenTelemetry pipeline.

    Records are forwarded through the Rust ``log_from_python()`` entry
    point, which attaches them to the active span context
    (invocation.id, trace_id, ...), exports them via OTLP, and echoes
    them to the console through the fmt layer — so one handler on the
    Python side covers both destinations.
    """

    def __init__(self, level=logging.NOTSET):
        """Set up the handler and resolve the Rust bridge function.

        Args:
            level: Minimum log level to process (NOTSET lets everything through).
        """
        super().__init__(level)
        self._log_from_python = self._resolve_bridge()

    @staticmethod
    def _resolve_bridge():
        # Return the Rust bridge callable, or None when the compiled
        # core is unavailable (development / testing environments).
        try:
            from ._core import log_from_python
        except ImportError as exc:
            import warnings
            warnings.warn(
                f"Failed to import Rust telemetry bridge: {exc}. "
                "Logs will not be sent to OpenTelemetry.",
                RuntimeWarning
            )
            return None
        return log_from_python

    def emit(self, record: logging.LogRecord):
        """Forward a single log record to the Rust telemetry layer.

        Args:
            record: Python logging record to forward.
        """
        bridge = self._log_from_python
        if bridge is None:
            # No compiled core available: drop the record silently.
            return

        try:
            # Apply any configured formatter before handing off.
            formatted = self.format(record)
            # Rust side adds span context, exports to OTLP, and prints
            # to the console via its fmt layer.
            bridge(
                level=record.levelname,
                message=formatted,
                target=record.name,
                module_path=record.module,
                filename=record.pathname,
                line=record.lineno,
            )
        except Exception:
            # Never let a logging failure crash the application; route
            # the problem through the standard logging error hook.
            self.handleError(record)
82
+
83
+
84
def setup_context_logger(logger: logging.Logger, log_level: Optional[int] = None) -> None:
    """Wire a Context logger into the OpenTelemetry pipeline.

    The logger is reset to exactly one OpenTelemetry handler (the Rust
    side produces both console and OTLP output), given an appropriate
    level, and detached from the root logger so records are never
    emitted twice.

    Args:
        logger: Logger instance to configure.
        log_level: Optional minimum level; defaults to DEBUG so the
            Rust side performs the actual filtering.
    """
    # Start from a clean slate so repeated setup never duplicates output.
    del logger.handlers[:]

    handler = OpenTelemetryHandler()
    handler.setLevel(logging.DEBUG)

    # Only the raw message is formatted here; structured fields are
    # attached by the Rust tracing layer.
    handler.setFormatter(logging.Formatter('%(message)s'))
    logger.addHandler(handler)

    # DEBUG by default: level filtering is deferred to the Rust side.
    logger.setLevel(logging.DEBUG if log_level is None else log_level)

    # Everything is routed through OpenTelemetry; skip the root logger.
    logger.propagate = False
119
+
120
+
121
def setup_module_logger(module_name: str, log_level: Optional[int] = None) -> logging.Logger:
    """Return a module logger wired into OpenTelemetry.

    Convenience wrapper for configuring loggers inside SDK modules.

    Args:
        module_name: Dotted module name (e.g., "agnt5.worker").
        log_level: Optional minimum level; module loggers default to INFO.

    Returns:
        The configured logger instance.
    """
    # Module loggers default to INFO rather than DEBUG.
    effective_level = logging.INFO if log_level is None else log_level
    module_logger = logging.getLogger(module_name)
    setup_context_logger(module_logger, effective_level)
    return module_logger
agnt5/agent.py CHANGED
@@ -6,15 +6,49 @@ Phase 2: Platform-backed agents with durable execution and multi-agent coordinat
6
6
 
7
7
  from __future__ import annotations
8
8
 
9
+ import functools
9
10
  import json
10
11
  import logging
11
- from typing import Any, Dict, List, Optional
12
+ from typing import Any, Callable, Dict, List, Optional
12
13
 
13
14
  from .context import Context
14
- from .lm import GenerateRequest, GenerateResponse, LanguageModel, Message, ToolDefinition
15
+ from . import lm
16
+ from .lm import GenerateRequest, GenerateResponse, Message, ModelConfig, ToolDefinition
15
17
  from .tool import Tool, ToolRegistry
18
+ from ._telemetry import setup_module_logger
16
19
 
17
- logger = logging.getLogger(__name__)
20
+ logger = setup_module_logger(__name__)
21
+
22
# Module-wide table of agents, keyed by agent name.
_AGENT_REGISTRY: Dict[str, "Agent"] = {}


class AgentRegistry:
    """Static registry for discovering agents by name."""

    @staticmethod
    def register(agent: "Agent") -> None:
        """Add an agent, replacing (with a warning) any same-named entry."""
        if agent.name in _AGENT_REGISTRY:
            logger.warning(f"Overwriting existing agent '{agent.name}'")
        _AGENT_REGISTRY[agent.name] = agent
        logger.debug(f"Registered agent '{agent.name}'")

    @staticmethod
    def get(name: str) -> Optional["Agent"]:
        """Look up an agent by name; None when absent."""
        return _AGENT_REGISTRY.get(name)

    @staticmethod
    def all() -> Dict[str, "Agent"]:
        """Return a shallow copy of every registered agent."""
        return dict(_AGENT_REGISTRY)

    @staticmethod
    def clear() -> None:
        """Drop every registered agent."""
        _AGENT_REGISTRY.clear()
        logger.debug("Cleared agent registry")
18
52
 
19
53
 
20
54
  class AgentResult:
@@ -43,20 +77,20 @@ class Agent:
43
77
 
44
78
  Example:
45
79
  ```python
46
- from agnt5 import Agent, tool
47
- from agnt5.lm import OpenAILanguageModel
80
+ from agnt5 import Agent, tool, Context
48
81
 
49
82
  @tool(auto_schema=True)
50
83
  async def search_web(ctx: Context, query: str) -> List[Dict]:
51
84
  # Search implementation
52
85
  return [{"title": "Result", "url": "..."}]
53
86
 
54
- lm = OpenAILanguageModel()
87
+ # Simple usage with model string
55
88
  agent = Agent(
56
89
  name="researcher",
57
- model=lm,
90
+ model="openai/gpt-4o-mini",
58
91
  instructions="You are a research assistant.",
59
- tools=[search_web]
92
+ tools=[search_web],
93
+ temperature=0.7
60
94
  )
61
95
 
62
96
  result = await agent.run("What are the latest AI trends?")
@@ -67,29 +101,35 @@ class Agent:
67
101
  def __init__(
68
102
  self,
69
103
  name: str,
70
- model: LanguageModel,
104
+ model: str,
71
105
  instructions: str,
72
106
  tools: Optional[List[Any]] = None,
73
- model_name: str = "gpt-4o-mini",
74
107
  temperature: float = 0.7,
108
+ max_tokens: Optional[int] = None,
109
+ top_p: Optional[float] = None,
110
+ model_config: Optional[ModelConfig] = None,
75
111
  max_iterations: int = 10,
76
112
  ):
77
113
  """Initialize agent.
78
114
 
79
115
  Args:
80
116
  name: Agent name/identifier
81
- model: Language model instance
117
+ model: Model string with provider prefix (e.g., "openai/gpt-4o-mini")
82
118
  instructions: System instructions for the agent
83
119
  tools: List of tools available to the agent (functions with @tool decorator)
84
- model_name: Model name to use (e.g., "gpt-4", "claude-3-opus")
85
120
  temperature: LLM temperature (0.0 to 1.0)
121
+ max_tokens: Maximum tokens to generate
122
+ top_p: Nucleus sampling parameter
123
+ model_config: Optional advanced configuration (custom endpoints, headers, etc.)
86
124
  max_iterations: Maximum reasoning iterations
87
125
  """
88
126
  self.name = name
89
127
  self.model = model
90
128
  self.instructions = instructions
91
- self.model_name = model_name
92
129
  self.temperature = temperature
130
+ self.max_tokens = max_tokens
131
+ self.top_p = top_p
132
+ self.model_config = model_config
93
133
  self.max_iterations = max_iterations
94
134
 
95
135
  # Build tool registry
@@ -116,6 +156,33 @@ class Agent:
116
156
 
117
157
  self.logger = logging.getLogger(f"agnt5.agent.{name}")
118
158
 
159
+ # Define schemas based on the run method signature
160
+ # Input: user_message (string)
161
+ self.input_schema = {
162
+ "type": "object",
163
+ "properties": {
164
+ "user_message": {"type": "string"}
165
+ },
166
+ "required": ["user_message"]
167
+ }
168
+ # Output: AgentResult with output and tool_calls
169
+ self.output_schema = {
170
+ "type": "object",
171
+ "properties": {
172
+ "output": {"type": "string"},
173
+ "tool_calls": {
174
+ "type": "array",
175
+ "items": {"type": "object"}
176
+ }
177
+ }
178
+ }
179
+
180
+ # Store metadata
181
+ self.metadata = {
182
+ "description": instructions,
183
+ "model": model_name
184
+ }
185
+
119
186
  async def run(
120
187
  self,
121
188
  user_message: str,
@@ -163,17 +230,34 @@ class Agent:
163
230
  for tool in self.tools.values()
164
231
  ]
165
232
 
166
- # Create LLM request
233
+ # Convert messages to dict format for lm.generate()
234
+ messages_dict = []
235
+ for msg in messages:
236
+ messages_dict.append({
237
+ "role": msg.role.value,
238
+ "content": msg.content
239
+ })
240
+
241
+ # Call LLM using simplified API
242
+ # TODO: Support tools in lm.generate() - for now using GenerateRequest internally
167
243
  request = GenerateRequest(
168
- model=self.model_name,
244
+ model=self.model,
169
245
  system_prompt=self.instructions,
170
246
  messages=messages,
171
247
  tools=tool_defs if tool_defs else [],
172
248
  )
173
249
  request.config.temperature = self.temperature
174
-
175
- # Call LLM
176
- response = await self.model.generate(request)
250
+ if self.max_tokens:
251
+ request.config.max_tokens = self.max_tokens
252
+ if self.top_p:
253
+ request.config.top_p = self.top_p
254
+
255
+ # Create internal LM instance for generation
256
+ # TODO: Use model_config when provided
257
+ from .lm import _LanguageModel
258
+ provider, model_name = self.model.split('/', 1)
259
+ internal_lm = _LanguageModel(provider=provider.lower(), default_model=None)
260
+ response = await internal_lm.generate(request)
177
261
 
178
262
  # Add assistant response to messages
179
263
  messages.append(Message.assistant(response.text))
@@ -288,16 +372,124 @@ class Agent:
288
372
 
289
373
  # Build request (no tools for simple chat)
290
374
  request = GenerateRequest(
291
- model=self.model_name,
375
+ model=self.model,
292
376
  system_prompt=self.instructions,
293
377
  messages=conversation,
294
378
  )
295
379
  request.config.temperature = self.temperature
380
+ if self.max_tokens:
381
+ request.config.max_tokens = self.max_tokens
382
+ if self.top_p:
383
+ request.config.top_p = self.top_p
296
384
 
297
- # Call LLM
298
- response = await self.model.generate(request)
385
+ # Create internal LM instance for generation
386
+ from .lm import _LanguageModel
387
+ provider, model_name = self.model.split('/', 1)
388
+ internal_lm = _LanguageModel(provider=provider.lower(), default_model=None)
389
+ response = await internal_lm.generate(request)
299
390
 
300
391
  # Add assistant response
301
392
  conversation.append(Message.assistant(response.text))
302
393
 
303
394
  return response.text, conversation
395
+
396
+
397
def agent(
    _func: Optional[Callable] = None,
    *,
    name: Optional[str] = None,
    model: Optional[str] = None,
    instructions: Optional[str] = None,
    tools: Optional[List[Any]] = None,
    model_name: str = "gpt-4o-mini",
    temperature: float = 0.7,
    max_iterations: int = 10,
) -> Callable:
    """
    Decorator that creates an Agent and registers it in the AgentRegistry.

    The decorated function may either build and return its own Agent
    instance, or rely on the decorator arguments (``model`` +
    ``instructions``) to construct one. Either way the resulting agent
    is registered for discovery by the worker, and the decorator
    evaluates to the Agent instance itself so it can be used directly.

    Args:
        name: Agent name (defaults to the function name)
        model: Model string with provider prefix (e.g., "openai/gpt-4o-mini");
            required unless the decorated function returns an Agent itself
        instructions: System instructions; required unless the decorated
            function returns an Agent itself
        tools: List of tools available to the agent
        model_name: Deprecated and ignored — Agent no longer accepts a
            separate model name; pass the provider-prefixed string via
            ``model`` instead
        temperature: LLM temperature (0.0 to 1.0)
        max_iterations: Maximum reasoning iterations

    Returns:
        The registered Agent instance.

    Raises:
        ValueError: If the decorated function does not return an Agent
            and ``model``/``instructions`` are not both provided.

    Example:
        ```python
        from agnt5 import agent, tool

        @agent(
            name="research_agent",
            model="openai/gpt-4o-mini",
            instructions="You are a research assistant.",
            tools=[search_web, analyze_data]
        )
        def create_researcher():
            # Agent is created and registered automatically
            pass

        # Or create agent directly
        @agent
        def my_agent():
            return Agent(
                name="my_agent",
                model="openai/gpt-4o-mini",
                instructions="You are a helpful assistant."
            )
        ```
    """

    def decorator(func: Callable) -> Callable:
        # Determine agent name (decorator arg wins over function name).
        agent_name = name or func.__name__

        @functools.wraps(func)
        def wrapper(*args, **kwargs) -> Agent:
            result = func(*args, **kwargs)
            if isinstance(result, Agent):
                # The function built its own agent; use it as-is.
                agent_instance = result
            elif model is not None and instructions is not None:
                # Build the agent from the decorator parameters.
                # NOTE: Agent() no longer accepts `model_name`; the
                # provider-prefixed `model` string carries that
                # information, so the deprecated argument is ignored.
                agent_instance = Agent(
                    name=agent_name,
                    model=model,
                    instructions=instructions,
                    tools=tools,
                    temperature=temperature,
                    max_iterations=max_iterations,
                )
            else:
                raise ValueError(
                    f"Agent decorator for '{agent_name}' requires either "
                    "the decorated function to return an Agent instance, "
                    "or 'model' and 'instructions' parameters to be provided"
                )

            # Make the agent discoverable by the worker.
            AgentRegistry.register(agent_instance)
            return agent_instance

        # Build and register the agent eagerly, and return the instance
        # itself so it can be used directly at the decoration site.
        return wrapper()

    if _func is None:
        return decorator
    return decorator(_func)