agnt5: agnt5-0.2.2-cp39-abi3-manylinux_2_34_x86_64.whl → agnt5-0.2.6-cp39-abi3-manylinux_2_34_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
agnt5/__init__.py CHANGED
@@ -9,15 +9,15 @@ from ._compat import _import_error, _rust_available
 from .agent import Agent, AgentRegistry, AgentResult, Handoff, agent, handoff
 from .client import Client, RunError
 from .context import Context
+from .function import FunctionContext
+from .workflow import WorkflowContext
 from .entity import (
-    DurableEntity,
-    SessionEntity,
-    MemoryEntity,
-    WorkflowEntity,
-    EntityInstance,
+    Entity,
     EntityRegistry,
+    EntityStateManager,
     EntityType,
-    entity,
+    create_entity_context,
+    with_entity_context,
 )
 from .exceptions import (
     AGNT5Error,
@@ -44,18 +44,18 @@ __all__ = [
     "__version__",
     # Core components
     "Context",
+    "FunctionContext",
+    "WorkflowContext",
     "Client",
     "Worker",
     "function",
     "FunctionRegistry",
-    "entity",
+    "Entity",
     "EntityType",
-    "EntityInstance",
     "EntityRegistry",
-    "DurableEntity",
-    "SessionEntity",
-    "MemoryEntity",
-    "WorkflowEntity",
+    "EntityStateManager",
+    "with_entity_context",
+    "create_entity_context",
     "workflow",
     "WorkflowRegistry",
     "tool",
agnt5/_core.abi3.so CHANGED
Binary file
agnt5/_retry_utils.py ADDED
@@ -0,0 +1,169 @@
+"""Retry and backoff utilities for durable execution.
+
+This module provides utilities for parsing retry policies, calculating backoff delays,
+and executing functions with retry logic.
+"""
+
+from __future__ import annotations
+
+import asyncio
+from typing import Any, Dict, Optional, Union
+
+from .exceptions import RetryError
+from .types import BackoffPolicy, BackoffType, HandlerFunc, RetryPolicy
+
+
+def parse_retry_policy(retries: Optional[Union[int, Dict[str, Any], RetryPolicy]]) -> RetryPolicy:
+    """Parse retry configuration from various forms.
+
+    Args:
+        retries: Can be:
+            - int: max_attempts (e.g., 5)
+            - dict: RetryPolicy parameters (e.g., {"max_attempts": 5, "initial_interval_ms": 1000})
+            - RetryPolicy: pass through
+            - None: use default
+
+    Returns:
+        RetryPolicy instance
+    """
+    if retries is None:
+        return RetryPolicy()
+    elif isinstance(retries, int):
+        return RetryPolicy(max_attempts=retries)
+    elif isinstance(retries, dict):
+        return RetryPolicy(**retries)
+    elif isinstance(retries, RetryPolicy):
+        return retries
+    else:
+        raise TypeError(f"retries must be int, dict, or RetryPolicy, got {type(retries)}")
+
+
+def parse_backoff_policy(backoff: Optional[Union[str, Dict[str, Any], BackoffPolicy]]) -> BackoffPolicy:
+    """Parse backoff configuration from various forms.
+
+    Args:
+        backoff: Can be:
+            - str: backoff type ("constant", "linear", "exponential")
+            - dict: BackoffPolicy parameters (e.g., {"type": "exponential", "multiplier": 2.0})
+            - BackoffPolicy: pass through
+            - None: use default
+
+    Returns:
+        BackoffPolicy instance
+    """
+    if backoff is None:
+        return BackoffPolicy()
+    elif isinstance(backoff, str):
+        backoff_type = BackoffType(backoff.lower())
+        return BackoffPolicy(type=backoff_type)
+    elif isinstance(backoff, dict):
+        # Convert string type to enum if present
+        if "type" in backoff and isinstance(backoff["type"], str):
+            backoff = {**backoff, "type": BackoffType(backoff["type"].lower())}
+        return BackoffPolicy(**backoff)
+    elif isinstance(backoff, BackoffPolicy):
+        return backoff
+    else:
+        raise TypeError(f"backoff must be str, dict, or BackoffPolicy, got {type(backoff)}")
+
+
+def calculate_backoff_delay(
+    attempt: int,
+    retry_policy: RetryPolicy,
+    backoff_policy: BackoffPolicy,
+) -> float:
+    """Calculate backoff delay in seconds based on attempt number.
+
+    Args:
+        attempt: Current attempt number (0-indexed)
+        retry_policy: Retry configuration
+        backoff_policy: Backoff configuration
+
+    Returns:
+        Delay in seconds
+    """
+    if backoff_policy.type == BackoffType.CONSTANT:
+        delay_ms = retry_policy.initial_interval_ms
+    elif backoff_policy.type == BackoffType.LINEAR:
+        delay_ms = retry_policy.initial_interval_ms * (attempt + 1)
+    else:  # EXPONENTIAL
+        delay_ms = retry_policy.initial_interval_ms * (backoff_policy.multiplier**attempt)
+
+    # Cap at max_interval_ms
+    delay_ms = min(delay_ms, retry_policy.max_interval_ms)
+    return delay_ms / 1000.0  # Convert to seconds
+
+
+async def execute_with_retry(
+    handler: HandlerFunc,
+    ctx: Any,  # FunctionContext, but avoid circular import
+    retry_policy: RetryPolicy,
+    backoff_policy: BackoffPolicy,
+    needs_context: bool,
+    *args: Any,
+    **kwargs: Any,
+) -> Any:
+    """Execute handler with retry logic.
+
+    Args:
+        handler: The function handler to execute
+        ctx: Context for logging and attempt tracking (FunctionContext)
+        retry_policy: Retry configuration
+        backoff_policy: Backoff configuration
+        needs_context: Whether handler accepts a ctx parameter
+        *args: Arguments to pass to handler (excluding ctx if needs_context=False)
+        **kwargs: Keyword arguments to pass to handler
+
+    Returns:
+        Result of successful execution
+
+    Raises:
+        RetryError: If all retry attempts fail
+    """
+    # Import here to avoid circular dependency
+    from .function import FunctionContext
+
+    last_error: Optional[Exception] = None
+
+    for attempt in range(retry_policy.max_attempts):
+        try:
+            # Create context for this attempt (FunctionContext is immutable)
+            attempt_ctx = FunctionContext(
+                run_id=ctx.run_id,
+                attempt=attempt,
+                retry_policy=retry_policy
+            )
+
+            # Execute handler (pass context only if needed)
+            if needs_context:
+                result = await handler(attempt_ctx, *args, **kwargs)
+            else:
+                result = await handler(*args, **kwargs)
+            return result
+
+        except Exception as e:
+            last_error = e
+            ctx.logger.warning(
+                f"Function execution failed (attempt {attempt + 1}/{retry_policy.max_attempts}): {e}"
+            )
+
+            # If this was the last attempt, raise RetryError
+            if attempt == retry_policy.max_attempts - 1:
+                raise RetryError(
+                    f"Function failed after {retry_policy.max_attempts} attempts",
+                    attempts=retry_policy.max_attempts,
+                    last_error=e,
+                )
+
+            # Calculate backoff delay
+            delay = calculate_backoff_delay(attempt, retry_policy, backoff_policy)
+            ctx.logger.info(f"Retrying in {delay:.2f} seconds...")
+            await asyncio.sleep(delay)
+
+    # Should never reach here, but for type safety
+    assert last_error is not None
+    raise RetryError(
+        f"Function failed after {retry_policy.max_attempts} attempts",
+        attempts=retry_policy.max_attempts,
+        last_error=last_error,
+    )
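
A quick usage sketch for the parsing and delay helpers added above, with hypothetical values; it assumes RetryPolicy and BackoffPolicy accept the keyword fields this module itself references (max_attempts, initial_interval_ms, max_interval_ms, type, multiplier):

from agnt5._retry_utils import (
    calculate_backoff_delay,
    parse_backoff_policy,
    parse_retry_policy,
)

retry = parse_retry_policy({"max_attempts": 4, "initial_interval_ms": 500, "max_interval_ms": 3000})
backoff = parse_backoff_policy({"type": "exponential", "multiplier": 2.0})

# delay_ms = initial_interval_ms * multiplier**attempt, capped at max_interval_ms:
# attempt 0 -> 0.5s, attempt 1 -> 1.0s, attempt 2 -> 2.0s (the final failed
# attempt raises RetryError instead of sleeping)
for attempt in range(retry.max_attempts - 1):
    print(calculate_backoff_delay(attempt, retry, backoff))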
agnt5/_schema_utils.py ADDED
@@ -0,0 +1,305 @@
+"""Schema conversion utilities for structured output support.
+
+This module provides utilities to convert Python dataclasses and Pydantic models
+to JSON Schema format for LLM structured output generation, function signatures,
+and tool definitions.
+"""
+
+from __future__ import annotations
+
+import dataclasses
+import inspect
+from typing import Any, Callable, Dict, Optional, Tuple, Union, get_args, get_origin, get_type_hints
+
+try:
+    from pydantic import BaseModel
+    PYDANTIC_AVAILABLE = True
+except ImportError:
+    BaseModel = None  # type: ignore
+    PYDANTIC_AVAILABLE = False
+
+
+def detect_format_type(response_format: Any) -> Tuple[str, Dict[str, Any]]:
+    """Auto-detect format type and convert to JSON schema.
+
+    Args:
+        response_format: Pydantic model, dataclass, or dict
+
+    Returns:
+        Tuple of (format_type, json_schema)
+        - format_type: "pydantic", "dataclass", or "raw"
+        - json_schema: JSON schema dictionary
+
+    Raises:
+        ValueError: If format type is not supported
+    """
+    # Check for Pydantic model
+    if PYDANTIC_AVAILABLE and isinstance(response_format, type) and issubclass(response_format, BaseModel):
+        return 'pydantic', pydantic_to_json_schema(response_format)
+
+    # Check for dataclass
+    if dataclasses.is_dataclass(response_format):
+        return 'dataclass', dataclass_to_json_schema(response_format)
+
+    # Check for raw dict
+    if isinstance(response_format, dict):
+        return 'raw', response_format
+
+    raise ValueError(
+        f"Unsupported response_format type: {type(response_format)}. "
+        f"Expected Pydantic model, dataclass, or dict."
+    )
+
+
+def pydantic_to_json_schema(model: type) -> Dict[str, Any]:
+    """Convert Pydantic model to JSON schema.
+
+    Supports both Pydantic v1 and v2 APIs.
+
+    Args:
+        model: Pydantic BaseModel class
+
+    Returns:
+        JSON schema dictionary
+    """
+    if not PYDANTIC_AVAILABLE:
+        raise ImportError("Pydantic is not installed. Install with: pip install pydantic")
+
+    if not (isinstance(model, type) and issubclass(model, BaseModel)):
+        raise ValueError(f"Expected Pydantic BaseModel class, got {type(model)}")
+
+    try:
+        # Try Pydantic v2 API first
+        if hasattr(model, 'model_json_schema'):
+            schema = model.model_json_schema()
+        # Fall back to Pydantic v1 API
+        elif hasattr(model, 'schema'):
+            schema = model.schema()
+        else:
+            # Fallback for edge cases
+            schema = {"type": "object"}
+    except Exception:
+        # If schema generation fails, return basic object schema
+        schema = {"type": "object"}
+
+    # Ensure we have the required fields
+    if "type" not in schema:
+        schema["type"] = "object"
+
+    return schema
+
+
+def dataclass_to_json_schema(cls: type) -> Dict[str, Any]:
+    """Convert Python dataclass to JSON schema.
+
+    Args:
+        cls: Dataclass type
+
+    Returns:
+        JSON schema dictionary
+    """
+    if not dataclasses.is_dataclass(cls):
+        raise ValueError(f"Expected dataclass, got {type(cls)}")
+
+    properties: Dict[str, Any] = {}
+    required: list[str] = []
+
+    for field in dataclasses.fields(cls):
+        # Convert field type to JSON schema
+        field_schema = _type_to_schema(field.type)
+        properties[field.name] = field_schema
+
+        # Check if field is required (no default value)
+        if field.default is dataclasses.MISSING and field.default_factory is dataclasses.MISSING:  # type: ignore
+            required.append(field.name)
+
+    schema = {
+        "type": "object",
+        "properties": properties,
+        "required": required,
+        "additionalProperties": False
+    }
+
+    return schema
+
+
+def _type_to_schema(python_type: Any) -> Dict[str, Any]:
+    """Convert Python type hint to JSON schema type.
+
+    Args:
+        python_type: Python type annotation
+
+    Returns:
+        JSON schema type definition
+    """
+    # Inspect generic origin and arguments (e.g., List[str] -> list, (str,))
+    origin = get_origin(python_type)
+    args = get_args(python_type)
+
+    # Handle NoneType directly
+    if origin is type(None) or python_type is type(None):
+        return {"type": "null"}
+
+    # Handle Union types, including Optional[X], which is Union[X, None]
+    if origin is Union:
+        # Filter out None from union types
+        non_none_types = [t for t in args if t is not type(None)]
+        if len(non_none_types) == 1:
+            # Optional[X] case
+            return _type_to_schema(non_none_types[0])
+        else:
+            # True Union - use anyOf
+            return {"anyOf": [_type_to_schema(t) for t in non_none_types]}
+
+    # Handle List types
+    if origin is list:
+        item_type = args[0] if args else Any
+        return {
+            "type": "array",
+            "items": _type_to_schema(item_type)
+        }
+
+    # Handle Dict types
+    if origin is dict:
+        value_type = args[1] if len(args) > 1 else Any
+        return {
+            "type": "object",
+            "additionalProperties": _type_to_schema(value_type)
+        }
+
+    # Handle basic types
+    if python_type == str:
+        return {"type": "string"}
+    elif python_type == int:
+        return {"type": "integer"}
+    elif python_type == float:
+        return {"type": "number"}
+    elif python_type == bool:
+        return {"type": "boolean"}
+    elif python_type == dict:
+        return {"type": "object"}
+    elif python_type == list:
+        return {"type": "array"}
+    elif python_type == Any:
+        return {}  # Any type - no restrictions
+
+    # Fallback for unknown types
+    return {"type": "string", "description": f"Type: {python_type}"}
+
+
+def is_pydantic_model(type_hint: Any) -> bool:
+    """Check if a type hint is a Pydantic model.
+
+    Args:
+        type_hint: Type annotation to check
+
+    Returns:
+        True if type_hint is a Pydantic BaseModel subclass
+    """
+    if not PYDANTIC_AVAILABLE:
+        return False
+
+    try:
+        return isinstance(type_hint, type) and issubclass(type_hint, BaseModel)
+    except TypeError:
+        return False
+
+
+def extract_function_schemas(func: Callable[..., Any]) -> Tuple[Optional[Dict[str, Any]], Optional[Dict[str, Any]]]:
+    """Extract input and output schemas from function type hints.
+
+    Supports both plain Python types and Pydantic models.
+    Pydantic models provide richer validation and schema generation.
+
+    Args:
+        func: Function to extract schemas from
+
+    Returns:
+        Tuple of (input_schema, output_schema) where each is a JSON Schema dict or None
+    """
+    try:
+        # Get type hints
+        hints = get_type_hints(func)
+        sig = inspect.signature(func)
+
+        # Build input schema from parameters (excluding 'ctx')
+        input_properties = {}
+        required_params = []
+
+        for param_name, param in sig.parameters.items():
+            if param_name == "ctx":
+                continue
+
+            # Get type hint for this parameter
+            if param_name in hints:
+                param_type = hints[param_name]
+
+                # Check if it's a Pydantic model
+                if is_pydantic_model(param_type):
+                    # Use Pydantic's schema generation
+                    input_properties[param_name] = pydantic_to_json_schema(param_type)
+                else:
+                    # Use basic type conversion
+                    input_properties[param_name] = _type_to_schema(param_type)
+            else:
+                # No type hint, use generic object
+                input_properties[param_name] = {"type": "object"}
+
+            # Check if parameter is required (no default value)
+            if param.default is inspect.Parameter.empty:
+                required_params.append(param_name)
+
+        input_schema = None
+        if input_properties:
+            input_schema = {
+                "type": "object",
+                "properties": input_properties,
+            }
+            if required_params:
+                input_schema["required"] = required_params
+
+            # Add description from docstring if available
+            if func.__doc__:
+                docstring = inspect.cleandoc(func.__doc__)
+                first_line = docstring.split('\n')[0].strip()
+                if first_line:
+                    input_schema["description"] = first_line
+
+        # Build output schema from return type hint
+        output_schema = None
+        if "return" in hints:
+            return_type = hints["return"]
+
+            # Check if return type is a Pydantic model
+            if is_pydantic_model(return_type):
+                output_schema = pydantic_to_json_schema(return_type)
+            else:
+                output_schema = _type_to_schema(return_type)
+
+        return input_schema, output_schema
+
+    except Exception:
+        # If schema extraction fails, return None schemas
+        return None, None
+
+
+def extract_function_metadata(func: Callable[..., Any]) -> Dict[str, str]:
+    """Extract metadata from function including description from docstring.
+
+    Args:
+        func: Function to extract metadata from
+
+    Returns:
+        Dictionary with metadata fields like 'description'
+    """
+    metadata = {}
+
+    # Extract description from docstring
+    if func.__doc__:
+        # Get first line of docstring as description
+        docstring = inspect.cleandoc(func.__doc__)
+        first_line = docstring.split('\n')[0].strip()
+        if first_line:
+            metadata["description"] = first_line
+
+    return metadata
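
For reference, a short sketch of what the dataclass converter and signature extractor above produce (the output dict is reformatted with double quotes for readability; behavior follows the code as written):

from dataclasses import dataclass
from typing import List, Optional

from agnt5._schema_utils import dataclass_to_json_schema, extract_function_schemas

@dataclass
class Invoice:
    customer: str
    amounts: List[float]
    memo: Optional[str] = None  # has a default, so not listed in "required"

print(dataclass_to_json_schema(Invoice))
# {"type": "object",
#  "properties": {"customer": {"type": "string"},
#                 "amounts": {"type": "array", "items": {"type": "number"}},
#                 "memo": {"type": "string"}},
#  "required": ["customer", "amounts"],
#  "additionalProperties": False}

async def total(ctx, invoice: Invoice) -> float:
    """Sum the invoice amounts."""
    return sum(invoice.amounts)

# "ctx" is skipped. Note that Invoice is a dataclass, not a Pydantic model, so
# _type_to_schema hits its fallback branch for the parameter; the return hint
# maps to {"type": "number"}.
input_schema, output_schema = extract_function_schemas(total)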
agnt5/_telemetry.py CHANGED
@@ -67,6 +67,12 @@ class OpenTelemetryHandler(logging.Handler):
         # Format the message (applies any formatters)
         message = self.format(record)
 
+        # Include exception traceback if present (from logger.exception() or exc_info=True)
+        if record.exc_info:
+            # formatException() returns the formatted traceback string
+            exc_text = self.formatException(record.exc_info)
+            message = f"{message}\n{exc_text}"
+
         # Forward to Rust tracing system
         # Rust side will:
         # - Add to current span context (inherits invocation.id)
@@ -92,9 +98,10 @@ def setup_context_logger(logger: logging.Logger, log_level: Optional[int] = None
 
     This function:
    1. Removes any existing handlers (avoid duplicates)
-    2. Adds OpenTelemetry handler for OTLP + console output
-    3. Sets appropriate log level
-    4. Disables propagation to avoid duplicate logs
+    2. Adds OpenTelemetry handler for OTLP + console output (when Worker is running)
+    3. Adds console handler for local testing (fallback)
+    4. Sets appropriate log level
+    5. Disables propagation to avoid duplicate logs
 
     Args:
         logger: Logger instance to configure
@@ -103,23 +110,35 @@
     # Remove existing handlers to avoid duplicate logs
     logger.handlers.clear()
 
-    # Add OpenTelemetry handler
+    # Add OpenTelemetry handler (for Worker/platform execution)
     otel_handler = OpenTelemetryHandler()
     otel_handler.setLevel(logging.DEBUG)
 
     # Use simple formatter - Rust side handles structured logging
-    # We just want the message here
     formatter = logging.Formatter('%(message)s')
     otel_handler.setFormatter(formatter)
 
     logger.addHandler(otel_handler)
 
-    # Set log level (default to DEBUG to let Rust side filter)
+    # Add console handler for local testing (fallback when Rust bridge not available)
+    # This ensures logs appear when testing functions locally without Worker
+    console_handler = logging.StreamHandler()
+    console_handler.setLevel(logging.DEBUG)
+
+    # Console format includes level and message; logging.Formatter appends the
+    # exception traceback automatically when a record carries exc_info, so
+    # tracebacks from logger.exception() also show up locally.
+    console_formatter = logging.Formatter('[%(levelname)s] %(message)s')
+    console_handler.setFormatter(console_formatter)
+
+    logger.addHandler(console_handler)
+
+    # Set log level (default to DEBUG to let handlers filter)
     if log_level is None:
         log_level = logging.DEBUG
     logger.setLevel(log_level)
 
-    # Don't propagate to root logger (we handle everything via OpenTelemetry)
+    # Don't propagate to root logger (we handle everything ourselves)
     logger.propagate = False
 
 
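
Taken together, the two telemetry changes mean tracebacks from logger.exception() now reach both the OTLP path and local stdout. A quick local check (a sketch; it assumes setup_context_logger can be called outside a running Worker, in which case only the console fallback handler produces visible output):

import logging

from agnt5._telemetry import setup_context_logger

logger = logging.getLogger("agnt5.demo")
setup_context_logger(logger)

try:
    1 / 0
except ZeroDivisionError:
    # record.exc_info is set, so OpenTelemetryHandler.emit() appends the
    # formatted traceback before forwarding to Rust, and the console
    # fallback handler prints it locally as well
    logger.exception("division failed")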