agnt5 0.1.1__cp39-abi3-manylinux_2_34_aarch64.whl → 0.1.3__cp39-abi3-manylinux_2_34_aarch64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of agnt5 might be problematic; consult the registry's advisory page for more details.

agnt5/__init__.py CHANGED
@@ -7,21 +7,17 @@ Rust core.
7
7
  """
8
8
 
9
9
  from .version import _get_version
10
- # Import compatibility checks
11
10
  from ._compat import _rust_available, _import_error
12
-
13
- # Import decorators
14
11
  from .decorators import function
15
-
16
- # Import high-level Worker
17
- from .worker_manager import Worker
12
+ from .worker import Worker
13
+ from .logging import install_opentelemetry_logging, remove_opentelemetry_logging
18
14
 
19
15
  __version__ = _get_version()
20
16
 
21
-
22
- # Import the Rust core if available
23
- if _rust_available:
24
- from ._core import (
25
- PyWorker
26
- )
27
-
17
+ __all__ = [
18
+ 'function',
19
+ 'Worker',
20
+ 'install_opentelemetry_logging',
21
+ 'remove_opentelemetry_logging',
22
+ '__version__',
23
+ ]
agnt5/_core.abi3.so CHANGED
Binary file
agnt5/components.py ADDED
@@ -0,0 +1,278 @@
1
+ """
2
+ Component abstraction layer for AGNT5 SDK.
3
+
4
+ This module defines the base classes for all component types:
5
+ - Functions: Stateless operations
6
+ - Objects: Virtual objects with persistent state
7
+ - Flows: Multi-step workflows with orchestration
8
+ """
9
+
10
+ from abc import ABC, abstractmethod
11
+ from enum import Enum
12
+ from typing import Any, Callable, Dict, List, Optional, Type, Union
13
+ import uuid
14
+ import json
15
+ import time
16
+
17
+
18
+ class ComponentType(Enum):
19
+ """Component types matching protobuf enum"""
20
+ FUNCTION = "function"
21
+ OBJECT = "object"
22
+ FLOW = "flow"
23
+
24
+
25
+ class ExecutionContext:
26
+ """
27
+ Unified execution context for all component types.
28
+
29
+ Provides methods for:
30
+ - Functions: Simple input/output
31
+ - Objects: State management and mutations
32
+ - Flows: Orchestration and step coordination
33
+ """
34
+
35
+ def __init__(self, invocation_id: str, component_type: ComponentType):
36
+ self.invocation_id = invocation_id
37
+ self.component_type = component_type
38
+
39
+ # Object-specific state management
40
+ self.object_id: Optional[str] = None
41
+ self.state: Optional[Dict[str, Any]] = None
42
+ self.state_mutations: List[Dict[str, Any]] = []
43
+
44
+ # Flow-specific orchestration
45
+ self.flow_instance_id: Optional[str] = None
46
+ self.flow_step: int = 0
47
+ self.checkpoint_data: Optional[Dict[str, Any]] = None
48
+
49
+ # Extensible metadata
50
+ self.metadata: Dict[str, str] = {}
51
+
52
+ # State management methods (for Objects)
53
+ def get_state(self, key: str, default: Any = None) -> Any:
54
+ """Get a value from object state"""
55
+ if self.state is None:
56
+ return default
57
+ return self.state.get(key, default)
58
+
59
+ def set_state(self, key: str, value: Any) -> None:
60
+ """Set a value in object state (records mutation)"""
61
+ if self.state is None:
62
+ self.state = {}
63
+
64
+ old_value = self.state.get(key)
65
+ self.state[key] = value
66
+
67
+ # Record mutation for persistence
68
+ self.state_mutations.append({
69
+ "operation": "set",
70
+ "key": key,
71
+ "old_value": old_value,
72
+ "new_value": value,
73
+ "timestamp": int(time.time() * 1000)
74
+ })
75
+
76
+ def delete_state(self, key: str) -> Any:
77
+ """Delete a value from object state"""
78
+ if self.state is None or key not in self.state:
79
+ return None
80
+
81
+ old_value = self.state.pop(key)
82
+
83
+ # Record mutation
84
+ self.state_mutations.append({
85
+ "operation": "delete",
86
+ "key": key,
87
+ "old_value": old_value,
88
+ "new_value": None,
89
+ "timestamp": int(time.time() * 1000)
90
+ })
91
+
92
+ return old_value
93
+
94
+ # Flow orchestration methods (for Flows - future implementation)
95
+ async def call_function(self, function_name: str, input_data: Any) -> Any:
96
+ """Call another function from within a flow"""
97
+ # TODO: Implement in Phase 3 (Flows)
98
+ raise NotImplementedError("Flow orchestration coming in Phase 3")
99
+
100
+ async def call_object(self, object_type: str, object_id: str,
101
+ method: str, input_data: Any) -> Any:
102
+ """Call a method on a virtual object from within a flow"""
103
+ # TODO: Implement in Phase 3 (Flows)
104
+ raise NotImplementedError("Flow orchestration coming in Phase 3")
105
+
106
+ async def sleep(self, duration_seconds: int) -> None:
107
+ """Durable sleep in a flow"""
108
+ # TODO: Implement in Phase 3 (Flows)
109
+ raise NotImplementedError("Flow orchestration coming in Phase 3")
110
+
111
+ async def wait_for_event(self, event_type: str, timeout_seconds: int = None) -> Any:
112
+ """Wait for external event in a flow"""
113
+ # TODO: Implement in Phase 3 (Flows)
114
+ raise NotImplementedError("Flow orchestration coming in Phase 3")
115
+
116
+
117
+ class Component(ABC):
118
+ """Base class for all component types"""
119
+
120
+ def __init__(self, name: str, component_type: ComponentType):
121
+ self.name = name
122
+ self.component_type = component_type
123
+ self.metadata: Dict[str, str] = {}
124
+
125
+ @abstractmethod
126
+ async def invoke(self, context: ExecutionContext, input_data: Any) -> Any:
127
+ """Execute the component with given context and input"""
128
+ pass
129
+
130
+ def to_component_info(self) -> Dict[str, Any]:
131
+ """Convert to ComponentInfo for registration"""
132
+ return {
133
+ "name": self.name,
134
+ "component_type": self.component_type.value,
135
+ "metadata": self.metadata
136
+ }
137
+
138
+
139
+ class FunctionComponent(Component):
140
+ """Function component - stateless operation"""
141
+
142
+ def __init__(self, name: str, handler: Callable, **kwargs):
143
+ super().__init__(name, ComponentType.FUNCTION)
144
+ self.handler = handler
145
+ self.streaming = kwargs.get('streaming', False)
146
+
147
+ # Add function-specific metadata
148
+ self.metadata.update({
149
+ 'streaming': str(self.streaming),
150
+ 'handler_name': handler.__name__
151
+ })
152
+
153
+ async def invoke(self, context: ExecutionContext, input_data: Any) -> Any:
154
+ """Execute the function"""
155
+ # Functions get simple context and input
156
+ if self.streaming:
157
+ # For streaming functions, return async generator
158
+ result = self.handler(context, input_data)
159
+ if hasattr(result, '__aiter__'):
160
+ return result
161
+ else:
162
+ # Convert sync generator to async
163
+ async def async_generator():
164
+ for item in result:
165
+ yield item
166
+ return async_generator()
167
+ else:
168
+ # Regular function call
169
+ result = self.handler(context, input_data)
170
+ # Handle both sync and async functions
171
+ if hasattr(result, '__await__'):
172
+ return await result
173
+ return result
174
+
175
+
176
+ class ObjectComponent(Component):
177
+ """Virtual Object component - stateful entity"""
178
+
179
+ def __init__(self, name: str, object_class: Type, **kwargs):
180
+ super().__init__(name, ComponentType.OBJECT)
181
+ self.object_class = object_class
182
+
183
+ # Add object-specific metadata
184
+ self.metadata.update({
185
+ 'class_name': object_class.__name__,
186
+ 'methods': [m for m in dir(object_class)
187
+ if not m.startswith('_') and callable(getattr(object_class, m))]
188
+ })
189
+
190
+ async def invoke(self, context: ExecutionContext, input_data: Any) -> Any:
191
+ """Execute a method on the virtual object"""
192
+ # TODO: Implement in Phase 2 (Objects)
193
+ # For now, raise helpful error
194
+ raise NotImplementedError(
195
+ f"Virtual Objects coming in Phase 2. "
196
+ f"Component '{self.name}' is registered but not yet executable. "
197
+ f"Use @function decorator for now."
198
+ )
199
+
200
+
201
+ class FlowComponent(Component):
202
+ """Flow component - multi-step workflow"""
203
+
204
+ def __init__(self, name: str, flow_handler: Callable, **kwargs):
205
+ super().__init__(name, ComponentType.FLOW)
206
+ self.flow_handler = flow_handler
207
+ self.steps = kwargs.get('steps', [])
208
+
209
+ # Add flow-specific metadata
210
+ self.metadata.update({
211
+ 'handler_name': flow_handler.__name__,
212
+ 'step_count': str(len(self.steps)) if self.steps else 'dynamic'
213
+ })
214
+
215
+ async def invoke(self, context: ExecutionContext, input_data: Any) -> Any:
216
+ """Execute the workflow"""
217
+ # TODO: Implement in Phase 3 (Flows)
218
+ # For now, raise helpful error
219
+ raise NotImplementedError(
220
+ f"Flows/Workflows coming in Phase 3. "
221
+ f"Component '{self.name}' is registered but not yet executable. "
222
+ f"Use @function decorator for now."
223
+ )
224
+
225
+
226
+ # Helper classes for future phases
227
+
228
+ class StateManager:
229
+ """Manages state persistence for virtual objects (Phase 2)"""
230
+
231
+ def __init__(self):
232
+ # Will be implemented with actual state backend
233
+ pass
234
+
235
+ async def load_state(self, object_type: str, object_id: str) -> Optional[Dict[str, Any]]:
236
+ """Load object state from persistent storage"""
237
+ # TODO: Implement with NATS KV or similar
238
+ return None
239
+
240
+ async def save_state(self, object_type: str, object_id: str,
241
+ state: Dict[str, Any],
242
+ mutations: List[Dict[str, Any]]) -> None:
243
+ """Save object state to persistent storage"""
244
+ # TODO: Implement with NATS KV or similar
245
+ pass
246
+
247
+
248
+ class FlowExecutor:
249
+ """Manages workflow execution and orchestration (Phase 3)"""
250
+
251
+ def __init__(self):
252
+ # Will be implemented with actual flow execution engine
253
+ pass
254
+
255
+ async def execute_step(self, flow_instance_id: str, step: int,
256
+ input_data: Any) -> Any:
257
+ """Execute a single step in a workflow"""
258
+ # TODO: Implement with deterministic replay
259
+ pass
260
+
261
+ async def checkpoint(self, flow_instance_id: str,
262
+ checkpoint_data: Dict[str, Any]) -> None:
263
+ """Save workflow checkpoint"""
264
+ # TODO: Implement with journal persistence
265
+ pass
266
+
267
+
268
+ # Export main classes
269
+ __all__ = [
270
+ 'ComponentType',
271
+ 'ExecutionContext',
272
+ 'Component',
273
+ 'FunctionComponent',
274
+ 'ObjectComponent',
275
+ 'FlowComponent',
276
+ 'StateManager',
277
+ 'FlowExecutor'
278
+ ]
agnt5/decorators.py CHANGED
@@ -10,6 +10,8 @@ import inspect
10
10
  import logging
11
11
  from typing import Any, Callable, Dict, List, Optional
12
12
 
13
+ # Set default logging level to DEBUG
14
+ logging.getLogger().setLevel(logging.DEBUG)
13
15
  logger = logging.getLogger(__name__)
14
16
 
15
17
  # Global registry of decorated functions
@@ -82,9 +84,10 @@ def get_function_metadata(func: Callable) -> Optional[Dict[str, Any]]:
82
84
 
83
85
  signature = inspect.signature(func)
84
86
  parameters = []
87
+ param_items = list(signature.parameters.items())
85
88
 
86
- for param_name, param in signature.parameters.items():
87
- if param_name == 'ctx': # Skip context parameter
89
+ for i, (param_name, param) in enumerate(param_items):
90
+ if i == 0 and param_name == 'ctx': # Skip context parameter if it's the first one
88
91
  continue
89
92
 
90
93
  param_info = {
@@ -109,6 +112,10 @@ def get_function_metadata(func: Callable) -> Optional[Dict[str, Any]]:
109
112
  }
110
113
 
111
114
 
115
+ # Alias for more intuitive usage
116
+ handler = function
117
+
118
+
112
119
  def clear_registry():
113
120
  """Clear the function registry. Mainly for testing."""
114
121
  global _function_registry
@@ -132,67 +139,102 @@ def invoke_function(handler_name: str, input_data: bytes, context: Any = None) -
132
139
  RuntimeError: If function execution fails
133
140
  """
134
141
  import json
142
+ import traceback
143
+
144
+ # Input validation
145
+ if not handler_name:
146
+ error_msg = "Empty handler name provided"
147
+ logger.error(error_msg)
148
+ raise ValueError(error_msg)
135
149
 
136
150
  if handler_name not in _function_registry:
137
- raise ValueError(f"Handler '{handler_name}' not found")
151
+ error_msg = f"Handler '{handler_name}' not found in registry. Available handlers: {list(_function_registry.keys())}"
152
+ logger.error(error_msg)
153
+ raise ValueError(error_msg)
138
154
 
139
155
  func = _function_registry[handler_name]
156
+ logger.info(f"Invoking handler: {handler_name}")
140
157
 
141
158
  try:
142
159
  # Decode input data
143
160
  if input_data:
144
- print(f"📨 Received function invocation: {handler_name}")
161
+ logger.debug(f"Processing {len(input_data)} bytes for {handler_name}")
145
162
 
146
- # Check if this is protobuf data by looking for the pattern
163
+ # Try direct JSON first
147
164
  try:
148
165
  raw_data = input_data.decode('utf-8')
149
166
  input_params = json.loads(raw_data)
167
+ logger.info(f"Decoded JSON input for {handler_name}: {type(input_params)} with keys: {list(input_params.keys()) if isinstance(input_params, dict) else 'non-dict'}")
168
+ logger.debug(f"Input parameters: {input_params}")
150
169
  except (UnicodeDecodeError, json.JSONDecodeError):
151
- # This is protobuf data - extract the JSON payload
152
- # The JSON is embedded after the \x1a<length> pattern
170
+ # Fallback to protobuf extraction
171
+ logger.debug(f"JSON decoding failed, trying protobuf extraction for {handler_name}")
153
172
  start_idx = input_data.find(b'\x1a')
154
- if start_idx != -1 and start_idx + 1 < len(input_data):
155
- # The byte after \x1a indicates the length of the JSON data
156
- json_length = input_data[start_idx + 1]
157
- json_start = start_idx + 2
158
-
159
- if json_start + json_length <= len(input_data):
160
- json_bytes = input_data[json_start:json_start + json_length]
161
- raw_data = json_bytes.decode('utf-8')
162
- print(f"📋 Extracted JSON from protobuf: {raw_data}")
163
- input_params = json.loads(raw_data)
164
- else:
165
- raise ValueError("Invalid protobuf structure - JSON length exceeds available data")
166
- else:
167
- raise ValueError("Could not find JSON data in protobuf message")
173
+ if start_idx == -1 or start_idx + 1 >= len(input_data):
174
+ logger.error(f"Invalid data format for {handler_name}. Length: {len(input_data)}, Hex: {input_data.hex()}")
175
+ raise RuntimeError("Invalid input data - not JSON and no protobuf marker found")
176
+
177
+ json_length = input_data[start_idx + 1]
178
+ json_start = start_idx + 2
179
+
180
+ if json_start + json_length > len(input_data):
181
+ raise RuntimeError(f"Protobuf structure invalid - length {json_length} exceeds data")
182
+
183
+ json_bytes = input_data[json_start:json_start + json_length]
184
+ raw_data = json_bytes.decode('utf-8')
185
+ input_params = json.loads(raw_data)
186
+ logger.info(f"Extracted from protobuf for {handler_name}: {type(input_params)} with keys: {list(input_params.keys()) if isinstance(input_params, dict) else 'non-dict'}")
187
+ logger.debug(f"Extracted parameters: {input_params}")
188
+
168
189
  else:
169
190
  input_params = {}
191
+ logger.debug(f"No input data provided for {handler_name}")
170
192
 
171
- logger.debug(f"Invoking function {handler_name} with params: {input_params}")
172
-
173
- # Call function with context as first parameter
174
- if isinstance(input_params, dict):
175
- result = func(context, **input_params)
176
- else:
177
- # Handle case where input is not a dict (e.g., single value)
178
- result = func(context, input_params)
193
+ # Execute function
194
+ try:
195
+ sig = inspect.signature(func)
196
+ params = list(sig.parameters.keys())
197
+
198
+ logger.info(f"Calling {handler_name} with signature: {sig}")
199
+
200
+ if params and params[0] == 'ctx':
201
+ if isinstance(input_params, dict):
202
+ logger.debug(f"Calling {handler_name}(ctx, **{input_params})")
203
+ result = func(context, **input_params)
204
+ else:
205
+ logger.debug(f"Calling {handler_name}(ctx, {input_params})")
206
+ result = func(context, input_params)
207
+ else:
208
+ if isinstance(input_params, dict):
209
+ logger.debug(f"Calling {handler_name}(**{input_params})")
210
+ result = func(**input_params)
211
+ else:
212
+ logger.debug(f"Calling {handler_name}({input_params})")
213
+ result = func(input_params)
214
+
215
+ except TypeError as e:
216
+ logger.error(f"Signature mismatch in {handler_name}: {e}. Expected: {sig}, Got: {input_params}")
217
+ raise RuntimeError(f"Function signature mismatch: {e}")
218
+
219
+ except Exception as e:
220
+ logger.error(f"Function {handler_name} failed: {type(e).__name__}: {e}")
221
+ raise RuntimeError(f"Function execution failed: {e}")
179
222
 
180
223
  # Encode result
181
224
  if result is None:
182
- result_data = b""
183
- else:
225
+ return b""
226
+
227
+ try:
184
228
  result_json = json.dumps(result)
185
- result_data = result_json.encode('utf-8')
186
-
187
- logger.debug(f"Function {handler_name} completed successfully")
188
- return result_data
229
+ return result_json.encode('utf-8')
230
+ except (TypeError, ValueError, UnicodeEncodeError) as e:
231
+ logger.error(f"Cannot serialize/encode result from {handler_name}: {type(result)} - {e}")
232
+ raise RuntimeError(f"Result serialization/encoding error: {e}")
233
+
234
+ except RuntimeError:
235
+ raise
189
236
 
190
- except json.JSONDecodeError as e:
191
- print(f"❌ JSON parsing failed: {e}")
192
- print(f"📋 Failed to parse: {repr(raw_data if 'raw_data' in locals() else 'No raw_data available')}")
193
- logger.error(f"JSON decode error for {handler_name}: {e}")
194
- raise RuntimeError(f"Invalid JSON input: {e}")
195
237
  except Exception as e:
196
- print(f" Function '{handler_name}' failed: {type(e).__name__}: {e}")
197
- logger.error(f"Function {handler_name} failed: {e}")
198
- raise RuntimeError(f"Function execution failed: {e}")
238
+ logger.error(f"Unexpected error in {handler_name}: {type(e).__name__}: {e}")
239
+ logger.debug(f"Stack trace: {traceback.format_exc()}")
240
+ raise RuntimeError(f"Unexpected error: {e}")
agnt5/logging.py ADDED
@@ -0,0 +1,140 @@
1
+ """
2
+ OpenTelemetry logging integration for AGNT5 Python SDK.
3
+
4
+ This module provides a logging handler that forwards Python logs to the Rust core
5
+ for integration with OpenTelemetry. All logs are automatically correlated with
6
+ traces and sent to the OTLP collector.
7
+ """
8
+
9
+ import logging
10
+ import os
11
+ from typing import Optional
12
+
13
+ from ._compat import _rust_available
14
+
15
+ if _rust_available:
16
+ from ._core import log_from_python
17
+
18
+
19
+ class OpenTelemetryHandler(logging.Handler):
20
+ """
21
+ A logging handler that forwards Python logs to Rust for OpenTelemetry integration.
22
+
23
+ This handler automatically captures all Python logs and forwards them to the
24
+ Rust core where they are integrated with OpenTelemetry tracing and sent to
25
+ the OTLP collector. Logs are automatically correlated with active traces.
26
+ """
27
+
28
+ def __init__(self, level: int = logging.NOTSET):
29
+ super().__init__(level)
30
+
31
+ if not _rust_available:
32
+ raise RuntimeError("OpenTelemetry logging handler requires Rust core")
33
+
34
+ def emit(self, record: logging.LogRecord) -> None:
35
+ """
36
+ Forward a log record to Rust for OpenTelemetry integration.
37
+
38
+ Args:
39
+ record: The Python log record to forward
40
+ """
41
+ try:
42
+ # Format the message
43
+ message = self.format(record)
44
+
45
+ # Extract metadata for Rust
46
+ level = record.levelname
47
+ target = record.name # Logger name (e.g., 'agnt5.worker')
48
+ module_path = getattr(record, 'module', record.name)
49
+ filename = getattr(record, 'pathname', None)
50
+ line = getattr(record, 'lineno', None)
51
+
52
+ # Make filename relative if it's absolute
53
+ if filename and os.path.isabs(filename):
54
+ try:
55
+ # Try to make it relative to current working directory
56
+ filename = os.path.relpath(filename)
57
+ except ValueError:
58
+ # If relpath fails (e.g., different drives on Windows), use basename
59
+ filename = os.path.basename(filename)
60
+
61
+ # Forward to Rust core - silently ignore if telemetry not ready yet
62
+ try:
63
+ log_from_python(
64
+ level=level,
65
+ message=message,
66
+ target=target,
67
+ module_path=module_path,
68
+ filename=filename,
69
+ line=line
70
+ )
71
+ except Exception:
72
+ # Silently ignore if Rust telemetry system not ready yet
73
+ # This handles the timing issue during startup
74
+ pass
75
+
76
+ except Exception as e:
77
+ # Don't let logging errors crash the application
78
+ # Use handleError to maintain Python logging standards
79
+ self.handleError(record)
80
+
81
+
82
+ def install_opentelemetry_logging(
83
+ logger: Optional[logging.Logger] = None,
84
+ level: int = logging.INFO,
85
+ format_string: Optional[str] = None
86
+ ) -> OpenTelemetryHandler:
87
+ """
88
+ Install OpenTelemetry logging handler on a logger.
89
+
90
+ Args:
91
+ logger: Logger to install handler on. If None, uses root logger.
92
+ level: Minimum log level to forward to OpenTelemetry
93
+ format_string: Optional format string for log messages
94
+
95
+ Returns:
96
+ The installed OpenTelemetryHandler instance
97
+
98
+ Example:
99
+ # Install on root logger (captures all logs)
100
+ install_opentelemetry_logging()
101
+
102
+ # Install on specific logger
103
+ logger = logging.getLogger('my_app')
104
+ install_opentelemetry_logging(logger, level=logging.DEBUG)
105
+ """
106
+ if logger is None:
107
+ logger = logging.getLogger()
108
+
109
+ # Create handler
110
+ handler = OpenTelemetryHandler(level=level)
111
+
112
+ # Set formatter if provided
113
+ if format_string:
114
+ formatter = logging.Formatter(format_string)
115
+ handler.setFormatter(formatter)
116
+
117
+ # Install handler
118
+ logger.addHandler(handler)
119
+
120
+ return handler
121
+
122
+
123
+ def remove_opentelemetry_logging(logger: Optional[logging.Logger] = None) -> None:
124
+ """
125
+ Remove OpenTelemetry logging handlers from a logger.
126
+
127
+ Args:
128
+ logger: Logger to remove handlers from. If None, uses root logger.
129
+ """
130
+ if logger is None:
131
+ logger = logging.getLogger()
132
+
133
+ # Remove all OpenTelemetryHandler instances
134
+ handlers_to_remove = [
135
+ handler for handler in logger.handlers
136
+ if isinstance(handler, OpenTelemetryHandler)
137
+ ]
138
+
139
+ for handler in handlers_to_remove:
140
+ logger.removeHandler(handler)
@@ -0,0 +1,13 @@
1
+ """
2
+ Runtime adapters for different execution environments.
3
+ """
4
+
5
+ from .base import RuntimeAdapter
6
+ from .worker import WorkerRuntime
7
+ from .asgi import ASGIRuntime
8
+
9
+ __all__ = [
10
+ 'RuntimeAdapter',
11
+ 'WorkerRuntime',
12
+ 'ASGIRuntime',
13
+ ]