agnt5 0.3.2a1__cp310-abi3-manylinux_2_34_aarch64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of agnt5 might be problematic. Click here for more details.
- agnt5/__init__.py +119 -0
- agnt5/_compat.py +16 -0
- agnt5/_core.abi3.so +0 -0
- agnt5/_retry_utils.py +196 -0
- agnt5/_schema_utils.py +312 -0
- agnt5/_sentry.py +515 -0
- agnt5/_telemetry.py +279 -0
- agnt5/agent/__init__.py +48 -0
- agnt5/agent/context.py +581 -0
- agnt5/agent/core.py +1782 -0
- agnt5/agent/decorator.py +112 -0
- agnt5/agent/handoff.py +105 -0
- agnt5/agent/registry.py +68 -0
- agnt5/agent/result.py +39 -0
- agnt5/checkpoint.py +246 -0
- agnt5/client.py +1556 -0
- agnt5/context.py +288 -0
- agnt5/emit.py +197 -0
- agnt5/entity.py +1230 -0
- agnt5/events.py +567 -0
- agnt5/exceptions.py +110 -0
- agnt5/function.py +330 -0
- agnt5/journal.py +212 -0
- agnt5/lm.py +1266 -0
- agnt5/memoization.py +379 -0
- agnt5/memory.py +521 -0
- agnt5/tool.py +721 -0
- agnt5/tracing.py +300 -0
- agnt5/types.py +111 -0
- agnt5/version.py +19 -0
- agnt5/worker.py +2094 -0
- agnt5/workflow.py +1632 -0
- agnt5-0.3.2a1.dist-info/METADATA +26 -0
- agnt5-0.3.2a1.dist-info/RECORD +35 -0
- agnt5-0.3.2a1.dist-info/WHEEL +4 -0
agnt5/function.py
ADDED
|
@@ -0,0 +1,330 @@
|
|
|
1
|
+
"""Function component implementation for AGNT5 SDK."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import asyncio
|
|
6
|
+
import functools
|
|
7
|
+
import inspect
|
|
8
|
+
import uuid
|
|
9
|
+
from typing import Any, Awaitable, Callable, Dict, Optional, TypeVar, Union, cast
|
|
10
|
+
|
|
11
|
+
from ._retry_utils import execute_with_retry, parse_backoff_policy, parse_retry_policy
|
|
12
|
+
from ._schema_utils import extract_function_metadata, extract_function_schemas
|
|
13
|
+
from .context import Context, set_current_context
|
|
14
|
+
from .exceptions import RetryError
|
|
15
|
+
from .types import BackoffPolicy, BackoffType, FunctionConfig, HandlerFunc, RetryPolicy
|
|
16
|
+
|
|
17
|
+
# Generic type variable for handler signatures.
T = TypeVar("T")

# Global function registry: maps function name -> FunctionConfig.
# Populated by the @function decorator (via FunctionRegistry.register()).
_FUNCTION_REGISTRY: Dict[str, FunctionConfig] = {}
|
|
21
|
+
|
|
22
|
+
class FunctionContext(Context):
    """
    Lightweight context for stateless functions.

    AGNT5 Philosophy: Context is a convenience, not a requirement.
    The best function is one that doesn't need context at all!

    Provides only:
    - Quick logging (ctx.log())
    - Execution metadata (run_id, attempt)
    - Smart retry helper (should_retry())
    - Non-durable sleep

    Does NOT provide:
    - Orchestration (task, parallel, gather) - use workflows
    - State management (get, set, delete) - functions are stateless
    - Checkpointing (step) - functions are atomic
    """

    def __init__(
        self,
        run_id: str,
        attempt: int = 0,
        runtime_context: Optional[Any] = None,
        retry_policy: Optional[Any] = None,
        is_streaming: bool = False,
        tenant_id: Optional[str] = None,
    ) -> None:
        """
        Initialize function context.

        Args:
            run_id: Unique execution identifier
            attempt: Retry attempt number (0-indexed)
            runtime_context: RuntimeContext for trace correlation
            retry_policy: RetryPolicy for should_retry() checks
            is_streaming: Whether this is a streaming request (for real-time SSE log delivery)
            tenant_id: Tenant ID for multi-tenant deployments
        """
        # retry_policy is NOT forwarded to the base Context; it is kept locally
        # for should_retry() checks only.
        super().__init__(run_id, attempt, runtime_context, is_streaming, tenant_id)
        self._retry_policy = retry_policy

    # === Quick Logging ===

    def log(self, message: str, **extra: Any) -> None:
        """
        Quick logging shorthand with structured data.

        Keyword arguments are attached as structured fields via the logger's
        ``extra`` mechanism.

        Example:
            ctx.log("Processing payment", amount=100.50, user_id="123")
        """
        self._logger.info(message, extra=extra)

    # === Smart Execution ===

    def should_retry(self, error: Exception) -> bool:
        """
        Check if error is retryable based on configured policy.

        Example:
            try:
                result = await external_api()
            except Exception as e:
                if not ctx.should_retry(e):
                    raise  # Fail fast for non-retryable errors
                # Otherwise let retry policy handle it
                raise

        Returns:
            True if error is retryable, False otherwise
        """
        # TODO: Implement retry policy checks against self._retry_policy
        # For now, all errors are retryable (let retry policy handle it)
        return True

    async def sleep(self, seconds: float) -> None:
        """
        Non-durable async sleep.

        For durable sleep across failures, use workflows.

        Args:
            seconds: Number of seconds to sleep
        """
        # asyncio is already imported at module scope; the previous
        # function-local re-import was redundant.
        await asyncio.sleep(seconds)
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
class FunctionRegistry:
    """Registry for function handlers."""

    @staticmethod
    def register(config: FunctionConfig) -> None:
        """Register a function handler.

        Args:
            config: Function configuration to register

        Raises:
            ValueError: If a function with the same name is already registered
        """
        # Reject duplicate names so two modules cannot silently shadow
        # each other's handlers.
        prior = _FUNCTION_REGISTRY.get(config.name)
        if prior is not None:
            message = (
                f"Function name collision: '{config.name}' is already registered.\n"
                f" Existing: {prior.handler.__module__}.{prior.handler.__name__}\n"
                f" New: {config.handler.__module__}.{config.handler.__name__}\n"
                f"Please use a different function name or use name= parameter to specify a unique name."
            )
            raise ValueError(message)

        _FUNCTION_REGISTRY[config.name] = config

    @staticmethod
    def get(name: str) -> Optional[FunctionConfig]:
        """Get function configuration by name."""
        return _FUNCTION_REGISTRY.get(name)

    @staticmethod
    def all() -> Dict[str, FunctionConfig]:
        """Get all registered functions."""
        return dict(_FUNCTION_REGISTRY)

    @staticmethod
    def clear() -> None:
        """Clear all registered functions."""
        _FUNCTION_REGISTRY.clear()
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
def function(
    _func: Optional[Callable[..., Any]] = None,
    *,
    name: Optional[str] = None,
    retries: Optional[Union[int, Dict[str, Any], RetryPolicy]] = None,
    backoff: Optional[Union[str, Dict[str, Any], BackoffPolicy]] = None,
    timeout_ms: Optional[int] = None,
) -> Callable[..., Any]:
    """
    Decorator to mark a function as an AGNT5 durable function.

    Args:
        name: Custom function name (default: function's __name__)
        retries: Retry policy configuration. Can be:
            - int: max attempts (e.g., 5)
            - dict: RetryPolicy params (e.g., {"max_attempts": 5, "initial_interval_ms": 1000})
            - RetryPolicy: full policy object
        backoff: Backoff policy for retries. Can be:
            - str: backoff type ("constant", "linear", "exponential")
            - dict: BackoffPolicy params (e.g., {"type": "exponential", "multiplier": 2.0})
            - BackoffPolicy: full policy object
        timeout_ms: Maximum execution time in milliseconds. If the function
            takes longer than this, it raises asyncio.TimeoutError.

    Note:
        Sync Functions: Synchronous functions are automatically executed in a thread pool
        to prevent blocking the event loop. This is ideal for I/O-bound operations
        (requests.get(), file I/O, etc.). For CPU-bound operations or when you need
        explicit control over concurrency, use async functions instead.

    Example:
        # Basic function with context
        @function
        async def greet(ctx: FunctionContext, name: str) -> str:
            ctx.log(f"Greeting {name}")  # AGNT5 shorthand!
            return f"Hello, {name}!"

        # Simple function without context (optional)
        @function
        async def add(a: int, b: int) -> int:
            return a + b

        # With Pydantic models (automatic validation + rich schemas)
        from pydantic import BaseModel

        class UserInput(BaseModel):
            name: str
            age: int

        class UserOutput(BaseModel):
            greeting: str
            is_adult: bool

        @function
        async def process_user(ctx: FunctionContext, user: UserInput) -> UserOutput:
            ctx.log(f"Processing user {user.name}")
            return UserOutput(
                greeting=f"Hello, {user.name}!",
                is_adult=user.age >= 18
            )

        # Simple retry count
        @function(retries=5)
        async def with_retries(data: str) -> str:
            return data.upper()

        # Dict configuration
        @function(retries={"max_attempts": 5}, backoff="exponential")
        async def advanced(a: int, b: int) -> int:
            return a + b
    """

    def decorator(func: Callable[..., Any]) -> Callable[..., Any]:
        # Get function name (explicit name= wins over __name__)
        func_name = name or func.__name__

        # Validate function signature and check if context is needed
        sig = inspect.signature(func)
        params = list(sig.parameters.values())

        # Check if function declares 'ctx' parameter.
        # NOTE(review): detection is purely by the first parameter being
        # literally named "ctx" — a first param with any other name is
        # treated as a plain argument.
        needs_context = params and params[0].name == "ctx"

        # Convert sync to async if needed
        # Note: Async generators should NOT be wrapped - they need to be returned as-is
        if inspect.iscoroutinefunction(func) or inspect.isasyncgenfunction(func):
            handler_func = cast(HandlerFunc, func)
        else:
            # Wrap sync function to run in thread pool (prevents blocking event loop)
            @functools.wraps(func)
            async def async_wrapper(*args: Any, **kwargs: Any) -> Any:
                loop = asyncio.get_running_loop()
                # Run sync function in thread pool executor to prevent blocking.
                # The lambda captures args/kwargs so the executor call takes no arguments.
                return await loop.run_in_executor(None, lambda: func(*args, **kwargs))

            handler_func = cast(HandlerFunc, async_wrapper)

        # Extract schemas from type hints
        input_schema, output_schema = extract_function_schemas(func)

        # Extract metadata (description, etc.)
        metadata = extract_function_metadata(func)

        # Parse retry and backoff policies from flexible formats
        # (int/dict/str shorthand -> full policy objects)
        retry_policy = parse_retry_policy(retries)
        backoff_policy = parse_backoff_policy(backoff)

        # Register function at decoration (import) time; name collisions
        # raise ValueError from FunctionRegistry.register.
        config = FunctionConfig(
            name=func_name,
            handler=handler_func,
            retries=retry_policy,
            backoff=backoff_policy,
            timeout_ms=timeout_ms,
            input_schema=input_schema,
            output_schema=output_schema,
            metadata=metadata,
        )
        FunctionRegistry.register(config)

        # Create wrapper with retry logic
        @functools.wraps(func)
        async def wrapper(*args: Any, **kwargs: Any) -> Any:
            # Extract or create context based on function signature
            if needs_context:
                # Function declares ctx parameter - first argument must be FunctionContext
                if not args or not isinstance(args[0], FunctionContext):
                    raise TypeError(
                        f"Function '{func_name}' requires FunctionContext as first argument. "
                        f"Usage: await {func_name}(ctx, ...)"
                    )
                ctx = args[0]
                func_args = args[1:]
            else:
                # Function doesn't use context - create a minimal one for internal use
                # But first check if a context was passed anyway (for Worker execution)
                if args and isinstance(args[0], FunctionContext):
                    # Context was provided by Worker - use it but don't pass to function
                    ctx = args[0]
                    func_args = args[1:]
                else:
                    # No context provided - create a default one with a
                    # locally-generated run id (prefix "local-").
                    ctx = FunctionContext(
                        run_id=f"local-{uuid.uuid4().hex[:8]}",
                        retry_policy=retry_policy
                    )
                    func_args = args

            # Set context in task-local storage for automatic propagation
            token = set_current_context(ctx)
            try:
                # Execute with retry; fall back to default policies when
                # none were configured on the decorator.
                return await execute_with_retry(
                    handler_func,
                    ctx,
                    config.retries or RetryPolicy(),
                    config.backoff or BackoffPolicy(),
                    needs_context,
                    config.timeout_ms,
                    *func_args,
                    **kwargs,
                )
            finally:
                # Always reset context to prevent leakage across tasks.
                # NOTE(review): reaches into the private _current_context
                # contextvar rather than a public reset helper.
                from .context import _current_context
                _current_context.reset(token)

        # Store config on wrapper for introspection
        wrapper._agnt5_config = config  # type: ignore
        return wrapper

    # Handle both @function and @function(...) syntax
    if _func is None:
        return decorator
    else:
        return decorator(_func)
|
agnt5/journal.py
ADDED
|
@@ -0,0 +1,212 @@
|
|
|
1
|
+
"""Journal client for LLM observability events.
|
|
2
|
+
|
|
3
|
+
This module provides functions to write observability events to the AGNT5 journal.
|
|
4
|
+
These events are used for:
|
|
5
|
+
- LLM call tracking (lm.call.started, lm.call.completed, lm.call.failed)
|
|
6
|
+
- Cost and usage metrics
|
|
7
|
+
- Debugging and tracing
|
|
8
|
+
- Real-time SSE streaming
|
|
9
|
+
|
|
10
|
+
Note: This is for OBSERVABILITY, not memoization. Use CheckpointClient for memoization.
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
from __future__ import annotations
|
|
14
|
+
|
|
15
|
+
import json
|
|
16
|
+
import logging
|
|
17
|
+
import time
|
|
18
|
+
from dataclasses import dataclass, field
|
|
19
|
+
from typing import Any, Optional
|
|
20
|
+
|
|
21
|
+
from ._telemetry import setup_module_logger
|
|
22
|
+
|
|
23
|
+
# Module-level logger configured through the SDK's telemetry helper.
logger = setup_module_logger(__name__)

# Import the journal writer from the Rust core if the native extension is
# present; otherwise mark the journal unavailable so the write_lm_call_*
# helpers degrade to no-ops instead of raising.
try:
    from ._core import write_journal_event as _write_journal_event
    _JOURNAL_AVAILABLE = True
except ImportError:
    _JOURNAL_AVAILABLE = False
    logger.debug("Journal client not available (Rust core not loaded)")
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
@dataclass
class LMCallStartedEvent:
    """Payload for an ``lm.call.started`` journal event."""

    # Which LLM backend is handling the call.
    model: str
    provider: str
    # Optional request parameters captured at call time.
    prompt_hash: Optional[str] = None
    temperature: Optional[float] = None
    max_tokens: Optional[int] = None
    tools_count: int = 0
    # Captured when the event object is created.
    timestamp_ns: int = field(default_factory=time.time_ns)

    def to_dict(self) -> dict[str, Any]:
        """Serialize the event into a plain dict for JSON encoding."""
        return dict(
            model=self.model,
            provider=self.provider,
            prompt_hash=self.prompt_hash,
            temperature=self.temperature,
            max_tokens=self.max_tokens,
            tools_count=self.tools_count,
            timestamp_ns=self.timestamp_ns,
        )
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
@dataclass
class LMCallCompletedEvent:
    """Payload for an ``lm.call.completed`` journal event."""

    # Which LLM backend handled the call.
    model: str
    provider: str
    # Usage accounting for the completed call.
    input_tokens: int
    output_tokens: int
    total_tokens: int
    latency_ms: int
    # Optional outcome details.
    cost_estimate_usd: Optional[float] = None
    finish_reason: Optional[str] = None
    tool_calls_count: int = 0
    # Captured when the event object is created.
    timestamp_ns: int = field(default_factory=time.time_ns)

    def to_dict(self) -> dict[str, Any]:
        """Serialize the event into a plain dict for JSON encoding."""
        return dict(
            model=self.model,
            provider=self.provider,
            input_tokens=self.input_tokens,
            output_tokens=self.output_tokens,
            total_tokens=self.total_tokens,
            latency_ms=self.latency_ms,
            cost_estimate_usd=self.cost_estimate_usd,
            finish_reason=self.finish_reason,
            tool_calls_count=self.tool_calls_count,
            timestamp_ns=self.timestamp_ns,
        )
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
@dataclass
class LMCallFailedEvent:
    """Payload for an ``lm.call.failed`` journal event."""

    # Which LLM backend the failed call targeted.
    model: str
    provider: str
    # Error classification and human-readable detail.
    error_code: str
    error_message: str
    latency_ms: int
    # Captured when the event object is created.
    timestamp_ns: int = field(default_factory=time.time_ns)

    def to_dict(self) -> dict[str, Any]:
        """Serialize the event into a plain dict for JSON encoding."""
        return dict(
            model=self.model,
            provider=self.provider,
            error_code=self.error_code,
            error_message=self.error_message,
            latency_ms=self.latency_ms,
            timestamp_ns=self.timestamp_ns,
        )
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
async def write_lm_call_started(
    run_id: str,
    trace_id: str,
    span_id: str,
    event: LMCallStartedEvent,
    tenant_id: Optional[str] = None,
) -> None:
    """Write an lm.call.started event to the journal.

    Best-effort: if the Rust core is unavailable, or the write fails, the
    event is dropped with a debug/warning log instead of raising.

    Args:
        run_id: The run ID to associate the event with
        trace_id: Trace ID for correlation
        span_id: Span ID for correlation
        event: The event data
        tenant_id: Optional tenant ID
    """
    if not _JOURNAL_AVAILABLE:
        logger.debug("Journal not available, skipping lm.call.started event")
        return

    try:
        data = json.dumps(event.to_dict()).encode("utf-8")
        await _write_journal_event(
            run_id=run_id,
            event_type="lm.call.started",
            data=data,
            trace_id=trace_id,
            span_id=span_id,
            tenant_id=tenant_id,
            source_timestamp_ns=event.timestamp_ns,
        )
        # Lazy %-style args: formatting only happens if the level is enabled.
        logger.debug("Wrote lm.call.started event for run_id=%s", run_id)
    except Exception as e:
        # Observability must never break the caller: log and continue.
        logger.warning("Failed to write lm.call.started event: %s", e)
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
async def write_lm_call_completed(
    run_id: str,
    trace_id: str,
    span_id: str,
    event: LMCallCompletedEvent,
    tenant_id: Optional[str] = None,
) -> None:
    """Write an lm.call.completed event to the journal.

    Best-effort: if the Rust core is unavailable, or the write fails, the
    event is dropped with a debug/warning log instead of raising.

    Args:
        run_id: The run ID to associate the event with
        trace_id: Trace ID for correlation
        span_id: Span ID for correlation
        event: The event data
        tenant_id: Optional tenant ID
    """
    if not _JOURNAL_AVAILABLE:
        logger.debug("Journal not available, skipping lm.call.completed event")
        return

    try:
        data = json.dumps(event.to_dict()).encode("utf-8")
        await _write_journal_event(
            run_id=run_id,
            event_type="lm.call.completed",
            data=data,
            trace_id=trace_id,
            span_id=span_id,
            tenant_id=tenant_id,
            source_timestamp_ns=event.timestamp_ns,
        )
        # Lazy %-style args: formatting only happens if the level is enabled.
        logger.debug(
            "Wrote lm.call.completed event for run_id=%s, tokens=%s",
            run_id,
            event.total_tokens,
        )
    except Exception as e:
        # Observability must never break the caller: log and continue.
        logger.warning("Failed to write lm.call.completed event: %s", e)
|
|
177
|
+
|
|
178
|
+
|
|
179
|
+
async def write_lm_call_failed(
    run_id: str,
    trace_id: str,
    span_id: str,
    event: LMCallFailedEvent,
    tenant_id: Optional[str] = None,
) -> None:
    """Write an lm.call.failed event to the journal.

    Best-effort: if the Rust core is unavailable, or the write fails, the
    event is dropped with a debug/warning log instead of raising.

    Args:
        run_id: The run ID to associate the event with
        trace_id: Trace ID for correlation
        span_id: Span ID for correlation
        event: The event data
        tenant_id: Optional tenant ID
    """
    if not _JOURNAL_AVAILABLE:
        logger.debug("Journal not available, skipping lm.call.failed event")
        return

    try:
        data = json.dumps(event.to_dict()).encode("utf-8")
        await _write_journal_event(
            run_id=run_id,
            event_type="lm.call.failed",
            data=data,
            trace_id=trace_id,
            span_id=span_id,
            tenant_id=tenant_id,
            source_timestamp_ns=event.timestamp_ns,
        )
        # Lazy %-style args: formatting only happens if the level is enabled.
        logger.debug(
            "Wrote lm.call.failed event for run_id=%s, error=%s",
            run_id,
            event.error_code,
        )
    except Exception as e:
        # Observability must never break the caller: log and continue.
        logger.warning("Failed to write lm.call.failed event: %s", e)
|