lucidicai 1.2.16__py3-none-any.whl → 1.2.17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lucidicai/__init__.py +93 -19
- lucidicai/client.py +3 -2
- lucidicai/decorators.py +357 -0
- lucidicai/image_upload.py +24 -1
- lucidicai/providers/image_storage.py +45 -0
- lucidicai/providers/lucidic_exporter.py +259 -0
- lucidicai/providers/lucidic_span_processor.py +648 -0
- lucidicai/providers/openai_agents_instrumentor.py +307 -0
- lucidicai/providers/otel_handlers.py +266 -0
- lucidicai/providers/otel_init.py +197 -0
- lucidicai/providers/otel_provider.py +168 -0
- lucidicai/providers/pydantic_ai_handler.py +1 -1
- lucidicai/providers/text_storage.py +53 -0
- lucidicai/providers/universal_image_interceptor.py +276 -0
- lucidicai/session.py +7 -0
- lucidicai/telemetry/__init__.py +0 -0
- lucidicai/telemetry/base_provider.py +21 -0
- lucidicai/telemetry/lucidic_exporter.py +259 -0
- lucidicai/telemetry/lucidic_span_processor.py +665 -0
- lucidicai/telemetry/openai_agents_instrumentor.py +306 -0
- lucidicai/telemetry/opentelemetry_converter.py +436 -0
- lucidicai/telemetry/otel_handlers.py +266 -0
- lucidicai/telemetry/otel_init.py +197 -0
- lucidicai/telemetry/otel_provider.py +168 -0
- lucidicai/telemetry/pydantic_ai_handler.py +600 -0
- lucidicai/telemetry/utils/__init__.py +0 -0
- lucidicai/telemetry/utils/image_storage.py +45 -0
- lucidicai/telemetry/utils/text_storage.py +53 -0
- lucidicai/telemetry/utils/universal_image_interceptor.py +276 -0
- {lucidicai-1.2.16.dist-info → lucidicai-1.2.17.dist-info}/METADATA +1 -1
- lucidicai-1.2.17.dist-info/RECORD +49 -0
- lucidicai-1.2.16.dist-info/RECORD +0 -25
- {lucidicai-1.2.16.dist-info → lucidicai-1.2.17.dist-info}/WHEEL +0 -0
- {lucidicai-1.2.16.dist-info → lucidicai-1.2.17.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,436 @@
|
|
|
1
|
+
"""OpenTelemetry to Lucidic AI concept converter for OpenAI Agents SDK"""
|
|
2
|
+
from typing import Dict, Optional, List, Any, Union
|
|
3
|
+
from dataclasses import dataclass
|
|
4
|
+
import logging
|
|
5
|
+
import json
|
|
6
|
+
|
|
7
|
+
logger = logging.getLogger("Lucidic")
|
|
8
|
+
|
|
9
|
+
# OpenAI Agents SDK span types (from AgentOps implementation)
# The converter dispatches on these values in on_span_start and
# _extract_step_details_from_span.
AGENT_SPAN = "agent"            # an agent execution span
FUNCTION_SPAN = "function"      # a tool/function invocation span
GENERATION_SPAN = "generation"  # an LLM generation (model call) span
HANDOFF_SPAN = "handoff"        # control transfer between agents
RESPONSE_SPAN = "response"      # model-response post-processing span
|
|
15
|
+
|
|
16
|
+
@dataclass
class SpanInfo:
    """Stores information about a span for conversion"""
    span_id: str                      # unique id (see OpenTelemetryConverter._generate_span_id)
    parent_id: Optional[str]          # parent span id; None for root spans
    name: str                         # span display name
    span_type: str                    # one of the *_SPAN module constants
    span_data: Any  # The actual span data object (AgentSpanData, FunctionSpanData, etc.)
    start_time: float                 # span start timestamp (0 when unknown)
    end_time: Optional[float] = None  # filled in by on_span_end
    status: Optional[str] = None      # completion status; "error" marks failure
    error: Optional[str] = None       # error payload captured at span end
    step_id: Optional[str] = None  # Associated Lucidic step ID
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
class OpenTelemetryConverter:
|
|
32
|
+
"""Converts OpenTelemetry concepts to Lucidic AI concepts
|
|
33
|
+
|
|
34
|
+
Mapping:
|
|
35
|
+
- Traces → Sessions
|
|
36
|
+
- Spans → Steps (including nested spans)
|
|
37
|
+
- GenerationSpanData → Creates both Step AND Event (to capture raw LLM I/O)
|
|
38
|
+
- FunctionSpanData → Creates Step with Event for function call details
|
|
39
|
+
"""
|
|
40
|
+
|
|
41
|
+
def __init__(self):
    """Initialize empty bookkeeping tables mapping OTel ids to Lucidic ids."""
    self.active_sessions: Dict[str, str] = {}  # trace_id -> session_id
    self.active_steps: Dict[str, str] = {}  # span_id -> step_id
    self.span_hierarchy: Dict[str, List[str]] = {}  # parent_id -> [child_ids]
    self.span_info: Dict[str, SpanInfo] = {}  # span_id -> SpanInfo
    self.current_agent_context: Optional[Dict[str, Any]] = None  # Track current agent for context
|
|
47
|
+
|
|
48
|
+
def on_trace_start(self, trace_data: Dict[str, Any], session) -> str:
    """Convert a new trace to a Lucidic session.

    The session itself is already created by the provider; this method
    records the trace_id -> session_id mapping and pushes trace metadata
    (task, tags) onto the session.

    Args:
        trace_data: Trace payload; "trace_id", "name", and "attributes" may be read.
        session: Active Lucidic session object.

    Returns:
        The Lucidic session id.
    """
    trace_id = trace_data.get("trace_id")

    # Trace becomes a Session
    logger.info(f"Creating session from trace {trace_id}")

    # Extract the task description for the session.
    # (A previously computed `session_name` was never used and has been removed.)
    task = self._extract_task_from_trace(trace_data)

    # Session is already initialized by the provider; just record the mapping
    self.active_sessions[trace_id] = session.session_id

    # Update session with trace metadata
    session.update_session(
        task=task,
        tags={"source": "openai_agents_sdk", "trace_id": trace_id}
    )

    return session.session_id
|
|
69
|
+
|
|
70
|
+
def on_span_start(self, span_type: str, span_data: Any, parent_context: Any, session) -> str:
    """Convert a span to a Lucidic step.

    Registers the span in the local bookkeeping tables, creates a step
    for it, and — for generation/function spans — also records an event
    capturing the raw I/O.

    Args:
        span_type: Type of span (agent, function, generation, etc.)
        span_data: The actual span data object (AgentSpanData, FunctionSpanData, etc.)
        parent_context: Parent span context
        session: Active Lucidic session

    Returns:
        The id of the newly created Lucidic step.
    """
    # Derive ids for this span and its parent
    span_id = self._generate_span_id(span_data)
    parent_id = self._get_parent_id(parent_context)

    # Record the span so on_span_end can find it later
    info = SpanInfo(
        span_id=span_id,
        parent_id=parent_id,
        name=getattr(span_data, 'name', str(span_data)),
        span_type=span_type,
        span_data=span_data,
        start_time=getattr(span_data, 'start_time', 0),
    )
    self.span_info[span_id] = info

    # Maintain the parent -> children hierarchy
    if parent_id:
        self.span_hierarchy.setdefault(parent_id, []).append(span_id)

    # Every span becomes a Step
    logger.info(f"Creating step from {span_type} span: {span_id}")

    goal, action, state = self._extract_step_details_from_span(span_type, span_data)
    step_id = session.create_step(goal=goal, action=action, state=state)

    self.active_steps[span_id] = step_id
    info.step_id = step_id

    # Generation and function spans additionally get an event with raw details
    if span_type == GENERATION_SPAN:
        self._create_llm_event(span_data, session, step_id)
    elif span_type == FUNCTION_SPAN:
        self._create_function_event(span_data, session, step_id)

    # Agent spans update the "current agent" context for later spans
    if span_type == AGENT_SPAN:
        self.current_agent_context = {
            "name": getattr(span_data, 'name', 'unknown'),
            "instructions": getattr(span_data, 'instructions', ''),
            "tools": getattr(span_data, 'tools', []),
            "handoffs": getattr(span_data, 'handoffs', []),
        }

    return step_id
|
|
133
|
+
|
|
134
|
+
def on_span_end(self, span_data: Dict[str, Any], session) -> None:
|
|
135
|
+
"""Handle span completion"""
|
|
136
|
+
span_id = span_data.get("span_id")
|
|
137
|
+
step_id = self.active_steps.get(span_id)
|
|
138
|
+
|
|
139
|
+
if not step_id:
|
|
140
|
+
logger.warning(f"No step found for span {span_id}")
|
|
141
|
+
return
|
|
142
|
+
|
|
143
|
+
# Update span info
|
|
144
|
+
if span_id in self.span_info:
|
|
145
|
+
self.span_info[span_id].end_time = span_data.get("end_time")
|
|
146
|
+
self.span_info[span_id].status = span_data.get("status")
|
|
147
|
+
self.span_info[span_id].error = span_data.get("error")
|
|
148
|
+
|
|
149
|
+
# Update step with completion info
|
|
150
|
+
is_successful = span_data.get("status") != "error"
|
|
151
|
+
error_info = span_data.get("error", {})
|
|
152
|
+
|
|
153
|
+
session.update_step(
|
|
154
|
+
step_id=step_id,
|
|
155
|
+
is_finished=True,
|
|
156
|
+
eval_score=100 if is_successful else 0,
|
|
157
|
+
eval_description=error_info.get("message") if error_info else "Step completed successfully"
|
|
158
|
+
)
|
|
159
|
+
|
|
160
|
+
# Process any remaining span events
|
|
161
|
+
for event in span_data.get("events", []):
|
|
162
|
+
self._create_event_from_span_event(event, session, step_id=step_id)
|
|
163
|
+
|
|
164
|
+
def on_trace_end(self, trace_data: Dict[str, Any], session) -> None:
|
|
165
|
+
"""Handle trace completion"""
|
|
166
|
+
trace_id = trace_data.get("trace_id")
|
|
167
|
+
|
|
168
|
+
# Update session completion
|
|
169
|
+
is_successful = trace_data.get("status") != "error"
|
|
170
|
+
|
|
171
|
+
session.update_session(
|
|
172
|
+
is_finished=True,
|
|
173
|
+
is_successful=is_successful,
|
|
174
|
+
is_successful_reason=trace_data.get("error", {}).get("message") if not is_successful else "Session completed successfully"
|
|
175
|
+
)
|
|
176
|
+
|
|
177
|
+
# Cleanup
|
|
178
|
+
if trace_id in self.active_sessions:
|
|
179
|
+
del self.active_sessions[trace_id]
|
|
180
|
+
|
|
181
|
+
def _extract_task_from_trace(self, trace_data: Dict[str, Any]) -> str:
|
|
182
|
+
"""Extract task description from trace data"""
|
|
183
|
+
# Look for task in attributes or name
|
|
184
|
+
attributes = trace_data.get("attributes", {})
|
|
185
|
+
if "task" in attributes:
|
|
186
|
+
return attributes["task"]
|
|
187
|
+
if "prompt" in attributes:
|
|
188
|
+
return attributes["prompt"]
|
|
189
|
+
return trace_data.get("name", "OpenAI Agents Task")
|
|
190
|
+
|
|
191
|
+
def _extract_step_details_from_span(self, span_type: str, span_data: Any) -> tuple[str, str, str]:
    """Extract goal, action, and state from span data based on type.

    Args:
        span_type: One of the *_SPAN constants (unrecognized types get generic text).
        span_data: Span data object; attributes are read defensively via getattr.

    Returns:
        A (goal, action, state) tuple of human-readable strings.
    """
    if span_type == AGENT_SPAN:
        name = getattr(span_data, 'name', 'Unknown Agent')
        # Fix: the attribute may exist but be None, which would break slicing
        instructions = getattr(span_data, 'instructions', None) or 'No instructions'
        goal = f"Execute agent: {name}"
        action = f"Running agent with instructions: {instructions[:100]}..."
        state = f"Agent: {name}"

    elif span_type == FUNCTION_SPAN:
        name = getattr(span_data, 'name', 'Unknown Function')
        from_agent = getattr(span_data, 'from_agent', 'Unknown')
        goal = f"Execute function: {name}"
        action = f"Agent '{from_agent}' calling function '{name}'"
        state = f"Function execution: {name}"

    elif span_type == GENERATION_SPAN:
        model = getattr(span_data, 'model', 'unknown')
        input_data = getattr(span_data, 'input', {})
        # Extract first message for context
        first_msg = ""
        if isinstance(input_data, dict) and 'messages' in input_data:
            messages = input_data['messages']
            if messages:
                # Fix: message content may be None (e.g. tool-call messages);
                # coerce before slicing to avoid a TypeError
                first_msg = (messages[0].get('content') or '')[:50] + "..."

        goal = f"Generate response using {model}"
        action = f"Making LLM call with prompt: {first_msg}"
        state = f"LLM Generation ({model})"

    elif span_type == HANDOFF_SPAN:
        from_agent = getattr(span_data, 'from_agent', 'unknown')
        to_agent = getattr(span_data, 'to_agent', 'unknown')
        goal = f"Hand off to agent: {to_agent}"
        action = f"Transferring control from '{from_agent}' to '{to_agent}'"
        state = f"Handoff: {from_agent} → {to_agent}"

    elif span_type == RESPONSE_SPAN:
        goal = "Process model response"
        action = "Handling and formatting LLM response"
        state = "Response processing"

    else:
        # Generic handling for unrecognized span types
        name = getattr(span_data, 'name', span_type)
        goal = f"Execute: {name}"
        action = f"Processing {span_type} span"
        state = f"Span: {span_type}"

    return goal, action, state
|
|
242
|
+
|
|
243
|
+
def _create_llm_event(self, span_data: Any, session, step_id: str) -> None:
|
|
244
|
+
"""Create an event to capture raw LLM input/output from GenerationSpanData"""
|
|
245
|
+
model = getattr(span_data, 'model', 'unknown')
|
|
246
|
+
input_data = getattr(span_data, 'input', {})
|
|
247
|
+
output_data = getattr(span_data, 'output', {})
|
|
248
|
+
|
|
249
|
+
# Extract full message history
|
|
250
|
+
messages = []
|
|
251
|
+
if isinstance(input_data, dict) and 'messages' in input_data:
|
|
252
|
+
messages = input_data['messages']
|
|
253
|
+
|
|
254
|
+
# Format the messages for description
|
|
255
|
+
description = f"LLM Call to {model}\n\n"
|
|
256
|
+
description += "=== INPUT MESSAGES ===\n"
|
|
257
|
+
for msg in messages:
|
|
258
|
+
role = msg.get('role', 'unknown')
|
|
259
|
+
content = msg.get('content', '')
|
|
260
|
+
description += f"\n[{role.upper()}]:\n{content}\n"
|
|
261
|
+
|
|
262
|
+
# Extract the response
|
|
263
|
+
result = "=== MODEL RESPONSE ===\n"
|
|
264
|
+
if isinstance(output_data, dict):
|
|
265
|
+
if 'choices' in output_data and output_data['choices']:
|
|
266
|
+
# Standard OpenAI response format
|
|
267
|
+
choice = output_data['choices'][0]
|
|
268
|
+
if 'message' in choice:
|
|
269
|
+
result += choice['message'].get('content', str(choice['message']))
|
|
270
|
+
elif 'text' in choice:
|
|
271
|
+
result += choice['text']
|
|
272
|
+
else:
|
|
273
|
+
# Raw output
|
|
274
|
+
result += json.dumps(output_data, indent=2)
|
|
275
|
+
else:
|
|
276
|
+
result += str(output_data)
|
|
277
|
+
|
|
278
|
+
# Extract cost/usage information
|
|
279
|
+
cost = None
|
|
280
|
+
if isinstance(output_data, dict) and 'usage' in output_data:
|
|
281
|
+
usage = output_data['usage']
|
|
282
|
+
# You might want to calculate cost here based on model and usage
|
|
283
|
+
|
|
284
|
+
# Extract any tool calls
|
|
285
|
+
tool_calls = []
|
|
286
|
+
if isinstance(output_data, dict) and 'choices' in output_data:
|
|
287
|
+
for choice in output_data['choices']:
|
|
288
|
+
if 'message' in choice and 'tool_calls' in choice['message']:
|
|
289
|
+
tool_calls.extend(choice['message']['tool_calls'])
|
|
290
|
+
|
|
291
|
+
if tool_calls:
|
|
292
|
+
result += "\n\n=== TOOL CALLS ===\n"
|
|
293
|
+
result += json.dumps(tool_calls, indent=2)
|
|
294
|
+
|
|
295
|
+
session.create_event(
|
|
296
|
+
step_id=step_id,
|
|
297
|
+
description=description,
|
|
298
|
+
result=result,
|
|
299
|
+
model=model,
|
|
300
|
+
cost_added=cost,
|
|
301
|
+
is_finished=True,
|
|
302
|
+
is_successful=True
|
|
303
|
+
)
|
|
304
|
+
|
|
305
|
+
def _create_function_event(self, span_data: Any, session, step_id: str) -> None:
|
|
306
|
+
"""Create an event for function call details"""
|
|
307
|
+
name = getattr(span_data, 'name', 'Unknown Function')
|
|
308
|
+
input_data = getattr(span_data, 'input', {})
|
|
309
|
+
output_data = getattr(span_data, 'output', None)
|
|
310
|
+
from_agent = getattr(span_data, 'from_agent', 'Unknown')
|
|
311
|
+
|
|
312
|
+
description = f"Function Call: {name}\n"
|
|
313
|
+
description += f"Called by: {from_agent}\n\n"
|
|
314
|
+
description += "=== FUNCTION INPUT ===\n"
|
|
315
|
+
description += json.dumps(input_data, indent=2) if input_data else "No input"
|
|
316
|
+
|
|
317
|
+
result = "=== FUNCTION OUTPUT ===\n"
|
|
318
|
+
if output_data is not None:
|
|
319
|
+
result += json.dumps(output_data, indent=2) if isinstance(output_data, (dict, list)) else str(output_data)
|
|
320
|
+
else:
|
|
321
|
+
result += "No output captured"
|
|
322
|
+
|
|
323
|
+
session.create_event(
|
|
324
|
+
step_id=step_id,
|
|
325
|
+
description=description,
|
|
326
|
+
result=result,
|
|
327
|
+
is_finished=True,
|
|
328
|
+
is_successful=True
|
|
329
|
+
)
|
|
330
|
+
|
|
331
|
+
def _generate_span_id(self, span_data: Any) -> str:
|
|
332
|
+
"""Generate a unique span ID from span data"""
|
|
333
|
+
# Use the span's attributes or generate from content
|
|
334
|
+
if hasattr(span_data, 'id'):
|
|
335
|
+
return str(span_data.id)
|
|
336
|
+
elif hasattr(span_data, 'name'):
|
|
337
|
+
import hashlib
|
|
338
|
+
return hashlib.md5(f"{span_data.name}_{id(span_data)}".encode()).hexdigest()[:16]
|
|
339
|
+
else:
|
|
340
|
+
return str(id(span_data))
|
|
341
|
+
|
|
342
|
+
def _get_parent_id(self, parent_context: Any) -> Optional[str]:
|
|
343
|
+
"""Extract parent span ID from context"""
|
|
344
|
+
if parent_context is None:
|
|
345
|
+
return None
|
|
346
|
+
if hasattr(parent_context, 'span_id'):
|
|
347
|
+
return parent_context.span_id
|
|
348
|
+
elif isinstance(parent_context, dict) and 'span_id' in parent_context:
|
|
349
|
+
return parent_context['span_id']
|
|
350
|
+
return None
|
|
351
|
+
|
|
352
|
+
def _create_event_from_span_event(self, event_data: Dict[str, Any], session, step_id: Optional[str] = None) -> None:
|
|
353
|
+
"""Create a Lucidic Event from a span event (typically an API call)"""
|
|
354
|
+
event_type = event_data.get("type", "")
|
|
355
|
+
|
|
356
|
+
# These are the actual API calls
|
|
357
|
+
if event_type in ["openai_api_call", "api_call", "llm_call"]:
|
|
358
|
+
description = self._format_event_description(event_data)
|
|
359
|
+
result = event_data.get("result", "")
|
|
360
|
+
model = event_data.get("model")
|
|
361
|
+
cost = event_data.get("cost")
|
|
362
|
+
|
|
363
|
+
event_kwargs = {
|
|
364
|
+
"description": description,
|
|
365
|
+
"result": result,
|
|
366
|
+
"is_finished": True,
|
|
367
|
+
"is_successful": event_data.get("status") != "error"
|
|
368
|
+
}
|
|
369
|
+
|
|
370
|
+
if model:
|
|
371
|
+
event_kwargs["model"] = model
|
|
372
|
+
if cost:
|
|
373
|
+
event_kwargs["cost_added"] = cost
|
|
374
|
+
if step_id:
|
|
375
|
+
event_kwargs["step_id"] = step_id
|
|
376
|
+
|
|
377
|
+
session.create_event(**event_kwargs)
|
|
378
|
+
|
|
379
|
+
def _format_event_description(self, event_data: Dict[str, Any]) -> str:
|
|
380
|
+
"""Format event description based on event type and data"""
|
|
381
|
+
event_type = event_data.get("type", "")
|
|
382
|
+
|
|
383
|
+
if event_type == "openai_api_call":
|
|
384
|
+
messages = event_data.get("messages", [])
|
|
385
|
+
if messages:
|
|
386
|
+
return f"OpenAI API Call: {messages[-1].get('content', '')[:100]}..."
|
|
387
|
+
return "OpenAI API Call"
|
|
388
|
+
|
|
389
|
+
elif event_type == "api_call":
|
|
390
|
+
return f"API Call: {event_data.get('endpoint', 'unknown')}"
|
|
391
|
+
|
|
392
|
+
elif event_type == "llm_call":
|
|
393
|
+
return f"LLM Call: {event_data.get('prompt', '')[:100]}..."
|
|
394
|
+
|
|
395
|
+
return f"Event: {event_data.get('name', event_type)}"
|
|
396
|
+
|
|
397
|
+
def cleanup(self):
|
|
398
|
+
"""Clean up converter state"""
|
|
399
|
+
self.active_sessions.clear()
|
|
400
|
+
self.active_steps.clear()
|
|
401
|
+
self.span_hierarchy.clear()
|
|
402
|
+
self.span_info.clear()
|
|
403
|
+
|
|
404
|
+
# Public methods for external testing
|
|
405
|
+
def _convert_trace_to_session(self, trace_data: Any) -> Dict[str, Any]:
|
|
406
|
+
"""Convert trace data to session data (for testing)"""
|
|
407
|
+
session_data = {
|
|
408
|
+
'session_name': f"Agent Workflow: {getattr(trace_data, 'name', 'unknown')}",
|
|
409
|
+
'task': self._extract_task_from_trace({'attributes': getattr(trace_data, 'attributes', {})})
|
|
410
|
+
}
|
|
411
|
+
return session_data
|
|
412
|
+
|
|
413
|
+
def _convert_span_to_step(self, span_data: Any) -> Dict[str, Any]:
    """Convert span data to step data (for testing).

    Unrecognized span types fall back to agent-style handling.
    (An unused local `name` has been removed.)
    """
    span_type = getattr(span_data, 'span_type', 'unknown')

    # Use the internal method to get the step details
    effective_type = span_type if span_type != 'unknown' else AGENT_SPAN
    goal, action, state = self._extract_step_details_from_span(effective_type, span_data)

    return {
        'state': state,
        'action': action,
        'goal': goal
    }
|
|
429
|
+
|
|
430
|
+
def _convert_span_event_to_event(self, event_data: Any) -> Dict[str, Any]:
|
|
431
|
+
"""Convert span event to event data (for testing)"""
|
|
432
|
+
return {
|
|
433
|
+
'description': getattr(event_data, 'name', 'unknown event'),
|
|
434
|
+
'model': getattr(event_data, 'attributes', {}).get('model', 'unknown'),
|
|
435
|
+
'cost_added': getattr(event_data, 'attributes', {}).get('cost', None)
|
|
436
|
+
}
|