openlit 1.34.28__py3-none-any.whl → 1.34.30__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openlit/instrumentation/crewai/__init__.py +86 -24
- openlit/instrumentation/crewai/async_crewai.py +89 -0
- openlit/instrumentation/crewai/crewai.py +79 -131
- openlit/instrumentation/crewai/utils.py +512 -0
- openlit/instrumentation/litellm/utils.py +18 -9
- openlit/instrumentation/openai/utils.py +58 -23
- openlit/instrumentation/openai_agents/__init__.py +46 -26
- openlit/instrumentation/openai_agents/processor.py +452 -0
- openlit/semcov/__init__.py +31 -2
- {openlit-1.34.28.dist-info → openlit-1.34.30.dist-info}/METADATA +2 -1
- {openlit-1.34.28.dist-info → openlit-1.34.30.dist-info}/RECORD +13 -11
- openlit/instrumentation/openai_agents/openai_agents.py +0 -65
- {openlit-1.34.28.dist-info → openlit-1.34.30.dist-info}/LICENSE +0 -0
- {openlit-1.34.28.dist-info → openlit-1.34.30.dist-info}/WHEEL +0 -0
"""
CrewAI utilities for comprehensive telemetry processing and business intelligence
"""

import time
import json
from urllib.parse import urlparse
from opentelemetry.trace import Status, StatusCode
from openlit.__helpers import (
    common_framework_span_attributes,
    handle_exception,
)
from openlit.semcov import SemanticConvention

# === OPERATION MAPPING - Framework Guide Compliant ===
# Maps internal "{target}_{method}" endpoint keys (as produced by the
# instrumentation wrappers) to standard OpenTelemetry gen_ai operation names.
# Callers look endpoints up with .get(endpoint, "invoke_agent"), so any
# unknown endpoint falls back to "invoke_agent".
OPERATION_MAP = {
    # === STANDARD OPENTELEMETRY OPERATION NAMES ===
    # Crew Operations (workflow management)
    "crew_kickoff": "invoke_agent",
    "crew_train": "invoke_agent",
    "crew_replay": "invoke_agent",
    "crew_test": "invoke_agent",

    # Agent Operations (core agent functions)
    "agent___init__": "create_agent",
    "agent_execute_task": "invoke_agent",
    "agent_backstory_property": "invoke_agent",

    # Task Operations (task execution)
    "task_execute": "invoke_agent",
    "task_execute_async": "invoke_agent",
    "task_execute_core": "invoke_agent",

    # Tool Operations (tool execution)
    "tool_run": "execute_tool",
    "tool___call__": "execute_tool",
    "tool_execute": "execute_tool",

    # Memory Operations (knowledge management)
    "memory_save": "invoke_agent",
    "memory_search": "invoke_agent",
    "memory_reset": "invoke_agent"
}
def set_server_address_and_port(instance):
    """
    Extract server information from CrewAI instance.

    Looks for an LLM endpoint either directly on the instance (Agent-like
    objects expose ``.llm``) or one level down via ``instance.agent.llm``
    (Task-like objects). Any failure degrades gracefully to the defaults.

    Args:
        instance: CrewAI instance (Crew, Agent, Task, etc.)

    Returns:
        tuple: (server_address, server_port)
    """
    server_address = "localhost"
    server_port = 8080

    # Try to extract LLM endpoint information; the two lookup paths from the
    # original implementation are collapsed into one api_base resolution.
    try:
        api_base = getattr(getattr(instance, "llm", None), "api_base", None)
        if not api_base:
            # For tasks that have an agent with an LLM
            agent = getattr(instance, "agent", None)
            api_base = getattr(getattr(agent, "llm", None), "api_base", None)

        # Guard against None/empty api_base: urlparse(None) raises TypeError,
        # and an empty endpoint carries no routing information anyway.
        if api_base:
            parsed = urlparse(api_base)
            server_address = parsed.hostname or "localhost"
            server_port = parsed.port or 443
    except Exception:
        # Graceful degradation - telemetry must never break the traced app
        pass

    return server_address, server_port
def process_crewai_response(response, operation_type, server_address, server_port,
                            environment, application_name, metrics, start_time, span,
                            capture_message_content, disable_metrics, version,
                            instance, args, endpoint=None, **kwargs):
    """
    Process CrewAI response with comprehensive business intelligence.
    OpenLIT's competitive advantage through superior observability.

    Central post-call hook: stamps the span with framework attributes, a
    standard operation name, per-endpoint business intelligence, optional
    content events, token estimates and metrics, then marks the span OK.

    Args:
        response: Raw return value of the wrapped CrewAI call; returned unchanged.
        operation_type: Caller-supplied operation hint (unused here; the
            standard operation is re-derived from ``endpoint`` via OPERATION_MAP).
        server_address / server_port: Endpoint info from set_server_address_and_port.
        environment / application_name / version: Deployment metadata for attributes.
        metrics: Metrics handle dict; consulted only when metrics are enabled.
        start_time: time.time() captured before the wrapped call.
        span: Active OpenTelemetry span to enrich.
        capture_message_content: When truthy, attach prompt/completion events.
        disable_metrics: When truthy, skip metric recording.
        instance: The CrewAI object (Crew/Agent/Task/Tool) the call ran on.
        args / kwargs: Original call arguments, forwarded to helpers.
        endpoint: "{target}_{method}" key identifying the wrapped operation.

    Returns:
        The ``response`` argument, unmodified.
    """

    end_time = time.time()
    duration_ms = (end_time - start_time) * 1000

    # Create proper scope object for common_framework_span_attributes
    # (it expects an object carrying _span/_start_time/_end_time)
    scope = type("GenericScope", (), {})()
    scope._span = span
    scope._start_time = start_time
    scope._end_time = end_time

    # Get standard operation name from mapping (unknown endpoints -> invoke_agent)
    standard_operation = OPERATION_MAP.get(endpoint, "invoke_agent")

    # Extract model information from agent's LLM for proper attribution
    request_model = "unknown"
    if instance:
        llm = getattr(instance, "llm", None)
        if llm:
            # Try different model attribute names used by different LLM libraries
            request_model = (getattr(llm, "model_name", None) or
                           getattr(llm, "model", None) or
                           getattr(llm, "_model_name", None) or
                           "unknown")
            if request_model != "unknown":
                request_model = str(request_model)

    # Create a wrapper instance that exposes model_name for common_framework_span_attributes
    class ModelWrapper:
        """Wrapper class to expose model_name for framework span attributes."""
        def __init__(self, original_instance, model_name):
            self._original = original_instance
            self.model_name = model_name

        def __getattr__(self, name):
            # Delegate every other attribute to the wrapped CrewAI object
            # (__getattr__ only fires for names not found on the wrapper)
            return getattr(self._original, name)

        def get_original_instance(self):
            """Get the original wrapped instance."""
            return self._original

    model_instance = ModelWrapper(instance, request_model) if instance else None

    # Set common framework span attributes
    common_framework_span_attributes(
        scope, SemanticConvention.GEN_AI_SYSTEM_CREWAI, server_address, server_port,
        environment, application_name, version, endpoint, model_instance
    )

    # Set span name following OpenTelemetry format
    _set_span_name(span, standard_operation, instance, endpoint, args, kwargs)

    # === CORE SEMANTIC ATTRIBUTES ===
    span.set_attribute(SemanticConvention.GEN_AI_OPERATION, standard_operation)
    # Remove gen_ai.endpoint as requested

    # === STANDARD BUSINESS INTELLIGENCE ===
    # Only use standard OpenTelemetry attributes, no framework-specific ones.
    # Each helper guards on its own endpoint prefix, so all four are safe to call.
    _set_agent_business_intelligence(span, instance, endpoint, args, kwargs)
    _set_tool_business_intelligence(span, instance, endpoint, args, kwargs)
    _set_task_business_intelligence(span, instance, endpoint, args, kwargs)
    _set_crew_business_intelligence(span, instance, endpoint, args, kwargs)
    # Remove framework-specific functions: _set_workflow_business_intelligence, _set_memory_business_intelligence

    # === PERFORMANCE INTELLIGENCE ===
    # Use standard OpenTelemetry duration attribute through common_framework_span_attributes

    # === CONTENT CAPTURE ===
    if capture_message_content:
        _capture_content(span, instance, response, endpoint)

    # === COST TRACKING ===
    _track_cost_and_tokens(span, instance, response, endpoint)

    # === RECORD METRICS ===
    if not disable_metrics and metrics:
        _record_crewai_metrics(metrics, standard_operation, duration_ms, environment, application_name)

    span.set_status(Status(StatusCode.OK))
    return response
def _set_span_name(span, operation_type, instance, endpoint, args, kwargs):
    """Set span name following OpenTelemetry format: '{operation_type} {name}'"""
    try:
        mapped_operation = OPERATION_MAP.get(endpoint, "invoke_agent")

        if endpoint.startswith("crew_"):
            # Crew operations: "invoke_agent {crew_name}"
            target = getattr(instance, "name", None) or "crew"
            new_name = f"{mapped_operation} {target}"

        elif endpoint.startswith("agent_"):
            # Creation vs invocation picks the verb; both use name, then role
            target = getattr(instance, "name", None) or getattr(instance, "role", "agent")
            is_creation = "create" in endpoint or endpoint == "agent___init__"
            verb = "create_agent" if is_creation else "invoke_agent"
            new_name = f"{verb} {target}"

        elif endpoint.startswith("task_"):
            # Task operations: "invoke_agent task"
            new_name = "invoke_agent task"

        elif endpoint.startswith("tool_"):
            # Tool operations: "execute_tool {tool_name}"
            target = getattr(instance, "name", None) or getattr(instance, "__class__", type(instance)).__name__
            new_name = f"execute_tool {target}"

        elif endpoint.startswith("memory_"):
            # Memory operations: "invoke_agent memory:{operation}"
            suffix = endpoint.split("_", 1)[1] if "_" in endpoint else "operation"
            new_name = f"invoke_agent memory:{suffix}"

        else:
            # Default fallback
            new_name = f"{mapped_operation} {endpoint}"

        span.update_name(new_name)

    except Exception as e:
        handle_exception(span, e)
        # Fallback naming keeps the span usable even when inspection fails
        span.update_name(f"invoke_agent {endpoint}")
def _set_agent_business_intelligence(span, instance, endpoint, args, kwargs):
    """Set agent business intelligence using standard OpenTelemetry semantic conventions"""
    if not endpoint.startswith("agent_"):
        return

    try:
        # Standard OpenTelemetry Gen AI Agent attributes
        identifier = getattr(instance, "id", "")
        if identifier:
            span.set_attribute(SemanticConvention.GEN_AI_AGENT_ID, str(identifier))

        display_name = getattr(instance, "name", None) or getattr(instance, "role", "")
        if display_name:
            span.set_attribute(SemanticConvention.GEN_AI_AGENT_NAME, display_name)

        # Agent description - role + goal combined per OpenTelemetry spec,
        # falling back to goal alone when no role is set
        role = getattr(instance, "role", "")
        goal = getattr(instance, "goal", "")
        if role and goal:
            span.set_attribute(SemanticConvention.GEN_AI_AGENT_DESCRIPTION, f"{role}: {goal}")
        elif goal:
            span.set_attribute(SemanticConvention.GEN_AI_AGENT_DESCRIPTION, goal)

        # Enhanced agent configuration tracking - each attribute is emitted
        # only when explicitly set on the agent (None means "not configured")
        for attr_name, convention in (
            ("max_retry_limit", SemanticConvention.GEN_AI_AGENT_MAX_RETRY_LIMIT),
            ("allow_delegation", SemanticConvention.GEN_AI_AGENT_ALLOW_DELEGATION),
            ("allow_code_execution", SemanticConvention.GEN_AI_AGENT_ALLOW_CODE_EXECUTION),
        ):
            value = getattr(instance, attr_name, None)
            if value is not None:
                span.set_attribute(convention, value)

        # Tools tracking - record at most the first 5 tool names
        attached_tools = getattr(instance, "tools", [])
        if attached_tools:
            labels = [getattr(tool, "name", str(tool)) for tool in attached_tools[:5]]
            if labels:
                span.set_attribute(SemanticConvention.GEN_AI_AGENT_TOOLS, ", ".join(labels))

        # === OpenAI Agent-specific Attributes ===
        _set_openai_agent_attributes(span, instance, endpoint, args, kwargs)

        # === Conversation and Data Source Tracking ===
        _set_conversation_and_data_source_attributes(span, instance, endpoint, args, kwargs)

    except Exception as e:
        handle_exception(span, e)
def _set_openai_agent_attributes(span, instance, endpoint, args, kwargs):
    """Set OpenAI-specific agent attributes when using OpenAI models"""
    try:
        llm = getattr(instance, "llm", None)
        if not llm:
            return

        provider_hint = llm.__class__.__name__.lower()
        model = getattr(llm, "model_name", getattr(llm, "model", ""))

        # Set model information
        if model:
            span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL, model)

        # OpenAI-specific attributes (but keep gen_ai.system as crewai)
        if "openai" in provider_hint or "gpt" in str(model).lower():
            # OpenAI service tier if available
            tier = getattr(llm, "service_tier", None)
            if tier:
                span.set_attribute(SemanticConvention.GEN_AI_OPENAI_REQUEST_SERVICE_TIER, tier)

            # Assistant API identifiers: instance attribute wins, kwargs fallback
            for key, convention in (
                ("assistant_id", SemanticConvention.GEN_AI_OPENAI_ASSISTANT_ID),
                ("thread_id", SemanticConvention.GEN_AI_OPENAI_THREAD_ID),
                ("run_id", SemanticConvention.GEN_AI_OPENAI_RUN_ID),
            ):
                value = getattr(instance, key, None) or kwargs.get(key)
                if value:
                    span.set_attribute(convention, value)

        elif "litellm" in provider_hint:
            # LiteLLM detected; no extra attributes yet
            # (gen_ai.system intentionally stays crewai)
            pass

    except Exception as e:
        handle_exception(span, e)
def _set_conversation_and_data_source_attributes(span, instance, endpoint, args, kwargs):
    """Set conversation tracking and data source attributes"""
    try:
        # Conversation ID for multi-turn interactions: first match wins
        # across instance attributes and call kwargs
        session = (
            getattr(instance, "conversation_id", None)
            or getattr(instance, "session_id", None)
            or kwargs.get("conversation_id")
            or kwargs.get("session_id")
        )
        if session:
            span.set_attribute(SemanticConvention.GEN_AI_CONVERSATION_ID, str(session))

        # Memory as a data source for RAG operations
        memory = getattr(instance, "memory", None)
        if memory:
            provider = getattr(memory, "provider", None)
            if provider:
                span.set_attribute(SemanticConvention.GEN_AI_DATA_SOURCE_TYPE, "memory")
                span.set_attribute(SemanticConvention.GEN_AI_DATA_SOURCE_ID, str(provider))

        # Knowledge base or vector store detection
        knowledge = getattr(instance, "knowledge_source", None)
        if knowledge:
            span.set_attribute(SemanticConvention.GEN_AI_DATA_SOURCE_TYPE, "knowledge_base")
            span.set_attribute(SemanticConvention.GEN_AI_DATA_SOURCE_ID, str(knowledge))

        # Tool-based data sources: first retrieval-flavoured tool marks the span
        retrieval_keywords = ("search", "retrieval", "database", "vector")
        for tool in getattr(instance, "tools", []):
            label = getattr(tool, "name", "").lower()
            if any(word in label for word in retrieval_keywords):
                span.set_attribute(SemanticConvention.GEN_AI_DATA_SOURCE_TYPE, "external_tool")
                break

    except Exception as e:
        handle_exception(span, e)
def _set_task_business_intelligence(span, instance, endpoint, args, kwargs):
    """Set task business intelligence using standard OpenTelemetry semantic conventions"""
    if not endpoint.startswith("task_"):
        return

    try:
        # Task ID tracking
        identifier = getattr(instance, "id", None)
        if identifier:
            span.set_attribute(SemanticConvention.GEN_AI_AGENT_TASK_ID, str(identifier))

        # Task description
        description = getattr(instance, "description", "")
        if description:
            span.set_attribute(SemanticConvention.GEN_AI_TASK_DESCRIPTION, description)

        # Expected output - the only other task field with a semantic convention
        expected = getattr(instance, "expected_output", "")
        if expected:
            span.set_attribute(SemanticConvention.GEN_AI_TASK_EXPECTED_OUTPUT, expected)

    except Exception as e:
        handle_exception(span, e)
def _set_crew_business_intelligence(span, instance, endpoint, args, kwargs):
|
366
|
+
"""Set crew business intelligence using standard OpenTelemetry semantic conventions"""
|
367
|
+
if not endpoint.startswith("crew_"):
|
368
|
+
return
|
369
|
+
|
370
|
+
try:
|
371
|
+
# Only capture essential crew attributes - remove custom ones that don't have semantic conventions
|
372
|
+
pass
|
373
|
+
|
374
|
+
except Exception as e:
|
375
|
+
handle_exception(span, e)
|
376
|
+
|
377
|
+
def _set_tool_business_intelligence(span, instance, endpoint, args, kwargs):
    """Set tool business intelligence using standard OpenTelemetry semantic conventions"""
    if not endpoint.startswith("tool_"):
        return

    try:
        # Standard OpenTelemetry Gen AI Tool attributes: explicit name first,
        # class name as the fallback label
        label = getattr(instance, "name", None) or getattr(instance, "__class__", type(instance)).__name__
        if label:
            span.set_attribute(SemanticConvention.GEN_AI_TOOL_NAME, label)

        # Tool call ID if available (ties the attribute to one invocation)
        invocation_id = kwargs.get("call_id", None) or getattr(instance, "call_id", None)
        if invocation_id:
            span.set_attribute(SemanticConvention.GEN_AI_TOOL_CALL_ID, str(invocation_id))

        # === OpenAI Function Calling Attributes ===
        _set_openai_tool_attributes(span, instance, endpoint, args, kwargs)

    except Exception as e:
        handle_exception(span, e)
def _set_openai_tool_attributes(span, instance, endpoint, args, kwargs):
    """Set OpenAI function calling specific attributes using standard conventions"""
    try:
        # Standard tool type classification (framework-agnostic), keyed off
        # the tool's class name; the first matching rule wins
        class_label = instance.__class__.__name__.lower()
        classification_rules = (
            ("search", ("search", "web", "browser")),
            ("file_system", ("file", "read", "write")),
            ("api_client", ("api", "http", "request")),
            ("database", ("database", "sql", "query")),
            ("vector_store", ("vector", "embedding", "retrieval")),
        )

        tool_type = "custom"
        for candidate, keywords in classification_rules:
            if any(keyword in class_label for keyword in keywords):
                tool_type = candidate
                break

        # Use standard tool type attribute from semcov
        span.set_attribute(SemanticConvention.GEN_AI_TOOL_TYPE, tool_type)

    except Exception as e:
        handle_exception(span, e)
def _capture_content(span, instance, response, endpoint):
    """Capture input/output content with MIME types"""

    try:
        # Capture response content as a completion event, capped at 1000
        # characters so large outputs don't bloat the telemetry payload
        if response:
            span.add_event(
                name=SemanticConvention.GEN_AI_CONTENT_COMPLETION_EVENT,
                attributes={
                    SemanticConvention.GEN_AI_CONTENT_COMPLETION: str(response)[:1000],
                },
            )

        # For task endpoints, the task description doubles as the "prompt"
        if endpoint.startswith("task_"):
            description = getattr(instance, "description", "")
            if description:
                span.add_event(
                    name=SemanticConvention.GEN_AI_CONTENT_PROMPT_EVENT,
                    attributes={
                        SemanticConvention.GEN_AI_CONTENT_PROMPT: description[:1000],
                    },
                )

    except Exception:
        # Graceful degradation - content capture is best-effort
        pass
def _track_cost_and_tokens(span, instance, response, endpoint):
    """Track cost and token usage for business intelligence"""

    try:
        # Token tracking from LLM calls - placeholder for a future
        # framework-specific implementation
        if hasattr(instance, "llm") and hasattr(instance.llm, "get_num_tokens"):
            pass

        # Response length as a proxy metric; estimate tokens at the usual
        # rough ratio of ~4 characters per token
        if response:
            approx_tokens = len(str(response)) // 4
            span.set_attribute(SemanticConvention.GEN_AI_CLIENT_TOKEN_USAGE, approx_tokens)

        # Cost estimation would require pricing information and could be
        # layered on top of the token estimate later

    except Exception:
        # Graceful degradation - never break the traced call
        pass
def _record_crewai_metrics(metrics, operation_type, duration_ms, environment, application_name):
    """Record CrewAI-specific metrics"""

    try:
        # Shared attribute set applied to every instrument
        shared_attributes = {
            "gen_ai.operation.name": operation_type,
            "gen_ai.system": SemanticConvention.GEN_AI_SYSTEM_CREWAI,
            "service.name": application_name,
            "deployment.environment": environment,
        }

        # Operation duration histogram expects seconds; we hold milliseconds
        if "genai_client_operation_duration" in metrics:
            metrics["genai_client_operation_duration"].record(
                duration_ms / 1000, shared_attributes
            )

        # Operation counter
        if "genai_requests" in metrics:
            metrics["genai_requests"].add(1, shared_attributes)

    except Exception:
        # Graceful degradation - metrics failures must not surface
        pass
def _parse_tools(tools):
|
498
|
+
"""Parse tools list into JSON format"""
|
499
|
+
|
500
|
+
try:
|
501
|
+
result = []
|
502
|
+
for tool in tools:
|
503
|
+
tool_info = {}
|
504
|
+
if hasattr(tool, "name") and tool.name is not None:
|
505
|
+
tool_info["name"] = tool.name
|
506
|
+
if hasattr(tool, "description") and tool.description is not None:
|
507
|
+
tool_info["description"] = tool.description
|
508
|
+
if tool_info:
|
509
|
+
result.append(tool_info)
|
510
|
+
return json.dumps(result)
|
511
|
+
except Exception:
|
512
|
+
return "[]"
|
@@ -112,16 +112,21 @@ def common_chat_logic(scope, pricing_info, environment, application_name, metric
|
|
112
112
|
scope._server_address, scope._server_port, request_model, scope._response_model,
|
113
113
|
environment, application_name, is_stream, scope._tbt, scope._ttft, version)
|
114
114
|
|
115
|
+
# Helper function to handle None values with proper defaults
|
116
|
+
def safe_get(value, default):
|
117
|
+
return default if value is None else value
|
118
|
+
|
115
119
|
# Span Attributes for Request parameters
|
116
|
-
scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_SEED, scope._kwargs.get('seed', ''))
|
117
|
-
scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_FREQUENCY_PENALTY,
|
118
|
-
|
119
|
-
scope._span.set_attribute(SemanticConvention.
|
120
|
+
scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_SEED, safe_get(scope._kwargs.get('seed'), ''))
|
121
|
+
scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_FREQUENCY_PENALTY,
|
122
|
+
safe_get(scope._kwargs.get('frequency_penalty'), 0.0))
|
123
|
+
scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MAX_TOKENS, safe_get(scope._kwargs.get('max_tokens'), -1))
|
124
|
+
scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_PRESENCE_PENALTY, safe_get(scope._kwargs.get('presence_penalty'), 0.0))
|
120
125
|
scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_STOP_SEQUENCES, scope._kwargs.get('stop', []))
|
121
|
-
scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TEMPERATURE, scope._kwargs.get('temperature', 1.0))
|
122
|
-
scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TOP_P, scope._kwargs.get('top_p', 1.0))
|
123
|
-
scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_USER, scope._kwargs.get('user', ''))
|
124
|
-
scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_SERVICE_TIER, scope._kwargs.get('service_tier', 'auto'))
|
126
|
+
scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TEMPERATURE, safe_get(scope._kwargs.get('temperature'), 1.0))
|
127
|
+
scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TOP_P, safe_get(scope._kwargs.get('top_p'), 1.0))
|
128
|
+
scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_USER, safe_get(scope._kwargs.get('user'), ''))
|
129
|
+
scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_SERVICE_TIER, safe_get(scope._kwargs.get('service_tier'), 'auto'))
|
125
130
|
|
126
131
|
# Span Attributes for Response parameters
|
127
132
|
scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID, scope._response_id)
|
@@ -256,9 +261,13 @@ def process_embedding_response(response, request_model, pricing_info, server_por
|
|
256
261
|
scope._server_address, scope._server_port, request_model, scope._response_model,
|
257
262
|
environment, application_name, False, 0, scope._end_time - scope._start_time, version)
|
258
263
|
|
264
|
+
# Helper function to handle None values with proper defaults
|
265
|
+
def safe_get(value, default):
|
266
|
+
return default if value is None else value
|
267
|
+
|
259
268
|
# Span Attributes for Request parameters
|
260
269
|
scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_ENCODING_FORMATS, [scope._kwargs.get('encoding_format', 'float')])
|
261
|
-
scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_USER, scope._kwargs.get('user', ''))
|
270
|
+
scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_USER, safe_get(scope._kwargs.get('user'), ''))
|
262
271
|
|
263
272
|
# Span Attributes for Cost and Tokens
|
264
273
|
scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, scope._input_tokens)
|