openlit 1.34.29__py3-none-any.whl → 1.34.31__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (168)
  1. openlit/__helpers.py +235 -86
  2. openlit/__init__.py +16 -13
  3. openlit/_instrumentors.py +2 -1
  4. openlit/evals/all.py +50 -21
  5. openlit/evals/bias_detection.py +47 -20
  6. openlit/evals/hallucination.py +53 -22
  7. openlit/evals/toxicity.py +50 -21
  8. openlit/evals/utils.py +54 -30
  9. openlit/guard/all.py +61 -19
  10. openlit/guard/prompt_injection.py +34 -14
  11. openlit/guard/restrict_topic.py +46 -15
  12. openlit/guard/sensitive_topic.py +34 -14
  13. openlit/guard/utils.py +58 -22
  14. openlit/instrumentation/ag2/__init__.py +24 -8
  15. openlit/instrumentation/ag2/ag2.py +34 -13
  16. openlit/instrumentation/ag2/async_ag2.py +34 -13
  17. openlit/instrumentation/ag2/utils.py +133 -30
  18. openlit/instrumentation/ai21/__init__.py +43 -14
  19. openlit/instrumentation/ai21/ai21.py +47 -21
  20. openlit/instrumentation/ai21/async_ai21.py +47 -21
  21. openlit/instrumentation/ai21/utils.py +299 -78
  22. openlit/instrumentation/anthropic/__init__.py +21 -4
  23. openlit/instrumentation/anthropic/anthropic.py +28 -17
  24. openlit/instrumentation/anthropic/async_anthropic.py +28 -17
  25. openlit/instrumentation/anthropic/utils.py +145 -35
  26. openlit/instrumentation/assemblyai/__init__.py +11 -2
  27. openlit/instrumentation/assemblyai/assemblyai.py +15 -4
  28. openlit/instrumentation/assemblyai/utils.py +120 -25
  29. openlit/instrumentation/astra/__init__.py +43 -10
  30. openlit/instrumentation/astra/astra.py +28 -5
  31. openlit/instrumentation/astra/async_astra.py +28 -5
  32. openlit/instrumentation/astra/utils.py +151 -55
  33. openlit/instrumentation/azure_ai_inference/__init__.py +43 -10
  34. openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +53 -21
  35. openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +53 -21
  36. openlit/instrumentation/azure_ai_inference/utils.py +307 -83
  37. openlit/instrumentation/bedrock/__init__.py +21 -4
  38. openlit/instrumentation/bedrock/bedrock.py +63 -25
  39. openlit/instrumentation/bedrock/utils.py +139 -30
  40. openlit/instrumentation/chroma/__init__.py +89 -16
  41. openlit/instrumentation/chroma/chroma.py +28 -6
  42. openlit/instrumentation/chroma/utils.py +167 -51
  43. openlit/instrumentation/cohere/__init__.py +63 -18
  44. openlit/instrumentation/cohere/async_cohere.py +63 -24
  45. openlit/instrumentation/cohere/cohere.py +63 -24
  46. openlit/instrumentation/cohere/utils.py +286 -73
  47. openlit/instrumentation/controlflow/__init__.py +35 -9
  48. openlit/instrumentation/controlflow/controlflow.py +66 -33
  49. openlit/instrumentation/crawl4ai/__init__.py +25 -10
  50. openlit/instrumentation/crawl4ai/async_crawl4ai.py +78 -31
  51. openlit/instrumentation/crawl4ai/crawl4ai.py +78 -31
  52. openlit/instrumentation/crewai/__init__.py +111 -24
  53. openlit/instrumentation/crewai/async_crewai.py +114 -0
  54. openlit/instrumentation/crewai/crewai.py +104 -131
  55. openlit/instrumentation/crewai/utils.py +615 -0
  56. openlit/instrumentation/dynamiq/__init__.py +46 -12
  57. openlit/instrumentation/dynamiq/dynamiq.py +74 -33
  58. openlit/instrumentation/elevenlabs/__init__.py +23 -4
  59. openlit/instrumentation/elevenlabs/async_elevenlabs.py +16 -4
  60. openlit/instrumentation/elevenlabs/elevenlabs.py +16 -4
  61. openlit/instrumentation/elevenlabs/utils.py +128 -25
  62. openlit/instrumentation/embedchain/__init__.py +11 -2
  63. openlit/instrumentation/embedchain/embedchain.py +68 -35
  64. openlit/instrumentation/firecrawl/__init__.py +24 -7
  65. openlit/instrumentation/firecrawl/firecrawl.py +46 -20
  66. openlit/instrumentation/google_ai_studio/__init__.py +45 -10
  67. openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +67 -44
  68. openlit/instrumentation/google_ai_studio/google_ai_studio.py +67 -44
  69. openlit/instrumentation/google_ai_studio/utils.py +180 -67
  70. openlit/instrumentation/gpt4all/__init__.py +22 -7
  71. openlit/instrumentation/gpt4all/gpt4all.py +67 -29
  72. openlit/instrumentation/gpt4all/utils.py +285 -61
  73. openlit/instrumentation/gpu/__init__.py +128 -47
  74. openlit/instrumentation/groq/__init__.py +21 -4
  75. openlit/instrumentation/groq/async_groq.py +33 -21
  76. openlit/instrumentation/groq/groq.py +33 -21
  77. openlit/instrumentation/groq/utils.py +192 -55
  78. openlit/instrumentation/haystack/__init__.py +70 -24
  79. openlit/instrumentation/haystack/async_haystack.py +28 -6
  80. openlit/instrumentation/haystack/haystack.py +28 -6
  81. openlit/instrumentation/haystack/utils.py +196 -74
  82. openlit/instrumentation/julep/__init__.py +69 -19
  83. openlit/instrumentation/julep/async_julep.py +53 -27
  84. openlit/instrumentation/julep/julep.py +53 -28
  85. openlit/instrumentation/langchain/__init__.py +74 -63
  86. openlit/instrumentation/langchain/callback_handler.py +1100 -0
  87. openlit/instrumentation/langchain_community/__init__.py +13 -2
  88. openlit/instrumentation/langchain_community/async_langchain_community.py +23 -5
  89. openlit/instrumentation/langchain_community/langchain_community.py +23 -5
  90. openlit/instrumentation/langchain_community/utils.py +35 -9
  91. openlit/instrumentation/letta/__init__.py +68 -15
  92. openlit/instrumentation/letta/letta.py +99 -54
  93. openlit/instrumentation/litellm/__init__.py +43 -14
  94. openlit/instrumentation/litellm/async_litellm.py +51 -26
  95. openlit/instrumentation/litellm/litellm.py +51 -26
  96. openlit/instrumentation/litellm/utils.py +312 -101
  97. openlit/instrumentation/llamaindex/__init__.py +267 -90
  98. openlit/instrumentation/llamaindex/async_llamaindex.py +28 -6
  99. openlit/instrumentation/llamaindex/llamaindex.py +28 -6
  100. openlit/instrumentation/llamaindex/utils.py +204 -91
  101. openlit/instrumentation/mem0/__init__.py +11 -2
  102. openlit/instrumentation/mem0/mem0.py +50 -29
  103. openlit/instrumentation/milvus/__init__.py +10 -2
  104. openlit/instrumentation/milvus/milvus.py +31 -6
  105. openlit/instrumentation/milvus/utils.py +166 -67
  106. openlit/instrumentation/mistral/__init__.py +63 -18
  107. openlit/instrumentation/mistral/async_mistral.py +63 -24
  108. openlit/instrumentation/mistral/mistral.py +63 -24
  109. openlit/instrumentation/mistral/utils.py +277 -69
  110. openlit/instrumentation/multion/__init__.py +69 -19
  111. openlit/instrumentation/multion/async_multion.py +57 -26
  112. openlit/instrumentation/multion/multion.py +57 -26
  113. openlit/instrumentation/ollama/__init__.py +39 -18
  114. openlit/instrumentation/ollama/async_ollama.py +57 -26
  115. openlit/instrumentation/ollama/ollama.py +57 -26
  116. openlit/instrumentation/ollama/utils.py +226 -50
  117. openlit/instrumentation/openai/__init__.py +156 -32
  118. openlit/instrumentation/openai/async_openai.py +147 -67
  119. openlit/instrumentation/openai/openai.py +150 -67
  120. openlit/instrumentation/openai/utils.py +660 -186
  121. openlit/instrumentation/openai_agents/__init__.py +6 -2
  122. openlit/instrumentation/openai_agents/processor.py +409 -537
  123. openlit/instrumentation/phidata/__init__.py +13 -5
  124. openlit/instrumentation/phidata/phidata.py +67 -32
  125. openlit/instrumentation/pinecone/__init__.py +48 -9
  126. openlit/instrumentation/pinecone/async_pinecone.py +27 -5
  127. openlit/instrumentation/pinecone/pinecone.py +27 -5
  128. openlit/instrumentation/pinecone/utils.py +153 -47
  129. openlit/instrumentation/premai/__init__.py +22 -7
  130. openlit/instrumentation/premai/premai.py +51 -26
  131. openlit/instrumentation/premai/utils.py +246 -59
  132. openlit/instrumentation/pydantic_ai/__init__.py +49 -22
  133. openlit/instrumentation/pydantic_ai/pydantic_ai.py +69 -16
  134. openlit/instrumentation/pydantic_ai/utils.py +89 -24
  135. openlit/instrumentation/qdrant/__init__.py +19 -4
  136. openlit/instrumentation/qdrant/async_qdrant.py +33 -7
  137. openlit/instrumentation/qdrant/qdrant.py +33 -7
  138. openlit/instrumentation/qdrant/utils.py +228 -93
  139. openlit/instrumentation/reka/__init__.py +23 -10
  140. openlit/instrumentation/reka/async_reka.py +17 -11
  141. openlit/instrumentation/reka/reka.py +17 -11
  142. openlit/instrumentation/reka/utils.py +138 -36
  143. openlit/instrumentation/together/__init__.py +44 -12
  144. openlit/instrumentation/together/async_together.py +50 -27
  145. openlit/instrumentation/together/together.py +50 -27
  146. openlit/instrumentation/together/utils.py +301 -71
  147. openlit/instrumentation/transformers/__init__.py +2 -1
  148. openlit/instrumentation/transformers/transformers.py +13 -3
  149. openlit/instrumentation/transformers/utils.py +139 -36
  150. openlit/instrumentation/vertexai/__init__.py +81 -16
  151. openlit/instrumentation/vertexai/async_vertexai.py +33 -15
  152. openlit/instrumentation/vertexai/utils.py +123 -27
  153. openlit/instrumentation/vertexai/vertexai.py +33 -15
  154. openlit/instrumentation/vllm/__init__.py +12 -5
  155. openlit/instrumentation/vllm/utils.py +121 -31
  156. openlit/instrumentation/vllm/vllm.py +16 -10
  157. openlit/otel/events.py +35 -10
  158. openlit/otel/metrics.py +32 -24
  159. openlit/otel/tracing.py +24 -9
  160. openlit/semcov/__init__.py +101 -7
  161. {openlit-1.34.29.dist-info → openlit-1.34.31.dist-info}/METADATA +2 -1
  162. openlit-1.34.31.dist-info/RECORD +166 -0
  163. openlit/instrumentation/langchain/async_langchain.py +0 -102
  164. openlit/instrumentation/langchain/langchain.py +0 -102
  165. openlit/instrumentation/langchain/utils.py +0 -252
  166. openlit-1.34.29.dist-info/RECORD +0 -166
  167. {openlit-1.34.29.dist-info → openlit-1.34.31.dist-info}/LICENSE +0 -0
  168. {openlit-1.34.29.dist-info → openlit-1.34.31.dist-info}/WHEEL +0 -0
@@ -0,0 +1,615 @@
1
+ """
2
+ CrewAI utilities for comprehensive telemetry processing and business intelligence
3
+ """
4
+
5
+ import time
6
+ import json
7
+ from urllib.parse import urlparse
8
+ from opentelemetry.trace import Status, StatusCode
9
+ from openlit.__helpers import (
10
+ common_framework_span_attributes,
11
+ handle_exception,
12
+ )
13
+ from openlit.semcov import SemanticConvention
14
+
15
# === OPERATION MAPPING - Framework Guide Compliant ===
# Maps internal instrumentation endpoint identifiers (as wired up by the
# CrewAI instrumentor) to the standard OpenTelemetry GenAI operation names.
# Unknown endpoints fall back to "invoke_agent" at the lookup sites.
OPERATION_MAP = {
    # === STANDARD OPENTELEMETRY OPERATION NAMES ===
    # Crew Operations (workflow management)
    "crew_kickoff": "invoke_agent",
    "crew_train": "invoke_agent",
    "crew_replay": "invoke_agent",
    "crew_test": "invoke_agent",
    # Agent Operations (core agent functions)
    "agent___init__": "create_agent",
    "agent_execute_task": "invoke_agent",
    "agent_backstory_property": "invoke_agent",
    # Task Operations (task execution)
    "task_execute": "invoke_agent",
    "task_execute_async": "invoke_agent",
    "task_execute_core": "invoke_agent",
    # Tool Operations (tool execution)
    "tool_run": "execute_tool",
    "tool___call__": "execute_tool",
    "tool_execute": "execute_tool",
    # Memory Operations (knowledge management)
    "memory_save": "invoke_agent",
    "memory_search": "invoke_agent",
    "memory_reset": "invoke_agent",
}
40
+
41
+
42
def set_server_address_and_port(instance):
    """
    Extract server information from a CrewAI instance.

    Looks for an LLM endpoint either directly on the instance
    (``instance.llm.api_base``) or on an attached agent
    (``instance.agent.llm.api_base``) and parses its host and port.

    Args:
        instance: CrewAI instance (Crew, Agent, Task, etc.)

    Returns:
        tuple: (server_address, server_port). Defaults to
        ("localhost", 8080) when no endpoint can be determined;
        a parsed endpoint without an explicit port yields 443.
    """
    server_address = "localhost"
    server_port = 8080

    try:
        # Resolve the api_base once instead of duplicating the
        # urlparse/hostname/port logic per branch (previous version
        # repeated it for the direct-llm and agent-llm cases).
        api_base = None
        if hasattr(instance, "llm") and hasattr(instance.llm, "api_base"):
            api_base = instance.llm.api_base
        elif hasattr(instance, "agent") and hasattr(instance.agent, "llm"):
            # For tasks that have an agent with LLM
            if hasattr(instance.agent.llm, "api_base"):
                api_base = instance.agent.llm.api_base

        if api_base is not None:
            parsed = urlparse(api_base)
            server_address = parsed.hostname or "localhost"
            server_port = parsed.port or 443
    except Exception:
        # Graceful degradation - endpoint info is best-effort only
        pass

    return server_address, server_port
72
+
73
+
74
def process_crewai_response(
    response,
    operation_type,
    server_address,
    server_port,
    environment,
    application_name,
    metrics,
    start_time,
    span,
    capture_message_content,
    disable_metrics,
    version,
    instance,
    args,
    endpoint=None,
    **kwargs,
):
    """
    Process CrewAI response with comprehensive business intelligence.
    OpenLIT's competitive advantage through superior observability.

    Enriches the active span with operation, agent/tool/task/crew,
    content and token attributes, optionally records metrics, marks the
    span status OK, and returns the original response unchanged.

    Args:
        response: Raw return value of the wrapped CrewAI call (passed through).
        operation_type: Caller-supplied operation category. NOTE(review):
            unused here - the standard name is re-derived from ``endpoint``.
        server_address: Target server hostname for span attributes.
        server_port: Target server port for span attributes.
        environment: Deployment environment label.
        application_name: Logical service name.
        metrics: Dict of OTel metric instruments (may be None/empty).
        start_time: Wall-clock start of the operation (time.time()).
        span: Active OpenTelemetry span to enrich.
        capture_message_content: When True, attach prompt/completion events.
        disable_metrics: When True, skip metric recording.
        version: Instrumented package version.
        instance: CrewAI object (Crew/Agent/Task/Tool) being traced.
        args: Positional args of the wrapped call.
        endpoint: Internal endpoint identifier (e.g. "agent_execute_task").
        **kwargs: Keyword args of the wrapped call.

    Returns:
        The ``response`` argument, unmodified.
    """

    end_time = time.time()
    duration_ms = (end_time - start_time) * 1000

    # Create proper scope object for common_framework_span_attributes,
    # which presumably reads _span/_start_time/_end_time off the scope.
    scope = type("GenericScope", (), {})()
    scope._span = span
    scope._start_time = start_time
    scope._end_time = end_time

    # Get standard operation name from mapping
    standard_operation = OPERATION_MAP.get(endpoint, "invoke_agent")

    # Extract model information from agent's LLM for proper attribution
    request_model = "unknown"
    if instance:
        llm = getattr(instance, "llm", None)
        if llm:
            # Try different model attribute names used by different LLM libraries
            request_model = (
                getattr(llm, "model_name", None)
                or getattr(llm, "model", None)
                or getattr(llm, "_model_name", None)
                or "unknown"
            )
            if request_model != "unknown":
                request_model = str(request_model)

    # Create a wrapper instance that exposes model_name for common_framework_span_attributes
    class ModelWrapper:
        """Wrapper class to expose model_name for framework span attributes."""

        def __init__(self, original_instance, model_name):
            self._original = original_instance
            self.model_name = model_name

        def __getattr__(self, name):
            # Delegate every other attribute lookup to the wrapped instance.
            return getattr(self._original, name)

        def get_original_instance(self):
            """Get the original wrapped instance."""
            return self._original

    model_instance = ModelWrapper(instance, request_model) if instance else None

    # Set common framework span attributes
    common_framework_span_attributes(
        scope,
        SemanticConvention.GEN_AI_SYSTEM_CREWAI,
        server_address,
        server_port,
        environment,
        application_name,
        version,
        endpoint,
        model_instance,
    )

    # Set span name following OpenTelemetry format
    _set_span_name(span, standard_operation, instance, endpoint, args, kwargs)

    # === CORE SEMANTIC ATTRIBUTES ===
    span.set_attribute(SemanticConvention.GEN_AI_OPERATION, standard_operation)
    # Remove gen_ai.endpoint as requested

    # === STANDARD BUSINESS INTELLIGENCE ===
    # Only use standard OpenTelemetry attributes, no framework-specific ones.
    # Each helper is a no-op unless the endpoint matches its prefix.
    _set_agent_business_intelligence(span, instance, endpoint, args, kwargs)
    _set_tool_business_intelligence(span, instance, endpoint, args, kwargs)
    _set_task_business_intelligence(span, instance, endpoint, args, kwargs)
    _set_crew_business_intelligence(span, instance, endpoint, args, kwargs)
    # Remove framework-specific functions: _set_workflow_business_intelligence, _set_memory_business_intelligence

    # === PERFORMANCE INTELLIGENCE ===
    # Use standard OpenTelemetry duration attribute through common_framework_span_attributes

    # === CONTENT CAPTURE ===
    if capture_message_content:
        _capture_content(span, instance, response, endpoint)

    # === COST TRACKING ===
    _track_cost_and_tokens(span, instance, response, endpoint)

    # === RECORD METRICS ===
    if not disable_metrics and metrics:
        _record_crewai_metrics(
            metrics, standard_operation, duration_ms, environment, application_name
        )

    span.set_status(Status(StatusCode.OK))
    return response
187
+
188
+
189
+ def _set_span_name(span, operation_type, instance, endpoint, args, kwargs):
190
+ """Set span name following OpenTelemetry format: '{operation_type} {name}'"""
191
+ try:
192
+ # Get the operation name from our mapping
193
+ operation_name = OPERATION_MAP.get(endpoint, "invoke_agent")
194
+
195
+ if endpoint.startswith("crew_"):
196
+ # Crew operations: "invoke_agent {crew_name}"
197
+ crew_name = getattr(instance, "name", None) or "crew"
198
+ span.update_name(f"{operation_name} {crew_name}")
199
+
200
+ elif endpoint.startswith("agent_"):
201
+ if "create" in endpoint or endpoint == "agent___init__":
202
+ # Agent creation: "create_agent {agent_name}"
203
+ agent_name = getattr(instance, "name", None) or getattr(
204
+ instance, "role", "agent"
205
+ )
206
+ span.update_name(f"create_agent {agent_name}")
207
+ else:
208
+ # Agent invocation: "invoke_agent {agent_name}"
209
+ agent_name = getattr(instance, "name", None) or getattr(
210
+ instance, "role", "agent"
211
+ )
212
+ span.update_name(f"invoke_agent {agent_name}")
213
+
214
+ elif endpoint.startswith("task_"):
215
+ # Task operations: "invoke_agent task"
216
+ span.update_name("invoke_agent task")
217
+
218
+ elif endpoint.startswith("tool_"):
219
+ # Tool operations: "execute_tool {tool_name}"
220
+ tool_name = (
221
+ getattr(instance, "name", None)
222
+ or getattr(instance, "__class__", type(instance)).__name__
223
+ )
224
+ span.update_name(f"execute_tool {tool_name}")
225
+
226
+ elif endpoint.startswith("memory_"):
227
+ # Memory operations: "invoke_agent memory:{operation}"
228
+ memory_op = endpoint.split("_", 1)[1] if "_" in endpoint else "operation"
229
+ span.update_name(f"invoke_agent memory:{memory_op}")
230
+
231
+ else:
232
+ # Default fallback
233
+ span.update_name(f"{operation_name} {endpoint}")
234
+
235
+ except Exception as e:
236
+ handle_exception(span, e)
237
+ # Fallback naming
238
+ span.update_name(f"invoke_agent {endpoint}")
239
+
240
+
241
def _set_agent_business_intelligence(span, instance, endpoint, args, kwargs):
    """Attach standard OpenTelemetry agent attributes for agent_* endpoints.

    Records agent id/name/description, retry and delegation configuration,
    attached tool names, then delegates to the OpenAI-specific and
    conversation/data-source helpers. No-op for non-agent endpoints;
    failures are reported through handle_exception.
    """
    if not endpoint.startswith("agent_"):
        return

    try:
        set_attr = span.set_attribute

        # Standard OpenTelemetry Gen AI Agent attributes
        agent_id = getattr(instance, "id", "")
        if agent_id:
            set_attr(SemanticConvention.GEN_AI_AGENT_ID, str(agent_id))

        display_name = getattr(instance, "name", None) or getattr(
            instance, "role", ""
        )
        if display_name:
            set_attr(SemanticConvention.GEN_AI_AGENT_NAME, display_name)

        # Per the OpenTelemetry spec, describe the agent as "role: goal"
        role = getattr(instance, "role", "")
        goal = getattr(instance, "goal", "")
        if role and goal:
            set_attr(SemanticConvention.GEN_AI_AGENT_DESCRIPTION, f"{role}: {goal}")
        elif goal:
            set_attr(SemanticConvention.GEN_AI_AGENT_DESCRIPTION, goal)

        # Agent configuration flags - emitted only when explicitly present
        config_mappings = (
            ("max_retry_limit", SemanticConvention.GEN_AI_AGENT_MAX_RETRY_LIMIT),
            ("allow_delegation", SemanticConvention.GEN_AI_AGENT_ALLOW_DELEGATION),
            (
                "allow_code_execution",
                SemanticConvention.GEN_AI_AGENT_ALLOW_CODE_EXECUTION,
            ),
        )
        for attr_name, semconv_key in config_mappings:
            value = getattr(instance, attr_name, None)
            if value is not None:
                set_attr(semconv_key, value)

        # Record the names of up to five attached tools
        tools = getattr(instance, "tools", [])
        if tools:
            tool_names = [getattr(tool, "name", str(tool)) for tool in tools[:5]]
            if tool_names:
                set_attr(
                    SemanticConvention.GEN_AI_AGENT_TOOLS, ", ".join(tool_names)
                )

        # OpenAI agent-specific attributes
        _set_openai_agent_attributes(span, instance, endpoint, args, kwargs)

        # Conversation and data-source tracking
        _set_conversation_and_data_source_attributes(
            span, instance, endpoint, args, kwargs
        )

    except Exception as e:
        handle_exception(span, e)
306
+
307
+
308
def _set_openai_agent_attributes(span, instance, endpoint, args, kwargs):
    """Set OpenAI-specific agent attributes when the agent uses an OpenAI model.

    Records the request model name for any LLM; for OpenAI-backed LLMs also
    records service tier and Assistant API identifiers. The gen_ai.system
    attribute intentionally stays "crewai".
    """
    try:
        llm = getattr(instance, "llm", None)
        if not llm:
            return

        llm_class = llm.__class__.__name__.lower()
        llm_model = getattr(llm, "model_name", getattr(llm, "model", ""))

        # Set model information
        if llm_model:
            span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL, llm_model)

        if "openai" in llm_class or "gpt" in str(llm_model).lower():
            # OpenAI service tier if available
            service_tier = getattr(llm, "service_tier", None)
            if service_tier:
                span.set_attribute(
                    SemanticConvention.GEN_AI_OPENAI_REQUEST_SERVICE_TIER,
                    service_tier,
                )

            # OpenAI Assistant API identifiers, taken from the instance
            # first, then from the call kwargs
            id_mappings = (
                ("assistant_id", SemanticConvention.GEN_AI_OPENAI_ASSISTANT_ID),
                ("thread_id", SemanticConvention.GEN_AI_OPENAI_THREAD_ID),
                ("run_id", SemanticConvention.GEN_AI_OPENAI_RUN_ID),
            )
            for key, semconv_key in id_mappings:
                value = getattr(instance, key, None) or kwargs.get(key)
                if value:
                    span.set_attribute(semconv_key, value)

        elif "litellm" in llm_class:
            # LiteLLM detected - no extra attributes yet
            # (gen_ai.system still stays "crewai")
            pass

    except Exception as e:
        handle_exception(span, e)
359
+
360
+
361
def _set_conversation_and_data_source_attributes(
    span, instance, endpoint, args, kwargs
):
    """Record conversation id and RAG data-source attributes on the span."""
    try:
        # Conversation ID for multi-turn interactions
        # (instance attributes take precedence over call kwargs)
        conversation_id = (
            getattr(instance, "conversation_id", None)
            or getattr(instance, "session_id", None)
            or kwargs.get("conversation_id")
            or kwargs.get("session_id")
        )
        if conversation_id:
            span.set_attribute(
                SemanticConvention.GEN_AI_CONVERSATION_ID, str(conversation_id)
            )

        # Memory provider acts as a data source for RAG operations
        memory = getattr(instance, "memory", None)
        memory_provider = getattr(memory, "provider", None) if memory else None
        if memory_provider:
            span.set_attribute(SemanticConvention.GEN_AI_DATA_SOURCE_TYPE, "memory")
            span.set_attribute(
                SemanticConvention.GEN_AI_DATA_SOURCE_ID, str(memory_provider)
            )

        # Knowledge base or vector store detection
        knowledge_source = getattr(instance, "knowledge_source", None)
        if knowledge_source:
            span.set_attribute(
                SemanticConvention.GEN_AI_DATA_SOURCE_TYPE, "knowledge_base"
            )
            span.set_attribute(
                SemanticConvention.GEN_AI_DATA_SOURCE_ID, str(knowledge_source)
            )

        # Tool-based data sources: flag the first retrieval-style tool
        retrieval_keywords = ("search", "retrieval", "database", "vector")
        for tool in getattr(instance, "tools", []):
            lowered = getattr(tool, "name", "").lower()
            if any(keyword in lowered for keyword in retrieval_keywords):
                span.set_attribute(
                    SemanticConvention.GEN_AI_DATA_SOURCE_TYPE, "external_tool"
                )
                break

    except Exception as e:
        handle_exception(span, e)
414
+
415
+
416
def _set_task_business_intelligence(span, instance, endpoint, args, kwargs):
    """Attach task attributes (id, description, expected output) for task_* endpoints.

    No-op for non-task endpoints; failures go through handle_exception.
    """
    if not endpoint.startswith("task_"):
        return

    try:
        # Task ID tracking
        task_id = getattr(instance, "id", None)
        if task_id:
            span.set_attribute(SemanticConvention.GEN_AI_AGENT_TASK_ID, str(task_id))

        # Task description and expected output - only essential attributes
        # that have semantic conventions are kept
        text_mappings = (
            ("description", SemanticConvention.GEN_AI_TASK_DESCRIPTION),
            ("expected_output", SemanticConvention.GEN_AI_TASK_EXPECTED_OUTPUT),
        )
        for attr_name, semconv_key in text_mappings:
            value = getattr(instance, attr_name, "")
            if value:
                span.set_attribute(semconv_key, value)

    except Exception as e:
        handle_exception(span, e)
443
+
444
+
445
+ def _set_crew_business_intelligence(span, instance, endpoint, args, kwargs):
446
+ """Set crew business intelligence using standard OpenTelemetry semantic conventions"""
447
+ if not endpoint.startswith("crew_"):
448
+ return
449
+
450
+ try:
451
+ # Only capture essential crew attributes - remove custom ones that don't have semantic conventions
452
+ pass
453
+
454
+ except Exception as e:
455
+ handle_exception(span, e)
456
+
457
+
458
def _set_tool_business_intelligence(span, instance, endpoint, args, kwargs):
    """Attach standard tool attributes (name, call id, type) for tool_* endpoints."""
    if not endpoint.startswith("tool_"):
        return

    try:
        # Tool name falls back to the class name when no `name` attribute exists
        tool_name = (
            getattr(instance, "name", None)
            or getattr(instance, "__class__", type(instance)).__name__
        )
        if tool_name:
            span.set_attribute(SemanticConvention.GEN_AI_TOOL_NAME, tool_name)

        # Track the specific invocation when a call id is available
        # (kwargs take precedence over the instance attribute)
        call_id = kwargs.get("call_id", None) or getattr(instance, "call_id", None)
        if call_id:
            span.set_attribute(SemanticConvention.GEN_AI_TOOL_CALL_ID, str(call_id))

        # OpenAI function-calling / tool-type classification
        _set_openai_tool_attributes(span, instance, endpoint, args, kwargs)

    except Exception as e:
        handle_exception(span, e)
484
+
485
+
486
def _set_openai_tool_attributes(span, instance, endpoint, args, kwargs):
    """Classify the tool into a standard type and record it on the span.

    Classification is framework-agnostic and keyword-based on the tool's
    class name; the first matching category wins, defaulting to "custom".
    """
    try:
        # Ordered (type, keywords) table - order matters: earlier
        # categories take precedence, matching the original chain.
        classification_table = (
            ("search", ("search", "web", "browser")),
            ("file_system", ("file", "read", "write")),
            ("api_client", ("api", "http", "request")),
            ("database", ("database", "sql", "query")),
            ("vector_store", ("vector", "embedding", "retrieval")),
        )

        tool_class = instance.__class__.__name__.lower()
        tool_type = "custom"
        for candidate, keywords in classification_table:
            if any(keyword in tool_class for keyword in keywords):
                tool_type = candidate
                break

        # Use standard tool type attribute from semcov
        span.set_attribute(SemanticConvention.GEN_AI_TOOL_TYPE, tool_type)

    except Exception as e:
        handle_exception(span, e)
511
+
512
+
513
def _capture_content(span, instance, response, endpoint):
    """Attach prompt/completion content events to the span (size-capped).

    Best-effort: any failure is silently ignored so content capture can
    never break the instrumented call.
    """
    try:
        # Completion content: stringified response, capped at 1000 chars
        if response:
            span.add_event(
                name=SemanticConvention.GEN_AI_CONTENT_COMPLETION_EVENT,
                attributes={
                    SemanticConvention.GEN_AI_CONTENT_COMPLETION: str(response)[:1000],
                },
            )

        # Prompt content: for task endpoints the task description is the input
        if endpoint.startswith("task_"):
            description = getattr(instance, "description", "")
            if description:
                span.add_event(
                    name=SemanticConvention.GEN_AI_CONTENT_PROMPT_EVENT,
                    attributes={
                        SemanticConvention.GEN_AI_CONTENT_PROMPT: description[:1000],
                    },
                )

    except Exception:
        # Graceful degradation
        pass
544
+
545
+
546
+ def _track_cost_and_tokens(span, instance, response, endpoint):
547
+ """Track cost and token usage for business intelligence"""
548
+
549
+ try:
550
+ # Token tracking from LLM calls
551
+ if hasattr(instance, "llm") and hasattr(instance.llm, "get_num_tokens"):
552
+ # This would be framework-specific implementation
553
+ pass
554
+
555
+ # Response length as a proxy metric and token estimation
556
+ if response:
557
+ response_length = len(str(response))
558
+ # Estimate token count (rough approximation: 4 chars per token)
559
+ estimated_tokens = response_length // 4
560
+ span.set_attribute(
561
+ SemanticConvention.GEN_AI_CLIENT_TOKEN_USAGE, estimated_tokens
562
+ )
563
+
564
+ # Cost estimation would require pricing information
565
+ # This could be enhanced with actual cost tracking
566
+
567
+ except Exception:
568
+ # Graceful degradation
569
+ pass
570
+
571
+
572
+ def _record_crewai_metrics(
573
+ metrics, operation_type, duration_ms, environment, application_name
574
+ ):
575
+ """Record CrewAI-specific metrics"""
576
+
577
+ try:
578
+ attributes = {
579
+ "gen_ai.operation.name": operation_type,
580
+ "gen_ai.system": SemanticConvention.GEN_AI_SYSTEM_CREWAI,
581
+ "service.name": application_name,
582
+ "deployment.environment": environment,
583
+ }
584
+
585
+ # Record operation duration
586
+ if "genai_client_operation_duration" in metrics:
587
+ metrics["genai_client_operation_duration"].record(
588
+ duration_ms / 1000, attributes
589
+ )
590
+
591
+ # Record operation count
592
+ if "genai_requests" in metrics:
593
+ metrics["genai_requests"].add(1, attributes)
594
+
595
+ except Exception:
596
+ # Graceful degradation
597
+ pass
598
+
599
+
600
+ def _parse_tools(tools):
601
+ """Parse tools list into JSON format"""
602
+
603
+ try:
604
+ result = []
605
+ for tool in tools:
606
+ tool_info = {}
607
+ if hasattr(tool, "name") and tool.name is not None:
608
+ tool_info["name"] = tool.name
609
+ if hasattr(tool, "description") and tool.description is not None:
610
+ tool_info["description"] = tool.description
611
+ if tool_info:
612
+ result.append(tool_info)
613
+ return json.dumps(result)
614
+ except Exception:
615
+ return "[]"
@@ -6,12 +6,11 @@ import importlib.metadata
6
6
  from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
7
7
  from wrapt import wrap_function_wrapper
8
8
 
9
- from openlit.instrumentation.dynamiq.dynamiq import (
10
- dynamiq_wrap
11
- )
9
+ from openlit.instrumentation.dynamiq.dynamiq import dynamiq_wrap
12
10
 
13
11
  _instruments = ("dynamiq >= 0.4.0",)
14
12
 
13
+
15
14
  class DynamiqInstrumentor(BaseInstrumentor):
16
15
  """
17
16
  An instrumentor for dynamiq's client library.
@@ -33,32 +32,67 @@ class DynamiqInstrumentor(BaseInstrumentor):
33
32
  wrap_function_wrapper(
34
33
  "dynamiq.nodes.agents.base",
35
34
  "Agent.run",
36
- dynamiq_wrap("dynamiq.agent_run", version, environment, application_name,
37
- tracer, pricing_info, capture_message_content, metrics, disable_metrics),
35
+ dynamiq_wrap(
36
+ "dynamiq.agent_run",
37
+ version,
38
+ environment,
39
+ application_name,
40
+ tracer,
41
+ pricing_info,
42
+ capture_message_content,
43
+ metrics,
44
+ disable_metrics,
45
+ ),
38
46
  )
39
47
 
40
48
  wrap_function_wrapper(
41
49
  "dynamiq",
42
50
  "Workflow.run",
43
- dynamiq_wrap("dynamiq.workflow_run", version, environment, application_name,
44
- tracer, pricing_info, capture_message_content, metrics, disable_metrics),
51
+ dynamiq_wrap(
52
+ "dynamiq.workflow_run",
53
+ version,
54
+ environment,
55
+ application_name,
56
+ tracer,
57
+ pricing_info,
58
+ capture_message_content,
59
+ metrics,
60
+ disable_metrics,
61
+ ),
45
62
  )
46
63
 
47
64
  wrap_function_wrapper(
48
65
  "dynamiq.memory",
49
66
  "Memory.add",
50
- dynamiq_wrap("dynamiq.memory_add", version, environment, application_name,
51
- tracer, pricing_info, capture_message_content, metrics, disable_metrics),
67
+ dynamiq_wrap(
68
+ "dynamiq.memory_add",
69
+ version,
70
+ environment,
71
+ application_name,
72
+ tracer,
73
+ pricing_info,
74
+ capture_message_content,
75
+ metrics,
76
+ disable_metrics,
77
+ ),
52
78
  )
53
79
 
54
80
  wrap_function_wrapper(
55
81
  "dynamiq.memory",
56
82
  "Memory.search",
57
- dynamiq_wrap("dynamiq.memory_search", version, environment, application_name,
58
- tracer, pricing_info, capture_message_content, metrics, disable_metrics),
83
+ dynamiq_wrap(
84
+ "dynamiq.memory_search",
85
+ version,
86
+ environment,
87
+ application_name,
88
+ tracer,
89
+ pricing_info,
90
+ capture_message_content,
91
+ metrics,
92
+ disable_metrics,
93
+ ),
59
94
  )
60
95
 
61
-
62
96
    def _uninstrument(self, **kwargs):
        # Proper uninstrumentation logic to revert patched methods
        # is not yet implemented; the wrap_function_wrapper patches
        # applied in _instrument persist for the process lifetime.
        pass