openlit 1.34.30__py3-none-any.whl → 1.34.31__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (168)
  1. openlit/__helpers.py +235 -86
  2. openlit/__init__.py +16 -13
  3. openlit/_instrumentors.py +2 -1
  4. openlit/evals/all.py +50 -21
  5. openlit/evals/bias_detection.py +47 -20
  6. openlit/evals/hallucination.py +53 -22
  7. openlit/evals/toxicity.py +50 -21
  8. openlit/evals/utils.py +54 -30
  9. openlit/guard/all.py +61 -19
  10. openlit/guard/prompt_injection.py +34 -14
  11. openlit/guard/restrict_topic.py +46 -15
  12. openlit/guard/sensitive_topic.py +34 -14
  13. openlit/guard/utils.py +58 -22
  14. openlit/instrumentation/ag2/__init__.py +24 -8
  15. openlit/instrumentation/ag2/ag2.py +34 -13
  16. openlit/instrumentation/ag2/async_ag2.py +34 -13
  17. openlit/instrumentation/ag2/utils.py +133 -30
  18. openlit/instrumentation/ai21/__init__.py +43 -14
  19. openlit/instrumentation/ai21/ai21.py +47 -21
  20. openlit/instrumentation/ai21/async_ai21.py +47 -21
  21. openlit/instrumentation/ai21/utils.py +299 -78
  22. openlit/instrumentation/anthropic/__init__.py +21 -4
  23. openlit/instrumentation/anthropic/anthropic.py +28 -17
  24. openlit/instrumentation/anthropic/async_anthropic.py +28 -17
  25. openlit/instrumentation/anthropic/utils.py +145 -35
  26. openlit/instrumentation/assemblyai/__init__.py +11 -2
  27. openlit/instrumentation/assemblyai/assemblyai.py +15 -4
  28. openlit/instrumentation/assemblyai/utils.py +120 -25
  29. openlit/instrumentation/astra/__init__.py +43 -10
  30. openlit/instrumentation/astra/astra.py +28 -5
  31. openlit/instrumentation/astra/async_astra.py +28 -5
  32. openlit/instrumentation/astra/utils.py +151 -55
  33. openlit/instrumentation/azure_ai_inference/__init__.py +43 -10
  34. openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +53 -21
  35. openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +53 -21
  36. openlit/instrumentation/azure_ai_inference/utils.py +307 -83
  37. openlit/instrumentation/bedrock/__init__.py +21 -4
  38. openlit/instrumentation/bedrock/bedrock.py +63 -25
  39. openlit/instrumentation/bedrock/utils.py +139 -30
  40. openlit/instrumentation/chroma/__init__.py +89 -16
  41. openlit/instrumentation/chroma/chroma.py +28 -6
  42. openlit/instrumentation/chroma/utils.py +167 -51
  43. openlit/instrumentation/cohere/__init__.py +63 -18
  44. openlit/instrumentation/cohere/async_cohere.py +63 -24
  45. openlit/instrumentation/cohere/cohere.py +63 -24
  46. openlit/instrumentation/cohere/utils.py +286 -73
  47. openlit/instrumentation/controlflow/__init__.py +35 -9
  48. openlit/instrumentation/controlflow/controlflow.py +66 -33
  49. openlit/instrumentation/crawl4ai/__init__.py +25 -10
  50. openlit/instrumentation/crawl4ai/async_crawl4ai.py +78 -31
  51. openlit/instrumentation/crawl4ai/crawl4ai.py +78 -31
  52. openlit/instrumentation/crewai/__init__.py +40 -15
  53. openlit/instrumentation/crewai/async_crewai.py +32 -7
  54. openlit/instrumentation/crewai/crewai.py +32 -7
  55. openlit/instrumentation/crewai/utils.py +159 -56
  56. openlit/instrumentation/dynamiq/__init__.py +46 -12
  57. openlit/instrumentation/dynamiq/dynamiq.py +74 -33
  58. openlit/instrumentation/elevenlabs/__init__.py +23 -4
  59. openlit/instrumentation/elevenlabs/async_elevenlabs.py +16 -4
  60. openlit/instrumentation/elevenlabs/elevenlabs.py +16 -4
  61. openlit/instrumentation/elevenlabs/utils.py +128 -25
  62. openlit/instrumentation/embedchain/__init__.py +11 -2
  63. openlit/instrumentation/embedchain/embedchain.py +68 -35
  64. openlit/instrumentation/firecrawl/__init__.py +24 -7
  65. openlit/instrumentation/firecrawl/firecrawl.py +46 -20
  66. openlit/instrumentation/google_ai_studio/__init__.py +45 -10
  67. openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +67 -44
  68. openlit/instrumentation/google_ai_studio/google_ai_studio.py +67 -44
  69. openlit/instrumentation/google_ai_studio/utils.py +180 -67
  70. openlit/instrumentation/gpt4all/__init__.py +22 -7
  71. openlit/instrumentation/gpt4all/gpt4all.py +67 -29
  72. openlit/instrumentation/gpt4all/utils.py +285 -61
  73. openlit/instrumentation/gpu/__init__.py +128 -47
  74. openlit/instrumentation/groq/__init__.py +21 -4
  75. openlit/instrumentation/groq/async_groq.py +33 -21
  76. openlit/instrumentation/groq/groq.py +33 -21
  77. openlit/instrumentation/groq/utils.py +192 -55
  78. openlit/instrumentation/haystack/__init__.py +70 -24
  79. openlit/instrumentation/haystack/async_haystack.py +28 -6
  80. openlit/instrumentation/haystack/haystack.py +28 -6
  81. openlit/instrumentation/haystack/utils.py +196 -74
  82. openlit/instrumentation/julep/__init__.py +69 -19
  83. openlit/instrumentation/julep/async_julep.py +53 -27
  84. openlit/instrumentation/julep/julep.py +53 -28
  85. openlit/instrumentation/langchain/__init__.py +74 -63
  86. openlit/instrumentation/langchain/callback_handler.py +1100 -0
  87. openlit/instrumentation/langchain_community/__init__.py +13 -2
  88. openlit/instrumentation/langchain_community/async_langchain_community.py +23 -5
  89. openlit/instrumentation/langchain_community/langchain_community.py +23 -5
  90. openlit/instrumentation/langchain_community/utils.py +35 -9
  91. openlit/instrumentation/letta/__init__.py +68 -15
  92. openlit/instrumentation/letta/letta.py +99 -54
  93. openlit/instrumentation/litellm/__init__.py +43 -14
  94. openlit/instrumentation/litellm/async_litellm.py +51 -26
  95. openlit/instrumentation/litellm/litellm.py +51 -26
  96. openlit/instrumentation/litellm/utils.py +304 -102
  97. openlit/instrumentation/llamaindex/__init__.py +267 -90
  98. openlit/instrumentation/llamaindex/async_llamaindex.py +28 -6
  99. openlit/instrumentation/llamaindex/llamaindex.py +28 -6
  100. openlit/instrumentation/llamaindex/utils.py +204 -91
  101. openlit/instrumentation/mem0/__init__.py +11 -2
  102. openlit/instrumentation/mem0/mem0.py +50 -29
  103. openlit/instrumentation/milvus/__init__.py +10 -2
  104. openlit/instrumentation/milvus/milvus.py +31 -6
  105. openlit/instrumentation/milvus/utils.py +166 -67
  106. openlit/instrumentation/mistral/__init__.py +63 -18
  107. openlit/instrumentation/mistral/async_mistral.py +63 -24
  108. openlit/instrumentation/mistral/mistral.py +63 -24
  109. openlit/instrumentation/mistral/utils.py +277 -69
  110. openlit/instrumentation/multion/__init__.py +69 -19
  111. openlit/instrumentation/multion/async_multion.py +57 -26
  112. openlit/instrumentation/multion/multion.py +57 -26
  113. openlit/instrumentation/ollama/__init__.py +39 -18
  114. openlit/instrumentation/ollama/async_ollama.py +57 -26
  115. openlit/instrumentation/ollama/ollama.py +57 -26
  116. openlit/instrumentation/ollama/utils.py +226 -50
  117. openlit/instrumentation/openai/__init__.py +156 -32
  118. openlit/instrumentation/openai/async_openai.py +147 -67
  119. openlit/instrumentation/openai/openai.py +150 -67
  120. openlit/instrumentation/openai/utils.py +657 -185
  121. openlit/instrumentation/openai_agents/__init__.py +5 -1
  122. openlit/instrumentation/openai_agents/processor.py +110 -90
  123. openlit/instrumentation/phidata/__init__.py +13 -5
  124. openlit/instrumentation/phidata/phidata.py +67 -32
  125. openlit/instrumentation/pinecone/__init__.py +48 -9
  126. openlit/instrumentation/pinecone/async_pinecone.py +27 -5
  127. openlit/instrumentation/pinecone/pinecone.py +27 -5
  128. openlit/instrumentation/pinecone/utils.py +153 -47
  129. openlit/instrumentation/premai/__init__.py +22 -7
  130. openlit/instrumentation/premai/premai.py +51 -26
  131. openlit/instrumentation/premai/utils.py +246 -59
  132. openlit/instrumentation/pydantic_ai/__init__.py +49 -22
  133. openlit/instrumentation/pydantic_ai/pydantic_ai.py +69 -16
  134. openlit/instrumentation/pydantic_ai/utils.py +89 -24
  135. openlit/instrumentation/qdrant/__init__.py +19 -4
  136. openlit/instrumentation/qdrant/async_qdrant.py +33 -7
  137. openlit/instrumentation/qdrant/qdrant.py +33 -7
  138. openlit/instrumentation/qdrant/utils.py +228 -93
  139. openlit/instrumentation/reka/__init__.py +23 -10
  140. openlit/instrumentation/reka/async_reka.py +17 -11
  141. openlit/instrumentation/reka/reka.py +17 -11
  142. openlit/instrumentation/reka/utils.py +138 -36
  143. openlit/instrumentation/together/__init__.py +44 -12
  144. openlit/instrumentation/together/async_together.py +50 -27
  145. openlit/instrumentation/together/together.py +50 -27
  146. openlit/instrumentation/together/utils.py +301 -71
  147. openlit/instrumentation/transformers/__init__.py +2 -1
  148. openlit/instrumentation/transformers/transformers.py +13 -3
  149. openlit/instrumentation/transformers/utils.py +139 -36
  150. openlit/instrumentation/vertexai/__init__.py +81 -16
  151. openlit/instrumentation/vertexai/async_vertexai.py +33 -15
  152. openlit/instrumentation/vertexai/utils.py +123 -27
  153. openlit/instrumentation/vertexai/vertexai.py +33 -15
  154. openlit/instrumentation/vllm/__init__.py +12 -5
  155. openlit/instrumentation/vllm/utils.py +121 -31
  156. openlit/instrumentation/vllm/vllm.py +16 -10
  157. openlit/otel/events.py +35 -10
  158. openlit/otel/metrics.py +32 -24
  159. openlit/otel/tracing.py +24 -9
  160. openlit/semcov/__init__.py +72 -6
  161. {openlit-1.34.30.dist-info → openlit-1.34.31.dist-info}/METADATA +2 -1
  162. openlit-1.34.31.dist-info/RECORD +166 -0
  163. openlit/instrumentation/langchain/async_langchain.py +0 -102
  164. openlit/instrumentation/langchain/langchain.py +0 -102
  165. openlit/instrumentation/langchain/utils.py +0 -252
  166. openlit-1.34.30.dist-info/RECORD +0 -168
  167. {openlit-1.34.30.dist-info → openlit-1.34.31.dist-info}/LICENSE +0 -0
  168. {openlit-1.34.30.dist-info → openlit-1.34.31.dist-info}/WHEEL +0 -0
openlit/instrumentation/openai_agents/__init__.py
@@ -10,6 +10,7 @@ from openlit.instrumentation.openai_agents.processor import OpenLITTracingProces
 
 _instruments = ("openai-agents >= 0.0.3",)
 
+
 class OpenAIAgentsInstrumentor(BaseInstrumentor):
     """OpenLIT instrumentor for OpenAI Agents using native tracing system"""
 
@@ -37,18 +38,20 @@ class OpenAIAgentsInstrumentor(BaseInstrumentor):
             capture_message_content=capture_message_content,
             metrics=metrics,
             disable_metrics=disable_metrics,
-            detailed_tracing=detailed_tracing
+            detailed_tracing=detailed_tracing,
         )
 
         # Integrate with OpenAI Agents' native tracing system
         try:
             from agents import set_trace_processors
+
             # Replace existing processors with our enhanced processor
             set_trace_processors([processor])
         except ImportError:
             # Fallback: Add our processor to existing ones
             try:
                 from agents import add_trace_processor
+
                 add_trace_processor(processor)
             except ImportError:
                 pass  # Agents package may not have tracing
@@ -57,6 +60,7 @@ class OpenAIAgentsInstrumentor(BaseInstrumentor):
         # Clear our processors
         try:
             from agents import set_trace_processors
+
             set_trace_processors([])
         except ImportError:
             pass
openlit/instrumentation/openai_agents/processor.py
@@ -10,13 +10,14 @@ from opentelemetry.trace import SpanKind, Status, StatusCode, set_span_in_contex
 from openlit.__helpers import (
     common_framework_span_attributes,
     handle_exception,
-    get_chat_model_cost
+    get_chat_model_cost,
 )
 from openlit.semcov import SemanticConvention
 
 # Try to import agents framework components with fallback
 try:
     from agents import TracingProcessor
+
     if TYPE_CHECKING:
         from agents import Trace, Span
     TRACING_AVAILABLE = True
@@ -50,9 +51,19 @@ class OpenLITTracingProcessor(TracingProcessor):
     cost tracking, and performance metrics.
     """
 
-    def __init__(self, tracer, version, environment, application_name,
-                 pricing_info, capture_message_content, metrics,
-                 disable_metrics, detailed_tracing, **kwargs):
+    def __init__(
+        self,
+        tracer,
+        version,
+        environment,
+        application_name,
+        pricing_info,
+        capture_message_content,
+        metrics,
+        disable_metrics,
+        detailed_tracing,
+        **kwargs,
+    ):
         """Initialize the OpenLIT tracing processor."""
         super().__init__()
 
@@ -90,11 +101,10 @@ class OpenLITTracingProcessor(TracingProcessor):
             kind=SpanKind.CLIENT,
             attributes={
                 SemanticConvention.GEN_AI_SYSTEM: "openai_agents",
-                SemanticConvention.GEN_AI_OPERATION:
-                    SemanticConvention.GEN_AI_OPERATION_TYPE_WORKFLOW,
+                SemanticConvention.GEN_AI_OPERATION: SemanticConvention.GEN_AI_OPERATION_TYPE_WORKFLOW,
                 "trace.id": trace_id,
                 "trace.name": name,
-            }
+            },
         )
 
         # Create scope for common attributes
@@ -112,7 +122,7 @@
             self.environment,
             self.application_name,
             self.version,
-            name
+            name,
         )
 
         # Track active span
@@ -164,28 +174,28 @@
             Formatted span name following semantic conventions
         """
         # Extract context for naming
-        agent_name = metadata.get('agent_name', '')
-        model_name = metadata.get('model_name', '')
-        tool_name = metadata.get('tool_name', '')
-        workflow_name = metadata.get('workflow_name', '')
+        agent_name = metadata.get("agent_name", "")
+        model_name = metadata.get("model_name", "")
+        tool_name = metadata.get("tool_name", "")
+        workflow_name = metadata.get("workflow_name", "")
 
         # Apply OpenTelemetry semantic conventions for GenAI agents
-        if 'agent' in operation_name.lower():
+        if "agent" in operation_name.lower():
             if agent_name:
                 return f"invoke_agent {agent_name}"
             return "invoke_agent"
-        if 'chat' in operation_name.lower():
+        if "chat" in operation_name.lower():
             if model_name:
                 return f"chat {model_name}"
             return "chat response"
-        if 'tool' in operation_name.lower():
+        if "tool" in operation_name.lower():
             if tool_name:
                 return f"execute_tool {tool_name}"
             return "execute_tool"
-        if 'handoff' in operation_name.lower():
-            target_agent = metadata.get('target_agent', 'unknown')
+        if "handoff" in operation_name.lower():
+            target_agent = metadata.get("target_agent", "unknown")
             return f"invoke_agent {target_agent}"
-        if 'workflow' in operation_name.lower():
+        if "workflow" in operation_name.lower():
             if workflow_name:
                 return f"workflow {workflow_name}"
             return "workflow"
@@ -203,15 +213,15 @@ class OpenLITTracingProcessor(TracingProcessor):
         """
         try:
             # Extract span information
-            span_name = getattr(span_data, 'name', 'unknown_operation')
-            span_type = getattr(span_data, 'type', 'unknown')
+            span_name = getattr(span_data, "name", "unknown_operation")
+            span_type = getattr(span_data, "type", "unknown")
 
             # Generate enhanced span name
             enhanced_name = self._get_span_name(
                 span_name,
-                agent_name=getattr(span_data, 'agent_name', None),
-                model_name=getattr(span_data, 'model_name', None),
-                tool_name=getattr(span_data, 'tool_name', None)
+                agent_name=getattr(span_data, "agent_name", None),
+                model_name=getattr(span_data, "model_name", None),
+                tool_name=getattr(span_data, "tool_name", None),
             )
 
             # Determine span operation type
@@ -229,15 +239,15 @@
                     SemanticConvention.GEN_AI_SYSTEM: "openai_agents",
                     SemanticConvention.GEN_AI_OPERATION: operation_type,
                     "span.type": span_type,
-                    "span.id": getattr(span_data, 'span_id', ''),
-                }
+                    "span.id": getattr(span_data, "span_id", ""),
+                },
             )
 
             # Process specific span types
             self._process_span_attributes(span, span_data, span_type)
 
             # Track span
-            span_id = getattr(span_data, 'span_id', len(self.span_stack))
+            span_id = getattr(span_data, "span_id", len(self.span_stack))
             self.active_spans[f"{trace_id}:{span_id}"] = span
             self.span_stack.append(span)
 
@@ -247,11 +257,11 @@
     def _get_operation_type(self, span_type: str, span_name: str) -> str:
         """Get operation type based on span characteristics."""
         type_mapping = {
-            'agent': SemanticConvention.GEN_AI_OPERATION_TYPE_AGENT,
-            'generation': SemanticConvention.GEN_AI_OPERATION_CHAT,
-            'function': SemanticConvention.GEN_AI_OPERATION_CHAT,
-            'tool': SemanticConvention.GEN_AI_OPERATION_CHAT,
-            'handoff': SemanticConvention.GEN_AI_OPERATION_TYPE_AGENT,
+            "agent": SemanticConvention.GEN_AI_OPERATION_TYPE_AGENT,
+            "generation": SemanticConvention.GEN_AI_OPERATION_CHAT,
+            "function": SemanticConvention.GEN_AI_OPERATION_CHAT,
+            "tool": SemanticConvention.GEN_AI_OPERATION_CHAT,
+            "handoff": SemanticConvention.GEN_AI_OPERATION_TYPE_AGENT,
         }
 
         # Check span type first
@@ -270,28 +280,30 @@
         """Process and set span attributes based on span type."""
         try:
             # Common attributes
-            if hasattr(span_data, 'agent_name'):
-                span.set_attribute(SemanticConvention.GEN_AI_AGENT_NAME,
-                                   span_data.agent_name)
+            if hasattr(span_data, "agent_name"):
+                span.set_attribute(
+                    SemanticConvention.GEN_AI_AGENT_NAME, span_data.agent_name
+                )
 
-            if hasattr(span_data, 'model_name'):
-                span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL,
-                                   span_data.model_name)
+            if hasattr(span_data, "model_name"):
+                span.set_attribute(
+                    SemanticConvention.GEN_AI_REQUEST_MODEL, span_data.model_name
+                )
 
             # Agent-specific attributes
-            if span_type == 'agent':
+            if span_type == "agent":
                 self._process_agent_span(span, span_data)
 
             # Generation-specific attributes
-            elif span_type == 'generation':
+            elif span_type == "generation":
                 self._process_generation_span(span, span_data)
 
             # Function/Tool-specific attributes
-            elif span_type in ['function', 'tool']:
+            elif span_type in ["function", "tool"]:
                 self._process_function_span(span, span_data)
 
             # Handoff-specific attributes
-            elif span_type == 'handoff':
+            elif span_type == "handoff":
                 self._process_handoff_span(span, span_data)
 
         except Exception as e:  # pylint: disable=broad-exception-caught
@@ -300,72 +312,77 @@ class OpenLITTracingProcessor(TracingProcessor):
     def _process_agent_span(self, span, agent_span):
         """Process agent span data (unused parameter)."""
         # Agent-specific processing
-        if hasattr(agent_span, 'instructions'):
-            span.set_attribute(SemanticConvention.GEN_AI_AGENT_DESCRIPTION,
-                               str(agent_span.instructions)[:500])
+        if hasattr(agent_span, "instructions"):
+            span.set_attribute(
+                SemanticConvention.GEN_AI_AGENT_DESCRIPTION,
+                str(agent_span.instructions)[:500],
+            )
 
-        if hasattr(agent_span, 'model'):
-            span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL,
-                               agent_span.model)
+        if hasattr(agent_span, "model"):
+            span.set_attribute(
+                SemanticConvention.GEN_AI_REQUEST_MODEL, agent_span.model
+            )
 
     def _process_generation_span(self, span, generation_span):
         """Process generation span data."""
         # Set generation-specific attributes
-        if hasattr(generation_span, 'prompt'):
-            span.set_attribute(SemanticConvention.GEN_AI_PROMPT,
-                               str(generation_span.prompt)[:1000])
+        if hasattr(generation_span, "prompt"):
+            span.set_attribute(
+                SemanticConvention.GEN_AI_PROMPT, str(generation_span.prompt)[:1000]
+            )
 
-        if hasattr(generation_span, 'completion'):
-            span.set_attribute(SemanticConvention.GEN_AI_COMPLETION,
-                               str(generation_span.completion)[:1000])
+        if hasattr(generation_span, "completion"):
+            span.set_attribute(
+                SemanticConvention.GEN_AI_COMPLETION,
+                str(generation_span.completion)[:1000],
+            )
 
-        if hasattr(generation_span, 'usage'):
+        if hasattr(generation_span, "usage"):
             usage = generation_span.usage
-            if hasattr(usage, 'prompt_tokens'):
-                span.set_attribute(SemanticConvention.GEN_AI_USAGE_PROMPT_TOKENS,
-                                   usage.prompt_tokens)
-            if hasattr(usage, 'completion_tokens'):
+            if hasattr(usage, "prompt_tokens"):
+                span.set_attribute(
+                    SemanticConvention.GEN_AI_USAGE_PROMPT_TOKENS, usage.prompt_tokens
+                )
+            if hasattr(usage, "completion_tokens"):
                 span.set_attribute(
                     SemanticConvention.GEN_AI_USAGE_COMPLETION_TOKENS,
-                    usage.completion_tokens
+                    usage.completion_tokens,
                 )
 
     def _process_function_span(self, span, function_span):
         """Process function/tool span data."""
-        if hasattr(function_span, 'function_name'):
-            span.set_attribute(SemanticConvention.GEN_AI_TOOL_NAME,
-                               function_span.function_name)
+        if hasattr(function_span, "function_name"):
+            span.set_attribute(
+                SemanticConvention.GEN_AI_TOOL_NAME, function_span.function_name
+            )
 
-        if hasattr(function_span, 'arguments'):
-            span.set_attribute("gen_ai.tool.arguments",
-                               str(function_span.arguments)[:500])
+        if hasattr(function_span, "arguments"):
+            span.set_attribute(
+                "gen_ai.tool.arguments", str(function_span.arguments)[:500]
+            )
 
-        if hasattr(function_span, 'result'):
-            span.set_attribute("gen_ai.tool.result",
-                               str(function_span.result)[:500])
+        if hasattr(function_span, "result"):
+            span.set_attribute("gen_ai.tool.result", str(function_span.result)[:500])
 
     def _process_handoff_span(self, span, handoff_span):
         """Process handoff span data."""
-        if hasattr(handoff_span, 'target_agent'):
-            span.set_attribute("gen_ai.handoff.target_agent",
-                               handoff_span.target_agent)
+        if hasattr(handoff_span, "target_agent"):
+            span.set_attribute("gen_ai.handoff.target_agent", handoff_span.target_agent)
 
-        if hasattr(handoff_span, 'reason'):
-            span.set_attribute("gen_ai.handoff.reason",
-                               str(handoff_span.reason)[:200])
+        if hasattr(handoff_span, "reason"):
+            span.set_attribute("gen_ai.handoff.reason", str(handoff_span.reason)[:200])
 
     def span_end(self, span_data, trace_id: str):
         """Handle span end events."""
         try:
-            span_id = getattr(span_data, 'span_id', '')
+            span_id = getattr(span_data, "span_id", "")
             span_key = f"{trace_id}:{span_id}"
 
             span = self.active_spans.get(span_key)
             if span:
                 # Set final status
-                if hasattr(span_data, 'error') and span_data.error:
-                    span.set_status(Status(StatusCode.ERROR,
-                                           str(span_data.error)))
+                if hasattr(span_data, "error") and span_data.error:
+                    span.set_status(Status(StatusCode.ERROR, str(span_data.error)))
                 else:
                     span.set_status(Status(StatusCode.OK))
 
@@ -379,7 +396,7 @@ class OpenLITTracingProcessor(TracingProcessor):
                 self.span_stack.remove(span)
 
         except Exception as e:  # pylint: disable=broad-exception-caught
-            handle_exception(span if 'span' in locals() else None, e)
+            handle_exception(span if "span" in locals() else None, e)
 
     def force_flush(self):
         """Force flush all pending spans."""
@@ -402,15 +419,16 @@ class OpenLITTracingProcessor(TracingProcessor):
         """Extract model information from span data."""
         model_info = {}
 
-        if hasattr(span_data, 'model'):
-            model_info['model'] = span_data.model
-        if hasattr(span_data, 'model_name'):
-            model_info['model'] = span_data.model_name
+        if hasattr(span_data, "model"):
+            model_info["model"] = span_data.model
+        if hasattr(span_data, "model_name"):
+            model_info["model"] = span_data.model_name
 
         return model_info
 
-    def _calculate_cost(self, model: str, prompt_tokens: int,
-                        completion_tokens: int) -> float:
+    def _calculate_cost(
+        self, model: str, prompt_tokens: int, completion_tokens: int
+    ) -> float:
         """Calculate cost based on token usage."""
         try:
             return get_chat_model_cost(
@@ -423,22 +441,24 @@ class OpenLITTracingProcessor(TracingProcessor):
     def on_trace_start(self, trace):
         """Called when a trace starts - required by OpenAI Agents framework"""
         try:
-            self.start_trace(getattr(trace, 'trace_id', 'unknown'),
-                             getattr(trace, 'name', 'workflow'))
+            self.start_trace(
+                getattr(trace, "trace_id", "unknown"),
+                getattr(trace, "name", "workflow"),
+            )
         except Exception:  # pylint: disable=broad-exception-caught
             pass
 
     def on_trace_end(self, trace):
         """Called when a trace ends - required by OpenAI Agents framework"""
         try:
-            self.end_trace(getattr(trace, 'trace_id', 'unknown'))
+            self.end_trace(getattr(trace, "trace_id", "unknown"))
         except Exception:  # pylint: disable=broad-exception-caught
             pass
 
     def on_span_start(self, span):
         """Called when a span starts - required by OpenAI Agents framework"""
         try:
-            trace_id = getattr(span, 'trace_id', 'unknown')
+            trace_id = getattr(span, "trace_id", "unknown")
             self.span_start(span, trace_id)
         except Exception:  # pylint: disable=broad-exception-caught
             pass
@@ -446,7 +466,7 @@ class OpenLITTracingProcessor(TracingProcessor):
     def on_span_end(self, span):
         """Called when a span ends - required by OpenAI Agents framework"""
        try:
-            trace_id = getattr(span, 'trace_id', 'unknown')
+            trace_id = getattr(span, "trace_id", "unknown")
             self.span_end(span, trace_id)
         except Exception:  # pylint: disable=broad-exception-caught
             pass
openlit/instrumentation/phidata/__init__.py
@@ -6,12 +6,11 @@ import importlib.metadata
 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
 from wrapt import wrap_function_wrapper
 
-from openlit.instrumentation.phidata.phidata import (
-    phidata_wrap
-)
+from openlit.instrumentation.phidata.phidata import phidata_wrap
 
 _instruments = ("phidata >= 2.5.32",)
 
+
 class PhidataInstrumentor(BaseInstrumentor):
     """
     An instrumentor for Phidata's client library.
@@ -33,8 +32,17 @@ class PhidataInstrumentor(BaseInstrumentor):
         wrap_function_wrapper(
             "phi.agent",
             "Agent.print_response",
-            phidata_wrap("phidata.print_response", version, environment, application_name,
-                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+            phidata_wrap(
+                "phidata.print_response",
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )
 
     def _uninstrument(self, **kwargs):
openlit/instrumentation/phidata/phidata.py
@@ -5,7 +5,11 @@ Module for monitoring Phidata calls.
 
 import logging
 from opentelemetry.trace import SpanKind, Status, StatusCode
-from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
+from opentelemetry.sdk.resources import (
+    SERVICE_NAME,
+    TELEMETRY_SDK_NAME,
+    DEPLOYMENT_ENVIRONMENT,
+)
 from openlit.__helpers import (
     handle_exception,
 )
@@ -14,8 +18,18 @@ from openlit.semcov import SemanticConvention
 # Initialize logger for logging potential issues and operations
 logger = logging.getLogger(__name__)
 
-def phidata_wrap(gen_ai_endpoint, version, environment, application_name,
-                 tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+
+def phidata_wrap(
+    gen_ai_endpoint,
+    version,
+    environment,
+    application_name,
+    tracer,
+    pricing_info,
+    capture_message_content,
+    metrics,
+    disable_metrics,
+):
     """
     Generates a telemetry wrapper for chat completions to collect metrics.
 
@@ -50,40 +64,61 @@ def phidata_wrap(gen_ai_endpoint, version, environment, application_name,
         """
 
         # pylint: disable=line-too-long
-        with tracer.start_as_current_span(gen_ai_endpoint, kind= SpanKind.CLIENT) as span:
+        with tracer.start_as_current_span(
+            gen_ai_endpoint, kind=SpanKind.CLIENT
+        ) as span:
             response = wrapped(*args, **kwargs)
 
             try:
                 # Set base span attribues
                 span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
-                span.set_attribute(SemanticConvention.GEN_AI_SYSTEM,
-                                   SemanticConvention.GEN_AI_SYSTEM_PHIDATA)
-                span.set_attribute(SemanticConvention.GEN_AI_OPERATION,
-                                   SemanticConvention.GEN_AI_OPERATION_TYPE_AGENT)
-                span.set_attribute(SemanticConvention.GEN_AI_ENDPOINT,
-                                   gen_ai_endpoint)
-                span.set_attribute(SERVICE_NAME,
-                                   application_name)
-                span.set_attribute(DEPLOYMENT_ENVIRONMENT,
-                                   environment)
-                span.set_attribute(SemanticConvention.GEN_AI_AGENT_ID,
-                                   getattr(instance, 'agent_id', '') or '')
-                span.set_attribute(SemanticConvention.GEN_AI_AGENT_ROLE,
-                                   getattr(instance, 'name', '') or '')
-                span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL,
-                                   getattr(getattr(instance, 'model', None), 'id', '') or '')
-                span.set_attribute(SemanticConvention.GEN_AI_AGENT_TOOLS,
-                                   str(getattr(instance, 'tools', '')) or '')
-                span.set_attribute(SemanticConvention.GEN_AI_AGENT_CONTEXT,
-                                   str(getattr(instance, 'knowledge', '')) or '')
-                span.set_attribute(SemanticConvention.GEN_AI_AGENT_TASK,
-                                   str(getattr(instance, 'task', '')) or '')
-                span.set_attribute(SemanticConvention.GEN_AI_AGENT_INSTRUCTIONS,
-                                   str(getattr(instance, 'instructions', '')) or '')
-                span.set_attribute(SemanticConvention.GEN_AI_AGENT_STORAGE,
-                                   str(getattr(instance, 'storage', '')) or '')
-                span.set_attribute(SemanticConvention.GEN_AI_AGENT_ENABLE_HISTORY,
-                                   str(getattr(instance, 'add_history_to_messages', '')) or '')
+                span.set_attribute(
+                    SemanticConvention.GEN_AI_SYSTEM,
+                    SemanticConvention.GEN_AI_SYSTEM_PHIDATA,
+                )
+                span.set_attribute(
+                    SemanticConvention.GEN_AI_OPERATION,
+                    SemanticConvention.GEN_AI_OPERATION_TYPE_AGENT,
+                )
+                span.set_attribute(SemanticConvention.GEN_AI_ENDPOINT, gen_ai_endpoint)
+                span.set_attribute(SERVICE_NAME, application_name)
+                span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment)
+                span.set_attribute(
+                    SemanticConvention.GEN_AI_AGENT_ID,
+                    getattr(instance, "agent_id", "") or "",
+                )
+                span.set_attribute(
+                    SemanticConvention.GEN_AI_AGENT_ROLE,
+                    getattr(instance, "name", "") or "",
+                )
+                span.set_attribute(
+                    SemanticConvention.GEN_AI_REQUEST_MODEL,
+                    getattr(getattr(instance, "model", None), "id", "") or "",
+                )
+                span.set_attribute(
+                    SemanticConvention.GEN_AI_AGENT_TOOLS,
+                    str(getattr(instance, "tools", "")) or "",
+                )
+                span.set_attribute(
+                    SemanticConvention.GEN_AI_AGENT_CONTEXT,
+                    str(getattr(instance, "knowledge", "")) or "",
+                )
+                span.set_attribute(
+                    SemanticConvention.GEN_AI_AGENT_TASK,
+                    str(getattr(instance, "task", "")) or "",
+                )
+                span.set_attribute(
+                    SemanticConvention.GEN_AI_AGENT_INSTRUCTIONS,
+                    str(getattr(instance, "instructions", "")) or "",
+                )
+                span.set_attribute(
+                    SemanticConvention.GEN_AI_AGENT_STORAGE,
+                    str(getattr(instance, "storage", "")) or "",
+                )
+                span.set_attribute(
+                    SemanticConvention.GEN_AI_AGENT_ENABLE_HISTORY,
+                    str(getattr(instance, "add_history_to_messages", "")) or "",
+                )
 
                 span.set_status(Status(StatusCode.OK))