openlit 1.34.27__py3-none-any.whl → 1.34.29__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. openlit/__helpers.py +38 -0
  2. openlit/__init__.py +22 -155
  3. openlit/_instrumentors.py +144 -0
  4. openlit/guard/all.py +3 -3
  5. openlit/instrumentation/chroma/utils.py +2 -2
  6. openlit/instrumentation/controlflow/controlflow.py +2 -2
  7. openlit/instrumentation/embedchain/embedchain.py +4 -4
  8. openlit/instrumentation/groq/__init__.py +4 -4
  9. openlit/instrumentation/haystack/__init__.py +57 -28
  10. openlit/instrumentation/haystack/async_haystack.py +54 -0
  11. openlit/instrumentation/haystack/haystack.py +35 -65
  12. openlit/instrumentation/haystack/utils.py +377 -0
  13. openlit/instrumentation/julep/async_julep.py +2 -2
  14. openlit/instrumentation/julep/julep.py +2 -2
  15. openlit/instrumentation/langchain_community/utils.py +2 -2
  16. openlit/instrumentation/llamaindex/__init__.py +165 -37
  17. openlit/instrumentation/llamaindex/async_llamaindex.py +53 -0
  18. openlit/instrumentation/llamaindex/llamaindex.py +32 -64
  19. openlit/instrumentation/llamaindex/utils.py +412 -0
  20. openlit/instrumentation/mem0/mem0.py +2 -2
  21. openlit/instrumentation/openai/__init__.py +24 -24
  22. openlit/instrumentation/openai/utils.py +66 -27
  23. openlit/instrumentation/openai_agents/__init__.py +46 -26
  24. openlit/instrumentation/openai_agents/processor.py +600 -0
  25. openlit/instrumentation/pinecone/utils.py +2 -2
  26. openlit/instrumentation/qdrant/utils.py +2 -2
  27. openlit/instrumentation/together/__init__.py +8 -8
  28. openlit/semcov/__init__.py +80 -0
  29. {openlit-1.34.27.dist-info → openlit-1.34.29.dist-info}/METADATA +2 -1
  30. {openlit-1.34.27.dist-info → openlit-1.34.29.dist-info}/RECORD +32 -27
  31. openlit/instrumentation/openai_agents/openai_agents.py +0 -65
  32. {openlit-1.34.27.dist-info → openlit-1.34.29.dist-info}/LICENSE +0 -0
  33. {openlit-1.34.27.dist-info → openlit-1.34.29.dist-info}/WHEEL +0 -0
@@ -0,0 +1,600 @@
1
+ """
2
+ OpenLIT OpenAI Agents Instrumentation - Native TracingProcessor Implementation
3
+ """
4
+
5
+ import json
6
+ import time
7
+ from datetime import datetime
8
+ from typing import Any, Dict, Optional, TYPE_CHECKING
9
+
10
+ from opentelemetry import context as context_api
11
+ from opentelemetry.trace import SpanKind, Status, StatusCode, set_span_in_context
12
+ from opentelemetry.context import detach
13
+
14
+ from openlit.__helpers import (
15
+ common_framework_span_attributes,
16
+ handle_exception,
17
+ record_framework_metrics,
18
+ get_chat_model_cost
19
+ )
20
+ from openlit.semcov import SemanticConvention
21
+
22
+ # Try to import agents framework components with fallback
23
+ try:
24
+ from agents import TracingProcessor
25
+ if TYPE_CHECKING:
26
+ from agents import Trace, Span
27
+ TRACING_AVAILABLE = True
28
+ except ImportError:
29
+ # Create dummy class for when agents is not available
30
+ class TracingProcessor:
31
+ """Dummy TracingProcessor class for when agents is not available"""
32
+
33
+ def force_flush(self):
34
+ """Dummy force_flush method"""
35
+ return None
36
+
37
+ def shutdown(self):
38
+ """Dummy shutdown method"""
39
+ return None
40
+
41
+ if TYPE_CHECKING:
42
+ # Type hints only - these don't exist at runtime when agents unavailable
43
+ Trace = Any
44
+ Span = Any
45
+
46
+ TRACING_AVAILABLE = False
47
+
48
+
49
+ class OpenLITTracingProcessor(TracingProcessor):
50
+ """
51
+ OpenLIT processor that integrates with OpenAI Agents' native tracing system
52
+ Provides superior business intelligence while maintaining perfect hierarchy
53
+ """
54
+
55
+ def __init__(self, tracer: Any, version: str, environment: str,
56
+ application_name: str, pricing_info: dict, capture_message_content: bool,
57
+ metrics: Optional[Any], disable_metrics: bool, detailed_tracing: bool):
58
+ if not TRACING_AVAILABLE:
59
+ return
60
+
61
+ self._tracer = tracer
62
+ self._version = version
63
+ self._environment = environment
64
+ self._application_name = application_name
65
+ self._pricing_info = pricing_info
66
+ self._capture_message_content = capture_message_content
67
+ self._metrics = metrics
68
+ self._disable_metrics = disable_metrics
69
+ self._detailed_tracing = detailed_tracing
70
+
71
+ # Track spans for hierarchy
72
+ self._root_spans: Dict[str, Any] = {}
73
+ self._otel_spans: Dict[str, Any] = {}
74
+ self._tokens: Dict[str, object] = {}
75
+ self._span_start_times: Dict[str, float] = {}
76
+
77
+ # Track handoff context for better span naming
78
+ self._last_handoff_from: Optional[str] = None
79
+
80
+ def on_trace_start(self, trace: "Trace") -> None:
81
+ """Called when a trace is started - creates root workflow span"""
82
+ if not TRACING_AVAILABLE:
83
+ return
84
+
85
+ # Create root workflow span with {operation_type} {operation_name} format
86
+ workflow_name = getattr(trace, 'name', 'workflow')
87
+ span_name = f"agent {workflow_name}" # Follow {operation_type} {operation_name} pattern
88
+
89
+ # Use tracer.start_span for TracingProcessor pattern with proper context
90
+ otel_span = self._tracer.start_span(
91
+ name=span_name,
92
+ kind=SpanKind.CLIENT
93
+ )
94
+
95
+ # Set common framework attributes for root span
96
+ self._set_common_attributes(otel_span, trace.trace_id)
97
+
98
+ # Set agent name for root span using semantic conventions
99
+ if hasattr(trace, 'name') and trace.name:
100
+ otel_span.set_attribute(SemanticConvention.GEN_AI_AGENT_NAME, trace.name)
101
+
102
+ # Set default model for root span
103
+ otel_span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL, "gpt-4o")
104
+
105
+ self._root_spans[trace.trace_id] = otel_span
106
+ self._span_start_times[trace.trace_id] = time.time()
107
+
108
+ def on_span_start(self, span: "Span[Any]") -> None:
109
+ """Called when a span is started - creates child spans with proper hierarchy"""
110
+ if not TRACING_AVAILABLE or not hasattr(span, 'started_at') or not span.started_at:
111
+ return
112
+
113
+ start_time = self._parse_timestamp(span.started_at)
114
+
115
+ # Determine parent span for proper hierarchy
116
+ parent_span = None
117
+ if span.parent_id and span.parent_id in self._otel_spans:
118
+ parent_span = self._otel_spans[span.parent_id]
119
+ elif span.trace_id in self._root_spans:
120
+ parent_span = self._root_spans[span.trace_id]
121
+
122
+ # Set context for parent-child relationship
123
+ context = set_span_in_context(parent_span) if parent_span else None
124
+
125
+ # Get semantic span name and operation type
126
+ span_name = self._get_span_name(span)
127
+ operation_type = self._get_operation_type(span.span_data)
128
+
129
+ # Create span with proper context
130
+ otel_span = self._tracer.start_span(
131
+ name=span_name,
132
+ context=context,
133
+ start_time=self._as_utc_nano(start_time),
134
+ kind=SpanKind.CLIENT
135
+ )
136
+
137
+ # Set common framework attributes for all spans
138
+ self._set_common_framework_attributes(otel_span, operation_type)
139
+
140
+ # Set span-specific attributes
141
+ self._set_span_attributes(otel_span, span)
142
+
143
+ # Track span and context
144
+ self._otel_spans[span.span_id] = otel_span
145
+ self._tokens[span.span_id] = context_api.attach(set_span_in_context(otel_span))
146
+ self._span_start_times[span.span_id] = time.time()
147
+
148
+ def on_span_end(self, span: "Span[Any]") -> None:
149
+ """Called when a span is finished - adds business intelligence and ends span"""
150
+ if not TRACING_AVAILABLE or span.span_id not in self._otel_spans:
151
+ return
152
+
153
+ otel_span = self._otel_spans[span.span_id]
154
+
155
+ try:
156
+ # Add response data and business intelligence
157
+ self._process_span_completion(otel_span, span)
158
+
159
+ # Set successful status
160
+ otel_span.set_status(Status(StatusCode.OK))
161
+
162
+ # Record metrics if enabled
163
+ if not self._disable_metrics and self._metrics and span.span_id in self._span_start_times:
164
+ start_time = self._span_start_times[span.span_id]
165
+ end_time = time.time()
166
+ operation_type = self._get_operation_type(span.span_data)
167
+ record_framework_metrics(
168
+ self._metrics, operation_type, SemanticConvention.GEN_AI_SYSTEM_OPENAI_AGENTS,
169
+ "localhost", 80, self._environment, self._application_name,
170
+ start_time, end_time
171
+ )
172
+
173
+ except Exception as e:
174
+ handle_exception(otel_span, e)
175
+ finally:
176
+ # End span and cleanup
177
+ otel_span.end()
178
+
179
+ # Cleanup context
180
+ if span.span_id in self._tokens:
181
+ detach(self._tokens[span.span_id])
182
+ del self._tokens[span.span_id]
183
+
184
+ # Cleanup tracking
185
+ if span.span_id in self._otel_spans:
186
+ del self._otel_spans[span.span_id]
187
+ if span.span_id in self._span_start_times:
188
+ del self._span_start_times[span.span_id]
189
+
190
+ def on_trace_end(self, trace: "Trace") -> None:
191
+ """Called when a trace is finished - ends root span with business intelligence"""
192
+ if not TRACING_AVAILABLE or trace.trace_id not in self._root_spans:
193
+ return
194
+
195
+ root_span = self._root_spans[trace.trace_id]
196
+
197
+ try:
198
+ # Add trace-level business intelligence
199
+ self._process_trace_completion(root_span, trace)
200
+ root_span.set_status(Status(StatusCode.OK))
201
+ except Exception as e:
202
+ handle_exception(root_span, e)
203
+ finally:
204
+ root_span.end()
205
+
206
+ # Cleanup
207
+ if trace.trace_id in self._root_spans:
208
+ del self._root_spans[trace.trace_id]
209
+ if trace.trace_id in self._span_start_times:
210
+ del self._span_start_times[trace.trace_id]
211
+
212
+ def _get_span_name(self, span: "Span[Any]") -> str:
213
+ """Get semantic span name using {operation_type} {operation_name} format"""
214
+ data = span.span_data
215
+ operation_type = self._get_operation_type(data)
216
+
217
+ # Extract operation name based on span type
218
+ operation_name = "unknown"
219
+
220
+ # Special handling for handoffs
221
+ if hasattr(data, '__class__') and data.__class__.__name__ == 'HandoffSpanData':
222
+ if hasattr(data, 'to_agent') and data.to_agent:
223
+ operation_name = f"to {data.to_agent}"
224
+ else:
225
+ operation_name = "handoff"
226
+
227
+ # Use agent name for agent spans
228
+ elif hasattr(data, '__class__') and data.__class__.__name__ == 'AgentSpanData':
229
+ # Try multiple possible attribute names for agent name
230
+ agent_name = None
231
+
232
+ for attr in ['agent_name', 'name', 'agent', 'agent_id']:
233
+ if hasattr(data, attr):
234
+ agent_name = getattr(data, attr)
235
+ if agent_name and isinstance(agent_name, str):
236
+ break
237
+
238
+ # If still no agent name, try looking in context or other attributes
239
+ if not agent_name:
240
+ # Try context or other nested attributes
241
+ if hasattr(data, 'context') and hasattr(data.context, 'agent'):
242
+ agent_name = getattr(data.context.agent, 'name', None)
243
+ elif hasattr(data, 'metadata') and hasattr(data.metadata, 'agent_name'):
244
+ agent_name = data.metadata.agent_name
245
+
246
+ if agent_name:
247
+ operation_name = agent_name
248
+ else:
249
+ # If no agent name found, use a more descriptive fallback
250
+ operation_name = "execution"
251
+
252
+ # Use name if available for other spans
253
+ elif hasattr(data, 'name') and isinstance(data.name, str):
254
+ operation_name = data.name
255
+
256
+ # Fallback to type-based names
257
+ else:
258
+ operation_name = getattr(data, 'type', 'operation')
259
+
260
+ # Return formatted name: {operation_type} {operation_name}
261
+ return f"{operation_type} {operation_name}"
262
+
263
+ def _get_operation_type(self, data: Any) -> str:
264
+ """Map span data to operation types"""
265
+ class_name = data.__class__.__name__ if hasattr(data, '__class__') else str(type(data))
266
+
267
+ mapping = {
268
+ 'AgentSpanData': SemanticConvention.GEN_AI_OPERATION_TYPE_AGENT,
269
+ 'GenerationSpanData': SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT,
270
+ 'FunctionSpanData': SemanticConvention.GEN_AI_OPERATION_TYPE_TOOLS,
271
+ 'HandoffSpanData': SemanticConvention.GEN_AI_OPERATION_TYPE_AGENT,
272
+ 'ResponseSpanData': SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT,
273
+ }
274
+
275
+ return mapping.get(class_name, SemanticConvention.GEN_AI_OPERATION_TYPE_FRAMEWORK)
276
+
277
+ def _set_common_framework_attributes(self, span: Any, operation_type: str) -> None:
278
+ """Set common framework attributes using semantic conventions"""
279
+ # Create scope object for common_framework_span_attributes
280
+ scope = type("GenericScope", (), {})()
281
+ scope._span = span
282
+ scope._start_time = time.time()
283
+ scope._end_time = time.time()
284
+
285
+ # Use common framework attributes helper
286
+ # For framework operations, use localhost like other agent frameworks (AG2, Pydantic AI)
287
+ common_framework_span_attributes(
288
+ scope, SemanticConvention.GEN_AI_SYSTEM_OPENAI_AGENTS,
289
+ "localhost", 80, self._environment, self._application_name,
290
+ self._version, operation_type, None
291
+ )
292
+
293
+ def _set_common_attributes(self, span: Any, trace_id: str) -> None:
294
+ """Set common framework attributes for root spans"""
295
+ self._set_common_framework_attributes(span, SemanticConvention.GEN_AI_OPERATION_TYPE_FRAMEWORK)
296
+
297
+ def _set_span_attributes(self, span: Any, agent_span: "Span[Any]") -> None:
298
+ """Set span-specific attributes based on span data using semantic conventions"""
299
+ data = agent_span.span_data
300
+
301
+ # Agent-specific attributes using semantic conventions
302
+ if hasattr(data, 'agent_name') and data.agent_name:
303
+ span.set_attribute(SemanticConvention.GEN_AI_AGENT_NAME, data.agent_name)
304
+ elif hasattr(data, 'name') and data.name:
305
+ span.set_attribute(SemanticConvention.GEN_AI_AGENT_NAME, data.name)
306
+
307
+ # Enhanced model information extraction
308
+ model = self._extract_model_info(data, agent_span)
309
+ if model:
310
+ span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL, str(model))
311
+
312
+ # Enhanced input/output capture with MIME types (OpenLIT enhancement)
313
+ if self._capture_message_content:
314
+ self._capture_input_output(span, data)
315
+
316
+ # Enhanced token usage details (inspired by OpenInference)
317
+ self._capture_detailed_token_usage(span, data)
318
+
319
+ # Model invocation parameters as JSON (new feature from OpenInference)
320
+ self._capture_model_parameters(span, data)
321
+
322
+ # Tool/function information for tool calls
323
+ if hasattr(data, '__class__') and 'Function' in data.__class__.__name__:
324
+ if hasattr(data, 'function_name'):
325
+ span.set_attribute(SemanticConvention.GEN_AI_TOOL_NAME, data.function_name)
326
+ if hasattr(data, 'arguments'):
327
+ span.set_attribute(SemanticConvention.GEN_AI_TOOL_ARGS, str(data.arguments))
328
+
329
+ # Enhanced handoff information extraction
330
+ if hasattr(data, '__class__') and 'Handoff' in data.__class__.__name__:
331
+ target_agent = self._extract_handoff_target(data, agent_span)
332
+ if target_agent:
333
+ span.set_attribute(SemanticConvention.GEN_AI_AGENT_NAME, target_agent)
334
+ else:
335
+ # Fallback for handoff spans without clear target
336
+ span.set_attribute(SemanticConvention.GEN_AI_AGENT_NAME, "agent handoff")
337
+
338
+ # Request/response IDs if available
339
+ if hasattr(data, 'request_id'):
340
+ span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID, data.request_id)
341
+ elif hasattr(data, 'response_id'):
342
+ span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID, data.response_id)
343
+
344
+ def _extract_model_info(self, data: Any, agent_span: "Span[Any]") -> Optional[str]:
345
+ """Extract model information from span data or agent configuration"""
346
+ # Try direct model attributes first
347
+ model_attrs = ['model', 'model_name', 'model_id', 'llm_model', 'openai_model']
348
+
349
+ model = self._check_model_attrs(data, model_attrs)
350
+ if model:
351
+ return model
352
+
353
+ # Try nested configuration objects
354
+ config_attrs = ['config', 'configuration', 'client_config', 'llm_config']
355
+ model = self._check_config_model_attrs(data, config_attrs, model_attrs)
356
+ if model:
357
+ return model
358
+
359
+ # Try looking in the agent span itself
360
+ if hasattr(agent_span, 'model'):
361
+ return str(agent_span.model)
362
+
363
+ # Try agent_config if available
364
+ if hasattr(agent_span, 'agent_config'):
365
+ model = self._check_model_attrs(agent_span.agent_config, model_attrs)
366
+ if model:
367
+ return model
368
+
369
+ # Default fallback
370
+ return "gpt-4o"
371
+
372
+ def _check_model_attrs(self, obj: Any, model_attrs: list) -> Optional[str]:
373
+ """Helper method to check model attributes on an object"""
374
+ for attr in model_attrs:
375
+ if not hasattr(obj, attr):
376
+ continue
377
+ model_value = getattr(obj, attr)
378
+ if model_value and isinstance(model_value, str):
379
+ return model_value
380
+ return None
381
+
382
+ def _check_config_model_attrs(self, data: Any, config_attrs: list, model_attrs: list) -> Optional[str]:
383
+ """Helper method to check model attributes in nested configuration objects"""
384
+ for config_attr in config_attrs:
385
+ if not hasattr(data, config_attr):
386
+ continue
387
+ config = getattr(data, config_attr)
388
+ if not config:
389
+ continue
390
+ model = self._check_model_attrs(config, model_attrs)
391
+ if model:
392
+ return model
393
+ return None
394
+
395
+ def _extract_handoff_target(self, data: Any, agent_span: "Span[Any]") -> Optional[str]:
396
+ """Extract handoff target information with enhanced logic"""
397
+ # Try direct target attributes
398
+ target_attrs = ['to_agent', 'target_agent', 'destination_agent', 'next_agent']
399
+ for attr in target_attrs:
400
+ if hasattr(data, attr):
401
+ target = getattr(data, attr)
402
+ if target and isinstance(target, str):
403
+ return f"to {target}"
404
+
405
+ # Try from_agent for better handoff description
406
+ from_attrs = ['from_agent', 'source_agent', 'previous_agent']
407
+ for attr in from_attrs:
408
+ if hasattr(data, attr):
409
+ source = getattr(data, attr)
410
+ if source and isinstance(source, str):
411
+ return f"from {source}"
412
+
413
+ # Try nested objects
414
+ if hasattr(data, 'handoff_info'):
415
+ info = data.handoff_info
416
+ for attr in target_attrs + from_attrs:
417
+ if hasattr(info, attr):
418
+ value = getattr(info, attr)
419
+ if value and isinstance(value, str):
420
+ prefix = "to" if attr in target_attrs else "from"
421
+ return f"{prefix} {value}"
422
+
423
+ return None
424
+
425
+ def _capture_input_output(self, span: Any, data: Any) -> None:
426
+ """Capture input/output content with MIME type detection (OpenLIT enhancement)"""
427
+ try:
428
+ # Capture input content
429
+ if hasattr(data, 'input') and data.input is not None:
430
+ content = str(data.input)
431
+ span.set_attribute(SemanticConvention.GEN_AI_CONTENT_PROMPT, content)
432
+ # Set MIME type based on content structure
433
+ if content.startswith('{') or content.startswith('['):
434
+ span.set_attribute("gen_ai.content.prompt.mime_type", "application/json")
435
+ else:
436
+ span.set_attribute("gen_ai.content.prompt.mime_type", "text/plain")
437
+
438
+ # Capture output/response content
439
+ if hasattr(data, 'response') and data.response is not None:
440
+ content = str(data.response)
441
+ span.set_attribute(SemanticConvention.GEN_AI_CONTENT_COMPLETION, content)
442
+ # Set MIME type based on content structure
443
+ if content.startswith('{') or content.startswith('['):
444
+ span.set_attribute("gen_ai.content.completion.mime_type", "application/json")
445
+ else:
446
+ span.set_attribute("gen_ai.content.completion.mime_type", "text/plain")
447
+
448
+ except Exception:
449
+ pass # Ignore export errors
450
+
451
+ def _capture_detailed_token_usage(self, span: Any, data: Any) -> None:
452
+ """Capture detailed token usage information (inspired by OpenInference)"""
453
+ try:
454
+ if hasattr(data, 'usage'):
455
+ usage = data.usage
456
+
457
+ # Standard token usage
458
+ if hasattr(usage, 'input_tokens'):
459
+ span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, usage.input_tokens)
460
+ if hasattr(usage, 'output_tokens'):
461
+ span.set_attribute(SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS, usage.output_tokens)
462
+
463
+ # Enhanced token details (when available)
464
+ if hasattr(usage, 'input_tokens_details'):
465
+ details = usage.input_tokens_details
466
+ if hasattr(details, 'cached_tokens'):
467
+ span.set_attribute("gen_ai.usage.input_tokens.cached", details.cached_tokens)
468
+ if hasattr(details, 'reasoning_tokens'):
469
+ span.set_attribute("gen_ai.usage.input_tokens.reasoning", details.reasoning_tokens)
470
+
471
+ if hasattr(usage, 'output_tokens_details'):
472
+ details = usage.output_tokens_details
473
+ if hasattr(details, 'reasoning_tokens'):
474
+ span.set_attribute("gen_ai.usage.output_tokens.reasoning", details.reasoning_tokens)
475
+
476
+ except Exception:
477
+ pass # Ignore export errors
478
+
479
+ def _capture_model_parameters(self, span: Any, data: Any) -> None:
480
+ """Capture model invocation parameters as JSON (new feature from OpenInference)"""
481
+ try:
482
+ # Look for model configuration parameters
483
+ params = {}
484
+
485
+ # Common parameter attributes
486
+ param_attrs = ['temperature', 'max_tokens', 'top_p', 'frequency_penalty', 'presence_penalty']
487
+ for attr in param_attrs:
488
+ if hasattr(data, attr):
489
+ params[attr] = getattr(data, attr)
490
+
491
+ # Try nested config objects
492
+ if hasattr(data, 'config'):
493
+ config = data.config
494
+ for attr in param_attrs:
495
+ if hasattr(config, attr):
496
+ params[attr] = getattr(config, attr)
497
+
498
+ # Try response object if available
499
+ if hasattr(data, 'response') and hasattr(data.response, 'model_dump'):
500
+ try:
501
+ response_dict = data.response.model_dump()
502
+ if response_dict and isinstance(response_dict, dict):
503
+ # Extract model parameters from response
504
+ if 'model' in response_dict:
505
+ params['model'] = response_dict['model']
506
+ if 'usage' in response_dict:
507
+ params['usage'] = response_dict['usage']
508
+ except Exception:
509
+ pass
510
+
511
+ # Set as JSON if we found any parameters
512
+ if params:
513
+ span.set_attribute("gen_ai.request.parameters", json.dumps(params))
514
+
515
+ except Exception:
516
+ pass # Ignore export errors
517
+
518
+ def _process_span_completion(self, span: Any, agent_span: "Span[Any]") -> None:
519
+ """Process span completion with enhanced business intelligence"""
520
+ data = agent_span.span_data
521
+
522
+ # Process response data if available
523
+ self._process_response_data(span, data)
524
+
525
+ # Extract and set token usage for business intelligence
526
+ self._extract_token_usage(span, data)
527
+
528
+ def _extract_token_usage(self, span: Any, data: Any) -> None:
529
+ """Extract token usage and calculate costs (OpenLIT's business intelligence)"""
530
+ try:
531
+ # Try to extract token usage from various possible locations
532
+ input_tokens = 0
533
+ output_tokens = 0
534
+
535
+ # Check direct usage attributes
536
+ if hasattr(data, 'usage'):
537
+ usage = data.usage
538
+ input_tokens = getattr(usage, 'input_tokens', 0) or getattr(usage, 'prompt_tokens', 0)
539
+ output_tokens = getattr(usage, 'output_tokens', 0) or getattr(usage, 'completion_tokens', 0)
540
+
541
+ # Check response object
542
+ elif hasattr(data, 'response') and hasattr(data.response, 'usage'):
543
+ usage = data.response.usage
544
+ input_tokens = getattr(usage, 'input_tokens', 0) or getattr(usage, 'prompt_tokens', 0)
545
+ output_tokens = getattr(usage, 'output_tokens', 0) or getattr(usage, 'completion_tokens', 0)
546
+
547
+ # Set token attributes
548
+ span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, input_tokens)
549
+ span.set_attribute(SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS, output_tokens)
550
+ span.set_attribute(SemanticConvention.GEN_AI_CLIENT_TOKEN_USAGE, input_tokens + output_tokens)
551
+
552
+ # Calculate cost (OpenLIT's business intelligence advantage)
553
+ model = getattr(data, 'model', 'gpt-4o')
554
+ cost = get_chat_model_cost(model, self._pricing_info, input_tokens, output_tokens)
555
+ span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, cost)
556
+
557
+ except Exception:
558
+ pass # Ignore errors in token usage extraction
559
+
560
+ def _process_response_data(self, span: Any, data: Any) -> None:
561
+ """Process response data with content capture"""
562
+ if self._capture_message_content:
563
+ self._capture_input_output(span, data)
564
+
565
+ def _process_trace_completion(self, span: Any, trace: "Trace") -> None:
566
+ """Process trace completion with business intelligence aggregation"""
567
+ # Add trace-level metadata
568
+ span.set_attribute(SemanticConvention.GEN_AI_OPERATION_NAME, "workflow")
569
+
570
+ # Calculate total duration
571
+ if trace.trace_id in self._span_start_times:
572
+ start_time = self._span_start_times[trace.trace_id]
573
+ duration = time.time() - start_time
574
+ span.set_attribute(SemanticConvention.GEN_AI_CLIENT_OPERATION_DURATION, duration)
575
+
576
+ def _parse_timestamp(self, timestamp: Any) -> float:
577
+ """Parse timestamp from various formats"""
578
+ if isinstance(timestamp, (int, float)):
579
+ return float(timestamp)
580
+ elif isinstance(timestamp, str):
581
+ try:
582
+ # Try parsing ISO format
583
+ dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00'))
584
+ return dt.timestamp()
585
+ except ValueError:
586
+ return time.time()
587
+ else:
588
+ return time.time()
589
+
590
+ def _as_utc_nano(self, timestamp: float) -> int:
591
+ """Convert timestamp to UTC nanoseconds for OpenTelemetry"""
592
+ return int(timestamp * 1_000_000_000)
593
+
594
+ def force_flush(self) -> bool:
595
+ """Force flush any pending spans (required by TracingProcessor)"""
596
+ return True
597
+
598
+ def shutdown(self) -> bool:
599
+ """Shutdown the processor (required by TracingProcessor)"""
600
+ return True
@@ -33,10 +33,10 @@ def object_count(obj):
33
33
  def set_server_address_and_port(instance):
34
34
  """
35
35
  Extracts server address and port from Pinecone client instance.
36
-
36
+
37
37
  Args:
38
38
  instance: Pinecone client instance
39
-
39
+
40
40
  Returns:
41
41
  tuple: (server_address, server_port)
42
42
  """
@@ -47,10 +47,10 @@ def object_count(obj):
47
47
  def set_server_address_and_port(instance):
48
48
  """
49
49
  Extracts server address and port from Qdrant client instance.
50
-
50
+
51
51
  Args:
52
52
  instance: Qdrant client instance
53
-
53
+
54
54
  Returns:
55
55
  tuple: (server_address, server_port)
56
56
  """
@@ -34,32 +34,32 @@ class TogetherInstrumentor(BaseInstrumentor):
34
34
 
35
35
  # Chat completions
36
36
  wrap_function_wrapper(
37
- "together.resources.chat.completions",
38
- "ChatCompletions.create",
37
+ "together.resources.chat.completions",
38
+ "ChatCompletions.create",
39
39
  completion(version, environment, application_name,
40
40
  tracer, pricing_info, capture_message_content, metrics, disable_metrics),
41
41
  )
42
42
 
43
43
  # Image generate
44
44
  wrap_function_wrapper(
45
- "together.resources.images",
46
- "Images.generate",
45
+ "together.resources.images",
46
+ "Images.generate",
47
47
  image_generate(version, environment, application_name,
48
48
  tracer, pricing_info, capture_message_content, metrics, disable_metrics),
49
49
  )
50
50
 
51
51
  # Chat completions
52
52
  wrap_function_wrapper(
53
- "together.resources.chat.completions",
54
- "AsyncChatCompletions.create",
53
+ "together.resources.chat.completions",
54
+ "AsyncChatCompletions.create",
55
55
  async_completion(version, environment, application_name,
56
56
  tracer, pricing_info, capture_message_content, metrics, disable_metrics),
57
57
  )
58
58
 
59
59
  # Image generate
60
60
  wrap_function_wrapper(
61
- "together.resources.images",
62
- "AsyncImages.generate",
61
+ "together.resources.images",
62
+ "AsyncImages.generate",
63
63
  async_image_generate(version, environment, application_name,
64
64
  tracer, pricing_info, capture_message_content, metrics, disable_metrics),
65
65
  )