genai-otel-instrument 0.1.24 (genai_otel_instrument-0.1.24-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. genai_otel/__init__.py +132 -0
  2. genai_otel/__version__.py +34 -0
  3. genai_otel/auto_instrument.py +602 -0
  4. genai_otel/cli.py +92 -0
  5. genai_otel/config.py +333 -0
  6. genai_otel/cost_calculator.py +467 -0
  7. genai_otel/cost_enriching_exporter.py +207 -0
  8. genai_otel/cost_enrichment_processor.py +174 -0
  9. genai_otel/evaluation/__init__.py +76 -0
  10. genai_otel/evaluation/bias_detector.py +364 -0
  11. genai_otel/evaluation/config.py +261 -0
  12. genai_otel/evaluation/hallucination_detector.py +525 -0
  13. genai_otel/evaluation/pii_detector.py +356 -0
  14. genai_otel/evaluation/prompt_injection_detector.py +262 -0
  15. genai_otel/evaluation/restricted_topics_detector.py +316 -0
  16. genai_otel/evaluation/span_processor.py +962 -0
  17. genai_otel/evaluation/toxicity_detector.py +406 -0
  18. genai_otel/exceptions.py +17 -0
  19. genai_otel/gpu_metrics.py +516 -0
  20. genai_otel/instrumentors/__init__.py +71 -0
  21. genai_otel/instrumentors/anthropic_instrumentor.py +134 -0
  22. genai_otel/instrumentors/anyscale_instrumentor.py +27 -0
  23. genai_otel/instrumentors/autogen_instrumentor.py +394 -0
  24. genai_otel/instrumentors/aws_bedrock_instrumentor.py +94 -0
  25. genai_otel/instrumentors/azure_openai_instrumentor.py +69 -0
  26. genai_otel/instrumentors/base.py +919 -0
  27. genai_otel/instrumentors/bedrock_agents_instrumentor.py +398 -0
  28. genai_otel/instrumentors/cohere_instrumentor.py +140 -0
  29. genai_otel/instrumentors/crewai_instrumentor.py +311 -0
  30. genai_otel/instrumentors/dspy_instrumentor.py +661 -0
  31. genai_otel/instrumentors/google_ai_instrumentor.py +310 -0
  32. genai_otel/instrumentors/groq_instrumentor.py +106 -0
  33. genai_otel/instrumentors/guardrails_ai_instrumentor.py +510 -0
  34. genai_otel/instrumentors/haystack_instrumentor.py +503 -0
  35. genai_otel/instrumentors/huggingface_instrumentor.py +399 -0
  36. genai_otel/instrumentors/hyperbolic_instrumentor.py +236 -0
  37. genai_otel/instrumentors/instructor_instrumentor.py +425 -0
  38. genai_otel/instrumentors/langchain_instrumentor.py +340 -0
  39. genai_otel/instrumentors/langgraph_instrumentor.py +328 -0
  40. genai_otel/instrumentors/llamaindex_instrumentor.py +36 -0
  41. genai_otel/instrumentors/mistralai_instrumentor.py +315 -0
  42. genai_otel/instrumentors/ollama_instrumentor.py +197 -0
  43. genai_otel/instrumentors/ollama_server_metrics_poller.py +336 -0
  44. genai_otel/instrumentors/openai_agents_instrumentor.py +291 -0
  45. genai_otel/instrumentors/openai_instrumentor.py +260 -0
  46. genai_otel/instrumentors/pydantic_ai_instrumentor.py +362 -0
  47. genai_otel/instrumentors/replicate_instrumentor.py +87 -0
  48. genai_otel/instrumentors/sambanova_instrumentor.py +196 -0
  49. genai_otel/instrumentors/togetherai_instrumentor.py +146 -0
  50. genai_otel/instrumentors/vertexai_instrumentor.py +106 -0
  51. genai_otel/llm_pricing.json +1676 -0
  52. genai_otel/logging_config.py +45 -0
  53. genai_otel/mcp_instrumentors/__init__.py +14 -0
  54. genai_otel/mcp_instrumentors/api_instrumentor.py +144 -0
  55. genai_otel/mcp_instrumentors/base.py +105 -0
  56. genai_otel/mcp_instrumentors/database_instrumentor.py +336 -0
  57. genai_otel/mcp_instrumentors/kafka_instrumentor.py +31 -0
  58. genai_otel/mcp_instrumentors/manager.py +139 -0
  59. genai_otel/mcp_instrumentors/redis_instrumentor.py +31 -0
  60. genai_otel/mcp_instrumentors/vector_db_instrumentor.py +265 -0
  61. genai_otel/metrics.py +148 -0
  62. genai_otel/py.typed +2 -0
  63. genai_otel/server_metrics.py +197 -0
  64. genai_otel_instrument-0.1.24.dist-info/METADATA +1404 -0
  65. genai_otel_instrument-0.1.24.dist-info/RECORD +69 -0
  66. genai_otel_instrument-0.1.24.dist-info/WHEEL +5 -0
  67. genai_otel_instrument-0.1.24.dist-info/entry_points.txt +2 -0
  68. genai_otel_instrument-0.1.24.dist-info/licenses/LICENSE +680 -0
  69. genai_otel_instrument-0.1.24.dist-info/top_level.txt +1 -0
genai_otel/instrumentors/instructor_instrumentor.py
@@ -0,0 +1,425 @@
+ """OpenTelemetry instrumentor for Instructor framework.
+
+ This instrumentor automatically traces structured output extraction using
+ Instructor's Pydantic-based response models with automatic validation and retries.
+
+ Instructor is a popular library (8K+ GitHub stars) for extracting structured data
+ from LLMs using Pydantic models, supporting OpenAI, Anthropic, Google, and more.
+
+ Requirements:
+     pip install instructor
+ """
+
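For orientation, this is the style of Instructor call the instrumentor below is built to trace. A minimal sketch, not part of the package: it assumes Instructor's standard from_provider / response_model API and provider credentials in the environment; the provider string, model, and field names are illustrative.

import instructor
from pydantic import BaseModel

class UserInfo(BaseModel):  # hypothetical response_model
    name: str
    age: int

# Client creation is what the "instructor.from_provider" span below covers.
client = instructor.from_provider("openai/gpt-4o-mini")

# Structured extraction with retries; the wrapped Instructor.create_with_completion
# and retry helper below are what produce the extraction and retry spans.
user = client.chat.completions.create(
    response_model=UserInfo,
    messages=[{"role": "user", "content": "John is 31 years old."}],
    max_retries=2,
)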
+ import logging
+ from typing import Any, Dict, Optional
+
+ from ..config import OTelConfig
+ from .base import BaseInstrumentor
+
+ logger = logging.getLogger(__name__)
+
+
+ class InstructorInstrumentor(BaseInstrumentor):
+     """Instrumentor for Instructor framework"""
+
+     def __init__(self):
+         """Initialize the instrumentor."""
+         super().__init__()
+         self._instructor_available = False
+         self._check_availability()
+
+     def _check_availability(self):
+         """Check if Instructor is available."""
+         try:
+             import instructor
+
+             self._instructor_available = True
+             logger.debug("Instructor framework detected and available for instrumentation")
+         except ImportError:
+             logger.debug("Instructor not installed, instrumentation will be skipped")
+             self._instructor_available = False
+
+     def instrument(self, config: OTelConfig):
+         """Instrument Instructor if available.
+
+         Args:
+             config (OTelConfig): The OpenTelemetry configuration object.
+         """
+         if not self._instructor_available:
+             logger.debug("Skipping Instructor instrumentation - library not available")
+             return
+
+         self.config = config
+
+         try:
+             import wrapt
+
+             # Wrap from_provider method
+             wrapt.wrap_function_wrapper(
+                 "instructor",
+                 "from_provider",
+                 self._wrap_from_provider,
+             )
+
+             # Wrap patch method (legacy API)
+             wrapt.wrap_function_wrapper(
+                 "instructor",
+                 "patch",
+                 self._wrap_patch,
+             )
+
+             # Wrap the actual completion create method
+             # This happens after patching, so we wrap the process_response method
+             try:
+                 wrapt.wrap_function_wrapper(
+                     "instructor.client",
+                     "Instructor.create_with_completion",
+                     self._wrap_create_with_completion,
+                 )
+             except (ImportError, AttributeError):
+                 logger.debug("create_with_completion method not available")
+
+             # Wrap retry logic
+             try:
+                 wrapt.wrap_function_wrapper(
+                     "instructor.retry",
+                     "retry_sync",
+                     self._wrap_retry_sync,
+                 )
+             except (ImportError, AttributeError):
+                 logger.debug("retry_sync not available for instrumentation")
+
+             self._instrumented = True
+             logger.info("Instructor instrumentation enabled")
+
+         except Exception as e:
+             logger.error("Failed to instrument Instructor: %s", e, exc_info=True)
+             if config.fail_on_error:
+                 raise
+
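The instrument() method above leans entirely on wrapt.wrap_function_wrapper(module, name, wrapper), where the wrapper receives (wrapped, instance, args, kwargs) and must call wrapped itself. A self-contained sketch of that pattern against a stdlib function (json.dumps is only a stand-in target, not something this package wraps):

import json
import wrapt

def traced(wrapped, instance, args, kwargs):
    # In this package create_span_wrapper() does the span work; here we just log and forward.
    print(f"calling {getattr(wrapped, '__name__', wrapped)}")
    return wrapped(*args, **kwargs)

wrapt.wrap_function_wrapper(json, "dumps", traced)
print(json.dumps({"ok": True}))  # prints "calling dumps", then {"ok": true}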
+     def _wrap_from_provider(self, wrapped, instance, args, kwargs):
+         """Wrap instructor.from_provider to trace client creation.
+
+         Args:
+             wrapped: The original method
+             instance: The instance (None for module function)
+             args: Positional arguments
+             kwargs: Keyword arguments
+
+         Returns:
+             The result of the wrapped method
+         """
+         return self.create_span_wrapper(
+             span_name="instructor.from_provider",
+             extract_attributes=lambda inst, args, kwargs: self._extract_from_provider_attributes(
+                 args, kwargs
+             ),
+         )(wrapped)(*args, **kwargs)
+
+     def _wrap_patch(self, wrapped, instance, args, kwargs):
+         """Wrap instructor.patch to trace client patching.
+
+         Args:
+             wrapped: The original method
+             instance: The instance (None for module function)
+             args: Positional arguments
+             kwargs: Keyword arguments
+
+         Returns:
+             The result of the wrapped method
+         """
+         return self.create_span_wrapper(
+             span_name="instructor.patch",
+             extract_attributes=lambda inst, args, kwargs: self._extract_patch_attributes(
+                 args, kwargs
+             ),
+         )(wrapped)(*args, **kwargs)
+
+     def _wrap_create_with_completion(self, wrapped, instance, args, kwargs):
+         """Wrap Instructor.create_with_completion to trace structured extraction.
+
+         Args:
+             wrapped: The original method
+             instance: The Instructor instance
+             args: Positional arguments
+             kwargs: Keyword arguments
+
+         Returns:
+             The result of the wrapped method
+         """
+         return self.create_span_wrapper(
+             span_name="instructor.create_with_completion",
+             extract_attributes=lambda inst, args, kwargs: self._extract_create_attributes(
+                 instance, kwargs
+             ),
+             extract_response_attributes=self._extract_create_response_attributes,
+         )(wrapped)(*args, **kwargs)
+
+     def _wrap_retry_sync(self, wrapped, instance, args, kwargs):
+         """Wrap retry_sync to trace retry attempts.
+
+         Args:
+             wrapped: The original method
+             instance: The instance
+             args: Positional arguments
+             kwargs: Keyword arguments
+
+         Returns:
+             The result of the wrapped method
+         """
+         return self.create_span_wrapper(
+             span_name="instructor.retry",
+             extract_attributes=lambda inst, args, kwargs: self._extract_retry_attributes(kwargs),
+         )(wrapped)(*args, **kwargs)
+
+     def _extract_from_provider_attributes(
+         self, args: Any, kwargs: Dict[str, Any]
+     ) -> Dict[str, Any]:
+         """Extract attributes from from_provider call.
+
+         Args:
+             args: Positional arguments
+             kwargs: Keyword arguments
+
+         Returns:
+             Dict[str, Any]: Dictionary of attributes to set on the span.
+         """
+         attrs = {}
+
+         # Core attributes
+         attrs["gen_ai.system"] = "instructor"
+         attrs["gen_ai.operation.name"] = "from_provider"
+
+         try:
+             # Extract provider string
+             if args and len(args) > 0:
+                 provider_str = args[0]
+                 attrs["instructor.provider"] = str(provider_str)
+
+                 # Parse provider/model format
+                 if "/" in provider_str:
+                     provider, model = provider_str.split("/", 1)
+                     attrs["instructor.provider.name"] = provider
+                     attrs["gen_ai.request.model"] = model
+
+             # Extract mode if provided
+             if "mode" in kwargs:
+                 attrs["instructor.mode"] = str(kwargs["mode"])
+
+         except Exception as e:
+             logger.debug("Failed to extract from_provider attributes: %s", e)
+
+         return attrs
+
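Concretely, the provider/model parsing above turns a call such as instructor.from_provider("openai/gpt-4o") into roughly the following attribute set (provider and model values are illustrative):

# Illustrative expected output of _extract_from_provider_attributes for
# instructor.from_provider("openai/gpt-4o"):
attrs = {
    "gen_ai.system": "instructor",
    "gen_ai.operation.name": "from_provider",
    "instructor.provider": "openai/gpt-4o",
    "instructor.provider.name": "openai",
    "gen_ai.request.model": "gpt-4o",
}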
+     def _extract_patch_attributes(self, args: Any, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+         """Extract attributes from patch call.
+
+         Args:
+             args: Positional arguments
+             kwargs: Keyword arguments
+
+         Returns:
+             Dict[str, Any]: Dictionary of attributes to set on the span.
+         """
+         attrs = {}
+
+         # Core attributes
+         attrs["gen_ai.system"] = "instructor"
+         attrs["gen_ai.operation.name"] = "patch"
+
+         try:
+             # Extract client type
+             if args and len(args) > 0:
+                 client = args[0]
+                 client_type = type(client).__name__
+                 attrs["instructor.client.type"] = client_type
+
+             # Extract mode
+             if "mode" in kwargs:
+                 attrs["instructor.mode"] = str(kwargs["mode"])
+
+         except Exception as e:
+             logger.debug("Failed to extract patch attributes: %s", e)
+
+         return attrs
+
+     def _extract_create_attributes(self, instance: Any, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+         """Extract attributes from create_with_completion call.
+
+         Args:
+             instance: The Instructor instance
+             kwargs: Keyword arguments
+
+         Returns:
+             Dict[str, Any]: Dictionary of attributes to set on the span.
+         """
+         attrs = {}
+
+         # Core attributes
+         attrs["gen_ai.system"] = "instructor"
+         attrs["gen_ai.operation.name"] = "create_with_completion"
+
+         try:
+             # Extract response_model information
+             if "response_model" in kwargs:
+                 response_model = kwargs["response_model"]
+
+                 # Get model name
+                 if hasattr(response_model, "__name__"):
+                     attrs["instructor.response_model.name"] = response_model.__name__
+                 elif hasattr(response_model, "__class__"):
+                     attrs["instructor.response_model.name"] = response_model.__class__.__name__
+
+                 # Extract field information from Pydantic model
+                 if hasattr(response_model, "model_fields"):
+                     fields = response_model.model_fields
+                     field_names = list(fields.keys())[:10]
+                     attrs["instructor.response_model.fields"] = field_names
+                     attrs["instructor.response_model.fields_count"] = len(fields)
+
+                 # Check if it's a streaming model (Partial)
+                 if hasattr(response_model, "__origin__"):
+                     attrs["instructor.response_model.is_partial"] = True
+
+             # Extract max_retries
+             if "max_retries" in kwargs:
+                 attrs["instructor.max_retries"] = kwargs["max_retries"]
+
+             # Extract model from messages/kwargs
+             if "model" in kwargs:
+                 attrs["gen_ai.request.model"] = str(kwargs["model"])
+
+             # Extract streaming flag
+             if "stream" in kwargs:
+                 attrs["instructor.stream"] = bool(kwargs["stream"])
+
+             # Extract validation mode
+             if "validation_context" in kwargs:
+                 attrs["instructor.has_validation_context"] = True
+
+         except Exception as e:
+             logger.debug("Failed to extract create_with_completion attributes: %s", e)
+
+         return attrs
+
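The response_model introspection above uses only standard Pydantic v2 class attributes. A small runnable sketch of what it reads (the Invoice model is hypothetical):

from pydantic import BaseModel

class Invoice(BaseModel):  # hypothetical response_model
    number: str
    total: float

print(Invoice.__name__)            # "Invoice"            -> instructor.response_model.name
print(list(Invoice.model_fields))  # ["number", "total"]  -> instructor.response_model.fields
print(len(Invoice.model_fields))   # 2                    -> instructor.response_model.fields_count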
+     def _extract_retry_attributes(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+         """Extract attributes from retry call.
+
+         Args:
+             kwargs: Keyword arguments
+
+         Returns:
+             Dict[str, Any]: Dictionary of attributes to set on the span.
+         """
+         attrs = {}
+
+         # Core attributes
+         attrs["gen_ai.system"] = "instructor"
+         attrs["gen_ai.operation.name"] = "retry"
+
+         try:
+             # Extract max attempts
+             if "max_retries" in kwargs:
+                 attrs["instructor.retry.max_attempts"] = kwargs["max_retries"]
+
+             # Extract retry context
+             if "context" in kwargs:
+                 attrs["instructor.retry.has_context"] = True
+
+         except Exception as e:
+             logger.debug("Failed to extract retry attributes: %s", e)
+
+         return attrs
+
+     def _extract_create_response_attributes(self, result: Any) -> Dict[str, Any]:
+         """Extract response attributes from create_with_completion result.
+
+         Args:
+             result: The structured output result (Pydantic model instance)
+
+         Returns:
+             Dict[str, Any]: Dictionary of response attributes.
+         """
+         attrs = {}
+
+         try:
+             # Check if result is a Pydantic model
+             if hasattr(result, "model_dump"):
+                 # Get result type
+                 attrs["instructor.response.type"] = result.__class__.__name__
+
+                 # Try to get field count
+                 if hasattr(result, "model_fields"):
+                     attrs["instructor.response.fields_count"] = len(result.model_fields)
+
+                 # Extract some field values (limit to avoid huge spans)
+                 try:
+                     dumped = result.model_dump()
+                     if dumped:
+                         # Get first few keys
+                         keys = list(dumped.keys())[:5]
+                         attrs["instructor.response.fields"] = keys
+
+                         # Extract first few values (truncated)
+                         for key in keys[:3]:
+                             value = dumped[key]
+                             if isinstance(value, (str, int, float, bool)):
+                                 value_str = str(value)
+                                 attrs[f"instructor.response.{key}"] = value_str[:200]
+                 except Exception:
+                     pass
+
+                 # Validation successful if we got a Pydantic model
+                 attrs["instructor.validation.success"] = True
+             else:
+                 # No Pydantic model means validation failed
+                 attrs["instructor.validation.success"] = False
+
+         except Exception as e:
+             logger.debug("Failed to extract create_with_completion response attributes: %s", e)
+             attrs["instructor.validation.success"] = False
+
+         return attrs
+
+     def _extract_usage(self, result) -> Optional[Dict[str, int]]:
+         """Extract token usage from Instructor result.
+
+         Note: Instructor wraps LLM provider calls.
+         Token usage is captured by underlying provider instrumentors.
+
+         Args:
+             result: The Instructor operation result.
+
+         Returns:
+             Optional[Dict[str, int]]: Dictionary with token counts or None.
+         """
+         # Token usage is tracked by underlying LLM provider instrumentors
+         # Instructor responses don't typically expose token usage directly
+         return None
+
+     def _extract_finish_reason(self, result) -> Optional[str]:
+         """Extract finish reason from Instructor result.
+
+         Args:
+             result: The Instructor operation result.
+
+         Returns:
+             Optional[str]: The finish reason string or None if not available.
+         """
+         try:
+             # For successful Pydantic model extraction
+             if hasattr(result, "model_dump"):
+                 return "completed"
+
+             # Check for validation metadata
+             if hasattr(result, "_raw_response"):
+                 raw_response = result._raw_response
+                 if hasattr(raw_response, "choices") and raw_response.choices:
+                     choice = raw_response.choices[0]
+                     if hasattr(choice, "finish_reason"):
+                         return choice.finish_reason
+
+         except Exception as e:
+             logger.debug("Failed to extract finish reason: %s", e)
+
+         return None
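For completeness, a sketch of wiring this instrumentor up by hand. It assumes OTelConfig can be constructed with defaults; in normal use the package's auto_instrument module (see the file list above) is the entry point rather than manual registration.

from genai_otel.config import OTelConfig
from genai_otel.instrumentors.instructor_instrumentor import InstructorInstrumentor

instrumentor = InstructorInstrumentor()  # silently no-ops if instructor is not installed
instrumentor.instrument(OTelConfig())    # assumption: a default OTelConfig() is valid here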