genai-otel-instrument 0.1.24__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69) hide show
  1. genai_otel/__init__.py +132 -0
  2. genai_otel/__version__.py +34 -0
  3. genai_otel/auto_instrument.py +602 -0
  4. genai_otel/cli.py +92 -0
  5. genai_otel/config.py +333 -0
  6. genai_otel/cost_calculator.py +467 -0
  7. genai_otel/cost_enriching_exporter.py +207 -0
  8. genai_otel/cost_enrichment_processor.py +174 -0
  9. genai_otel/evaluation/__init__.py +76 -0
  10. genai_otel/evaluation/bias_detector.py +364 -0
  11. genai_otel/evaluation/config.py +261 -0
  12. genai_otel/evaluation/hallucination_detector.py +525 -0
  13. genai_otel/evaluation/pii_detector.py +356 -0
  14. genai_otel/evaluation/prompt_injection_detector.py +262 -0
  15. genai_otel/evaluation/restricted_topics_detector.py +316 -0
  16. genai_otel/evaluation/span_processor.py +962 -0
  17. genai_otel/evaluation/toxicity_detector.py +406 -0
  18. genai_otel/exceptions.py +17 -0
  19. genai_otel/gpu_metrics.py +516 -0
  20. genai_otel/instrumentors/__init__.py +71 -0
  21. genai_otel/instrumentors/anthropic_instrumentor.py +134 -0
  22. genai_otel/instrumentors/anyscale_instrumentor.py +27 -0
  23. genai_otel/instrumentors/autogen_instrumentor.py +394 -0
  24. genai_otel/instrumentors/aws_bedrock_instrumentor.py +94 -0
  25. genai_otel/instrumentors/azure_openai_instrumentor.py +69 -0
  26. genai_otel/instrumentors/base.py +919 -0
  27. genai_otel/instrumentors/bedrock_agents_instrumentor.py +398 -0
  28. genai_otel/instrumentors/cohere_instrumentor.py +140 -0
  29. genai_otel/instrumentors/crewai_instrumentor.py +311 -0
  30. genai_otel/instrumentors/dspy_instrumentor.py +661 -0
  31. genai_otel/instrumentors/google_ai_instrumentor.py +310 -0
  32. genai_otel/instrumentors/groq_instrumentor.py +106 -0
  33. genai_otel/instrumentors/guardrails_ai_instrumentor.py +510 -0
  34. genai_otel/instrumentors/haystack_instrumentor.py +503 -0
  35. genai_otel/instrumentors/huggingface_instrumentor.py +399 -0
  36. genai_otel/instrumentors/hyperbolic_instrumentor.py +236 -0
  37. genai_otel/instrumentors/instructor_instrumentor.py +425 -0
  38. genai_otel/instrumentors/langchain_instrumentor.py +340 -0
  39. genai_otel/instrumentors/langgraph_instrumentor.py +328 -0
  40. genai_otel/instrumentors/llamaindex_instrumentor.py +36 -0
  41. genai_otel/instrumentors/mistralai_instrumentor.py +315 -0
  42. genai_otel/instrumentors/ollama_instrumentor.py +197 -0
  43. genai_otel/instrumentors/ollama_server_metrics_poller.py +336 -0
  44. genai_otel/instrumentors/openai_agents_instrumentor.py +291 -0
  45. genai_otel/instrumentors/openai_instrumentor.py +260 -0
  46. genai_otel/instrumentors/pydantic_ai_instrumentor.py +362 -0
  47. genai_otel/instrumentors/replicate_instrumentor.py +87 -0
  48. genai_otel/instrumentors/sambanova_instrumentor.py +196 -0
  49. genai_otel/instrumentors/togetherai_instrumentor.py +146 -0
  50. genai_otel/instrumentors/vertexai_instrumentor.py +106 -0
  51. genai_otel/llm_pricing.json +1676 -0
  52. genai_otel/logging_config.py +45 -0
  53. genai_otel/mcp_instrumentors/__init__.py +14 -0
  54. genai_otel/mcp_instrumentors/api_instrumentor.py +144 -0
  55. genai_otel/mcp_instrumentors/base.py +105 -0
  56. genai_otel/mcp_instrumentors/database_instrumentor.py +336 -0
  57. genai_otel/mcp_instrumentors/kafka_instrumentor.py +31 -0
  58. genai_otel/mcp_instrumentors/manager.py +139 -0
  59. genai_otel/mcp_instrumentors/redis_instrumentor.py +31 -0
  60. genai_otel/mcp_instrumentors/vector_db_instrumentor.py +265 -0
  61. genai_otel/metrics.py +148 -0
  62. genai_otel/py.typed +2 -0
  63. genai_otel/server_metrics.py +197 -0
  64. genai_otel_instrument-0.1.24.dist-info/METADATA +1404 -0
  65. genai_otel_instrument-0.1.24.dist-info/RECORD +69 -0
  66. genai_otel_instrument-0.1.24.dist-info/WHEEL +5 -0
  67. genai_otel_instrument-0.1.24.dist-info/entry_points.txt +2 -0
  68. genai_otel_instrument-0.1.24.dist-info/licenses/LICENSE +680 -0
  69. genai_otel_instrument-0.1.24.dist-info/top_level.txt +1 -0
@@ -0,0 +1,310 @@
1
+ """OpenTelemetry instrumentor for Google Generative AI (Gemini) SDK.
2
+
3
+ This instrumentor supports both the legacy google-generativeai SDK and the new
4
+ google-genai unified SDK. It automatically detects which SDK is installed and
5
+ instruments accordingly.
6
+
7
+ Legacy SDK (deprecated Nov 30, 2025): import google.generativeai as genai
8
+ New SDK (GA May 2025): from google import genai
9
+ """
10
+
11
+ import logging
12
+ from typing import Any, Dict, Optional
13
+
14
+ from ..config import OTelConfig
15
+ from .base import BaseInstrumentor
16
+
17
+ logger = logging.getLogger(__name__)
18
+
19
+
20
class GoogleAIInstrumentor(BaseInstrumentor):
    """Instrumentor for Google Generative AI (Gemini).

    Supports both:
    - Legacy SDK: google-generativeai (pip install google-generativeai)
    - New SDK: google-genai (pip install google-genai)

    SDK detection happens once at construction time (see
    ``_check_availability``); ``instrument`` then patches whichever SDK
    was found.
    """

    def __init__(self) -> None:
        """Initialize the instrumentor and detect which Google SDK is installed."""
        super().__init__()
        # True when either SDK imports successfully.
        self._google_available = False
        # True when the new unified google-genai SDK was detected.
        self._using_new_sdk = False
        self._check_availability()

    def _check_availability(self) -> None:
        """Check if a Google Generative AI library is available.

        Checks for the new SDK first, falls back to the legacy SDK.
        Sets ``_google_available`` and ``_using_new_sdk`` accordingly;
        never raises on a missing library.
        """
        # Try new SDK first (google-genai)
        try:
            from google import genai

            self._google_available = True
            self._using_new_sdk = True
            logger.debug(
                "Google GenAI (new unified SDK) detected and available for instrumentation"
            )
            return
        except ImportError:
            pass

        # Fall back to legacy SDK (google-generativeai)
        try:
            import google.generativeai as genai

            self._google_available = True
            self._using_new_sdk = False
            logger.debug(
                "Google Generative AI (legacy SDK) detected and available for instrumentation. "
                "Consider migrating to google-genai (support for legacy SDK ends Nov 30, 2025)"
            )
            return
        except ImportError:
            logger.debug(
                "Google Generative AI library not installed, instrumentation will be skipped"
            )
            self._google_available = False

    def instrument(self, config: OTelConfig) -> None:
        """Instrument the Google Generative AI SDK if available.

        Args:
            config (OTelConfig): The OpenTelemetry configuration object.
                When ``config.fail_on_error`` is true, instrumentation
                failures are re-raised instead of only logged.
        """
        if not self._google_available:
            logger.debug("Skipping Google Generative AI instrumentation - library not available")
            return

        self.config = config

        try:
            if self._using_new_sdk:
                self._instrument_new_sdk()
            else:
                self._instrument_legacy_sdk()

            self._instrumented = True
            sdk_type = "new unified SDK" if self._using_new_sdk else "legacy SDK"
            logger.info(f"Google Generative AI instrumentation enabled ({sdk_type})")

        except Exception as e:
            logger.error("Failed to instrument Google Generative AI: %s", e, exc_info=True)
            if config.fail_on_error:
                raise

    def _instrument_new_sdk(self) -> None:
        """Instrument the new google-genai unified SDK.

        Patches ``genai.Client.__init__`` so every new client instance
        gets its ``models.generate_content`` wrapped, and also wraps the
        class-level ``GenerativeModel.generate_content`` if present.
        """
        import wrapt
        from google import genai

        # The new SDK uses a Client-based approach.
        # Instrument the Client class initialization to wrap generate_content
        # methods on each freshly constructed client instance.
        if hasattr(genai, "Client"):
            original_init = genai.Client.__init__

            def wrapped_init(wrapped, instance, args, kwargs):
                # wrapt wrapper signature: `wrapped` is the bound original
                # __init__, `instance` the Client being constructed.
                result = wrapped(*args, **kwargs)
                self._instrument_client(instance)
                return result

            # NOTE(review): calling instrument() twice would stack a second
            # FunctionWrapper around the first — presumed to be called once.
            genai.Client.__init__ = wrapt.FunctionWrapper(original_init, wrapped_init)

        # Also instrument GenerativeModel if it exists (for backward compatibility)
        if hasattr(genai, "GenerativeModel"):
            if hasattr(genai.GenerativeModel, "generate_content"):
                original_generate = genai.GenerativeModel.generate_content
                # create_span_wrapper is provided by BaseInstrumentor and is
                # assumed to return a decorator — TODO confirm against base.py.
                genai.GenerativeModel.generate_content = self.create_span_wrapper(
                    span_name="google.genai.generate_content",
                    extract_attributes=self._extract_google_ai_attributes,
                )(original_generate)

    def _instrument_client(self, client) -> None:
        """Instrument a google-genai Client instance.

        Args:
            client: The genai.Client instance to instrument. Its
                ``models.generate_content`` bound method is replaced with a
                span-creating wrapper; instances lacking that attribute are
                left untouched.
        """
        # Instrument models.generate_content if available
        if hasattr(client, "models"):
            if hasattr(client.models, "generate_content"):
                original_generate = client.models.generate_content
                client.models.generate_content = self.create_span_wrapper(
                    span_name="google.genai.models.generate_content",
                    extract_attributes=self._extract_google_ai_attributes_new_sdk,
                )(original_generate)

    def _instrument_legacy_sdk(self) -> None:
        """Instrument the legacy google-generativeai SDK.

        Wraps the class-level ``GenerativeModel.generate_content`` so all
        model instances are traced.
        """
        import google.generativeai as genai

        # Legacy SDK: Instrument GenerativeModel.generate_content
        if hasattr(genai, "GenerativeModel"):
            if hasattr(genai.GenerativeModel, "generate_content"):
                original_generate = genai.GenerativeModel.generate_content
                genai.GenerativeModel.generate_content = self.create_span_wrapper(
                    span_name="google.generativeai.generate_content",
                    extract_attributes=self._extract_google_ai_attributes,
                )(original_generate)

    def _extract_google_ai_attributes(
        self, instance: Any, args: Any, kwargs: Any
    ) -> Dict[str, Any]:
        """Extract span attributes from a Google AI API call (legacy SDK).

        Args:
            instance: The GenerativeModel instance (``model_name`` is read
                from it; defaults to "unknown").
            args: Positional arguments (unused here).
            kwargs: Keyword arguments; ``generation_config`` and
                ``safety_settings`` are inspected if present.

        Returns:
            Dict[str, Any]: Dictionary of attributes to set on the span,
            using ``gen_ai.*`` semantic-convention keys.
        """
        attrs = {}

        # Extract model name from instance
        model_name = getattr(instance, "model_name", "unknown")
        attrs["gen_ai.system"] = "google"
        attrs["gen_ai.request.model"] = model_name
        attrs["gen_ai.operation.name"] = "chat"

        # Extract generation config if available (attribute-style object)
        if "generation_config" in kwargs:
            config = kwargs["generation_config"]
            if hasattr(config, "temperature"):
                attrs["gen_ai.request.temperature"] = config.temperature
            if hasattr(config, "top_p"):
                attrs["gen_ai.request.top_p"] = config.top_p
            if hasattr(config, "max_output_tokens"):
                attrs["gen_ai.request.max_tokens"] = config.max_output_tokens

        # Extract safety settings count if available
        if "safety_settings" in kwargs:
            attrs["gen_ai.request.safety_settings_count"] = len(kwargs["safety_settings"])

        return attrs

    def _extract_google_ai_attributes_new_sdk(
        self, instance: Any, args: Any, kwargs: Any
    ) -> Dict[str, Any]:
        """Extract span attributes from a Google AI API call (new SDK).

        Args:
            instance: The client instance (unused here).
            args: Positional arguments (unused here).
            kwargs: Keyword arguments; the new SDK passes ``model`` and an
                optional ``config`` (only dict-shaped configs are inspected).

        Returns:
            Dict[str, Any]: Dictionary of attributes to set on the span.
        """
        attrs = {}

        attrs["gen_ai.system"] = "google"
        attrs["gen_ai.operation.name"] = "chat"

        # Extract model from kwargs (new SDK uses model parameter)
        if "model" in kwargs:
            attrs["gen_ai.request.model"] = kwargs["model"]

        # Extract config parameters if available.
        # NOTE(review): only dict configs are handled; a GenerateContentConfig
        # object would be silently skipped — confirm whether that is intended.
        if "config" in kwargs:
            config = kwargs["config"]
            if isinstance(config, dict):
                if "temperature" in config:
                    attrs["gen_ai.request.temperature"] = config["temperature"]
                if "top_p" in config:
                    attrs["gen_ai.request.top_p"] = config["top_p"]
                if "max_output_tokens" in config:
                    attrs["gen_ai.request.max_tokens"] = config["max_output_tokens"]

        return attrs

    def _extract_usage(self, result) -> Optional[Dict[str, int]]:
        """Extract token usage from a Google AI response.

        Works with both legacy and new SDK response formats: tries
        ``usage_metadata`` first, then a generic ``usage`` attribute with
        either naming scheme.

        Args:
            result: The API response object.

        Returns:
            Optional[Dict[str, int]]: Dictionary with ``prompt_tokens``,
            ``completion_tokens`` and ``total_tokens`` (missing counts
            default to 0), or None when no usage info is present.
        """
        # Try new SDK format first (usage_metadata)
        if hasattr(result, "usage_metadata") and result.usage_metadata:
            usage = result.usage_metadata
            return {
                "prompt_tokens": getattr(usage, "prompt_token_count", 0),
                "completion_tokens": getattr(usage, "candidates_token_count", 0),
                "total_tokens": getattr(usage, "total_token_count", 0),
            }

        # Try alternative attribute names (in case SDK changes)
        if hasattr(result, "usage") and result.usage:
            usage = result.usage
            return {
                "prompt_tokens": getattr(
                    usage, "prompt_tokens", getattr(usage, "prompt_token_count", 0)
                ),
                "completion_tokens": getattr(
                    usage, "completion_tokens", getattr(usage, "candidates_token_count", 0)
                ),
                "total_tokens": getattr(
                    usage, "total_tokens", getattr(usage, "total_token_count", 0)
                ),
            }

        return None

    def _extract_response_attributes(self, result) -> Dict[str, Any]:
        """Extract response attributes from a Google AI response.

        Args:
            result: The API response object.

        Returns:
            Dict[str, Any]: Response attributes — model name, finish reasons
            for all candidates, and safety ratings for the first candidate.
        """
        attrs = {}

        # Extract model name from response if available
        if hasattr(result, "model"):
            attrs["gen_ai.response.model"] = result.model

        # Extract finish reasons from candidates
        if hasattr(result, "candidates") and result.candidates:
            finish_reasons = []
            for candidate in result.candidates:
                if hasattr(candidate, "finish_reason"):
                    finish_reasons.append(str(candidate.finish_reason))

            if finish_reasons:
                attrs["gen_ai.response.finish_reasons"] = finish_reasons

        # Extract safety ratings if available
        if hasattr(result, "candidates") and result.candidates:
            for idx, candidate in enumerate(result.candidates[:1]):  # Limit to first candidate
                if hasattr(candidate, "safety_ratings") and candidate.safety_ratings:
                    for rating in candidate.safety_ratings:
                        category = getattr(rating, "category", "unknown")
                        probability = getattr(rating, "probability", "unknown")
                        attrs[f"gen_ai.safety.{category}"] = str(probability)

        return attrs

    def _extract_finish_reason(self, result) -> Optional[str]:
        """Extract the finish reason from a Google AI response.

        Args:
            result: The Google AI API response object.

        Returns:
            Optional[str]: The first candidate's finish reason as a string,
            or None if not available.
        """
        if hasattr(result, "candidates") and result.candidates:
            first_candidate = result.candidates[0]
            if hasattr(first_candidate, "finish_reason"):
                return str(first_candidate.finish_reason)

        return None
@@ -0,0 +1,106 @@
1
+ """OpenTelemetry instrumentor for the Groq SDK.
2
+
3
+ This instrumentor automatically traces chat completion calls to Groq models,
4
+ capturing relevant attributes such as the model name and token usage.
5
+ """
6
+
7
+ import logging
8
+ from typing import Dict, Optional
9
+
10
+ from ..config import OTelConfig
11
+ from .base import BaseInstrumentor
12
+
13
+ logger = logging.getLogger(__name__)
14
+
15
+
16
class GroqInstrumentor(BaseInstrumentor):
    """Instrumentor for Groq.

    Patches ``groq.Groq.__init__`` so each new client instance has its
    ``chat.completions.create`` method wrapped in a tracing span.
    """

    def __init__(self) -> None:
        """Initialize the instrumentor and detect whether groq is installed."""
        super().__init__()
        # True when `import groq` succeeds.
        self._groq_available = False
        self._check_availability()

    def _check_availability(self) -> None:
        """Check if the Groq library is available; never raises."""
        try:
            import groq  # noqa: F401 — availability probe only

            self._groq_available = True
            logger.debug("Groq library detected and available for instrumentation")
        except ImportError:
            logger.debug("Groq library not installed, instrumentation will be skipped")
            self._groq_available = False

    def instrument(self, config: OTelConfig) -> None:
        """Instrument the Groq SDK if available.

        Args:
            config (OTelConfig): The OpenTelemetry configuration object.
                When ``config.fail_on_error`` is true, instrumentation
                failures are re-raised instead of only logged.
        """
        if not self._groq_available:
            logger.debug("Skipping Groq instrumentation - library not available")
            return

        self.config = config

        try:
            import groq

            original_init = groq.Groq.__init__

            def wrapped_init(instance, *args, **kwargs):
                # Run the real constructor, then wrap this instance's methods.
                original_init(instance, *args, **kwargs)
                self._instrument_client(instance)
                # BUGFIX: __init__ must return None. The previous version
                # returned `instance`, which made every `groq.Groq(...)`
                # construction raise "TypeError: __init__() should return
                # None" via type.__call__.

            groq.Groq.__init__ = wrapped_init
            self._instrumented = True
            logger.info("Groq instrumentation enabled")

        except Exception as e:
            logger.error("Failed to instrument Groq: %s", e, exc_info=True)
            if config.fail_on_error:
                raise

    def _instrument_client(self, client) -> None:
        """Instrument a Groq client instance.

        Args:
            client: The Groq client instance; its bound
                ``chat.completions.create`` is replaced with a traced wrapper.
        """
        original_create = client.chat.completions.create

        def wrapped_create(*args, **kwargs):
            # self.tracer / self.request_counter / _record_result_metrics are
            # provided by BaseInstrumentor — assumed initialized by the time a
            # client is constructed (TODO confirm against base.py).
            with self.tracer.start_as_current_span("groq.chat.completions") as span:
                model = kwargs.get("model", "unknown")

                span.set_attribute("gen_ai.system", "groq")
                span.set_attribute("gen_ai.request.model", model)

                if self.request_counter:
                    self.request_counter.add(1, {"model": model, "provider": "groq"})

                result = original_create(*args, **kwargs)
                # Third argument is presumably an elapsed-time placeholder —
                # kept at 0 as in the original implementation.
                self._record_result_metrics(span, result, 0)
                return result

        client.chat.completions.create = wrapped_create

    def _extract_usage(self, result) -> Optional[Dict[str, int]]:
        """Extract token usage from a Groq response.

        Args:
            result: The API response object.

        Returns:
            Optional[Dict[str, int]]: Dictionary with ``prompt_tokens``,
            ``completion_tokens`` and ``total_tokens``, or None when usage
            info is absent.
        """
        # Guard against `usage` being present but None (e.g. streaming
        # chunks), matching the pattern used by the other instrumentors.
        if hasattr(result, "usage") and result.usage:
            return {
                "prompt_tokens": result.usage.prompt_tokens,
                "completion_tokens": result.usage.completion_tokens,
                "total_tokens": result.usage.total_tokens,
            }
        return None