genai-otel-instrument 0.1.1.dev0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of genai-otel-instrument might be problematic.
Files changed (44)
  1. genai_otel/__init__.py +129 -0
  2. genai_otel/__version__.py +34 -0
  3. genai_otel/auto_instrument.py +413 -0
  4. genai_otel/cli.py +92 -0
  5. genai_otel/config.py +187 -0
  6. genai_otel/cost_calculator.py +276 -0
  7. genai_otel/exceptions.py +17 -0
  8. genai_otel/gpu_metrics.py +240 -0
  9. genai_otel/instrumentors/__init__.py +47 -0
  10. genai_otel/instrumentors/anthropic_instrumentor.py +134 -0
  11. genai_otel/instrumentors/anyscale_instrumentor.py +27 -0
  12. genai_otel/instrumentors/aws_bedrock_instrumentor.py +94 -0
  13. genai_otel/instrumentors/azure_openai_instrumentor.py +69 -0
  14. genai_otel/instrumentors/base.py +528 -0
  15. genai_otel/instrumentors/cohere_instrumentor.py +76 -0
  16. genai_otel/instrumentors/google_ai_instrumentor.py +87 -0
  17. genai_otel/instrumentors/groq_instrumentor.py +106 -0
  18. genai_otel/instrumentors/huggingface_instrumentor.py +97 -0
  19. genai_otel/instrumentors/langchain_instrumentor.py +75 -0
  20. genai_otel/instrumentors/llamaindex_instrumentor.py +36 -0
  21. genai_otel/instrumentors/mistralai_instrumentor.py +119 -0
  22. genai_otel/instrumentors/ollama_instrumentor.py +83 -0
  23. genai_otel/instrumentors/openai_instrumentor.py +241 -0
  24. genai_otel/instrumentors/replicate_instrumentor.py +42 -0
  25. genai_otel/instrumentors/togetherai_instrumentor.py +42 -0
  26. genai_otel/instrumentors/vertexai_instrumentor.py +42 -0
  27. genai_otel/llm_pricing.json +589 -0
  28. genai_otel/logging_config.py +45 -0
  29. genai_otel/mcp_instrumentors/__init__.py +14 -0
  30. genai_otel/mcp_instrumentors/api_instrumentor.py +144 -0
  31. genai_otel/mcp_instrumentors/base.py +105 -0
  32. genai_otel/mcp_instrumentors/database_instrumentor.py +336 -0
  33. genai_otel/mcp_instrumentors/kafka_instrumentor.py +31 -0
  34. genai_otel/mcp_instrumentors/manager.py +139 -0
  35. genai_otel/mcp_instrumentors/redis_instrumentor.py +31 -0
  36. genai_otel/mcp_instrumentors/vector_db_instrumentor.py +265 -0
  37. genai_otel/metrics.py +148 -0
  38. genai_otel/py.typed +2 -0
  39. genai_otel_instrument-0.1.1.dev0.dist-info/METADATA +463 -0
  40. genai_otel_instrument-0.1.1.dev0.dist-info/RECORD +44 -0
  41. genai_otel_instrument-0.1.1.dev0.dist-info/WHEEL +5 -0
  42. genai_otel_instrument-0.1.1.dev0.dist-info/entry_points.txt +2 -0
  43. genai_otel_instrument-0.1.1.dev0.dist-info/licenses/LICENSE +201 -0
  44. genai_otel_instrument-0.1.1.dev0.dist-info/top_level.txt +1 -0
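
The six hunks below cover the provider instrumentors for Groq, HuggingFace, LangChain, LlamaIndex, Mistral AI, and Ollama. Each follows the same shape: import OTelConfig and BaseInstrumentor, detect the target library, and monkey-patch its entry points. A minimal manual-wiring sketch, assuming OTelConfig is default-constructible (its constructor is not shown in this diff; the package's auto_instrument.py and CLI entry point presumably handle this for you):

    # Hedged sketch -- OTelConfig() arguments are an assumption, not shown in this diff.
    from genai_otel.config import OTelConfig
    from genai_otel.instrumentors.groq_instrumentor import GroqInstrumentor

    config = OTelConfig()                  # assumed default-constructible
    GroqInstrumentor().instrument(config)  # patches groq.Groq.__init__ if groq is installed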
@@ -0,0 +1,106 @@
+ """OpenTelemetry instrumentor for the Groq SDK.
+
+ This instrumentor automatically traces chat completion calls to Groq models,
+ capturing relevant attributes such as the model name and token usage.
+ """
+
+ import logging
+ from typing import Dict, Optional
+
+ from ..config import OTelConfig
+ from .base import BaseInstrumentor
+
+ logger = logging.getLogger(__name__)
+
+
+ class GroqInstrumentor(BaseInstrumentor):
+     """Instrumentor for Groq"""
+
+     def __init__(self):
+         """Initialize the instrumentor."""
+         super().__init__()
+         self._groq_available = False
+         self._check_availability()
+
+     def _check_availability(self):
+         """Check if the Groq library is available."""
+         try:
+             import groq
+
+             self._groq_available = True
+             logger.debug("Groq library detected and available for instrumentation")
+         except ImportError:
+             logger.debug("Groq library not installed, instrumentation will be skipped")
+             self._groq_available = False
+
+     def instrument(self, config: OTelConfig):
+         """Instrument the Groq SDK if available.
+
+         Args:
+             config (OTelConfig): The OpenTelemetry configuration object.
+         """
+         if not self._groq_available:
+             logger.debug("Skipping Groq instrumentation - library not available")
+             return
+
+         self.config = config
+
+         try:
+             import groq
+
+             original_init = groq.Groq.__init__
+
+             def wrapped_init(instance, *args, **kwargs):
+                 original_init(instance, *args, **kwargs)
+                 self._instrument_client(instance)
+                 # No return value: __init__ must return None; the release's
+                 # original `return instance` would raise TypeError at construction.
+
+             groq.Groq.__init__ = wrapped_init
+             self._instrumented = True
+             logger.info("Groq instrumentation enabled")
+
+         except Exception as e:
+             logger.error("Failed to instrument Groq: %s", e, exc_info=True)
+             if config.fail_on_error:
+                 raise
+
+     def _instrument_client(self, client):
+         """Instrument Groq client methods.
+
+         Args:
+             client: The Groq client instance to instrument.
+         """
+         original_create = client.chat.completions.create
+
+         def wrapped_create(*args, **kwargs):
+             with self.tracer.start_as_current_span("groq.chat.completions") as span:
+                 model = kwargs.get("model", "unknown")
+
+                 span.set_attribute("gen_ai.system", "groq")
+                 span.set_attribute("gen_ai.request.model", model)
+
+                 if self.request_counter:
+                     self.request_counter.add(1, {"model": model, "provider": "groq"})
+
+                 result = original_create(*args, **kwargs)
+                 self._record_result_metrics(span, result, 0)
+                 return result
+
+         client.chat.completions.create = wrapped_create
+
+     def _extract_usage(self, result) -> Optional[Dict[str, int]]:
+         """Extract token usage from a Groq response.
+
+         Args:
+             result: The API response object.
+
+         Returns:
+             Optional[Dict[str, int]]: Dictionary with token counts or None.
+         """
+         if hasattr(result, "usage"):
+             return {
+                 "prompt_tokens": result.usage.prompt_tokens,
+                 "completion_tokens": result.usage.completion_tokens,
+                 "total_tokens": result.usage.total_tokens,
+             }
+         return None
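
A note on the __init__ wrap above: because the patch happens at class level, every groq.Groq() constructed after instrument() runs gets instrumented, and the wrapper must return None. A dependency-free sketch of the same pattern:

    # Standalone sketch of the __init__-wrapping technique; FakeClient is hypothetical.
    class FakeClient:
        def __init__(self, api_key):
            self.api_key = api_key

    _original_init = FakeClient.__init__

    def _wrapped_init(instance, *args, **kwargs):
        _original_init(instance, *args, **kwargs)  # run the real constructor first
        print("instrumenting new client")          # stand-in for _instrument_client()
        # no return statement: __init__ must return None

    FakeClient.__init__ = _wrapped_init
    FakeClient(api_key="dummy")                    # prints "instrumenting new client"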
@@ -0,0 +1,97 @@
+ """OpenTelemetry instrumentor for the HuggingFace Transformers library.
+
+ This instrumentor automatically traces calls made through HuggingFace pipelines,
+ capturing relevant attributes such as the model name and task type.
+ """
+
+ import logging
+ from typing import Dict, Optional
+
+ from ..config import OTelConfig
+ from .base import BaseInstrumentor
+
+ logger = logging.getLogger(__name__)
+
+
+ class HuggingFaceInstrumentor(BaseInstrumentor):
+     """Instrumentor for HuggingFace Transformers"""
+
+     def __init__(self):
+         """Initialize the instrumentor."""
+         super().__init__()
+         self._transformers_available = False
+         self._check_availability()
+
+     def _check_availability(self):
+         """Check if Transformers library is available."""
+         try:
+             import transformers
+
+             self._transformers_available = True
+             logger.debug("Transformers library detected and available for instrumentation")
+         except ImportError:
+             logger.debug("Transformers library not installed, instrumentation will be skipped")
+             self._transformers_available = False
+
+     def instrument(self, config: OTelConfig):
+         """Instrument HuggingFace pipelines if available."""
+         self.config = config
+
+         if not self._transformers_available:
+             return
+
+         try:
+             import importlib
+
+             transformers_module = importlib.import_module("transformers")
+             original_pipeline = transformers_module.pipeline
+
+             # Capture self reference for use in nested classes
+             instrumentor = self
+
+             def wrapped_pipeline(*args, **kwargs):
+                 pipe = original_pipeline(*args, **kwargs)
+
+                 class WrappedPipeline:
+                     def __init__(self, original_pipe):
+                         self._original_pipe = original_pipe
+
+                     def __call__(self, *call_args, **call_kwargs):
+                         with instrumentor.tracer.start_as_current_span(
+                             "huggingface.pipeline"
+                         ) as span:
+                             task = getattr(self._original_pipe, "task", "unknown")
+                             model = getattr(
+                                 getattr(self._original_pipe, "model", None),
+                                 "name_or_path",
+                                 "unknown",
+                             )
+
+                             span.set_attribute("gen_ai.system", "huggingface")
+                             span.set_attribute("gen_ai.request.model", model)
+                             span.set_attribute("huggingface.task", task)
+
+                             if instrumentor.request_counter:
+                                 instrumentor.request_counter.add(
+                                     1, {"model": model, "provider": "huggingface"}
+                                 )
+
+                             result = self._original_pipe(*call_args, **call_kwargs)
+
+                             # The context manager ends the span on exit; the
+                             # release's extra manual span.end() ended it twice.
+                             return result
+
+                     def __getattr__(self, name):
+                         # Delegate all other attribute access to the original pipe
+                         return getattr(self._original_pipe, name)
+
+                 return WrappedPipeline(pipe)
+
+             transformers_module.pipeline = wrapped_pipeline
+             logger.info("HuggingFace instrumentation enabled")
+
+         except ImportError:
+             logger.debug("Skipping HuggingFace instrumentation - import failed")
+
+     def _extract_usage(self, result) -> Optional[Dict[str, int]]:
+         return None
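
Since transformers.pipeline returns an object that callers both invoke and introspect, the wrapper above intercepts __call__ while __getattr__ delegates everything else. A dependency-free sketch of that proxy pattern:

    # Standalone sketch; CallableProxy and greet are illustrative names.
    class CallableProxy:
        def __init__(self, target):
            self._target = target

        def __call__(self, *args, **kwargs):
            print("span would start here")  # tracing hook
            return self._target(*args, **kwargs)

        def __getattr__(self, name):
            # Only consulted for names missing on the proxy itself,
            # so self._target above does not recurse.
            return getattr(self._target, name)

    def greet(name):
        return f"hello {name}"

    greet.task = "greeting"

    proxy = CallableProxy(greet)
    print(proxy("ada"))   # interception: hook line, then "hello ada"
    print(proxy.task)     # delegation: "greeting"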
@@ -0,0 +1,75 @@
+ """OpenTelemetry instrumentor for the LangChain framework.
+
+ This instrumentor automatically traces various components within LangChain,
+ including chains and agents, capturing relevant attributes for observability.
+ """
+
+ import logging
+ from typing import Dict, Optional
+
+ from ..config import OTelConfig
+ from .base import BaseInstrumentor
+
+ logger = logging.getLogger(__name__)
+
+
+ class LangChainInstrumentor(BaseInstrumentor):
+     """Instrumentor for LangChain"""
+
+     def __init__(self):
+         """Initialize the instrumentor."""
+         super().__init__()
+         self._langchain_available = False
+         self._check_availability()
+
+     def _check_availability(self):
+         """Check if langchain library is available."""
+         try:
+             import langchain
+
+             self._langchain_available = True
+             logger.debug("langchain library detected and available for instrumentation")
+         except ImportError:
+             logger.debug("langchain library not installed, instrumentation will be skipped")
+             self._langchain_available = False
+
+     def instrument(self, config: OTelConfig):
+         """Instrument langchain if available."""
+         if not self._langchain_available:
+             logger.debug("Skipping instrumentation - library not available")
+             return
+
+         self.config = config
+         try:
+             from langchain.agents.agent import AgentExecutor
+             from langchain.chains.base import Chain
+
+             # Instrument Chains
+             original_call = Chain.__call__
+
+             def wrapped_call(instance, *args, **kwargs):
+                 chain_type = instance.__class__.__name__
+                 with self.tracer.start_as_current_span(f"langchain.chain.{chain_type}") as span:
+                     span.set_attribute("langchain.chain.type", chain_type)
+                     result = original_call(instance, *args, **kwargs)
+                     return result
+
+             Chain.__call__ = wrapped_call
+
+             # Instrument Agents
+             original_agent_call = AgentExecutor.__call__
+
+             def wrapped_agent_call(instance, *args, **kwargs):
+                 with self.tracer.start_as_current_span("langchain.agent.execute") as span:
+                     # instance.agent is an object, not a dict, so read its
+                     # name via getattr rather than the release's .get() call.
+                     agent_name = getattr(getattr(instance, "agent", None), "name", "unknown")
+                     span.set_attribute("langchain.agent.name", agent_name)
+                     result = original_agent_call(instance, *args, **kwargs)
+                     return result
+
+             AgentExecutor.__call__ = wrapped_agent_call
+
+         except ImportError:
+             logger.debug("Skipping LangChain instrumentation - import failed")
+
+     def _extract_usage(self, result) -> Optional[Dict[str, int]]:
+         return None
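
The agent_name lookup is the subtle line here: the executor's agent attribute is an object, so chaining getattr calls with None/default fallbacks reads a name without risking AttributeError. For example:

    # Standalone sketch; Agent and Executor are illustrative stand-ins.
    class Agent:
        name = "planner"

    class Executor:
        def __init__(self, agent=None):
            self.agent = agent

    # Works whether or not the agent is present:
    print(getattr(getattr(Executor(Agent()), "agent", None), "name", "unknown"))  # planner
    print(getattr(getattr(Executor(), "agent", None), "name", "unknown"))         # unknown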
@@ -0,0 +1,36 @@
+ """OpenTelemetry instrumentor for the LlamaIndex framework.
+
+ This instrumentor automatically traces query engine operations within LlamaIndex,
+ capturing relevant attributes such as the query text.
+ """
+
+ from typing import Dict, Optional
+
+ from ..config import OTelConfig
+ from .base import BaseInstrumentor
+
+
+ class LlamaIndexInstrumentor(BaseInstrumentor):
+     """Instrumentor for LlamaIndex"""
+
+     def instrument(self, config: OTelConfig):
+         self.config = config
+         try:
+             from llama_index.core.query_engine import BaseQueryEngine
+
+             original_query = BaseQueryEngine.query
+
+             def wrapped_query(instance, *args, **kwargs):
+                 with self.tracer.start_as_current_span("llamaindex.query_engine") as span:
+                     query_text = args[0] if args else kwargs.get("query_str", "")
+                     span.set_attribute("llamaindex.query", str(query_text)[:200])
+                     result = original_query(instance, *args, **kwargs)
+                     return result
+
+             BaseQueryEngine.query = wrapped_query
+
+         except ImportError:
+             pass
+
+     def _extract_usage(self, result) -> Optional[Dict[str, int]]:
+         return None
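
The same class-level patch works for any method that receives the instance first, and the 200-character slice keeps the span attribute bounded for long queries. A dependency-free sketch:

    # Standalone sketch; Engine is an illustrative stand-in for BaseQueryEngine.
    class Engine:
        def query(self, question):
            return f"answer to: {question}"

    _original_query = Engine.query

    def wrapped_query(instance, *args, **kwargs):
        question = args[0] if args else kwargs.get("query_str", "")
        print("llamaindex.query =", str(question)[:200])  # bounded attribute value
        return _original_query(instance, *args, **kwargs)

    Engine.query = wrapped_query
    print(Engine().query("What does this package do?"))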
@@ -0,0 +1,119 @@
+ """OpenTelemetry instrumentor for the Mistral AI SDK (v1.0+).
+
+ This instrumentor automatically traces chat calls to Mistral AI models,
+ capturing relevant attributes such as the model name and token usage.
+
+ Supports Mistral SDK v1.0+ with the new API structure:
+ - Mistral.chat.complete()
+ - Mistral.chat.stream()
+ - Mistral.embeddings.create()
+ """
+
+ import logging
+ from typing import Any, Dict, Optional
+
+ from ..config import OTelConfig
+ from .base import BaseInstrumentor
+
+ logger = logging.getLogger(__name__)
+
+
+ class MistralAIInstrumentor(BaseInstrumentor):
+     """Instrumentor for Mistral AI SDK v1.0+"""
+
+     def instrument(self, config: OTelConfig):
+         self.config = config
+         try:
+             import wrapt
+             from mistralai import Mistral
+
+             # Wrap the Mistral client __init__ to instrument each instance
+             original_init = Mistral.__init__
+
+             def wrapped_init(wrapped, instance, args, kwargs):
+                 result = wrapped(*args, **kwargs)
+                 self._instrument_client(instance)
+                 return result
+
+             Mistral.__init__ = wrapt.FunctionWrapper(original_init, wrapped_init)
+             logger.info("MistralAI instrumentation enabled (v1.0+ SDK)")
+
+         except ImportError:
+             logger.warning("mistralai package not available, skipping instrumentation")
+         except Exception as e:
+             logger.error("Failed to instrument mistralai: %s", e, exc_info=True)
+
+     def _instrument_client(self, client):
+         """Instrument Mistral client instance methods."""
+         # Instrument chat.complete()
+         if hasattr(client, "chat") and hasattr(client.chat, "complete"):
+             original_complete = client.chat.complete
+             instrumented_complete = self.create_span_wrapper(
+                 span_name="mistralai.chat.complete",
+                 extract_attributes=self._extract_chat_attributes,
+             )(original_complete)
+             client.chat.complete = instrumented_complete
+
+         # Instrument chat.stream()
+         if hasattr(client, "chat") and hasattr(client.chat, "stream"):
+             original_stream = client.chat.stream
+             instrumented_stream = self.create_span_wrapper(
+                 span_name="mistralai.chat.stream",
+                 extract_attributes=self._extract_chat_attributes,
+             )(original_stream)
+             client.chat.stream = instrumented_stream
+
+         # Instrument embeddings.create()
+         if hasattr(client, "embeddings") and hasattr(client.embeddings, "create"):
+             original_embeddings = client.embeddings.create
+             instrumented_embeddings = self.create_span_wrapper(
+                 span_name="mistralai.embeddings.create",
+                 extract_attributes=self._extract_embeddings_attributes,
+             )(original_embeddings)
+             client.embeddings.create = instrumented_embeddings
+
+     def _extract_chat_attributes(self, instance: Any, args: Any, kwargs: Any) -> Dict[str, Any]:
+         """Extract attributes from a chat.complete() or chat.stream() call."""
+         model = kwargs.get("model", "unknown")
+         attributes = {
+             "gen_ai.system": "mistralai",
+             "gen_ai.request.model": model,
+             "gen_ai.request.type": "chat",
+         }
+
+         # Add optional parameters
+         if "temperature" in kwargs and kwargs["temperature"] is not None:
+             attributes["gen_ai.request.temperature"] = kwargs["temperature"]
+         if "top_p" in kwargs and kwargs["top_p"] is not None:
+             attributes["gen_ai.request.top_p"] = kwargs["top_p"]
+         if "max_tokens" in kwargs and kwargs["max_tokens"] is not None:
+             attributes["gen_ai.request.max_tokens"] = kwargs["max_tokens"]
+
+         return attributes
+
+     def _extract_embeddings_attributes(
+         self, instance: Any, args: Any, kwargs: Any
+     ) -> Dict[str, Any]:
+         """Extract attributes from an embeddings.create() call."""
+         model = kwargs.get("model", "mistral-embed")
+         attributes = {
+             "gen_ai.system": "mistralai",
+             "gen_ai.request.model": model,
+             "gen_ai.request.type": "embedding",
+         }
+         return attributes
+
+     def _extract_usage(self, result) -> Optional[Dict[str, int]]:
+         """Extract usage information from a Mistral AI response."""
+         try:
+             if hasattr(result, "usage"):
+                 usage = result.usage
+                 return {
+                     "prompt_tokens": getattr(usage, "prompt_tokens", 0),
+                     "completion_tokens": getattr(usage, "completion_tokens", 0),
+                     "total_tokens": getattr(usage, "total_tokens", 0),
+                 }
+         except Exception as e:
+             logger.debug("Could not extract usage from MistralAI response: %s", e)
+
+         return None
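
Unlike the hand-rolled wrappers elsewhere in this package, this file uses wrapt, whose wrappers receive (wrapped, instance, args, kwargs) with wrapped already bound through the descriptor protocol. A standalone sketch of the same __init__ wrap (requires pip install wrapt; Service is an illustrative stand-in):

    import wrapt

    class Service:
        def __init__(self, endpoint):
            self.endpoint = endpoint

    def _on_init(wrapped, instance, args, kwargs):
        result = wrapped(*args, **kwargs)  # wrapped is the bound original __init__
        print(f"instrumenting client for {instance.endpoint}")
        return result

    Service.__init__ = wrapt.FunctionWrapper(Service.__init__, _on_init)
    Service("https://example.invalid")     # prints the instrumenting line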
@@ -0,0 +1,83 @@
+ """OpenTelemetry instrumentor for the Ollama library.
+
+ This instrumentor automatically traces calls to Ollama models for both
+ generation and chat functionalities, capturing relevant attributes such as
+ the model name.
+ """
+
+ import logging
+ from typing import Dict, Optional
+
+ from ..config import OTelConfig
+ from .base import BaseInstrumentor
+
+ logger = logging.getLogger(__name__)
+
+
+ class OllamaInstrumentor(BaseInstrumentor):
+     """Instrumentor for Ollama"""
+
+     def __init__(self):
+         """Initialize the instrumentor."""
+         super().__init__()
+         self._ollama_available = False
+         self._ollama_module = None
+         self._original_generate = None  # original function, stored in instrument()
+         self._original_chat = None  # original function, stored in instrument()
+         self._check_availability()
+
+     def _check_availability(self):
+         """Check if Ollama library is available."""
+         try:
+             import ollama
+
+             self._ollama_available = True
+             self._ollama_module = ollama
+             logger.debug("Ollama library detected and available for instrumentation")
+         except ImportError:
+             logger.debug("Ollama library not installed, instrumentation will be skipped")
+             self._ollama_available = False
+             self._ollama_module = None
+
+     def instrument(self, config: OTelConfig):
+         """Instrument the Ollama library."""
+         self.config = config
+
+         if not self._ollama_available or self._ollama_module is None:
+             return
+
+         # Store original methods
+         self._original_generate = self._ollama_module.generate
+         self._original_chat = self._ollama_module.chat
+
+         def wrapped_generate(*args, **kwargs):
+             with self.tracer.start_as_current_span("ollama.generate") as span:
+                 model = kwargs.get("model", "unknown")
+
+                 span.set_attribute("gen_ai.system", "ollama")
+                 span.set_attribute("gen_ai.request.model", model)
+
+                 if self.request_counter:
+                     self.request_counter.add(1, {"model": model, "provider": "ollama"})
+
+                 result = self._original_generate(*args, **kwargs)
+                 return result
+
+         def wrapped_chat(*args, **kwargs):
+             with self.tracer.start_as_current_span("ollama.chat") as span:
+                 model = kwargs.get("model", "unknown")
+
+                 span.set_attribute("gen_ai.system", "ollama")
+                 span.set_attribute("gen_ai.request.model", model)
+
+                 if self.request_counter:
+                     self.request_counter.add(1, {"model": model, "provider": "ollama"})
+
+                 result = self._original_chat(*args, **kwargs)
+                 return result
+
+         self._ollama_module.generate = wrapped_generate
+         self._ollama_module.chat = wrapped_chat
+
+     def _extract_usage(self, result) -> Optional[Dict[str, int]]:
+         return None
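
Patching module-level functions, as done here for ollama.generate and ollama.chat, is the simplest technique in this diff, and keeping references to the originals (the _original_generate/_original_chat attributes above) also makes restoration possible. A dependency-free sketch:

    # Standalone sketch of module-level patching with restore support.
    import math

    _original_sqrt = math.sqrt

    def traced_sqrt(x):
        print(f"calling sqrt({x})")  # the span/counter logic would go here
        return _original_sqrt(x)

    math.sqrt = traced_sqrt
    print(math.sqrt(9.0))            # trace line, then 3.0
    math.sqrt = _original_sqrt       # restore, mirroring the stored originals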