genai-otel-instrument 0.1.4.dev0__py3-none-any.whl → 0.1.7.dev0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of genai-otel-instrument might be problematic. Click here for more details.

@@ -1,75 +1,75 @@
1
- """OpenTelemetry instrumentor for the LangChain framework.
2
-
3
- This instrumentor automatically traces various components within LangChain,
4
- including chains and agents, capturing relevant attributes for observability.
5
- """
6
-
7
- import logging
8
- from typing import Dict, Optional
9
-
10
- from ..config import OTelConfig
11
- from .base import BaseInstrumentor
12
-
13
- logger = logging.getLogger(__name__)
14
-
15
-
16
- class LangChainInstrumentor(BaseInstrumentor):
17
- """Instrumentor for LangChain"""
18
-
19
- def __init__(self):
20
- """Initialize the instrumentor."""
21
- super().__init__()
22
- self._langchain_available = False
23
- self._check_availability()
24
-
25
- def _check_availability(self):
26
- """Check if langchain library is available."""
27
- try:
28
- import langchain
29
-
30
- self._langchain_available = True
31
- logger.debug("langchain library detected and available for instrumentation")
32
- except ImportError:
33
- logger.debug("langchain library not installed, instrumentation will be skipped")
34
- self._langchain_available = False
35
-
36
- def instrument(self, config: OTelConfig):
37
- """Instrument langchain available if available."""
38
- if not self._langchain_available:
39
- logger.debug("Skipping instrumentation - library not available")
40
- return
41
-
42
- self.config = config
43
- try:
44
- from langchain.agents.agent import AgentExecutor
45
- from langchain.chains.base import Chain
46
-
47
- # Instrument Chains
48
- original_call = Chain.__call__
49
-
50
- def wrapped_call(instance, *args, **kwargs):
51
- chain_type = instance.__class__.__name__
52
- with self.tracer.start_as_current_span(f"langchain.chain.{chain_type}") as span:
53
- span.set_attribute("langchain.chain.type", chain_type)
54
- result = original_call(instance, *args, **kwargs)
55
- return result
56
-
57
- Chain.__call__ = wrapped_call
58
-
59
- # Instrument Agents
60
- original_agent_call = AgentExecutor.__call__
61
-
62
- def wrapped_agent_call(instance, *args, **kwargs):
63
- with self.tracer.start_as_current_span("langchain.agent.execute") as span:
64
- agent_name = getattr(instance, "agent", {}).get("name", "unknown")
65
- span.set_attribute("langchain.agent.name", agent_name)
66
- result = original_agent_call(instance, *args, **kwargs)
67
- return result
68
-
69
- AgentExecutor.__call__ = wrapped_agent_call
70
-
71
- except ImportError:
72
- pass
73
-
74
- def _extract_usage(self, result) -> Optional[Dict[str, int]]:
75
- return None
1
+ """OpenTelemetry instrumentor for the LangChain framework.
2
+
3
+ This instrumentor automatically traces various components within LangChain,
4
+ including chains and agents, capturing relevant attributes for observability.
5
+ """
6
+
7
+ import logging
8
+ from typing import Dict, Optional
9
+
10
+ from ..config import OTelConfig
11
+ from .base import BaseInstrumentor
12
+
13
+ logger = logging.getLogger(__name__)
14
+
15
+
16
class LangChainInstrumentor(BaseInstrumentor):
    """Instrumentor for LangChain.

    Patches ``Chain.__call__`` and ``AgentExecutor.__call__`` so each chain or
    agent invocation runs inside an OpenTelemetry span carrying identifying
    attributes (chain type, agent name).
    """

    def __init__(self):
        """Initialize the instrumentor and detect langchain availability."""
        super().__init__()
        self._langchain_available = False
        self._check_availability()

    def _check_availability(self):
        """Check if the langchain library is importable; set the flag accordingly."""
        try:
            import langchain  # noqa: F401  # availability probe only

            self._langchain_available = True
            logger.debug("langchain library detected and available for instrumentation")
        except ImportError:
            logger.debug("langchain library not installed, instrumentation will be skipped")
            self._langchain_available = False

    def instrument(self, config: OTelConfig):
        """Instrument langchain if available.

        Args:
            config: OTel configuration stored on the instance for later use.
        """
        if not self._langchain_available:
            logger.debug("Skipping instrumentation - library not available")
            return

        self.config = config
        try:
            from langchain.agents.agent import AgentExecutor
            from langchain.chains.base import Chain

            # Guard against double-patching (mirrors the Mistral instrumentor's
            # `_genai_otel_instrumented` marker) so repeated instrument() calls
            # do not stack wrappers and double-count spans.
            if getattr(Chain, "_genai_otel_instrumented", False):
                return

            # Instrument Chains
            original_call = Chain.__call__

            def wrapped_call(instance, *args, **kwargs):
                chain_type = instance.__class__.__name__
                with self.tracer.start_as_current_span(f"langchain.chain.{chain_type}") as span:
                    span.set_attribute("langchain.chain.type", chain_type)
                    return original_call(instance, *args, **kwargs)

            Chain.__call__ = wrapped_call

            # Instrument Agents
            original_agent_call = AgentExecutor.__call__

            def wrapped_agent_call(instance, *args, **kwargs):
                with self.tracer.start_as_current_span("langchain.agent.execute") as span:
                    # BUG FIX: `instance.agent` is an object, not a dict, so the
                    # previous `getattr(instance, "agent", {}).get(...)` raised
                    # AttributeError whenever the agent attribute existed.
                    agent = getattr(instance, "agent", None)
                    agent_name = getattr(agent, "name", None) or "unknown"
                    span.set_attribute("langchain.agent.name", agent_name)
                    return original_agent_call(instance, *args, **kwargs)

            AgentExecutor.__call__ = wrapped_agent_call
            Chain._genai_otel_instrumented = True

        except ImportError:
            # Submodules moved or partially installed; best-effort skip, but
            # leave a trace instead of the previous silent `pass`.
            logger.debug("langchain submodules unavailable; instrumentation skipped")

    def _extract_usage(self, result) -> Optional[Dict[str, int]]:
        """LangChain invocations carry no token usage; always return None."""
        return None
@@ -32,9 +32,8 @@ class MistralAIInstrumentor(BaseInstrumentor):
32
32
  # In Mistral SDK v1.0+, structure is:
33
33
  # - Mistral client has .chat and .embeddings properties
34
34
  # - These are bound methods that call internal APIs
35
-
36
35
  # Store original methods at module level before any instances are created
37
- if not hasattr(Mistral, '_genai_otel_instrumented'):
36
+ if not hasattr(Mistral, "_genai_otel_instrumented"):
38
37
  self._wrap_mistral_methods(Mistral, wrapt)
39
38
  Mistral._genai_otel_instrumented = True
40
39
  logger.info("MistralAI instrumentation enabled (v1.0+ SDK)")
@@ -54,29 +53,21 @@ class MistralAIInstrumentor(BaseInstrumentor):
54
53
  from mistralai.embeddings import Embeddings
55
54
 
56
55
  # Wrap Chat.complete method
57
- if hasattr(Chat, 'complete'):
56
+ if hasattr(Chat, "complete"):
58
57
  wrapt.wrap_function_wrapper(
59
- 'mistralai.chat',
60
- 'Chat.complete',
61
- self._wrap_chat_complete
58
+ "mistralai.chat", "Chat.complete", self._wrap_chat_complete
62
59
  )
63
60
  logger.debug("Wrapped Mistral Chat.complete")
64
61
 
65
62
  # Wrap Chat.stream method
66
- if hasattr(Chat, 'stream'):
67
- wrapt.wrap_function_wrapper(
68
- 'mistralai.chat',
69
- 'Chat.stream',
70
- self._wrap_chat_stream
71
- )
63
+ if hasattr(Chat, "stream"):
64
+ wrapt.wrap_function_wrapper("mistralai.chat", "Chat.stream", self._wrap_chat_stream)
72
65
  logger.debug("Wrapped Mistral Chat.stream")
73
66
 
74
67
  # Wrap Embeddings.create method
75
- if hasattr(Embeddings, 'create'):
68
+ if hasattr(Embeddings, "create"):
76
69
  wrapt.wrap_function_wrapper(
77
- 'mistralai.embeddings',
78
- 'Embeddings.create',
79
- self._wrap_embeddings_create
70
+ "mistralai.embeddings", "Embeddings.create", self._wrap_embeddings_create
80
71
  )
81
72
  logger.debug("Wrapped Mistral Embeddings.create")
82
73
 
@@ -140,15 +131,11 @@ class MistralAIInstrumentor(BaseInstrumentor):
140
131
  stream = wrapped(*args, **kwargs)
141
132
 
142
133
  # Wrap the stream with our tracking wrapper
143
- return self._StreamWrapper(
144
- stream, span, self, model, start_time, span_name
145
- )
134
+ return self._StreamWrapper(stream, span, self, model, start_time, span_name)
146
135
 
147
136
  except Exception as e:
148
137
  if self.error_counter:
149
- self.error_counter.add(
150
- 1, {"operation": span_name, "error.type": type(e).__name__}
151
- )
138
+ self.error_counter.add(1, {"operation": span_name, "error.type": type(e).__name__})
152
139
  span.record_exception(e)
153
140
  span.end()
154
141
  raise
@@ -240,10 +227,7 @@ class MistralAIInstrumentor(BaseInstrumentor):
240
227
 
241
228
  mock_response = MockResponse(self._usage)
242
229
  self._instrumentor._record_result_metrics(
243
- self._span,
244
- mock_response,
245
- self._start_time,
246
- {"model": self._model}
230
+ self._span, mock_response, self._start_time, {"model": self._model}
247
231
  )
248
232
 
249
233
  finally:
@@ -255,21 +239,21 @@ class MistralAIInstrumentor(BaseInstrumentor):
255
239
  """Process a streaming chunk to extract usage."""
256
240
  try:
257
241
  # Mistral streaming chunks have: data.choices[0].delta.content
258
- if hasattr(chunk, 'data'):
242
+ if hasattr(chunk, "data"):
259
243
  data = chunk.data
260
- if hasattr(data, 'choices') and len(data.choices) > 0:
244
+ if hasattr(data, "choices") and len(data.choices) > 0:
261
245
  delta = data.choices[0].delta
262
- if hasattr(delta, 'content') and delta.content:
246
+ if hasattr(delta, "content") and delta.content:
263
247
  self._response_text += delta.content
264
248
 
265
249
  # Extract usage if available on final chunk
266
- if hasattr(data, 'usage') and data.usage:
250
+ if hasattr(data, "usage") and data.usage:
267
251
  usage = data.usage
268
- if hasattr(usage, 'prompt_tokens'):
252
+ if hasattr(usage, "prompt_tokens"):
269
253
  self._usage["prompt_tokens"] = usage.prompt_tokens
270
- if hasattr(usage, 'completion_tokens'):
254
+ if hasattr(usage, "completion_tokens"):
271
255
  self._usage["completion_tokens"] = usage.completion_tokens
272
- if hasattr(usage, 'total_tokens'):
256
+ if hasattr(usage, "total_tokens"):
273
257
  self._usage["total_tokens"] = usage.total_tokens
274
258
 
275
259
  except Exception as e: