genai-otel-instrument 0.1.16__py3-none-any.whl → 0.1.17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of genai-otel-instrument might be problematic. Click here for more details.
- genai_otel/__version__.py +2 -2
- genai_otel/auto_instrument.py +3 -8
- genai_otel/config.py +17 -3
- genai_otel/instrumentors/langchain_instrumentor.py +267 -4
- {genai_otel_instrument-0.1.16.dist-info → genai_otel_instrument-0.1.17.dist-info}/METADATA +3 -2
- {genai_otel_instrument-0.1.16.dist-info → genai_otel_instrument-0.1.17.dist-info}/RECORD +10 -10
- {genai_otel_instrument-0.1.16.dist-info → genai_otel_instrument-0.1.17.dist-info}/WHEEL +0 -0
- {genai_otel_instrument-0.1.16.dist-info → genai_otel_instrument-0.1.17.dist-info}/entry_points.txt +0 -0
- {genai_otel_instrument-0.1.16.dist-info → genai_otel_instrument-0.1.17.dist-info}/licenses/LICENSE +0 -0
- {genai_otel_instrument-0.1.16.dist-info → genai_otel_instrument-0.1.17.dist-info}/top_level.txt +0 -0
genai_otel/__version__.py
CHANGED
|
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
|
|
|
28
28
|
commit_id: COMMIT_ID
|
|
29
29
|
__commit_id__: COMMIT_ID
|
|
30
30
|
|
|
31
|
-
__version__ = version = '0.1.16'
|
|
32
|
-
__version_tuple__ = version_tuple = (0, 1, 16)
|
|
31
|
+
__version__ = version = '0.1.17'
|
|
32
|
+
__version_tuple__ = version_tuple = (0, 1, 17)
|
|
33
33
|
|
|
34
34
|
__commit_id__ = commit_id = None
|
genai_otel/auto_instrument.py
CHANGED
|
@@ -186,13 +186,8 @@ def setup_auto_instrumentation(config: OTelConfig):
|
|
|
186
186
|
|
|
187
187
|
logger.debug(f"OTelConfig endpoint: {config.endpoint}")
|
|
188
188
|
if config.endpoint:
|
|
189
|
-
#
|
|
190
|
-
|
|
191
|
-
try:
|
|
192
|
-
timeout = float(timeout_str)
|
|
193
|
-
except (ValueError, TypeError):
|
|
194
|
-
logger.warning(f"Invalid timeout value '{timeout_str}', using default 10.0")
|
|
195
|
-
timeout = 10.0
|
|
189
|
+
# Use timeout from config (already validated as int)
|
|
190
|
+
timeout = config.exporter_timeout
|
|
196
191
|
|
|
197
192
|
# CRITICAL FIX: Set endpoint in environment variable so exporters can append correct paths
|
|
198
193
|
# The exporters only call _append_trace_path() when reading from env vars
|
|
@@ -221,7 +216,7 @@ def setup_auto_instrumentation(config: OTelConfig):
|
|
|
221
216
|
os.environ["OTEL_PYTHON_REQUESTS_EXCLUDED_URLS"] = ",".join(excluded_urls)
|
|
222
217
|
logger.info(f"Excluded OTLP endpoints from instrumentation: {base_url}")
|
|
223
218
|
|
|
224
|
-
# Set timeout in environment variable
|
|
219
|
+
# Set timeout in environment variable as integer string (OTLP exporters expect int)
|
|
225
220
|
os.environ["OTEL_EXPORTER_OTLP_TIMEOUT"] = str(timeout)
|
|
226
221
|
|
|
227
222
|
# Create exporters WITHOUT passing endpoint (let them read from env vars)
|
genai_otel/config.py
CHANGED
|
@@ -62,6 +62,22 @@ def _get_enabled_instrumentors() -> List[str]:
|
|
|
62
62
|
return DEFAULT_INSTRUMENTORS
|
|
63
63
|
|
|
64
64
|
|
|
65
|
+
def _get_exporter_timeout() -> int:
|
|
66
|
+
"""
|
|
67
|
+
Gets the OTLP exporter timeout from environment variable.
|
|
68
|
+
Returns default of 60 seconds if not set or invalid.
|
|
69
|
+
"""
|
|
70
|
+
timeout_str = os.getenv("OTEL_EXPORTER_OTLP_TIMEOUT", "60")
|
|
71
|
+
try:
|
|
72
|
+
return int(timeout_str)
|
|
73
|
+
except ValueError:
|
|
74
|
+
logger.warning(
|
|
75
|
+
f"Invalid timeout value '{timeout_str}' in OTEL_EXPORTER_OTLP_TIMEOUT. "
|
|
76
|
+
f"Using default of 60 seconds."
|
|
77
|
+
)
|
|
78
|
+
return 60
|
|
79
|
+
|
|
80
|
+
|
|
65
81
|
@dataclass
|
|
66
82
|
class OTelConfig:
|
|
67
83
|
"""Configuration for OpenTelemetry instrumentation.
|
|
@@ -97,9 +113,7 @@ class OTelConfig:
|
|
|
97
113
|
enable_co2_tracking: bool = field(
|
|
98
114
|
default_factory=lambda: os.getenv("GENAI_ENABLE_CO2_TRACKING", "false").lower() == "true"
|
|
99
115
|
)
|
|
100
|
-
exporter_timeout: float = field(
|
|
101
|
-
default_factory=lambda: float(os.getenv("OTEL_EXPORTER_OTLP_TIMEOUT", "60.0"))
|
|
102
|
-
)
|
|
116
|
+
exporter_timeout: int = field(default_factory=_get_exporter_timeout)
|
|
103
117
|
carbon_intensity: float = field(
|
|
104
118
|
default_factory=lambda: float(os.getenv("GENAI_CARBON_INTENSITY", "475.0"))
|
|
105
119
|
) # gCO2e/kWh
|
|
@@ -1,11 +1,13 @@
|
|
|
1
1
|
"""OpenTelemetry instrumentor for the LangChain framework.
|
|
2
2
|
|
|
3
3
|
This instrumentor automatically traces various components within LangChain,
|
|
4
|
-
including chains and
|
|
4
|
+
including chains, agents, and chat models, capturing relevant attributes for observability.
|
|
5
5
|
"""
|
|
6
6
|
|
|
7
|
+
import asyncio
|
|
8
|
+
import functools
|
|
7
9
|
import logging
|
|
8
|
-
from typing import Dict, Optional
|
|
10
|
+
from typing import Any, Dict, Optional
|
|
9
11
|
|
|
10
12
|
from ..config import OTelConfig
|
|
11
13
|
from .base import BaseInstrumentor
|
|
@@ -20,6 +22,7 @@ class LangChainInstrumentor(BaseInstrumentor):
|
|
|
20
22
|
"""Initialize the instrumentor."""
|
|
21
23
|
super().__init__()
|
|
22
24
|
self._langchain_available = False
|
|
25
|
+
self._langchain_core_available = False
|
|
23
26
|
self._check_availability()
|
|
24
27
|
|
|
25
28
|
def _check_availability(self):
|
|
@@ -33,13 +36,35 @@ class LangChainInstrumentor(BaseInstrumentor):
|
|
|
33
36
|
logger.debug("langchain library not installed, instrumentation will be skipped")
|
|
34
37
|
self._langchain_available = False
|
|
35
38
|
|
|
39
|
+
# Check for langchain_core (required for chat model instrumentation)
|
|
40
|
+
try:
|
|
41
|
+
import langchain_core
|
|
42
|
+
|
|
43
|
+
self._langchain_core_available = True
|
|
44
|
+
logger.debug("langchain_core library detected and available for instrumentation")
|
|
45
|
+
except ImportError:
|
|
46
|
+
logger.debug(
|
|
47
|
+
"langchain_core library not installed, chat model instrumentation will be skipped"
|
|
48
|
+
)
|
|
49
|
+
self._langchain_core_available = False
|
|
50
|
+
|
|
36
51
|
def instrument(self, config: OTelConfig):
|
|
37
|
-
"""Instrument
|
|
52
|
+
"""Instrument langchain components if available."""
|
|
38
53
|
if not self._langchain_available:
|
|
39
54
|
logger.debug("Skipping instrumentation - library not available")
|
|
40
55
|
return
|
|
41
56
|
|
|
42
57
|
self.config = config
|
|
58
|
+
|
|
59
|
+
# Instrument chains and agents
|
|
60
|
+
self._instrument_chains_and_agents()
|
|
61
|
+
|
|
62
|
+
# Instrument chat models if langchain_core is available
|
|
63
|
+
if self._langchain_core_available:
|
|
64
|
+
self._instrument_chat_models()
|
|
65
|
+
|
|
66
|
+
def _instrument_chains_and_agents(self):
|
|
67
|
+
"""Instrument LangChain chains and agents."""
|
|
43
68
|
try:
|
|
44
69
|
from langchain.agents.agent import AgentExecutor
|
|
45
70
|
from langchain.chains.base import Chain
|
|
@@ -67,9 +92,247 @@ class LangChainInstrumentor(BaseInstrumentor):
|
|
|
67
92
|
return result
|
|
68
93
|
|
|
69
94
|
AgentExecutor.__call__ = wrapped_agent_call
|
|
95
|
+
logger.debug("Chains and agents instrumentation completed")
|
|
70
96
|
|
|
71
97
|
except ImportError:
|
|
72
|
-
|
|
98
|
+
logger.debug("Could not import chains or agents, skipping instrumentation")
|
|
99
|
+
|
|
100
|
+
def _instrument_chat_models(self):
|
|
101
|
+
"""Instrument LangChain chat models."""
|
|
102
|
+
try:
|
|
103
|
+
from langchain_core.language_models.chat_models import BaseChatModel
|
|
104
|
+
|
|
105
|
+
# Instrument invoke method
|
|
106
|
+
original_invoke = BaseChatModel.invoke
|
|
107
|
+
|
|
108
|
+
@functools.wraps(original_invoke)
|
|
109
|
+
def wrapped_invoke(instance, *args, **kwargs):
|
|
110
|
+
model_name = self._get_model_name(instance)
|
|
111
|
+
with self.tracer.start_as_current_span("langchain.chat_model.invoke") as span:
|
|
112
|
+
self._set_chat_attributes(span, instance, args, kwargs, model_name)
|
|
113
|
+
|
|
114
|
+
result = original_invoke(instance, *args, **kwargs)
|
|
115
|
+
|
|
116
|
+
# Extract and record usage information
|
|
117
|
+
self._extract_and_record_usage(span, result, model_name)
|
|
118
|
+
|
|
119
|
+
return result
|
|
120
|
+
|
|
121
|
+
BaseChatModel.invoke = wrapped_invoke
|
|
122
|
+
|
|
123
|
+
# Instrument ainvoke (async invoke) method
|
|
124
|
+
original_ainvoke = BaseChatModel.ainvoke
|
|
125
|
+
|
|
126
|
+
@functools.wraps(original_ainvoke)
|
|
127
|
+
async def wrapped_ainvoke(instance, *args, **kwargs):
|
|
128
|
+
model_name = self._get_model_name(instance)
|
|
129
|
+
with self.tracer.start_as_current_span("langchain.chat_model.ainvoke") as span:
|
|
130
|
+
self._set_chat_attributes(span, instance, args, kwargs, model_name)
|
|
131
|
+
|
|
132
|
+
result = await original_ainvoke(instance, *args, **kwargs)
|
|
133
|
+
|
|
134
|
+
# Extract and record usage information
|
|
135
|
+
self._extract_and_record_usage(span, result, model_name)
|
|
136
|
+
|
|
137
|
+
return result
|
|
138
|
+
|
|
139
|
+
BaseChatModel.ainvoke = wrapped_ainvoke
|
|
140
|
+
|
|
141
|
+
# Instrument batch method
|
|
142
|
+
original_batch = BaseChatModel.batch
|
|
143
|
+
|
|
144
|
+
@functools.wraps(original_batch)
|
|
145
|
+
def wrapped_batch(instance, *args, **kwargs):
|
|
146
|
+
model_name = self._get_model_name(instance)
|
|
147
|
+
with self.tracer.start_as_current_span("langchain.chat_model.batch") as span:
|
|
148
|
+
span.set_attribute("langchain.chat_model.name", model_name)
|
|
149
|
+
span.set_attribute("langchain.chat_model.operation", "batch")
|
|
150
|
+
|
|
151
|
+
# Get batch size
|
|
152
|
+
if args and len(args) > 0:
|
|
153
|
+
batch_size = len(args[0]) if hasattr(args[0], "__len__") else 1
|
|
154
|
+
span.set_attribute("langchain.chat_model.batch_size", batch_size)
|
|
155
|
+
|
|
156
|
+
result = original_batch(instance, *args, **kwargs)
|
|
157
|
+
|
|
158
|
+
return result
|
|
159
|
+
|
|
160
|
+
BaseChatModel.batch = wrapped_batch
|
|
161
|
+
|
|
162
|
+
# Instrument abatch (async batch) method
|
|
163
|
+
original_abatch = BaseChatModel.abatch
|
|
164
|
+
|
|
165
|
+
@functools.wraps(original_abatch)
|
|
166
|
+
async def wrapped_abatch(instance, *args, **kwargs):
|
|
167
|
+
model_name = self._get_model_name(instance)
|
|
168
|
+
with self.tracer.start_as_current_span("langchain.chat_model.abatch") as span:
|
|
169
|
+
span.set_attribute("langchain.chat_model.name", model_name)
|
|
170
|
+
span.set_attribute("langchain.chat_model.operation", "abatch")
|
|
171
|
+
|
|
172
|
+
# Get batch size
|
|
173
|
+
if args and len(args) > 0:
|
|
174
|
+
batch_size = len(args[0]) if hasattr(args[0], "__len__") else 1
|
|
175
|
+
span.set_attribute("langchain.chat_model.batch_size", batch_size)
|
|
176
|
+
|
|
177
|
+
result = await original_abatch(instance, *args, **kwargs)
|
|
178
|
+
|
|
179
|
+
return result
|
|
180
|
+
|
|
181
|
+
BaseChatModel.abatch = wrapped_abatch
|
|
182
|
+
|
|
183
|
+
logger.info("LangChain chat models instrumentation completed")
|
|
184
|
+
|
|
185
|
+
except ImportError as e:
|
|
186
|
+
logger.debug(f"Could not import langchain_core chat models: {e}")
|
|
187
|
+
except Exception as e:
|
|
188
|
+
logger.error(f"Error instrumenting chat models: {e}", exc_info=True)
|
|
189
|
+
|
|
190
|
+
def _get_model_name(self, instance: Any) -> str:
|
|
191
|
+
"""Extract model name from chat model instance."""
|
|
192
|
+
# Try common attribute names for model name
|
|
193
|
+
for attr in ["model_name", "model", "model_id"]:
|
|
194
|
+
if hasattr(instance, attr):
|
|
195
|
+
value = getattr(instance, attr)
|
|
196
|
+
if value:
|
|
197
|
+
return str(value)
|
|
198
|
+
|
|
199
|
+
# Fallback to class name
|
|
200
|
+
return instance.__class__.__name__
|
|
201
|
+
|
|
202
|
+
def _set_chat_attributes(self, span, instance: Any, args: tuple, kwargs: dict, model_name: str):
|
|
203
|
+
"""Set span attributes for chat model invocations."""
|
|
204
|
+
span.set_attribute("langchain.chat_model.name", model_name)
|
|
205
|
+
span.set_attribute("langchain.chat_model.operation", "invoke")
|
|
206
|
+
|
|
207
|
+
# Try to extract provider from class name or module
|
|
208
|
+
provider = self._extract_provider(instance)
|
|
209
|
+
if provider:
|
|
210
|
+
span.set_attribute("langchain.chat_model.provider", provider)
|
|
211
|
+
|
|
212
|
+
# Count messages if available
|
|
213
|
+
if args and len(args) > 0:
|
|
214
|
+
messages = args[0]
|
|
215
|
+
if hasattr(messages, "__len__"):
|
|
216
|
+
span.set_attribute("langchain.chat_model.message_count", len(messages))
|
|
217
|
+
|
|
218
|
+
def _extract_provider(self, instance: Any) -> Optional[str]:
|
|
219
|
+
"""Extract provider name from chat model instance."""
|
|
220
|
+
class_name = instance.__class__.__name__.lower()
|
|
221
|
+
module_name = instance.__class__.__module__.lower()
|
|
222
|
+
|
|
223
|
+
# Map class names to providers
|
|
224
|
+
provider_mapping = {
|
|
225
|
+
"openai": "openai",
|
|
226
|
+
"anthropic": "anthropic",
|
|
227
|
+
"google": "google",
|
|
228
|
+
"ollama": "ollama",
|
|
229
|
+
"bedrock": "bedrock",
|
|
230
|
+
"cohere": "cohere",
|
|
231
|
+
"groq": "groq",
|
|
232
|
+
"mistral": "mistral",
|
|
233
|
+
}
|
|
234
|
+
|
|
235
|
+
# Check class name
|
|
236
|
+
for key, value in provider_mapping.items():
|
|
237
|
+
if key in class_name:
|
|
238
|
+
return value
|
|
239
|
+
|
|
240
|
+
# Check module name
|
|
241
|
+
for key, value in provider_mapping.items():
|
|
242
|
+
if key in module_name:
|
|
243
|
+
return value
|
|
244
|
+
|
|
245
|
+
return None
|
|
246
|
+
|
|
247
|
+
def _extract_and_record_usage(self, span, result: Any, model_name: str):
|
|
248
|
+
"""Extract usage information from LangChain response."""
|
|
249
|
+
try:
|
|
250
|
+
# LangChain responses may have usage_metadata or response_metadata
|
|
251
|
+
usage_data = None
|
|
252
|
+
|
|
253
|
+
# Check for usage_metadata (newer LangChain versions)
|
|
254
|
+
if hasattr(result, "usage_metadata") and result.usage_metadata:
|
|
255
|
+
usage_data = result.usage_metadata
|
|
256
|
+
|
|
257
|
+
# Check for response_metadata (older versions)
|
|
258
|
+
elif hasattr(result, "response_metadata") and result.response_metadata:
|
|
259
|
+
metadata = result.response_metadata
|
|
260
|
+
if "token_usage" in metadata:
|
|
261
|
+
usage_data = metadata["token_usage"]
|
|
262
|
+
elif "usage" in metadata:
|
|
263
|
+
usage_data = metadata["usage"]
|
|
264
|
+
|
|
265
|
+
if usage_data:
|
|
266
|
+
# Extract token counts (handle both dict and object)
|
|
267
|
+
if isinstance(usage_data, dict):
|
|
268
|
+
prompt_tokens = usage_data.get("input_tokens") or usage_data.get(
|
|
269
|
+
"prompt_tokens"
|
|
270
|
+
)
|
|
271
|
+
completion_tokens = usage_data.get("output_tokens") or usage_data.get(
|
|
272
|
+
"completion_tokens"
|
|
273
|
+
)
|
|
274
|
+
total_tokens = usage_data.get("total_tokens")
|
|
275
|
+
else:
|
|
276
|
+
prompt_tokens = getattr(usage_data, "input_tokens", None) or getattr(
|
|
277
|
+
usage_data, "prompt_tokens", None
|
|
278
|
+
)
|
|
279
|
+
completion_tokens = getattr(usage_data, "output_tokens", None) or getattr(
|
|
280
|
+
usage_data, "completion_tokens", None
|
|
281
|
+
)
|
|
282
|
+
total_tokens = getattr(usage_data, "total_tokens", None)
|
|
283
|
+
|
|
284
|
+
# Set span attributes
|
|
285
|
+
if prompt_tokens:
|
|
286
|
+
span.set_attribute("gen_ai.usage.prompt_tokens", int(prompt_tokens))
|
|
287
|
+
if completion_tokens:
|
|
288
|
+
span.set_attribute("gen_ai.usage.completion_tokens", int(completion_tokens))
|
|
289
|
+
if total_tokens:
|
|
290
|
+
span.set_attribute("gen_ai.usage.total_tokens", int(total_tokens))
|
|
291
|
+
elif prompt_tokens and completion_tokens:
|
|
292
|
+
span.set_attribute(
|
|
293
|
+
"gen_ai.usage.total_tokens", int(prompt_tokens) + int(completion_tokens)
|
|
294
|
+
)
|
|
295
|
+
|
|
296
|
+
except Exception as e:
|
|
297
|
+
logger.debug(f"Could not extract usage information: {e}")
|
|
73
298
|
|
|
74
299
|
def _extract_usage(self, result) -> Optional[Dict[str, int]]:
|
|
300
|
+
"""Extract usage information for BaseInstrumentor compatibility."""
|
|
301
|
+
try:
|
|
302
|
+
usage_data = None
|
|
303
|
+
|
|
304
|
+
if hasattr(result, "usage_metadata") and result.usage_metadata:
|
|
305
|
+
usage_data = result.usage_metadata
|
|
306
|
+
elif hasattr(result, "response_metadata") and result.response_metadata:
|
|
307
|
+
metadata = result.response_metadata
|
|
308
|
+
if "token_usage" in metadata:
|
|
309
|
+
usage_data = metadata["token_usage"]
|
|
310
|
+
elif "usage" in metadata:
|
|
311
|
+
usage_data = metadata["usage"]
|
|
312
|
+
|
|
313
|
+
if usage_data:
|
|
314
|
+
if isinstance(usage_data, dict):
|
|
315
|
+
prompt_tokens = usage_data.get("input_tokens") or usage_data.get(
|
|
316
|
+
"prompt_tokens"
|
|
317
|
+
)
|
|
318
|
+
completion_tokens = usage_data.get("output_tokens") or usage_data.get(
|
|
319
|
+
"completion_tokens"
|
|
320
|
+
)
|
|
321
|
+
else:
|
|
322
|
+
prompt_tokens = getattr(usage_data, "input_tokens", None) or getattr(
|
|
323
|
+
usage_data, "prompt_tokens", None
|
|
324
|
+
)
|
|
325
|
+
completion_tokens = getattr(usage_data, "output_tokens", None) or getattr(
|
|
326
|
+
usage_data, "completion_tokens", None
|
|
327
|
+
)
|
|
328
|
+
|
|
329
|
+
if prompt_tokens or completion_tokens:
|
|
330
|
+
return {
|
|
331
|
+
"prompt_tokens": int(prompt_tokens) if prompt_tokens else 0,
|
|
332
|
+
"completion_tokens": int(completion_tokens) if completion_tokens else 0,
|
|
333
|
+
"total_tokens": int(prompt_tokens or 0) + int(completion_tokens or 0),
|
|
334
|
+
}
|
|
335
|
+
except Exception:
|
|
336
|
+
pass
|
|
337
|
+
|
|
75
338
|
return None
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: genai-otel-instrument
|
|
3
|
-
Version: 0.1.16
|
|
3
|
+
Version: 0.1.17
|
|
4
4
|
Summary: Comprehensive OpenTelemetry auto-instrumentation for LLM/GenAI applications
|
|
5
5
|
Author-email: Kshitij Thakkar <kshitijthakkar@rocketmail.com>
|
|
6
6
|
License: AGPL-3.0-or-later
|
|
@@ -529,7 +529,8 @@ GENAI_ENABLE_MCP_INSTRUMENTATION=true
|
|
|
529
529
|
GENAI_GPU_COLLECTION_INTERVAL=5 # GPU metrics collection interval in seconds (default: 5)
|
|
530
530
|
OTEL_SERVICE_INSTANCE_ID=instance-1 # Optional service instance id
|
|
531
531
|
OTEL_ENVIRONMENT=production # Optional environment
|
|
532
|
-
OTEL_EXPORTER_OTLP_TIMEOUT=
|
|
532
|
+
OTEL_EXPORTER_OTLP_TIMEOUT=60 # Timeout for OTLP exporter in seconds (default: 60)
|
|
533
|
+
OTEL_EXPORTER_OTLP_PROTOCOL=http/protobuf # Protocol: "http/protobuf" (default) or "grpc"
|
|
533
534
|
|
|
534
535
|
# Semantic conventions (NEW)
|
|
535
536
|
OTEL_SEMCONV_STABILITY_OPT_IN=gen_ai # "gen_ai" for new conventions only, "gen_ai/dup" for dual emission
|
|
@@ -1,8 +1,8 @@
|
|
|
1
1
|
genai_otel/__init__.py,sha256=SXh49Ndu2KozUfZxD7AGHCewlUsUz43agsL7-HbeyE0,4476
|
|
2
|
-
genai_otel/__version__.py,sha256=
|
|
3
|
-
genai_otel/auto_instrument.py,sha256=
|
|
2
|
+
genai_otel/__version__.py,sha256=TtVjjQ5FSnY_MX0ZAPLaNmAfTJWa0sEMBdMs65ngXMM,706
|
|
3
|
+
genai_otel/auto_instrument.py,sha256=MjV-BEiHKCIj6foU5YtGz9bSQoIJkGmL4MObzdXJfho,16340
|
|
4
4
|
genai_otel/cli.py,sha256=mbhaTU0WIAkvPKdIing-guIxPDjEKQftChWQUtPFzkY,3170
|
|
5
|
-
genai_otel/config.py,sha256=
|
|
5
|
+
genai_otel/config.py,sha256=VEtk-Iwyj4eLgXD4-c72UIHs9VpFaUqDO8jiwV-rW94,8296
|
|
6
6
|
genai_otel/cost_calculator.py,sha256=BOW-TC41lJ1GcL4hIGZ4NySyV8aro4_juMOe2IqtJ-A,18115
|
|
7
7
|
genai_otel/cost_enriching_exporter.py,sha256=iED7njK21UBKlxRElGfqSs66gMkzDCr8fm-4ZkJBiLU,7874
|
|
8
8
|
genai_otel/cost_enrichment_processor.py,sha256=fQoVosBUgshD9ZRxWpwqqPWYnyhrvKBTJAW0S2H7t1E,7090
|
|
@@ -22,7 +22,7 @@ genai_otel/instrumentors/cohere_instrumentor.py,sha256=fsKvHaWvMRAGRbOtybVJVVz-F
|
|
|
22
22
|
genai_otel/instrumentors/google_ai_instrumentor.py,sha256=ExNo0_OxfCxaRpuUXYU8UZ-ClQRHRLUvf7-kMC6zdc8,2984
|
|
23
23
|
genai_otel/instrumentors/groq_instrumentor.py,sha256=bCm7IDmDyvg0-XuzcCSO5xf9QvDlQGwb7bdQ_ooS6QI,3398
|
|
24
24
|
genai_otel/instrumentors/huggingface_instrumentor.py,sha256=wvolJZnq9YKfJsvNvUnoOpL1tbeGy0DuxVmmmI1_BoA,17815
|
|
25
|
-
genai_otel/instrumentors/langchain_instrumentor.py,sha256=
|
|
25
|
+
genai_otel/instrumentors/langchain_instrumentor.py,sha256=utAPhHXQeYEupDSqQaKdJaKr0PXDLD_VALSfXtXgupk,14037
|
|
26
26
|
genai_otel/instrumentors/llamaindex_instrumentor.py,sha256=zZ1J7W4yQo1Ur6Y5y0UXpDdEx9oDnmsqNIin5Jrv9os,1206
|
|
27
27
|
genai_otel/instrumentors/mistralai_instrumentor.py,sha256=Blo8X4WV-xQe-xF-jhkaGPavkgayANf1F3zCTzuhuL0,12478
|
|
28
28
|
genai_otel/instrumentors/ollama_instrumentor.py,sha256=lv45qf8Cqe_HmF7BIMojZcBFK8AA13uUrCVOKAFhN0k,5286
|
|
@@ -38,9 +38,9 @@ genai_otel/mcp_instrumentors/kafka_instrumentor.py,sha256=QJYJC1rvo_zZAIaw-cp_Ic
|
|
|
38
38
|
genai_otel/mcp_instrumentors/manager.py,sha256=1Pj5lkEOL8Yq1Oeud4ZExN6k6NLIVtTzKnFLNiFdJvw,5895
|
|
39
39
|
genai_otel/mcp_instrumentors/redis_instrumentor.py,sha256=KUbs0dMyfMzU4T0SS8u43I5fvr09lcBBM92I3KCsYUw,943
|
|
40
40
|
genai_otel/mcp_instrumentors/vector_db_instrumentor.py,sha256=2vhnk4PGpfYKr-XlRbnCIOap4BPKHOn--fh-ai2YXlM,9994
|
|
41
|
-
genai_otel_instrument-0.1.
|
|
42
|
-
genai_otel_instrument-0.1.
|
|
43
|
-
genai_otel_instrument-0.1.
|
|
44
|
-
genai_otel_instrument-0.1.
|
|
45
|
-
genai_otel_instrument-0.1.
|
|
46
|
-
genai_otel_instrument-0.1.
|
|
41
|
+
genai_otel_instrument-0.1.17.dist-info/licenses/LICENSE,sha256=upRpn6Eg5iN_1y_TrvgUKxNQdoYy7OY2KybhHTOwTMc,35375
|
|
42
|
+
genai_otel_instrument-0.1.17.dist-info/METADATA,sha256=mP1d8ckN77UgzST7fndQC9VNMa9Ea04RqPuWPYiWug0,39614
|
|
43
|
+
genai_otel_instrument-0.1.17.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
|
44
|
+
genai_otel_instrument-0.1.17.dist-info/entry_points.txt,sha256=E9UqoHA_fq69yNGAY3SRYf5HH94sZT5DiDueiU1v0KM,57
|
|
45
|
+
genai_otel_instrument-0.1.17.dist-info/top_level.txt,sha256=cvCm8PUwvYUSQKruk-x6S-_YuDyhOBk8gD910XICcbg,11
|
|
46
|
+
genai_otel_instrument-0.1.17.dist-info/RECORD,,
|
|
File without changes
|
{genai_otel_instrument-0.1.16.dist-info → genai_otel_instrument-0.1.17.dist-info}/entry_points.txt
RENAMED
|
File without changes
|
{genai_otel_instrument-0.1.16.dist-info → genai_otel_instrument-0.1.17.dist-info}/licenses/LICENSE
RENAMED
|
File without changes
|
{genai_otel_instrument-0.1.16.dist-info → genai_otel_instrument-0.1.17.dist-info}/top_level.txt
RENAMED
|
File without changes
|