genai-otel-instrument 0.1.24__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- genai_otel/__init__.py +132 -0
- genai_otel/__version__.py +34 -0
- genai_otel/auto_instrument.py +602 -0
- genai_otel/cli.py +92 -0
- genai_otel/config.py +333 -0
- genai_otel/cost_calculator.py +467 -0
- genai_otel/cost_enriching_exporter.py +207 -0
- genai_otel/cost_enrichment_processor.py +174 -0
- genai_otel/evaluation/__init__.py +76 -0
- genai_otel/evaluation/bias_detector.py +364 -0
- genai_otel/evaluation/config.py +261 -0
- genai_otel/evaluation/hallucination_detector.py +525 -0
- genai_otel/evaluation/pii_detector.py +356 -0
- genai_otel/evaluation/prompt_injection_detector.py +262 -0
- genai_otel/evaluation/restricted_topics_detector.py +316 -0
- genai_otel/evaluation/span_processor.py +962 -0
- genai_otel/evaluation/toxicity_detector.py +406 -0
- genai_otel/exceptions.py +17 -0
- genai_otel/gpu_metrics.py +516 -0
- genai_otel/instrumentors/__init__.py +71 -0
- genai_otel/instrumentors/anthropic_instrumentor.py +134 -0
- genai_otel/instrumentors/anyscale_instrumentor.py +27 -0
- genai_otel/instrumentors/autogen_instrumentor.py +394 -0
- genai_otel/instrumentors/aws_bedrock_instrumentor.py +94 -0
- genai_otel/instrumentors/azure_openai_instrumentor.py +69 -0
- genai_otel/instrumentors/base.py +919 -0
- genai_otel/instrumentors/bedrock_agents_instrumentor.py +398 -0
- genai_otel/instrumentors/cohere_instrumentor.py +140 -0
- genai_otel/instrumentors/crewai_instrumentor.py +311 -0
- genai_otel/instrumentors/dspy_instrumentor.py +661 -0
- genai_otel/instrumentors/google_ai_instrumentor.py +310 -0
- genai_otel/instrumentors/groq_instrumentor.py +106 -0
- genai_otel/instrumentors/guardrails_ai_instrumentor.py +510 -0
- genai_otel/instrumentors/haystack_instrumentor.py +503 -0
- genai_otel/instrumentors/huggingface_instrumentor.py +399 -0
- genai_otel/instrumentors/hyperbolic_instrumentor.py +236 -0
- genai_otel/instrumentors/instructor_instrumentor.py +425 -0
- genai_otel/instrumentors/langchain_instrumentor.py +340 -0
- genai_otel/instrumentors/langgraph_instrumentor.py +328 -0
- genai_otel/instrumentors/llamaindex_instrumentor.py +36 -0
- genai_otel/instrumentors/mistralai_instrumentor.py +315 -0
- genai_otel/instrumentors/ollama_instrumentor.py +197 -0
- genai_otel/instrumentors/ollama_server_metrics_poller.py +336 -0
- genai_otel/instrumentors/openai_agents_instrumentor.py +291 -0
- genai_otel/instrumentors/openai_instrumentor.py +260 -0
- genai_otel/instrumentors/pydantic_ai_instrumentor.py +362 -0
- genai_otel/instrumentors/replicate_instrumentor.py +87 -0
- genai_otel/instrumentors/sambanova_instrumentor.py +196 -0
- genai_otel/instrumentors/togetherai_instrumentor.py +146 -0
- genai_otel/instrumentors/vertexai_instrumentor.py +106 -0
- genai_otel/llm_pricing.json +1676 -0
- genai_otel/logging_config.py +45 -0
- genai_otel/mcp_instrumentors/__init__.py +14 -0
- genai_otel/mcp_instrumentors/api_instrumentor.py +144 -0
- genai_otel/mcp_instrumentors/base.py +105 -0
- genai_otel/mcp_instrumentors/database_instrumentor.py +336 -0
- genai_otel/mcp_instrumentors/kafka_instrumentor.py +31 -0
- genai_otel/mcp_instrumentors/manager.py +139 -0
- genai_otel/mcp_instrumentors/redis_instrumentor.py +31 -0
- genai_otel/mcp_instrumentors/vector_db_instrumentor.py +265 -0
- genai_otel/metrics.py +148 -0
- genai_otel/py.typed +2 -0
- genai_otel/server_metrics.py +197 -0
- genai_otel_instrument-0.1.24.dist-info/METADATA +1404 -0
- genai_otel_instrument-0.1.24.dist-info/RECORD +69 -0
- genai_otel_instrument-0.1.24.dist-info/WHEEL +5 -0
- genai_otel_instrument-0.1.24.dist-info/entry_points.txt +2 -0
- genai_otel_instrument-0.1.24.dist-info/licenses/LICENSE +680 -0
- genai_otel_instrument-0.1.24.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,134 @@
|
|
|
1
|
+
"""OpenTelemetry instrumentor for the Anthropic Claude SDK.
|
|
2
|
+
|
|
3
|
+
This instrumentor automatically traces calls to the Anthropic API, capturing
|
|
4
|
+
relevant attributes such as model name, message count, and token usage.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import logging
|
|
8
|
+
from typing import Any, Dict, Optional
|
|
9
|
+
|
|
10
|
+
from ..config import OTelConfig
|
|
11
|
+
from .base import BaseInstrumentor
|
|
12
|
+
|
|
13
|
+
logger = logging.getLogger(__name__)
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class AnthropicInstrumentor(BaseInstrumentor):
    """Instrumentor for Anthropic Claude SDK"""

    def __init__(self):
        """Initialize the instrumentor."""
        super().__init__()
        self._anthropic_available = False
        self._check_availability()

    def _check_availability(self):
        """Check if Anthropic library is available."""
        try:
            import anthropic  # noqa: F401  # probe only; nothing is patched here
        except ImportError:
            logger.debug("Anthropic library not installed, instrumentation will be skipped")
            self._anthropic_available = False
        else:
            self._anthropic_available = True
            logger.debug("Anthropic library detected and available for instrumentation")

    def instrument(self, config: OTelConfig):
        """Instrument Anthropic SDK if available.

        Args:
            config (OTelConfig): The OpenTelemetry configuration object.
        """
        if not self._anthropic_available:
            logger.debug("Skipping Anthropic instrumentation - library not available")
            return

        self.config = config

        try:
            import anthropic
            import wrapt

            if hasattr(anthropic, "Anthropic"):
                unpatched_init = anthropic.Anthropic.__init__

                def traced_init(wrapped, instance, args, kwargs):
                    # Run the real __init__ first, then hook the freshly
                    # constructed client so its methods emit spans.
                    outcome = wrapped(*args, **kwargs)
                    self._instrument_client(instance)
                    return outcome

                anthropic.Anthropic.__init__ = wrapt.FunctionWrapper(unpatched_init, traced_init)
                self._instrumented = True
                logger.info("Anthropic instrumentation enabled")

        except Exception as e:
            logger.error("Failed to instrument Anthropic: %s", e, exc_info=True)
            if config.fail_on_error:
                raise

    def _instrument_client(self, client):
        """Instrument Anthropic client methods.

        Args:
            client: The Anthropic client instance to instrument.
        """
        messages_api = getattr(client, "messages", None)
        if messages_api is None or not hasattr(messages_api, "create"):
            return
        span_factory = self.create_span_wrapper(
            span_name="anthropic.messages.create",
            extract_attributes=self._extract_anthropic_attributes,
        )
        messages_api.create = span_factory(messages_api.create)

    def _extract_anthropic_attributes(
        self, instance: Any, args: Any, kwargs: Any
    ) -> Dict[str, Any]:
        """Extract attributes from Anthropic API call.

        Args:
            instance: The client instance.
            args: Positional arguments.
            kwargs: Keyword arguments.

        Returns:
            Dict[str, Any]: Dictionary of attributes to set on the span.
        """
        return {
            "gen_ai.system": "anthropic",
            "gen_ai.request.model": kwargs.get("model", "unknown"),
            "gen_ai.request.message_count": len(kwargs.get("messages", [])),
        }

    def _extract_usage(self, result) -> Optional[Dict[str, int]]:
        """Extract token usage from Anthropic response.

        Args:
            result: The API response object.

        Returns:
            Optional[Dict[str, int]]: Dictionary with token counts or None.
        """
        usage = getattr(result, "usage", None)
        if not usage:
            return None

        prompt = getattr(usage, "input_tokens", 0)
        completion = getattr(usage, "output_tokens", 0)
        usage_dict = {
            "prompt_tokens": prompt,
            "completion_tokens": completion,
            "total_tokens": prompt + completion,
        }

        # Prompt-caching counters (Phase 3.2): only attach the fields the
        # SDK actually exposes on this response.
        # cache_read_input_tokens: tokens served from the prompt cache.
        # cache_creation_input_tokens: tokens written into the cache.
        for cache_field in ("cache_read_input_tokens", "cache_creation_input_tokens"):
            if hasattr(usage, cache_field):
                usage_dict[cache_field] = getattr(usage, cache_field, 0)

        return usage_dict
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
"""OpenTelemetry instrumentor for Anyscale Endpoints.
|
|
2
|
+
|
|
3
|
+
This instrumentor integrates with Anyscale Endpoints, which often leverage
|
|
4
|
+
OpenAI-compatible APIs. It ensures that calls made to Anyscale services are
|
|
5
|
+
properly traced and attributed within the OpenTelemetry ecosystem.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from typing import Dict, Optional
|
|
9
|
+
|
|
10
|
+
from ..config import OTelConfig
|
|
11
|
+
from .base import BaseInstrumentor
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class AnyscaleInstrumentor(BaseInstrumentor):
    """Instrumentor for Anyscale Endpoints"""

    def instrument(self, config: OTelConfig):
        """Record the configuration; no direct patching is performed.

        Anyscale Endpoints are accessed through the OpenAI-compatible SDK,
        so calls are already traced by the OpenAI instrumentor. This method
        only stores the configuration for consistency with the other
        instrumentors. (The previous implementation wrapped a ``pass`` in a
        ``try/except ImportError`` that could never fire; that dead
        scaffolding has been removed.)

        Args:
            config (OTelConfig): The OpenTelemetry configuration object.
        """
        self.config = config

    def _extract_usage(self, result) -> Optional[Dict[str, int]]:
        """Token usage is reported by the OpenAI instrumentor, not here.

        Args:
            result: The API response object (unused).

        Returns:
            Optional[Dict[str, int]]: Always None.
        """
        return None
|
|
@@ -0,0 +1,394 @@
|
|
|
1
|
+
"""OpenTelemetry instrumentor for Microsoft AutoGen framework.
|
|
2
|
+
|
|
3
|
+
This instrumentor automatically traces multi-agent conversations, group chats,
|
|
4
|
+
and agent interactions using the Microsoft AutoGen framework.
|
|
5
|
+
|
|
6
|
+
Note: AutoGen is entering maintenance mode and merging with Semantic Kernel
|
|
7
|
+
into the Microsoft Agent Framework (public preview Oct 2025). This instrumentor
|
|
8
|
+
supports the current AutoGen release.
|
|
9
|
+
|
|
10
|
+
Requirements:
|
|
11
|
+
pip install pyautogen # or autogen (legacy package name)
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
import json
|
|
15
|
+
import logging
|
|
16
|
+
from typing import Any, Dict, Optional
|
|
17
|
+
|
|
18
|
+
from ..config import OTelConfig
|
|
19
|
+
from .base import BaseInstrumentor
|
|
20
|
+
|
|
21
|
+
logger = logging.getLogger(__name__)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class AutoGenInstrumentor(BaseInstrumentor):
    """Instrumentor for Microsoft AutoGen framework"""

    def __init__(self):
        """Initialize the instrumentor."""
        super().__init__()
        # Set by _check_availability(); instrument() is a no-op when False.
        self._autogen_available = False
        self._check_availability()

    def _check_availability(self):
        """Check if AutoGen library is available."""
        # The current releases (including the pyautogen distribution) expose
        # the top-level ``autogen`` module, so probe that first.
        try:
            import autogen

            self._autogen_available = True
            logger.debug("AutoGen library detected and available for instrumentation")
            return
        except ImportError:
            pass

        # Some installs expose the module as ``pyautogen`` instead.
        try:
            import pyautogen

            self._autogen_available = True
            logger.debug("AutoGen library (pyautogen) detected and available for instrumentation")
            return
        except ImportError:
            logger.debug("AutoGen library not installed, instrumentation will be skipped")
            self._autogen_available = False

    def instrument(self, config: OTelConfig):
        """Instrument AutoGen framework if available.

        Monkey-patches ``ConversableAgent.initiate_chat``,
        ``GroupChat.select_speaker`` and ``GroupChatManager.run`` (when the
        installed AutoGen version defines them) with wrapt FunctionWrappers
        that emit spans.

        Args:
            config (OTelConfig): The OpenTelemetry configuration object.
        """
        if not self._autogen_available:
            logger.debug("Skipping AutoGen instrumentation - library not available")
            return

        self.config = config

        try:
            import wrapt

            # Resolve whichever module name this install provides.
            try:
                import autogen
            except ImportError:
                import pyautogen as autogen

            # Instrument ConversableAgent.initiate_chat (main conversation method)
            if hasattr(autogen, "ConversableAgent"):
                if hasattr(autogen.ConversableAgent, "initiate_chat"):
                    original_initiate = autogen.ConversableAgent.initiate_chat
                    autogen.ConversableAgent.initiate_chat = wrapt.FunctionWrapper(
                        original_initiate, self._wrap_initiate_chat
                    )

            # Instrument GroupChat.select_speaker (per-round agent selection)
            if hasattr(autogen, "GroupChat"):
                if hasattr(autogen.GroupChat, "select_speaker"):
                    original_select = autogen.GroupChat.select_speaker
                    autogen.GroupChat.select_speaker = wrapt.FunctionWrapper(
                        original_select, self._wrap_select_speaker
                    )

            # Instrument GroupChatManager.run if available
            if hasattr(autogen, "GroupChatManager"):
                if hasattr(autogen.GroupChatManager, "run"):
                    original_run = autogen.GroupChatManager.run
                    autogen.GroupChatManager.run = wrapt.FunctionWrapper(
                        original_run, self._wrap_group_chat_run
                    )

            # NOTE(review): reached even when none of the hasattr branches
            # matched, so "enabled" may be logged without any patch applied.
            self._instrumented = True
            logger.info("AutoGen instrumentation enabled")

        except Exception as e:
            logger.error("Failed to instrument AutoGen: %s", e, exc_info=True)
            if config.fail_on_error:
                raise

    def _wrap_initiate_chat(self, wrapped, instance, args, kwargs):
        """Wrap ConversableAgent.initiate_chat method with span.

        NOTE(review): ``wrapped`` arrives from wrapt already bound to
        ``instance``; ``instance`` is re-passed as the first positional
        argument, which presumes BaseInstrumentor.create_span_wrapper
        consumes it for extract_attributes before calling through — confirm
        against base.py.

        Args:
            wrapped: The original method.
            instance: The ConversableAgent instance.
            args: Positional arguments.
            kwargs: Keyword arguments.
        """
        return self.create_span_wrapper(
            span_name="autogen.initiate_chat",
            extract_attributes=self._extract_chat_attributes,
        )(wrapped)(instance, *args, **kwargs)

    def _wrap_select_speaker(self, wrapped, instance, args, kwargs):
        """Wrap GroupChat.select_speaker method with span.

        Args:
            wrapped: The original method.
            instance: The GroupChat instance.
            args: Positional arguments.
            kwargs: Keyword arguments.
        """
        return self.create_span_wrapper(
            span_name="autogen.group_chat.select_speaker",
            extract_attributes=self._extract_group_chat_attributes,
        )(wrapped)(instance, *args, **kwargs)

    def _wrap_group_chat_run(self, wrapped, instance, args, kwargs):
        """Wrap GroupChatManager.run method with span.

        Args:
            wrapped: The original method.
            instance: The GroupChatManager instance.
            args: Positional arguments.
            kwargs: Keyword arguments.
        """
        return self.create_span_wrapper(
            span_name="autogen.group_chat.run",
            extract_attributes=self._extract_group_chat_manager_attributes,
        )(wrapped)(instance, *args, **kwargs)

    def _extract_chat_attributes(self, instance: Any, args: Any, kwargs: Any) -> Dict[str, Any]:
        """Extract attributes from ConversableAgent.initiate_chat call.

        Args:
            instance: The ConversableAgent instance (sender).
            args: Positional arguments (recipient, message, etc.).
            kwargs: Keyword arguments.

        Returns:
            Dict[str, Any]: Dictionary of attributes to set on the span.
        """
        attrs = {}

        # Core attributes
        attrs["gen_ai.system"] = "autogen"
        attrs["gen_ai.operation.name"] = "conversation.initiate"

        # Sender agent name (the instance whose initiate_chat was called)
        if hasattr(instance, "name"):
            attrs["autogen.agent.name"] = instance.name
            attrs["autogen.conversation.sender"] = instance.name

        # Sender agent class name
        agent_type = type(instance).__name__
        attrs["autogen.agent.type"] = agent_type

        # Recipient agent: first positional argument, else the kwarg
        recipient = None
        if len(args) > 0:
            recipient = args[0]
        else:
            recipient = kwargs.get("recipient")

        if recipient:
            if hasattr(recipient, "name"):
                attrs["autogen.conversation.recipient"] = recipient.name

            recipient_type = type(recipient).__name__
            attrs["autogen.recipient.type"] = recipient_type

        # Message: second positional argument, else the kwarg.
        # NOTE: falsy messages (e.g. empty string) are intentionally skipped.
        message = None
        if len(args) > 1:
            message = args[1]
        else:
            message = kwargs.get("message")

        if message:
            # Message can be a plain string or a dict with a "content" key
            if isinstance(message, str):
                attrs["autogen.message"] = message[:500]  # Truncate to bound attr size
                attrs["autogen.message.type"] = "string"
            elif isinstance(message, dict):
                if "content" in message:
                    attrs["autogen.message"] = str(message["content"])[:500]
                attrs["autogen.message.type"] = "dict"
            else:
                attrs["autogen.message.type"] = str(type(message).__name__)

        # max_turns, if the caller bounded the conversation
        max_turns = kwargs.get("max_turns")
        if max_turns is not None:
            attrs["autogen.conversation.max_turns"] = max_turns

        # silent mode flag, if provided
        if "silent" in kwargs:
            attrs["autogen.conversation.silent"] = kwargs["silent"]

        # clear_history flag, if provided
        if "clear_history" in kwargs:
            attrs["autogen.conversation.clear_history"] = kwargs["clear_history"]

        return attrs

    def _extract_group_chat_attributes(
        self, instance: Any, args: Any, kwargs: Any
    ) -> Dict[str, Any]:
        """Extract attributes from GroupChat.select_speaker call.

        Args:
            instance: The GroupChat instance.
            args: Positional arguments.
            kwargs: Keyword arguments.

        Returns:
            Dict[str, Any]: Dictionary of attributes to set on the span.
        """
        attrs = {}

        # Core attributes
        attrs["gen_ai.system"] = "autogen"
        attrs["gen_ai.operation.name"] = "group_chat.select_speaker"

        # Participant count and names
        if hasattr(instance, "agents") and instance.agents:
            attrs["autogen.group_chat.agent_count"] = len(instance.agents)

            agent_names = [getattr(agent, "name", "unknown") for agent in instance.agents]
            attrs["autogen.group_chat.agents"] = agent_names[:10]  # Limit to 10

        # Speaker selection strategy (e.g. "auto", "round_robin")
        if hasattr(instance, "speaker_selection_method"):
            attrs["autogen.group_chat.selection_mode"] = instance.speaker_selection_method

        # Round limit for the group chat
        if hasattr(instance, "max_round"):
            attrs["autogen.group_chat.max_round"] = instance.max_round

        return attrs

    def _extract_group_chat_manager_attributes(
        self, instance: Any, args: Any, kwargs: Any
    ) -> Dict[str, Any]:
        """Extract attributes from GroupChatManager.run call.

        Args:
            instance: The GroupChatManager instance.
            args: Positional arguments.
            kwargs: Keyword arguments.

        Returns:
            Dict[str, Any]: Dictionary of attributes to set on the span.
        """
        attrs = {}

        # Core attributes
        attrs["gen_ai.system"] = "autogen"
        attrs["gen_ai.operation.name"] = "group_chat.run"

        # Manager name, if it carries one
        if hasattr(instance, "name"):
            attrs["autogen.manager.name"] = instance.name

        # Describe the managed GroupChat, mirroring _extract_group_chat_attributes
        if hasattr(instance, "groupchat"):
            groupchat = instance.groupchat

            if hasattr(groupchat, "agents") and groupchat.agents:
                attrs["autogen.group_chat.agent_count"] = len(groupchat.agents)

                agent_names = [getattr(agent, "name", "unknown") for agent in groupchat.agents]
                attrs["autogen.group_chat.agents"] = agent_names[:10]

            if hasattr(groupchat, "speaker_selection_method"):
                attrs["autogen.group_chat.selection_mode"] = groupchat.speaker_selection_method

        return attrs

    def _extract_usage(self, result) -> Optional[Dict[str, int]]:
        """Extract token usage from conversation result.

        Note: AutoGen doesn't directly expose token usage in results.
        Token usage is captured by underlying LLM provider instrumentors.

        Args:
            result: The conversation result.

        Returns:
            Optional[Dict[str, int]]: Dictionary with token counts or None.
        """
        # Best-effort: read an OpenAI-shaped ``usage`` attribute if some
        # future AutoGen version attaches one to the result.
        if hasattr(result, "usage"):
            try:
                usage = result.usage
                return {
                    "prompt_tokens": getattr(usage, "prompt_tokens", 0),
                    "completion_tokens": getattr(usage, "completion_tokens", 0),
                    "total_tokens": getattr(usage, "total_tokens", 0),
                }
            except Exception as e:
                logger.debug("Failed to extract token usage: %s", e)

        return None

    def _extract_response_attributes(self, result) -> Dict[str, Any]:
        """Extract response attributes from conversation result.

        Args:
            result: The conversation result (typically a ChatResult).

        Returns:
            Dict[str, Any]: Dictionary of response attributes.
        """
        attrs = {}

        try:
            # ChatResult exposes the full message transcript as chat_history
            if hasattr(result, "chat_history"):
                attrs["autogen.conversation.messages"] = len(result.chat_history)

                # Record the final message (content/role/speaker), truncated
                if result.chat_history:
                    last_message = result.chat_history[-1]
                    if isinstance(last_message, dict):
                        if "content" in last_message:
                            attrs["autogen.conversation.last_message"] = str(
                                last_message["content"]
                            )[:500]
                        if "role" in last_message:
                            attrs["autogen.conversation.last_role"] = last_message["role"]
                        if "name" in last_message:
                            attrs["autogen.conversation.last_speaker"] = last_message["name"]

            # Cost info, when AutoGen computed it (dict is flattened per key)
            if hasattr(result, "cost"):
                try:
                    cost = result.cost
                    if isinstance(cost, dict):
                        for key, value in cost.items():
                            attrs[f"autogen.cost.{key}"] = value
                    else:
                        attrs["autogen.cost"] = cost
                except Exception as e:
                    logger.debug("Failed to extract cost: %s", e)

            # Conversation summary, truncated
            if hasattr(result, "summary"):
                attrs["autogen.conversation.summary"] = str(result.summary)[:500]

        except Exception as e:
            logger.debug("Failed to extract response attributes: %s", e)

        return attrs

    def _extract_finish_reason(self, result) -> Optional[str]:
        """Extract finish reason from conversation result.

        Args:
            result: The conversation result.

        Returns:
            Optional[str]: The finish reason string or None if not available.
        """
        # Presence of a chat_history attribute is taken as proof the
        # conversation ran to completion; AutoGen exposes no finer signal.
        if hasattr(result, "chat_history"):
            return "completed"

        return None
|
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import logging
|
|
3
|
+
from typing import Any, Dict, Optional
|
|
4
|
+
|
|
5
|
+
from ..config import OTelConfig
|
|
6
|
+
from .base import BaseInstrumentor
|
|
7
|
+
|
|
8
|
+
logger = logging.getLogger(__name__)
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class AWSBedrockInstrumentor(BaseInstrumentor):
    """Instrumentor for AWS Bedrock"""

    def __init__(self):
        """Initialize the instrumentor."""
        super().__init__()
        self._boto3_available = False
        self._check_availability()

    def _check_availability(self):
        """Check if boto3 library is available."""
        try:
            import boto3  # noqa: F401  # probe only

            self._boto3_available = True
            logger.debug("boto3 library detected and available for instrumentation")
        except ImportError:
            logger.debug("boto3 library not installed, instrumentation will be skipped")
            self._boto3_available = False

    def instrument(self, config: OTelConfig):
        """Instrument boto3 so Bedrock runtime clients are traced.

        Wraps ``boto3.client`` so that any subsequently created
        ``bedrock-runtime`` client gets its ``invoke_model`` method traced.

        Args:
            config (OTelConfig): The OpenTelemetry configuration object.
        """
        # Consistency fix: honor the availability probe and fail_on_error,
        # and set _instrumented, matching the sibling instrumentors.
        if not self._boto3_available:
            logger.debug("Skipping AWS Bedrock instrumentation - library not available")
            return

        self.config = config

        try:
            import boto3

            original_client = boto3.client

            def wrapped_client(*args, **kwargs):
                client = original_client(*args, **kwargs)
                # The service name may be positional or the service_name kwarg.
                service = args[0] if args else kwargs.get("service_name")
                if service == "bedrock-runtime":
                    self._instrument_bedrock_client(client)
                return client

            boto3.client = wrapped_client
            self._instrumented = True
            logger.info("AWS Bedrock instrumentation enabled")

        except Exception as e:
            logger.error("Failed to instrument AWS Bedrock: %s", e, exc_info=True)
            if config.fail_on_error:
                raise

    def _instrument_bedrock_client(self, client):
        """Wrap ``invoke_model`` on a bedrock-runtime client with a span.

        Args:
            client: The boto3 bedrock-runtime client to instrument.
        """
        if hasattr(client, "invoke_model"):
            original_invoke = client.invoke_model
            # Bug fix: the span-wrapper factory must be APPLIED to the
            # original method. Previously the bare factory itself was
            # assigned, discarding the real invoke_model.
            client.invoke_model = self.create_span_wrapper(
                span_name="aws.bedrock.invoke_model",
                extract_attributes=self._extract_aws_bedrock_attributes,
            )(original_invoke)

    def _extract_aws_bedrock_attributes(
        self, instance: Any, args: Any, kwargs: Any
    ) -> Dict[str, Any]:  # pylint: disable=W0613
        """Extract request attributes from an invoke_model call.

        Args:
            instance: The client instance (unused).
            args: Positional arguments (unused).
            kwargs: Keyword arguments; ``modelId`` identifies the model.

        Returns:
            Dict[str, Any]: Dictionary of attributes to set on the span.
        """
        return {
            "gen_ai.system": "aws_bedrock",
            "gen_ai.request.model": kwargs.get("modelId", "unknown"),
        }

    def _extract_usage(self, result) -> Optional[Dict[str, int]]:
        """Extract token usage from a Bedrock invoke_model response.

        Handles Anthropic-style ``usage`` blocks (inputTokens/outputTokens)
        and Gemini-style ``usageMetadata`` blocks.

        Args:
            result: The raw Bedrock response mapping.

        Returns:
            Optional[Dict[str, int]]: Dictionary with token counts or None.
        """
        if not hasattr(result, "get"):
            return None

        content_type = result.get("contentType", "").lower()
        body_str = result.get("body", "")
        if "application/json" not in content_type or not body_str:
            return None

        try:
            # NOTE(review): a live boto3 response carries body as a
            # StreamingBody; json.loads then raises and we return None.
            body = json.loads(body_str)
            # Bug fix: these usage payloads are plain dicts, so use
            # dict.get() — getattr() on a dict always returned the
            # default 0 for every field.
            if "usage" in body and isinstance(body["usage"], dict):
                usage = body["usage"]
                prompt = usage.get("inputTokens", 0)
                completion = usage.get("outputTokens", 0)
                return {
                    "prompt_tokens": prompt,
                    "completion_tokens": completion,
                    "total_tokens": prompt + completion,
                }
            if "usageMetadata" in body and isinstance(body["usageMetadata"], dict):
                usage = body["usageMetadata"]
                return {
                    "prompt_tokens": usage.get("promptTokenCount", 0),
                    "completion_tokens": usage.get("candidatesTokenCount", 0),
                    "total_tokens": usage.get("totalTokenCount", 0),
                }
        except json.JSONDecodeError:
            logger.debug("Failed to parse Bedrock response body as JSON.")
        except Exception as e:
            logger.debug("Error extracting usage from Bedrock response: %s", e)
        return None
|