mseep-agentops 0.4.18__py3-none-any.whl → 0.4.23__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentops/__init__.py +0 -0
- agentops/client/api/base.py +28 -30
- agentops/client/api/versions/v3.py +29 -25
- agentops/client/api/versions/v4.py +87 -46
- agentops/client/client.py +98 -29
- agentops/client/http/README.md +87 -0
- agentops/client/http/http_client.py +126 -172
- agentops/config.py +8 -2
- agentops/instrumentation/OpenTelemetry.md +133 -0
- agentops/instrumentation/README.md +167 -0
- agentops/instrumentation/__init__.py +13 -1
- agentops/instrumentation/agentic/ag2/__init__.py +18 -0
- agentops/instrumentation/agentic/ag2/instrumentor.py +922 -0
- agentops/instrumentation/agentic/agno/__init__.py +19 -0
- agentops/instrumentation/agentic/agno/attributes/__init__.py +20 -0
- agentops/instrumentation/agentic/agno/attributes/agent.py +250 -0
- agentops/instrumentation/agentic/agno/attributes/metrics.py +214 -0
- agentops/instrumentation/agentic/agno/attributes/storage.py +158 -0
- agentops/instrumentation/agentic/agno/attributes/team.py +195 -0
- agentops/instrumentation/agentic/agno/attributes/tool.py +210 -0
- agentops/instrumentation/agentic/agno/attributes/workflow.py +254 -0
- agentops/instrumentation/agentic/agno/instrumentor.py +1313 -0
- agentops/instrumentation/agentic/crewai/LICENSE +201 -0
- agentops/instrumentation/agentic/crewai/NOTICE.md +10 -0
- agentops/instrumentation/agentic/crewai/__init__.py +6 -0
- agentops/instrumentation/agentic/crewai/crewai_span_attributes.py +335 -0
- agentops/instrumentation/agentic/crewai/instrumentation.py +535 -0
- agentops/instrumentation/agentic/crewai/version.py +1 -0
- agentops/instrumentation/agentic/google_adk/__init__.py +19 -0
- agentops/instrumentation/agentic/google_adk/instrumentor.py +68 -0
- agentops/instrumentation/agentic/google_adk/patch.py +767 -0
- agentops/instrumentation/agentic/haystack/__init__.py +1 -0
- agentops/instrumentation/agentic/haystack/instrumentor.py +186 -0
- agentops/instrumentation/agentic/langgraph/__init__.py +3 -0
- agentops/instrumentation/agentic/langgraph/attributes.py +54 -0
- agentops/instrumentation/agentic/langgraph/instrumentation.py +598 -0
- agentops/instrumentation/agentic/langgraph/version.py +1 -0
- agentops/instrumentation/agentic/openai_agents/README.md +156 -0
- agentops/instrumentation/agentic/openai_agents/SPANS.md +145 -0
- agentops/instrumentation/agentic/openai_agents/TRACING_API.md +144 -0
- agentops/instrumentation/agentic/openai_agents/__init__.py +30 -0
- agentops/instrumentation/agentic/openai_agents/attributes/common.py +549 -0
- agentops/instrumentation/agentic/openai_agents/attributes/completion.py +172 -0
- agentops/instrumentation/agentic/openai_agents/attributes/model.py +58 -0
- agentops/instrumentation/agentic/openai_agents/attributes/tokens.py +275 -0
- agentops/instrumentation/agentic/openai_agents/exporter.py +469 -0
- agentops/instrumentation/agentic/openai_agents/instrumentor.py +107 -0
- agentops/instrumentation/agentic/openai_agents/processor.py +58 -0
- agentops/instrumentation/agentic/smolagents/README.md +88 -0
- agentops/instrumentation/agentic/smolagents/__init__.py +12 -0
- agentops/instrumentation/agentic/smolagents/attributes/agent.py +354 -0
- agentops/instrumentation/agentic/smolagents/attributes/model.py +205 -0
- agentops/instrumentation/agentic/smolagents/instrumentor.py +286 -0
- agentops/instrumentation/agentic/smolagents/stream_wrapper.py +258 -0
- agentops/instrumentation/agentic/xpander/__init__.py +15 -0
- agentops/instrumentation/agentic/xpander/context.py +112 -0
- agentops/instrumentation/agentic/xpander/instrumentor.py +877 -0
- agentops/instrumentation/agentic/xpander/trace_probe.py +86 -0
- agentops/instrumentation/agentic/xpander/version.py +3 -0
- agentops/instrumentation/common/README.md +65 -0
- agentops/instrumentation/common/attributes.py +1 -2
- agentops/instrumentation/providers/anthropic/__init__.py +24 -0
- agentops/instrumentation/providers/anthropic/attributes/__init__.py +23 -0
- agentops/instrumentation/providers/anthropic/attributes/common.py +64 -0
- agentops/instrumentation/providers/anthropic/attributes/message.py +541 -0
- agentops/instrumentation/providers/anthropic/attributes/tools.py +231 -0
- agentops/instrumentation/providers/anthropic/event_handler_wrapper.py +90 -0
- agentops/instrumentation/providers/anthropic/instrumentor.py +146 -0
- agentops/instrumentation/providers/anthropic/stream_wrapper.py +436 -0
- agentops/instrumentation/providers/google_genai/README.md +33 -0
- agentops/instrumentation/providers/google_genai/__init__.py +24 -0
- agentops/instrumentation/providers/google_genai/attributes/__init__.py +25 -0
- agentops/instrumentation/providers/google_genai/attributes/chat.py +125 -0
- agentops/instrumentation/providers/google_genai/attributes/common.py +88 -0
- agentops/instrumentation/providers/google_genai/attributes/model.py +284 -0
- agentops/instrumentation/providers/google_genai/instrumentor.py +170 -0
- agentops/instrumentation/providers/google_genai/stream_wrapper.py +238 -0
- agentops/instrumentation/providers/ibm_watsonx_ai/__init__.py +28 -0
- agentops/instrumentation/providers/ibm_watsonx_ai/attributes/__init__.py +27 -0
- agentops/instrumentation/providers/ibm_watsonx_ai/attributes/attributes.py +277 -0
- agentops/instrumentation/providers/ibm_watsonx_ai/attributes/common.py +104 -0
- agentops/instrumentation/providers/ibm_watsonx_ai/instrumentor.py +162 -0
- agentops/instrumentation/providers/ibm_watsonx_ai/stream_wrapper.py +302 -0
- agentops/instrumentation/providers/mem0/__init__.py +45 -0
- agentops/instrumentation/providers/mem0/common.py +377 -0
- agentops/instrumentation/providers/mem0/instrumentor.py +270 -0
- agentops/instrumentation/providers/mem0/memory.py +430 -0
- agentops/instrumentation/providers/openai/__init__.py +21 -0
- agentops/instrumentation/providers/openai/attributes/__init__.py +7 -0
- agentops/instrumentation/providers/openai/attributes/common.py +55 -0
- agentops/instrumentation/providers/openai/attributes/response.py +607 -0
- agentops/instrumentation/providers/openai/config.py +36 -0
- agentops/instrumentation/providers/openai/instrumentor.py +312 -0
- agentops/instrumentation/providers/openai/stream_wrapper.py +941 -0
- agentops/instrumentation/providers/openai/utils.py +44 -0
- agentops/instrumentation/providers/openai/v0.py +176 -0
- agentops/instrumentation/providers/openai/v0_wrappers.py +483 -0
- agentops/instrumentation/providers/openai/wrappers/__init__.py +30 -0
- agentops/instrumentation/providers/openai/wrappers/assistant.py +277 -0
- agentops/instrumentation/providers/openai/wrappers/chat.py +259 -0
- agentops/instrumentation/providers/openai/wrappers/completion.py +109 -0
- agentops/instrumentation/providers/openai/wrappers/embeddings.py +94 -0
- agentops/instrumentation/providers/openai/wrappers/image_gen.py +75 -0
- agentops/instrumentation/providers/openai/wrappers/responses.py +191 -0
- agentops/instrumentation/providers/openai/wrappers/shared.py +81 -0
- agentops/instrumentation/utilities/concurrent_futures/__init__.py +10 -0
- agentops/instrumentation/utilities/concurrent_futures/instrumentation.py +206 -0
- agentops/integration/callbacks/dspy/__init__.py +11 -0
- agentops/integration/callbacks/dspy/callback.py +471 -0
- agentops/integration/callbacks/langchain/README.md +59 -0
- agentops/integration/callbacks/langchain/__init__.py +15 -0
- agentops/integration/callbacks/langchain/callback.py +791 -0
- agentops/integration/callbacks/langchain/utils.py +54 -0
- agentops/legacy/crewai.md +121 -0
- agentops/logging/instrument_logging.py +4 -0
- agentops/sdk/README.md +220 -0
- agentops/sdk/core.py +75 -32
- agentops/sdk/descriptors/classproperty.py +28 -0
- agentops/sdk/exporters.py +152 -33
- agentops/semconv/README.md +125 -0
- agentops/semconv/span_kinds.py +0 -2
- agentops/validation.py +102 -63
- {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.23.dist-info}/METADATA +30 -40
- mseep_agentops-0.4.23.dist-info/RECORD +178 -0
- {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.23.dist-info}/WHEEL +1 -2
- mseep_agentops-0.4.18.dist-info/RECORD +0 -94
- mseep_agentops-0.4.18.dist-info/top_level.txt +0 -2
- tests/conftest.py +0 -10
- tests/unit/client/__init__.py +0 -1
- tests/unit/client/test_http_adapter.py +0 -221
- tests/unit/client/test_http_client.py +0 -206
- tests/unit/conftest.py +0 -54
- tests/unit/sdk/__init__.py +0 -1
- tests/unit/sdk/instrumentation_tester.py +0 -207
- tests/unit/sdk/test_attributes.py +0 -392
- tests/unit/sdk/test_concurrent_instrumentation.py +0 -468
- tests/unit/sdk/test_decorators.py +0 -763
- tests/unit/sdk/test_exporters.py +0 -241
- tests/unit/sdk/test_factory.py +0 -1188
- tests/unit/sdk/test_internal_span_processor.py +0 -397
- tests/unit/sdk/test_resource_attributes.py +0 -35
- tests/unit/test_config.py +0 -82
- tests/unit/test_context_manager.py +0 -777
- tests/unit/test_events.py +0 -27
- tests/unit/test_host_env.py +0 -54
- tests/unit/test_init_py.py +0 -501
- tests/unit/test_serialization.py +0 -433
- tests/unit/test_session.py +0 -676
- tests/unit/test_user_agent.py +0 -34
- tests/unit/test_validation.py +0 -405
- {tests → agentops/instrumentation/agentic/openai_agents/attributes}/__init__.py +0 -0
- /tests/unit/__init__.py → /agentops/instrumentation/providers/openai/attributes/tools.py +0 -0
- {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.23.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,541 @@
|
|
1
|
+
"""Attribute extraction for Anthropic Message responses."""
|
2
|
+
|
3
|
+
import json
|
4
|
+
from typing import Dict, Any, Optional, Tuple
|
5
|
+
|
6
|
+
try:
|
7
|
+
from anthropic.types import Message, Completion
|
8
|
+
except ImportError:
|
9
|
+
Message = Any # type: ignore
|
10
|
+
Completion = Any # type: ignore
|
11
|
+
|
12
|
+
from agentops.logging import logger
|
13
|
+
from agentops.semconv import (
|
14
|
+
SpanAttributes,
|
15
|
+
LLMRequestTypeValues,
|
16
|
+
MessageAttributes,
|
17
|
+
)
|
18
|
+
from agentops.instrumentation.common.attributes import AttributeMap
|
19
|
+
from agentops.instrumentation.providers.anthropic.attributes.common import (
|
20
|
+
get_common_instrumentation_attributes,
|
21
|
+
extract_request_attributes,
|
22
|
+
)
|
23
|
+
from agentops.instrumentation.providers.anthropic.attributes.tools import (
|
24
|
+
extract_tool_definitions,
|
25
|
+
get_tool_attributes,
|
26
|
+
)
|
27
|
+
|
28
|
+
|
29
|
+
def get_message_attributes(
    args: Optional[Tuple] = None, kwargs: Optional[Dict] = None, return_value: Any = None
) -> AttributeMap:
    """Extract attributes from Anthropic message API call.

    This handles both the request parameters (in kwargs) and the response value
    (in return_value) for comprehensive instrumentation. It serves as the main
    attribute extraction function for the modern Messages API, handling both
    synchronous and asynchronous calls in a consistent manner.

    Args:
        args: Positional arguments (not used in this handler)
        kwargs: Keyword arguments from the API call
        return_value: Response object from the API call

    Returns:
        Dictionary of attributes extracted from the request/response
    """
    # Baseline: shared instrumentation metadata plus the chat request-type marker.
    attributes = get_common_instrumentation_attributes()
    attributes[SpanAttributes.LLM_REQUEST_TYPE] = LLMRequestTypeValues.CHAT.value

    if kwargs:
        attributes.update(get_message_request_attributes(kwargs))

    if return_value:
        try:
            # Imported lazily: `anthropic` is optional (see the guarded import at
            # module top), so these type checks only run when it is installed.
            from anthropic.types import (
                Message,
                MessageStartEvent,
                ContentBlockStartEvent,
                ContentBlockDeltaEvent,
                MessageStopEvent,
                MessageStreamEvent,
            )
            from anthropic import Stream

            if isinstance(return_value, Message):
                # Full (non-streaming) response: response metadata plus tool usage.
                attributes.update(get_message_response_attributes(return_value))

                if hasattr(return_value, "content"):
                    attributes.update(get_tool_attributes(return_value.content))
            elif isinstance(return_value, Stream):
                # NOTE(review): iterating here consumes the stream's events, so a
                # caller handed this Stream afterwards would see it exhausted —
                # confirm callers tee/re-wrap the stream before relying on it.
                for event in return_value:
                    attributes.update(get_stream_event_attributes(event))
            elif isinstance(return_value, MessageStreamEvent):
                attributes.update(get_stream_attributes(return_value))
            elif isinstance(
                return_value, (MessageStartEvent, ContentBlockStartEvent, ContentBlockDeltaEvent, MessageStopEvent)
            ):
                attributes.update(get_stream_event_attributes(return_value))
            else:
                logger.debug(f"[agentops.instrumentation.anthropic] Unrecognized return type: {type(return_value)}")
        except Exception as e:
            # Attribute extraction is best-effort; never let it break the traced call.
            logger.debug(f"[agentops.instrumentation.anthropic] Error extracting response attributes: {e}")

    return attributes
def get_completion_attributes(
    args: Optional[Tuple] = None, kwargs: Optional[Dict] = None, return_value: Any = None
) -> AttributeMap:
    """Extract attributes from an Anthropic completion API call (legacy API).

    Combines request attributes (from ``kwargs``) with response attributes
    (from ``return_value``) for the legacy Completions API. The response is
    classified by its class name rather than by isinstance checks, so no
    anthropic types need to be imported here.

    Args:
        args: Positional arguments (not used in this handler)
        kwargs: Keyword arguments from the API call
        return_value: Response object from the API call

    Returns:
        Dictionary of attributes extracted from the request/response
    """
    attributes = get_common_instrumentation_attributes()
    attributes[SpanAttributes.LLM_REQUEST_TYPE] = LLMRequestTypeValues.COMPLETION.value

    if kwargs:
        attributes.update(get_completion_request_attributes(kwargs))

    if return_value:
        try:
            # Resolve the class name once and dispatch on it.
            response_cls = return_value.__class__.__name__ if hasattr(return_value, "__class__") else None
            if response_cls == "Completion":
                attributes.update(get_completion_response_attributes(return_value))
            elif response_cls == "Stream":
                attributes.update(get_stream_attributes(return_value))
            else:
                logger.debug(
                    f"[agentops.instrumentation.anthropic] Unrecognized completion return type: {type(return_value)}"
                )
        except Exception as e:
            logger.debug(f"[agentops.instrumentation.anthropic] Error extracting completion response attributes: {e}")

    return attributes
def _process_content(content, role, index):
    """Build structured prompt attributes for a single message's content.

    Normalizes the three content shapes the Anthropic API accepts — a plain
    string, a list of content blocks (dicts or typed objects), or anything
    else (stringified as a fallback) — into flat prompt attributes.

    Args:
        content: The content to process (str, list of blocks, or other)
        role: The role of the message
        index: The index of the message within the conversation

    Returns:
        Dictionary of attributes for this content
    """
    attributes = {}

    if isinstance(content, str):
        # String content is easy
        attributes[MessageAttributes.PROMPT_ROLE.format(i=index)] = role
        attributes[MessageAttributes.PROMPT_CONTENT.format(i=index)] = content
        attributes[MessageAttributes.PROMPT_TYPE.format(i=index)] = "text"
    elif isinstance(content, list):
        # For list content, flatten text and tool-result blocks into one
        # simplified string representation.
        content_str = ""
        for item in content:
            if isinstance(item, dict) and "type" in item:
                if item["type"] == "text" and "text" in item:
                    content_str += item["text"] + " "
                elif item["type"] == "tool_result" and "content" in item:
                    content_str += f"[Tool Result: {str(item['content'])}] "
            elif hasattr(item, "type"):
                # Typed content-block objects (e.g. TextBlock instances).
                if item.type == "text" and hasattr(item, "text"):
                    content_str += item.text + " "

        attributes[MessageAttributes.PROMPT_ROLE.format(i=index)] = role
        attributes[MessageAttributes.PROMPT_CONTENT.format(i=index)] = content_str.strip()
        attributes[MessageAttributes.PROMPT_TYPE.format(i=index)] = "text"
    else:
        # Other types - try to convert to string
        try:
            simple_content = str(content)
            attributes[MessageAttributes.PROMPT_ROLE.format(i=index)] = role
            attributes[MessageAttributes.PROMPT_CONTENT.format(i=index)] = simple_content
            attributes[MessageAttributes.PROMPT_TYPE.format(i=index)] = "text"
        # Fix: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt; only str() failures should be caught here.
        except Exception:
            # Ultimate fallback for objects whose __str__ raises.
            attributes[MessageAttributes.PROMPT_ROLE.format(i=index)] = role
            attributes[MessageAttributes.PROMPT_CONTENT.format(i=index)] = "(complex content)"
            attributes[MessageAttributes.PROMPT_TYPE.format(i=index)] = "unknown"

    return attributes
def _create_simplified_message(msg):
    """Create a simplified {role, content} dict for the LLM_PROMPTS attribute.

    Flattens list content (text blocks and tool results) into a single
    string; any other non-string content is stringified as a fallback.

    Args:
        msg: The message dict to simplify

    Returns:
        Dictionary with role and content
    """
    role = msg.get("role", "user")
    content = msg.get("content", "")

    if isinstance(content, str):
        return {"role": role, "content": content}
    elif isinstance(content, list):
        # Flatten block-structured content into one readable string.
        content_str = ""
        for item in content:
            if isinstance(item, dict) and "type" in item:
                if item["type"] == "text" and "text" in item:
                    content_str += item["text"] + " "
                elif item["type"] == "tool_result" and "content" in item:
                    content_str += f"[Tool Result: {str(item['content'])}] "
            elif hasattr(item, "type"):
                # Typed content-block objects (e.g. TextBlock instances).
                if item.type == "text" and hasattr(item, "text"):
                    content_str += item.text + " "
        return {"role": role, "content": content_str.strip()}
    else:
        try:
            return {"role": role, "content": str(content)}
        # Fix: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt; only str() failures should be caught here.
        except Exception:
            return {"role": role, "content": "(complex content)"}
def get_message_request_attributes(kwargs: Dict[str, Any]) -> AttributeMap:
    """Build telemetry attributes for a Messages API request.

    Pulls model parameters via ``extract_request_attributes`` and then layers
    on the structured prompt attributes: an optional system prompt, one entry
    per message in the ``messages`` array, and any tool definitions supplied
    with the request.

    Args:
        kwargs: Request keyword arguments

    Returns:
        Dictionary of extracted attributes
    """
    attrs = extract_request_attributes(kwargs=kwargs)

    # System prompt, when supplied, is recorded as prompt slot 0.
    # NOTE(review): a non-empty ``messages`` list also writes prompt slot 0
    # below, overwriting these values — confirm the intended index layout.
    system_prompt = kwargs.get("system", "")
    if system_prompt:
        attrs[MessageAttributes.PROMPT_ROLE.format(i=0)] = "system"
        attrs[MessageAttributes.PROMPT_CONTENT.format(i=0)] = system_prompt
        attrs[MessageAttributes.PROMPT_TYPE.format(i=0)] = "text"

    # One structured prompt entry per conversation message.
    for position, message in enumerate(kwargs.get("messages", [])):
        attrs.update(
            _process_content(
                message.get("content", ""),
                message.get("role", "user"),
                position,
            )
        )

    # Tool definitions, if the request declares any.
    declared_tools = kwargs.get("tools", [])
    if declared_tools:
        attrs.update(extract_tool_definitions(declared_tools))

    return attrs
def get_completion_request_attributes(kwargs: Dict[str, Any]) -> AttributeMap:
    """Build telemetry attributes for a legacy Completions API request.

    The legacy API takes a single ``prompt`` string instead of the newer
    ``messages`` array, so the prompt is recorded as a single "user" entry
    at slot 0 on top of the shared request attributes.

    Args:
        kwargs: Keyword arguments from the legacy API call

    Returns:
        Dictionary of extracted attributes
    """
    attrs = extract_request_attributes(kwargs=kwargs)

    legacy_prompt = kwargs.get("prompt", "")
    if legacy_prompt:
        # Record the prompt with the structured per-slot attributes.
        attrs[MessageAttributes.PROMPT_ROLE.format(i=0)] = "user"
        attrs[MessageAttributes.PROMPT_CONTENT.format(i=0)] = legacy_prompt
        attrs[MessageAttributes.PROMPT_TYPE.format(i=0)] = "text"

    return attrs
def get_message_response_attributes(response: "Message") -> AttributeMap:
    """Extract attributes from a Message response.

    Processes a Messages API response into standardized telemetry attributes:
    message/completion identity, model, token usage (input, output, derived
    total), stop reason, and the content blocks of the assistant reply —
    ``text`` blocks become completion content, ``tool_use`` blocks become
    structured tool-call attributes.

    Args:
        response: The Message response object from Anthropic

    Returns:
        Dictionary of extracted attributes
    """
    attributes = {}

    # Message ID doubles as the completion ID for slot 0.
    if hasattr(response, "id"):
        message_id = response.id
        attributes[SpanAttributes.LLM_RESPONSE_ID] = message_id
        attributes[MessageAttributes.COMPLETION_ID.format(i=0)] = message_id

    if hasattr(response, "model"):
        attributes[SpanAttributes.LLM_RESPONSE_MODEL] = response.model

    # Token usage; total is derived only when both counts are present.
    if hasattr(response, "usage"):
        usage = response.usage
        if hasattr(usage, "input_tokens"):
            attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] = usage.input_tokens
        if hasattr(usage, "output_tokens"):
            attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] = usage.output_tokens
        if hasattr(usage, "input_tokens") and hasattr(usage, "output_tokens"):
            attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] = usage.input_tokens + usage.output_tokens

    # Stop reason is mirrored into all three finish-related keys.
    if hasattr(response, "stop_reason"):
        stop_reason = response.stop_reason
        attributes[SpanAttributes.LLM_RESPONSE_STOP_REASON] = stop_reason
        attributes[SpanAttributes.LLM_RESPONSE_FINISH_REASON] = stop_reason
        attributes[MessageAttributes.COMPLETION_FINISH_REASON.format(i=0)] = stop_reason

    # Walk the content blocks. (Fix: the original also accumulated an
    # `extracted_content` list that was never read, and kept a `tool_calls`
    # list only to derive the next tool index — both replaced.)
    if hasattr(response, "content"):
        try:
            # Claude responses are attributed to the assistant role.
            attributes[MessageAttributes.COMPLETION_ROLE.format(i=0)] = "assistant"

            tool_index = 0  # j-index for tool calls, independent of the block index
            for i, content_block in enumerate(response.content):
                block_type = content_block.type if hasattr(content_block, "type") else None

                if block_type == "text":
                    text_content = content_block.text if hasattr(content_block, "text") else ""
                    attributes[MessageAttributes.COMPLETION_TYPE.format(i=i)] = "text"
                    attributes[MessageAttributes.COMPLETION_CONTENT.format(i=i)] = text_content

                elif block_type == "tool_use":
                    tool_name = content_block.name if hasattr(content_block, "name") else "unknown"
                    tool_id = content_block.id if hasattr(content_block, "id") else "unknown"
                    tool_input = content_block.input if hasattr(content_block, "input") else {}

                    j = tool_index
                    tool_index += 1
                    attributes[MessageAttributes.COMPLETION_TOOL_CALL_NAME.format(i=0, j=j)] = tool_name
                    attributes[MessageAttributes.COMPLETION_TOOL_CALL_ID.format(i=0, j=j)] = tool_id
                    attributes[MessageAttributes.COMPLETION_TOOL_CALL_TYPE.format(i=0, j=j)] = "function"

                    # Structured inputs are serialized to JSON; anything else is stringified.
                    if isinstance(tool_input, dict):
                        serialized_input = json.dumps(tool_input)
                    else:
                        serialized_input = str(tool_input)

                    attributes[MessageAttributes.COMPLETION_TOOL_CALL_ARGUMENTS.format(i=0, j=j)] = serialized_input

        except Exception as e:
            logger.debug(f"[agentops.instrumentation.anthropic] Error extracting content: {e}")

    return attributes
def get_completion_response_attributes(response: "Completion") -> AttributeMap:
    """Extract attributes from a Completion response (legacy API).

    Maps a legacy Completions API response onto the standardized telemetry
    attributes. The legacy response carries a flat ``completion`` string
    rather than the content-block list of the modern Messages API, so the
    extraction logic here differs accordingly.

    Args:
        response: The Completion response object from Anthropic

    Returns:
        Dictionary of extracted attributes
    """
    attributes = {}
    _missing = object()  # sentinel distinguishing "absent" from a None value

    # Response / completion identifier.
    response_id = getattr(response, "id", _missing)
    if response_id is not _missing:
        attributes[SpanAttributes.LLM_RESPONSE_ID] = response_id
        attributes[MessageAttributes.COMPLETION_ID.format(i=0)] = response_id

    # Model that produced the completion.
    model_name = getattr(response, "model", _missing)
    if model_name is not _missing:
        attributes[SpanAttributes.LLM_RESPONSE_MODEL] = model_name

    # The completion text, recorded as structured completion attributes and
    # duplicated into the legacy flat attributes for backward compatibility.
    completion_text = getattr(response, "completion", _missing)
    if completion_text is not _missing:
        attributes[MessageAttributes.COMPLETION_TYPE.format(i=0)] = "text"
        attributes[MessageAttributes.COMPLETION_ROLE.format(i=0)] = "assistant"
        attributes[MessageAttributes.COMPLETION_CONTENT.format(i=0)] = completion_text

        attributes[SpanAttributes.LLM_COMPLETIONS] = json.dumps([{"type": "text", "text": completion_text}])
        attributes[SpanAttributes.LLM_CONTENT_COMPLETION_CHUNK] = completion_text

    # Stop reason, mirrored into every finish-related key.
    stop_reason = getattr(response, "stop_reason", _missing)
    if stop_reason is not _missing:
        attributes[SpanAttributes.LLM_RESPONSE_STOP_REASON] = stop_reason
        attributes[SpanAttributes.LLM_RESPONSE_FINISH_REASON] = stop_reason
        attributes[MessageAttributes.COMPLETION_FINISH_REASON.format(i=0)] = stop_reason

    # Token usage (only present on newer library versions).
    usage = getattr(response, "usage", _missing)
    if usage is not _missing:
        prompt_tokens = getattr(usage, "input_tokens", _missing)
        completion_tokens = getattr(usage, "output_tokens", _missing)
        if prompt_tokens is not _missing:
            attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] = prompt_tokens
        if completion_tokens is not _missing:
            attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] = completion_tokens
        # Derive the total only when both counts are available.
        if prompt_tokens is not _missing and completion_tokens is not _missing:
            attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] = prompt_tokens + completion_tokens

    return attributes
def get_stream_attributes(stream: Any) -> AttributeMap:
    """Collect the metadata available on a streaming response object.

    Before a stream is consumed only identifying information is available —
    content and token usage arrive with later events — so this records the
    streaming flag and, when present, the model.

    Args:
        stream: The stream object from an Anthropic streaming request

    Returns:
        Dictionary of available stream metadata attributes
    """
    stream_attrs = {SpanAttributes.LLM_REQUEST_STREAMING: True}

    if hasattr(stream, "model"):
        stream_attrs[SpanAttributes.LLM_REQUEST_MODEL] = stream.model

    return stream_attrs
def get_stream_event_attributes(event: Any) -> AttributeMap:
    """Extract attributes from a single streaming event.

    Dispatches on the event's class name, since different event classes carry
    different payloads: message-start events yield identity metadata (id and
    model), stop/delta events yield the stop reason, and raw message-start
    events yield token usage.

    Args:
        event: A streaming event object from Anthropic

    Returns:
        Dictionary of available event attributes
    """

    def _finish_attrs(reason):
        # The stop reason is mirrored into all three finish-related keys.
        return {
            SpanAttributes.LLM_RESPONSE_STOP_REASON: reason,
            SpanAttributes.LLM_RESPONSE_FINISH_REASON: reason,
            MessageAttributes.COMPLETION_FINISH_REASON.format(i=0): reason,
        }

    attributes = {}
    kind = event.__class__.__name__

    if kind == "MessageStartEvent":
        msg = getattr(event, "message", None)
        if msg is not None:
            if hasattr(msg, "id"):
                attributes[SpanAttributes.LLM_RESPONSE_ID] = msg.id
                attributes[MessageAttributes.COMPLETION_ID.format(i=0)] = msg.id
            if hasattr(msg, "model"):
                attributes[SpanAttributes.LLM_RESPONSE_MODEL] = msg.model

    elif kind == "MessageStopEvent":
        msg = getattr(event, "message", None)
        if msg is not None and hasattr(msg, "stop_reason"):
            attributes.update(_finish_attrs(msg.stop_reason))

    elif kind == "RawMessageStartEvent":
        msg = getattr(event, "message", None)
        usage = getattr(msg, "usage", None) if msg is not None else None
        if usage is not None:
            if hasattr(usage, "input_tokens"):
                attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] = usage.input_tokens
            if hasattr(usage, "output_tokens"):
                attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] = usage.output_tokens
            # Total is derived only when both counts are present.
            if hasattr(usage, "input_tokens") and hasattr(usage, "output_tokens"):
                attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] = usage.input_tokens + usage.output_tokens

    elif kind == "RawMessageDeltaEvent":
        delta = getattr(event, "delta", None)
        if delta is not None and hasattr(delta, "stop_reason"):
            attributes.update(_finish_attrs(delta.stop_reason))

    return attributes