mseep-agentops 0.4.18__py3-none-any.whl → 0.4.22__py3-none-any.whl
This diff shows the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- agentops/__init__.py +0 -0
- agentops/client/api/base.py +28 -30
- agentops/client/api/versions/v3.py +29 -25
- agentops/client/api/versions/v4.py +87 -46
- agentops/client/client.py +98 -29
- agentops/client/http/README.md +87 -0
- agentops/client/http/http_client.py +126 -172
- agentops/config.py +8 -2
- agentops/instrumentation/OpenTelemetry.md +133 -0
- agentops/instrumentation/README.md +167 -0
- agentops/instrumentation/__init__.py +13 -1
- agentops/instrumentation/agentic/ag2/__init__.py +18 -0
- agentops/instrumentation/agentic/ag2/instrumentor.py +922 -0
- agentops/instrumentation/agentic/agno/__init__.py +19 -0
- agentops/instrumentation/agentic/agno/attributes/__init__.py +20 -0
- agentops/instrumentation/agentic/agno/attributes/agent.py +250 -0
- agentops/instrumentation/agentic/agno/attributes/metrics.py +214 -0
- agentops/instrumentation/agentic/agno/attributes/storage.py +158 -0
- agentops/instrumentation/agentic/agno/attributes/team.py +195 -0
- agentops/instrumentation/agentic/agno/attributes/tool.py +210 -0
- agentops/instrumentation/agentic/agno/attributes/workflow.py +254 -0
- agentops/instrumentation/agentic/agno/instrumentor.py +1313 -0
- agentops/instrumentation/agentic/crewai/LICENSE +201 -0
- agentops/instrumentation/agentic/crewai/NOTICE.md +10 -0
- agentops/instrumentation/agentic/crewai/__init__.py +6 -0
- agentops/instrumentation/agentic/crewai/crewai_span_attributes.py +335 -0
- agentops/instrumentation/agentic/crewai/instrumentation.py +535 -0
- agentops/instrumentation/agentic/crewai/version.py +1 -0
- agentops/instrumentation/agentic/google_adk/__init__.py +19 -0
- agentops/instrumentation/agentic/google_adk/instrumentor.py +68 -0
- agentops/instrumentation/agentic/google_adk/patch.py +767 -0
- agentops/instrumentation/agentic/haystack/__init__.py +1 -0
- agentops/instrumentation/agentic/haystack/instrumentor.py +186 -0
- agentops/instrumentation/agentic/langgraph/__init__.py +3 -0
- agentops/instrumentation/agentic/langgraph/attributes.py +54 -0
- agentops/instrumentation/agentic/langgraph/instrumentation.py +598 -0
- agentops/instrumentation/agentic/langgraph/version.py +1 -0
- agentops/instrumentation/agentic/openai_agents/README.md +156 -0
- agentops/instrumentation/agentic/openai_agents/SPANS.md +145 -0
- agentops/instrumentation/agentic/openai_agents/TRACING_API.md +144 -0
- agentops/instrumentation/agentic/openai_agents/__init__.py +30 -0
- agentops/instrumentation/agentic/openai_agents/attributes/common.py +549 -0
- agentops/instrumentation/agentic/openai_agents/attributes/completion.py +172 -0
- agentops/instrumentation/agentic/openai_agents/attributes/model.py +58 -0
- agentops/instrumentation/agentic/openai_agents/attributes/tokens.py +275 -0
- agentops/instrumentation/agentic/openai_agents/exporter.py +469 -0
- agentops/instrumentation/agentic/openai_agents/instrumentor.py +107 -0
- agentops/instrumentation/agentic/openai_agents/processor.py +58 -0
- agentops/instrumentation/agentic/smolagents/README.md +88 -0
- agentops/instrumentation/agentic/smolagents/__init__.py +12 -0
- agentops/instrumentation/agentic/smolagents/attributes/agent.py +354 -0
- agentops/instrumentation/agentic/smolagents/attributes/model.py +205 -0
- agentops/instrumentation/agentic/smolagents/instrumentor.py +286 -0
- agentops/instrumentation/agentic/smolagents/stream_wrapper.py +258 -0
- agentops/instrumentation/agentic/xpander/__init__.py +15 -0
- agentops/instrumentation/agentic/xpander/context.py +112 -0
- agentops/instrumentation/agentic/xpander/instrumentor.py +877 -0
- agentops/instrumentation/agentic/xpander/trace_probe.py +86 -0
- agentops/instrumentation/agentic/xpander/version.py +3 -0
- agentops/instrumentation/common/README.md +65 -0
- agentops/instrumentation/common/attributes.py +1 -2
- agentops/instrumentation/providers/anthropic/__init__.py +24 -0
- agentops/instrumentation/providers/anthropic/attributes/__init__.py +23 -0
- agentops/instrumentation/providers/anthropic/attributes/common.py +64 -0
- agentops/instrumentation/providers/anthropic/attributes/message.py +541 -0
- agentops/instrumentation/providers/anthropic/attributes/tools.py +231 -0
- agentops/instrumentation/providers/anthropic/event_handler_wrapper.py +90 -0
- agentops/instrumentation/providers/anthropic/instrumentor.py +146 -0
- agentops/instrumentation/providers/anthropic/stream_wrapper.py +436 -0
- agentops/instrumentation/providers/google_genai/README.md +33 -0
- agentops/instrumentation/providers/google_genai/__init__.py +24 -0
- agentops/instrumentation/providers/google_genai/attributes/__init__.py +25 -0
- agentops/instrumentation/providers/google_genai/attributes/chat.py +125 -0
- agentops/instrumentation/providers/google_genai/attributes/common.py +88 -0
- agentops/instrumentation/providers/google_genai/attributes/model.py +284 -0
- agentops/instrumentation/providers/google_genai/instrumentor.py +170 -0
- agentops/instrumentation/providers/google_genai/stream_wrapper.py +238 -0
- agentops/instrumentation/providers/ibm_watsonx_ai/__init__.py +28 -0
- agentops/instrumentation/providers/ibm_watsonx_ai/attributes/__init__.py +27 -0
- agentops/instrumentation/providers/ibm_watsonx_ai/attributes/attributes.py +277 -0
- agentops/instrumentation/providers/ibm_watsonx_ai/attributes/common.py +104 -0
- agentops/instrumentation/providers/ibm_watsonx_ai/instrumentor.py +162 -0
- agentops/instrumentation/providers/ibm_watsonx_ai/stream_wrapper.py +302 -0
- agentops/instrumentation/providers/mem0/__init__.py +45 -0
- agentops/instrumentation/providers/mem0/common.py +377 -0
- agentops/instrumentation/providers/mem0/instrumentor.py +270 -0
- agentops/instrumentation/providers/mem0/memory.py +430 -0
- agentops/instrumentation/providers/openai/__init__.py +21 -0
- agentops/instrumentation/providers/openai/attributes/__init__.py +7 -0
- agentops/instrumentation/providers/openai/attributes/common.py +55 -0
- agentops/instrumentation/providers/openai/attributes/response.py +607 -0
- agentops/instrumentation/providers/openai/config.py +36 -0
- agentops/instrumentation/providers/openai/instrumentor.py +312 -0
- agentops/instrumentation/providers/openai/stream_wrapper.py +941 -0
- agentops/instrumentation/providers/openai/utils.py +44 -0
- agentops/instrumentation/providers/openai/v0.py +176 -0
- agentops/instrumentation/providers/openai/v0_wrappers.py +483 -0
- agentops/instrumentation/providers/openai/wrappers/__init__.py +30 -0
- agentops/instrumentation/providers/openai/wrappers/assistant.py +277 -0
- agentops/instrumentation/providers/openai/wrappers/chat.py +259 -0
- agentops/instrumentation/providers/openai/wrappers/completion.py +109 -0
- agentops/instrumentation/providers/openai/wrappers/embeddings.py +94 -0
- agentops/instrumentation/providers/openai/wrappers/image_gen.py +75 -0
- agentops/instrumentation/providers/openai/wrappers/responses.py +191 -0
- agentops/instrumentation/providers/openai/wrappers/shared.py +81 -0
- agentops/instrumentation/utilities/concurrent_futures/__init__.py +10 -0
- agentops/instrumentation/utilities/concurrent_futures/instrumentation.py +206 -0
- agentops/integration/callbacks/dspy/__init__.py +11 -0
- agentops/integration/callbacks/dspy/callback.py +471 -0
- agentops/integration/callbacks/langchain/README.md +59 -0
- agentops/integration/callbacks/langchain/__init__.py +15 -0
- agentops/integration/callbacks/langchain/callback.py +791 -0
- agentops/integration/callbacks/langchain/utils.py +54 -0
- agentops/legacy/crewai.md +121 -0
- agentops/logging/instrument_logging.py +4 -0
- agentops/sdk/README.md +220 -0
- agentops/sdk/core.py +75 -32
- agentops/sdk/descriptors/classproperty.py +28 -0
- agentops/sdk/exporters.py +152 -33
- agentops/semconv/README.md +125 -0
- agentops/semconv/span_kinds.py +0 -2
- agentops/validation.py +102 -63
- {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.22.dist-info}/METADATA +30 -40
- mseep_agentops-0.4.22.dist-info/RECORD +178 -0
- {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.22.dist-info}/WHEEL +1 -2
- mseep_agentops-0.4.18.dist-info/RECORD +0 -94
- mseep_agentops-0.4.18.dist-info/top_level.txt +0 -2
- tests/conftest.py +0 -10
- tests/unit/client/__init__.py +0 -1
- tests/unit/client/test_http_adapter.py +0 -221
- tests/unit/client/test_http_client.py +0 -206
- tests/unit/conftest.py +0 -54
- tests/unit/sdk/__init__.py +0 -1
- tests/unit/sdk/instrumentation_tester.py +0 -207
- tests/unit/sdk/test_attributes.py +0 -392
- tests/unit/sdk/test_concurrent_instrumentation.py +0 -468
- tests/unit/sdk/test_decorators.py +0 -763
- tests/unit/sdk/test_exporters.py +0 -241
- tests/unit/sdk/test_factory.py +0 -1188
- tests/unit/sdk/test_internal_span_processor.py +0 -397
- tests/unit/sdk/test_resource_attributes.py +0 -35
- tests/unit/test_config.py +0 -82
- tests/unit/test_context_manager.py +0 -777
- tests/unit/test_events.py +0 -27
- tests/unit/test_host_env.py +0 -54
- tests/unit/test_init_py.py +0 -501
- tests/unit/test_serialization.py +0 -433
- tests/unit/test_session.py +0 -676
- tests/unit/test_user_agent.py +0 -34
- tests/unit/test_validation.py +0 -405
- {tests → agentops/instrumentation/agentic/openai_agents/attributes}/__init__.py +0 -0
- /tests/unit/__init__.py → /agentops/instrumentation/providers/openai/attributes/tools.py +0 -0
- {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.22.dist-info}/licenses/LICENSE +0 -0
agentops/instrumentation/providers/openai/wrappers/embeddings.py
@@ -0,0 +1,94 @@
+"""Embeddings wrapper for OpenAI instrumentation.
+
+This module provides attribute extraction for OpenAI embeddings API.
+"""
+
+import logging
+from typing import Any, Dict, Optional, Tuple
+
+from agentops.instrumentation.providers.openai.utils import is_openai_v1
+from agentops.instrumentation.providers.openai.wrappers.shared import (
+    model_as_dict,
+    should_send_prompts,
+)
+from agentops.instrumentation.common.attributes import AttributeMap
+from agentops.semconv import SpanAttributes, LLMRequestTypeValues
+
+logger = logging.getLogger(__name__)
+
+LLM_REQUEST_TYPE = LLMRequestTypeValues.EMBEDDING
+
+
+def handle_embeddings_attributes(
+    args: Optional[Tuple] = None,
+    kwargs: Optional[Dict] = None,
+    return_value: Optional[Any] = None,
+) -> AttributeMap:
+    """Extract attributes from embeddings calls."""
+    attributes = {
+        SpanAttributes.LLM_REQUEST_TYPE: LLM_REQUEST_TYPE.value,
+        SpanAttributes.LLM_SYSTEM: "OpenAI",
+    }
+
+    # Extract request attributes from kwargs
+    if kwargs:
+        # Model
+        if "model" in kwargs:
+            attributes[SpanAttributes.LLM_REQUEST_MODEL] = kwargs["model"]
+
+        # Headers
+        headers = kwargs.get("extra_headers") or kwargs.get("headers")
+        if headers:
+            attributes[SpanAttributes.LLM_REQUEST_HEADERS] = str(headers)
+
+        # Input
+        if should_send_prompts() and "input" in kwargs:
+            input_param = kwargs["input"]
+            if isinstance(input_param, str):
+                attributes[f"{SpanAttributes.LLM_PROMPTS}.0.content"] = input_param
+            elif isinstance(input_param, list):
+                for i, inp in enumerate(input_param):
+                    if isinstance(inp, str):
+                        attributes[f"{SpanAttributes.LLM_PROMPTS}.{i}.content"] = inp
+                    elif isinstance(inp, (int, list)):
+                        # Token inputs - convert to string representation
+                        attributes[f"{SpanAttributes.LLM_PROMPTS}.{i}.content"] = str(inp)
+
+    # Extract response attributes from return value
+    if return_value:
+        # Convert to dict if needed
+        response_dict = {}
+        if hasattr(return_value, "__dict__") and not hasattr(return_value, "__iter__"):
+            response_dict = model_as_dict(return_value)
+        elif isinstance(return_value, dict):
+            response_dict = return_value
+        elif hasattr(return_value, "model_dump"):
+            # Handle Pydantic models directly
+            response_dict = return_value.model_dump()
+        elif hasattr(return_value, "__dict__"):
+            # Try to use model_as_dict even if it has __iter__
+            response_dict = model_as_dict(return_value)
+        # Basic response attributes
+        if "model" in response_dict:
+            attributes[SpanAttributes.LLM_RESPONSE_MODEL] = response_dict["model"]
+
+        # Usage
+        usage = response_dict.get("usage", {})
+        if usage:
+            if is_openai_v1() and hasattr(usage, "__dict__"):
+                usage = usage.__dict__
+            if "total_tokens" in usage:
+                attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] = usage["total_tokens"]
+            if "prompt_tokens" in usage:
+                attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] = usage["prompt_tokens"]
+
+        # Embeddings data
+        if should_send_prompts() and "data" in response_dict:
+            data = response_dict["data"]
+            for i, item in enumerate(data):
+                embedding = item.get("embedding", [])
+                if embedding:
+                    # We don't store the full embedding vector, just metadata
+                    attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{i}.embedding_length"] = len(embedding)
+
+    return attributes
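Note: a minimal usage sketch (not part of the diff) of how handle_embeddings_attributes maps a call onto span attributes. The response dict below is fabricated to mirror the embeddings API shape; real wiring would pass the live OpenAI response object.

# Hypothetical exercise of the extractor above; all values are made up.
from agentops.instrumentation.providers.openai.wrappers.embeddings import (
    handle_embeddings_attributes,
)

fake_kwargs = {"model": "text-embedding-3-small", "input": ["hello", "world"]}
fake_response = {
    "model": "text-embedding-3-small",
    "data": [{"embedding": [0.1, 0.2, 0.3]}, {"embedding": [0.4, 0.5, 0.6]}],
    "usage": {"prompt_tokens": 4, "total_tokens": 4},
}

attrs = handle_embeddings_attributes(kwargs=fake_kwargs, return_value=fake_response)
# attrs carries the request/response model, per-input prompt content (when
# content tracing is enabled), token usage, and embedding_length=3 per item.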
agentops/instrumentation/providers/openai/wrappers/image_gen.py
@@ -0,0 +1,75 @@
+"""Image generation wrapper for OpenAI instrumentation.
+
+This module provides attribute extraction for OpenAI image generation API.
+"""
+
+import logging
+from typing import Any, Dict, Optional, Tuple
+
+from agentops.instrumentation.providers.openai.wrappers.shared import model_as_dict
+from agentops.instrumentation.common.attributes import AttributeMap
+from agentops.semconv import SpanAttributes
+
+logger = logging.getLogger(__name__)
+
+
+def handle_image_gen_attributes(
+    args: Optional[Tuple] = None,
+    kwargs: Optional[Dict] = None,
+    return_value: Optional[Any] = None,
+) -> AttributeMap:
+    """Extract attributes from image generation calls."""
+    attributes = {
+        SpanAttributes.LLM_SYSTEM: "OpenAI",
+        "gen_ai.operation.name": "image_generation",
+    }
+
+    # Extract request attributes from kwargs
+    if kwargs:
+        # Model
+        if "model" in kwargs:
+            attributes[SpanAttributes.LLM_REQUEST_MODEL] = kwargs["model"]
+
+        # Image parameters
+        if "prompt" in kwargs:
+            attributes["gen_ai.request.image_prompt"] = kwargs["prompt"]
+        if "size" in kwargs:
+            attributes["gen_ai.request.image_size"] = kwargs["size"]
+        if "quality" in kwargs:
+            attributes["gen_ai.request.image_quality"] = kwargs["quality"]
+        if "style" in kwargs:
+            attributes["gen_ai.request.image_style"] = kwargs["style"]
+        if "n" in kwargs:
+            attributes["gen_ai.request.image_count"] = kwargs["n"]
+        if "response_format" in kwargs:
+            attributes["gen_ai.request.image_response_format"] = kwargs["response_format"]
+
+        # Headers
+        headers = kwargs.get("extra_headers") or kwargs.get("headers")
+        if headers:
+            attributes[SpanAttributes.LLM_REQUEST_HEADERS] = str(headers)
+
+    # Extract response attributes from return value
+    if return_value:
+        # Convert to dict if needed
+        response_dict = {}
+        if hasattr(return_value, "__dict__") and not hasattr(return_value, "__iter__"):
+            response_dict = model_as_dict(return_value)
+        elif isinstance(return_value, dict):
+            response_dict = return_value
+
+        # Response data
+        if "created" in response_dict:
+            attributes["gen_ai.response.created"] = response_dict["created"]
+
+        # Images data
+        if "data" in response_dict:
+            data = response_dict["data"]
+            attributes["gen_ai.response.image_count"] = len(data)
+
+            # We don't typically store the full image data, but we can store metadata
+            for i, item in enumerate(data):
+                if "revised_prompt" in item:
+                    attributes[f"gen_ai.response.images.{i}.revised_prompt"] = item["revised_prompt"]
+
+    return attributes
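Note: an analogous sketch for the image-generation extractor, again with a fabricated response dict shaped like the images API.

# Hypothetical exercise of handle_image_gen_attributes; values are made up.
from agentops.instrumentation.providers.openai.wrappers.image_gen import (
    handle_image_gen_attributes,
)

fake_kwargs = {"model": "dall-e-3", "prompt": "a watercolor fox", "size": "1024x1024", "n": 1}
fake_response = {
    "created": 1700000000,
    "data": [{"revised_prompt": "A watercolor painting of a red fox"}],
}

attrs = handle_image_gen_attributes(kwargs=fake_kwargs, return_value=fake_response)
# attrs includes "gen_ai.request.image_size" == "1024x1024",
# "gen_ai.response.image_count" == 1, and the per-image revised prompt.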
agentops/instrumentation/providers/openai/wrappers/responses.py
@@ -0,0 +1,191 @@
+"""Responses API wrapper for OpenAI instrumentation.
+
+This module provides attribute extraction for OpenAI Responses API endpoints.
+"""
+
+import json
+import logging
+from typing import Any, Dict, Optional, Tuple
+
+from agentops.instrumentation.providers.openai.utils import is_openai_v1
+from agentops.instrumentation.providers.openai.wrappers.shared import (
+    model_as_dict,
+    should_send_prompts,
+)
+from agentops.instrumentation.common.attributes import AttributeMap
+from agentops.semconv import SpanAttributes, LLMRequestTypeValues
+
+logger = logging.getLogger(__name__)
+
+
+def handle_responses_attributes(
+    args: Optional[Tuple] = None,
+    kwargs: Optional[Dict] = None,
+    return_value: Optional[Any] = None,
+) -> AttributeMap:
+    """Extract attributes from responses API calls."""
+    attributes = {
+        SpanAttributes.LLM_SYSTEM: "OpenAI",
+        SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.CHAT.value,
+    }
+
+    # Extract request attributes from kwargs
+    if kwargs:
+        # Model
+        if "model" in kwargs:
+            attributes[SpanAttributes.LLM_REQUEST_MODEL] = kwargs["model"]
+
+        # Request parameters
+        if "max_tokens" in kwargs:
+            attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] = kwargs["max_tokens"]
+        if "temperature" in kwargs:
+            attributes[SpanAttributes.LLM_REQUEST_TEMPERATURE] = kwargs["temperature"]
+        if "top_p" in kwargs:
+            attributes[SpanAttributes.LLM_REQUEST_TOP_P] = kwargs["top_p"]
+        if "frequency_penalty" in kwargs:
+            attributes[SpanAttributes.LLM_REQUEST_FREQUENCY_PENALTY] = kwargs["frequency_penalty"]
+        if "presence_penalty" in kwargs:
+            attributes[SpanAttributes.LLM_REQUEST_PRESENCE_PENALTY] = kwargs["presence_penalty"]
+        if "user" in kwargs:
+            attributes[SpanAttributes.LLM_USER] = kwargs["user"]
+
+        # Streaming
+        attributes[SpanAttributes.LLM_REQUEST_STREAMING] = kwargs.get("stream", False)
+
+        # Input messages
+        if should_send_prompts() and "input" in kwargs:
+            messages = kwargs["input"]
+            for i, msg in enumerate(messages):
+                prefix = f"{SpanAttributes.LLM_PROMPTS}.{i}"
+                if isinstance(msg, dict):
+                    if "role" in msg:
+                        attributes[f"{prefix}.role"] = msg["role"]
+                    if "content" in msg:
+                        content = msg["content"]
+                        if isinstance(content, list):
+                            content = json.dumps(content)
+                        attributes[f"{prefix}.content"] = content
+
+        # Tools
+        if "tools" in kwargs:
+            tools = kwargs["tools"]
+            if tools:
+                for i, tool in enumerate(tools):
+                    if isinstance(tool, dict) and "function" in tool:
+                        function = tool["function"]
+                        prefix = f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.{i}"
+                        if "name" in function:
+                            attributes[f"{prefix}.name"] = function["name"]
+                        if "description" in function:
+                            attributes[f"{prefix}.description"] = function["description"]
+                        if "parameters" in function:
+                            attributes[f"{prefix}.parameters"] = json.dumps(function["parameters"])
+
+    # Extract response attributes from return value
+    if return_value:
+        # Convert to dict if needed
+        response_dict = {}
+        if hasattr(return_value, "__dict__") and not hasattr(return_value, "__iter__"):
+            response_dict = model_as_dict(return_value)
+        elif isinstance(return_value, dict):
+            response_dict = return_value
+        elif hasattr(return_value, "model_dump"):
+            response_dict = return_value.model_dump()
+
+        # Basic response attributes
+        if "id" in response_dict:
+            attributes[SpanAttributes.LLM_RESPONSE_ID] = response_dict["id"]
+        if "model" in response_dict:
+            attributes[SpanAttributes.LLM_RESPONSE_MODEL] = response_dict["model"]
+
+        # Usage
+        usage = response_dict.get("usage", {})
+        if usage:
+            if is_openai_v1() and hasattr(usage, "__dict__"):
+                usage = usage.__dict__
+            if "total_tokens" in usage:
+                attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] = usage["total_tokens"]
+            # Responses API uses input_tokens/output_tokens instead of prompt_tokens/completion_tokens
+            if "input_tokens" in usage:
+                attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] = usage["input_tokens"]
+            if "output_tokens" in usage:
+                attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] = usage["output_tokens"]
+
+            # Reasoning tokens
+            output_details = usage.get("output_tokens_details", {})
+            if isinstance(output_details, dict) and "reasoning_tokens" in output_details:
+                attributes[SpanAttributes.LLM_USAGE_REASONING_TOKENS] = output_details["reasoning_tokens"]
+
+        # Output items
+        if should_send_prompts() and "output" in response_dict:
+            output_items = response_dict["output"]
+            completion_idx = 0
+            for i, output_item in enumerate(output_items):
+                # Handle dictionary format
+                if isinstance(output_item, dict):
+                    item_type = output_item.get("type")
+                # Handle object format (Pydantic models)
+                elif hasattr(output_item, "type"):
+                    item_type = output_item.type
+                    output_item_dict = model_as_dict(output_item)
+                    if output_item_dict and isinstance(output_item_dict, dict):
+                        output_item = output_item_dict
+                    else:
+                        continue
+                else:
+                    continue
+
+                if item_type == "message":
+                    # Extract message content
+                    if isinstance(output_item, dict):
+                        content = output_item.get("content", [])
+                        if isinstance(content, list):
+                            # Aggregate all text content
+                            text_parts = []
+                            for content_item in content:
+                                if isinstance(content_item, dict) and content_item.get("type") == "text":
+                                    text = content_item.get("text", "")
+                                    if text:
+                                        text_parts.append(text)
+                            if text_parts:
+                                full_text = "".join(text_parts)
+                                attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{completion_idx}.content"] = full_text
+                                attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{completion_idx}.role"] = "assistant"
+                                completion_idx += 1
+                        elif isinstance(content, str):
+                            # Simple string content
+                            attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{completion_idx}.content"] = content
+                            attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{completion_idx}.role"] = "assistant"
+                            completion_idx += 1
+
+                elif item_type == "function_call" and isinstance(output_item, dict):
+                    # Handle function calls
+                    # The arguments contain the actual response content for function calls
+                    args_str = output_item.get("arguments", "")
+                    if args_str:
+                        try:
+                            args = json.loads(args_str)
+                            # Extract reasoning if present (common in o3 models)
+                            reasoning = args.get("reasoning", "")
+                            if reasoning:
+                                attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{completion_idx}.content"] = reasoning
+                                attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{completion_idx}.role"] = "assistant"
+                                completion_idx += 1
+                        except json.JSONDecodeError:
+                            pass
+
+                    # Also store tool call details
+                    attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{i}.tool_calls.0.id"] = output_item.get("id", "")
+                    attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{i}.tool_calls.0.name"] = output_item.get("name", "")
+                    attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{i}.tool_calls.0.arguments"] = args_str
+
+                elif item_type == "reasoning" and isinstance(output_item, dict):
+                    # Handle reasoning items (o3 models provide these)
+                    summary = output_item.get("summary", "")
+                    if summary:
+                        attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{completion_idx}.content"] = summary
+                        attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{completion_idx}.role"] = "assistant"
+                        attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{completion_idx}.type"] = "reasoning"
+                        completion_idx += 1
+
+    return attributes
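Note: the Responses API handler above walks a heterogeneous "output" list (message, function_call, and reasoning items) and normalizes the Responses API's input_tokens/output_tokens to the prompt/completion usage attributes. A fabricated sketch of that mapping:

# Hypothetical exercise of handle_responses_attributes; the response dict is
# hand-built to mirror the Responses API shape (values and IDs are made up).
from agentops.instrumentation.providers.openai.wrappers.responses import (
    handle_responses_attributes,
)

fake_response = {
    "id": "resp_123",
    "model": "o3-mini",
    "usage": {
        "input_tokens": 12,
        "output_tokens": 40,
        "total_tokens": 52,
        "output_tokens_details": {"reasoning_tokens": 16},
    },
    "output": [
        {"type": "reasoning", "summary": "Compared both options briefly."},
        {"type": "message", "content": [{"type": "text", "text": "Option A wins."}]},
    ],
}

attrs = handle_responses_attributes(
    kwargs={"model": "o3-mini", "input": [{"role": "user", "content": "Pick one."}]},
    return_value=fake_response,
)
# The reasoning item becomes completion 0 (tagged type="reasoning"), the
# message text becomes completion 1, and the input/output token counts land
# on the prompt/completion usage attributes.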
agentops/instrumentation/providers/openai/wrappers/shared.py
@@ -0,0 +1,81 @@
+"""Shared utilities for OpenAI instrumentation wrappers.
+
+This module contains common functions and utilities used across different
+OpenAI API endpoint wrappers.
+"""
+
+import os
+import types
+import logging
+from typing import Any, Dict, Optional
+from importlib.metadata import version
+
+import openai
+from opentelemetry import context as context_api
+
+from agentops.instrumentation.providers.openai.utils import is_openai_v1
+
+logger = logging.getLogger(__name__)
+
+# Pydantic version for model serialization
+_PYDANTIC_VERSION = version("pydantic")
+
+# Cache for tiktoken encodings
+tiktoken_encodings = {}
+
+
+def should_send_prompts() -> bool:
+    """Check if prompt content should be sent in traces."""
+    return (os.getenv("TRACELOOP_TRACE_CONTENT") or "true").lower() == "true" or context_api.get_value(
+        "override_enable_content_tracing"
+    )
+
+
+def is_streaming_response(response: Any) -> bool:
+    """Check if a response is a streaming response."""
+    if is_openai_v1():
+        return isinstance(response, openai.Stream) or isinstance(response, openai.AsyncStream)
+    return isinstance(response, types.GeneratorType) or isinstance(response, types.AsyncGeneratorType)
+
+
+def model_as_dict(model: Any) -> Dict[str, Any]:
+    """Convert a model object to a dictionary."""
+    if model is None:
+        return {}
+    if isinstance(model, dict):
+        return model
+    if _PYDANTIC_VERSION < "2.0.0":
+        return model.dict()
+    if hasattr(model, "model_dump"):
+        return model.model_dump()
+    elif hasattr(model, "parse"):  # Raw API response
+        return model_as_dict(model.parse())
+    else:
+        return model if isinstance(model, dict) else {}
+
+
+def get_token_count_from_string(string: str, model_name: str) -> Optional[int]:
+    """Get token count from a string using tiktoken."""
+    from agentops.instrumentation.providers.openai.utils import should_record_stream_token_usage
+
+    if not should_record_stream_token_usage():
+        return None
+
+    try:
+        import tiktoken
+    except ImportError:
+        return None
+
+    if tiktoken_encodings.get(model_name) is None:
+        try:
+            encoding = tiktoken.encoding_for_model(model_name)
+        except KeyError as ex:
+            logger.warning(f"Failed to get tiktoken encoding for model_name {model_name}, error: {str(ex)}")
+            return None
+
+        tiktoken_encodings[model_name] = encoding
+    else:
+        encoding = tiktoken_encodings.get(model_name)
+
+    token_count = len(encoding.encode(string))
+    return token_count
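Note: two behaviors of the shared helpers worth calling out: model_as_dict is a best-effort normalizer (dicts pass through, Pydantic models are dumped, raw API responses are parsed first), and get_token_count_from_string degrades gracefully, returning None when stream token recording is disabled or tiktoken is not installed. A small sketch:

# Hypothetical exercise of the shared helpers above.
from agentops.instrumentation.providers.openai.wrappers.shared import (
    get_token_count_from_string,
    model_as_dict,
)

assert model_as_dict(None) == {}
assert model_as_dict({"a": 1}) == {"a": 1}  # dicts pass through unchanged

# Returns an int when tiktoken is available and stream token usage recording
# is enabled; otherwise None (callers must handle both cases).
print(get_token_count_from_string("hello world", "gpt-4o"))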
agentops/instrumentation/utilities/concurrent_futures/__init__.py
@@ -0,0 +1,10 @@
+"""
+Instrumentation for concurrent.futures module.
+
+This module provides automatic instrumentation for ThreadPoolExecutor to ensure
+proper OpenTelemetry context propagation across thread boundaries.
+"""
+
+from .instrumentation import ConcurrentFuturesInstrumentor
+
+__all__ = ["ConcurrentFuturesInstrumentor"]
agentops/instrumentation/utilities/concurrent_futures/instrumentation.py
@@ -0,0 +1,206 @@
+"""
+OpenTelemetry Instrumentation for concurrent.futures module.
+
+This instrumentation automatically patches ThreadPoolExecutor to ensure proper
+context propagation across thread boundaries, preventing "NEW TRACE DETECTED" issues.
+"""
+
+import contextvars
+import functools
+from typing import Any, Callable, Collection, Optional, Tuple, TypeVar, List, Dict
+
+from concurrent.futures import ThreadPoolExecutor, Future
+
+from agentops.instrumentation.common import CommonInstrumentor, InstrumentorConfig
+from agentops.instrumentation.common.wrappers import WrapConfig
+from agentops.logging import logger
+
+# Store original methods to restore during uninstrumentation
+_original_init = None
+_original_submit = None
+
+# Type variables for better typing
+T = TypeVar("T")
+R = TypeVar("R")
+
+
+def _context_propagating_init(original_init: Callable) -> Callable:
+    """Wrap ThreadPoolExecutor.__init__ to set up context-aware initializer."""
+
+    @functools.wraps(original_init)
+    def wrapped_init(
+        self: ThreadPoolExecutor,
+        max_workers: Optional[int] = None,
+        thread_name_prefix: str = "",
+        initializer: Optional[Callable] = None,
+        initargs: Tuple = (),
+    ) -> None:
+        # Capture the current context when the executor is created
+        main_context = contextvars.copy_context()
+
+        def context_aware_initializer() -> None:
+            """Initializer that sets up the captured context in each worker thread."""
+
+            # Set the main context variables in this thread
+            for var, value in main_context.items():
+                try:
+                    var.set(value)
+                except Exception as e:
+                    logger.debug(f"[ConcurrentFuturesInstrumentor] Could not set context var {var}: {e}")
+
+            # Run user's initializer if provided
+            if initializer and callable(initializer):
+                try:
+                    if initargs:
+                        initializer(*initargs)
+                    else:
+                        initializer()
+                except Exception as e:
+                    logger.error(f"[ConcurrentFuturesInstrumentor] Error in user initializer: {e}")
+                    raise
+
+        # Create executor with context-aware initializer
+        prefix = f"AgentOps-{thread_name_prefix}" if thread_name_prefix else "AgentOps-Thread"
+
+        # Call original init with our context-aware initializer
+        original_init(
+            self,
+            max_workers=max_workers,
+            thread_name_prefix=prefix,
+            initializer=context_aware_initializer,
+            initargs=(),  # We handle initargs in our wrapper
+        )
+
+    return wrapped_init
+
+
+def _context_propagating_submit(original_submit: Callable) -> Callable:
+    """Wrap ThreadPoolExecutor.submit to ensure context propagation."""
+
+    @functools.wraps(original_submit)
+    def wrapped_submit(self: ThreadPoolExecutor, func: Callable[..., R], *args: Any, **kwargs: Any) -> Future[R]:
+        # Log the submission
+        func_name = getattr(func, "__name__", str(func))  # noqa: F841
+
+        # The context propagation is handled by the initializer, so we can submit normally
+        # But we can add additional logging or monitoring here if needed
+        return original_submit(self, func, *args, **kwargs)
+
+    return wrapped_submit
+
+
+class ConcurrentFuturesInstrumentor(CommonInstrumentor):
+    """
+    Instrumentor for concurrent.futures module.
+
+    This instrumentor patches ThreadPoolExecutor to automatically propagate
+    OpenTelemetry context to worker threads, ensuring all LLM calls and other
+    instrumented operations maintain proper trace context.
+    """
+
+    def __init__(self):
+        """Initialize the concurrent.futures instrumentor."""
+        config = InstrumentorConfig(
+            library_name="agentops.instrumentation.concurrent_futures",
+            library_version="0.1.0",
+            wrapped_methods=[],  # We handle wrapping manually
+            metrics_enabled=False,  # No metrics needed for context propagation
+            dependencies=[],
+        )
+        super().__init__(config)
+        self._original_init = None
+        self._original_submit = None
+
+    def instrumentation_dependencies(self) -> Collection[str]:
+        """Return a list of instrumentation dependencies."""
+        return []
+
+    def _get_wrapped_methods(self) -> List[WrapConfig]:
+        """
+        Return list of methods to be wrapped.
+
+        For concurrent_futures, we don't use the standard wrapping mechanism
+        since we're patching methods directly for context propagation.
+        """
+        return []
+
+    def _create_metrics(self, meter) -> Dict[str, Any]:
+        """
+        Create metrics for this instrumentor.
+
+        This instrumentor doesn't need metrics as it's purely for context propagation.
+
+        Args:
+            meter: The meter instance (unused)
+
+        Returns:
+            Empty dict since no metrics are needed
+        """
+        return {}
+
+    def _instrument(self, **kwargs: Any) -> None:
+        """Instrument the concurrent.futures module."""
+        # Note: We don't call super()._instrument() here because we're not using
+        # the standard wrapping mechanism for this special instrumentor
+
+        logger.debug("[ConcurrentFuturesInstrumentor] Starting instrumentation")
+
+        # Store original methods
+        self._original_init = ThreadPoolExecutor.__init__
+        self._original_submit = ThreadPoolExecutor.submit
+
+        # Patch ThreadPoolExecutor methods
+        ThreadPoolExecutor.__init__ = _context_propagating_init(self._original_init)
+        ThreadPoolExecutor.submit = _context_propagating_submit(self._original_submit)
+
+        logger.info("[ConcurrentFuturesInstrumentor] Successfully instrumented concurrent.futures.ThreadPoolExecutor")
+
+    def _uninstrument(self, **kwargs: Any) -> None:
+        """Uninstrument the concurrent.futures module."""
+        # Note: We don't call super()._uninstrument() here because we're not using
+        # the standard wrapping mechanism for this special instrumentor
+
+        logger.debug("[ConcurrentFuturesInstrumentor] Starting uninstrumentation")
+
+        # Restore original methods
+        if self._original_init:
+            ThreadPoolExecutor.__init__ = self._original_init
+            self._original_init = None
+
+        if self._original_submit:
+            ThreadPoolExecutor.submit = self._original_submit
+            self._original_submit = None
+
+        logger.info("[ConcurrentFuturesInstrumentor] Successfully uninstrumented concurrent.futures.ThreadPoolExecutor")
+
+    @staticmethod
+    def instrument_module_directly() -> bool:
+        """
+        Directly instrument the module without using the standard instrumentor interface.
+
+        This can be called manually if automatic instrumentation is not desired.
+
+        Returns:
+            bool: True if instrumentation was applied, False if already instrumented
+        """
+        instrumentor = ConcurrentFuturesInstrumentor()
+        if not instrumentor.is_instrumented_by_opentelemetry:
+            instrumentor.instrument()
+            return True
+        return False
+
+    @staticmethod
+    def uninstrument_module_directly() -> bool:
+        """
+        Directly uninstrument the module.
+
+        This can be called manually to remove instrumentation.
+
+        Returns:
+            bool: True if uninstrumentation was applied, False if already uninstrumented
+        """
+        instrumentor = ConcurrentFuturesInstrumentor()
+        if instrumentor.is_instrumented_by_opentelemetry:
+            instrumentor.uninstrument()
+            return True
+        return False
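Note: a hedged usage sketch of the manual entry points above. Because the context is captured in wrapped_init, the executor must be constructed after the trace context you want propagated is in place; the work function here is illustrative only.

# Hypothetical manual wiring; normally the AgentOps client enables this.
from concurrent.futures import ThreadPoolExecutor

from agentops.instrumentation.utilities.concurrent_futures import (
    ConcurrentFuturesInstrumentor,
)

ConcurrentFuturesInstrumentor.instrument_module_directly()

def work(x: int) -> int:
    # Instrumented calls made here see the creating thread's OpenTelemetry
    # context, courtesy of the context-aware initializer installed above.
    return x * 2

with ThreadPoolExecutor(max_workers=2) as pool:
    results = list(pool.map(work, range(4)))  # map() goes through submit()

ConcurrentFuturesInstrumentor.uninstrument_module_directly()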
agentops/integration/callbacks/dspy/__init__.py
@@ -0,0 +1,11 @@
+"""
+DSPy integration for AgentOps.
+
+This module provides the AgentOps DSPy integration, including callbacks and utilities.
+"""
+
+from agentops.integration.callbacks.dspy.callback import DSPyCallbackHandler
+
+__all__ = [
+    "DSPyCallbackHandler",
+]
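Note: a hedged registration sketch. DSPy exposes a global callback list via its settings; the exact hook (settings.configure(callbacks=...)) and the handler's constructor arguments are assumptions here, not confirmed by this diff, so check them against the shipped callback module and DSPy's docs.

# Hypothetical wiring of the DSPy handler; the configure() kwarg is assumed.
import dspy

from agentops.integration.callbacks.dspy import DSPyCallbackHandler

dspy.settings.configure(callbacks=[DSPyCallbackHandler()])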