mseep-agentops 0.4.18__py3-none-any.whl → 0.4.22__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentops/__init__.py +0 -0
- agentops/client/api/base.py +28 -30
- agentops/client/api/versions/v3.py +29 -25
- agentops/client/api/versions/v4.py +87 -46
- agentops/client/client.py +98 -29
- agentops/client/http/README.md +87 -0
- agentops/client/http/http_client.py +126 -172
- agentops/config.py +8 -2
- agentops/instrumentation/OpenTelemetry.md +133 -0
- agentops/instrumentation/README.md +167 -0
- agentops/instrumentation/__init__.py +13 -1
- agentops/instrumentation/agentic/ag2/__init__.py +18 -0
- agentops/instrumentation/agentic/ag2/instrumentor.py +922 -0
- agentops/instrumentation/agentic/agno/__init__.py +19 -0
- agentops/instrumentation/agentic/agno/attributes/__init__.py +20 -0
- agentops/instrumentation/agentic/agno/attributes/agent.py +250 -0
- agentops/instrumentation/agentic/agno/attributes/metrics.py +214 -0
- agentops/instrumentation/agentic/agno/attributes/storage.py +158 -0
- agentops/instrumentation/agentic/agno/attributes/team.py +195 -0
- agentops/instrumentation/agentic/agno/attributes/tool.py +210 -0
- agentops/instrumentation/agentic/agno/attributes/workflow.py +254 -0
- agentops/instrumentation/agentic/agno/instrumentor.py +1313 -0
- agentops/instrumentation/agentic/crewai/LICENSE +201 -0
- agentops/instrumentation/agentic/crewai/NOTICE.md +10 -0
- agentops/instrumentation/agentic/crewai/__init__.py +6 -0
- agentops/instrumentation/agentic/crewai/crewai_span_attributes.py +335 -0
- agentops/instrumentation/agentic/crewai/instrumentation.py +535 -0
- agentops/instrumentation/agentic/crewai/version.py +1 -0
- agentops/instrumentation/agentic/google_adk/__init__.py +19 -0
- agentops/instrumentation/agentic/google_adk/instrumentor.py +68 -0
- agentops/instrumentation/agentic/google_adk/patch.py +767 -0
- agentops/instrumentation/agentic/haystack/__init__.py +1 -0
- agentops/instrumentation/agentic/haystack/instrumentor.py +186 -0
- agentops/instrumentation/agentic/langgraph/__init__.py +3 -0
- agentops/instrumentation/agentic/langgraph/attributes.py +54 -0
- agentops/instrumentation/agentic/langgraph/instrumentation.py +598 -0
- agentops/instrumentation/agentic/langgraph/version.py +1 -0
- agentops/instrumentation/agentic/openai_agents/README.md +156 -0
- agentops/instrumentation/agentic/openai_agents/SPANS.md +145 -0
- agentops/instrumentation/agentic/openai_agents/TRACING_API.md +144 -0
- agentops/instrumentation/agentic/openai_agents/__init__.py +30 -0
- agentops/instrumentation/agentic/openai_agents/attributes/common.py +549 -0
- agentops/instrumentation/agentic/openai_agents/attributes/completion.py +172 -0
- agentops/instrumentation/agentic/openai_agents/attributes/model.py +58 -0
- agentops/instrumentation/agentic/openai_agents/attributes/tokens.py +275 -0
- agentops/instrumentation/agentic/openai_agents/exporter.py +469 -0
- agentops/instrumentation/agentic/openai_agents/instrumentor.py +107 -0
- agentops/instrumentation/agentic/openai_agents/processor.py +58 -0
- agentops/instrumentation/agentic/smolagents/README.md +88 -0
- agentops/instrumentation/agentic/smolagents/__init__.py +12 -0
- agentops/instrumentation/agentic/smolagents/attributes/agent.py +354 -0
- agentops/instrumentation/agentic/smolagents/attributes/model.py +205 -0
- agentops/instrumentation/agentic/smolagents/instrumentor.py +286 -0
- agentops/instrumentation/agentic/smolagents/stream_wrapper.py +258 -0
- agentops/instrumentation/agentic/xpander/__init__.py +15 -0
- agentops/instrumentation/agentic/xpander/context.py +112 -0
- agentops/instrumentation/agentic/xpander/instrumentor.py +877 -0
- agentops/instrumentation/agentic/xpander/trace_probe.py +86 -0
- agentops/instrumentation/agentic/xpander/version.py +3 -0
- agentops/instrumentation/common/README.md +65 -0
- agentops/instrumentation/common/attributes.py +1 -2
- agentops/instrumentation/providers/anthropic/__init__.py +24 -0
- agentops/instrumentation/providers/anthropic/attributes/__init__.py +23 -0
- agentops/instrumentation/providers/anthropic/attributes/common.py +64 -0
- agentops/instrumentation/providers/anthropic/attributes/message.py +541 -0
- agentops/instrumentation/providers/anthropic/attributes/tools.py +231 -0
- agentops/instrumentation/providers/anthropic/event_handler_wrapper.py +90 -0
- agentops/instrumentation/providers/anthropic/instrumentor.py +146 -0
- agentops/instrumentation/providers/anthropic/stream_wrapper.py +436 -0
- agentops/instrumentation/providers/google_genai/README.md +33 -0
- agentops/instrumentation/providers/google_genai/__init__.py +24 -0
- agentops/instrumentation/providers/google_genai/attributes/__init__.py +25 -0
- agentops/instrumentation/providers/google_genai/attributes/chat.py +125 -0
- agentops/instrumentation/providers/google_genai/attributes/common.py +88 -0
- agentops/instrumentation/providers/google_genai/attributes/model.py +284 -0
- agentops/instrumentation/providers/google_genai/instrumentor.py +170 -0
- agentops/instrumentation/providers/google_genai/stream_wrapper.py +238 -0
- agentops/instrumentation/providers/ibm_watsonx_ai/__init__.py +28 -0
- agentops/instrumentation/providers/ibm_watsonx_ai/attributes/__init__.py +27 -0
- agentops/instrumentation/providers/ibm_watsonx_ai/attributes/attributes.py +277 -0
- agentops/instrumentation/providers/ibm_watsonx_ai/attributes/common.py +104 -0
- agentops/instrumentation/providers/ibm_watsonx_ai/instrumentor.py +162 -0
- agentops/instrumentation/providers/ibm_watsonx_ai/stream_wrapper.py +302 -0
- agentops/instrumentation/providers/mem0/__init__.py +45 -0
- agentops/instrumentation/providers/mem0/common.py +377 -0
- agentops/instrumentation/providers/mem0/instrumentor.py +270 -0
- agentops/instrumentation/providers/mem0/memory.py +430 -0
- agentops/instrumentation/providers/openai/__init__.py +21 -0
- agentops/instrumentation/providers/openai/attributes/__init__.py +7 -0
- agentops/instrumentation/providers/openai/attributes/common.py +55 -0
- agentops/instrumentation/providers/openai/attributes/response.py +607 -0
- agentops/instrumentation/providers/openai/config.py +36 -0
- agentops/instrumentation/providers/openai/instrumentor.py +312 -0
- agentops/instrumentation/providers/openai/stream_wrapper.py +941 -0
- agentops/instrumentation/providers/openai/utils.py +44 -0
- agentops/instrumentation/providers/openai/v0.py +176 -0
- agentops/instrumentation/providers/openai/v0_wrappers.py +483 -0
- agentops/instrumentation/providers/openai/wrappers/__init__.py +30 -0
- agentops/instrumentation/providers/openai/wrappers/assistant.py +277 -0
- agentops/instrumentation/providers/openai/wrappers/chat.py +259 -0
- agentops/instrumentation/providers/openai/wrappers/completion.py +109 -0
- agentops/instrumentation/providers/openai/wrappers/embeddings.py +94 -0
- agentops/instrumentation/providers/openai/wrappers/image_gen.py +75 -0
- agentops/instrumentation/providers/openai/wrappers/responses.py +191 -0
- agentops/instrumentation/providers/openai/wrappers/shared.py +81 -0
- agentops/instrumentation/utilities/concurrent_futures/__init__.py +10 -0
- agentops/instrumentation/utilities/concurrent_futures/instrumentation.py +206 -0
- agentops/integration/callbacks/dspy/__init__.py +11 -0
- agentops/integration/callbacks/dspy/callback.py +471 -0
- agentops/integration/callbacks/langchain/README.md +59 -0
- agentops/integration/callbacks/langchain/__init__.py +15 -0
- agentops/integration/callbacks/langchain/callback.py +791 -0
- agentops/integration/callbacks/langchain/utils.py +54 -0
- agentops/legacy/crewai.md +121 -0
- agentops/logging/instrument_logging.py +4 -0
- agentops/sdk/README.md +220 -0
- agentops/sdk/core.py +75 -32
- agentops/sdk/descriptors/classproperty.py +28 -0
- agentops/sdk/exporters.py +152 -33
- agentops/semconv/README.md +125 -0
- agentops/semconv/span_kinds.py +0 -2
- agentops/validation.py +102 -63
- {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.22.dist-info}/METADATA +30 -40
- mseep_agentops-0.4.22.dist-info/RECORD +178 -0
- {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.22.dist-info}/WHEEL +1 -2
- mseep_agentops-0.4.18.dist-info/RECORD +0 -94
- mseep_agentops-0.4.18.dist-info/top_level.txt +0 -2
- tests/conftest.py +0 -10
- tests/unit/client/__init__.py +0 -1
- tests/unit/client/test_http_adapter.py +0 -221
- tests/unit/client/test_http_client.py +0 -206
- tests/unit/conftest.py +0 -54
- tests/unit/sdk/__init__.py +0 -1
- tests/unit/sdk/instrumentation_tester.py +0 -207
- tests/unit/sdk/test_attributes.py +0 -392
- tests/unit/sdk/test_concurrent_instrumentation.py +0 -468
- tests/unit/sdk/test_decorators.py +0 -763
- tests/unit/sdk/test_exporters.py +0 -241
- tests/unit/sdk/test_factory.py +0 -1188
- tests/unit/sdk/test_internal_span_processor.py +0 -397
- tests/unit/sdk/test_resource_attributes.py +0 -35
- tests/unit/test_config.py +0 -82
- tests/unit/test_context_manager.py +0 -777
- tests/unit/test_events.py +0 -27
- tests/unit/test_host_env.py +0 -54
- tests/unit/test_init_py.py +0 -501
- tests/unit/test_serialization.py +0 -433
- tests/unit/test_session.py +0 -676
- tests/unit/test_user_agent.py +0 -34
- tests/unit/test_validation.py +0 -405
- {tests → agentops/instrumentation/agentic/openai_agents/attributes}/__init__.py +0 -0
- /tests/unit/__init__.py → /agentops/instrumentation/providers/openai/attributes/tools.py +0 -0
- {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.22.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,88 @@
|
|
1
|
+
"""Common attribute extraction for Google Generative AI instrumentation."""
|
2
|
+
|
3
|
+
from typing import Dict, Any
|
4
|
+
|
5
|
+
from agentops.logging import logger
|
6
|
+
from agentops.semconv import InstrumentationAttributes, SpanAttributes
|
7
|
+
from agentops.instrumentation.common.attributes import (
|
8
|
+
AttributeMap,
|
9
|
+
get_common_attributes,
|
10
|
+
_extract_attributes_from_mapping,
|
11
|
+
)
|
12
|
+
from agentops.instrumentation.providers.google_genai import LIBRARY_NAME, LIBRARY_VERSION
|
13
|
+
|
14
|
+
# Common mapping for config parameters.
# Maps OpenTelemetry span-attribute keys to the field names looked up on the
# request's `config` object (or dict) by extract_request_attributes via
# _extract_attributes_from_mapping. Fields absent from a given config are
# simply skipped by the extractor.
REQUEST_CONFIG_ATTRIBUTES: AttributeMap = {
    SpanAttributes.LLM_REQUEST_TEMPERATURE: "temperature",
    SpanAttributes.LLM_REQUEST_MAX_TOKENS: "max_output_tokens",
    SpanAttributes.LLM_REQUEST_TOP_P: "top_p",
    SpanAttributes.LLM_REQUEST_TOP_K: "top_k",
    SpanAttributes.LLM_REQUEST_SEED: "seed",
    SpanAttributes.LLM_REQUEST_SYSTEM_INSTRUCTION: "system_instruction",
    SpanAttributes.LLM_REQUEST_PRESENCE_PENALTY: "presence_penalty",
    SpanAttributes.LLM_REQUEST_FREQUENCY_PENALTY: "frequency_penalty",
    SpanAttributes.LLM_REQUEST_STOP_SEQUENCES: "stop_sequences",
    SpanAttributes.LLM_REQUEST_CANDIDATE_COUNT: "candidate_count",
}
|
27
|
+
|
28
|
+
|
29
|
+
def get_common_instrumentation_attributes() -> AttributeMap:
    """Return the shared instrumentation attributes for Google Generative AI.

    Combines the generic AgentOps common attributes with the library
    name/version identifying this instrumentation package.

    Returns:
        Dictionary of common instrumentation attributes
    """
    library_info = {
        InstrumentationAttributes.LIBRARY_NAME: LIBRARY_NAME,
        InstrumentationAttributes.LIBRARY_VERSION: LIBRARY_VERSION,
    }
    merged = get_common_attributes()
    merged.update(library_info)
    return merged
|
45
|
+
|
46
|
+
|
47
|
+
def extract_request_attributes(kwargs: Dict[str, Any]) -> AttributeMap:
    """Extract request attributes from the function arguments.

    Covers the request parameters shared by content generation and chat
    completions: model identification, generation config values, and the
    streaming flag.

    Args:
        kwargs: Request keyword arguments

    Returns:
        Dictionary of extracted request attributes
    """
    extracted: AttributeMap = {}

    if "model" in kwargs:
        model = kwargs["model"]
        # The model may be a plain string or a model object; prefer the
        # private _model_name attribute, then a public name attribute.
        if isinstance(model, str):
            extracted[SpanAttributes.LLM_REQUEST_MODEL] = model
        elif hasattr(model, "_model_name"):
            extracted[SpanAttributes.LLM_REQUEST_MODEL] = model._model_name
        elif hasattr(model, "name"):
            extracted[SpanAttributes.LLM_REQUEST_MODEL] = model.name

    if config := kwargs.get("config"):
        # Config may be an object (use its __dict__) or already a mapping.
        source = config.__dict__ if hasattr(config, "__dict__") else config
        try:
            extracted.update(_extract_attributes_from_mapping(source, REQUEST_CONFIG_ATTRIBUTES))
        except Exception as e:
            logger.debug(f"Error extracting config parameters: {e}")

    if "stream" in kwargs:
        extracted[SpanAttributes.LLM_REQUEST_STREAMING] = kwargs["stream"]

    return extracted
|
@@ -0,0 +1,284 @@
|
|
1
|
+
"""Model attribute extraction for Google Generative AI instrumentation."""
|
2
|
+
|
3
|
+
from typing import Dict, Any, Optional, Tuple
|
4
|
+
|
5
|
+
from agentops.logging import logger
|
6
|
+
from agentops.semconv import SpanAttributes, LLMRequestTypeValues, MessageAttributes
|
7
|
+
from agentops.instrumentation.common.attributes import AttributeMap
|
8
|
+
from agentops.instrumentation.providers.google_genai.attributes.common import (
|
9
|
+
extract_request_attributes,
|
10
|
+
get_common_instrumentation_attributes,
|
11
|
+
)
|
12
|
+
|
13
|
+
|
14
|
+
def _extract_content_from_prompt(content: Any) -> str:
|
15
|
+
"""Extract prompt text from content.
|
16
|
+
|
17
|
+
Handles the various content formats that Google's Generative AI SDK accepts,
|
18
|
+
including strings, ContentDict, lists of parts, etc.
|
19
|
+
|
20
|
+
Args:
|
21
|
+
content: The content object to extract text from
|
22
|
+
|
23
|
+
Returns:
|
24
|
+
Extracted text as a string
|
25
|
+
"""
|
26
|
+
# Direct string case
|
27
|
+
if isinstance(content, str):
|
28
|
+
return content
|
29
|
+
|
30
|
+
# Lists of parts/content
|
31
|
+
if isinstance(content, list):
|
32
|
+
text = ""
|
33
|
+
for item in content:
|
34
|
+
if isinstance(item, str):
|
35
|
+
text += item + "\n"
|
36
|
+
elif isinstance(item, dict) and "text" in item:
|
37
|
+
if item.get("text") is not None:
|
38
|
+
text += str(item["text"]) + "\n"
|
39
|
+
elif hasattr(item, "text"):
|
40
|
+
part_text = getattr(item, "text", None)
|
41
|
+
if part_text:
|
42
|
+
text += part_text + "\n"
|
43
|
+
# Handle content as a list with mixed types
|
44
|
+
elif hasattr(item, "parts"):
|
45
|
+
parts = item.parts
|
46
|
+
for part in parts:
|
47
|
+
if isinstance(part, str):
|
48
|
+
text += part + "\n"
|
49
|
+
elif hasattr(part, "text"):
|
50
|
+
part_text = getattr(part, "text", None)
|
51
|
+
if part_text:
|
52
|
+
text += part_text + "\n"
|
53
|
+
return text
|
54
|
+
|
55
|
+
# Dict with text key
|
56
|
+
if isinstance(content, dict) and "text" in content:
|
57
|
+
return content["text"]
|
58
|
+
|
59
|
+
# Content object with text attribute
|
60
|
+
if hasattr(content, "text"):
|
61
|
+
return content.text
|
62
|
+
|
63
|
+
# Content object with parts attribute
|
64
|
+
if hasattr(content, "parts"):
|
65
|
+
text = ""
|
66
|
+
for part in content.parts:
|
67
|
+
if isinstance(part, str):
|
68
|
+
text += part + "\n"
|
69
|
+
elif hasattr(part, "text"):
|
70
|
+
part_text = getattr(part, "text", None)
|
71
|
+
if part_text:
|
72
|
+
text += part_text + "\n"
|
73
|
+
return text
|
74
|
+
|
75
|
+
# Other object types - try to convert to string
|
76
|
+
try:
|
77
|
+
return str(content)
|
78
|
+
except Exception:
|
79
|
+
return ""
|
80
|
+
|
81
|
+
|
82
|
+
def _set_prompt_attributes(attributes: AttributeMap, args: Tuple, kwargs: Dict[str, Any]) -> None:
    """Populate prompt content/role attributes from the request arguments.

    The prompt may arrive as the first positional argument or under the
    "contents"/"content" keyword; each message is converted to text via
    _extract_content_from_prompt. Extraction errors are logged at debug
    level and never propagate.

    Args:
        attributes: The attribute dictionary to update
        args: Positional arguments to the method
        kwargs: Keyword arguments to the method
    """
    if args:
        content = args[0]
    elif "contents" in kwargs:
        content = kwargs["contents"]
    elif "content" in kwargs:
        content = kwargs["content"]
    else:
        content = None

    if content is None:
        return

    # Single (non-list) prompt: record it at index 0 with a "user" role.
    if not isinstance(content, list):
        try:
            text = _extract_content_from_prompt(content)
            if text:
                attributes[MessageAttributes.PROMPT_CONTENT.format(i=0)] = text
                attributes[MessageAttributes.PROMPT_ROLE.format(i=0)] = "user"
        except Exception as e:
            logger.debug(f"Error extracting prompt content: {e}")
        return

    # List of messages: record each one under its index, picking up an
    # explicit role when the item carries one (dict key or attribute).
    for i, message in enumerate(content):
        try:
            text = _extract_content_from_prompt(message)
            if text:
                attributes[MessageAttributes.PROMPT_CONTENT.format(i=i)] = text
                if isinstance(message, dict) and "role" in message:
                    role = message["role"]
                elif hasattr(message, "role"):
                    role = message.role
                else:
                    role = "user"
                attributes[MessageAttributes.PROMPT_ROLE.format(i=i)] = role
        except Exception as e:
            logger.debug(f"Error extracting prompt content at index {i}: {e}")
|
127
|
+
|
128
|
+
|
129
|
+
def _set_response_attributes(attributes: AttributeMap, response: Any) -> None:
    """Record model name, token usage and completion text from a response.

    Args:
        attributes: The attribute dictionary to update
        response: The response from the API
    """
    if response is None:
        return

    if hasattr(response, "model"):
        attributes[SpanAttributes.LLM_RESPONSE_MODEL] = response.model

    # Token usage lives on usage_metadata; copy whichever counters exist.
    if hasattr(response, "usage_metadata"):
        usage = response.usage_metadata
        token_fields = (
            ("prompt_token_count", SpanAttributes.LLM_USAGE_PROMPT_TOKENS),
            ("candidates_token_count", SpanAttributes.LLM_USAGE_COMPLETION_TOKENS),
            ("total_token_count", SpanAttributes.LLM_USAGE_TOTAL_TOKENS),
        )
        for field, attr_key in token_fields:
            if hasattr(usage, field):
                attributes[attr_key] = getattr(usage, field)

    try:
        if hasattr(response, "text"):
            # Simple responses expose the whole completion as .text.
            attributes[MessageAttributes.COMPLETION_CONTENT.format(i=0)] = response.text
            attributes[MessageAttributes.COMPLETION_ROLE.format(i=0)] = "assistant"
        elif hasattr(response, "candidates"):
            # Otherwise walk the candidates, concatenating each one's parts.
            for i, candidate in enumerate(response.candidates):
                inner = getattr(candidate, "content", None)
                if inner is not None and hasattr(inner, "parts"):
                    fragments = []
                    for part in inner.parts:
                        if isinstance(part, str):
                            fragments.append(part)
                        elif hasattr(part, "text"):
                            part_text = getattr(part, "text", None)
                            if part_text:
                                fragments.append(part_text)

                    attributes[MessageAttributes.COMPLETION_CONTENT.format(i=i)] = "".join(fragments)
                    attributes[MessageAttributes.COMPLETION_ROLE.format(i=i)] = "assistant"

                if hasattr(candidate, "finish_reason"):
                    attributes[MessageAttributes.COMPLETION_FINISH_REASON.format(i=i)] = candidate.finish_reason
    except Exception as e:
        logger.debug(f"Error extracting completion content: {e}")
|
176
|
+
|
177
|
+
|
178
|
+
def get_model_attributes(
    args: Optional[Tuple] = None,
    kwargs: Optional[Dict[str, Any]] = None,
    return_value: Optional[Any] = None,
) -> AttributeMap:
    """Extract attributes for GenerativeModel methods.

    Shared extraction path for model operations: starts from the common
    instrumentation attributes, tags the span as a Gemini chat request, then
    layers on request, prompt, and response attributes as available.

    Args:
        args: Positional arguments to the method
        kwargs: Keyword arguments to the method
        return_value: Return value from the method

    Returns:
        Dictionary of extracted attributes
    """
    attributes = get_common_instrumentation_attributes()
    attributes.update(
        {
            SpanAttributes.LLM_SYSTEM: "Gemini",
            SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.CHAT.value,
        }
    )

    if kwargs:
        attributes.update(extract_request_attributes(kwargs))

    if args or kwargs:
        _set_prompt_attributes(attributes, args or (), kwargs or {})

    if return_value is not None:
        _set_response_attributes(attributes, return_value)

    return attributes
|
211
|
+
|
212
|
+
|
213
|
+
def get_generate_content_attributes(
    args: Optional[Tuple] = None,
    kwargs: Optional[Dict[str, Any]] = None,
    return_value: Optional[Any] = None,
) -> AttributeMap:
    """Extract attributes for the generate_content method.

    generate_content is the primary entry point for Gemini models; its
    attribute extraction is identical to the generic model path, so this
    simply delegates to get_model_attributes.

    Args:
        args: Positional arguments to the method
        kwargs: Keyword arguments to the method
        return_value: Return value from the method

    Returns:
        Dictionary of extracted attributes
    """
    return get_model_attributes(args=args, kwargs=kwargs, return_value=return_value)
|
232
|
+
|
233
|
+
|
234
|
+
def get_token_counting_attributes(
    args: Optional[Tuple] = None,
    kwargs: Optional[Dict[str, Any]] = None,
    return_value: Optional[Any] = None,
) -> AttributeMap:
    """Extract attributes for token counting operations.

    Tags the span as a Gemini "token_count" request, copies any common
    request parameters, and records the total token count from the result
    when the response exposes one.

    Args:
        args: Positional arguments to the method
        kwargs: Keyword arguments to the method
        return_value: Return value from the method

    Returns:
        Dictionary of extracted attributes
    """
    attributes = get_common_instrumentation_attributes()
    attributes[SpanAttributes.LLM_SYSTEM] = "Gemini"
    attributes[SpanAttributes.LLM_REQUEST_TYPE] = "token_count"

    if kwargs:
        attributes.update(extract_request_attributes(kwargs))

    # The count result exposes either total_tokens or total_token_count
    # depending on the API surface; take the first one present.
    if return_value is not None:
        for field in ("total_tokens", "total_token_count"):
            if hasattr(return_value, field):
                attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] = getattr(return_value, field)
                break

    return attributes
|
268
|
+
|
269
|
+
|
270
|
+
def get_stream_attributes(stream: Any) -> AttributeMap:
    """Extract attributes from a stream object.

    Args:
        stream: The stream object to extract attributes from

    Returns:
        Dictionary of extracted attributes (response model, when present)
    """
    if hasattr(stream, "model"):
        return {SpanAttributes.LLM_RESPONSE_MODEL: stream.model}
    return {}
|
@@ -0,0 +1,170 @@
|
|
1
|
+
"""Google Generative AI Instrumentation for AgentOps
|
2
|
+
|
3
|
+
This module provides instrumentation for the Google Generative AI API, implementing OpenTelemetry
|
4
|
+
instrumentation for Gemini model requests and responses.
|
5
|
+
|
6
|
+
We focus on instrumenting the following key endpoints:
|
7
|
+
- ChatSession.send_message - Chat message API
|
8
|
+
- Streaming responses - Special handling for streaming responses
|
9
|
+
"""
|
10
|
+
|
11
|
+
from typing import List, Dict, Any
|
12
|
+
from wrapt import wrap_function_wrapper
|
13
|
+
from opentelemetry.metrics import Meter
|
14
|
+
|
15
|
+
from agentops.logging import logger
|
16
|
+
from agentops.instrumentation.common import CommonInstrumentor, StandardMetrics, InstrumentorConfig
|
17
|
+
from agentops.instrumentation.common.wrappers import WrapConfig
|
18
|
+
from agentops.instrumentation.providers.google_genai.attributes.model import (
|
19
|
+
get_generate_content_attributes,
|
20
|
+
get_token_counting_attributes,
|
21
|
+
)
|
22
|
+
from agentops.instrumentation.providers.google_genai.stream_wrapper import (
|
23
|
+
generate_content_stream_wrapper,
|
24
|
+
generate_content_stream_async_wrapper,
|
25
|
+
)
|
26
|
+
|
27
|
+
# Library info for tracer/meter
LIBRARY_NAME = "agentops.instrumentation.google_genai"
LIBRARY_VERSION = "0.1.0"

# Methods to wrap for instrumentation.
# Each WrapConfig pairs a google.genai Models/AsyncModels method with the
# attribute-extraction handler that populates its span; async variants set
# is_async=True. Streaming methods are handled separately below.
WRAPPED_METHODS: List[WrapConfig] = [
    # Client-based API methods
    WrapConfig(
        trace_name="gemini.generate_content",
        package="google.genai.models",
        class_name="Models",
        method_name="generate_content",
        handler=get_generate_content_attributes,
    ),
    WrapConfig(
        trace_name="gemini.count_tokens",
        package="google.genai.models",
        class_name="Models",
        method_name="count_tokens",
        handler=get_token_counting_attributes,
    ),
    WrapConfig(
        trace_name="gemini.compute_tokens",
        package="google.genai.models",
        class_name="Models",
        method_name="compute_tokens",
        handler=get_token_counting_attributes,
    ),
    # Async client-based API methods
    WrapConfig(
        trace_name="gemini.generate_content",
        package="google.genai.models",
        class_name="AsyncModels",
        method_name="generate_content",
        handler=get_generate_content_attributes,
        is_async=True,
    ),
    WrapConfig(
        trace_name="gemini.count_tokens",
        package="google.genai.models",
        class_name="AsyncModels",
        method_name="count_tokens",
        handler=get_token_counting_attributes,
        is_async=True,
    ),
    WrapConfig(
        trace_name="gemini.compute_tokens",
        package="google.genai.models",
        class_name="AsyncModels",
        method_name="compute_tokens",
        handler=get_token_counting_attributes,
        is_async=True,
    ),
]

# Streaming methods that need special handling.
# generate_content_stream yields incrementally, so each entry is wrapped
# directly with wrapt (see GoogleGenaiInstrumentor._custom_wrap) using a
# dedicated stream wrapper instead of a WrapConfig.
STREAMING_METHODS = [
    # Client API
    {
        "module": "google.genai.models",
        "class_method": "Models.generate_content_stream",
        "wrapper": generate_content_stream_wrapper,
        "is_async": False,
    },
    {
        "module": "google.genai.models",
        "class_method": "AsyncModels.generate_content_stream",
        "wrapper": generate_content_stream_async_wrapper,
        "is_async": True,
    },
]
|
98
|
+
|
99
|
+
|
100
|
+
class GoogleGenaiInstrumentor(CommonInstrumentor):
    """An instrumentor for Google Generative AI (Gemini) API.

    Wraps the key methods of the google-genai client library (sync and
    async, including streaming responses) to emit OpenTelemetry spans and
    metrics: token usage, operation duration, and exceptions.
    """

    def __init__(self):
        """Initialize the Google GenAI instrumentor."""
        super().__init__(
            InstrumentorConfig(
                library_name=LIBRARY_NAME,
                library_version=LIBRARY_VERSION,
                wrapped_methods=WRAPPED_METHODS,
                metrics_enabled=True,
                dependencies=["google-genai >= 0.1.0"],
            )
        )

    def _create_metrics(self, meter: Meter) -> Dict[str, Any]:
        """Create standard LLM metrics for Google GenAI operations.

        Args:
            meter: The OpenTelemetry meter to use for creating metrics.

        Returns:
            Dictionary containing the created metrics.
        """
        return StandardMetrics.create_standard_metrics(meter)

    def _custom_wrap(self, **kwargs):
        """Perform custom wrapping for streaming methods.

        Args:
            **kwargs: Configuration options for instrumentation.
        """
        # Streaming responses bypass WrapConfig and get dedicated wrappers.
        for entry in STREAMING_METHODS:
            module = entry["module"]
            target = entry["class_method"]
            try:
                wrap_function_wrapper(module, target, entry["wrapper"](self._tracer))
                logger.debug(f"Successfully wrapped streaming method {module}.{target}")
            except (AttributeError, ModuleNotFoundError) as e:
                # The library may be absent or older than expected; skip quietly.
                logger.debug(f"Failed to wrap {module}.{target}: {e}")

        logger.info("Google Generative AI instrumentation enabled")

    def _custom_unwrap(self, **kwargs):
        """Remove custom wrapping for streaming methods.

        Args:
            **kwargs: Configuration options for uninstrumentation.
        """
        # Imported lazily to mirror wrapping symmetry at teardown time.
        from opentelemetry.instrumentation.utils import unwrap as otel_unwrap

        for entry in STREAMING_METHODS:
            module = entry["module"]
            target = entry["class_method"]
            try:
                otel_unwrap(module, target)
                logger.debug(f"Unwrapped streaming method {module}.{target}")
            except (AttributeError, ModuleNotFoundError) as e:
                logger.debug(f"Failed to unwrap {module}.{target}: {e}")

        logger.info("Google Generative AI instrumentation disabled")
|