mseep-agentops 0.4.18__py3-none-any.whl → 0.4.23__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (153)
  1. agentops/__init__.py +0 -0
  2. agentops/client/api/base.py +28 -30
  3. agentops/client/api/versions/v3.py +29 -25
  4. agentops/client/api/versions/v4.py +87 -46
  5. agentops/client/client.py +98 -29
  6. agentops/client/http/README.md +87 -0
  7. agentops/client/http/http_client.py +126 -172
  8. agentops/config.py +8 -2
  9. agentops/instrumentation/OpenTelemetry.md +133 -0
  10. agentops/instrumentation/README.md +167 -0
  11. agentops/instrumentation/__init__.py +13 -1
  12. agentops/instrumentation/agentic/ag2/__init__.py +18 -0
  13. agentops/instrumentation/agentic/ag2/instrumentor.py +922 -0
  14. agentops/instrumentation/agentic/agno/__init__.py +19 -0
  15. agentops/instrumentation/agentic/agno/attributes/__init__.py +20 -0
  16. agentops/instrumentation/agentic/agno/attributes/agent.py +250 -0
  17. agentops/instrumentation/agentic/agno/attributes/metrics.py +214 -0
  18. agentops/instrumentation/agentic/agno/attributes/storage.py +158 -0
  19. agentops/instrumentation/agentic/agno/attributes/team.py +195 -0
  20. agentops/instrumentation/agentic/agno/attributes/tool.py +210 -0
  21. agentops/instrumentation/agentic/agno/attributes/workflow.py +254 -0
  22. agentops/instrumentation/agentic/agno/instrumentor.py +1313 -0
  23. agentops/instrumentation/agentic/crewai/LICENSE +201 -0
  24. agentops/instrumentation/agentic/crewai/NOTICE.md +10 -0
  25. agentops/instrumentation/agentic/crewai/__init__.py +6 -0
  26. agentops/instrumentation/agentic/crewai/crewai_span_attributes.py +335 -0
  27. agentops/instrumentation/agentic/crewai/instrumentation.py +535 -0
  28. agentops/instrumentation/agentic/crewai/version.py +1 -0
  29. agentops/instrumentation/agentic/google_adk/__init__.py +19 -0
  30. agentops/instrumentation/agentic/google_adk/instrumentor.py +68 -0
  31. agentops/instrumentation/agentic/google_adk/patch.py +767 -0
  32. agentops/instrumentation/agentic/haystack/__init__.py +1 -0
  33. agentops/instrumentation/agentic/haystack/instrumentor.py +186 -0
  34. agentops/instrumentation/agentic/langgraph/__init__.py +3 -0
  35. agentops/instrumentation/agentic/langgraph/attributes.py +54 -0
  36. agentops/instrumentation/agentic/langgraph/instrumentation.py +598 -0
  37. agentops/instrumentation/agentic/langgraph/version.py +1 -0
  38. agentops/instrumentation/agentic/openai_agents/README.md +156 -0
  39. agentops/instrumentation/agentic/openai_agents/SPANS.md +145 -0
  40. agentops/instrumentation/agentic/openai_agents/TRACING_API.md +144 -0
  41. agentops/instrumentation/agentic/openai_agents/__init__.py +30 -0
  42. agentops/instrumentation/agentic/openai_agents/attributes/common.py +549 -0
  43. agentops/instrumentation/agentic/openai_agents/attributes/completion.py +172 -0
  44. agentops/instrumentation/agentic/openai_agents/attributes/model.py +58 -0
  45. agentops/instrumentation/agentic/openai_agents/attributes/tokens.py +275 -0
  46. agentops/instrumentation/agentic/openai_agents/exporter.py +469 -0
  47. agentops/instrumentation/agentic/openai_agents/instrumentor.py +107 -0
  48. agentops/instrumentation/agentic/openai_agents/processor.py +58 -0
  49. agentops/instrumentation/agentic/smolagents/README.md +88 -0
  50. agentops/instrumentation/agentic/smolagents/__init__.py +12 -0
  51. agentops/instrumentation/agentic/smolagents/attributes/agent.py +354 -0
  52. agentops/instrumentation/agentic/smolagents/attributes/model.py +205 -0
  53. agentops/instrumentation/agentic/smolagents/instrumentor.py +286 -0
  54. agentops/instrumentation/agentic/smolagents/stream_wrapper.py +258 -0
  55. agentops/instrumentation/agentic/xpander/__init__.py +15 -0
  56. agentops/instrumentation/agentic/xpander/context.py +112 -0
  57. agentops/instrumentation/agentic/xpander/instrumentor.py +877 -0
  58. agentops/instrumentation/agentic/xpander/trace_probe.py +86 -0
  59. agentops/instrumentation/agentic/xpander/version.py +3 -0
  60. agentops/instrumentation/common/README.md +65 -0
  61. agentops/instrumentation/common/attributes.py +1 -2
  62. agentops/instrumentation/providers/anthropic/__init__.py +24 -0
  63. agentops/instrumentation/providers/anthropic/attributes/__init__.py +23 -0
  64. agentops/instrumentation/providers/anthropic/attributes/common.py +64 -0
  65. agentops/instrumentation/providers/anthropic/attributes/message.py +541 -0
  66. agentops/instrumentation/providers/anthropic/attributes/tools.py +231 -0
  67. agentops/instrumentation/providers/anthropic/event_handler_wrapper.py +90 -0
  68. agentops/instrumentation/providers/anthropic/instrumentor.py +146 -0
  69. agentops/instrumentation/providers/anthropic/stream_wrapper.py +436 -0
  70. agentops/instrumentation/providers/google_genai/README.md +33 -0
  71. agentops/instrumentation/providers/google_genai/__init__.py +24 -0
  72. agentops/instrumentation/providers/google_genai/attributes/__init__.py +25 -0
  73. agentops/instrumentation/providers/google_genai/attributes/chat.py +125 -0
  74. agentops/instrumentation/providers/google_genai/attributes/common.py +88 -0
  75. agentops/instrumentation/providers/google_genai/attributes/model.py +284 -0
  76. agentops/instrumentation/providers/google_genai/instrumentor.py +170 -0
  77. agentops/instrumentation/providers/google_genai/stream_wrapper.py +238 -0
  78. agentops/instrumentation/providers/ibm_watsonx_ai/__init__.py +28 -0
  79. agentops/instrumentation/providers/ibm_watsonx_ai/attributes/__init__.py +27 -0
  80. agentops/instrumentation/providers/ibm_watsonx_ai/attributes/attributes.py +277 -0
  81. agentops/instrumentation/providers/ibm_watsonx_ai/attributes/common.py +104 -0
  82. agentops/instrumentation/providers/ibm_watsonx_ai/instrumentor.py +162 -0
  83. agentops/instrumentation/providers/ibm_watsonx_ai/stream_wrapper.py +302 -0
  84. agentops/instrumentation/providers/mem0/__init__.py +45 -0
  85. agentops/instrumentation/providers/mem0/common.py +377 -0
  86. agentops/instrumentation/providers/mem0/instrumentor.py +270 -0
  87. agentops/instrumentation/providers/mem0/memory.py +430 -0
  88. agentops/instrumentation/providers/openai/__init__.py +21 -0
  89. agentops/instrumentation/providers/openai/attributes/__init__.py +7 -0
  90. agentops/instrumentation/providers/openai/attributes/common.py +55 -0
  91. agentops/instrumentation/providers/openai/attributes/response.py +607 -0
  92. agentops/instrumentation/providers/openai/config.py +36 -0
  93. agentops/instrumentation/providers/openai/instrumentor.py +312 -0
  94. agentops/instrumentation/providers/openai/stream_wrapper.py +941 -0
  95. agentops/instrumentation/providers/openai/utils.py +44 -0
  96. agentops/instrumentation/providers/openai/v0.py +176 -0
  97. agentops/instrumentation/providers/openai/v0_wrappers.py +483 -0
  98. agentops/instrumentation/providers/openai/wrappers/__init__.py +30 -0
  99. agentops/instrumentation/providers/openai/wrappers/assistant.py +277 -0
  100. agentops/instrumentation/providers/openai/wrappers/chat.py +259 -0
  101. agentops/instrumentation/providers/openai/wrappers/completion.py +109 -0
  102. agentops/instrumentation/providers/openai/wrappers/embeddings.py +94 -0
  103. agentops/instrumentation/providers/openai/wrappers/image_gen.py +75 -0
  104. agentops/instrumentation/providers/openai/wrappers/responses.py +191 -0
  105. agentops/instrumentation/providers/openai/wrappers/shared.py +81 -0
  106. agentops/instrumentation/utilities/concurrent_futures/__init__.py +10 -0
  107. agentops/instrumentation/utilities/concurrent_futures/instrumentation.py +206 -0
  108. agentops/integration/callbacks/dspy/__init__.py +11 -0
  109. agentops/integration/callbacks/dspy/callback.py +471 -0
  110. agentops/integration/callbacks/langchain/README.md +59 -0
  111. agentops/integration/callbacks/langchain/__init__.py +15 -0
  112. agentops/integration/callbacks/langchain/callback.py +791 -0
  113. agentops/integration/callbacks/langchain/utils.py +54 -0
  114. agentops/legacy/crewai.md +121 -0
  115. agentops/logging/instrument_logging.py +4 -0
  116. agentops/sdk/README.md +220 -0
  117. agentops/sdk/core.py +75 -32
  118. agentops/sdk/descriptors/classproperty.py +28 -0
  119. agentops/sdk/exporters.py +152 -33
  120. agentops/semconv/README.md +125 -0
  121. agentops/semconv/span_kinds.py +0 -2
  122. agentops/validation.py +102 -63
  123. {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.23.dist-info}/METADATA +30 -40
  124. mseep_agentops-0.4.23.dist-info/RECORD +178 -0
  125. {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.23.dist-info}/WHEEL +1 -2
  126. mseep_agentops-0.4.18.dist-info/RECORD +0 -94
  127. mseep_agentops-0.4.18.dist-info/top_level.txt +0 -2
  128. tests/conftest.py +0 -10
  129. tests/unit/client/__init__.py +0 -1
  130. tests/unit/client/test_http_adapter.py +0 -221
  131. tests/unit/client/test_http_client.py +0 -206
  132. tests/unit/conftest.py +0 -54
  133. tests/unit/sdk/__init__.py +0 -1
  134. tests/unit/sdk/instrumentation_tester.py +0 -207
  135. tests/unit/sdk/test_attributes.py +0 -392
  136. tests/unit/sdk/test_concurrent_instrumentation.py +0 -468
  137. tests/unit/sdk/test_decorators.py +0 -763
  138. tests/unit/sdk/test_exporters.py +0 -241
  139. tests/unit/sdk/test_factory.py +0 -1188
  140. tests/unit/sdk/test_internal_span_processor.py +0 -397
  141. tests/unit/sdk/test_resource_attributes.py +0 -35
  142. tests/unit/test_config.py +0 -82
  143. tests/unit/test_context_manager.py +0 -777
  144. tests/unit/test_events.py +0 -27
  145. tests/unit/test_host_env.py +0 -54
  146. tests/unit/test_init_py.py +0 -501
  147. tests/unit/test_serialization.py +0 -433
  148. tests/unit/test_session.py +0 -676
  149. tests/unit/test_user_agent.py +0 -34
  150. tests/unit/test_validation.py +0 -405
  151. {tests → agentops/instrumentation/agentic/openai_agents/attributes}/__init__.py +0 -0
  152. /tests/unit/__init__.py → /agentops/instrumentation/providers/openai/attributes/tools.py +0 -0
  153. {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.23.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,238 @@
1
+ """Google Generative AI stream wrapper implementation.
2
+
3
+ This module provides wrappers for Google Generative AI's streaming functionality,
4
+ focusing on the generate_content_stream method for both sync and async operations.
5
+ It instruments streams to collect telemetry data for monitoring and analysis.
6
+ """
7
+
8
+ import logging
9
+ from typing import TypeVar
10
+
11
+ from opentelemetry import context as context_api
12
+ from opentelemetry.trace import SpanKind, Status, StatusCode
13
+ from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
14
+
15
+ from agentops.semconv import SpanAttributes, LLMRequestTypeValues, CoreAttributes, MessageAttributes
16
+ from agentops.instrumentation.common.wrappers import _with_tracer_wrapper
17
+ from agentops.instrumentation.providers.google_genai.attributes.model import (
18
+ get_generate_content_attributes,
19
+ get_stream_attributes,
20
+ )
21
+ from agentops.instrumentation.providers.google_genai.attributes.common import (
22
+ extract_request_attributes,
23
+ )
24
+
25
+ logger = logging.getLogger(__name__)
26
+
27
+ T = TypeVar("T")
28
+
29
+
30
@_with_tracer_wrapper
def generate_content_stream_wrapper(tracer, wrapped, instance, args, kwargs):
    """Wrapper for the GenerativeModel.generate_content_stream method.

    Creates a CLIENT span covering the whole stream lifetime and wraps the
    returned iterator so that completion text and token usage can be recorded
    as chunks are consumed.

    Args:
        tracer: The OpenTelemetry tracer to use
        wrapped: The original stream method
        instance: The instance the method is bound to
        args: Positional arguments to the method
        kwargs: Keyword arguments to the method

    Returns:
        A wrapped generator that captures telemetry data
    """
    # Honor the global instrumentation-suppression flag: call through untouched.
    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
        return wrapped(*args, **kwargs)

    span = tracer.start_span(
        "gemini.generate_content_stream",
        kind=SpanKind.CLIENT,
        attributes={SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.CHAT.value},
    )

    # Extract request parameters and copy them onto the span.
    request_attributes = get_generate_content_attributes(args=args, kwargs=kwargs)
    for key, value in request_attributes.items():
        span.set_attribute(key, value)

    # Mark as streaming request
    span.set_attribute(SpanAttributes.LLM_REQUEST_STREAMING, True)

    # Extract custom parameters from config (if present)
    if "config" in kwargs:
        config_attributes = extract_request_attributes({"config": kwargs["config"]})
        for key, value in config_attributes.items():
            span.set_attribute(key, value)

    try:
        stream = wrapped(*args, **kwargs)

        # Extract model information from the stream object, if available.
        stream_attributes = get_stream_attributes(stream)
        for key, value in stream_attributes.items():
            span.set_attribute(key, value)

        def instrumented_stream():
            """Generator that forwards chunks while accumulating telemetry.

            The span stays open until the stream is exhausted (or raises);
            it is ended in the ``finally`` clause below.

            Yields:
                Items from the original stream, unchanged.
            """
            full_text = ""
            last_chunk_with_metadata = None

            try:
                for chunk in stream:
                    # Keep the last chunk carrying usage_metadata; token usage
                    # is only complete on the final chunk of the stream.
                    if hasattr(chunk, "usage_metadata") and chunk.usage_metadata:
                        last_chunk_with_metadata = chunk

                    # Accumulate streamed text for the completion attribute.
                    if hasattr(chunk, "text"):
                        text_value = getattr(chunk, "text", None)
                        if text_value:
                            full_text += text_value

                    yield chunk

                # Set final content when the stream completes normally.
                if full_text:
                    span.set_attribute(MessageAttributes.COMPLETION_CONTENT.format(i=0), full_text)
                    span.set_attribute(MessageAttributes.COMPLETION_ROLE.format(i=0), "assistant")

                # Record token usage from the last metadata-bearing chunk, if any.
                if last_chunk_with_metadata and hasattr(last_chunk_with_metadata, "usage_metadata"):
                    metadata = last_chunk_with_metadata.usage_metadata
                    if hasattr(metadata, "prompt_token_count"):
                        span.set_attribute(SpanAttributes.LLM_USAGE_PROMPT_TOKENS, metadata.prompt_token_count)
                    if hasattr(metadata, "candidates_token_count"):
                        span.set_attribute(SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, metadata.candidates_token_count)
                    if hasattr(metadata, "total_token_count"):
                        span.set_attribute(SpanAttributes.LLM_USAGE_TOTAL_TOKENS, metadata.total_token_count)

                span.set_status(Status(StatusCode.OK))
            except Exception as e:
                span.record_exception(e)
                span.set_attribute(CoreAttributes.ERROR_MESSAGE, str(e))
                span.set_attribute(CoreAttributes.ERROR_TYPE, e.__class__.__name__)
                span.set_status(Status(StatusCode.ERROR, str(e)))
                raise
            finally:
                span.end()

        # NOTE(review): if the caller never iterates the returned generator, the
        # span is never ended — confirm this is acceptable for abandoned streams.
        return instrumented_stream()
    except Exception as e:
        # Failure before/while creating the stream: record and close the span here,
        # since the generator's finally will never run.
        span.record_exception(e)
        span.set_attribute(CoreAttributes.ERROR_MESSAGE, str(e))
        span.set_attribute(CoreAttributes.ERROR_TYPE, e.__class__.__name__)
        span.set_status(Status(StatusCode.ERROR, str(e)))
        span.end()
        raise
134
+
135
+
136
@_with_tracer_wrapper
async def generate_content_stream_async_wrapper(tracer, wrapped, instance, args, kwargs):
    """Wrapper for the async GenerativeModel.generate_content_stream method.

    Opens a CLIENT span for the lifetime of the stream and returns an async
    generator that forwards chunks while collecting completion text and token
    usage for telemetry.

    Args:
        tracer: The OpenTelemetry tracer to use
        wrapped: The original async stream method
        instance: The instance the method is bound to
        args: Positional arguments to the method
        kwargs: Keyword arguments to the method

    Returns:
        A wrapped async generator that captures telemetry data
    """
    # Respect the global suppression flag: delegate without instrumentation.
    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
        return await wrapped(*args, **kwargs)

    span = tracer.start_span(
        "gemini.generate_content_stream_async",
        kind=SpanKind.CLIENT,
        attributes={SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.CHAT.value},
    )

    def _apply(attribute_map):
        # Copy every extracted attribute onto the span.
        for attr_key, attr_value in attribute_map.items():
            span.set_attribute(attr_key, attr_value)

    def _record_failure(error):
        # Single place to stamp error details onto the span.
        span.record_exception(error)
        span.set_attribute(CoreAttributes.ERROR_MESSAGE, str(error))
        span.set_attribute(CoreAttributes.ERROR_TYPE, error.__class__.__name__)
        span.set_status(Status(StatusCode.ERROR, str(error)))

    # Request parameters, streaming marker, and optional config attributes.
    _apply(get_generate_content_attributes(args=args, kwargs=kwargs))
    span.set_attribute(SpanAttributes.LLM_REQUEST_STREAMING, True)
    if "config" in kwargs:
        _apply(extract_request_attributes({"config": kwargs["config"]}))

    try:
        stream = await wrapped(*args, **kwargs)

        # Model information derived from the stream object, when available.
        _apply(get_stream_attributes(stream))

        async def instrumented_stream():
            """Async generator forwarding chunks while accumulating telemetry.

            Yields:
                Items from the original stream, unchanged.
            """
            aggregated_text = ""
            metadata_chunk = None

            try:
                async for item in stream:
                    # Remember the most recent chunk carrying usage metadata.
                    if hasattr(item, "usage_metadata") and item.usage_metadata:
                        metadata_chunk = item

                    # Accumulate streamed text for the completion attribute.
                    if hasattr(item, "text"):
                        piece = getattr(item, "text", None)
                        if piece:
                            aggregated_text += piece

                    yield item

                # Record the assembled completion once the stream is done.
                if aggregated_text:
                    span.set_attribute(MessageAttributes.COMPLETION_CONTENT.format(i=0), aggregated_text)
                    span.set_attribute(MessageAttributes.COMPLETION_ROLE.format(i=0), "assistant")

                # Token usage comes from the final metadata-bearing chunk.
                if metadata_chunk and hasattr(metadata_chunk, "usage_metadata"):
                    usage = metadata_chunk.usage_metadata
                    if hasattr(usage, "prompt_token_count"):
                        span.set_attribute(SpanAttributes.LLM_USAGE_PROMPT_TOKENS, usage.prompt_token_count)
                    if hasattr(usage, "candidates_token_count"):
                        span.set_attribute(SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, usage.candidates_token_count)
                    if hasattr(usage, "total_token_count"):
                        span.set_attribute(SpanAttributes.LLM_USAGE_TOTAL_TOKENS, usage.total_token_count)

                span.set_status(Status(StatusCode.OK))
            except Exception as e:
                _record_failure(e)
                raise
            finally:
                span.end()

        return instrumented_stream()
    except Exception as e:
        # Stream creation failed: the generator's finally never runs, so end here.
        _record_failure(e)
        span.end()
        raise
@@ -0,0 +1,28 @@
1
+ """IBM WatsonX AI instrumentation for AgentOps.
2
+
3
+ This package provides instrumentation for IBM's WatsonX AI foundation models,
4
+ capturing telemetry for model interactions including completions, chat, and streaming responses.
5
+ """
6
+
7
+ import logging
8
+ from agentops.instrumentation.common import LibraryInfo
9
+
10
+ logger = logging.getLogger(__name__)
11
+
12
+ # Library information
13
+ _library_info = LibraryInfo(
14
+ name="ibm_watsonx_ai",
15
+ package_name="ibm-watsonx-ai",
16
+ default_version="1.3.11", # Default to known supported version if not found
17
+ )
18
+ LIBRARY_NAME = _library_info.name
19
+ LIBRARY_VERSION = _library_info.version
20
+
21
+ # Import after defining constants to avoid circular imports
22
+ from agentops.instrumentation.providers.ibm_watsonx_ai.instrumentor import WatsonxInstrumentor # noqa: E402
23
+
24
+ __all__ = [
25
+ "LIBRARY_NAME",
26
+ "LIBRARY_VERSION",
27
+ "WatsonxInstrumentor",
28
+ ]
@@ -0,0 +1,27 @@
1
+ """Attribute extraction utilities for IBM watsonx.ai instrumentation."""
2
+
3
+ from agentops.instrumentation.providers.ibm_watsonx_ai.attributes.attributes import (
4
+ get_generate_attributes,
5
+ get_chat_attributes,
6
+ get_tokenize_attributes,
7
+ get_model_details_attributes,
8
+ )
9
+ from agentops.instrumentation.providers.ibm_watsonx_ai.attributes.common import (
10
+ extract_params_attributes,
11
+ convert_params_to_dict,
12
+ extract_prompt_from_args,
13
+ extract_messages_from_args,
14
+ extract_params_from_args,
15
+ )
16
+
17
+ __all__ = [
18
+ "get_generate_attributes",
19
+ "get_chat_attributes",
20
+ "get_tokenize_attributes",
21
+ "get_model_details_attributes",
22
+ "extract_params_attributes",
23
+ "convert_params_to_dict",
24
+ "extract_prompt_from_args",
25
+ "extract_messages_from_args",
26
+ "extract_params_from_args",
27
+ ]
@@ -0,0 +1,277 @@
1
+ """Attributes for IBM watsonx.ai model instrumentation.
2
+
3
+ This module provides attribute extraction functions for IBM watsonx.ai model operations.
4
+ """
5
+
6
+ from typing import Any, Dict, Optional, Tuple
7
+ from agentops.instrumentation.common.attributes import AttributeMap
8
+ from agentops.semconv import SpanAttributes, MessageAttributes
9
+ from agentops.instrumentation.providers.ibm_watsonx_ai.attributes.common import (
10
+ extract_params_attributes,
11
+ convert_params_to_dict,
12
+ extract_prompt_from_args,
13
+ extract_messages_from_args,
14
+ extract_params_from_args,
15
+ )
16
+
17
+
18
def get_generate_attributes(
    args: Optional[Tuple] = None, kwargs: Optional[Dict] = None, return_value: Optional[Any] = None
) -> AttributeMap:
    """Extract token usage attributes from generate method calls."""
    attributes: AttributeMap = {}

    # Record the caller-supplied prompt as message 0.
    prompt = extract_prompt_from_args(args, kwargs)
    if prompt:
        attributes[MessageAttributes.PROMPT_ROLE.format(i=0)] = "user"
        attributes[MessageAttributes.PROMPT_CONTENT.format(i=0)] = prompt
        attributes[MessageAttributes.PROMPT_TYPE.format(i=0)] = "text"

    # Flatten any generation parameters into span attributes.
    params = extract_params_from_args(args, kwargs)
    params_dict = convert_params_to_dict(params) if params else None
    if params_dict:
        attributes.update(extract_params_attributes(params_dict))

    # Pull model, completion text and token counts out of the raw response dict.
    if return_value and isinstance(return_value, dict):
        if "model_id" in return_value:
            # NOTE(review): the response's model id is stored under the
            # request-model attribute — confirm this is intentional.
            attributes[SpanAttributes.LLM_REQUEST_MODEL] = return_value["model_id"]

        if "results" in return_value:
            for idx, result in enumerate(return_value["results"]):
                # Completion text for this result index.
                if "generated_text" in result:
                    attributes[MessageAttributes.COMPLETION_CONTENT.format(i=idx)] = result["generated_text"]
                    attributes[MessageAttributes.COMPLETION_ROLE.format(i=idx)] = "assistant"
                    attributes[MessageAttributes.COMPLETION_TYPE.format(i=idx)] = "text"

                # Token usage; the total is only emitted when both halves exist.
                has_prompt_tokens = "input_token_count" in result
                has_completion_tokens = "generated_token_count" in result
                if has_prompt_tokens:
                    attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] = result["input_token_count"]
                if has_completion_tokens:
                    attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] = result["generated_token_count"]
                if has_prompt_tokens and has_completion_tokens:
                    attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] = (
                        result["input_token_count"] + result["generated_token_count"]
                    )

                if "stop_reason" in result:
                    attributes[SpanAttributes.LLM_RESPONSE_STOP_REASON] = result["stop_reason"]

    return attributes
68
+
69
+
70
def get_tokenize_attributes(
    args: Optional[Tuple] = None, kwargs: Optional[Dict] = None, return_value: Optional[Any] = None
) -> AttributeMap:
    """Extract attributes from tokenize method calls."""
    attributes: AttributeMap = {}

    # The text being tokenized is recorded as the first (and only) prompt.
    text = extract_prompt_from_args(args, kwargs)
    if text:
        attributes[MessageAttributes.PROMPT_ROLE.format(i=0)] = "user"
        attributes[MessageAttributes.PROMPT_CONTENT.format(i=0)] = text
        attributes[MessageAttributes.PROMPT_TYPE.format(i=0)] = "text"

    # Mine the response dict for the model id and token count.
    if return_value and isinstance(return_value, dict):
        if "model_id" in return_value:
            attributes[SpanAttributes.LLM_REQUEST_MODEL] = return_value["model_id"]
        if "result" in return_value:
            result = return_value["result"]
            attributes["ibm.watsonx.tokenize.result"] = str(result)
            if "token_count" in result:
                attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] = result["token_count"]

    return attributes
93
+
94
+
95
def get_model_details_attributes(
    args: Optional[Tuple] = None, kwargs: Optional[Dict] = None, return_value: Optional[Any] = None
) -> AttributeMap:
    """Extract attributes from get_details method calls."""
    # Only dict-shaped responses carry extractable details.
    if not isinstance(return_value, dict):
        return {}

    attributes: AttributeMap = {}

    # Basic model information: copy the whitelisted top-level fields.
    basic_fields = (
        "model_id",
        "label",
        "provider",
        "source",
        "short_description",
        "long_description",
        "number_params",
        "input_tier",
        "output_tier",
    )
    for field_name, field_value in return_value.items():
        if field_name in basic_fields:
            attributes[f"ibm.watsonx.model.{field_name}"] = field_value

    # Model functions: record the list of function ids.
    if "functions" in return_value:
        function_ids = [func["id"] for func in return_value["functions"]]
        attributes["ibm.watsonx.model.functions"] = str(function_ids)

    # Model tasks: keep only the id/ratings/tags fields of each task.
    if "tasks" in return_value:
        task_info = []
        for task in return_value["tasks"]:
            task_info.append({k: v for k, v in task.items() if k in ["id", "ratings", "tags"]})
        attributes["ibm.watsonx.model.tasks"] = str(task_info)

    # Model limits.
    if "model_limits" in return_value:
        for limit_name, limit_value in return_value["model_limits"].items():
            if limit_name in ("max_sequence_length", "max_output_tokens", "training_data_max_records"):
                attributes[f"ibm.watsonx.model.{limit_name}"] = limit_value

    # Service tier limits, one namespace per tier.
    if "limits" in return_value:
        for tier, tier_limits in return_value["limits"].items():
            for limit_name, limit_value in tier_limits.items():
                if limit_name in ("call_time", "max_output_tokens"):
                    attributes[f"ibm.watsonx.model.limits.{tier}.{limit_name}"] = limit_value

    # Model lifecycle: stage id -> start date.
    if "lifecycle" in return_value:
        for stage in return_value["lifecycle"]:
            if "id" in stage and "start_date" in stage:
                attributes[f"ibm.watsonx.model.lifecycle.{stage['id']}"] = stage["start_date"]

    # Training parameters; nested dicts are stringified.
    if "training_parameters" in return_value:
        for param_name, param_value in return_value["training_parameters"].items():
            attributes[f"ibm.watsonx.model.training.{param_name}"] = (
                str(param_value) if isinstance(param_value, dict) else param_value
            )

    return attributes
173
+
174
+
175
def get_chat_attributes(
    args: Optional[Tuple] = None, kwargs: Optional[Dict] = None, return_value: Optional[Any] = None
) -> AttributeMap:
    """Extract attributes from chat method calls.

    Builds a flat attribute map from the request messages (prompts), generation
    parameters, and the raw chat response dict (completions, token usage,
    response metadata).
    """
    attributes = {}

    # Extract messages using helper function
    messages = extract_messages_from_args(args, kwargs)
    if messages:
        # Process each message in the conversation
        for i, message in enumerate(messages):
            # Non-dict entries are silently skipped.
            if not isinstance(message, dict):
                continue

            # Extract role and content
            role = message.get("role", "")
            content = message.get("content", [])

            # Handle content which can be a list of different types (text, image_url)
            if isinstance(content, list):
                # Combine all text content
                text_content = []
                image_urls = []

                for content_item in content:
                    if isinstance(content_item, dict):
                        if content_item.get("type") == "text":
                            text_content.append(content_item.get("text", ""))
                        elif content_item.get("type") == "image_url":
                            image_url = content_item.get("image_url", {})
                            if isinstance(image_url, dict) and "url" in image_url:
                                url = image_url["url"]
                                # Only store URLs that start with http, otherwise use placeholder
                                # (avoids capturing inline base64 image payloads).
                                if url and isinstance(url, str) and url.startswith(("http://", "https://")):
                                    image_urls.append(url)
                                else:
                                    image_urls.append("[IMAGE_PLACEHOLDER]")

                # Set text content if any.
                # NOTE(review): a message with only images gets no role/content
                # attributes here — confirm this is intended.
                if text_content:
                    attributes[MessageAttributes.PROMPT_CONTENT.format(i=i)] = " ".join(text_content)
                    attributes[MessageAttributes.PROMPT_TYPE.format(i=i)] = "text"
                    attributes[MessageAttributes.PROMPT_ROLE.format(i=i)] = role

                # Set image URLs if any
                if image_urls:
                    attributes[f"ibm.watsonx.chat.message.{i}.images"] = str(image_urls)
            else:
                # Handle string content
                attributes[MessageAttributes.PROMPT_CONTENT.format(i=i)] = str(content)
                attributes[MessageAttributes.PROMPT_TYPE.format(i=i)] = "text"
                attributes[MessageAttributes.PROMPT_ROLE.format(i=i)] = role

    # Extract parameters using helper functions
    params = extract_params_from_args(args, kwargs)
    if params:
        params_dict = convert_params_to_dict(params)
        if params_dict:
            attributes.update(extract_params_attributes(params_dict))

    # Extract response information
    if return_value and isinstance(return_value, dict):
        # Extract model information ("model_id" takes precedence over "model")
        if "model_id" in return_value:
            attributes[SpanAttributes.LLM_REQUEST_MODEL] = return_value["model_id"]
        elif "model" in return_value:
            attributes[SpanAttributes.LLM_REQUEST_MODEL] = return_value["model"]

        # Extract completion from choices
        if "choices" in return_value:
            for idx, choice in enumerate(return_value["choices"]):
                if isinstance(choice, dict) and "message" in choice:
                    message = choice["message"]
                    if isinstance(message, dict):
                        if "content" in message:
                            attributes[MessageAttributes.COMPLETION_CONTENT.format(i=idx)] = message["content"]
                            attributes[MessageAttributes.COMPLETION_ROLE.format(i=idx)] = message.get(
                                "role", "assistant"
                            )
                            attributes[MessageAttributes.COMPLETION_TYPE.format(i=idx)] = "text"
                    # finish_reason is a key of the choice, not the message;
                    # the last choice's value wins since the attribute is scalar.
                    if "finish_reason" in choice:
                        attributes[SpanAttributes.LLM_RESPONSE_STOP_REASON] = choice["finish_reason"]

        # Extract token usage
        if "usage" in return_value:
            usage = return_value["usage"]
            if isinstance(usage, dict):
                if "prompt_tokens" in usage:
                    attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] = usage["prompt_tokens"]
                if "completion_tokens" in usage:
                    attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] = usage["completion_tokens"]
                if "total_tokens" in usage:
                    attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] = usage["total_tokens"]

        # Extract additional metadata
        if "id" in return_value:
            attributes["ibm.watsonx.chat.id"] = return_value["id"]
        if "model_version" in return_value:
            attributes["ibm.watsonx.model.version"] = return_value["model_version"]
        if "created_at" in return_value:
            attributes["ibm.watsonx.chat.created_at"] = return_value["created_at"]

    return attributes
@@ -0,0 +1,104 @@
1
+ """Common utilities and constants for IBM watsonx.ai attribute processing.
2
+
3
+ This module contains shared constants, attribute mappings, and utility functions for processing
4
+ trace and span attributes in IBM watsonx.ai instrumentation.
5
+ """
6
+
7
+ from typing import Any, Dict, Optional, Tuple, List
8
+ from agentops.instrumentation.common.attributes import AttributeMap
9
+ from agentops.semconv import SpanAttributes
10
+ from agentops.logging import logger
11
+ from ibm_watsonx_ai.foundation_models.schema import TextGenParameters, TextChatParameters
12
+
13
# Mapping of generation parameters to their OpenTelemetry attribute names.
# Parameters with a standard gen-ai semantic convention (max tokens,
# temperature, top_p) map to SpanAttributes; watsonx-specific knobs use
# vendor-prefixed "ibm.watsonx.*" attribute keys.
GENERATION_PARAM_ATTRIBUTES: AttributeMap = {
    "max_new_tokens": SpanAttributes.LLM_REQUEST_MAX_TOKENS,
    "min_new_tokens": "ibm.watsonx.min_new_tokens",
    "temperature": SpanAttributes.LLM_REQUEST_TEMPERATURE,
    "top_p": SpanAttributes.LLM_REQUEST_TOP_P,
    "top_k": "ibm.watsonx.top_k",
    "repetition_penalty": "ibm.watsonx.repetition_penalty",
    "time_limit": "ibm.watsonx.time_limit",
    "random_seed": "ibm.watsonx.random_seed",
    "stop_sequences": "ibm.watsonx.stop_sequences",
    "truncate_input_tokens": "ibm.watsonx.truncate_input_tokens",
    "decoding_method": "ibm.watsonx.decoding_method",
}
27
+
28
# Mapping of guardrail parameters to their OpenTelemetry attribute names.
# All guardrail settings are watsonx-specific, so every key is
# vendor-prefixed under "ibm.watsonx.guardrails.*".
GUARDRAIL_PARAM_ATTRIBUTES: AttributeMap = {
    "guardrails": "ibm.watsonx.guardrails.enabled",
    "guardrails_hap_params": "ibm.watsonx.guardrails.hap_params",
    "guardrails_pii_params": "ibm.watsonx.guardrails.pii_params",
}
34
+
35
+
36
def extract_prompt_from_args(args: Optional[Tuple] = None, kwargs: Optional[Dict] = None) -> Optional[str]:
    """Pull the prompt out of a method's call arguments.

    The watsonx generation APIs accept the prompt either as the first
    positional argument or as the ``prompt`` keyword argument.

    Returns:
        The prompt value, or ``None`` when neither form is present.
    """
    if args:
        # First positional argument wins over the keyword form.
        return args[0]
    if kwargs:
        return kwargs.get("prompt")
    return None
43
+
44
+
45
def extract_messages_from_args(
    args: Optional[Tuple] = None, kwargs: Optional[Dict] = None
) -> Optional[List[Dict[str, Any]]]:
    """Pull the chat message list out of a method's call arguments.

    Chat APIs accept the messages either as the first positional argument
    or as the ``messages`` keyword argument.

    Returns:
        The messages value, or ``None`` when neither form is present.
    """
    if args:
        # First positional argument wins over the keyword form.
        return args[0]
    if kwargs:
        return kwargs.get("messages")
    return None
54
+
55
+
56
def extract_params_from_args(args: Optional[Tuple] = None, kwargs: Optional[Dict] = None) -> Optional[Any]:
    """Pull the generation-parameter object out of a method's call arguments.

    The parameters arrive either as the second positional argument (after
    the prompt/messages) or as the ``params`` keyword argument.

    Returns:
        The params value, or ``None`` when neither form is present.
    """
    if args and len(args) > 1:
        return args[1]
    if kwargs:
        return kwargs.get("params")
    return None
63
+
64
+
65
def convert_params_to_dict(params: Any) -> Dict[str, Any]:
    """Normalize a generation-parameter value into a plain dictionary.

    Accepts a ``TextGenParameters``/``TextChatParameters`` schema object,
    a plain ``dict``, or anything falsy.

    Returns:
        A dictionary of parameters; ``{}`` for falsy, non-dict, or
        unserializable inputs.
    """
    if not params:
        return {}

    # Schema objects from the watsonx SDK know how to serialize themselves;
    # serialization failure is logged and treated as "no parameters".
    if isinstance(params, (TextGenParameters, TextChatParameters)):
        try:
            return params.to_dict()
        except Exception as e:
            logger.debug(f"Could not convert params object to dict: {e}")
            return {}

    if isinstance(params, dict):
        return params
    return {}
78
+
79
+
80
def extract_params_attributes(params: Dict[str, Any]) -> AttributeMap:
    """Map a generation-params dictionary onto OpenTelemetry span attributes.

    Known generation and guardrail parameters are translated via the module
    mapping tables; the ``concurrency_limit`` setting is handled separately.

    Returns:
        A new attribute map containing only the parameters present in
        ``params``.
    """
    attributes = {}

    # Standard generation parameters; list values (e.g. stop_sequences) are
    # flattened to strings since span attribute values must be scalar.
    for param_name, attr_name in GENERATION_PARAM_ATTRIBUTES.items():
        if param_name not in params:
            continue
        value = params[param_name]
        attributes[attr_name] = str(value) if isinstance(value, list) else value

    # Guardrail parameters; nested dict configs likewise become strings.
    for param_name, attr_name in GUARDRAIL_PARAM_ATTRIBUTES.items():
        if param_name not in params:
            continue
        value = params[param_name]
        attributes[attr_name] = str(value) if isinstance(value, dict) else value

    # Concurrency limit has no mapping-table entry; record it directly.
    if "concurrency_limit" in params:
        attributes["ibm.watsonx.concurrency_limit"] = params["concurrency_limit"]

    return attributes