mseep-agentops 0.4.18__py3-none-any.whl → 0.4.23__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (153)
  1. agentops/__init__.py +0 -0
  2. agentops/client/api/base.py +28 -30
  3. agentops/client/api/versions/v3.py +29 -25
  4. agentops/client/api/versions/v4.py +87 -46
  5. agentops/client/client.py +98 -29
  6. agentops/client/http/README.md +87 -0
  7. agentops/client/http/http_client.py +126 -172
  8. agentops/config.py +8 -2
  9. agentops/instrumentation/OpenTelemetry.md +133 -0
  10. agentops/instrumentation/README.md +167 -0
  11. agentops/instrumentation/__init__.py +13 -1
  12. agentops/instrumentation/agentic/ag2/__init__.py +18 -0
  13. agentops/instrumentation/agentic/ag2/instrumentor.py +922 -0
  14. agentops/instrumentation/agentic/agno/__init__.py +19 -0
  15. agentops/instrumentation/agentic/agno/attributes/__init__.py +20 -0
  16. agentops/instrumentation/agentic/agno/attributes/agent.py +250 -0
  17. agentops/instrumentation/agentic/agno/attributes/metrics.py +214 -0
  18. agentops/instrumentation/agentic/agno/attributes/storage.py +158 -0
  19. agentops/instrumentation/agentic/agno/attributes/team.py +195 -0
  20. agentops/instrumentation/agentic/agno/attributes/tool.py +210 -0
  21. agentops/instrumentation/agentic/agno/attributes/workflow.py +254 -0
  22. agentops/instrumentation/agentic/agno/instrumentor.py +1313 -0
  23. agentops/instrumentation/agentic/crewai/LICENSE +201 -0
  24. agentops/instrumentation/agentic/crewai/NOTICE.md +10 -0
  25. agentops/instrumentation/agentic/crewai/__init__.py +6 -0
  26. agentops/instrumentation/agentic/crewai/crewai_span_attributes.py +335 -0
  27. agentops/instrumentation/agentic/crewai/instrumentation.py +535 -0
  28. agentops/instrumentation/agentic/crewai/version.py +1 -0
  29. agentops/instrumentation/agentic/google_adk/__init__.py +19 -0
  30. agentops/instrumentation/agentic/google_adk/instrumentor.py +68 -0
  31. agentops/instrumentation/agentic/google_adk/patch.py +767 -0
  32. agentops/instrumentation/agentic/haystack/__init__.py +1 -0
  33. agentops/instrumentation/agentic/haystack/instrumentor.py +186 -0
  34. agentops/instrumentation/agentic/langgraph/__init__.py +3 -0
  35. agentops/instrumentation/agentic/langgraph/attributes.py +54 -0
  36. agentops/instrumentation/agentic/langgraph/instrumentation.py +598 -0
  37. agentops/instrumentation/agentic/langgraph/version.py +1 -0
  38. agentops/instrumentation/agentic/openai_agents/README.md +156 -0
  39. agentops/instrumentation/agentic/openai_agents/SPANS.md +145 -0
  40. agentops/instrumentation/agentic/openai_agents/TRACING_API.md +144 -0
  41. agentops/instrumentation/agentic/openai_agents/__init__.py +30 -0
  42. agentops/instrumentation/agentic/openai_agents/attributes/common.py +549 -0
  43. agentops/instrumentation/agentic/openai_agents/attributes/completion.py +172 -0
  44. agentops/instrumentation/agentic/openai_agents/attributes/model.py +58 -0
  45. agentops/instrumentation/agentic/openai_agents/attributes/tokens.py +275 -0
  46. agentops/instrumentation/agentic/openai_agents/exporter.py +469 -0
  47. agentops/instrumentation/agentic/openai_agents/instrumentor.py +107 -0
  48. agentops/instrumentation/agentic/openai_agents/processor.py +58 -0
  49. agentops/instrumentation/agentic/smolagents/README.md +88 -0
  50. agentops/instrumentation/agentic/smolagents/__init__.py +12 -0
  51. agentops/instrumentation/agentic/smolagents/attributes/agent.py +354 -0
  52. agentops/instrumentation/agentic/smolagents/attributes/model.py +205 -0
  53. agentops/instrumentation/agentic/smolagents/instrumentor.py +286 -0
  54. agentops/instrumentation/agentic/smolagents/stream_wrapper.py +258 -0
  55. agentops/instrumentation/agentic/xpander/__init__.py +15 -0
  56. agentops/instrumentation/agentic/xpander/context.py +112 -0
  57. agentops/instrumentation/agentic/xpander/instrumentor.py +877 -0
  58. agentops/instrumentation/agentic/xpander/trace_probe.py +86 -0
  59. agentops/instrumentation/agentic/xpander/version.py +3 -0
  60. agentops/instrumentation/common/README.md +65 -0
  61. agentops/instrumentation/common/attributes.py +1 -2
  62. agentops/instrumentation/providers/anthropic/__init__.py +24 -0
  63. agentops/instrumentation/providers/anthropic/attributes/__init__.py +23 -0
  64. agentops/instrumentation/providers/anthropic/attributes/common.py +64 -0
  65. agentops/instrumentation/providers/anthropic/attributes/message.py +541 -0
  66. agentops/instrumentation/providers/anthropic/attributes/tools.py +231 -0
  67. agentops/instrumentation/providers/anthropic/event_handler_wrapper.py +90 -0
  68. agentops/instrumentation/providers/anthropic/instrumentor.py +146 -0
  69. agentops/instrumentation/providers/anthropic/stream_wrapper.py +436 -0
  70. agentops/instrumentation/providers/google_genai/README.md +33 -0
  71. agentops/instrumentation/providers/google_genai/__init__.py +24 -0
  72. agentops/instrumentation/providers/google_genai/attributes/__init__.py +25 -0
  73. agentops/instrumentation/providers/google_genai/attributes/chat.py +125 -0
  74. agentops/instrumentation/providers/google_genai/attributes/common.py +88 -0
  75. agentops/instrumentation/providers/google_genai/attributes/model.py +284 -0
  76. agentops/instrumentation/providers/google_genai/instrumentor.py +170 -0
  77. agentops/instrumentation/providers/google_genai/stream_wrapper.py +238 -0
  78. agentops/instrumentation/providers/ibm_watsonx_ai/__init__.py +28 -0
  79. agentops/instrumentation/providers/ibm_watsonx_ai/attributes/__init__.py +27 -0
  80. agentops/instrumentation/providers/ibm_watsonx_ai/attributes/attributes.py +277 -0
  81. agentops/instrumentation/providers/ibm_watsonx_ai/attributes/common.py +104 -0
  82. agentops/instrumentation/providers/ibm_watsonx_ai/instrumentor.py +162 -0
  83. agentops/instrumentation/providers/ibm_watsonx_ai/stream_wrapper.py +302 -0
  84. agentops/instrumentation/providers/mem0/__init__.py +45 -0
  85. agentops/instrumentation/providers/mem0/common.py +377 -0
  86. agentops/instrumentation/providers/mem0/instrumentor.py +270 -0
  87. agentops/instrumentation/providers/mem0/memory.py +430 -0
  88. agentops/instrumentation/providers/openai/__init__.py +21 -0
  89. agentops/instrumentation/providers/openai/attributes/__init__.py +7 -0
  90. agentops/instrumentation/providers/openai/attributes/common.py +55 -0
  91. agentops/instrumentation/providers/openai/attributes/response.py +607 -0
  92. agentops/instrumentation/providers/openai/config.py +36 -0
  93. agentops/instrumentation/providers/openai/instrumentor.py +312 -0
  94. agentops/instrumentation/providers/openai/stream_wrapper.py +941 -0
  95. agentops/instrumentation/providers/openai/utils.py +44 -0
  96. agentops/instrumentation/providers/openai/v0.py +176 -0
  97. agentops/instrumentation/providers/openai/v0_wrappers.py +483 -0
  98. agentops/instrumentation/providers/openai/wrappers/__init__.py +30 -0
  99. agentops/instrumentation/providers/openai/wrappers/assistant.py +277 -0
  100. agentops/instrumentation/providers/openai/wrappers/chat.py +259 -0
  101. agentops/instrumentation/providers/openai/wrappers/completion.py +109 -0
  102. agentops/instrumentation/providers/openai/wrappers/embeddings.py +94 -0
  103. agentops/instrumentation/providers/openai/wrappers/image_gen.py +75 -0
  104. agentops/instrumentation/providers/openai/wrappers/responses.py +191 -0
  105. agentops/instrumentation/providers/openai/wrappers/shared.py +81 -0
  106. agentops/instrumentation/utilities/concurrent_futures/__init__.py +10 -0
  107. agentops/instrumentation/utilities/concurrent_futures/instrumentation.py +206 -0
  108. agentops/integration/callbacks/dspy/__init__.py +11 -0
  109. agentops/integration/callbacks/dspy/callback.py +471 -0
  110. agentops/integration/callbacks/langchain/README.md +59 -0
  111. agentops/integration/callbacks/langchain/__init__.py +15 -0
  112. agentops/integration/callbacks/langchain/callback.py +791 -0
  113. agentops/integration/callbacks/langchain/utils.py +54 -0
  114. agentops/legacy/crewai.md +121 -0
  115. agentops/logging/instrument_logging.py +4 -0
  116. agentops/sdk/README.md +220 -0
  117. agentops/sdk/core.py +75 -32
  118. agentops/sdk/descriptors/classproperty.py +28 -0
  119. agentops/sdk/exporters.py +152 -33
  120. agentops/semconv/README.md +125 -0
  121. agentops/semconv/span_kinds.py +0 -2
  122. agentops/validation.py +102 -63
  123. {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.23.dist-info}/METADATA +30 -40
  124. mseep_agentops-0.4.23.dist-info/RECORD +178 -0
  125. {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.23.dist-info}/WHEEL +1 -2
  126. mseep_agentops-0.4.18.dist-info/RECORD +0 -94
  127. mseep_agentops-0.4.18.dist-info/top_level.txt +0 -2
  128. tests/conftest.py +0 -10
  129. tests/unit/client/__init__.py +0 -1
  130. tests/unit/client/test_http_adapter.py +0 -221
  131. tests/unit/client/test_http_client.py +0 -206
  132. tests/unit/conftest.py +0 -54
  133. tests/unit/sdk/__init__.py +0 -1
  134. tests/unit/sdk/instrumentation_tester.py +0 -207
  135. tests/unit/sdk/test_attributes.py +0 -392
  136. tests/unit/sdk/test_concurrent_instrumentation.py +0 -468
  137. tests/unit/sdk/test_decorators.py +0 -763
  138. tests/unit/sdk/test_exporters.py +0 -241
  139. tests/unit/sdk/test_factory.py +0 -1188
  140. tests/unit/sdk/test_internal_span_processor.py +0 -397
  141. tests/unit/sdk/test_resource_attributes.py +0 -35
  142. tests/unit/test_config.py +0 -82
  143. tests/unit/test_context_manager.py +0 -777
  144. tests/unit/test_events.py +0 -27
  145. tests/unit/test_host_env.py +0 -54
  146. tests/unit/test_init_py.py +0 -501
  147. tests/unit/test_serialization.py +0 -433
  148. tests/unit/test_session.py +0 -676
  149. tests/unit/test_user_agent.py +0 -34
  150. tests/unit/test_validation.py +0 -405
  151. {tests → agentops/instrumentation/agentic/openai_agents/attributes}/__init__.py +0 -0
  152. /tests/unit/__init__.py → /agentops/instrumentation/providers/openai/attributes/tools.py +0 -0
  153. {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.23.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,162 @@
1
+ """IBM watsonx.ai Instrumentation for AgentOps
2
+
3
+ This module provides instrumentation for the IBM watsonx.ai API, implementing OpenTelemetry
4
+ instrumentation for model requests and responses.
5
+
6
+ Key endpoints instrumented:
7
+ - Model.generate - Text generation API
8
+ - Model.generate_text_stream - Streaming text generation API
9
+ - Model.chat - Chat completion API
10
+ - Model.chat_stream - Streaming chat completion API
11
+ - Model.tokenize - Tokenization API
12
+ - Model.get_details - Model details API
13
+ """
14
+
15
+ from typing import List, Dict, Any
16
+ from wrapt import wrap_function_wrapper
17
+ from opentelemetry.metrics import Meter
18
+
19
+ from agentops.logging import logger
20
+ from agentops.instrumentation.common import CommonInstrumentor, StandardMetrics, InstrumentorConfig
21
+ from agentops.instrumentation.common.wrappers import WrapConfig
22
+ from agentops.instrumentation.providers.ibm_watsonx_ai.attributes.attributes import (
23
+ get_generate_attributes,
24
+ get_tokenize_attributes,
25
+ get_model_details_attributes,
26
+ get_chat_attributes,
27
+ )
28
+ from agentops.instrumentation.providers.ibm_watsonx_ai.stream_wrapper import (
29
+ generate_text_stream_wrapper,
30
+ chat_stream_wrapper,
31
+ )
32
+
33
+ # Library info for tracer/meter
34
+ LIBRARY_NAME = "agentops.instrumentation.ibm_watsonx_ai"
35
+ LIBRARY_VERSION = "0.1.0"
36
+
37
# Methods to wrap for instrumentation. Every instrumented entry point lives on
# ``ibm_watsonx_ai.foundation_models.inference.ModelInference``; the two
# streaming methods carry ``handler=None`` because they are wrapped by
# dedicated stream wrappers rather than the generic attribute handler.
_WATSONX_PACKAGE = "ibm_watsonx_ai.foundation_models.inference"
_WATSONX_CLASS = "ModelInference"

WRAPPED_METHODS: List[WrapConfig] = [
    WrapConfig(
        trace_name=f"watsonx.{method_name}",
        package=_WATSONX_PACKAGE,
        class_name=_WATSONX_CLASS,
        method_name=method_name,
        handler=attribute_handler,
    )
    for method_name, attribute_handler in (
        ("generate", get_generate_attributes),
        ("generate_text_stream", None),  # Handled by dedicated wrapper
        ("chat", get_chat_attributes),
        ("chat_stream", None),  # Handled by dedicated wrapper
        ("tokenize", get_tokenize_attributes),
        ("get_details", get_model_details_attributes),
    )
]
82
+
83
+
84
class WatsonxInstrumentor(CommonInstrumentor):
    """An instrumentor for the IBM watsonx.ai API.

    Standard (non-streaming) ``ModelInference`` methods are wrapped through the
    common instrumentor machinery configured in ``__init__``. The two streaming
    methods are excluded from that path and wrapped/unwrapped manually in
    ``_custom_wrap``/``_custom_unwrap``, because their wrappers must keep the
    span open while the caller consumes the stream.
    """

    # Methods that bypass the generic WrapConfig handler and use a dedicated
    # stream wrapper instead. Kept in one place so __init__, _custom_wrap and
    # _custom_unwrap cannot drift out of sync.
    _STREAM_METHODS = ("generate_text_stream", "chat_stream")

    def __init__(self):
        """Initialize the IBM watsonx.ai instrumentor."""
        # Filter out stream methods that need custom wrapping.
        standard_methods = [wc for wc in WRAPPED_METHODS if wc.method_name not in self._STREAM_METHODS]

        config = InstrumentorConfig(
            library_name=LIBRARY_NAME,
            library_version=LIBRARY_VERSION,
            wrapped_methods=standard_methods,
            metrics_enabled=True,
            dependencies=["ibm-watsonx-ai >= 1.3.11"],
        )
        super().__init__(config)

    def _create_metrics(self, meter: Meter) -> Dict[str, Any]:
        """Create metrics for IBM watsonx.ai operations.

        Args:
            meter: The OpenTelemetry meter to use for creating metrics.

        Returns:
            Dictionary containing the created metrics.
        """
        return StandardMetrics.create_standard_metrics(meter)

    def _custom_wrap(self, **kwargs):
        """Perform custom wrapping for streaming methods.

        Both streaming methods are wrapped identically apart from the wrapper
        function, so they are driven from a single table instead of two
        duplicated try blocks.
        """
        stream_wrappers = {
            "generate_text_stream": generate_text_stream_wrapper,
            "chat_stream": chat_stream_wrapper,
        }
        for method_name, wrapper in stream_wrappers.items():
            try:
                # StopIteration here means the method is missing from WRAPPED_METHODS.
                wrap_config = next(wc for wc in WRAPPED_METHODS if wc.method_name == method_name)
                wrap_function_wrapper(
                    wrap_config.package,
                    f"{wrap_config.class_name}.{wrap_config.method_name}",
                    wrapper,
                )
                logger.debug(
                    f"Wrapped {wrap_config.package}.{wrap_config.class_name}.{wrap_config.method_name} with dedicated wrapper"
                )
            except (StopIteration, AttributeError, ModuleNotFoundError) as e:
                # Best effort: the target library may not be installed or may
                # not expose this method in the installed version.
                logger.debug(f"Could not wrap {method_name} with dedicated wrapper: {e}")

        logger.info("IBM watsonx.ai instrumentation enabled")

    def _custom_unwrap(self, **kwargs):
        """Remove custom wrapping for streaming methods."""
        # Unwrap streaming methods manually; standard methods are handled by
        # the common instrumentor.
        from opentelemetry.instrumentation.utils import unwrap as otel_unwrap

        for wrap_config in WRAPPED_METHODS:
            if wrap_config.method_name in self._STREAM_METHODS:
                try:
                    otel_unwrap(wrap_config.package, f"{wrap_config.class_name}.{wrap_config.method_name}")
                    logger.debug(
                        f"Unwrapped streaming method {wrap_config.package}.{wrap_config.class_name}.{wrap_config.method_name}"
                    )
                except Exception as e:
                    # Unwrap is best effort during shutdown; never raise.
                    logger.debug(
                        f"Failed to unwrap streaming method {wrap_config.package}.{wrap_config.class_name}.{wrap_config.method_name}: {e}"
                    )

        logger.info("IBM watsonx.ai instrumentation disabled")
@@ -0,0 +1,302 @@
1
+ """Stream wrappers for IBM watsonx.ai.
2
+
3
+ This module provides stream wrapper classes and functions for IBM watsonx.ai's streaming
4
+ responses, implementing telemetry tracking for streaming content.
5
+ """
6
+
7
+ import json
8
+ from opentelemetry.trace import get_tracer, SpanKind
9
+ from agentops.logging import logger
10
+ from agentops.instrumentation.providers.ibm_watsonx_ai import LIBRARY_NAME, LIBRARY_VERSION
11
+ from agentops.instrumentation.providers.ibm_watsonx_ai.attributes.common import (
12
+ extract_params_attributes,
13
+ convert_params_to_dict,
14
+ extract_prompt_from_args,
15
+ extract_messages_from_args,
16
+ extract_params_from_args,
17
+ )
18
+ from agentops.semconv import SpanAttributes, LLMRequestTypeValues, CoreAttributes, MessageAttributes
19
+
20
+
21
class TracedStream:
    """A wrapper for IBM watsonx.ai's streaming response that adds telemetry.

    Wraps the generator returned by ``generate_text_stream``/``chat_stream``,
    accumulating the generated text and (where possible) token counts onto the
    provided span, then ends the span when the stream is exhausted or closed.

    NOTE(review): token/model extraction peeks at the wrapped generator's
    internal frame locals (``gi_frame.f_locals``) to read the raw SSE payload
    — a CPython implementation detail that also assumes specific local
    variable names (``chunk``, ``parsed_response``) inside the SDK's
    generator. Every such access is wrapped in broad fallbacks that use only
    the chunks actually yielded to the caller.
    """

    def __init__(self, original_stream, span):
        """Initialize with the original stream and span.

        Args:
            original_stream: The generator returned by the wrapped SDK method.
            span: An already-started span; this object takes over its lifetime
                and ends it when iteration finishes.
        """
        self.original_stream = original_stream
        self.span = span
        # Full generated text accumulated across all chunks.
        self.completion_content = ""
        # Token counters start at 0 (not None); the `is not None` checks below
        # therefore always pass and exist as defensive guards.
        self.input_tokens = 0
        self.output_tokens = 0
        self.model_id = None

    def __iter__(self):
        """Iterate through chunks, tracking content and attempting to extract token data."""
        try:
            for yielded_chunk in self.original_stream:
                # Initialize data for this chunk
                generated_text_chunk = ""
                model_id_chunk = None

                try:
                    # Attempt to access internal frame local variable 'chunk' for full data.
                    # If 'gi_frame' is absent, getattr returns {} which has no
                    # .f_locals — the resulting AttributeError is caught below.
                    internal_chunk_data_str = getattr(self.original_stream, "gi_frame", {}).f_locals.get("chunk")

                    if isinstance(internal_chunk_data_str, str) and internal_chunk_data_str.startswith("data: "):
                        try:
                            # Remove 'data: ' (SSE) prefix and parse JSON
                            json_payload_str = internal_chunk_data_str[len("data: ") :]
                            json_payload = json.loads(json_payload_str)

                            # Determine if it's generate_text_stream or chat_stream structure
                            if "results" in json_payload:  # Likely generate_text_stream
                                model_id_chunk = json_payload.get("model_id")
                                if isinstance(json_payload["results"], list):
                                    for result in json_payload["results"]:
                                        if isinstance(result, dict):
                                            # Use yielded_chunk for generated_text as internal one might be partial
                                            if isinstance(yielded_chunk, str):
                                                generated_text_chunk = yielded_chunk
                                            # Use the first non-zero input token count found
                                            if self.input_tokens == 0 and result.get("input_token_count", 0) > 0:
                                                self.input_tokens = result.get("input_token_count", 0)
                                            # Accumulate output tokens
                                            self.output_tokens += result.get("generated_token_count", 0)

                            elif "choices" in json_payload:  # Likely chat_stream
                                # model_id might be at top level or within choices in other APIs, check top first
                                model_id_chunk = json_payload.get("model_id") or json_payload.get("model")
                                if isinstance(json_payload["choices"], list) and json_payload["choices"]:
                                    choice = json_payload["choices"][0]
                                    if isinstance(choice, dict):
                                        delta = choice.get("delta", {})
                                        if isinstance(delta, dict):
                                            generated_text_chunk = delta.get("content", "")

                                        # Check for finish reason to potentially get final usage
                                        finish_reason = choice.get("finish_reason")
                                        if finish_reason == "stop":
                                            try:
                                                # Another frame-locals probe: the SDK's generator is
                                                # assumed to hold the fully parsed final response in
                                                # a local named 'parsed_response' — TODO confirm
                                                # against the installed SDK version.
                                                final_response_data = getattr(
                                                    self.original_stream, "gi_frame", {}
                                                ).f_locals.get("parsed_response")
                                                if (
                                                    isinstance(final_response_data, dict)
                                                    and "usage" in final_response_data
                                                ):
                                                    usage = final_response_data["usage"]
                                                    if isinstance(usage, dict):
                                                        # Update token counts with final values
                                                        self.input_tokens = usage.get(
                                                            "prompt_tokens", self.input_tokens
                                                        )
                                                        self.output_tokens = usage.get(
                                                            "completion_tokens", self.output_tokens
                                                        )
                                                        # Update span immediately with final counts
                                                        if self.input_tokens is not None:
                                                            self.span.set_attribute(
                                                                SpanAttributes.LLM_USAGE_PROMPT_TOKENS,
                                                                self.input_tokens,
                                                            )
                                                        if self.output_tokens is not None:
                                                            self.span.set_attribute(
                                                                SpanAttributes.LLM_USAGE_COMPLETION_TOKENS,
                                                                self.output_tokens,
                                                            )
                                                        if (
                                                            self.input_tokens is not None
                                                            and self.output_tokens is not None
                                                        ):
                                                            self.span.set_attribute(
                                                                SpanAttributes.LLM_USAGE_TOTAL_TOKENS,
                                                                self.input_tokens + self.output_tokens,
                                                            )

                                            except AttributeError as final_attr_err:
                                                logger.debug(
                                                    f"Could not access internal generator state for final response: {final_attr_err}"
                                                )
                                            except Exception as final_err:
                                                logger.debug(
                                                    f"Error accessing or processing final response data: {final_err}"
                                                )

                        except json.JSONDecodeError as json_err:
                            logger.debug(f"Failed to parse JSON from internal chunk data: {json_err}")
                            # Fallback to using the yielded chunk directly
                            if isinstance(yielded_chunk, dict):  # chat_stream yields dicts
                                if "choices" in yielded_chunk and yielded_chunk["choices"]:
                                    delta = yielded_chunk["choices"][0].get("delta", {})
                                    generated_text_chunk = delta.get("content", "")
                            elif isinstance(yielded_chunk, str):  # generate_text_stream yields strings
                                generated_text_chunk = yielded_chunk
                        except Exception as parse_err:
                            logger.debug(f"Error processing internal chunk data: {parse_err}")
                            if isinstance(yielded_chunk, dict):  # Fallback for chat
                                if "choices" in yielded_chunk and yielded_chunk["choices"]:
                                    delta = yielded_chunk["choices"][0].get("delta", {})
                                    generated_text_chunk = delta.get("content", "")
                            elif isinstance(yielded_chunk, str):  # Fallback for generate
                                generated_text_chunk = yielded_chunk
                    else:
                        # If internal data not found or not in expected format, use yielded chunk
                        if isinstance(yielded_chunk, dict):  # chat_stream yields dicts
                            if "choices" in yielded_chunk and yielded_chunk["choices"]:
                                delta = yielded_chunk["choices"][0].get("delta", {})
                                generated_text_chunk = delta.get("content", "")
                        elif isinstance(yielded_chunk, str):  # generate_text_stream yields strings
                            generated_text_chunk = yielded_chunk

                except AttributeError as attr_err:
                    logger.debug(f"Could not access internal generator state (gi_frame.f_locals): {attr_err}")
                    if isinstance(yielded_chunk, dict):  # Fallback for chat
                        if "choices" in yielded_chunk and yielded_chunk["choices"]:
                            delta = yielded_chunk["choices"][0].get("delta", {})
                            generated_text_chunk = delta.get("content", "")
                    elif isinstance(yielded_chunk, str):  # Fallback for generate
                        generated_text_chunk = yielded_chunk
                except Exception as e:
                    logger.debug(f"Error accessing or processing internal generator state: {e}")
                    if isinstance(yielded_chunk, dict):  # Fallback for chat
                        if "choices" in yielded_chunk and yielded_chunk["choices"]:
                            delta = yielded_chunk["choices"][0].get("delta", {})
                            generated_text_chunk = delta.get("content", "")
                    elif isinstance(yielded_chunk, str):  # Fallback for generate
                        generated_text_chunk = yielded_chunk

                # Accumulate completion content regardless of where it came from
                self.completion_content += generated_text_chunk

                # Update span attributes within the loop if data is available
                if model_id_chunk and not self.model_id:
                    self.model_id = model_id_chunk
                    self.span.set_attribute(SpanAttributes.LLM_REQUEST_MODEL, self.model_id)

                if self.input_tokens is not None:
                    self.span.set_attribute(SpanAttributes.LLM_USAGE_PROMPT_TOKENS, self.input_tokens)
                if self.output_tokens is not None:
                    self.span.set_attribute(SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, self.output_tokens)
                if self.input_tokens is not None and self.output_tokens is not None:
                    self.span.set_attribute(
                        SpanAttributes.LLM_USAGE_TOTAL_TOKENS, self.input_tokens + self.output_tokens
                    )

                # Yield the original chunk that the user expects
                yield yielded_chunk
        finally:
            # Runs on normal exhaustion, caller break/close, or error:
            # record the final completion content after the stream finishes.
            if self.completion_content:
                self.span.set_attribute(MessageAttributes.COMPLETION_TYPE.format(i=0), "text")
                self.span.set_attribute(MessageAttributes.COMPLETION_ROLE.format(i=0), "assistant")
                self.span.set_attribute(MessageAttributes.COMPLETION_CONTENT.format(i=0), self.completion_content)

            # Final update for token counts
            if self.input_tokens is not None:
                self.span.set_attribute(SpanAttributes.LLM_USAGE_PROMPT_TOKENS, self.input_tokens)
            if self.output_tokens is not None:
                self.span.set_attribute(SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, self.output_tokens)
            if self.input_tokens is not None and self.output_tokens is not None:
                self.span.set_attribute(SpanAttributes.LLM_USAGE_TOTAL_TOKENS, self.input_tokens + self.output_tokens)

            # End the span when the stream is exhausted
            if self.span.is_recording():
                self.span.end()
205
+
206
+
207
def generate_text_stream_wrapper(wrapped, instance, args, kwargs):
    """Wrapper for the Model.generate_text_stream method.

    Starts a CLIENT span, records the prompt and request parameters on it,
    then delegates to the wrapped method and returns the resulting stream
    wrapped in ``TracedStream``, which owns the span for the remainder of the
    stream's lifetime.
    """
    tracer = get_tracer(LIBRARY_NAME, LIBRARY_VERSION)
    span = tracer.start_span(
        "watsonx.generate_text_stream",
        kind=SpanKind.CLIENT,
        attributes={SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value},
    )

    # Record the prompt as a single user message, if one can be extracted.
    prompt = extract_prompt_from_args(args, kwargs)
    if prompt:
        for attr_template, attr_value in (
            (MessageAttributes.PROMPT_ROLE, "user"),
            (MessageAttributes.PROMPT_CONTENT, prompt),
            (MessageAttributes.PROMPT_TYPE, "text"),
        ):
            span.set_attribute(attr_template.format(i=0), attr_value)

    # Record request parameters when they can be normalized to a dict.
    params = extract_params_from_args(args, kwargs)
    if params:
        params_dict = convert_params_to_dict(params)
        if params_dict:
            try:
                for attr_key, attr_value in extract_params_attributes(params_dict).items():
                    span.set_attribute(attr_key, attr_value)
            except Exception as e:
                logger.debug(f"Error extracting attributes from params dict: {e}")

    span.set_attribute(SpanAttributes.LLM_REQUEST_STREAMING, True)

    try:
        return TracedStream(wrapped(*args, **kwargs), span)
    except Exception as e:
        # The call itself failed, so no TracedStream exists to close the span:
        # record the error and end it here before re-raising.
        span.record_exception(e)
        span.set_attribute(CoreAttributes.ERROR_MESSAGE, str(e))
        span.set_attribute(CoreAttributes.ERROR_TYPE, e.__class__.__name__)
        span.end()
        raise
246
+
247
+
248
def chat_stream_wrapper(wrapped, instance, args, kwargs):
    """Wrapper for the Model.chat_stream method.

    Starts a CLIENT span, records the chat messages and request parameters on
    it, then delegates to the wrapped method and returns the stream wrapped in
    ``TracedStream``, which owns the span for the rest of the stream's life.
    """
    tracer = get_tracer(LIBRARY_NAME, LIBRARY_VERSION)
    span = tracer.start_span(
        "watsonx.chat_stream",
        kind=SpanKind.CLIENT,
        attributes={SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.CHAT.value},
    )

    # Extract chat messages and record each as an indexed prompt attribute.
    messages = extract_messages_from_args(args, kwargs)
    if messages and isinstance(messages, list):
        for i, message in enumerate(messages):
            if not isinstance(message, dict):
                continue
            role = message.get("role")
            content = message.get("content")
            # Handle complex content (list of typed parts) vs a simple string.
            if isinstance(content, list):
                text_parts = [
                    item.get("text", "") for item in content if isinstance(item, dict) and item.get("type") == "text"
                ]
                content_str = " ".join(text_parts)
            elif content is None:
                # Bug fix: str(None) previously recorded the literal "None"
                # as the prompt content for messages without content.
                content_str = ""
            else:
                content_str = str(content)

            if role:
                span.set_attribute(MessageAttributes.PROMPT_ROLE.format(i=i), role)
            if content_str:
                span.set_attribute(MessageAttributes.PROMPT_CONTENT.format(i=i), content_str)
                span.set_attribute(MessageAttributes.PROMPT_TYPE.format(i=i), "text")

    # Record request parameters when they can be normalized to a dict.
    params = extract_params_from_args(args, kwargs)
    if params:
        params_dict = convert_params_to_dict(params)
        if params_dict:
            try:
                span_attributes = extract_params_attributes(params_dict)
                for key, value in span_attributes.items():
                    span.set_attribute(key, value)
            except Exception as e:
                logger.debug(f"Error extracting attributes from params dict: {e}")

    span.set_attribute(SpanAttributes.LLM_REQUEST_STREAMING, True)

    try:
        stream = wrapped(*args, **kwargs)
        return TracedStream(stream, span)
    except Exception as e:
        # The call itself failed, so no TracedStream exists to close the span:
        # record the error and end it here before re-raising.
        span.record_exception(e)
        span.set_attribute(CoreAttributes.ERROR_MESSAGE, str(e))
        span.set_attribute(CoreAttributes.ERROR_TYPE, e.__class__.__name__)
        span.end()
        raise
@@ -0,0 +1,45 @@
1
"""Mem0 instrumentation library for AgentOps.

This package provides instrumentation for the Mem0 memory management system,
capturing telemetry data for memory operations.
"""

import logging
from agentops.instrumentation.common import LibraryInfo

# Import memory operation wrappers (re-exported via __all__ below).
from .memory import (
    mem0_add_wrapper,
    mem0_search_wrapper,
    mem0_get_all_wrapper,
    mem0_get_wrapper,
    mem0_delete_wrapper,
    mem0_update_wrapper,
    mem0_delete_all_wrapper,
    mem0_history_wrapper,
)

logger = logging.getLogger(__name__)

# Library information: tracer/meter name plus the PyPI package this
# instrumentation targets.
_library_info = LibraryInfo(name="agentops.instrumentation.mem0", package_name="mem0ai")
LIBRARY_NAME = _library_info.name
LIBRARY_VERSION = "1.0.0"  # Internal version for instrumentation

# Import after defining constants to avoid circular imports
# (the instrumentor module imports LIBRARY_NAME/LIBRARY_VERSION back from here).
from agentops.instrumentation.providers.mem0.instrumentor import Mem0Instrumentor  # noqa: E402

__all__ = [
    "LIBRARY_NAME",
    "LIBRARY_VERSION",
    "Mem0Instrumentor",
    # Memory operation wrappers
    "mem0_add_wrapper",
    "mem0_search_wrapper",
    "mem0_get_all_wrapper",
    "mem0_get_wrapper",
    "mem0_delete_wrapper",
    "mem0_update_wrapper",
    "mem0_delete_all_wrapper",
    "mem0_history_wrapper",
]