sentry-sdk 0.18.0__py2.py3-none-any.whl → 2.46.0__py2.py3-none-any.whl

This diff compares two publicly released versions of the package as published to their public registry. It is provided for informational purposes only.
Files changed (193)
  1. sentry_sdk/__init__.py +48 -6
  2. sentry_sdk/_compat.py +64 -56
  3. sentry_sdk/_init_implementation.py +84 -0
  4. sentry_sdk/_log_batcher.py +172 -0
  5. sentry_sdk/_lru_cache.py +47 -0
  6. sentry_sdk/_metrics_batcher.py +167 -0
  7. sentry_sdk/_queue.py +81 -19
  8. sentry_sdk/_types.py +311 -11
  9. sentry_sdk/_werkzeug.py +98 -0
  10. sentry_sdk/ai/__init__.py +7 -0
  11. sentry_sdk/ai/monitoring.py +137 -0
  12. sentry_sdk/ai/utils.py +144 -0
  13. sentry_sdk/api.py +409 -67
  14. sentry_sdk/attachments.py +75 -0
  15. sentry_sdk/client.py +849 -103
  16. sentry_sdk/consts.py +1389 -34
  17. sentry_sdk/crons/__init__.py +10 -0
  18. sentry_sdk/crons/api.py +62 -0
  19. sentry_sdk/crons/consts.py +4 -0
  20. sentry_sdk/crons/decorator.py +135 -0
  21. sentry_sdk/debug.py +12 -15
  22. sentry_sdk/envelope.py +112 -61
  23. sentry_sdk/feature_flags.py +71 -0
  24. sentry_sdk/hub.py +442 -386
  25. sentry_sdk/integrations/__init__.py +228 -58
  26. sentry_sdk/integrations/_asgi_common.py +108 -0
  27. sentry_sdk/integrations/_wsgi_common.py +131 -40
  28. sentry_sdk/integrations/aiohttp.py +221 -72
  29. sentry_sdk/integrations/anthropic.py +439 -0
  30. sentry_sdk/integrations/argv.py +4 -6
  31. sentry_sdk/integrations/ariadne.py +161 -0
  32. sentry_sdk/integrations/arq.py +247 -0
  33. sentry_sdk/integrations/asgi.py +237 -135
  34. sentry_sdk/integrations/asyncio.py +144 -0
  35. sentry_sdk/integrations/asyncpg.py +208 -0
  36. sentry_sdk/integrations/atexit.py +13 -18
  37. sentry_sdk/integrations/aws_lambda.py +233 -80
  38. sentry_sdk/integrations/beam.py +27 -35
  39. sentry_sdk/integrations/boto3.py +137 -0
  40. sentry_sdk/integrations/bottle.py +91 -69
  41. sentry_sdk/integrations/celery/__init__.py +529 -0
  42. sentry_sdk/integrations/celery/beat.py +293 -0
  43. sentry_sdk/integrations/celery/utils.py +43 -0
  44. sentry_sdk/integrations/chalice.py +35 -28
  45. sentry_sdk/integrations/clickhouse_driver.py +177 -0
  46. sentry_sdk/integrations/cloud_resource_context.py +280 -0
  47. sentry_sdk/integrations/cohere.py +274 -0
  48. sentry_sdk/integrations/dedupe.py +32 -8
  49. sentry_sdk/integrations/django/__init__.py +343 -89
  50. sentry_sdk/integrations/django/asgi.py +201 -22
  51. sentry_sdk/integrations/django/caching.py +204 -0
  52. sentry_sdk/integrations/django/middleware.py +80 -32
  53. sentry_sdk/integrations/django/signals_handlers.py +91 -0
  54. sentry_sdk/integrations/django/templates.py +69 -2
  55. sentry_sdk/integrations/django/transactions.py +39 -14
  56. sentry_sdk/integrations/django/views.py +69 -16
  57. sentry_sdk/integrations/dramatiq.py +226 -0
  58. sentry_sdk/integrations/excepthook.py +19 -13
  59. sentry_sdk/integrations/executing.py +5 -6
  60. sentry_sdk/integrations/falcon.py +128 -65
  61. sentry_sdk/integrations/fastapi.py +141 -0
  62. sentry_sdk/integrations/flask.py +114 -75
  63. sentry_sdk/integrations/gcp.py +67 -36
  64. sentry_sdk/integrations/gnu_backtrace.py +14 -22
  65. sentry_sdk/integrations/google_genai/__init__.py +301 -0
  66. sentry_sdk/integrations/google_genai/consts.py +16 -0
  67. sentry_sdk/integrations/google_genai/streaming.py +155 -0
  68. sentry_sdk/integrations/google_genai/utils.py +576 -0
  69. sentry_sdk/integrations/gql.py +162 -0
  70. sentry_sdk/integrations/graphene.py +151 -0
  71. sentry_sdk/integrations/grpc/__init__.py +168 -0
  72. sentry_sdk/integrations/grpc/aio/__init__.py +7 -0
  73. sentry_sdk/integrations/grpc/aio/client.py +95 -0
  74. sentry_sdk/integrations/grpc/aio/server.py +100 -0
  75. sentry_sdk/integrations/grpc/client.py +91 -0
  76. sentry_sdk/integrations/grpc/consts.py +1 -0
  77. sentry_sdk/integrations/grpc/server.py +66 -0
  78. sentry_sdk/integrations/httpx.py +178 -0
  79. sentry_sdk/integrations/huey.py +174 -0
  80. sentry_sdk/integrations/huggingface_hub.py +378 -0
  81. sentry_sdk/integrations/langchain.py +1132 -0
  82. sentry_sdk/integrations/langgraph.py +337 -0
  83. sentry_sdk/integrations/launchdarkly.py +61 -0
  84. sentry_sdk/integrations/litellm.py +287 -0
  85. sentry_sdk/integrations/litestar.py +315 -0
  86. sentry_sdk/integrations/logging.py +261 -85
  87. sentry_sdk/integrations/loguru.py +213 -0
  88. sentry_sdk/integrations/mcp.py +566 -0
  89. sentry_sdk/integrations/modules.py +6 -33
  90. sentry_sdk/integrations/openai.py +725 -0
  91. sentry_sdk/integrations/openai_agents/__init__.py +61 -0
  92. sentry_sdk/integrations/openai_agents/consts.py +1 -0
  93. sentry_sdk/integrations/openai_agents/patches/__init__.py +5 -0
  94. sentry_sdk/integrations/openai_agents/patches/agent_run.py +140 -0
  95. sentry_sdk/integrations/openai_agents/patches/error_tracing.py +77 -0
  96. sentry_sdk/integrations/openai_agents/patches/models.py +50 -0
  97. sentry_sdk/integrations/openai_agents/patches/runner.py +45 -0
  98. sentry_sdk/integrations/openai_agents/patches/tools.py +77 -0
  99. sentry_sdk/integrations/openai_agents/spans/__init__.py +5 -0
  100. sentry_sdk/integrations/openai_agents/spans/agent_workflow.py +21 -0
  101. sentry_sdk/integrations/openai_agents/spans/ai_client.py +42 -0
  102. sentry_sdk/integrations/openai_agents/spans/execute_tool.py +48 -0
  103. sentry_sdk/integrations/openai_agents/spans/handoff.py +19 -0
  104. sentry_sdk/integrations/openai_agents/spans/invoke_agent.py +86 -0
  105. sentry_sdk/integrations/openai_agents/utils.py +199 -0
  106. sentry_sdk/integrations/openfeature.py +35 -0
  107. sentry_sdk/integrations/opentelemetry/__init__.py +7 -0
  108. sentry_sdk/integrations/opentelemetry/consts.py +5 -0
  109. sentry_sdk/integrations/opentelemetry/integration.py +58 -0
  110. sentry_sdk/integrations/opentelemetry/propagator.py +117 -0
  111. sentry_sdk/integrations/opentelemetry/span_processor.py +391 -0
  112. sentry_sdk/integrations/otlp.py +82 -0
  113. sentry_sdk/integrations/pure_eval.py +20 -11
  114. sentry_sdk/integrations/pydantic_ai/__init__.py +47 -0
  115. sentry_sdk/integrations/pydantic_ai/consts.py +1 -0
  116. sentry_sdk/integrations/pydantic_ai/patches/__init__.py +4 -0
  117. sentry_sdk/integrations/pydantic_ai/patches/agent_run.py +215 -0
  118. sentry_sdk/integrations/pydantic_ai/patches/graph_nodes.py +110 -0
  119. sentry_sdk/integrations/pydantic_ai/patches/model_request.py +40 -0
  120. sentry_sdk/integrations/pydantic_ai/patches/tools.py +98 -0
  121. sentry_sdk/integrations/pydantic_ai/spans/__init__.py +3 -0
  122. sentry_sdk/integrations/pydantic_ai/spans/ai_client.py +246 -0
  123. sentry_sdk/integrations/pydantic_ai/spans/execute_tool.py +49 -0
  124. sentry_sdk/integrations/pydantic_ai/spans/invoke_agent.py +112 -0
  125. sentry_sdk/integrations/pydantic_ai/utils.py +223 -0
  126. sentry_sdk/integrations/pymongo.py +214 -0
  127. sentry_sdk/integrations/pyramid.py +71 -60
  128. sentry_sdk/integrations/quart.py +237 -0
  129. sentry_sdk/integrations/ray.py +165 -0
  130. sentry_sdk/integrations/redis/__init__.py +48 -0
  131. sentry_sdk/integrations/redis/_async_common.py +116 -0
  132. sentry_sdk/integrations/redis/_sync_common.py +119 -0
  133. sentry_sdk/integrations/redis/consts.py +19 -0
  134. sentry_sdk/integrations/redis/modules/__init__.py +0 -0
  135. sentry_sdk/integrations/redis/modules/caches.py +118 -0
  136. sentry_sdk/integrations/redis/modules/queries.py +65 -0
  137. sentry_sdk/integrations/redis/rb.py +32 -0
  138. sentry_sdk/integrations/redis/redis.py +69 -0
  139. sentry_sdk/integrations/redis/redis_cluster.py +107 -0
  140. sentry_sdk/integrations/redis/redis_py_cluster_legacy.py +50 -0
  141. sentry_sdk/integrations/redis/utils.py +148 -0
  142. sentry_sdk/integrations/rq.py +62 -52
  143. sentry_sdk/integrations/rust_tracing.py +284 -0
  144. sentry_sdk/integrations/sanic.py +248 -114
  145. sentry_sdk/integrations/serverless.py +13 -22
  146. sentry_sdk/integrations/socket.py +96 -0
  147. sentry_sdk/integrations/spark/spark_driver.py +115 -62
  148. sentry_sdk/integrations/spark/spark_worker.py +42 -50
  149. sentry_sdk/integrations/sqlalchemy.py +82 -37
  150. sentry_sdk/integrations/starlette.py +737 -0
  151. sentry_sdk/integrations/starlite.py +292 -0
  152. sentry_sdk/integrations/statsig.py +37 -0
  153. sentry_sdk/integrations/stdlib.py +100 -58
  154. sentry_sdk/integrations/strawberry.py +394 -0
  155. sentry_sdk/integrations/sys_exit.py +70 -0
  156. sentry_sdk/integrations/threading.py +142 -38
  157. sentry_sdk/integrations/tornado.py +68 -53
  158. sentry_sdk/integrations/trytond.py +15 -20
  159. sentry_sdk/integrations/typer.py +60 -0
  160. sentry_sdk/integrations/unleash.py +33 -0
  161. sentry_sdk/integrations/unraisablehook.py +53 -0
  162. sentry_sdk/integrations/wsgi.py +126 -125
  163. sentry_sdk/logger.py +96 -0
  164. sentry_sdk/metrics.py +81 -0
  165. sentry_sdk/monitor.py +120 -0
  166. sentry_sdk/profiler/__init__.py +49 -0
  167. sentry_sdk/profiler/continuous_profiler.py +730 -0
  168. sentry_sdk/profiler/transaction_profiler.py +839 -0
  169. sentry_sdk/profiler/utils.py +195 -0
  170. sentry_sdk/scope.py +1542 -112
  171. sentry_sdk/scrubber.py +177 -0
  172. sentry_sdk/serializer.py +152 -210
  173. sentry_sdk/session.py +177 -0
  174. sentry_sdk/sessions.py +202 -179
  175. sentry_sdk/spotlight.py +242 -0
  176. sentry_sdk/tracing.py +1202 -294
  177. sentry_sdk/tracing_utils.py +1236 -0
  178. sentry_sdk/transport.py +693 -189
  179. sentry_sdk/types.py +52 -0
  180. sentry_sdk/utils.py +1395 -228
  181. sentry_sdk/worker.py +30 -17
  182. sentry_sdk-2.46.0.dist-info/METADATA +268 -0
  183. sentry_sdk-2.46.0.dist-info/RECORD +189 -0
  184. {sentry_sdk-0.18.0.dist-info → sentry_sdk-2.46.0.dist-info}/WHEEL +1 -1
  185. sentry_sdk-2.46.0.dist-info/entry_points.txt +2 -0
  186. sentry_sdk-2.46.0.dist-info/licenses/LICENSE +21 -0
  187. sentry_sdk/_functools.py +0 -66
  188. sentry_sdk/integrations/celery.py +0 -275
  189. sentry_sdk/integrations/redis.py +0 -103
  190. sentry_sdk-0.18.0.dist-info/LICENSE +0 -9
  191. sentry_sdk-0.18.0.dist-info/METADATA +0 -66
  192. sentry_sdk-0.18.0.dist-info/RECORD +0 -65
  193. {sentry_sdk-0.18.0.dist-info → sentry_sdk-2.46.0.dist-info}/top_level.txt +0 -0
sentry_sdk/integrations/langchain.py (new file)
@@ -0,0 +1,1132 @@
+import contextvars
+import itertools
+import warnings
+from collections import OrderedDict
+from functools import wraps
+import sys
+
+import sentry_sdk
+from sentry_sdk.ai.monitoring import set_ai_pipeline_name
+from sentry_sdk.ai.utils import (
+    GEN_AI_ALLOWED_MESSAGE_ROLES,
+    normalize_message_roles,
+    set_data_normalized,
+    get_start_span_function,
+    truncate_and_annotate_messages,
+)
+from sentry_sdk.consts import OP, SPANDATA
+from sentry_sdk.integrations import DidNotEnable, Integration
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.tracing_utils import _get_value, set_span_errored
+from sentry_sdk.utils import logger, capture_internal_exceptions
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import (
+        Any,
+        AsyncIterator,
+        Callable,
+        Dict,
+        Iterator,
+        List,
+        Optional,
+        Union,
+    )
+    from uuid import UUID
+    from sentry_sdk.tracing import Span
+
+
+try:
+    from langchain_core.agents import AgentFinish
+    from langchain_core.callbacks import (
+        BaseCallbackHandler,
+        BaseCallbackManager,
+        Callbacks,
+        manager,
+    )
+    from langchain_core.messages import BaseMessage
+    from langchain_core.outputs import LLMResult
+
+except ImportError:
+    raise DidNotEnable("langchain not installed")
+
+
+try:
+    # >=v1
+    from langchain_classic.agents import AgentExecutor  # type: ignore[import-not-found]
+except ImportError:
+    try:
+        # <v1
+        from langchain.agents import AgentExecutor
+    except ImportError:
+        AgentExecutor = None
+
+
+# Conditional imports for embeddings providers
+try:
+    from langchain_openai import OpenAIEmbeddings  # type: ignore[import-not-found]
+except ImportError:
+    OpenAIEmbeddings = None
+
+try:
+    from langchain_openai import AzureOpenAIEmbeddings
+except ImportError:
+    AzureOpenAIEmbeddings = None
+
+try:
+    from langchain_google_vertexai import VertexAIEmbeddings  # type: ignore[import-not-found]
+except ImportError:
+    VertexAIEmbeddings = None
+
+try:
+    from langchain_aws import BedrockEmbeddings  # type: ignore[import-not-found]
+except ImportError:
+    BedrockEmbeddings = None
+
+try:
+    from langchain_cohere import CohereEmbeddings  # type: ignore[import-not-found]
+except ImportError:
+    CohereEmbeddings = None
+
+try:
+    from langchain_mistralai import MistralAIEmbeddings  # type: ignore[import-not-found]
+except ImportError:
+    MistralAIEmbeddings = None
+
+try:
+    from langchain_huggingface import HuggingFaceEmbeddings  # type: ignore[import-not-found]
+except ImportError:
+    HuggingFaceEmbeddings = None
+
+try:
+    from langchain_ollama import OllamaEmbeddings  # type: ignore[import-not-found]
+except ImportError:
+    OllamaEmbeddings = None
+
+
+DATA_FIELDS = {
+    "frequency_penalty": SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY,
+    "function_call": SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
+    "max_tokens": SPANDATA.GEN_AI_REQUEST_MAX_TOKENS,
+    "presence_penalty": SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY,
+    "temperature": SPANDATA.GEN_AI_REQUEST_TEMPERATURE,
+    "tool_calls": SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
+    "top_k": SPANDATA.GEN_AI_REQUEST_TOP_K,
+    "top_p": SPANDATA.GEN_AI_REQUEST_TOP_P,
+}
+
+
+# Contextvar to track agent names in a stack for re-entrant agent support
+_agent_stack = contextvars.ContextVar("langchain_agent_stack", default=None)  # type: contextvars.ContextVar[Optional[List[Optional[str]]]]
+
+
+def _push_agent(agent_name):
+    # type: (Optional[str]) -> None
+    """Push an agent name onto the stack."""
+    stack = _agent_stack.get()
+    if stack is None:
+        stack = []
+    else:
+        # Copy the list to maintain contextvar isolation across async contexts
+        stack = stack.copy()
+    stack.append(agent_name)
+    _agent_stack.set(stack)
+
+
+def _pop_agent():
+    # type: () -> Optional[str]
+    """Pop an agent name from the stack and return it."""
+    stack = _agent_stack.get()
+    if stack:
+        # Copy the list to maintain contextvar isolation across async contexts
+        stack = stack.copy()
+        agent_name = stack.pop()
+        _agent_stack.set(stack)
+        return agent_name
+    return None
+
+
+def _get_current_agent():
+    # type: () -> Optional[str]
+    """Get the current agent name (top of stack) without removing it."""
+    stack = _agent_stack.get()
+    if stack:
+        return stack[-1]
+    return None
+
+
+class LangchainIntegration(Integration):
+    identifier = "langchain"
+    origin = f"auto.ai.{identifier}"
+
+    def __init__(self, include_prompts=True, max_spans=None):
+        # type: (LangchainIntegration, bool, Optional[int]) -> None
+        self.include_prompts = include_prompts
+        self.max_spans = max_spans
+
+        if max_spans is not None:
+            warnings.warn(
+                "The `max_spans` parameter of `LangchainIntegration` is "
+                "deprecated and will be removed in version 3.0 of sentry-sdk.",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        manager._configure = _wrap_configure(manager._configure)
+
+        if AgentExecutor is not None:
+            AgentExecutor.invoke = _wrap_agent_executor_invoke(AgentExecutor.invoke)
+            AgentExecutor.stream = _wrap_agent_executor_stream(AgentExecutor.stream)
+
+        # Patch embeddings providers
+        _patch_embeddings_provider(OpenAIEmbeddings)
+        _patch_embeddings_provider(AzureOpenAIEmbeddings)
+        _patch_embeddings_provider(VertexAIEmbeddings)
+        _patch_embeddings_provider(BedrockEmbeddings)
+        _patch_embeddings_provider(CohereEmbeddings)
+        _patch_embeddings_provider(MistralAIEmbeddings)
+        _patch_embeddings_provider(HuggingFaceEmbeddings)
+        _patch_embeddings_provider(OllamaEmbeddings)
+
+
+class WatchedSpan:
+    span = None  # type: Span
+    children = []  # type: List[WatchedSpan]
+    is_pipeline = False  # type: bool
+
+    def __init__(self, span):
+        # type: (Span) -> None
+        self.span = span
+
+
+class SentryLangchainCallback(BaseCallbackHandler):  # type: ignore[misc]
+    """Callback handler that creates Sentry spans."""
+
+    def __init__(self, max_span_map_size, include_prompts):
+        # type: (Optional[int], bool) -> None
+        self.span_map = OrderedDict()  # type: OrderedDict[UUID, WatchedSpan]
+        self.max_span_map_size = max_span_map_size
+        self.include_prompts = include_prompts
+
+    def gc_span_map(self):
+        # type: () -> None
+
+        if self.max_span_map_size is not None:
+            while len(self.span_map) > self.max_span_map_size:
+                run_id, watched_span = self.span_map.popitem(last=False)
+                self._exit_span(watched_span, run_id)
+
+    def _handle_error(self, run_id, error):
+        # type: (UUID, Any) -> None
+        with capture_internal_exceptions():
+            if not run_id or run_id not in self.span_map:
+                return
+
+            span_data = self.span_map[run_id]
+            span = span_data.span
+            set_span_errored(span)
+
+            sentry_sdk.capture_exception(error, span.scope)
+
+            span.__exit__(None, None, None)
+            del self.span_map[run_id]
+
+    def _normalize_langchain_message(self, message):
+        # type: (BaseMessage) -> Any
+        parsed = {"role": message.type, "content": message.content}
+        parsed.update(message.additional_kwargs)
+        return parsed
+
+    def _create_span(self, run_id, parent_id, **kwargs):
+        # type: (SentryLangchainCallback, UUID, Optional[Any], Any) -> WatchedSpan
+        watched_span = None  # type: Optional[WatchedSpan]
+        if parent_id:
+            parent_span = self.span_map.get(parent_id)  # type: Optional[WatchedSpan]
+            if parent_span:
+                watched_span = WatchedSpan(parent_span.span.start_child(**kwargs))
+                parent_span.children.append(watched_span)
+
+        if watched_span is None:
+            watched_span = WatchedSpan(sentry_sdk.start_span(**kwargs))
+
+        watched_span.span.__enter__()
+        self.span_map[run_id] = watched_span
+        self.gc_span_map()
+        return watched_span
+
+    def _exit_span(self, span_data, run_id):
+        # type: (SentryLangchainCallback, WatchedSpan, UUID) -> None
+        if span_data.is_pipeline:
+            set_ai_pipeline_name(None)
+
+        span_data.span.__exit__(None, None, None)
+        del self.span_map[run_id]
+
+    def on_llm_start(
+        self,
+        serialized,
+        prompts,
+        *,
+        run_id,
+        tags=None,
+        parent_run_id=None,
+        metadata=None,
+        **kwargs,
+    ):
+        # type: (SentryLangchainCallback, Dict[str, Any], List[str], UUID, Optional[List[str]], Optional[UUID], Optional[Dict[str, Any]], Any) -> Any
+        """Run when LLM starts running."""
+        with capture_internal_exceptions():
+            if not run_id:
+                return
+
+            all_params = kwargs.get("invocation_params", {})
+            all_params.update(serialized.get("kwargs", {}))
+
+            model = (
+                all_params.get("model")
+                or all_params.get("model_name")
+                or all_params.get("model_id")
+                or ""
+            )
+
+            watched_span = self._create_span(
+                run_id,
+                parent_run_id,
+                op=OP.GEN_AI_PIPELINE,
+                name=kwargs.get("name") or "Langchain LLM call",
+                origin=LangchainIntegration.origin,
+            )
+            span = watched_span.span
+
+            if model:
+                span.set_data(
+                    SPANDATA.GEN_AI_REQUEST_MODEL,
+                    model,
+                )
+
+            ai_type = all_params.get("_type", "")
+            if "anthropic" in ai_type:
+                span.set_data(SPANDATA.GEN_AI_SYSTEM, "anthropic")
+            elif "openai" in ai_type:
+                span.set_data(SPANDATA.GEN_AI_SYSTEM, "openai")
+
+            for key, attribute in DATA_FIELDS.items():
+                if key in all_params and all_params[key] is not None:
+                    set_data_normalized(span, attribute, all_params[key], unpack=False)
+
+            _set_tools_on_span(span, all_params.get("tools"))
+
+            if should_send_default_pii() and self.include_prompts:
+                normalized_messages = [
+                    {
+                        "role": GEN_AI_ALLOWED_MESSAGE_ROLES.USER,
+                        "content": {"type": "text", "text": prompt},
+                    }
+                    for prompt in prompts
+                ]
+                scope = sentry_sdk.get_current_scope()
+                messages_data = truncate_and_annotate_messages(
+                    normalized_messages, span, scope
+                )
+                if messages_data is not None:
+                    set_data_normalized(
+                        span,
+                        SPANDATA.GEN_AI_REQUEST_MESSAGES,
+                        messages_data,
+                        unpack=False,
+                    )
+
+    def on_chat_model_start(self, serialized, messages, *, run_id, **kwargs):
+        # type: (SentryLangchainCallback, Dict[str, Any], List[List[BaseMessage]], UUID, Any) -> Any
+        """Run when Chat Model starts running."""
+        with capture_internal_exceptions():
+            if not run_id:
+                return
+
+            all_params = kwargs.get("invocation_params", {})
+            all_params.update(serialized.get("kwargs", {}))
+
+            model = (
+                all_params.get("model")
+                or all_params.get("model_name")
+                or all_params.get("model_id")
+                or ""
+            )
+
+            watched_span = self._create_span(
+                run_id,
+                kwargs.get("parent_run_id"),
+                op=OP.GEN_AI_CHAT,
+                name=f"chat {model}".strip(),
+                origin=LangchainIntegration.origin,
+            )
+            span = watched_span.span
+
+            span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")
+            if model:
+                span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model)
+
+            ai_type = all_params.get("_type", "")
+            if "anthropic" in ai_type:
+                span.set_data(SPANDATA.GEN_AI_SYSTEM, "anthropic")
+            elif "openai" in ai_type:
+                span.set_data(SPANDATA.GEN_AI_SYSTEM, "openai")
+
+            agent_name = _get_current_agent()
+            if agent_name:
+                span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_name)
+
+            for key, attribute in DATA_FIELDS.items():
+                if key in all_params and all_params[key] is not None:
+                    set_data_normalized(span, attribute, all_params[key], unpack=False)
+
+            _set_tools_on_span(span, all_params.get("tools"))
+
+            if should_send_default_pii() and self.include_prompts:
+                normalized_messages = []
+                for list_ in messages:
+                    for message in list_:
+                        normalized_messages.append(
+                            self._normalize_langchain_message(message)
+                        )
+                normalized_messages = normalize_message_roles(normalized_messages)
+                scope = sentry_sdk.get_current_scope()
+                messages_data = truncate_and_annotate_messages(
+                    normalized_messages, span, scope
+                )
+                if messages_data is not None:
+                    set_data_normalized(
+                        span,
+                        SPANDATA.GEN_AI_REQUEST_MESSAGES,
+                        messages_data,
+                        unpack=False,
+                    )
+
+    def on_chat_model_end(self, response, *, run_id, **kwargs):
+        # type: (SentryLangchainCallback, LLMResult, UUID, Any) -> Any
+        """Run when Chat Model ends running."""
+        with capture_internal_exceptions():
+            if not run_id or run_id not in self.span_map:
+                return
+
+            span_data = self.span_map[run_id]
+            span = span_data.span
+
+            if should_send_default_pii() and self.include_prompts:
+                set_data_normalized(
+                    span,
+                    SPANDATA.GEN_AI_RESPONSE_TEXT,
+                    [[x.text for x in list_] for list_ in response.generations],
+                )
+
+            _record_token_usage(span, response)
+            self._exit_span(span_data, run_id)
+
+    def on_llm_end(self, response, *, run_id, **kwargs):
+        # type: (SentryLangchainCallback, LLMResult, UUID, Any) -> Any
+        """Run when LLM ends running."""
+        with capture_internal_exceptions():
+            if not run_id or run_id not in self.span_map:
+                return
+
+            span_data = self.span_map[run_id]
+            span = span_data.span
+
+            try:
+                generation = response.generations[0][0]
+            except IndexError:
+                generation = None
+
+            if generation is not None:
+                try:
+                    response_model = generation.generation_info.get("model_name")
+                    if response_model is not None:
+                        span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, response_model)
+                except AttributeError:
+                    pass
+
+                try:
+                    finish_reason = generation.generation_info.get("finish_reason")
+                    if finish_reason is not None:
+                        span.set_data(
+                            SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS, finish_reason
+                        )
+                except AttributeError:
+                    pass
+
+                try:
+                    if should_send_default_pii() and self.include_prompts:
+                        tool_calls = getattr(generation.message, "tool_calls", None)
+                        if tool_calls is not None and tool_calls != []:
+                            set_data_normalized(
+                                span,
+                                SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
+                                tool_calls,
+                                unpack=False,
+                            )
+                except AttributeError:
+                    pass
+
+            if should_send_default_pii() and self.include_prompts:
+                set_data_normalized(
+                    span,
+                    SPANDATA.GEN_AI_RESPONSE_TEXT,
+                    [[x.text for x in list_] for list_ in response.generations],
+                )
+
+            _record_token_usage(span, response)
+            self._exit_span(span_data, run_id)
+
+    def on_llm_error(self, error, *, run_id, **kwargs):
+        # type: (SentryLangchainCallback, Union[Exception, KeyboardInterrupt], UUID, Any) -> Any
+        """Run when LLM errors."""
+        self._handle_error(run_id, error)
+
+    def on_chat_model_error(self, error, *, run_id, **kwargs):
+        # type: (SentryLangchainCallback, Union[Exception, KeyboardInterrupt], UUID, Any) -> Any
+        """Run when Chat Model errors."""
+        self._handle_error(run_id, error)
+
+    def on_agent_finish(self, finish, *, run_id, **kwargs):
+        # type: (SentryLangchainCallback, AgentFinish, UUID, Any) -> Any
+        with capture_internal_exceptions():
+            if not run_id or run_id not in self.span_map:
+                return
+
+            span_data = self.span_map[run_id]
+            span = span_data.span
+
+            if should_send_default_pii() and self.include_prompts:
+                set_data_normalized(
+                    span, SPANDATA.GEN_AI_RESPONSE_TEXT, finish.return_values.items()
+                )
+
+            self._exit_span(span_data, run_id)
+
+    def on_tool_start(self, serialized, input_str, *, run_id, **kwargs):
+        # type: (SentryLangchainCallback, Dict[str, Any], str, UUID, Any) -> Any
+        """Run when tool starts running."""
+        with capture_internal_exceptions():
+            if not run_id:
+                return
+
+            tool_name = serialized.get("name") or kwargs.get("name") or ""
+
+            watched_span = self._create_span(
+                run_id,
+                kwargs.get("parent_run_id"),
+                op=OP.GEN_AI_EXECUTE_TOOL,
+                name=f"execute_tool {tool_name}".strip(),
+                origin=LangchainIntegration.origin,
+            )
+            span = watched_span.span
+
+            span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "execute_tool")
+            span.set_data(SPANDATA.GEN_AI_TOOL_NAME, tool_name)
+
+            tool_description = serialized.get("description")
+            if tool_description is not None:
+                span.set_data(SPANDATA.GEN_AI_TOOL_DESCRIPTION, tool_description)
+
+            agent_name = _get_current_agent()
+            if agent_name:
+                span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_name)
+
+            if should_send_default_pii() and self.include_prompts:
+                set_data_normalized(
+                    span,
+                    SPANDATA.GEN_AI_TOOL_INPUT,
+                    kwargs.get("inputs", [input_str]),
+                )
+
+    def on_tool_end(self, output, *, run_id, **kwargs):
+        # type: (SentryLangchainCallback, str, UUID, Any) -> Any
+        """Run when tool ends running."""
+        with capture_internal_exceptions():
+            if not run_id or run_id not in self.span_map:
+                return
+
+            span_data = self.span_map[run_id]
+            span = span_data.span
+
+            if should_send_default_pii() and self.include_prompts:
+                set_data_normalized(span, SPANDATA.GEN_AI_TOOL_OUTPUT, output)
+
+            self._exit_span(span_data, run_id)
+
+    def on_tool_error(self, error, *args, run_id, **kwargs):
+        # type: (SentryLangchainCallback, Union[Exception, KeyboardInterrupt], UUID, Any) -> Any
+        """Run when tool errors."""
+        self._handle_error(run_id, error)
+
+
+def _extract_tokens(token_usage):
+    # type: (Any) -> tuple[Optional[int], Optional[int], Optional[int]]
+    if not token_usage:
+        return None, None, None
+
+    input_tokens = _get_value(token_usage, "prompt_tokens") or _get_value(
+        token_usage, "input_tokens"
+    )
+    output_tokens = _get_value(token_usage, "completion_tokens") or _get_value(
+        token_usage, "output_tokens"
+    )
+    total_tokens = _get_value(token_usage, "total_tokens")
+
+    return input_tokens, output_tokens, total_tokens
+
+
+def _extract_tokens_from_generations(generations):
+    # type: (Any) -> tuple[Optional[int], Optional[int], Optional[int]]
+    """Extract token usage from response.generations structure."""
+    if not generations:
+        return None, None, None
+
+    total_input = 0
+    total_output = 0
+    total_total = 0
+
+    for gen_list in generations:
+        for gen in gen_list:
+            token_usage = _get_token_usage(gen)
+            input_tokens, output_tokens, total_tokens = _extract_tokens(token_usage)
+            total_input += input_tokens if input_tokens is not None else 0
+            total_output += output_tokens if output_tokens is not None else 0
+            total_total += total_tokens if total_tokens is not None else 0
+
+    return (
+        total_input if total_input > 0 else None,
+        total_output if total_output > 0 else None,
+        total_total if total_total > 0 else None,
+    )
+
+
+def _get_token_usage(obj):
+    # type: (Any) -> Optional[Dict[str, Any]]
+    """
+    Check multiple paths to extract token usage from different objects.
+    """
+    possible_names = ("usage", "token_usage", "usage_metadata")
+
+    message = _get_value(obj, "message")
+    if message is not None:
+        for name in possible_names:
+            usage = _get_value(message, name)
+            if usage is not None:
+                return usage
+
+    llm_output = _get_value(obj, "llm_output")
+    if llm_output is not None:
+        for name in possible_names:
+            usage = _get_value(llm_output, name)
+            if usage is not None:
+                return usage
+
+    for name in possible_names:
+        usage = _get_value(obj, name)
+        if usage is not None:
+            return usage
+
+    return None
+
+
+def _record_token_usage(span, response):
+    # type: (Span, Any) -> None
+    token_usage = _get_token_usage(response)
+    if token_usage:
+        input_tokens, output_tokens, total_tokens = _extract_tokens(token_usage)
+    else:
+        input_tokens, output_tokens, total_tokens = _extract_tokens_from_generations(
+            response.generations
+        )
+
+    if input_tokens is not None:
+        span.set_data(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, input_tokens)
+
+    if output_tokens is not None:
+        span.set_data(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, output_tokens)
+
+    if total_tokens is not None:
+        span.set_data(SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS, total_tokens)
+
+
+def _get_request_data(obj, args, kwargs):
+    # type: (Any, Any, Any) -> tuple[Optional[str], Optional[List[Any]]]
+    """
+    Get the agent name and available tools for the agent.
+    """
+    agent = getattr(obj, "agent", None)
+    runnable = getattr(agent, "runnable", None)
+    runnable_config = getattr(runnable, "config", {})
+    tools = (
+        getattr(obj, "tools", None)
+        or getattr(agent, "tools", None)
+        or runnable_config.get("tools")
+        or runnable_config.get("available_tools")
+    )
+    tools = tools if tools and len(tools) > 0 else None
+
+    try:
+        agent_name = None
+        if len(args) > 1:
+            agent_name = args[1].get("run_name")
+        if agent_name is None:
+            agent_name = runnable_config.get("run_name")
+    except Exception:
+        pass
+
+    return (agent_name, tools)
+
+
+def _simplify_langchain_tools(tools):
+    # type: (Any) -> Optional[List[Any]]
+    """Parse and simplify tools into a cleaner format."""
+    if not tools:
+        return None
+
+    if not isinstance(tools, (list, tuple)):
+        return None
+
+    simplified_tools = []
+    for tool in tools:
+        try:
+            if isinstance(tool, dict):
+                if "function" in tool and isinstance(tool["function"], dict):
+                    func = tool["function"]
+                    simplified_tool = {
+                        "name": func.get("name"),
+                        "description": func.get("description"),
+                    }
+                    if simplified_tool["name"]:
+                        simplified_tools.append(simplified_tool)
+                elif "name" in tool:
+                    simplified_tool = {
+                        "name": tool.get("name"),
+                        "description": tool.get("description"),
+                    }
+                    simplified_tools.append(simplified_tool)
+                else:
+                    name = (
+                        tool.get("name")
+                        or tool.get("tool_name")
+                        or tool.get("function_name")
+                    )
+                    if name:
+                        simplified_tools.append(
+                            {
+                                "name": name,
+                                "description": tool.get("description")
+                                or tool.get("desc"),
+                            }
+                        )
+            elif hasattr(tool, "name"):
+                simplified_tool = {
+                    "name": getattr(tool, "name", None),
+                    "description": getattr(tool, "description", None)
+                    or getattr(tool, "desc", None),
+                }
+                if simplified_tool["name"]:
+                    simplified_tools.append(simplified_tool)
+            elif hasattr(tool, "__name__"):
+                simplified_tools.append(
+                    {
+                        "name": tool.__name__,
+                        "description": getattr(tool, "__doc__", None),
+                    }
+                )
+            else:
+                tool_str = str(tool)
+                if tool_str and tool_str != "":
+                    simplified_tools.append({"name": tool_str, "description": None})
+        except Exception:
+            continue
+
+    return simplified_tools if simplified_tools else None
+
+
+def _set_tools_on_span(span, tools):
+    # type: (Span, Any) -> None
+    """Set available tools data on a span if tools are provided."""
+    if tools is not None:
+        simplified_tools = _simplify_langchain_tools(tools)
+        if simplified_tools:
+            set_data_normalized(
+                span,
+                SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS,
+                simplified_tools,
+                unpack=False,
+            )
+
+
+def _wrap_configure(f):
+    # type: (Callable[..., Any]) -> Callable[..., Any]
+
+    @wraps(f)
+    def new_configure(
+        callback_manager_cls,  # type: type
+        inheritable_callbacks=None,  # type: Callbacks
+        local_callbacks=None,  # type: Callbacks
+        *args,  # type: Any
+        **kwargs,  # type: Any
+    ):
+        # type: (...) -> Any
+
+        integration = sentry_sdk.get_client().get_integration(LangchainIntegration)
+        if integration is None:
+            return f(
+                callback_manager_cls,
+                inheritable_callbacks,
+                local_callbacks,
+                *args,
+                **kwargs,
+            )
+
+        local_callbacks = local_callbacks or []
+
+        # Handle each possible type of local_callbacks. For each type, we
+        # extract the list of callbacks to check for SentryLangchainCallback,
+        # and define a function that would add the SentryLangchainCallback
+        # to the existing callbacks list.
+        if isinstance(local_callbacks, BaseCallbackManager):
+            callbacks_list = local_callbacks.handlers
+        elif isinstance(local_callbacks, BaseCallbackHandler):
+            callbacks_list = [local_callbacks]
+        elif isinstance(local_callbacks, list):
+            callbacks_list = local_callbacks
+        else:
+            logger.debug("Unknown callback type: %s", local_callbacks)
+            # Just proceed with original function call
+            return f(
+                callback_manager_cls,
+                inheritable_callbacks,
+                local_callbacks,
+                *args,
+                **kwargs,
+            )
+
+        # Handle each possible type of inheritable_callbacks.
+        if isinstance(inheritable_callbacks, BaseCallbackManager):
+            inheritable_callbacks_list = inheritable_callbacks.handlers
+        elif isinstance(inheritable_callbacks, list):
+            inheritable_callbacks_list = inheritable_callbacks
+        else:
+            inheritable_callbacks_list = []
+
+        if not any(
+            isinstance(cb, SentryLangchainCallback)
+            for cb in itertools.chain(callbacks_list, inheritable_callbacks_list)
+        ):
+            sentry_handler = SentryLangchainCallback(
+                integration.max_spans,
+                integration.include_prompts,
+            )
+            if isinstance(local_callbacks, BaseCallbackManager):
+                local_callbacks = local_callbacks.copy()
+                local_callbacks.handlers = [
+                    *local_callbacks.handlers,
+                    sentry_handler,
+                ]
+            elif isinstance(local_callbacks, BaseCallbackHandler):
+                local_callbacks = [local_callbacks, sentry_handler]
+            else:
+                local_callbacks = [*local_callbacks, sentry_handler]
+
+        return f(
+            callback_manager_cls,
+            inheritable_callbacks,
+            local_callbacks,
+            *args,
+            **kwargs,
+        )
+
+    return new_configure
+
+
+def _wrap_agent_executor_invoke(f):
+    # type: (Callable[..., Any]) -> Callable[..., Any]
+
+    @wraps(f)
+    def new_invoke(self, *args, **kwargs):
+        # type: (Any, Any, Any) -> Any
+        integration = sentry_sdk.get_client().get_integration(LangchainIntegration)
+        if integration is None:
+            return f(self, *args, **kwargs)
+
+        agent_name, tools = _get_request_data(self, args, kwargs)
+        start_span_function = get_start_span_function()
+
+        with start_span_function(
+            op=OP.GEN_AI_INVOKE_AGENT,
+            name=f"invoke_agent {agent_name}" if agent_name else "invoke_agent",
+            origin=LangchainIntegration.origin,
+        ) as span:
+            _push_agent(agent_name)
+            try:
+                if agent_name:
+                    span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_name)
+
+                span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
+                span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, False)
+
+                _set_tools_on_span(span, tools)
+
+                # Run the agent
+                result = f(self, *args, **kwargs)
+
+                input = result.get("input")
+                if (
+                    input is not None
+                    and should_send_default_pii()
+                    and integration.include_prompts
+                ):
+                    normalized_messages = normalize_message_roles([input])
+                    scope = sentry_sdk.get_current_scope()
+                    messages_data = truncate_and_annotate_messages(
+                        normalized_messages, span, scope
+                    )
+                    if messages_data is not None:
+                        set_data_normalized(
+                            span,
+                            SPANDATA.GEN_AI_REQUEST_MESSAGES,
+                            messages_data,
+                            unpack=False,
+                        )
+
+                output = result.get("output")
+                if (
+                    output is not None
+                    and should_send_default_pii()
+                    and integration.include_prompts
+                ):
+                    set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, output)
+
+                return result
+            finally:
+                # Ensure agent is popped even if an exception occurs
+                _pop_agent()
+
+    return new_invoke
+
+
+def _wrap_agent_executor_stream(f):
+    # type: (Callable[..., Any]) -> Callable[..., Any]
+
+    @wraps(f)
+    def new_stream(self, *args, **kwargs):
+        # type: (Any, Any, Any) -> Any
+        integration = sentry_sdk.get_client().get_integration(LangchainIntegration)
+        if integration is None:
+            return f(self, *args, **kwargs)
+
+        agent_name, tools = _get_request_data(self, args, kwargs)
+        start_span_function = get_start_span_function()
+
+        span = start_span_function(
+            op=OP.GEN_AI_INVOKE_AGENT,
+            name=f"invoke_agent {agent_name}" if agent_name else "invoke_agent",
+            origin=LangchainIntegration.origin,
+        )
+        span.__enter__()
+
+        _push_agent(agent_name)
+
+        if agent_name:
+            span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_name)
+
+        span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
+        span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
+
+        _set_tools_on_span(span, tools)
+
+        input = args[0].get("input") if len(args) >= 1 else None
+        if (
+            input is not None
+            and should_send_default_pii()
+            and integration.include_prompts
+        ):
+            normalized_messages = normalize_message_roles([input])
+            scope = sentry_sdk.get_current_scope()
+            messages_data = truncate_and_annotate_messages(
+                normalized_messages, span, scope
+            )
+            if messages_data is not None:
+                set_data_normalized(
+                    span,
+                    SPANDATA.GEN_AI_REQUEST_MESSAGES,
+                    messages_data,
+                    unpack=False,
+                )
+
+        # Run the agent
+        result = f(self, *args, **kwargs)
+
+        old_iterator = result
+
+        def new_iterator():
+            # type: () -> Iterator[Any]
+            exc_info = (None, None, None)  # type: tuple[Any, Any, Any]
+            try:
+                for event in old_iterator:
+                    yield event
+
+                try:
+                    output = event.get("output")
+                except Exception:
+                    output = None
+
+                if (
+                    output is not None
+                    and should_send_default_pii()
+                    and integration.include_prompts
+                ):
+                    set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, output)
+            except Exception:
+                exc_info = sys.exc_info()
+                set_span_errored(span)
+                raise
+            finally:
+                # Ensure cleanup happens even if iterator is abandoned or fails
+                _pop_agent()
+                span.__exit__(*exc_info)
+
+        async def new_iterator_async():
+            # type: () -> AsyncIterator[Any]
+            exc_info = (None, None, None)  # type: tuple[Any, Any, Any]
+            try:
+                async for event in old_iterator:
+                    yield event
+
+                try:
+                    output = event.get("output")
+                except Exception:
+                    output = None
+
+                if (
+                    output is not None
+                    and should_send_default_pii()
+                    and integration.include_prompts
+                ):
+                    set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, output)
+            except Exception:
+                exc_info = sys.exc_info()
+                set_span_errored(span)
+                raise
+            finally:
+                # Ensure cleanup happens even if iterator is abandoned or fails
+                _pop_agent()
+                span.__exit__(*exc_info)
+
+        if str(type(result)) == "<class 'async_generator'>":
+            result = new_iterator_async()
+        else:
+            result = new_iterator()
+
+        return result
+
+    return new_stream
+
+
+def _patch_embeddings_provider(provider_class):
+    # type: (Any) -> None
+    """Patch an embeddings provider class with monitoring wrappers."""
+    if provider_class is None:
+        return
+
+    if hasattr(provider_class, "embed_documents"):
+        provider_class.embed_documents = _wrap_embedding_method(
+            provider_class.embed_documents
+        )
+    if hasattr(provider_class, "embed_query"):
+        provider_class.embed_query = _wrap_embedding_method(provider_class.embed_query)
+    if hasattr(provider_class, "aembed_documents"):
+        provider_class.aembed_documents = _wrap_async_embedding_method(
+            provider_class.aembed_documents
+        )
+    if hasattr(provider_class, "aembed_query"):
+        provider_class.aembed_query = _wrap_async_embedding_method(
+            provider_class.aembed_query
+        )
+
+
+def _wrap_embedding_method(f):
+    # type: (Callable[..., Any]) -> Callable[..., Any]
+    """Wrap sync embedding methods (embed_documents and embed_query)."""
+
+    @wraps(f)
+    def new_embedding_method(self, *args, **kwargs):
+        # type: (Any, Any, Any) -> Any
+        integration = sentry_sdk.get_client().get_integration(LangchainIntegration)
+        if integration is None:
+            return f(self, *args, **kwargs)
+
+        model_name = getattr(self, "model", None) or getattr(self, "model_name", None)
+        with sentry_sdk.start_span(
+            op=OP.GEN_AI_EMBEDDINGS,
+            name=f"embeddings {model_name}" if model_name else "embeddings",
+            origin=LangchainIntegration.origin,
+        ) as span:
+            span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "embeddings")
+            if model_name:
+                span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
+
+            # Capture input if PII is allowed
+            if (
+                should_send_default_pii()
+                and integration.include_prompts
+                and len(args) > 0
+            ):
+                input_data = args[0]
+                # Normalize to list format
+                texts = input_data if isinstance(input_data, list) else [input_data]
+                set_data_normalized(
+                    span, SPANDATA.GEN_AI_EMBEDDINGS_INPUT, texts, unpack=False
+                )
+
+            result = f(self, *args, **kwargs)
+            return result
+
+    return new_embedding_method
+
+
+def _wrap_async_embedding_method(f):
+    # type: (Callable[..., Any]) -> Callable[..., Any]
+    """Wrap async embedding methods (aembed_documents and aembed_query)."""
+
+    @wraps(f)
+    async def new_async_embedding_method(self, *args, **kwargs):
+        # type: (Any, Any, Any) -> Any
+        integration = sentry_sdk.get_client().get_integration(LangchainIntegration)
+        if integration is None:
+            return await f(self, *args, **kwargs)
+
+        model_name = getattr(self, "model", None) or getattr(self, "model_name", None)
+        with sentry_sdk.start_span(
+            op=OP.GEN_AI_EMBEDDINGS,
+            name=f"embeddings {model_name}" if model_name else "embeddings",
+            origin=LangchainIntegration.origin,
+        ) as span:
+            span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "embeddings")
+            if model_name:
+                span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
+
+            # Capture input if PII is allowed
+            if (
+                should_send_default_pii()
+                and integration.include_prompts
+                and len(args) > 0
+            ):
+                input_data = args[0]
+                # Normalize to list format
+                texts = input_data if isinstance(input_data, list) else [input_data]
+                set_data_normalized(
+                    span, SPANDATA.GEN_AI_EMBEDDINGS_INPUT, texts, unpack=False
+                )
+
+            result = await f(self, *args, **kwargs)
+            return result
+
+    return new_async_embedding_method
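
For context, a minimal sketch of how this integration would typically be enabled in 2.x. The DSN is a placeholder, and the option values are illustrative assumptions, not part of the diff; `sentry_sdk.init`, `send_default_pii`, `traces_sample_rate`, and `LangchainIntegration(include_prompts=...)` are the real surface used by the code above.

    import sentry_sdk
    from sentry_sdk.integrations.langchain import LangchainIntegration

    sentry_sdk.init(
        dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
        traces_sample_rate=1.0,  # sample transactions so the gen_ai spans are recorded
        send_default_pii=True,  # prompt/response capture is gated on should_send_default_pii()
        integrations=[LangchainIntegration(include_prompts=True)],
    )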
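One detail worth noting in the diff: `_push_agent`/`_pop_agent` copy the list before mutating it, so asyncio tasks that inherited the same list object via the contextvar never see each other's writes. A self-contained sketch of the same copy-on-write pattern; the names here (`_stack`, `push`, `pop`, `task`, `main`) are stand-ins, not part of the SDK:

    import asyncio
    import contextvars

    _stack = contextvars.ContextVar("agent_stack", default=None)

    def push(name):
        # Copy before appending, as _push_agent does, so a shared inherited
        # list is never mutated in place across tasks.
        stack = (_stack.get() or []).copy()
        stack.append(name)
        _stack.set(stack)

    def pop():
        stack = (_stack.get() or []).copy()
        name = stack.pop() if stack else None
        _stack.set(stack)
        return name

    async def task(name):
        push(name)
        await asyncio.sleep(0)  # yield so the two tasks interleave
        assert pop() == name  # each task still sees only its own agent

    async def main():
        await asyncio.gather(task("a"), task("b"))

    asyncio.run(main())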