sentry-sdk 0.7.5__py2.py3-none-any.whl → 2.46.0__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sentry_sdk/__init__.py +48 -30
- sentry_sdk/_compat.py +74 -61
- sentry_sdk/_init_implementation.py +84 -0
- sentry_sdk/_log_batcher.py +172 -0
- sentry_sdk/_lru_cache.py +47 -0
- sentry_sdk/_metrics_batcher.py +167 -0
- sentry_sdk/_queue.py +289 -0
- sentry_sdk/_types.py +338 -0
- sentry_sdk/_werkzeug.py +98 -0
- sentry_sdk/ai/__init__.py +7 -0
- sentry_sdk/ai/monitoring.py +137 -0
- sentry_sdk/ai/utils.py +144 -0
- sentry_sdk/api.py +496 -80
- sentry_sdk/attachments.py +75 -0
- sentry_sdk/client.py +1023 -103
- sentry_sdk/consts.py +1438 -66
- sentry_sdk/crons/__init__.py +10 -0
- sentry_sdk/crons/api.py +62 -0
- sentry_sdk/crons/consts.py +4 -0
- sentry_sdk/crons/decorator.py +135 -0
- sentry_sdk/debug.py +15 -14
- sentry_sdk/envelope.py +369 -0
- sentry_sdk/feature_flags.py +71 -0
- sentry_sdk/hub.py +611 -280
- sentry_sdk/integrations/__init__.py +276 -49
- sentry_sdk/integrations/_asgi_common.py +108 -0
- sentry_sdk/integrations/_wsgi_common.py +180 -44
- sentry_sdk/integrations/aiohttp.py +291 -42
- sentry_sdk/integrations/anthropic.py +439 -0
- sentry_sdk/integrations/argv.py +9 -8
- sentry_sdk/integrations/ariadne.py +161 -0
- sentry_sdk/integrations/arq.py +247 -0
- sentry_sdk/integrations/asgi.py +341 -0
- sentry_sdk/integrations/asyncio.py +144 -0
- sentry_sdk/integrations/asyncpg.py +208 -0
- sentry_sdk/integrations/atexit.py +17 -10
- sentry_sdk/integrations/aws_lambda.py +377 -62
- sentry_sdk/integrations/beam.py +176 -0
- sentry_sdk/integrations/boto3.py +137 -0
- sentry_sdk/integrations/bottle.py +221 -0
- sentry_sdk/integrations/celery/__init__.py +529 -0
- sentry_sdk/integrations/celery/beat.py +293 -0
- sentry_sdk/integrations/celery/utils.py +43 -0
- sentry_sdk/integrations/chalice.py +134 -0
- sentry_sdk/integrations/clickhouse_driver.py +177 -0
- sentry_sdk/integrations/cloud_resource_context.py +280 -0
- sentry_sdk/integrations/cohere.py +274 -0
- sentry_sdk/integrations/dedupe.py +48 -14
- sentry_sdk/integrations/django/__init__.py +584 -191
- sentry_sdk/integrations/django/asgi.py +245 -0
- sentry_sdk/integrations/django/caching.py +204 -0
- sentry_sdk/integrations/django/middleware.py +187 -0
- sentry_sdk/integrations/django/signals_handlers.py +91 -0
- sentry_sdk/integrations/django/templates.py +79 -5
- sentry_sdk/integrations/django/transactions.py +49 -22
- sentry_sdk/integrations/django/views.py +96 -0
- sentry_sdk/integrations/dramatiq.py +226 -0
- sentry_sdk/integrations/excepthook.py +50 -13
- sentry_sdk/integrations/executing.py +67 -0
- sentry_sdk/integrations/falcon.py +272 -0
- sentry_sdk/integrations/fastapi.py +141 -0
- sentry_sdk/integrations/flask.py +142 -88
- sentry_sdk/integrations/gcp.py +239 -0
- sentry_sdk/integrations/gnu_backtrace.py +99 -0
- sentry_sdk/integrations/google_genai/__init__.py +301 -0
- sentry_sdk/integrations/google_genai/consts.py +16 -0
- sentry_sdk/integrations/google_genai/streaming.py +155 -0
- sentry_sdk/integrations/google_genai/utils.py +576 -0
- sentry_sdk/integrations/gql.py +162 -0
- sentry_sdk/integrations/graphene.py +151 -0
- sentry_sdk/integrations/grpc/__init__.py +168 -0
- sentry_sdk/integrations/grpc/aio/__init__.py +7 -0
- sentry_sdk/integrations/grpc/aio/client.py +95 -0
- sentry_sdk/integrations/grpc/aio/server.py +100 -0
- sentry_sdk/integrations/grpc/client.py +91 -0
- sentry_sdk/integrations/grpc/consts.py +1 -0
- sentry_sdk/integrations/grpc/server.py +66 -0
- sentry_sdk/integrations/httpx.py +178 -0
- sentry_sdk/integrations/huey.py +174 -0
- sentry_sdk/integrations/huggingface_hub.py +378 -0
- sentry_sdk/integrations/langchain.py +1132 -0
- sentry_sdk/integrations/langgraph.py +337 -0
- sentry_sdk/integrations/launchdarkly.py +61 -0
- sentry_sdk/integrations/litellm.py +287 -0
- sentry_sdk/integrations/litestar.py +315 -0
- sentry_sdk/integrations/logging.py +307 -96
- sentry_sdk/integrations/loguru.py +213 -0
- sentry_sdk/integrations/mcp.py +566 -0
- sentry_sdk/integrations/modules.py +14 -31
- sentry_sdk/integrations/openai.py +725 -0
- sentry_sdk/integrations/openai_agents/__init__.py +61 -0
- sentry_sdk/integrations/openai_agents/consts.py +1 -0
- sentry_sdk/integrations/openai_agents/patches/__init__.py +5 -0
- sentry_sdk/integrations/openai_agents/patches/agent_run.py +140 -0
- sentry_sdk/integrations/openai_agents/patches/error_tracing.py +77 -0
- sentry_sdk/integrations/openai_agents/patches/models.py +50 -0
- sentry_sdk/integrations/openai_agents/patches/runner.py +45 -0
- sentry_sdk/integrations/openai_agents/patches/tools.py +77 -0
- sentry_sdk/integrations/openai_agents/spans/__init__.py +5 -0
- sentry_sdk/integrations/openai_agents/spans/agent_workflow.py +21 -0
- sentry_sdk/integrations/openai_agents/spans/ai_client.py +42 -0
- sentry_sdk/integrations/openai_agents/spans/execute_tool.py +48 -0
- sentry_sdk/integrations/openai_agents/spans/handoff.py +19 -0
- sentry_sdk/integrations/openai_agents/spans/invoke_agent.py +86 -0
- sentry_sdk/integrations/openai_agents/utils.py +199 -0
- sentry_sdk/integrations/openfeature.py +35 -0
- sentry_sdk/integrations/opentelemetry/__init__.py +7 -0
- sentry_sdk/integrations/opentelemetry/consts.py +5 -0
- sentry_sdk/integrations/opentelemetry/integration.py +58 -0
- sentry_sdk/integrations/opentelemetry/propagator.py +117 -0
- sentry_sdk/integrations/opentelemetry/span_processor.py +391 -0
- sentry_sdk/integrations/otlp.py +82 -0
- sentry_sdk/integrations/pure_eval.py +141 -0
- sentry_sdk/integrations/pydantic_ai/__init__.py +47 -0
- sentry_sdk/integrations/pydantic_ai/consts.py +1 -0
- sentry_sdk/integrations/pydantic_ai/patches/__init__.py +4 -0
- sentry_sdk/integrations/pydantic_ai/patches/agent_run.py +215 -0
- sentry_sdk/integrations/pydantic_ai/patches/graph_nodes.py +110 -0
- sentry_sdk/integrations/pydantic_ai/patches/model_request.py +40 -0
- sentry_sdk/integrations/pydantic_ai/patches/tools.py +98 -0
- sentry_sdk/integrations/pydantic_ai/spans/__init__.py +3 -0
- sentry_sdk/integrations/pydantic_ai/spans/ai_client.py +246 -0
- sentry_sdk/integrations/pydantic_ai/spans/execute_tool.py +49 -0
- sentry_sdk/integrations/pydantic_ai/spans/invoke_agent.py +112 -0
- sentry_sdk/integrations/pydantic_ai/utils.py +223 -0
- sentry_sdk/integrations/pymongo.py +214 -0
- sentry_sdk/integrations/pyramid.py +112 -68
- sentry_sdk/integrations/quart.py +237 -0
- sentry_sdk/integrations/ray.py +165 -0
- sentry_sdk/integrations/redis/__init__.py +48 -0
- sentry_sdk/integrations/redis/_async_common.py +116 -0
- sentry_sdk/integrations/redis/_sync_common.py +119 -0
- sentry_sdk/integrations/redis/consts.py +19 -0
- sentry_sdk/integrations/redis/modules/__init__.py +0 -0
- sentry_sdk/integrations/redis/modules/caches.py +118 -0
- sentry_sdk/integrations/redis/modules/queries.py +65 -0
- sentry_sdk/integrations/redis/rb.py +32 -0
- sentry_sdk/integrations/redis/redis.py +69 -0
- sentry_sdk/integrations/redis/redis_cluster.py +107 -0
- sentry_sdk/integrations/redis/redis_py_cluster_legacy.py +50 -0
- sentry_sdk/integrations/redis/utils.py +148 -0
- sentry_sdk/integrations/rq.py +95 -37
- sentry_sdk/integrations/rust_tracing.py +284 -0
- sentry_sdk/integrations/sanic.py +294 -123
- sentry_sdk/integrations/serverless.py +48 -19
- sentry_sdk/integrations/socket.py +96 -0
- sentry_sdk/integrations/spark/__init__.py +4 -0
- sentry_sdk/integrations/spark/spark_driver.py +316 -0
- sentry_sdk/integrations/spark/spark_worker.py +116 -0
- sentry_sdk/integrations/sqlalchemy.py +142 -0
- sentry_sdk/integrations/starlette.py +737 -0
- sentry_sdk/integrations/starlite.py +292 -0
- sentry_sdk/integrations/statsig.py +37 -0
- sentry_sdk/integrations/stdlib.py +235 -29
- sentry_sdk/integrations/strawberry.py +394 -0
- sentry_sdk/integrations/sys_exit.py +70 -0
- sentry_sdk/integrations/threading.py +158 -28
- sentry_sdk/integrations/tornado.py +84 -52
- sentry_sdk/integrations/trytond.py +50 -0
- sentry_sdk/integrations/typer.py +60 -0
- sentry_sdk/integrations/unleash.py +33 -0
- sentry_sdk/integrations/unraisablehook.py +53 -0
- sentry_sdk/integrations/wsgi.py +201 -119
- sentry_sdk/logger.py +96 -0
- sentry_sdk/metrics.py +81 -0
- sentry_sdk/monitor.py +120 -0
- sentry_sdk/profiler/__init__.py +49 -0
- sentry_sdk/profiler/continuous_profiler.py +730 -0
- sentry_sdk/profiler/transaction_profiler.py +839 -0
- sentry_sdk/profiler/utils.py +195 -0
- sentry_sdk/py.typed +0 -0
- sentry_sdk/scope.py +1713 -85
- sentry_sdk/scrubber.py +177 -0
- sentry_sdk/serializer.py +405 -0
- sentry_sdk/session.py +177 -0
- sentry_sdk/sessions.py +275 -0
- sentry_sdk/spotlight.py +242 -0
- sentry_sdk/tracing.py +1486 -0
- sentry_sdk/tracing_utils.py +1236 -0
- sentry_sdk/transport.py +806 -134
- sentry_sdk/types.py +52 -0
- sentry_sdk/utils.py +1625 -465
- sentry_sdk/worker.py +54 -25
- sentry_sdk-2.46.0.dist-info/METADATA +268 -0
- sentry_sdk-2.46.0.dist-info/RECORD +189 -0
- {sentry_sdk-0.7.5.dist-info → sentry_sdk-2.46.0.dist-info}/WHEEL +1 -1
- sentry_sdk-2.46.0.dist-info/entry_points.txt +2 -0
- sentry_sdk-2.46.0.dist-info/licenses/LICENSE +21 -0
- sentry_sdk/integrations/celery.py +0 -119
- sentry_sdk-0.7.5.dist-info/LICENSE +0 -9
- sentry_sdk-0.7.5.dist-info/METADATA +0 -36
- sentry_sdk-0.7.5.dist-info/RECORD +0 -39
- {sentry_sdk-0.7.5.dist-info → sentry_sdk-2.46.0.dist-info}/top_level.txt +0 -0
sentry_sdk/integrations/langgraph.py (new file, @@ -0,0 +1,337 @@):

```python
from functools import wraps
from typing import Any, Callable, List, Optional

import sentry_sdk
from sentry_sdk.ai.utils import (
    set_data_normalized,
    normalize_message_roles,
    truncate_and_annotate_messages,
)
from sentry_sdk.consts import OP, SPANDATA
from sentry_sdk.integrations import DidNotEnable, Integration
from sentry_sdk.scope import should_send_default_pii
from sentry_sdk.utils import safe_serialize


try:
    from langgraph.graph import StateGraph
    from langgraph.pregel import Pregel
except ImportError:
    raise DidNotEnable("langgraph not installed")


class LanggraphIntegration(Integration):
    identifier = "langgraph"
    origin = f"auto.ai.{identifier}"

    def __init__(self, include_prompts=True):
        # type: (LanggraphIntegration, bool) -> None
        self.include_prompts = include_prompts

    @staticmethod
    def setup_once():
        # type: () -> None
        # LangGraph lets users create agents using a StateGraph or the Functional API.
        # StateGraphs are compiled to a CompiledStateGraph. Both CompiledStateGraph and
        # the Functional API execute on a Pregel instance. Pregel is the runtime for the
        # graph and invocation happens on Pregel, so patching the invoke methods covers
        # both. The streaming methods are not patched because, for internal reasons,
        # LangGraph automatically routes them through invoke; patching only invoke
        # therefore avoids duplicate spans for invocations.
        StateGraph.compile = _wrap_state_graph_compile(StateGraph.compile)
        if hasattr(Pregel, "invoke"):
            Pregel.invoke = _wrap_pregel_invoke(Pregel.invoke)
        if hasattr(Pregel, "ainvoke"):
            Pregel.ainvoke = _wrap_pregel_ainvoke(Pregel.ainvoke)


def _get_graph_name(graph_obj):
    # type: (Any) -> Optional[str]
    for attr in ["name", "graph_name", "__name__", "_name"]:
        if hasattr(graph_obj, attr):
            name = getattr(graph_obj, attr)
            if name and isinstance(name, str):
                return name
    return None


def _normalize_langgraph_message(message):
    # type: (Any) -> Any
    if not hasattr(message, "content"):
        return None

    parsed = {"role": getattr(message, "type", None), "content": message.content}

    for attr in ["name", "tool_calls", "function_call", "tool_call_id"]:
        if hasattr(message, attr):
            value = getattr(message, attr)
            if value is not None:
                parsed[attr] = value

    return parsed


def _parse_langgraph_messages(state):
    # type: (Any) -> Optional[List[Any]]
    if not state:
        return None

    messages = None

    if isinstance(state, dict):
        messages = state.get("messages")
    elif hasattr(state, "messages"):
        messages = state.messages
    elif hasattr(state, "get") and callable(state.get):
        try:
            messages = state.get("messages")
        except Exception:
            pass

    if not messages or not isinstance(messages, (list, tuple)):
        return None

    normalized_messages = []
    for message in messages:
        try:
            normalized = _normalize_langgraph_message(message)
            if normalized:
                normalized_messages.append(normalized)
        except Exception:
            continue

    return normalized_messages if normalized_messages else None
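
# Illustration (not part of the shipped file): given a hypothetical
# LangChain-style message object where message.type == "human" and
# message.content == "Hi", _normalize_langgraph_message returns
# {"role": "human", "content": "Hi"}, and
# _parse_langgraph_messages({"messages": [message]}) returns that dict in a
# one-element list. Objects without a .content attribute, and state shapes
# without a "messages" entry, fall through to None, so callers can treat
# "no messages" and "unrecognized state" the same way.
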

def _wrap_state_graph_compile(f):
    # type: (Callable[..., Any]) -> Callable[..., Any]
    @wraps(f)
    def new_compile(self, *args, **kwargs):
        # type: (Any, Any, Any) -> Any
        integration = sentry_sdk.get_client().get_integration(LanggraphIntegration)
        if integration is None:
            return f(self, *args, **kwargs)
        with sentry_sdk.start_span(
            op=OP.GEN_AI_CREATE_AGENT,
            origin=LanggraphIntegration.origin,
        ) as span:
            compiled_graph = f(self, *args, **kwargs)

            compiled_graph_name = getattr(compiled_graph, "name", None)
            span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "create_agent")
            span.set_data(SPANDATA.GEN_AI_AGENT_NAME, compiled_graph_name)

            if compiled_graph_name:
                span.description = f"create_agent {compiled_graph_name}"
            else:
                span.description = "create_agent"

            if kwargs.get("model", None) is not None:
                span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, kwargs.get("model"))

            tools = None
            get_graph = getattr(compiled_graph, "get_graph", None)
            if get_graph and callable(get_graph):
                graph_obj = compiled_graph.get_graph()
                nodes = getattr(graph_obj, "nodes", None)
                if nodes and isinstance(nodes, dict):
                    tools_node = nodes.get("tools")
                    if tools_node:
                        data = getattr(tools_node, "data", None)
                        if data and hasattr(data, "tools_by_name"):
                            tools = list(data.tools_by_name.keys())

            if tools is not None:
                span.set_data(SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, tools)

            return compiled_graph

    return new_compile


def _wrap_pregel_invoke(f):
    # type: (Callable[..., Any]) -> Callable[..., Any]

    @wraps(f)
    def new_invoke(self, *args, **kwargs):
        # type: (Any, Any, Any) -> Any
        integration = sentry_sdk.get_client().get_integration(LanggraphIntegration)
        if integration is None:
            return f(self, *args, **kwargs)

        graph_name = _get_graph_name(self)
        span_name = (
            f"invoke_agent {graph_name}".strip() if graph_name else "invoke_agent"
        )

        with sentry_sdk.start_span(
            op=OP.GEN_AI_INVOKE_AGENT,
            name=span_name,
            origin=LanggraphIntegration.origin,
        ) as span:
            if graph_name:
                span.set_data(SPANDATA.GEN_AI_PIPELINE_NAME, graph_name)
                span.set_data(SPANDATA.GEN_AI_AGENT_NAME, graph_name)

            span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")

            # Store input messages to later compare with output
            input_messages = None
            if (
                len(args) > 0
                and should_send_default_pii()
                and integration.include_prompts
            ):
                input_messages = _parse_langgraph_messages(args[0])
                if input_messages:
                    normalized_input_messages = normalize_message_roles(input_messages)
                    scope = sentry_sdk.get_current_scope()
                    messages_data = truncate_and_annotate_messages(
                        normalized_input_messages, span, scope
                    )
                    if messages_data is not None:
                        set_data_normalized(
                            span,
                            SPANDATA.GEN_AI_REQUEST_MESSAGES,
                            messages_data,
                            unpack=False,
                        )

            result = f(self, *args, **kwargs)

            _set_response_attributes(span, input_messages, result, integration)

            return result

    return new_invoke


def _wrap_pregel_ainvoke(f):
    # type: (Callable[..., Any]) -> Callable[..., Any]

    @wraps(f)
    async def new_ainvoke(self, *args, **kwargs):
        # type: (Any, Any, Any) -> Any
        integration = sentry_sdk.get_client().get_integration(LanggraphIntegration)
        if integration is None:
            return await f(self, *args, **kwargs)

        graph_name = _get_graph_name(self)
        span_name = (
            f"invoke_agent {graph_name}".strip() if graph_name else "invoke_agent"
        )

        with sentry_sdk.start_span(
            op=OP.GEN_AI_INVOKE_AGENT,
            name=span_name,
            origin=LanggraphIntegration.origin,
        ) as span:
            if graph_name:
                span.set_data(SPANDATA.GEN_AI_PIPELINE_NAME, graph_name)
                span.set_data(SPANDATA.GEN_AI_AGENT_NAME, graph_name)

            span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")

            input_messages = None
            if (
                len(args) > 0
                and should_send_default_pii()
                and integration.include_prompts
            ):
                input_messages = _parse_langgraph_messages(args[0])
                if input_messages:
                    normalized_input_messages = normalize_message_roles(input_messages)
                    scope = sentry_sdk.get_current_scope()
                    messages_data = truncate_and_annotate_messages(
                        normalized_input_messages, span, scope
                    )
                    if messages_data is not None:
                        set_data_normalized(
                            span,
                            SPANDATA.GEN_AI_REQUEST_MESSAGES,
                            messages_data,
                            unpack=False,
                        )

            result = await f(self, *args, **kwargs)

            _set_response_attributes(span, input_messages, result, integration)

            return result

    return new_ainvoke


def _get_new_messages(input_messages, output_messages):
    # type: (Optional[List[Any]], Optional[List[Any]]) -> Optional[List[Any]]
    """Extract only the new messages added during this invocation."""
    if not output_messages:
        return None

    if not input_messages:
        return output_messages

    # only return the new messages, aka the output messages that are not in the input messages
    input_count = len(input_messages)
    new_messages = (
        output_messages[input_count:] if len(output_messages) > input_count else []
    )

    return new_messages if new_messages else None


def _extract_llm_response_text(messages):
    # type: (Optional[List[Any]]) -> Optional[str]
    if not messages:
        return None

    for message in reversed(messages):
        if isinstance(message, dict):
            role = message.get("role")
            if role in ["assistant", "ai"]:
                content = message.get("content")
                if content and isinstance(content, str):
                    return content

    return None


def _extract_tool_calls(messages):
    # type: (Optional[List[Any]]) -> Optional[List[Any]]
    if not messages:
        return None

    tool_calls = []
    for message in messages:
        if isinstance(message, dict):
            msg_tool_calls = message.get("tool_calls")
            if msg_tool_calls and isinstance(msg_tool_calls, list):
                tool_calls.extend(msg_tool_calls)

    return tool_calls if tool_calls else None


def _set_response_attributes(span, input_messages, result, integration):
    # type: (Any, Optional[List[Any]], Any, LanggraphIntegration) -> None
    if not (should_send_default_pii() and integration.include_prompts):
        return

    parsed_response_messages = _parse_langgraph_messages(result)
    new_messages = _get_new_messages(input_messages, parsed_response_messages)

    llm_response_text = _extract_llm_response_text(new_messages)
    if llm_response_text:
        set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, llm_response_text)
    elif new_messages:
        set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, new_messages)
    else:
        set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, result)

    tool_calls = _extract_tool_calls(new_messages)
    if tool_calls:
        set_data_normalized(
            span,
            SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
            safe_serialize(tool_calls),
            unpack=False,
        )
```
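
A minimal usage sketch for the new integration (the DSN is a placeholder; everything else uses only the public API shown in the file above):

```python
import sentry_sdk
from sentry_sdk.integrations.langgraph import LanggraphIntegration

sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    traces_sample_rate=1.0,
    send_default_pii=True,  # message capture requires this together with include_prompts
    integrations=[LanggraphIntegration(include_prompts=True)],
)

# From here on, StateGraph.compile() emits a create_agent span, and each
# Pregel.invoke()/ainvoke() call emits an invoke_agent span carrying the
# request messages, response text, and tool calls described above.
```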
sentry_sdk/integrations/launchdarkly.py (new file, @@ -0,0 +1,61 @@):

```python
from typing import TYPE_CHECKING

from sentry_sdk.feature_flags import add_feature_flag
from sentry_sdk.integrations import DidNotEnable, Integration

try:
    import ldclient
    from ldclient.hook import Hook, Metadata

    if TYPE_CHECKING:
        from ldclient import LDClient
        from ldclient.hook import EvaluationSeriesContext
        from ldclient.evaluation import EvaluationDetail

        from typing import Any
except ImportError:
    raise DidNotEnable("LaunchDarkly is not installed")


class LaunchDarklyIntegration(Integration):
    identifier = "launchdarkly"

    def __init__(self, ld_client=None):
        # type: (LDClient | None) -> None
        """
        :param ld_client: An initialized LDClient instance. If a client is not
            provided, this integration will attempt to use the shared global instance.
        """
        try:
            client = ld_client or ldclient.get()
        except Exception as exc:
            raise DidNotEnable("Error getting LaunchDarkly client. " + repr(exc))

        if not client.is_initialized():
            raise DidNotEnable("LaunchDarkly client is not initialized.")

        # Register the flag collection hook with the LD client.
        client.add_hook(LaunchDarklyHook())

    @staticmethod
    def setup_once():
        # type: () -> None
        pass


class LaunchDarklyHook(Hook):
    @property
    def metadata(self):
        # type: () -> Metadata
        return Metadata(name="sentry-flag-auditor")

    def after_evaluation(self, series_context, data, detail):
        # type: (EvaluationSeriesContext, dict[Any, Any], EvaluationDetail) -> dict[Any, Any]
        if isinstance(detail.value, bool):
            add_feature_flag(series_context.key, detail.value)

        return data

    def before_evaluation(self, series_context, data):
        # type: (EvaluationSeriesContext, dict[Any, Any]) -> dict[Any, Any]
        return data  # No-op.
```
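
A minimal usage sketch (the SDK key and DSN are placeholders). Note the ordering: the LaunchDarkly client must be configured and initialized before `LaunchDarklyIntegration()` is constructed, because the constructor resolves the shared client and raises `DidNotEnable` otherwise:

```python
import ldclient
from ldclient.config import Config

import sentry_sdk
from sentry_sdk.integrations.launchdarkly import LaunchDarklyIntegration

ldclient.set_config(Config("sdk-key-placeholder"))  # must happen first

sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    # Or pass the client explicitly: LaunchDarklyIntegration(ldclient.get())
    integrations=[LaunchDarklyIntegration()],
)

# Every boolean flag evaluation is now recorded by the "sentry-flag-auditor"
# hook and attached to later error events as feature-flag context.
```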
sentry_sdk/integrations/litellm.py (new file, @@ -0,0 +1,287 @@):

````python
from typing import TYPE_CHECKING

import sentry_sdk
from sentry_sdk import consts
from sentry_sdk.ai.monitoring import record_token_usage
from sentry_sdk.ai.utils import (
    get_start_span_function,
    set_data_normalized,
    truncate_and_annotate_messages,
)
from sentry_sdk.consts import SPANDATA
from sentry_sdk.integrations import DidNotEnable, Integration
from sentry_sdk.scope import should_send_default_pii
from sentry_sdk.utils import event_from_exception

if TYPE_CHECKING:
    from typing import Any, Dict
    from datetime import datetime

try:
    import litellm  # type: ignore[import-not-found]
except ImportError:
    raise DidNotEnable("LiteLLM not installed")


def _get_metadata_dict(kwargs):
    # type: (Dict[str, Any]) -> Dict[str, Any]
    """Get the metadata dictionary from the kwargs."""
    litellm_params = kwargs.setdefault("litellm_params", {})

    # This little dance is needed because "metadata" may be present in
    # litellm_params but set to None initially.
    metadata = litellm_params.get("metadata")
    if metadata is None:
        metadata = {}
        litellm_params["metadata"] = metadata
    return metadata
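
# Illustration (not part of the shipped file): LiteLLM may invoke the
# callbacks with kwargs = {"litellm_params": {"metadata": None}}. Here
# kwargs.setdefault("litellm_params", {}) returns the existing inner dict,
# but litellm_params.setdefault("metadata", {}) would hand back the stored
# None, because setdefault only fills in *missing* keys. The explicit None
# check above replaces the None with a fresh dict, so callers always get a
# mutable mapping back.
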

def _input_callback(kwargs):
    # type: (Dict[str, Any]) -> None
    """Handle the start of a request."""
    integration = sentry_sdk.get_client().get_integration(LiteLLMIntegration)

    if integration is None:
        return

    # Get key parameters
    full_model = kwargs.get("model", "")
    try:
        model, provider, _, _ = litellm.get_llm_provider(full_model)
    except Exception:
        model = full_model
        provider = "unknown"

    call_type = kwargs.get("call_type", None)
    if call_type == "embedding":
        operation = "embeddings"
    else:
        operation = "chat"

    # Start a new span/transaction
    span = get_start_span_function()(
        op=(
            consts.OP.GEN_AI_CHAT
            if operation == "chat"
            else consts.OP.GEN_AI_EMBEDDINGS
        ),
        name=f"{operation} {model}",
        origin=LiteLLMIntegration.origin,
    )
    span.__enter__()

    # Store span for later
    _get_metadata_dict(kwargs)["_sentry_span"] = span

    # Set basic data
    set_data_normalized(span, SPANDATA.GEN_AI_SYSTEM, provider)
    set_data_normalized(span, SPANDATA.GEN_AI_OPERATION_NAME, operation)

    # Record input/messages if allowed
    if should_send_default_pii() and integration.include_prompts:
        if operation == "embeddings":
            # For embeddings, look for the 'input' parameter
            embedding_input = kwargs.get("input")
            if embedding_input:
                scope = sentry_sdk.get_current_scope()
                # Normalize to list format
                input_list = (
                    embedding_input
                    if isinstance(embedding_input, list)
                    else [embedding_input]
                )
                messages_data = truncate_and_annotate_messages(input_list, span, scope)
                if messages_data is not None:
                    set_data_normalized(
                        span,
                        SPANDATA.GEN_AI_EMBEDDINGS_INPUT,
                        messages_data,
                        unpack=False,
                    )
        else:
            # For chat, look for the 'messages' parameter
            messages = kwargs.get("messages", [])
            if messages:
                scope = sentry_sdk.get_current_scope()
                messages_data = truncate_and_annotate_messages(messages, span, scope)
                if messages_data is not None:
                    set_data_normalized(
                        span,
                        SPANDATA.GEN_AI_REQUEST_MESSAGES,
                        messages_data,
                        unpack=False,
                    )

    # Record other parameters
    params = {
        "model": SPANDATA.GEN_AI_REQUEST_MODEL,
        "stream": SPANDATA.GEN_AI_RESPONSE_STREAMING,
        "max_tokens": SPANDATA.GEN_AI_REQUEST_MAX_TOKENS,
        "presence_penalty": SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY,
        "frequency_penalty": SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY,
        "temperature": SPANDATA.GEN_AI_REQUEST_TEMPERATURE,
        "top_p": SPANDATA.GEN_AI_REQUEST_TOP_P,
    }
    for key, attribute in params.items():
        value = kwargs.get(key)
        if value is not None:
            set_data_normalized(span, attribute, value)

    # Record LiteLLM-specific parameters
    litellm_params = {
        "api_base": kwargs.get("api_base"),
        "api_version": kwargs.get("api_version"),
        "custom_llm_provider": kwargs.get("custom_llm_provider"),
    }
    for key, value in litellm_params.items():
        if value is not None:
            set_data_normalized(span, f"gen_ai.litellm.{key}", value)


def _success_callback(kwargs, completion_response, start_time, end_time):
    # type: (Dict[str, Any], Any, datetime, datetime) -> None
    """Handle successful completion."""

    span = _get_metadata_dict(kwargs).get("_sentry_span")
    if span is None:
        return

    integration = sentry_sdk.get_client().get_integration(LiteLLMIntegration)
    if integration is None:
        return

    try:
        # Record model information
        if hasattr(completion_response, "model"):
            set_data_normalized(
                span, SPANDATA.GEN_AI_RESPONSE_MODEL, completion_response.model
            )

        # Record response content if allowed
        if should_send_default_pii() and integration.include_prompts:
            if hasattr(completion_response, "choices"):
                response_messages = []
                for choice in completion_response.choices:
                    if hasattr(choice, "message"):
                        if hasattr(choice.message, "model_dump"):
                            response_messages.append(choice.message.model_dump())
                        elif hasattr(choice.message, "dict"):
                            response_messages.append(choice.message.dict())
                        else:
                            # Fallback for basic message objects
                            msg = {}
                            if hasattr(choice.message, "role"):
                                msg["role"] = choice.message.role
                            if hasattr(choice.message, "content"):
                                msg["content"] = choice.message.content
                            if hasattr(choice.message, "tool_calls"):
                                msg["tool_calls"] = choice.message.tool_calls
                            response_messages.append(msg)

                if response_messages:
                    set_data_normalized(
                        span, SPANDATA.GEN_AI_RESPONSE_TEXT, response_messages
                    )

        # Record token usage
        if hasattr(completion_response, "usage"):
            usage = completion_response.usage
            record_token_usage(
                span,
                input_tokens=getattr(usage, "prompt_tokens", None),
                output_tokens=getattr(usage, "completion_tokens", None),
                total_tokens=getattr(usage, "total_tokens", None),
            )

    finally:
        # Always finish the span and clean up
        span.__exit__(None, None, None)


def _failure_callback(kwargs, exception, start_time, end_time):
    # type: (Dict[str, Any], Exception, datetime, datetime) -> None
    """Handle request failure."""
    span = _get_metadata_dict(kwargs).get("_sentry_span")
    if span is None:
        return

    try:
        # Capture the exception
        event, hint = event_from_exception(
            exception,
            client_options=sentry_sdk.get_client().options,
            mechanism={"type": "litellm", "handled": False},
        )
        sentry_sdk.capture_event(event, hint=hint)
    finally:
        # Always finish the span and clean up
        span.__exit__(type(exception), exception, None)


class LiteLLMIntegration(Integration):
    """
    LiteLLM integration for Sentry.

    This integration automatically captures LiteLLM API calls and sends them to Sentry
    for monitoring and error tracking. It supports all 100+ LLM providers that LiteLLM
    supports, including OpenAI, Anthropic, Google, Cohere, and many others.

    Features:
    - Automatic exception capture for all LiteLLM calls
    - Token usage tracking across all providers
    - Provider detection and attribution
    - Input/output message capture (configurable)
    - Streaming response support
    - Cost tracking integration

    Usage:

    ```python
    import litellm
    import sentry_sdk

    # Initialize Sentry with the LiteLLM integration
    sentry_sdk.init(
        dsn="your-dsn",
        send_default_pii=True,
        integrations=[
            sentry_sdk.integrations.LiteLLMIntegration(
                include_prompts=True  # Set to False to exclude message content
            )
        ]
    )

    # All LiteLLM calls will now be monitored
    response = litellm.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello!"}]
    )
    ```

    Configuration:
    - include_prompts (bool): Whether to include prompts and responses in spans.
      Defaults to True. Set to False to exclude potentially sensitive data.
    """

    identifier = "litellm"
    origin = f"auto.ai.{identifier}"

    def __init__(self, include_prompts=True):
        # type: (LiteLLMIntegration, bool) -> None
        self.include_prompts = include_prompts

    @staticmethod
    def setup_once():
        # type: () -> None
        """Set up LiteLLM callbacks for monitoring."""
        litellm.input_callback = litellm.input_callback or []
        if _input_callback not in litellm.input_callback:
            litellm.input_callback.append(_input_callback)

        litellm.success_callback = litellm.success_callback or []
        if _success_callback not in litellm.success_callback:
            litellm.success_callback.append(_success_callback)

        litellm.failure_callback = litellm.failure_callback or []
        if _failure_callback not in litellm.failure_callback:
            litellm.failure_callback.append(_failure_callback)
````
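
Because `_input_callback` branches on LiteLLM's `call_type`, embedding requests get their own span op, and their raw `input` parameter (rather than chat `messages`) is captured. A minimal sketch of that path (the DSN and model name are placeholders; assumes an embedding provider is configured for LiteLLM):

```python
import litellm
import sentry_sdk
from sentry_sdk.integrations.litellm import LiteLLMIntegration

sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    send_default_pii=True,
    integrations=[LiteLLMIntegration(include_prompts=True)],
)

# LiteLLM reports call_type == "embedding" here, so _input_callback starts an
# embeddings span and records the input list instead of chat messages.
response = litellm.embedding(
    model="text-embedding-3-small",  # placeholder model name
    input=["LiteLLM routes this to the configured embeddings provider."],
)
```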