sentry-sdk 2.36.0__py2.py3-none-any.whl → 2.37.0__py2.py3-none-any.whl
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Potentially problematic release: this version of sentry-sdk might be problematic.
- sentry_sdk/ai/utils.py +9 -6
- sentry_sdk/consts.py +2 -1
- sentry_sdk/integrations/__init__.py +2 -0
- sentry_sdk/integrations/langchain.py +105 -33
- sentry_sdk/integrations/langgraph.py +321 -0
- sentry_sdk/integrations/openai.py +33 -10
- sentry_sdk/integrations/openai_agents/utils.py +6 -3
- {sentry_sdk-2.36.0.dist-info → sentry_sdk-2.37.0.dist-info}/METADATA +3 -1
- {sentry_sdk-2.36.0.dist-info → sentry_sdk-2.37.0.dist-info}/RECORD +13 -12
- {sentry_sdk-2.36.0.dist-info → sentry_sdk-2.37.0.dist-info}/WHEEL +0 -0
- {sentry_sdk-2.36.0.dist-info → sentry_sdk-2.37.0.dist-info}/entry_points.txt +0 -0
- {sentry_sdk-2.36.0.dist-info → sentry_sdk-2.37.0.dist-info}/licenses/LICENSE +0 -0
- {sentry_sdk-2.36.0.dist-info → sentry_sdk-2.37.0.dist-info}/top_level.txt +0 -0
sentry_sdk/ai/utils.py
CHANGED
@@ -1,30 +1,33 @@
+import json
+
 from typing import TYPE_CHECKING
 
 if TYPE_CHECKING:
     from typing import Any
+    from sentry_sdk.tracing import Span
 
-from sentry_sdk.tracing import Span
 from sentry_sdk.utils import logger
 
 
 def _normalize_data(data, unpack=True):
     # type: (Any, bool) -> Any
-
     # convert pydantic data (e.g. OpenAI v1+) to json compatible format
     if hasattr(data, "model_dump"):
         try:
-            return data.model_dump()
+            return _normalize_data(data.model_dump(), unpack=unpack)
         except Exception as e:
             logger.warning("Could not convert pydantic data to JSON: %s", e)
-        return data
+        return data if isinstance(data, (int, float, bool, str)) else str(data)
+
     if isinstance(data, list):
         if unpack and len(data) == 1:
             return _normalize_data(data[0], unpack=unpack)  # remove empty dimensions
         return list(_normalize_data(x, unpack=unpack) for x in data)
+
     if isinstance(data, dict):
         return {k: _normalize_data(v, unpack=unpack) for (k, v) in data.items()}
 
-    return data
+    return data if isinstance(data, (int, float, bool, str)) else str(data)
@@ -33,4 +36,4 @@ def set_data_normalized(span, key, value, unpack=True):
     if isinstance(normalized, (int, float, bool, str)):
         span.set_data(key, normalized)
     else:
-        span.set_data(key, str(normalized))
+        span.set_data(key, json.dumps(normalized))
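The net effect: _normalize_data now recurses into model_dump() output, stringifies leaves that are not JSON-friendly primitives, and set_data_normalized stores compound values as json.dumps(...) rather than str(...). A minimal standalone sketch of the new behavior (it mirrors the helper above rather than importing the SDK):

import json

# Standalone sketch mirroring the 2.37.0 normalization shown above;
# not the SDK code itself.
def normalize(data, unpack=True):
    if hasattr(data, "model_dump"):  # pydantic models, e.g. OpenAI v1+ objects
        try:
            return normalize(data.model_dump(), unpack=unpack)
        except Exception:
            return data if isinstance(data, (int, float, bool, str)) else str(data)
    if isinstance(data, list):
        if unpack and len(data) == 1:
            return normalize(data[0], unpack=unpack)  # remove empty dimensions
        return [normalize(x, unpack=unpack) for x in data]
    if isinstance(data, dict):
        return {k: normalize(v, unpack=unpack) for k, v in data.items()}
    # non-primitive leaves are now stringified instead of passed through
    return data if isinstance(data, (int, float, bool, str)) else str(data)

print(normalize({"id": complex(1, 2)}))                         # {'id': '(1+2j)'}
print(json.dumps(normalize([{"role": "user"}], unpack=False)))  # [{"role": "user"}]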
sentry_sdk/consts.py
CHANGED
@@ -792,6 +792,7 @@ class OP:
     FUNCTION_AWS = "function.aws"
     FUNCTION_GCP = "function.gcp"
     GEN_AI_CHAT = "gen_ai.chat"
+    GEN_AI_CREATE_AGENT = "gen_ai.create_agent"
     GEN_AI_EMBEDDINGS = "gen_ai.embeddings"
     GEN_AI_EXECUTE_TOOL = "gen_ai.execute_tool"
     GEN_AI_HANDOFF = "gen_ai.handoff"
@@ -1329,4 +1330,4 @@ DEFAULT_OPTIONS = _get_default_options()
 del _get_default_options
 
 
-VERSION = "2.36.0"
+VERSION = "2.37.0"
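The new OP.GEN_AI_CREATE_AGENT value ("gen_ai.create_agent") is the op the LangGraph integration stamps on its graph-compile spans; the same constant works for manual instrumentation. A hedged sketch (the DSN is a placeholder):

import sentry_sdk
from sentry_sdk.consts import OP, SPANDATA

sentry_sdk.init(dsn="https://examplePublicKey@o0.ingest.sentry.io/0")  # placeholder DSN

# Manually emit a span with the new op, mirroring what the LangGraph
# integration does when a graph is compiled.
with sentry_sdk.start_span(op=OP.GEN_AI_CREATE_AGENT, name="create_agent demo") as span:
    span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "create_agent")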
sentry_sdk/integrations/__init__.py
CHANGED
@@ -95,6 +95,7 @@ _AUTO_ENABLING_INTEGRATIONS = [
     "sentry_sdk.integrations.huey.HueyIntegration",
     "sentry_sdk.integrations.huggingface_hub.HuggingfaceHubIntegration",
     "sentry_sdk.integrations.langchain.LangchainIntegration",
+    "sentry_sdk.integrations.langgraph.LanggraphIntegration",
     "sentry_sdk.integrations.litestar.LitestarIntegration",
     "sentry_sdk.integrations.loguru.LoguruIntegration",
     "sentry_sdk.integrations.openai.OpenAIIntegration",
@@ -142,6 +143,7 @@ _MIN_VERSIONS = {
     "grpc": (1, 32, 0),  # grpcio
     "huggingface_hub": (0, 22),
     "langchain": (0, 1, 0),
+    "langgraph": (0, 6, 6),
     "launchdarkly": (9, 8, 0),
     "loguru": (0, 7, 0),
     "openai": (1, 0, 0),
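Because the integration is auto-enabling, it activates whenever langgraph >= 0.6.6 is importable and the SDK is initialized; nothing needs to be passed to sentry_sdk.init(). Opting out uses the standard disabled_integrations option. A sketch (placeholder DSN):

import sentry_sdk
from sentry_sdk.integrations.langgraph import LanggraphIntegration

sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    # Auto-enabled otherwise; disable explicitly if LangGraph spans are unwanted.
    disabled_integrations=[LanggraphIntegration()],
)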
sentry_sdk/integrations/langchain.py
CHANGED
@@ -51,7 +51,6 @@ DATA_FIELDS = {
     "presence_penalty": SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY,
     "temperature": SPANDATA.GEN_AI_REQUEST_TEMPERATURE,
     "tool_calls": SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
-    "tools": SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS,
     "top_k": SPANDATA.GEN_AI_REQUEST_TOP_K,
     "top_p": SPANDATA.GEN_AI_REQUEST_TOP_P,
 }
@@ -203,8 +202,12 @@ class SentryLangchainCallback(BaseCallbackHandler):  # type: ignore[misc]
             if key in all_params and all_params[key] is not None:
                 set_data_normalized(span, attribute, all_params[key], unpack=False)
 
+        _set_tools_on_span(span, all_params.get("tools"))
+
         if should_send_default_pii() and self.include_prompts:
-            set_data_normalized(span, SPANDATA.GEN_AI_REQUEST_MESSAGES, prompts)
+            set_data_normalized(
+                span, SPANDATA.GEN_AI_REQUEST_MESSAGES, prompts, unpack=False
+            )
 
     def on_chat_model_start(self, serialized, messages, *, run_id, **kwargs):
         # type: (SentryLangchainCallback, Dict[str, Any], List[List[BaseMessage]], UUID, Any) -> Any
@@ -246,14 +249,20 @@ class SentryLangchainCallback(BaseCallbackHandler):  # type: ignore[misc]
             if key in all_params and all_params[key] is not None:
                 set_data_normalized(span, attribute, all_params[key], unpack=False)
 
+        _set_tools_on_span(span, all_params.get("tools"))
+
         if should_send_default_pii() and self.include_prompts:
+            normalized_messages = []
+            for list_ in messages:
+                for message in list_:
+                    normalized_messages.append(
+                        self._normalize_langchain_message(message)
+                    )
             set_data_normalized(
                 span,
                 SPANDATA.GEN_AI_REQUEST_MESSAGES,
-                [
-                    [self._normalize_langchain_message(message) for message in list_]
-                    for list_ in messages
-                ],
+                normalized_messages,
+                unpack=False,
             )
 
     def on_chat_model_end(self, response, *, run_id, **kwargs):
@@ -351,9 +360,7 @@ class SentryLangchainCallback(BaseCallbackHandler):  # type: ignore[misc]
 
         if should_send_default_pii() and self.include_prompts:
             set_data_normalized(
-                span,
-                SPANDATA.GEN_AI_RESPONSE_TEXT,
-                finish.return_values.items(),
+                span, SPANDATA.GEN_AI_RESPONSE_TEXT, finish.return_values.items()
             )
 
         self._exit_span(span_data, run_id)
@@ -473,13 +480,11 @@ def _get_token_usage(obj):
     if usage is not None:
         return usage
 
-    # check for usage in the object itself
     for name in possible_names:
        usage = _get_value(obj, name)
        if usage is not None:
            return usage
 
-    # no usage found anywhere
     return None
 
 
@@ -531,6 +536,87 @@ def _get_request_data(obj, args, kwargs):
     return (agent_name, tools)
 
 
+def _simplify_langchain_tools(tools):
+    # type: (Any) -> Optional[List[Any]]
+    """Parse and simplify tools into a cleaner format."""
+    if not tools:
+        return None
+
+    if not isinstance(tools, (list, tuple)):
+        return None
+
+    simplified_tools = []
+    for tool in tools:
+        try:
+            if isinstance(tool, dict):
+
+                if "function" in tool and isinstance(tool["function"], dict):
+                    func = tool["function"]
+                    simplified_tool = {
+                        "name": func.get("name"),
+                        "description": func.get("description"),
+                    }
+                    if simplified_tool["name"]:
+                        simplified_tools.append(simplified_tool)
+                elif "name" in tool:
+                    simplified_tool = {
+                        "name": tool.get("name"),
+                        "description": tool.get("description"),
+                    }
+                    simplified_tools.append(simplified_tool)
+                else:
+                    name = (
+                        tool.get("name")
+                        or tool.get("tool_name")
+                        or tool.get("function_name")
+                    )
+                    if name:
+                        simplified_tools.append(
+                            {
+                                "name": name,
+                                "description": tool.get("description")
+                                or tool.get("desc"),
+                            }
+                        )
+            elif hasattr(tool, "name"):
+                simplified_tool = {
+                    "name": getattr(tool, "name", None),
+                    "description": getattr(tool, "description", None)
+                    or getattr(tool, "desc", None),
+                }
+                if simplified_tool["name"]:
+                    simplified_tools.append(simplified_tool)
+            elif hasattr(tool, "__name__"):
+                simplified_tools.append(
+                    {
+                        "name": tool.__name__,
+                        "description": getattr(tool, "__doc__", None),
+                    }
+                )
+            else:
+                tool_str = str(tool)
+                if tool_str and tool_str != "":
+                    simplified_tools.append({"name": tool_str, "description": None})
+        except Exception:
+            continue
+
+    return simplified_tools if simplified_tools else None
+
+
+def _set_tools_on_span(span, tools):
+    # type: (Span, Any) -> None
+    """Set available tools data on a span if tools are provided."""
+    if tools is not None:
+        simplified_tools = _simplify_langchain_tools(tools)
+        if simplified_tools:
+            set_data_normalized(
+                span,
+                SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS,
+                simplified_tools,
+                unpack=False,
+            )
+
+
 def _wrap_configure(f):
     # type: (Callable[..., Any]) -> Callable[..., Any]
 
@@ -601,7 +687,7 @@ def _wrap_configure(f):
             ]
         elif isinstance(local_callbacks, BaseCallbackHandler):
             local_callbacks = [local_callbacks, sentry_handler]
-        else:
+        else:
             local_callbacks = [*local_callbacks, sentry_handler]
 
         return f(
@@ -638,10 +724,7 @@ def _wrap_agent_executor_invoke(f):
             span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
             span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, False)
 
-
-            set_data_normalized(
-                span, SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, tools, unpack=False
-            )
+            _set_tools_on_span(span, tools)
 
             # Run the agent
             result = f(self, *args, **kwargs)
@@ -653,11 +736,7 @@ def _wrap_agent_executor_invoke(f):
                 and integration.include_prompts
             ):
                 set_data_normalized(
-                    span,
-                    SPANDATA.GEN_AI_REQUEST_MESSAGES,
-                    [
-                        input,
-                    ],
+                    span, SPANDATA.GEN_AI_REQUEST_MESSAGES, [input], unpack=False
                 )
 
             output = result.get("output")
@@ -666,7 +745,7 @@ def _wrap_agent_executor_invoke(f):
                 and should_send_default_pii()
                 and integration.include_prompts
             ):
-                span.set_data(SPANDATA.GEN_AI_RESPONSE_TEXT, output)
+                set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, output)
 
             return result
 
@@ -698,10 +777,7 @@ def _wrap_agent_executor_stream(f):
            span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
            span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
 
-
-           set_data_normalized(
-               span, SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, tools, unpack=False
-           )
+           _set_tools_on_span(span, tools)
 
            input = args[0].get("input") if len(args) >= 1 else None
            if (
@@ -710,11 +786,7 @@ def _wrap_agent_executor_stream(f):
               and integration.include_prompts
           ):
               set_data_normalized(
-                  span,
-                  SPANDATA.GEN_AI_REQUEST_MESSAGES,
-                  [
-                      input,
-                  ],
+                  span, SPANDATA.GEN_AI_REQUEST_MESSAGES, [input], unpack=False
               )
 
           # Run the agent
@@ -737,7 +809,7 @@ def _wrap_agent_executor_stream(f):
                 and should_send_default_pii()
                 and integration.include_prompts
             ):
-                span.set_data(SPANDATA.GEN_AI_RESPONSE_TEXT, output)
+                set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, output)
 
             span.__exit__(None, None, None)
@@ -756,7 +828,7 @@ def _wrap_agent_executor_stream(f):
                 and should_send_default_pii()
                 and integration.include_prompts
             ):
-                span.set_data(SPANDATA.GEN_AI_RESPONSE_TEXT, output)
+                set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, output)
 
             span.__exit__(None, None, None)
sentry_sdk/integrations/langgraph.py
ADDED
@@ -0,0 +1,321 @@
+from functools import wraps
+from typing import Any, Callable, List, Optional
+
+import sentry_sdk
+from sentry_sdk.ai.utils import set_data_normalized
+from sentry_sdk.consts import OP, SPANDATA
+from sentry_sdk.integrations import DidNotEnable, Integration
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.utils import safe_serialize
+
+
+try:
+    from langgraph.graph import StateGraph
+    from langgraph.pregel import Pregel
+except ImportError:
+    raise DidNotEnable("langgraph not installed")
+
+
+class LanggraphIntegration(Integration):
+    identifier = "langgraph"
+    origin = f"auto.ai.{identifier}"
+
+    def __init__(self, include_prompts=True):
+        # type: (LanggraphIntegration, bool) -> None
+        self.include_prompts = include_prompts
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        # LangGraph lets users create agents using a StateGraph or the Functional API.
+        # StateGraphs are then compiled to a CompiledStateGraph. Both CompiledStateGraph and
+        # the functional API execute on a Pregel instance. Pregel is the runtime for the graph
+        # and the invocation happens on Pregel, so patching the invoke methods takes care of both.
+        # The streaming methods are not patched, because due to some internal reasons, LangGraph
+        # will automatically patch the streaming methods to run through invoke, and by doing this
+        # we prevent duplicate spans for invocations.
+        StateGraph.compile = _wrap_state_graph_compile(StateGraph.compile)
+        if hasattr(Pregel, "invoke"):
+            Pregel.invoke = _wrap_pregel_invoke(Pregel.invoke)
+        if hasattr(Pregel, "ainvoke"):
+            Pregel.ainvoke = _wrap_pregel_ainvoke(Pregel.ainvoke)
+
+
+def _get_graph_name(graph_obj):
+    # type: (Any) -> Optional[str]
+    for attr in ["name", "graph_name", "__name__", "_name"]:
+        if hasattr(graph_obj, attr):
+            name = getattr(graph_obj, attr)
+            if name and isinstance(name, str):
+                return name
+    return None
+
+
+def _normalize_langgraph_message(message):
+    # type: (Any) -> Any
+    if not hasattr(message, "content"):
+        return None
+
+    parsed = {"role": getattr(message, "type", None), "content": message.content}
+
+    for attr in ["name", "tool_calls", "function_call", "tool_call_id"]:
+        if hasattr(message, attr):
+            value = getattr(message, attr)
+            if value is not None:
+                parsed[attr] = value
+
+    return parsed
+
+
+def _parse_langgraph_messages(state):
+    # type: (Any) -> Optional[List[Any]]
+    if not state:
+        return None
+
+    messages = None
+
+    if isinstance(state, dict):
+        messages = state.get("messages")
+    elif hasattr(state, "messages"):
+        messages = state.messages
+    elif hasattr(state, "get") and callable(state.get):
+        try:
+            messages = state.get("messages")
+        except Exception:
+            pass
+
+    if not messages or not isinstance(messages, (list, tuple)):
+        return None
+
+    normalized_messages = []
+    for message in messages:
+        try:
+            normalized = _normalize_langgraph_message(message)
+            if normalized:
+                normalized_messages.append(normalized)
+        except Exception:
+            continue
+
+    return normalized_messages if normalized_messages else None
+
+
+def _wrap_state_graph_compile(f):
+    # type: (Callable[..., Any]) -> Callable[..., Any]
+    @wraps(f)
+    def new_compile(self, *args, **kwargs):
+        # type: (Any, Any, Any) -> Any
+        integration = sentry_sdk.get_client().get_integration(LanggraphIntegration)
+        if integration is None:
+            return f(self, *args, **kwargs)
+        with sentry_sdk.start_span(
+            op=OP.GEN_AI_CREATE_AGENT,
+            origin=LanggraphIntegration.origin,
+        ) as span:
+            compiled_graph = f(self, *args, **kwargs)
+
+            compiled_graph_name = getattr(compiled_graph, "name", None)
+            span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "create_agent")
+            span.set_data(SPANDATA.GEN_AI_AGENT_NAME, compiled_graph_name)
+
+            if compiled_graph_name:
+                span.description = f"create_agent {compiled_graph_name}"
+            else:
+                span.description = "create_agent"
+
+            if kwargs.get("model", None) is not None:
+                span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, kwargs.get("model"))
+
+            tools = None
+            get_graph = getattr(compiled_graph, "get_graph", None)
+            if get_graph and callable(get_graph):
+                graph_obj = compiled_graph.get_graph()
+                nodes = getattr(graph_obj, "nodes", None)
+                if nodes and isinstance(nodes, dict):
+                    tools_node = nodes.get("tools")
+                    if tools_node:
+                        data = getattr(tools_node, "data", None)
+                        if data and hasattr(data, "tools_by_name"):
+                            tools = list(data.tools_by_name.keys())
+
+            if tools is not None:
+                span.set_data(SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, tools)
+
+            return compiled_graph
+
+    return new_compile
+
+
+def _wrap_pregel_invoke(f):
+    # type: (Callable[..., Any]) -> Callable[..., Any]
+
+    @wraps(f)
+    def new_invoke(self, *args, **kwargs):
+        # type: (Any, Any, Any) -> Any
+        integration = sentry_sdk.get_client().get_integration(LanggraphIntegration)
+        if integration is None:
+            return f(self, *args, **kwargs)
+
+        graph_name = _get_graph_name(self)
+        span_name = (
+            f"invoke_agent {graph_name}".strip() if graph_name else "invoke_agent"
+        )
+
+        with sentry_sdk.start_span(
+            op=OP.GEN_AI_INVOKE_AGENT,
+            name=span_name,
+            origin=LanggraphIntegration.origin,
+        ) as span:
+            if graph_name:
+                span.set_data(SPANDATA.GEN_AI_PIPELINE_NAME, graph_name)
+                span.set_data(SPANDATA.GEN_AI_AGENT_NAME, graph_name)
+
+            span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
+
+            # Store input messages to later compare with output
+            input_messages = None
+            if (
+                len(args) > 0
+                and should_send_default_pii()
+                and integration.include_prompts
+            ):
+                input_messages = _parse_langgraph_messages(args[0])
+                if input_messages:
+                    set_data_normalized(
+                        span,
+                        SPANDATA.GEN_AI_REQUEST_MESSAGES,
+                        input_messages,
+                        unpack=False,
+                    )
+
+            result = f(self, *args, **kwargs)
+
+            _set_response_attributes(span, input_messages, result, integration)
+
+            return result
+
+    return new_invoke
+
+
+def _wrap_pregel_ainvoke(f):
+    # type: (Callable[..., Any]) -> Callable[..., Any]
+
+    @wraps(f)
+    async def new_ainvoke(self, *args, **kwargs):
+        # type: (Any, Any, Any) -> Any
+        integration = sentry_sdk.get_client().get_integration(LanggraphIntegration)
+        if integration is None:
+            return await f(self, *args, **kwargs)
+
+        graph_name = _get_graph_name(self)
+        span_name = (
+            f"invoke_agent {graph_name}".strip() if graph_name else "invoke_agent"
+        )
+
+        with sentry_sdk.start_span(
+            op=OP.GEN_AI_INVOKE_AGENT,
+            name=span_name,
+            origin=LanggraphIntegration.origin,
+        ) as span:
+            if graph_name:
+                span.set_data(SPANDATA.GEN_AI_PIPELINE_NAME, graph_name)
+                span.set_data(SPANDATA.GEN_AI_AGENT_NAME, graph_name)
+
+            span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
+
+            input_messages = None
+            if (
+                len(args) > 0
+                and should_send_default_pii()
+                and integration.include_prompts
+            ):
+                input_messages = _parse_langgraph_messages(args[0])
+                if input_messages:
+                    set_data_normalized(
+                        span,
+                        SPANDATA.GEN_AI_REQUEST_MESSAGES,
+                        input_messages,
+                        unpack=False,
+                    )
+
+            result = await f(self, *args, **kwargs)
+
+            _set_response_attributes(span, input_messages, result, integration)
+
+            return result
+
+    return new_ainvoke
+
+
+def _get_new_messages(input_messages, output_messages):
+    # type: (Optional[List[Any]], Optional[List[Any]]) -> Optional[List[Any]]
+    """Extract only the new messages added during this invocation."""
+    if not output_messages:
+        return None
+
+    if not input_messages:
+        return output_messages
+
+    # only return the new messages, aka the output messages that are not in the input messages
+    input_count = len(input_messages)
+    new_messages = (
+        output_messages[input_count:] if len(output_messages) > input_count else []
+    )
+
+    return new_messages if new_messages else None
+
+
+def _extract_llm_response_text(messages):
+    # type: (Optional[List[Any]]) -> Optional[str]
+    if not messages:
+        return None
+
+    for message in reversed(messages):
+        if isinstance(message, dict):
+            role = message.get("role")
+            if role in ["assistant", "ai"]:
+                content = message.get("content")
+                if content and isinstance(content, str):
+                    return content
+
+    return None
+
+
+def _extract_tool_calls(messages):
+    # type: (Optional[List[Any]]) -> Optional[List[Any]]
+    if not messages:
+        return None
+
+    tool_calls = []
+    for message in messages:
+        if isinstance(message, dict):
+            msg_tool_calls = message.get("tool_calls")
+            if msg_tool_calls and isinstance(msg_tool_calls, list):
+                tool_calls.extend(msg_tool_calls)
+
+    return tool_calls if tool_calls else None
+
+
+def _set_response_attributes(span, input_messages, result, integration):
+    # type: (Any, Optional[List[Any]], Any, LanggraphIntegration) -> None
+    if not (should_send_default_pii() and integration.include_prompts):
+        return
+
+    parsed_response_messages = _parse_langgraph_messages(result)
+    new_messages = _get_new_messages(input_messages, parsed_response_messages)
+
+    llm_response_text = _extract_llm_response_text(new_messages)
+    if llm_response_text:
+        set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, llm_response_text)
+    elif new_messages:
+        set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, new_messages)
+    else:
+        set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, result)
+
+    tool_calls = _extract_tool_calls(new_messages)
+    if tool_calls:
+        set_data_normalized(
+            span,
+            SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
+            safe_serialize(tool_calls),
+            unpack=False,
+        )
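Taken together, the integration patches StateGraph.compile (emitting a gen_ai.create_agent span) and Pregel.invoke/ainvoke (emitting gen_ai.invoke_agent spans). A usage sketch: the graph itself is ordinary LangGraph API, the DSN is a placeholder, and the compile(name=...) kwarg is assumed per langgraph 0.6:

from typing import TypedDict

import sentry_sdk
from langgraph.graph import StateGraph, START, END

sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    traces_sample_rate=1.0,
    send_default_pii=True,  # message capture also requires include_prompts (default True)
)

class State(TypedDict):
    messages: list

def respond(state):
    # A stand-in node; a real agent would call an LLM here.
    return {"messages": state["messages"] + [{"role": "ai", "content": "hello"}]}

graph = StateGraph(State)
graph.add_node("respond", respond)
graph.add_edge(START, "respond")
graph.add_edge("respond", END)

app = graph.compile(name="demo_agent")  # patched: emits a gen_ai.create_agent span
app.invoke({"messages": [{"role": "user", "content": "hi"}]})  # gen_ai.invoke_agent span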
sentry_sdk/integrations/openai.py
CHANGED
@@ -179,7 +179,9 @@ def _set_input_data(span, kwargs, operation, integration):
         and should_send_default_pii()
         and integration.include_prompts
     ):
-        set_data_normalized(span, SPANDATA.GEN_AI_REQUEST_MESSAGES, messages)
+        set_data_normalized(
+            span, SPANDATA.GEN_AI_REQUEST_MESSAGES, messages, unpack=False
+        )
 
     # Input attributes: Common
     set_data_normalized(span, SPANDATA.GEN_AI_SYSTEM, "openai")
@@ -227,25 +229,46 @@ def _set_output_data(span, response, kwargs, integration, finish_span=True):
         if should_send_default_pii() and integration.include_prompts:
             response_text = [choice.message.dict() for choice in response.choices]
             if len(response_text) > 0:
-                set_data_normalized(
-                    span,
-                    SPANDATA.GEN_AI_RESPONSE_TEXT,
-                    safe_serialize(response_text),
-                )
+                set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, response_text)
+
         _calculate_token_usage(messages, response, span, None, integration.count_tokens)
+
         if finish_span:
             span.__exit__(None, None, None)
 
     elif hasattr(response, "output"):
         if should_send_default_pii() and integration.include_prompts:
-            response_text = [item.to_dict() for item in response.output]
-            if len(response_text) > 0:
+            output_messages = {
+                "response": [],
+                "tool": [],
+            }  # type: (dict[str, list[Any]])
+
+            for output in response.output:
+                if output.type == "function_call":
+                    output_messages["tool"].append(output.dict())
+                elif output.type == "message":
+                    for output_message in output.content:
+                        try:
+                            output_messages["response"].append(output_message.text)
+                        except AttributeError:
+                            # Unknown output message type, just return the json
+                            output_messages["response"].append(output_message.dict())
+
+            if len(output_messages["tool"]) > 0:
                 set_data_normalized(
                     span,
-                    SPANDATA.GEN_AI_RESPONSE_TEXT,
-                    safe_serialize(response_text),
+                    SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
+                    output_messages["tool"],
+                    unpack=False,
+                )
+
+            if len(output_messages["response"]) > 0:
+                set_data_normalized(
+                    span, SPANDATA.GEN_AI_RESPONSE_TEXT, output_messages["response"]
                 )
+
         _calculate_token_usage(messages, response, span, None, integration.count_tokens)
+
         if finish_span:
             span.__exit__(None, None, None)
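For the Responses API branch, the new code walks response.output and sorts items by type: function_call items become gen_ai.response.tool_calls, while message items contribute their text parts to gen_ai.response.text. A sketch with hypothetical stand-in objects (the real ones come from the openai client):

from types import SimpleNamespace

# Hypothetical stand-ins for Responses API output items (sketch only).
output = [
    SimpleNamespace(type="function_call",
                    dict=lambda: {"name": "get_weather", "arguments": "{}"}),
    SimpleNamespace(type="message", content=[SimpleNamespace(text="It is sunny.")]),
]

collected = {"response": [], "tool": []}
for item in output:
    if item.type == "function_call":
        collected["tool"].append(item.dict())        # -> gen_ai.response.tool_calls
    elif item.type == "message":
        for part in item.content:
            collected["response"].append(part.text)  # -> gen_ai.response.text

print(collected)
# {'response': ['It is sunny.'], 'tool': [{'name': 'get_weather', 'arguments': '{}'}]}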
sentry_sdk/integrations/openai_agents/utils.py
CHANGED
@@ -1,4 +1,5 @@
 import sentry_sdk
+from sentry_sdk.ai.utils import set_data_normalized
 from sentry_sdk.consts import SPANDATA
 from sentry_sdk.integrations import DidNotEnable
 from sentry_sdk.scope import should_send_default_pii
@@ -127,7 +128,9 @@ def _set_input_data(span, get_response_kwargs):
         if len(messages) > 0:
             request_messages.append({"role": role, "content": messages})
 
-    span.set_data(SPANDATA.GEN_AI_REQUEST_MESSAGES, safe_serialize(request_messages))
+    set_data_normalized(
+        span, SPANDATA.GEN_AI_REQUEST_MESSAGES, request_messages, unpack=False
+    )
 
 
 def _set_output_data(span, result):
@@ -157,6 +160,6 @@ def _set_output_data(span, result):
         )
 
     if len(output_messages["response"]) > 0:
-        span.set_data(
-            SPANDATA.GEN_AI_RESPONSE_TEXT, safe_serialize(output_messages["response"])
+        set_data_normalized(
+            span, SPANDATA.GEN_AI_RESPONSE_TEXT, output_messages["response"]
         )
{sentry_sdk-2.36.0.dist-info → sentry_sdk-2.37.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: sentry-sdk
-Version: 2.36.0
+Version: 2.37.0
 Summary: Python client for Sentry (https://sentry.io)
 Home-page: https://github.com/getsentry/sentry-python
 Author: Sentry Team and Contributors
@@ -72,6 +72,8 @@ Provides-Extra: huggingface-hub
 Requires-Dist: huggingface_hub>=0.22; extra == "huggingface-hub"
 Provides-Extra: langchain
 Requires-Dist: langchain>=0.0.210; extra == "langchain"
+Provides-Extra: langgraph
+Requires-Dist: langgraph>=0.6.6; extra == "langgraph"
 Provides-Extra: launchdarkly
 Requires-Dist: launchdarkly-server-sdk>=9.8.0; extra == "launchdarkly"
 Provides-Extra: litestar
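The new extra means the pinned dependency can be pulled in together with the SDK: pip install "sentry-sdk[langgraph]" installs langgraph>=0.6.6 alongside sentry-sdk 2.37.0.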
{sentry_sdk-2.36.0.dist-info → sentry_sdk-2.37.0.dist-info}/RECORD
CHANGED
@@ -9,7 +9,7 @@ sentry_sdk/_werkzeug.py,sha256=m3GPf-jHd8v3eVOfBHaKw5f0uHoLkXrSO1EcY-8EisY,3734
 sentry_sdk/api.py,sha256=OkwQ2tA5YASJ77wLOteUdv_woPF4wL_JTOAMxe9z8k4,15282
 sentry_sdk/attachments.py,sha256=0Dylhm065O6hNFjB40fWCd5Hg4qWSXndmi1TPWglZkI,3109
 sentry_sdk/client.py,sha256=oQcolwFdLvuX4huUaCcpgABy3M5Yb4IhzymlzyrqfkE,38860
-sentry_sdk/consts.py,sha256=
+sentry_sdk/consts.py,sha256=zikq23bebhFyfrsj65Yi8y_7VSP_HMiyXFdWtbM0zUA,49815
 sentry_sdk/debug.py,sha256=ddBehQlAuQC1sg1XO-N4N3diZ0x0iT5RWJwFdrtcsjw,1019
 sentry_sdk/envelope.py,sha256=Mgcib0uLm_5tSVzOrznRLdK9B3CjQ6TEgM1ZIZIfjWo,10355
 sentry_sdk/feature_flags.py,sha256=99JRig6TBkrkBzVCKqYcmVgjsuA_Hk-ul7jFHGhJplc,2233
@@ -32,12 +32,12 @@ sentry_sdk/utils.py,sha256=Ys7lnnvXZMIR9dcoT30CVxpUx2NnZifSy-TUNrCtMQA,61575
 sentry_sdk/worker.py,sha256=VSMaigRMbInVyupSFpBC42bft2oIViea-0C_d9ThnIo,4464
 sentry_sdk/ai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 sentry_sdk/ai/monitoring.py,sha256=bS_KneWCAL9ehml5XiyficoPVx4DUUG6acbH3cjP3I8,5057
-sentry_sdk/ai/utils.py,sha256=
+sentry_sdk/ai/utils.py,sha256=JPK6EM0ZcjWJU1IP2k40Jxf0yD_-ox_FlYxVIn7LVR4,1351
 sentry_sdk/crons/__init__.py,sha256=3Zt6g1-pZZ12uRKKsC8QLm3XgJ4K1VYxgVpNNUygOZY,221
 sentry_sdk/crons/api.py,sha256=mk-UB8Im2LU2rJFdE-TV302EaKnf8kAjwEL0bIV0Hzc,1767
 sentry_sdk/crons/consts.py,sha256=dXqJk5meBSu5rjlGpqAOlkpACnuUi7svQnAFoy1ZNUU,87
 sentry_sdk/crons/decorator.py,sha256=UrjeIqBCbvsuKrfjGkKJbbLBvjw2TQvDWcTO7WwAmrI,3913
-sentry_sdk/integrations/__init__.py,sha256=
+sentry_sdk/integrations/__init__.py,sha256=NNRTFZUcQacKGRCWT8IuVZk2wtifEhnQ34-6mHuekvw,10339
 sentry_sdk/integrations/_asgi_common.py,sha256=Ypg7IctB3iPPY60ebVlzChzgT8GeGpZ0YH8VvJNDlEY,3187
 sentry_sdk/integrations/_wsgi_common.py,sha256=A1-X7l1pZCcrbUhRHkmdKiK_EemEZjn7xToJIvlEuFM,7558
 sentry_sdk/integrations/aiohttp.py,sha256=_rfDKx1arvVQwcC20vh7HG80p8XtgzqKB3iBuPYZy8A,12895
@@ -71,13 +71,14 @@ sentry_sdk/integrations/graphene.py,sha256=I6ZJ8Apd9dO9XPVvZY7I46-v1eXOW1C1rAkWw
 sentry_sdk/integrations/httpx.py,sha256=WwUulqzBLoGGqWUUdQg_MThwQUKzBXnA-m3g_1GOpCE,5866
 sentry_sdk/integrations/huey.py,sha256=wlyxjeWqqJp1X5S3neD5FiZjXcyznm1dl8_u1wIo76U,5443
 sentry_sdk/integrations/huggingface_hub.py,sha256=ypTn17T0vufQwi7ODXONFkB8fMjUrU5b4Q6JZ34bnA4,6717
-sentry_sdk/integrations/langchain.py,sha256=
+sentry_sdk/integrations/langchain.py,sha256=hWni8-sy8gu4mk10RE2yM92XdQUFGI9gochaK0jMlRg,29209
+sentry_sdk/integrations/langgraph.py,sha256=YyDDc14gFCNVuqVmKwX8GRQ17T17WOx2SqqD4IHROPs,11015
 sentry_sdk/integrations/launchdarkly.py,sha256=bvtExuj68xPXZFsQeWTDR-ZBqP087tPuVzP1bNAOZHc,1935
 sentry_sdk/integrations/litestar.py,sha256=jao0f8v5JQagkBg15dUJTdWGPxpS3LmOV301-lwGkGc,11815
 sentry_sdk/integrations/logging.py,sha256=4JC2ehLqd5Tz_rad8YVb9KhZnPcDzLxLh-AjopyNVEc,13905
 sentry_sdk/integrations/loguru.py,sha256=fgivPdQn3rmsMeInUd2wbNlbXPAH9MKhjaqytRVKnsI,6215
 sentry_sdk/integrations/modules.py,sha256=vzLx3Erg77Vl4mnUvAgTg_3teAuWy7zylFpAidBI9I0,820
-sentry_sdk/integrations/openai.py,sha256=
+sentry_sdk/integrations/openai.py,sha256=MtvS7FM41etRn5kPqb37qSflo876L8mcV2skEFwBFlI,23890
 sentry_sdk/integrations/openfeature.py,sha256=-vvdrN4fK0Xhu2ip41bkPIPEqdzv8xzmLu9wRlI2xPA,1131
 sentry_sdk/integrations/pure_eval.py,sha256=OvT76XvllQ_J6ABu3jVNU6KD2QAxnXMtTZ7hqhXNhpY,4581
 sentry_sdk/integrations/pymongo.py,sha256=cPpMGEbXHlV6HTHgmIDL1F-x3w7ZMROXVb4eUhLs3bw,6380
@@ -123,7 +124,7 @@ sentry_sdk/integrations/grpc/aio/client.py,sha256=csOwlJb7fg9fBnzeNHxr-qpZEmU97I
 sentry_sdk/integrations/grpc/aio/server.py,sha256=SCkdikPZRdWyrlnZewsSGpPk4v6AsdSApVAbO-lf_Lk,4019
 sentry_sdk/integrations/openai_agents/__init__.py,sha256=-ydqG0sFIrvJlT9JHO58EZpCAzyy9J59Av8dxn0fHuw,1424
 sentry_sdk/integrations/openai_agents/consts.py,sha256=PTb3vlqkuMPktu21ALK72o5WMIX4-cewTEiTRdHKFdQ,38
-sentry_sdk/integrations/openai_agents/utils.py,sha256=
+sentry_sdk/integrations/openai_agents/utils.py,sha256=7OiJc2BW9yvuVZBd8o7A6sWq0jy2ri9dWwvqMjJG2cw,5571
 sentry_sdk/integrations/openai_agents/patches/__init__.py,sha256=I7C9JZ70Mf8PV3wPdFsxTqvcYl4TYUgSZYfNU2Spb7Y,231
 sentry_sdk/integrations/openai_agents/patches/agent_run.py,sha256=qPmZ--UMQpExxYGEbaDJ7tY_H9VQ6gv0lpim3_impWk,5733
 sentry_sdk/integrations/openai_agents/patches/models.py,sha256=DtwqCmSsYFlhRZquKM2jiTOnnAg97eyCTtJYZkWqdww,1405
@@ -159,9 +160,9 @@ sentry_sdk/profiler/__init__.py,sha256=3PI3bHk9RSkkOXZKN84DDedk_7M65EiqqaIGo-DYs
 sentry_sdk/profiler/continuous_profiler.py,sha256=s0DHkj3RZYRg9HnQQC0G44ku6DaFqRy30fZTMtTYvIs,22828
 sentry_sdk/profiler/transaction_profiler.py,sha256=e3MsUqs-YIp6-nmzpmBYGoWWIF7RyuSGu24Dj-8GXAU,27970
 sentry_sdk/profiler/utils.py,sha256=G5s4tYai9ATJqcHrQ3bOIxlK6jIaHzELrDtU5k3N4HI,6556
-sentry_sdk-2.36.0.dist-info/licenses/LICENSE,sha256=
-sentry_sdk-2.36.0.dist-info/METADATA,sha256=
-sentry_sdk-2.36.0.dist-info/WHEEL,sha256=
-sentry_sdk-2.36.0.dist-info/entry_points.txt,sha256=
-sentry_sdk-2.36.0.dist-info/top_level.txt,sha256=
-sentry_sdk-2.36.0.dist-info/RECORD,,
+sentry_sdk-2.37.0.dist-info/licenses/LICENSE,sha256=KhQNZg9GKBL6KQvHQNBGMxJsXsRdhLebVp4Sew7t3Qs,1093
+sentry_sdk-2.37.0.dist-info/METADATA,sha256=fo-BB6ui6_ktMw8ywCcwBe9RUT78Bzv4rd1DSu-ZUEw,10358
+sentry_sdk-2.37.0.dist-info/WHEEL,sha256=JNWh1Fm1UdwIQV075glCn4MVuCRs0sotJIq-J6rbxCU,109
+sentry_sdk-2.37.0.dist-info/entry_points.txt,sha256=qacZEz40UspQZD1IukCXykx0JtImqGDOctS5KfOLTko,91
+sentry_sdk-2.37.0.dist-info/top_level.txt,sha256=XrQz30XE9FKXSY_yGLrd9bsv2Rk390GTDJOSujYaMxI,11
+sentry_sdk-2.37.0.dist-info/RECORD,,
{sentry_sdk-2.36.0.dist-info → sentry_sdk-2.37.0.dist-info}/WHEEL
File without changes
{sentry_sdk-2.36.0.dist-info → sentry_sdk-2.37.0.dist-info}/entry_points.txt
File without changes
{sentry_sdk-2.36.0.dist-info → sentry_sdk-2.37.0.dist-info}/licenses/LICENSE
File without changes
{sentry_sdk-2.36.0.dist-info → sentry_sdk-2.37.0.dist-info}/top_level.txt
File without changes