sentry-sdk 2.42.0__py2.py3-none-any.whl → 2.43.0__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of sentry-sdk might be problematic. Click here for more details.

  Files changed (46)
  1. sentry_sdk/__init__.py +2 -0
  2. sentry_sdk/_metrics_batcher.py +1 -1
  3. sentry_sdk/ai/utils.py +49 -2
  4. sentry_sdk/client.py +18 -1
  5. sentry_sdk/consts.py +87 -2
  6. sentry_sdk/integrations/__init__.py +2 -0
  7. sentry_sdk/integrations/anthropic.py +8 -5
  8. sentry_sdk/integrations/aws_lambda.py +2 -0
  9. sentry_sdk/integrations/django/caching.py +16 -3
  10. sentry_sdk/integrations/gcp.py +6 -1
  11. sentry_sdk/integrations/google_genai/__init__.py +3 -0
  12. sentry_sdk/integrations/google_genai/utils.py +16 -6
  13. sentry_sdk/integrations/langchain.py +49 -23
  14. sentry_sdk/integrations/langgraph.py +25 -11
  15. sentry_sdk/integrations/litellm.py +17 -6
  16. sentry_sdk/integrations/mcp.py +552 -0
  17. sentry_sdk/integrations/openai.py +33 -9
  18. sentry_sdk/integrations/openai_agents/__init__.py +2 -0
  19. sentry_sdk/integrations/openai_agents/patches/__init__.py +1 -0
  20. sentry_sdk/integrations/openai_agents/patches/error_tracing.py +77 -0
  21. sentry_sdk/integrations/pydantic_ai/__init__.py +47 -0
  22. sentry_sdk/integrations/pydantic_ai/consts.py +1 -0
  23. sentry_sdk/integrations/pydantic_ai/patches/__init__.py +4 -0
  24. sentry_sdk/integrations/pydantic_ai/patches/agent_run.py +217 -0
  25. sentry_sdk/integrations/pydantic_ai/patches/graph_nodes.py +105 -0
  26. sentry_sdk/integrations/pydantic_ai/patches/model_request.py +35 -0
  27. sentry_sdk/integrations/pydantic_ai/patches/tools.py +75 -0
  28. sentry_sdk/integrations/pydantic_ai/spans/__init__.py +3 -0
  29. sentry_sdk/integrations/pydantic_ai/spans/ai_client.py +253 -0
  30. sentry_sdk/integrations/pydantic_ai/spans/execute_tool.py +49 -0
  31. sentry_sdk/integrations/pydantic_ai/spans/invoke_agent.py +112 -0
  32. sentry_sdk/integrations/pydantic_ai/utils.py +175 -0
  33. sentry_sdk/integrations/redis/utils.py +4 -4
  34. sentry_sdk/integrations/starlette.py +1 -1
  35. sentry_sdk/integrations/strawberry.py +10 -9
  36. sentry_sdk/logger.py +14 -2
  37. sentry_sdk/scope.py +13 -6
  38. sentry_sdk/tracing_utils.py +1 -1
  39. sentry_sdk/utils.py +34 -2
  40. {sentry_sdk-2.42.0.dist-info → sentry_sdk-2.43.0.dist-info}/METADATA +6 -1
  41. {sentry_sdk-2.42.0.dist-info → sentry_sdk-2.43.0.dist-info}/RECORD +46 -32
  42. /sentry_sdk/{_metrics.py → metrics.py} +0 -0
  43. {sentry_sdk-2.42.0.dist-info → sentry_sdk-2.43.0.dist-info}/WHEEL +0 -0
  44. {sentry_sdk-2.42.0.dist-info → sentry_sdk-2.43.0.dist-info}/entry_points.txt +0 -0
  45. {sentry_sdk-2.42.0.dist-info → sentry_sdk-2.43.0.dist-info}/licenses/LICENSE +0 -0
  46. {sentry_sdk-2.42.0.dist-info → sentry_sdk-2.43.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,77 @@
1
+ from functools import wraps
2
+
3
+ import sentry_sdk
4
+ from sentry_sdk.consts import SPANSTATUS
5
+ from sentry_sdk.tracing_utils import set_span_errored
6
+
7
+ from typing import TYPE_CHECKING
8
+
9
+ if TYPE_CHECKING:
10
+ from typing import Any, Callable, Optional
11
+
12
+
13
+ def _patch_error_tracing():
14
+ # type: () -> None
15
+ """
16
+ Patches agents error tracing function to inject our span error logic
17
+ when a tool execution fails.
18
+
19
+ In newer versions, the function is at: agents.util._error_tracing.attach_error_to_current_span
20
+ In older versions, it was at: agents._utils.attach_error_to_current_span
21
+
22
+ This works even when the module or function doesn't exist.
23
+ """
24
+ error_tracing_module = None
25
+
26
+ # Try newer location first (agents.util._error_tracing)
27
+ try:
28
+ from agents.util import _error_tracing
29
+
30
+ error_tracing_module = _error_tracing
31
+ except (ImportError, AttributeError):
32
+ pass
33
+
34
+ # Try older location (agents._utils)
35
+ if error_tracing_module is None:
36
+ try:
37
+ import agents._utils
38
+
39
+ error_tracing_module = agents._utils
40
+ except (ImportError, AttributeError):
41
+ # Module doesn't exist in either location, nothing to patch
42
+ return
43
+
44
+ # Check if the function exists
45
+ if not hasattr(error_tracing_module, "attach_error_to_current_span"):
46
+ return
47
+
48
+ original_attach_error = error_tracing_module.attach_error_to_current_span
49
+
50
+ @wraps(original_attach_error)
51
+ def sentry_attach_error_to_current_span(error, *args, **kwargs):
52
+ # type: (Any, *Any, **Any) -> Any
53
+ """
54
+ Wraps agents' error attachment to also set Sentry span status to error.
55
+ This allows us to properly track tool execution errors even though
56
+ the agents library swallows exceptions.
57
+ """
58
+ # Set the current Sentry span to errored
59
+ current_span = sentry_sdk.get_current_span()
60
+ if current_span is not None:
61
+ set_span_errored(current_span)
62
+ current_span.set_data("span.status", "error")
63
+
64
+ # Optionally capture the error details if we have them
65
+ if hasattr(error, "__class__"):
66
+ current_span.set_data("error.type", error.__class__.__name__)
67
+ if hasattr(error, "__str__"):
68
+ error_message = str(error)
69
+ if error_message:
70
+ current_span.set_data("error.message", error_message)
71
+
72
+ # Call the original function
73
+ return original_attach_error(error, *args, **kwargs)
74
+
75
+ error_tracing_module.attach_error_to_current_span = (
76
+ sentry_attach_error_to_current_span
77
+ )
@@ -0,0 +1,47 @@
1
+ from sentry_sdk.integrations import DidNotEnable, Integration
2
+
3
+
4
+ try:
5
+ import pydantic_ai # type: ignore
6
+ except ImportError:
7
+ raise DidNotEnable("pydantic-ai not installed")
8
+
9
+
10
+ from .patches import (
11
+ _patch_agent_run,
12
+ _patch_graph_nodes,
13
+ _patch_model_request,
14
+ _patch_tool_execution,
15
+ )
16
+
17
+
18
class PydanticAIIntegration(Integration):
    """Sentry integration for pydantic-ai agents, models, and tools."""

    identifier = "pydantic_ai"
    origin = f"auto.ai.{identifier}"

    def __init__(self, include_prompts=True):
        # type: (bool) -> None
        """
        Initialize the Pydantic AI integration.

        Args:
            include_prompts: Whether to include prompts and messages in span data.
                Requires send_default_pii=True. Defaults to True.
        """
        self.include_prompts = include_prompts

    @staticmethod
    def setup_once():
        # type: () -> None
        """
        Set up the pydantic-ai integration.

        Installs patches that create Sentry spans for agent invocations
        (Agent.run methods), model requests (AI client calls), and tool
        executions.
        """
        for apply_patch in (
            _patch_agent_run,
            _patch_graph_nodes,
            _patch_model_request,
            _patch_tool_execution,
        ):
            apply_patch()
@@ -0,0 +1 @@
1
# Origin tag applied to every span created by the pydantic-ai integration.
SPAN_ORIGIN = "auto.ai.pydantic_ai"
@@ -0,0 +1,4 @@
1
+ from .agent_run import _patch_agent_run # noqa: F401
2
+ from .graph_nodes import _patch_graph_nodes # noqa: F401
3
+ from .model_request import _patch_model_request # noqa: F401
4
+ from .tools import _patch_tool_execution # noqa: F401
@@ -0,0 +1,217 @@
1
+ from functools import wraps
2
+
3
+ import sentry_sdk
4
+ from sentry_sdk.tracing_utils import set_span_errored
5
+ from sentry_sdk.utils import event_from_exception
6
+
7
+ from ..spans import invoke_agent_span, update_invoke_agent_span
8
+
9
+ from typing import TYPE_CHECKING
10
+ from pydantic_ai.agent import Agent # type: ignore
11
+
12
+ if TYPE_CHECKING:
13
+ from typing import Any, Callable, Optional
14
+
15
+
16
def _capture_exception(exc):
    # type: (Any) -> None
    """Mark the current span as errored and report *exc* to Sentry."""
    set_span_errored()

    client_options = sentry_sdk.get_client().options
    event, hint = event_from_exception(
        exc,
        client_options=client_options,
        mechanism={"type": "pydantic_ai", "handled": False},
    )
    sentry_sdk.capture_event(event, hint=hint)
26
+
27
+
28
class _StreamingContextManagerWrapper:
    """Instruments streaming agent methods that return async context managers.

    On enter: opens an isolation scope, records the agent in the current
    Sentry scope, and starts an ``invoke_agent`` span. On exit: updates the
    span with the run's output (on success) and tears everything down in
    reverse order.
    """

    def __init__(
        self,
        agent,
        original_ctx_manager,
        user_prompt,
        model,
        model_settings,
        is_streaming=True,
    ):
        # type: (Any, Any, Any, Any, Any, bool) -> None
        self.agent = agent
        self.original_ctx_manager = original_ctx_manager
        self.user_prompt = user_prompt
        self.model = model
        self.model_settings = model_settings
        self.is_streaming = is_streaming
        self._isolation_scope = None  # type: Any
        self._span = None  # type: Optional[sentry_sdk.tracing.Span]
        self._result = None  # type: Any

    async def __aenter__(self):
        # type: () -> Any
        # Isolate this run so concurrent agents don't share scope state.
        self._isolation_scope = sentry_sdk.isolation_scope()
        self._isolation_scope.__enter__()

        # Make the agent (and streaming flag) reachable from nested spans.
        sentry_sdk.get_current_scope().set_context(
            "pydantic_ai_agent", {"_agent": self.agent, "_streaming": self.is_streaming}
        )

        # The invoke_agent span stays open until __aexit__ closes it.
        self._span = invoke_agent_span(
            self.user_prompt, self.agent, self.model, self.model_settings
        )
        self._span.__enter__()

        # Delegate to the wrapped context manager.
        self._result = await self.original_ctx_manager.__aenter__()
        return self._result

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        # type: (Any, Any, Any) -> None
        try:
            # Close the wrapped context manager before touching the span.
            await self.original_ctx_manager.__aexit__(exc_type, exc_val, exc_tb)

            # On a clean exit, record the run's output on the span.
            if exc_type is None and self._result and hasattr(self._result, "output"):
                if self._span is not None:
                    update_invoke_agent_span(self._span, self._result.output)
        finally:
            sentry_sdk.get_current_scope().remove_context("pydantic_ai_agent")

            # Close the span, then the isolation scope (reverse of __aenter__).
            if self._span:
                self._span.__exit__(exc_type, exc_val, exc_tb)
            if self._isolation_scope:
                self._isolation_scope.__exit__(exc_type, exc_val, exc_tb)
95
+
96
+
97
def _create_run_wrapper(original_func, is_streaming=False):
    # type: (Callable[..., Any], bool) -> Callable[..., Any]
    """Wrap ``Agent.run`` so each call executes inside an invoke_agent span.

    Args:
        original_func: The original run method.
        is_streaming: Whether this wraps a streaming method (for future use).
    """

    @wraps(original_func)
    async def wrapper(self, *args, **kwargs):
        # type: (Any, *Any, **Any) -> Any
        # Each workflow gets its own isolation scope so agents running in
        # separate asyncio tasks cannot clobber each other's scope data.
        with sentry_sdk.isolation_scope():
            # Expose the full agent (tools, system prompts) to nested spans.
            sentry_sdk.get_current_scope().set_context(
                "pydantic_ai_agent", {"_agent": self, "_streaming": is_streaming}
            )

            # Pull span parameters from the call arguments.
            user_prompt = kwargs.get("user_prompt") or (args[0] if args else None)
            model = kwargs.get("model")
            model_settings = kwargs.get("model_settings")

            with invoke_agent_span(user_prompt, self, model, model_settings) as span:
                try:
                    result = await original_func(self, *args, **kwargs)
                except Exception as exc:
                    _capture_exception(exc)
                    raise exc from None
                else:
                    # Record the run's output on the span.
                    output = result.output if hasattr(result, "output") else None
                    update_invoke_agent_span(span, output)
                    return result
                finally:
                    sentry_sdk.get_current_scope().remove_context("pydantic_ai_agent")

    return wrapper
141
+
142
+
143
def _create_streaming_wrapper(original_func):
    # type: (Callable[..., Any]) -> Callable[..., Any]
    """Wrap ``run_stream``, which hands back an async context manager."""

    @wraps(original_func)
    def wrapper(self, *args, **kwargs):
        # type: (Any, *Any, **Any) -> Any
        # Pull span parameters from the call arguments.
        prompt = kwargs.get("user_prompt") or (args[0] if args else None)

        # Obtain the untouched context manager, then instrument it.
        ctx_manager = original_func(self, *args, **kwargs)
        return _StreamingContextManagerWrapper(
            agent=self,
            original_ctx_manager=ctx_manager,
            user_prompt=prompt,
            model=kwargs.get("model"),
            model_settings=kwargs.get("model_settings"),
            is_streaming=True,
        )

    return wrapper
171
+
172
+
173
def _create_streaming_events_wrapper(original_func):
    # type: (Callable[..., Any]) -> Callable[..., Any]
    """Wrap ``run_stream_events``; no span is opened here.

    ``run_stream_events`` internally calls ``self.run()`` with an
    event_stream_handler, so the invoke_agent span is created by the run()
    wrapper. This wrapper only forwards events and reports errors.
    """

    @wraps(original_func)
    async def wrapper(self, *args, **kwargs):
        # type: (Any, *Any, **Any) -> Any
        try:
            # Forward every event from the instrumented generator.
            async for event in original_func(self, *args, **kwargs):
                yield event
        except Exception as exc:
            _capture_exception(exc)
            raise exc from None

    return wrapper
194
+
195
+
196
def _patch_agent_run():
    # type: () -> None
    """Install span-creating wrappers on the Agent run entry points.

    Patches ``run`` (non-streaming) and the streaming ``run_stream`` /
    ``run_stream_events`` methods. ``run_sync`` is not patched directly;
    per the original notes it goes through ``run`` — confirm upstream.
    """
    Agent.run = _create_run_wrapper(Agent.run, is_streaming=False)
    Agent.run_stream = _create_streaming_wrapper(Agent.run_stream)
    Agent.run_stream_events = _create_streaming_events_wrapper(Agent.run_stream_events)
@@ -0,0 +1,105 @@
1
+ from contextlib import asynccontextmanager
2
+ from functools import wraps
3
+
4
+ import sentry_sdk
5
+
6
+ from ..spans import (
7
+ ai_client_span,
8
+ update_ai_client_span,
9
+ )
10
+ from pydantic_ai._agent_graph import ModelRequestNode # type: ignore
11
+
12
+ from typing import TYPE_CHECKING
13
+
14
+ if TYPE_CHECKING:
15
+ from typing import Any, Callable
16
+
17
+
18
+ def _extract_span_data(node, ctx):
19
+ # type: (Any, Any) -> tuple[list[Any], Any, Any]
20
+ """Extract common data needed for creating chat spans.
21
+
22
+ Returns:
23
+ Tuple of (messages, model, model_settings)
24
+ """
25
+ # Extract model and settings from context
26
+ model = None
27
+ model_settings = None
28
+ if hasattr(ctx, "deps"):
29
+ model = getattr(ctx.deps, "model", None)
30
+ model_settings = getattr(ctx.deps, "model_settings", None)
31
+
32
+ # Build full message list: history + current request
33
+ messages = []
34
+ if hasattr(ctx, "state") and hasattr(ctx.state, "message_history"):
35
+ messages.extend(ctx.state.message_history)
36
+
37
+ current_request = getattr(node, "request", None)
38
+ if current_request:
39
+ messages.append(current_request)
40
+
41
+ return messages, model, model_settings
42
+
43
+
44
def _patch_graph_nodes():
    # type: () -> None
    """Patch graph node execution to emit ai_client spans.

    ``ModelRequestNode.run`` and ``ModelRequestNode.stream`` are wrapped so
    each model request (batch or streaming) is covered by a chat span. Tool
    call spans are created elsewhere, in the tool patching module.
    """

    original_run = ModelRequestNode.run

    @wraps(original_run)
    async def wrapped_model_request_run(self, ctx):
        # type: (Any, Any) -> Any
        messages, model, model_settings = _extract_span_data(self, ctx)

        with ai_client_span(messages, None, model, model_settings) as span:
            result = await original_run(self, ctx)

            # Attach the model response to the span when the result has one.
            response = getattr(result, "model_response", None)
            update_ai_client_span(span, response)
            return result

    ModelRequestNode.run = wrapped_model_request_run

    original_stream = ModelRequestNode.stream

    def create_wrapped_stream(original_stream_method):
        # type: (Callable[..., Any]) -> Callable[..., Any]
        """Build a stream wrapper that opens a chat span around streaming."""

        @asynccontextmanager
        @wraps(original_stream_method)
        async def wrapped_model_request_stream(self, ctx):
            # type: (Any, Any) -> Any
            messages, model, model_settings = _extract_span_data(self, ctx)

            with ai_client_span(messages, None, model, model_settings) as span:
                async with original_stream_method(self, ctx) as stream:
                    yield stream

                # Streaming finished: the node stores the final response on
                # _result (a NextNode carrying the model_response).
                node_result = getattr(self, "_result", None)
                response = getattr(node_result, "model_response", None)
                update_ai_client_span(span, response)

        return wrapped_model_request_stream

    ModelRequestNode.stream = create_wrapped_stream(original_stream)
@@ -0,0 +1,35 @@
1
+ from functools import wraps
2
+ from typing import TYPE_CHECKING
3
+
4
+ from pydantic_ai import models # type: ignore
5
+
6
+ from ..spans import ai_client_span, update_ai_client_span
7
+
8
+
9
+ if TYPE_CHECKING:
10
+ from typing import Any
11
+
12
+
13
def _patch_model_request():
    # type: () -> None
    """Patch ``models.Model.request`` to wrap model calls in AI client spans.

    In pydantic-ai, model requests are handled through the Model interface,
    so patching the base class's request method covers every backend.
    """
    # Nothing to do on versions without the base Model class.
    if not hasattr(models, "Model"):
        return

    original_request = models.Model.request

    @wraps(original_request)
    async def wrapped_request(self, messages, *args, **kwargs):
        # type: (Any, Any, *Any, **Any) -> Any
        # `messages` carries the full conversation history for the span.
        with ai_client_span(messages, None, self, None) as span:
            result = await original_request(self, messages, *args, **kwargs)
            update_ai_client_span(span, result)
            return result

    models.Model.request = wrapped_request
@@ -0,0 +1,75 @@
1
+ from functools import wraps
2
+
3
+ from pydantic_ai._tool_manager import ToolManager # type: ignore
4
+
5
+ import sentry_sdk
6
+
7
+ from ..spans import execute_tool_span, update_execute_tool_span
8
+
9
+ from typing import TYPE_CHECKING
10
+
11
+ if TYPE_CHECKING:
12
+ from typing import Any
13
+
14
+ try:
15
+ from pydantic_ai.mcp import MCPServer # type: ignore
16
+
17
+ HAS_MCP = True
18
+ except ImportError:
19
+ HAS_MCP = False
20
+
21
+
22
+ def _patch_tool_execution():
23
+ # type: () -> None
24
+ """
25
+ Patch ToolManager._call_tool to create execute_tool spans.
26
+
27
+ This is the single point where ALL tool calls flow through in pydantic_ai,
28
+ regardless of toolset type (function, MCP, combined, wrapper, etc.).
29
+
30
+ By patching here, we avoid:
31
+ - Patching multiple toolset classes
32
+ - Dealing with signature mismatches from instrumented MCP servers
33
+ - Complex nested toolset handling
34
+ """
35
+
36
+ original_call_tool = ToolManager._call_tool
37
+
38
+ @wraps(original_call_tool)
39
+ async def wrapped_call_tool(self, call, allow_partial, wrap_validation_errors):
40
+ # type: (Any, Any, bool, bool) -> Any
41
+
42
+ # Extract tool info before calling original
43
+ name = call.tool_name
44
+ tool = self.tools.get(name) if self.tools else None
45
+
46
+ # Determine tool type by checking tool.toolset
47
+ tool_type = "function" # default
48
+ if tool and HAS_MCP and isinstance(tool.toolset, MCPServer):
49
+ tool_type = "mcp"
50
+
51
+ # Get agent from Sentry scope
52
+ current_span = sentry_sdk.get_current_span()
53
+ if current_span and tool:
54
+ agent_data = (
55
+ sentry_sdk.get_current_scope()._contexts.get("pydantic_ai_agent") or {}
56
+ )
57
+ agent = agent_data.get("_agent")
58
+
59
+ # Get args for span (before validation)
60
+ # call.args can be a string (JSON) or dict
61
+ args_dict = call.args if isinstance(call.args, dict) else {}
62
+
63
+ with execute_tool_span(name, args_dict, agent, tool_type=tool_type) as span:
64
+ result = await original_call_tool(
65
+ self, call, allow_partial, wrap_validation_errors
66
+ )
67
+ update_execute_tool_span(span, result)
68
+ return result
69
+
70
+ # No span context - just call original
71
+ return await original_call_tool(
72
+ self, call, allow_partial, wrap_validation_errors
73
+ )
74
+
75
+ ToolManager._call_tool = wrapped_call_tool
@@ -0,0 +1,3 @@
1
+ from .ai_client import ai_client_span, update_ai_client_span # noqa: F401
2
+ from .execute_tool import execute_tool_span, update_execute_tool_span # noqa: F401
3
+ from .invoke_agent import invoke_agent_span, update_invoke_agent_span # noqa: F401