sentry-sdk 3.0.0a5__py2.py3-none-any.whl → 3.0.0a7__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of sentry-sdk might be problematic.

Files changed (33)
  1. sentry_sdk/_init_implementation.py +5 -0
  2. sentry_sdk/ai/utils.py +7 -8
  3. sentry_sdk/api.py +13 -2
  4. sentry_sdk/client.py +93 -17
  5. sentry_sdk/consts.py +17 -7
  6. sentry_sdk/crons/api.py +5 -0
  7. sentry_sdk/integrations/anthropic.py +133 -73
  8. sentry_sdk/integrations/asgi.py +10 -9
  9. sentry_sdk/integrations/asyncio.py +85 -20
  10. sentry_sdk/integrations/clickhouse_driver.py +55 -28
  11. sentry_sdk/integrations/fastapi.py +1 -7
  12. sentry_sdk/integrations/gnu_backtrace.py +6 -3
  13. sentry_sdk/integrations/langchain.py +462 -218
  14. sentry_sdk/integrations/litestar.py +1 -1
  15. sentry_sdk/integrations/openai_agents/patches/agent_run.py +0 -2
  16. sentry_sdk/integrations/openai_agents/patches/runner.py +18 -15
  17. sentry_sdk/integrations/quart.py +1 -1
  18. sentry_sdk/integrations/starlette.py +1 -5
  19. sentry_sdk/integrations/starlite.py +2 -2
  20. sentry_sdk/integrations/threading.py +1 -1
  21. sentry_sdk/scope.py +11 -11
  22. sentry_sdk/spotlight.py +1 -162
  23. sentry_sdk/tracing.py +94 -17
  24. sentry_sdk/tracing_utils.py +330 -33
  25. sentry_sdk/transport.py +363 -63
  26. sentry_sdk/utils.py +23 -5
  27. sentry_sdk/worker.py +197 -3
  28. {sentry_sdk-3.0.0a5.dist-info → sentry_sdk-3.0.0a7.dist-info}/METADATA +3 -1
  29. {sentry_sdk-3.0.0a5.dist-info → sentry_sdk-3.0.0a7.dist-info}/RECORD +33 -33
  30. {sentry_sdk-3.0.0a5.dist-info → sentry_sdk-3.0.0a7.dist-info}/WHEEL +0 -0
  31. {sentry_sdk-3.0.0a5.dist-info → sentry_sdk-3.0.0a7.dist-info}/entry_points.txt +0 -0
  32. {sentry_sdk-3.0.0a5.dist-info → sentry_sdk-3.0.0a7.dist-info}/licenses/LICENSE +0 -0
  33. {sentry_sdk-3.0.0a5.dist-info → sentry_sdk-3.0.0a7.dist-info}/top_level.txt +0 -0
sentry_sdk/_init_implementation.py CHANGED
@@ -1,4 +1,5 @@
 from __future__ import annotations
+import warnings

 from typing import TYPE_CHECKING

@@ -25,6 +26,10 @@ def _init(*args: Optional[str], **kwargs: Any) -> None:
     setup_scope_context_management()
     client = sentry_sdk.Client(*args, **kwargs)
     sentry_sdk.get_global_scope().set_client(client)
+    warnings.warn(
+        "We won't be continuing development on SDK 3.0. Please use the last stable version of the SDK to get access to the newest features and fixes. See https://github.com/getsentry/sentry-python/discussions/4955",
+        stacklevel=2,
+    )
     _check_python_deprecations()

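The added warning fires on every `sentry_sdk.init()` call. A minimal sketch of what callers will observe (hypothetical snippet, not part of the diff; `dsn=""` keeps the SDK inert):

    import warnings
    import sentry_sdk

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        sentry_sdk.init(dsn="")
        # stacklevel=2 attributes the warning to the init() call site
        assert any("SDK 3.0" in str(w.message) for w in caught)
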
sentry_sdk/ai/utils.py CHANGED
@@ -8,8 +8,7 @@ from sentry_sdk.tracing import Span
 from sentry_sdk.utils import logger


-def _normalize_data(data: Any) -> Any:
-
+def _normalize_data(data: Any, unpack: bool = True) -> Any:
     # convert pydantic data (e.g. OpenAI v1+) to json compatible format
     if hasattr(data, "model_dump"):
         try:
@@ -18,17 +17,17 @@ def _normalize_data(data: Any) -> Any:
             logger.warning("Could not convert pydantic data to JSON: %s", e)
             return data
     if isinstance(data, list):
-        if len(data) == 1:
-            return _normalize_data(data[0])  # remove empty dimensions
-        return list(_normalize_data(x) for x in data)
+        if unpack and len(data) == 1:
+            return _normalize_data(data[0], unpack=unpack)  # remove empty dimensions
+        return list(_normalize_data(x, unpack=unpack) for x in data)
     if isinstance(data, dict):
-        return {k: _normalize_data(v) for (k, v) in data.items()}
+        return {k: _normalize_data(v, unpack=unpack) for (k, v) in data.items()}

     return data


-def set_data_normalized(span: Span, key: str, value: Any) -> None:
-    normalized = _normalize_data(value)
+def set_data_normalized(span: Span, key: str, value: Any, unpack: bool = True) -> None:
+    normalized = _normalize_data(value, unpack=unpack)
     if isinstance(normalized, (int, float, bool, str)):
         span.set_attribute(key, normalized)
     else:
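The new `unpack` flag controls whether single-element lists are collapsed during normalization. A standalone sketch of that behavior (hypothetical re-implementation for illustration, mirroring the logic above):

    def normalize(data, unpack=True):
        if isinstance(data, list):
            if unpack and len(data) == 1:
                return normalize(data[0], unpack=unpack)  # collapse [x] -> x
            return [normalize(x, unpack=unpack) for x in data]
        if isinstance(data, dict):
            return {k: normalize(v, unpack=unpack) for k, v in data.items()}
        return data

    assert normalize(["hi"]) == "hi"                  # default: single-element list collapsed
    assert normalize(["hi"], unpack=False) == ["hi"]  # shape preserved

Callers that need to keep list-shaped attributes intact (e.g. serialized response blocks) can now pass `unpack=False` through `set_data_normalized`.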
sentry_sdk/api.py CHANGED
@@ -229,6 +229,14 @@ def flush(
     return get_client().flush(timeout=timeout, callback=callback)


+@clientmethod
+async def flush_async(
+    timeout: Optional[float] = None,
+    callback: Optional[Callable[[int, float], None]] = None,
+) -> None:
+    return await get_client().flush_async(timeout=timeout, callback=callback)
+
+
 def start_span(**kwargs: Any) -> Span:
     """
     Start and return a span.
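Assuming `flush_async` is re-exported at the package root the way `flush` is, usage in an asyncio application would look roughly like this (hypothetical example):

    import asyncio
    import sentry_sdk

    async def shutdown() -> None:
        # awaits the transport instead of blocking the event loop
        await sentry_sdk.flush_async(timeout=2.0)

    asyncio.run(shutdown())
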
@@ -344,8 +352,11 @@ def set_transaction_name(name: str, source: Optional[str] = None) -> None:
     return get_current_scope().set_transaction_name(name, source)


-def update_current_span(op=None, name=None, attributes=None):
-    # type: (Optional[str], Optional[str], Optional[dict[str, Union[str, int, float, bool]]]) -> None
+def update_current_span(
+    op: Optional[str] = None,
+    name: Optional[str] = None,
+    attributes: Optional[dict[str, Union[str, int, float, bool]]] = None,
+) -> None:
     """
     Update the current active span with the provided parameters.

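With the signature now annotated inline, a typical call looks like this (hypothetical usage; attribute values are limited to str/int/float/bool per the annotation):

    import sentry_sdk

    with sentry_sdk.start_span(op="db", name="query"):
        sentry_sdk.update_current_span(
            op="db.query",
            name="SELECT users",
            attributes={"db.rows_returned": 42},
        )
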
sentry_sdk/client.py CHANGED
@@ -25,7 +25,7 @@ from sentry_sdk.utils import (
 )
 from sentry_sdk.serializer import serialize
 from sentry_sdk.tracing import trace
-from sentry_sdk.transport import BaseHttpTransport, make_transport
+from sentry_sdk.transport import HttpTransportCore, make_transport, AsyncHttpTransport
 from sentry_sdk.consts import (
     SPANDATA,
     DEFAULT_MAX_VALUE_LENGTH,
@@ -214,6 +214,12 @@ class BaseClient:
     def flush(self, *args: Any, **kwargs: Any) -> None:
         return None

+    async def close_async(self, *args: Any, **kwargs: Any) -> None:
+        return None
+
+    async def flush_async(self, *args: Any, **kwargs: Any) -> None:
+        return None
+
     def __enter__(self) -> BaseClient:
         return self

@@ -406,7 +412,7 @@ class _Client(BaseClient):
             self.monitor
             or self.log_batcher
             or has_profiling_enabled(self.options)
-            or isinstance(self.transport, BaseHttpTransport)
+            or isinstance(self.transport, HttpTransportCore)
         ):
             # If we have anything on that could spawn a background thread, we
             # need to check if it's safe to use them.
@@ -442,12 +448,12 @@ class _Client(BaseClient):

         previous_total_spans: Optional[int] = None
         previous_total_breadcrumbs: Optional[int] = None
+        is_transaction = event.get("type") == "transaction"

         if event.get("timestamp") is None:
             event["timestamp"] = datetime.now(timezone.utc)

         if scope is not None:
-            is_transaction = event.get("type") == "transaction"
             spans_before = len(event.get("spans", []))
             event_ = scope.apply_to_event(event, hint, self.options)

@@ -488,7 +494,8 @@ class _Client(BaseClient):
             )

         if (
-            self.options["attach_stacktrace"]
+            not is_transaction
+            and self.options["attach_stacktrace"]
             and "exception" not in event
             and "stacktrace" not in event
             and "threads" not in event
@@ -917,6 +924,14 @@ class _Client(BaseClient):

         return self.integrations.get(integration_name)

+    def _close_components(self) -> None:
+        """Kill all client components in the correct order."""
+        self.session_flusher.kill()
+        if self.log_batcher is not None:
+            self.log_batcher.kill()
+        if self.monitor:
+            self.monitor.kill()
+
     def close(
         self,
         timeout: Optional[float] = None,
@@ -927,19 +942,43 @@ class _Client(BaseClient):
         semantics as :py:meth:`Client.flush`.
         """
         if self.transport is not None:
+            if isinstance(self.transport, AsyncHttpTransport) and hasattr(
+                self.transport, "loop"
+            ):
+                logger.debug(
+                    "close() used with AsyncHttpTransport, aborting. Please use close_async() instead."
+                )
+                return
             self.flush(timeout=timeout, callback=callback)
-
-            self.session_flusher.kill()
-
-            if self.log_batcher is not None:
-                self.log_batcher.kill()
-
-            if self.monitor:
-                self.monitor.kill()
-
+            self._close_components()
             self.transport.kill()
             self.transport = None

+    async def close_async(
+        self,
+        timeout: Optional[float] = None,
+        callback: Optional[Callable[[int, float], None]] = None,
+    ) -> None:
+        """
+        Asynchronously close the client and shut down the transport. Arguments have the same
+        semantics as :py:meth:`Client.flush_async`.
+        """
+        if self.transport is not None:
+            if not (
+                isinstance(self.transport, AsyncHttpTransport)
+                and hasattr(self.transport, "loop")
+            ):
+                logger.debug(
+                    "close_async() used with non-async transport, aborting. Please use close() instead."
+                )
+                return
+            await self.flush_async(timeout=timeout, callback=callback)
+            self._close_components()
+            kill_task = self.transport.kill()  # type: ignore
+            if kill_task is not None:
+                await kill_task
+            self.transport = None
+
     def flush(
         self,
         timeout: Optional[float] = None,
@@ -953,15 +992,52 @@ class _Client(BaseClient):
         :param callback: Is invoked with the number of pending events and the configured timeout.
         """
         if self.transport is not None:
+            if isinstance(self.transport, AsyncHttpTransport) and hasattr(
+                self.transport, "loop"
+            ):
+                logger.debug(
+                    "flush() used with AsyncHttpTransport, aborting. Please use flush_async() instead."
+                )
+                return
             if timeout is None:
                 timeout = self.options["shutdown_timeout"]
-            self.session_flusher.flush()
-
-            if self.log_batcher is not None:
-                self.log_batcher.flush()
+            self._flush_components()

             self.transport.flush(timeout=timeout, callback=callback)

+    async def flush_async(
+        self,
+        timeout: Optional[float] = None,
+        callback: Optional[Callable[[int, float], None]] = None,
+    ) -> None:
+        """
+        Asynchronously wait for the current events to be sent.
+
+        :param timeout: Wait for at most `timeout` seconds. If no `timeout` is provided, the `shutdown_timeout` option value is used.
+
+        :param callback: Is invoked with the number of pending events and the configured timeout.
+        """
+        if self.transport is not None:
+            if not (
+                isinstance(self.transport, AsyncHttpTransport)
+                and hasattr(self.transport, "loop")
+            ):
+                logger.debug(
+                    "flush_async() used with non-async transport, aborting. Please use flush() instead."
+                )
+                return
+            if timeout is None:
+                timeout = self.options["shutdown_timeout"]
+            self._flush_components()
+            flush_task = self.transport.flush(timeout=timeout, callback=callback)  # type: ignore
+            if flush_task is not None:
+                await flush_task
+
+    def _flush_components(self) -> None:
+        self.session_flusher.flush()
+        if self.log_batcher is not None:
+            self.log_batcher.flush()
+
     def __enter__(self) -> _Client:
         return self

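The guard pattern above is symmetric: each sync entry point bails out with a debug log when it detects the async transport, and each async entry point bails out when the transport is synchronous. A hypothetical shutdown sequence for an app configured with the async transport:

    import asyncio
    import sentry_sdk

    async def shutdown() -> None:
        client = sentry_sdk.get_client()
        client.flush()              # no-op: logs "use flush_async() instead"
        await client.flush_async()  # flushes components, then awaits the transport
        await client.close_async()  # kills components and awaits transport.kill()

    asyncio.run(shutdown())
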
sentry_sdk/consts.py CHANGED
@@ -77,7 +77,7 @@ if TYPE_CHECKING:
         "transport_compression_level": Optional[int],
         "transport_compression_algo": Optional[CompressionAlgo],
         "transport_num_pools": Optional[int],
-        "transport_http2": Optional[bool],
+        "transport_async": Optional[bool],
     },
     total=False,
 )
@@ -95,6 +95,17 @@ FALSE_VALUES = [
 ]


+class SPANTEMPLATE(str, Enum):
+    DEFAULT = "default"
+    AI_AGENT = "ai_agent"
+    AI_TOOL = "ai_tool"
+    AI_CHAT = "ai_chat"
+
+    def __str__(self):
+        # type: () -> str
+        return self.value
+
+
 class SPANDATA:
     """
     Additional information describing the type of the span.
@@ -792,6 +803,7 @@ class OP:
     GEN_AI_EMBEDDINGS = "gen_ai.embeddings"
     GEN_AI_EXECUTE_TOOL = "gen_ai.execute_tool"
     GEN_AI_HANDOFF = "gen_ai.handoff"
+    GEN_AI_PIPELINE = "gen_ai.pipeline"
     GEN_AI_INVOKE_AGENT = "gen_ai.invoke_agent"
     GEN_AI_RESPONSES = "gen_ai.responses"
     GRAPHQL_EXECUTE = "graphql.execute"
@@ -821,11 +833,6 @@ class OP:
     HUGGINGFACE_HUB_CHAT_COMPLETIONS_CREATE = (
         "ai.chat_completions.create.huggingface_hub"
     )
-    LANGCHAIN_PIPELINE = "ai.pipeline.langchain"
-    LANGCHAIN_RUN = "ai.run.langchain"
-    LANGCHAIN_TOOL = "ai.tool.langchain"
-    LANGCHAIN_AGENT = "ai.agent.langchain"
-    LANGCHAIN_CHAT_COMPLETIONS_CREATE = "ai.chat_completions.create.langchain"
     QUEUE_PROCESS = "queue.process"
     QUEUE_PUBLISH = "queue.publish"
     QUEUE_SUBMIT_ARQ = "queue.submit.arq"
@@ -963,6 +970,7 @@ class ClientConstructor:
         max_stack_frames: Optional[int] = DEFAULT_MAX_STACK_FRAMES,
         enable_logs: bool = False,
         before_send_log: Optional[Callable[[Log, Hint], Optional[Log]]] = None,
+        http2: Optional[bool] = None,
     ) -> None:
         """Initialize the Sentry SDK with the given parameters. All parameters described here can be used in a call to `sentry_sdk.init()`.

@@ -1335,6 +1343,8 @@
            This is relative to the tracing sample rate - e.g. `0.5` means 50% of sampled transactions will be
            profiled.

+    :param http2: Defaults to `True`, enables HTTP/2 support for the SDK.
+
     :param profiles_sampler:

     :param profiler_mode:
@@ -1381,4 +1391,4 @@ DEFAULT_OPTIONS = _get_default_options()
 del _get_default_options


-VERSION = "3.0.0a5"
+VERSION = "3.0.0a7"
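Because SPANTEMPLATE mixes in `str` and overrides `__str__`, its members behave like their raw values in string contexts across Python versions. A quick illustration (hypothetical snippet mirroring the class above):

    from enum import Enum

    class SPANTEMPLATE(str, Enum):
        AI_CHAT = "ai_chat"

        def __str__(self) -> str:
            return self.value

    assert str(SPANTEMPLATE.AI_CHAT) == "ai_chat"
    assert f"{SPANTEMPLATE.AI_CHAT}" == "ai_chat"
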
sentry_sdk/crons/api.py CHANGED
@@ -2,6 +2,7 @@ from __future__ import annotations
 import uuid

 import sentry_sdk
+from sentry_sdk.utils import logger

 from typing import TYPE_CHECKING

@@ -53,4 +54,8 @@ def capture_checkin(

     sentry_sdk.capture_event(check_in_event)

+    logger.debug(
+        f"[Crons] Captured check-in ({check_in_event.get('check_in_id')}): {check_in_event.get('monitor_slug')} -> {check_in_event.get('status')}"
+    )
+
     return check_in_event["check_in_id"]
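The new debug line only appears when SDK debug logging is on. Hypothetical usage (the DSN is elided here; `capture_checkin` takes the slug and status shown in the log format):

    import sentry_sdk
    from sentry_sdk.crons import capture_checkin

    sentry_sdk.init(dsn="", debug=True)
    capture_checkin(monitor_slug="nightly-job", status="ok")
    # -> [Crons] Captured check-in (<uuid>): nightly-job -> ok
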
sentry_sdk/integrations/anthropic.py CHANGED
@@ -1,9 +1,11 @@
 from __future__ import annotations
 from functools import wraps
+import json
 from typing import TYPE_CHECKING

 import sentry_sdk
 from sentry_sdk.ai.monitoring import record_token_usage
+from sentry_sdk.ai.utils import set_data_normalized
 from sentry_sdk.consts import OP, SPANDATA
 from sentry_sdk.integrations import _check_minimum_version, DidNotEnable, Integration
 from sentry_sdk.scope import should_send_default_pii
@@ -11,9 +13,15 @@ from sentry_sdk.utils import (
     capture_internal_exceptions,
     event_from_exception,
     package_version,
+    safe_serialize,
 )

 try:
+    try:
+        from anthropic import NOT_GIVEN
+    except ImportError:
+        NOT_GIVEN = None
+
     from anthropic.resources import AsyncMessages, Messages

     if TYPE_CHECKING:
@@ -51,7 +59,10 @@ def _capture_exception(exc: Any) -> None:
     sentry_sdk.capture_event(event, hint=hint)


-def _calculate_token_usage(result: Messages, span: Span) -> None:
+def _get_token_usage(result: Messages) -> tuple[int, int]:
+    """
+    Get token usage from the Anthropic response.
+    """
     input_tokens = 0
     output_tokens = 0
     if hasattr(result, "usage"):
@@ -61,40 +72,18 @@ def _calculate_token_usage(result: Messages, span: Span) -> None:
         if hasattr(usage, "output_tokens") and isinstance(usage.output_tokens, int):
             output_tokens = usage.output_tokens

-    total_tokens = input_tokens + output_tokens
-
-    record_token_usage(
-        span,
-        input_tokens=input_tokens,
-        output_tokens=output_tokens,
-        total_tokens=total_tokens,
-    )
-
-
-def _get_responses(content: list[Any]) -> list[dict[str, Any]]:
-    """
-    Get JSON of a Anthropic responses.
-    """
-    responses = []
-    for item in content:
-        if hasattr(item, "text"):
-            responses.append(
-                {
-                    "type": item.type,
-                    "text": item.text,
-                }
-            )
-    return responses
+    return input_tokens, output_tokens


 def _collect_ai_data(
     event: MessageStreamEvent,
+    model: str | None,
     input_tokens: int,
     output_tokens: int,
     content_blocks: list[str],
-) -> tuple[int, int, list[str]]:
+) -> tuple[str | None, int, int, list[str]]:
     """
-    Count token usage and collect content blocks from the AI streaming response.
+    Collect model information, token usage, and collect content blocks from the AI streaming response.
     """
     with capture_internal_exceptions():
         if hasattr(event, "type"):
@@ -102,6 +91,7 @@ def _collect_ai_data(
                 usage = event.message.usage
                 input_tokens += usage.input_tokens
                 output_tokens += usage.output_tokens
+                model = event.message.model or model
             elif event.type == "content_block_start":
                 pass
             elif event.type == "content_block_delta":
@@ -114,34 +104,80 @@ def _collect_ai_data(
             elif event.type == "message_delta":
                 output_tokens += event.usage.output_tokens

-    return input_tokens, output_tokens, content_blocks
+    return model, input_tokens, output_tokens, content_blocks


-def _add_ai_data_to_span(
+def _set_input_data(
+    span: Span, kwargs: dict[str, Any], integration: AnthropicIntegration
+) -> None:
+    """
+    Set input data for the span based on the provided keyword arguments for the anthropic message creation.
+    """
+    messages = kwargs.get("messages")
+    if (
+        messages is not None
+        and len(messages) > 0
+        and should_send_default_pii()
+        and integration.include_prompts
+    ):
+        set_data_normalized(
+            span, SPANDATA.GEN_AI_REQUEST_MESSAGES, safe_serialize(messages)
+        )
+
+    set_data_normalized(
+        span, SPANDATA.GEN_AI_RESPONSE_STREAMING, kwargs.get("stream", False)
+    )
+
+    kwargs_keys_to_attributes = {
+        "max_tokens": SPANDATA.GEN_AI_REQUEST_MAX_TOKENS,
+        "model": SPANDATA.GEN_AI_REQUEST_MODEL,
+        "temperature": SPANDATA.GEN_AI_REQUEST_TEMPERATURE,
+        "top_k": SPANDATA.GEN_AI_REQUEST_TOP_K,
+        "top_p": SPANDATA.GEN_AI_REQUEST_TOP_P,
+    }
+    for key, attribute in kwargs_keys_to_attributes.items():
+        value = kwargs.get(key)
+        if value is not NOT_GIVEN and value is not None:
+            set_data_normalized(span, attribute, value)
+
+    # Input attributes: Tools
+    tools = kwargs.get("tools")
+    if tools is not NOT_GIVEN and tools is not None and len(tools) > 0:
+        set_data_normalized(
+            span, SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, safe_serialize(tools)
+        )
+
+
+def _set_output_data(
     span: Span,
     integration: AnthropicIntegration,
+    model: str | None,
     input_tokens: int,
     output_tokens: int,
-    content_blocks: list[str],
+    content_blocks: list[Any],
+    finish_span: bool = False,
 ) -> None:
     """
-    Add token usage and content blocks from the AI streaming response to the span.
-    """
-    with capture_internal_exceptions():
-        if should_send_default_pii() and integration.include_prompts:
-            complete_message = "".join(content_blocks)
-            span.set_attribute(
-                SPANDATA.AI_RESPONSES,
-                [{"type": "text", "text": complete_message}],
-            )
-        total_tokens = input_tokens + output_tokens
-        record_token_usage(
+    Set output data for the span based on the AI response."""
+    span.set_attribute(SPANDATA.GEN_AI_RESPONSE_MODEL, model)
+    if should_send_default_pii() and integration.include_prompts:
+        set_data_normalized(
             span,
-            input_tokens=input_tokens,
-            output_tokens=output_tokens,
-            total_tokens=total_tokens,
+            SPANDATA.GEN_AI_RESPONSE_TEXT,
+            json.dumps(content_blocks),
+            unpack=False,
         )
-        span.set_attribute(SPANDATA.AI_STREAMING, True)
+
+    record_token_usage(
+        span,
+        input_tokens=input_tokens,
+        output_tokens=output_tokens,
+    )
+
+    # TODO: GEN_AI_RESPONSE_TOOL_CALLS ?
+
+    if finish_span:
+        span.__exit__(None, None, None)


 def _sentry_patched_create_common(f: Any, *args: Any, **kwargs: Any) -> Any:
@@ -157,70 +193,94 @@ def _sentry_patched_create_common(f: Any, *args: Any, **kwargs: Any) -> Any:
     except TypeError:
         return f(*args, **kwargs)

+    model = kwargs.get("model", "")
+
     span = sentry_sdk.start_span(
-        op=OP.ANTHROPIC_MESSAGES_CREATE,
-        description="Anthropic messages create",
+        op=OP.GEN_AI_CHAT,
+        name=f"chat {model}".strip(),
         origin=AnthropicIntegration.origin,
         only_as_child_span=True,
     )
     span.__enter__()

-    result = yield f, args, kwargs
+    _set_input_data(span, kwargs, integration)

-    # add data to span and finish it
-    messages = list(kwargs["messages"])
-    model = kwargs.get("model")
+    result = yield f, args, kwargs

     with capture_internal_exceptions():
-        span.set_attribute(SPANDATA.AI_MODEL_ID, model)
-        span.set_attribute(SPANDATA.AI_STREAMING, False)
-
-        if should_send_default_pii() and integration.include_prompts:
-            span.set_attribute(SPANDATA.AI_INPUT_MESSAGES, messages)
-
         if hasattr(result, "content"):
-            if should_send_default_pii() and integration.include_prompts:
-                span.set_attribute(
-                    SPANDATA.AI_RESPONSES, _get_responses(result.content)
-                )
-            _calculate_token_usage(result, span)
-            span.__exit__(None, None, None)
+            input_tokens, output_tokens = _get_token_usage(result)
+
+            content_blocks = []
+            for content_block in result.content:
+                if hasattr(content_block, "to_dict"):
+                    content_blocks.append(content_block.to_dict())
+                elif hasattr(content_block, "model_dump"):
+                    content_blocks.append(content_block.model_dump())
+                elif hasattr(content_block, "text"):
+                    content_blocks.append({"type": "text", "text": content_block.text})
+
+            _set_output_data(
+                span=span,
+                integration=integration,
+                model=getattr(result, "model", None),
+                input_tokens=input_tokens,
+                output_tokens=output_tokens,
+                content_blocks=content_blocks,
+                finish_span=True,
+            )

         # Streaming response
         elif hasattr(result, "_iterator"):
             old_iterator = result._iterator

             def new_iterator() -> Iterator[MessageStreamEvent]:
+                model = None
                 input_tokens = 0
                 output_tokens = 0
                 content_blocks: list[str] = []

                 for event in old_iterator:
-                    input_tokens, output_tokens, content_blocks = _collect_ai_data(
-                        event, input_tokens, output_tokens, content_blocks
+                    model, input_tokens, output_tokens, content_blocks = (
+                        _collect_ai_data(
+                            event, model, input_tokens, output_tokens, content_blocks
+                        )
                     )
                     yield event

-                _add_ai_data_to_span(
-                    span, integration, input_tokens, output_tokens, content_blocks
+                _set_output_data(
+                    span=span,
+                    integration=integration,
+                    model=model,
+                    input_tokens=input_tokens,
+                    output_tokens=output_tokens,
+                    content_blocks=[{"text": "".join(content_blocks), "type": "text"}],
+                    finish_span=True,
                 )
-                span.__exit__(None, None, None)

             async def new_iterator_async() -> AsyncIterator[MessageStreamEvent]:
+                model = None
                 input_tokens = 0
                 output_tokens = 0
                 content_blocks: list[str] = []

                 async for event in old_iterator:
-                    input_tokens, output_tokens, content_blocks = _collect_ai_data(
-                        event, input_tokens, output_tokens, content_blocks
+                    model, input_tokens, output_tokens, content_blocks = (
+                        _collect_ai_data(
+                            event, model, input_tokens, output_tokens, content_blocks
+                        )
                    )
                     yield event

-                _add_ai_data_to_span(
-                    span, integration, input_tokens, output_tokens, content_blocks
+                _set_output_data(
+                    span=span,
+                    integration=integration,
+                    model=model,
+                    input_tokens=input_tokens,
+                    output_tokens=output_tokens,
+                    content_blocks=[{"text": "".join(content_blocks), "type": "text"}],
+                    finish_span=True,
                 )
-                span.__exit__(None, None, None)

             if str(type(result._iterator)) == "<class 'async_generator'>":
                 result._iterator = new_iterator_async()
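The `result = yield f, args, kwargs` line works because `_sentry_patched_create_common` is a generator driven by a small trampoline in the sync and async wrappers. A minimal sketch of that driver pattern (hypothetical illustration, not the SDK's exact code):

    def wrap(f, patched_common):
        def wrapped(*args, **kwargs):
            gen = patched_common(f, *args, **kwargs)
            fn, a, kw = next(gen)   # run span setup, receive the call spec
            result = fn(*a, **kw)   # perform the real Anthropic API call
            try:
                gen.send(result)    # resume teardown with the result
            except StopIteration:
                pass
            return result
        return wrapped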