lmnr 0.6.16__py3-none-any.whl → 0.7.26__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (113)
  1. lmnr/__init__.py +6 -15
  2. lmnr/cli/__init__.py +270 -0
  3. lmnr/cli/datasets.py +371 -0
  4. lmnr/{cli.py → cli/evals.py} +20 -102
  5. lmnr/cli/rules.py +42 -0
  6. lmnr/opentelemetry_lib/__init__.py +9 -2
  7. lmnr/opentelemetry_lib/decorators/__init__.py +274 -168
  8. lmnr/opentelemetry_lib/litellm/__init__.py +352 -38
  9. lmnr/opentelemetry_lib/litellm/utils.py +82 -0
  10. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/__init__.py +849 -0
  11. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/config.py +13 -0
  12. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_emitter.py +211 -0
  13. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_models.py +41 -0
  14. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/span_utils.py +401 -0
  15. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/streaming.py +425 -0
  16. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/utils.py +332 -0
  17. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/version.py +1 -0
  18. lmnr/opentelemetry_lib/opentelemetry/instrumentation/claude_agent/__init__.py +451 -0
  19. lmnr/opentelemetry_lib/opentelemetry/instrumentation/claude_agent/proxy.py +144 -0
  20. lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_agent/__init__.py +100 -0
  21. lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_computer/__init__.py +476 -0
  22. lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_computer/utils.py +12 -0
  23. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py +191 -129
  24. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/schema_utils.py +26 -0
  25. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py +126 -41
  26. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/__init__.py +488 -0
  27. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/config.py +8 -0
  28. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_emitter.py +143 -0
  29. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_models.py +41 -0
  30. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/span_utils.py +229 -0
  31. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/utils.py +92 -0
  32. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/version.py +1 -0
  33. lmnr/opentelemetry_lib/opentelemetry/instrumentation/kernel/__init__.py +381 -0
  34. lmnr/opentelemetry_lib/opentelemetry/instrumentation/kernel/utils.py +36 -0
  35. lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/__init__.py +16 -16
  36. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/__init__.py +61 -0
  37. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/__init__.py +472 -0
  38. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +1185 -0
  39. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +305 -0
  40. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/config.py +16 -0
  41. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +312 -0
  42. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_emitter.py +100 -0
  43. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_models.py +41 -0
  44. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +68 -0
  45. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/utils.py +197 -0
  46. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v0/__init__.py +176 -0
  47. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/__init__.py +368 -0
  48. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +325 -0
  49. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +135 -0
  50. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/responses_wrappers.py +786 -0
  51. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/version.py +1 -0
  52. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openhands_ai/__init__.py +388 -0
  53. lmnr/opentelemetry_lib/opentelemetry/instrumentation/opentelemetry/__init__.py +69 -0
  54. lmnr/opentelemetry_lib/opentelemetry/instrumentation/skyvern/__init__.py +59 -61
  55. lmnr/opentelemetry_lib/opentelemetry/instrumentation/threading/__init__.py +197 -0
  56. lmnr/opentelemetry_lib/tracing/__init__.py +119 -18
  57. lmnr/opentelemetry_lib/tracing/_instrument_initializers.py +124 -25
  58. lmnr/opentelemetry_lib/tracing/attributes.py +4 -0
  59. lmnr/opentelemetry_lib/tracing/context.py +200 -0
  60. lmnr/opentelemetry_lib/tracing/exporter.py +109 -15
  61. lmnr/opentelemetry_lib/tracing/instruments.py +22 -5
  62. lmnr/opentelemetry_lib/tracing/processor.py +128 -30
  63. lmnr/opentelemetry_lib/tracing/span.py +398 -0
  64. lmnr/opentelemetry_lib/tracing/tracer.py +40 -1
  65. lmnr/opentelemetry_lib/tracing/utils.py +62 -0
  66. lmnr/opentelemetry_lib/utils/package_check.py +9 -0
  67. lmnr/opentelemetry_lib/utils/wrappers.py +11 -0
  68. lmnr/sdk/browser/background_send_events.py +158 -0
  69. lmnr/sdk/browser/browser_use_cdp_otel.py +100 -0
  70. lmnr/sdk/browser/browser_use_otel.py +12 -12
  71. lmnr/sdk/browser/bubus_otel.py +71 -0
  72. lmnr/sdk/browser/cdp_utils.py +518 -0
  73. lmnr/sdk/browser/inject_script.js +514 -0
  74. lmnr/sdk/browser/patchright_otel.py +18 -44
  75. lmnr/sdk/browser/playwright_otel.py +104 -187
  76. lmnr/sdk/browser/pw_utils.py +249 -210
  77. lmnr/sdk/browser/recorder/record.umd.min.cjs +84 -0
  78. lmnr/sdk/browser/utils.py +1 -1
  79. lmnr/sdk/client/asynchronous/async_client.py +47 -15
  80. lmnr/sdk/client/asynchronous/resources/__init__.py +2 -7
  81. lmnr/sdk/client/asynchronous/resources/browser_events.py +1 -0
  82. lmnr/sdk/client/asynchronous/resources/datasets.py +131 -0
  83. lmnr/sdk/client/asynchronous/resources/evals.py +122 -18
  84. lmnr/sdk/client/asynchronous/resources/evaluators.py +85 -0
  85. lmnr/sdk/client/asynchronous/resources/tags.py +4 -10
  86. lmnr/sdk/client/synchronous/resources/__init__.py +2 -2
  87. lmnr/sdk/client/synchronous/resources/datasets.py +131 -0
  88. lmnr/sdk/client/synchronous/resources/evals.py +83 -17
  89. lmnr/sdk/client/synchronous/resources/evaluators.py +85 -0
  90. lmnr/sdk/client/synchronous/resources/tags.py +4 -10
  91. lmnr/sdk/client/synchronous/sync_client.py +47 -15
  92. lmnr/sdk/datasets/__init__.py +94 -0
  93. lmnr/sdk/datasets/file_utils.py +91 -0
  94. lmnr/sdk/decorators.py +103 -23
  95. lmnr/sdk/evaluations.py +122 -33
  96. lmnr/sdk/laminar.py +816 -333
  97. lmnr/sdk/log.py +7 -2
  98. lmnr/sdk/types.py +124 -143
  99. lmnr/sdk/utils.py +115 -2
  100. lmnr/version.py +1 -1
  101. {lmnr-0.6.16.dist-info → lmnr-0.7.26.dist-info}/METADATA +71 -78
  102. lmnr-0.7.26.dist-info/RECORD +116 -0
  103. lmnr-0.7.26.dist-info/WHEEL +4 -0
  104. lmnr-0.7.26.dist-info/entry_points.txt +3 -0
  105. lmnr/opentelemetry_lib/tracing/context_properties.py +0 -65
  106. lmnr/sdk/browser/rrweb/rrweb.umd.min.cjs +0 -98
  107. lmnr/sdk/client/asynchronous/resources/agent.py +0 -329
  108. lmnr/sdk/client/synchronous/resources/agent.py +0 -323
  109. lmnr/sdk/datasets.py +0 -60
  110. lmnr-0.6.16.dist-info/LICENSE +0 -75
  111. lmnr-0.6.16.dist-info/RECORD +0 -61
  112. lmnr-0.6.16.dist-info/WHEEL +0 -4
  113. lmnr-0.6.16.dist-info/entry_points.txt +0 -3
@@ -0,0 +1,13 @@
1
+ from typing import Callable, Optional
2
+
3
+ from typing_extensions import Coroutine
4
+
5
+
6
class Config:
    """Process-wide, mutable settings for the Anthropic instrumentation."""

    # When True, token usage is enriched/computed by the instrumentation.
    # NOTE(review): not referenced in this chunk — confirm semantics at call sites.
    enrich_token_usage: bool = False
    # Optional callable used to report exceptions swallowed by the instrumentation.
    exception_logger = None
    # Returns extra attributes to attach to emitted metrics.
    get_common_metrics_attributes: Callable[[], dict] = lambda: {}
    # Async hook (trace_id, span_id, image_name, base64_data) -> URL; used by
    # span_utils._process_image_item to offload base64 images. None disables it.
    upload_base64_image: Optional[
        Callable[[str, str, str, str], Coroutine[None, None, str]]
    ] = None
    # When True, prompts/completions are recorded as span attributes instead
    # of OpenTelemetry events.
    use_legacy_attributes: bool = True
@@ -0,0 +1,211 @@
1
+ from dataclasses import asdict
2
+ from enum import Enum
3
+ from typing import Optional, Union
4
+
5
+ from opentelemetry._events import Event, EventLogger
6
+ from .event_models import (
7
+ ChoiceEvent,
8
+ MessageEvent,
9
+ ToolCall,
10
+ )
11
+ from .utils import (
12
+ should_emit_events,
13
+ should_send_prompts,
14
+ )
15
+ from opentelemetry.semconv._incubating.attributes import (
16
+ gen_ai_attributes as GenAIAttributes,
17
+ )
18
+
19
+
20
class Roles(Enum):
    """Message roles recognized when naming gen_ai message events."""

    USER = "user"
    ASSISTANT = "assistant"
    SYSTEM = "system"
    TOOL = "tool"
25
+
26
+
27
VALID_MESSAGE_ROLES = {role.value for role in Roles}
"""The valid roles for naming the message event."""

EVENT_ATTRIBUTES = {
    GenAIAttributes.GEN_AI_SYSTEM: GenAIAttributes.GenAiSystemValues.ANTHROPIC.value
}
"""The attributes to be used for the event."""
34
+
35
+
36
def emit_input_events(event_logger: Optional[EventLogger], kwargs):
    """Emit one message event per input prompt/message, plus one for tools."""
    prompt = kwargs.get("prompt")
    messages = kwargs.get("messages")

    if prompt is not None:
        # Legacy text-completions API: a single user prompt string.
        emit_event(MessageEvent(content=prompt, role="user"), event_logger)
    elif messages is not None:
        system = kwargs.get("system")
        if system:
            emit_event(MessageEvent(content=system, role="system"), event_logger)
        for msg in messages:
            emit_event(
                MessageEvent(content=msg.get("content"), role=msg.get("role")),
                event_logger,
            )

    tools = kwargs.get("tools")
    if tools is not None:
        # Tool definitions are reported as a user message carrying the list.
        emit_event(MessageEvent(content={"tools": tools}, role="user"), event_logger)
57
+
58
+
59
def emit_response_events(event_logger: Optional[EventLogger], response):
    """Emit a gen_ai.choice event for each completion block in *response*."""
    if not isinstance(response, dict):
        response = dict(response)

    role = response.get("role", "assistant")
    finish_reason = response.get("stop_reason")

    # Legacy text-completions API: a single "completion" string.
    completion = response.get("completion")
    if completion:
        emit_event(
            ChoiceEvent(
                index=0,
                message={"content": completion, "role": role},
                finish_reason=finish_reason,
            ),
            event_logger,
        )
        return

    for i, block in enumerate(response.get("content") or []):
        # Extract the textual payload; tool_use and unknown blocks carry None.
        if block.type == "text":
            content = block.text
        elif block.type == "thinking":
            content = block.thinking
        else:
            content = None
        message = {"content": content, "role": role}

        # Only tool_use blocks produce tool calls.
        tool_calls = None
        if block.type == "tool_use":
            tool_calls = [
                ToolCall(
                    id=block.id,
                    function={
                        "name": block.name,
                        "arguments": block.input,
                    },
                    type="function",
                )
            ]

        emit_event(
            ChoiceEvent(
                index=i,
                message=message,
                finish_reason=finish_reason,
                tool_calls=tool_calls,
            ),
            event_logger,
        )
124
+
125
+
126
def emit_streaming_response_events(
    event_logger: Optional[EventLogger], complete_response: dict
):
    """Emit one gen_ai.choice event per accumulated streaming event."""
    for evt in complete_response.get("events", []):
        payload = {
            "content": {
                "type": evt.get("type"),
                "content": evt.get("text"),
            },
            "role": evt.get("role", "assistant"),
        }
        choice = ChoiceEvent(
            index=evt.get("index", 0),
            message=payload,
            finish_reason=evt.get("finish_reason", "unknown"),
        )
        emit_event(choice, event_logger)
144
+
145
+
146
def emit_event(
    event: Union[MessageEvent, ChoiceEvent], event_logger: EventLogger
) -> None:
    """Dispatch *event* to the OpenTelemetry event logger.

    No-op when event emission is disabled via should_emit_events().

    Args:
        event: The event to emit.

    Raises:
        TypeError: if *event* is neither a MessageEvent nor a ChoiceEvent.
    """
    if not should_emit_events():
        return

    if isinstance(event, MessageEvent):
        _emit_message_event(event, event_logger)
        return
    if isinstance(event, ChoiceEvent):
        _emit_choice_event(event, event_logger)
        return
    raise TypeError("Unsupported event type")
164
+
165
+
166
def _emit_message_event(event: MessageEvent, event_logger: EventLogger) -> None:
    """Build and emit a gen_ai.<role>.message event from *event*."""
    body = asdict(event)

    if event.role in VALID_MESSAGE_ROLES:
        name = f"gen_ai.{event.role}.message"
        # Per the semantic conventions the role is only required when it
        # differs from the role embedded in the event name, so drop the
        # redundant field.
        body.pop("role", None)
    else:
        name = "gen_ai.user.message"

    # Per the semantic conventions only assistant messages may carry tool
    # calls; drop the field for other roles, and also when there are none.
    keep_tool_calls = (
        event.role == Roles.ASSISTANT.value and event.tool_calls is not None
    )
    if not keep_tool_calls:
        del body["tool_calls"]

    if not should_send_prompts():
        # Prompt capture disabled: strip content and tool-call arguments.
        del body["content"]
        for tool_call in body.get("tool_calls") or []:
            tool_call["function"].pop("arguments", None)

    event_logger.emit(Event(name=name, body=body, attributes=EVENT_ATTRIBUTES))
191
+
192
+
193
def _emit_choice_event(event: ChoiceEvent, event_logger: EventLogger) -> None:
    """Build and emit a gen_ai.choice event from *event*."""
    body = asdict(event)

    if event.message["role"] == Roles.ASSISTANT.value:
        # Per the semantic conventions the role is only required when it is
        # not "assistant", so strip the default.
        body["message"].pop("role", None)

    if event.tool_calls is None:
        del body["tool_calls"]

    if not should_send_prompts():
        # Prompt capture disabled: strip content and tool-call arguments.
        body["message"].pop("content", None)
        for tool_call in body.get("tool_calls") or []:
            tool_call["function"].pop("arguments", None)

    event_logger.emit(
        Event(name="gen_ai.choice", body=body, attributes=EVENT_ATTRIBUTES)
    )
@@ -0,0 +1,41 @@
1
+ from dataclasses import dataclass
2
+ from typing import Any, List, Literal, Optional, TypedDict
3
+
4
+
5
class _FunctionToolCall(TypedDict):
    # Name of the function the model requested.
    function_name: str
    # Parsed arguments for the call; None when absent.
    # NOTE(review): the emitters build {"name": ..., "arguments": ...} dicts,
    # not this {"function_name", "arguments"} shape — confirm intended schema.
    arguments: Optional[dict[str, Any]]
8
+
9
+
10
class ToolCall(TypedDict):
    """Represents a tool call in the AI model."""

    # Provider-assigned identifier of the tool call.
    id: str
    # The function name/arguments payload.
    function: _FunctionToolCall
    # Always the literal "function".
    type: Literal["function"]
16
+
17
+
18
class CompletionMessage(TypedDict):
    """Represents a message in the AI model.

    Note: TypedDict fields cannot declare default values (PEP 589 requires
    annotation-only syntax); the previous ``role: str = "assistant"`` was an
    invalid default and has been removed. Callers in this package always
    supply ``role`` explicitly when constructing these dicts.
    """

    # Message payload; a string or structured content.
    content: Any
    # Message role, e.g. "assistant".
    role: str
23
+
24
+
25
@dataclass
class MessageEvent:
    """Represents an input event for the AI model."""

    # Message payload; a string or structured content blocks.
    content: Any
    # Sender role; defaults to "user".
    role: str = "user"
    # Tool calls attached to the message, if any.
    tool_calls: Optional[List[ToolCall]] = None
32
+
33
+
34
@dataclass
class ChoiceEvent:
    """Represents a completion event for the AI model."""

    # Zero-based position of this choice in the response.
    index: int
    # The completion message payload.
    message: CompletionMessage
    # Provider stop reason; "unknown" when not reported.
    finish_reason: str = "unknown"
    # Tool calls produced by this choice, if any.
    tool_calls: Optional[List[ToolCall]] = None
@@ -0,0 +1,401 @@
1
+ import json
2
+ import logging
3
+ from typing import Any, Dict
4
+
5
+ from .config import Config
6
+ from .utils import (
7
+ JSONEncoder,
8
+ dont_throw,
9
+ model_as_dict,
10
+ should_send_prompts,
11
+ _extract_response_data,
12
+ _aextract_response_data,
13
+ set_span_attribute,
14
+ )
15
+ from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import (
16
+ GEN_AI_RESPONSE_ID,
17
+ )
18
+ from opentelemetry.semconv_ai import SpanAttributes
19
+
20
+ logger = logging.getLogger(__name__)
21
+
22
+
23
+ def _is_base64_image(item: Dict[str, Any]) -> bool:
24
+ if not isinstance(item, dict):
25
+ return False
26
+
27
+ if not isinstance(item.get("source"), dict):
28
+ return False
29
+
30
+ if item.get("type") != "image" or item["source"].get("type") != "base64":
31
+ return False
32
+
33
+ return True
34
+
35
+
36
async def _process_image_item(item, trace_id, span_id, message_index, content_index):
    """Upload a base64 image block and return an image_url block for it.

    Returns *item* unchanged when no upload hook is configured.
    """
    uploader = Config.upload_base64_image
    if not uploader:
        return item

    source = item.get("source")
    image_format = source.get("media_type").split("/")[1]
    image_name = f"message_{message_index}_content_{content_index}.{image_format}"
    url = await uploader(trace_id, span_id, image_name, source.get("data"))

    return {"type": "image_url", "image_url": {"url": url}}
46
+
47
+
48
async def _dump_content(message_index, content, span):
    """Serialize a message's content into a string for a span attribute.

    Strings pass through; lists of pure text blocks are concatenated; other
    lists are JSON-dumped, with base64 images optionally uploaded and replaced
    by URL blocks. Any other type yields None.
    """
    if isinstance(content, str):
        return content

    if isinstance(content, list):
        dicts = [model_as_dict(item) for item in content]

        # A list made purely of text blocks (common with prompt caching)
        # is flattened into a single concatenated string.
        if all(d.get("type") == "text" for d in dicts):
            return "".join(d.get("text") for d in dicts)

        processed = []
        for j, d in enumerate(dicts):
            if _is_base64_image(d):
                d = await _process_image_item(
                    d,
                    span.context.trace_id,
                    span.context.span_id,
                    message_index,
                    j,
                )
            processed.append(d)
        return json.dumps(processed, cls=JSONEncoder)
73
+
74
+
75
@dont_throw
async def aset_input_attributes(span, kwargs):
    """Record Anthropic request parameters and prompts on *span*.

    Sets model/sampling attributes unconditionally; prompts, messages, and
    tool definitions only when should_send_prompts() allows. Tool-use blocks
    inside messages are split out into per-prompt tool_calls attributes.
    """
    from .utils import set_span_attribute

    set_span_attribute(span, SpanAttributes.LLM_REQUEST_MODEL, kwargs.get("model"))
    set_span_attribute(
        span, SpanAttributes.LLM_REQUEST_MAX_TOKENS, kwargs.get("max_tokens_to_sample")
    )
    set_span_attribute(
        span, SpanAttributes.LLM_REQUEST_TEMPERATURE, kwargs.get("temperature")
    )
    set_span_attribute(span, SpanAttributes.LLM_REQUEST_TOP_P, kwargs.get("top_p"))
    set_span_attribute(
        span, SpanAttributes.LLM_FREQUENCY_PENALTY, kwargs.get("frequency_penalty")
    )
    set_span_attribute(
        span, SpanAttributes.LLM_PRESENCE_PENALTY, kwargs.get("presence_penalty")
    )
    set_span_attribute(span, SpanAttributes.LLM_IS_STREAMING, kwargs.get("stream"))
    set_span_attribute(
        span, "anthropic.request.service_tier", kwargs.get("service_tier")
    )

    if should_send_prompts():
        if kwargs.get("prompt") is not None:
            # Legacy text-completions API: single prompt string at index 0.
            set_span_attribute(
                span, f"{SpanAttributes.LLM_PROMPTS}.0.user", kwargs.get("prompt")
            )

        elif kwargs.get("messages") is not None:
            # A system prompt occupies prompt index 0 and shifts all
            # message indices by one.
            has_system_message = False
            if kwargs.get("system"):
                has_system_message = True
                set_span_attribute(
                    span,
                    f"{SpanAttributes.LLM_PROMPTS}.0.content",
                    await _dump_content(
                        message_index=0, span=span, content=kwargs.get("system")
                    ),
                )
                set_span_attribute(
                    span,
                    f"{SpanAttributes.LLM_PROMPTS}.0.role",
                    "system",
                )
            for i, message in enumerate(kwargs.get("messages")):
                prompt_index = i + (1 if has_system_message else 0)
                content = message.get("content")
                # Separate tool_use blocks from regular content: they are
                # recorded as tool_calls attributes, not message content.
                tool_use_blocks = []
                other_blocks = []
                if isinstance(content, list):
                    for block in content:
                        if dict(block).get("type") == "tool_use":
                            tool_use_blocks.append(dict(block))
                        else:
                            other_blocks.append(block)
                    content = other_blocks
                set_span_attribute(
                    span,
                    f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.content",
                    await _dump_content(message_index=i, span=span, content=content),
                )
                set_span_attribute(
                    span,
                    f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.role",
                    message.get("role"),
                )
                if tool_use_blocks:
                    for tool_num, tool_use_block in enumerate(tool_use_blocks):
                        set_span_attribute(
                            span,
                            f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.tool_calls.{tool_num}.id",
                            tool_use_block.get("id"),
                        )
                        set_span_attribute(
                            span,
                            f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.tool_calls.{tool_num}.name",
                            tool_use_block.get("name"),
                        )
                        set_span_attribute(
                            span,
                            f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.tool_calls.{tool_num}.arguments",
                            json.dumps(tool_use_block.get("input")),
                        )

        # NOTE(review): nesting reconstructed from a diff that stripped
        # indentation — tool definitions are assumed to be gated by
        # should_send_prompts(); confirm against upstream.
        if kwargs.get("tools") is not None:
            for i, tool in enumerate(kwargs.get("tools")):
                prefix = f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.{i}"
                set_span_attribute(span, f"{prefix}.name", tool.get("name"))
                set_span_attribute(
                    span, f"{prefix}.description", tool.get("description")
                )
                input_schema = tool.get("input_schema")
                if input_schema is not None:
                    set_span_attribute(
                        span, f"{prefix}.input_schema", json.dumps(input_schema)
                    )
172
+
173
+
174
async def _aset_span_completions(span, response):
    """Async variant of _set_span_completions.

    Records completion text, thinking blocks, and tool calls as indexed
    LLM_COMPLETIONS span attributes. Awaits _aextract_response_data first so
    awaitable/streamed responses are resolved. No-op unless prompt capture
    is enabled.
    """
    if not should_send_prompts():
        return

    response = await _aextract_response_data(response)
    index = 0
    prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{index}"
    set_span_attribute(span, f"{prefix}.finish_reason", response.get("stop_reason"))
    if response.get("role"):
        set_span_attribute(span, f"{prefix}.role", response.get("role"))

    if response.get("completion"):
        # Legacy text-completions API: a single completion string.
        set_span_attribute(span, f"{prefix}.content", response.get("completion"))
    elif response.get("content"):
        tool_call_index = 0
        text = ""
        for content in response.get("content"):
            content_block_type = content.type
            # usually, Anthropic responds with just one text block,
            # but the API allows for multiple text blocks, so concatenate them
            if content_block_type == "text" and hasattr(content, "text"):
                # Guard against a None text payload — keeps this in sync
                # with the sync twin _set_span_completions.
                text += content.text or ""
            elif content_block_type == "thinking":
                content = dict(content)
                # override the role to thinking
                set_span_attribute(
                    span,
                    f"{prefix}.role",
                    "thinking",
                )
                set_span_attribute(
                    span,
                    f"{prefix}.content",
                    content.get("thinking"),
                )
                # increment the index for subsequent content blocks
                index += 1
                prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{index}"
                # set the role to the original role on the next completions
                set_span_attribute(
                    span,
                    f"{prefix}.role",
                    response.get("role"),
                )
            elif content_block_type == "tool_use":
                content = dict(content)
                set_span_attribute(
                    span,
                    f"{prefix}.tool_calls.{tool_call_index}.id",
                    content.get("id"),
                )
                set_span_attribute(
                    span,
                    f"{prefix}.tool_calls.{tool_call_index}.name",
                    content.get("name"),
                )
                tool_arguments = content.get("input")
                if tool_arguments is not None:
                    set_span_attribute(
                        span,
                        f"{prefix}.tool_calls.{tool_call_index}.arguments",
                        json.dumps(tool_arguments),
                    )
                tool_call_index += 1
        set_span_attribute(span, f"{prefix}.content", text)
239
+
240
+
241
def _set_span_completions(span, response):
    """Record completion text, thinking blocks, and tool calls on *span*.

    No-op unless prompt capture is enabled via should_send_prompts().
    """
    if not should_send_prompts():
        return
    from .utils import set_span_attribute

    completion_index = 0
    prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{completion_index}"
    set_span_attribute(span, f"{prefix}.finish_reason", response.get("stop_reason"))
    if response.get("role"):
        set_span_attribute(span, f"{prefix}.role", response.get("role"))

    if response.get("completion"):
        # Legacy text-completions API: a single completion string.
        set_span_attribute(span, f"{prefix}.content", response.get("completion"))
        return
    blocks = response.get("content")
    if not blocks:
        return

    tool_idx = 0
    collected_text = ""
    for block in blocks:
        block_type = block.type
        if block_type == "text" and hasattr(block, "text"):
            # Anthropic usually returns one text block, but several are
            # allowed by the API, so concatenate them all.
            collected_text += block.text or ""
        elif block_type == "thinking":
            block = dict(block)
            # Thinking output becomes its own completion with the role
            # overridden to "thinking".
            set_span_attribute(span, f"{prefix}.role", "thinking")
            set_span_attribute(span, f"{prefix}.content", block.get("thinking"))
            # Advance to the next completion slot and restore the original
            # role for the blocks that follow.
            completion_index += 1
            prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{completion_index}"
            set_span_attribute(span, f"{prefix}.role", response.get("role"))
        elif block_type == "tool_use":
            block = dict(block)
            set_span_attribute(
                span,
                f"{prefix}.tool_calls.{tool_idx}.id",
                block.get("id"),
            )
            set_span_attribute(
                span,
                f"{prefix}.tool_calls.{tool_idx}.name",
                block.get("name"),
            )
            tool_arguments = block.get("input")
            if tool_arguments is not None:
                set_span_attribute(
                    span,
                    f"{prefix}.tool_calls.{tool_idx}.arguments",
                    json.dumps(tool_arguments),
                )
            tool_idx += 1
    set_span_attribute(span, f"{prefix}.content", collected_text)
306
+
307
+
308
@dont_throw
async def aset_response_attributes(span, response):
    """Async: record model, response id, and token usage on *span*."""
    data = await _aextract_response_data(response)
    set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, data.get("model"))
    set_span_attribute(span, GEN_AI_RESPONSE_ID, data.get("id"))

    usage = data.get("usage")
    if usage:
        if hasattr(usage, "service_tier"):
            set_span_attribute(
                span,
                "anthropic.response.service_tier",
                usage.service_tier,
            )
        input_tokens = usage.input_tokens
        output_tokens = usage.output_tokens
        set_span_attribute(span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, input_tokens)
        set_span_attribute(
            span, SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, output_tokens
        )
        set_span_attribute(
            span,
            SpanAttributes.LLM_USAGE_TOTAL_TOKENS,
            input_tokens + output_tokens,
        )

    await _aset_span_completions(span, data)
334
+
335
+
336
@dont_throw
def set_response_attributes(span, response):
    """Record model, response id, and token usage on *span*."""
    data = _extract_response_data(response)
    set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, data.get("model"))
    set_span_attribute(span, GEN_AI_RESPONSE_ID, data.get("id"))

    usage = data.get("usage")
    if usage:
        if hasattr(usage, "service_tier"):
            set_span_attribute(
                span,
                "anthropic.response.service_tier",
                usage.service_tier,
            )
        input_tokens = usage.input_tokens
        output_tokens = usage.output_tokens
        set_span_attribute(span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, input_tokens)
        set_span_attribute(
            span, SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, output_tokens
        )
        set_span_attribute(
            span,
            SpanAttributes.LLM_USAGE_TOTAL_TOKENS,
            input_tokens + output_tokens,
        )

    _set_span_completions(span, data)
362
+
363
+
364
@dont_throw
def set_streaming_response_attributes(span, complete_response_events):
    """Record accumulated streaming events as completion attributes on *span*.

    Each event dict is read for "type", "finish_reason", and either "text"
    or tool-use fields ("id", "name", "input"). No-op when prompt capture is
    disabled, the span is not recording, or there are no events.
    """
    if not should_send_prompts():
        return

    if not span.is_recording() or not complete_response_events:
        return

    index = 0
    for event in complete_response_events:
        prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{index}"
        set_span_attribute(span, f"{prefix}.finish_reason", event.get("finish_reason"))
        role = "thinking" if event.get("type") == "thinking" else "assistant"
        # Thinking is added as a separate completion, so we need to increment the index
        # NOTE(review): the index only advances after a "thinking" event, so
        # consecutive non-thinking events all write to the same completion
        # slot — confirm this overwrite is intended.
        if event.get("type") == "thinking":
            index += 1
        set_span_attribute(span, f"{prefix}.role", role)
        if event.get("type") == "tool_use":
            set_span_attribute(
                span,
                f"{prefix}.tool_calls.0.id",
                event.get("id"),
            )
            set_span_attribute(
                span,
                f"{prefix}.tool_calls.0.name",
                event.get("name"),
            )
            tool_arguments = event.get("input")
            if tool_arguments is not None:
                set_span_attribute(
                    span,
                    f"{prefix}.tool_calls.0.arguments",
                    # already stringified
                    tool_arguments,
                )
        else:
            set_span_attribute(span, f"{prefix}.content", event.get("text"))