lmnr 0.6.20__py3-none-any.whl → 0.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. lmnr/__init__.py +0 -4
  2. lmnr/opentelemetry_lib/decorators/__init__.py +211 -151
  3. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/__init__.py +678 -0
  4. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/config.py +13 -0
  5. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_emitter.py +211 -0
  6. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_models.py +41 -0
  7. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/span_utils.py +256 -0
  8. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/streaming.py +295 -0
  9. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/utils.py +179 -0
  10. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/version.py +1 -0
  11. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py +4 -0
  12. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/__init__.py +488 -0
  13. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/config.py +8 -0
  14. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_emitter.py +143 -0
  15. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_models.py +41 -0
  16. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/span_utils.py +229 -0
  17. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/utils.py +92 -0
  18. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/version.py +1 -0
  19. lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/__init__.py +16 -16
  20. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +3 -0
  21. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +3 -0
  22. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/utils.py +3 -3
  23. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +3 -0
  24. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/responses_wrappers.py +7 -0
  25. lmnr/opentelemetry_lib/opentelemetry/instrumentation/threading/__init__.py +190 -0
  26. lmnr/opentelemetry_lib/tracing/__init__.py +90 -2
  27. lmnr/opentelemetry_lib/tracing/_instrument_initializers.py +12 -7
  28. lmnr/opentelemetry_lib/tracing/context.py +109 -0
  29. lmnr/opentelemetry_lib/tracing/processor.py +6 -7
  30. lmnr/opentelemetry_lib/tracing/tracer.py +29 -0
  31. lmnr/opentelemetry_lib/utils/package_check.py +9 -0
  32. lmnr/sdk/browser/browser_use_otel.py +9 -7
  33. lmnr/sdk/browser/patchright_otel.py +14 -26
  34. lmnr/sdk/browser/playwright_otel.py +72 -73
  35. lmnr/sdk/browser/pw_utils.py +436 -119
  36. lmnr/sdk/client/asynchronous/resources/browser_events.py +1 -0
  37. lmnr/sdk/decorators.py +39 -4
  38. lmnr/sdk/evaluations.py +23 -9
  39. lmnr/sdk/laminar.py +181 -209
  40. lmnr/sdk/types.py +0 -6
  41. lmnr/version.py +1 -1
  42. {lmnr-0.6.20.dist-info → lmnr-0.7.0.dist-info}/METADATA +10 -8
  43. {lmnr-0.6.20.dist-info → lmnr-0.7.0.dist-info}/RECORD +45 -29
  44. {lmnr-0.6.20.dist-info → lmnr-0.7.0.dist-info}/WHEEL +1 -1
  45. lmnr/opentelemetry_lib/tracing/context_properties.py +0 -65
  46. {lmnr-0.6.20.dist-info → lmnr-0.7.0.dist-info}/entry_points.txt +0 -0
lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_emitter.py
@@ -0,0 +1,211 @@
+from dataclasses import asdict
+from enum import Enum
+from typing import Optional, Union
+
+from opentelemetry._events import Event, EventLogger
+from .event_models import (
+    ChoiceEvent,
+    MessageEvent,
+    ToolCall,
+)
+from .utils import (
+    should_emit_events,
+    should_send_prompts,
+)
+from opentelemetry.semconv._incubating.attributes import (
+    gen_ai_attributes as GenAIAttributes,
+)
+
+
+class Roles(Enum):
+    USER = "user"
+    ASSISTANT = "assistant"
+    SYSTEM = "system"
+    TOOL = "tool"
+
+
+VALID_MESSAGE_ROLES = {role.value for role in Roles}
+"""The valid roles for naming the message event."""
+
+EVENT_ATTRIBUTES = {
+    GenAIAttributes.GEN_AI_SYSTEM: GenAIAttributes.GenAiSystemValues.ANTHROPIC.value
+}
+"""The attributes to be used for the event."""
+
+
+def emit_input_events(event_logger: Optional[EventLogger], kwargs):
+    if kwargs.get("prompt") is not None:
+        emit_event(
+            MessageEvent(content=kwargs.get("prompt"), role="user"), event_logger
+        )
+
+    elif kwargs.get("messages") is not None:
+        if kwargs.get("system"):
+            emit_event(
+                MessageEvent(content=kwargs.get("system"), role="system"), event_logger
+            )
+        for message in kwargs.get("messages"):
+            emit_event(
+                MessageEvent(content=message.get("content"), role=message.get("role")),
+                event_logger,
+            )
+    if kwargs.get("tools") is not None:
+        emit_event(
+            MessageEvent(content={"tools": kwargs.get("tools")}, role="user"),
+            event_logger,
+        )
+
+
+def emit_response_events(event_logger: Optional[EventLogger], response):
+    if not isinstance(response, dict):
+        response = dict(response)
+
+    if response.get("completion"):
+        emit_event(
+            ChoiceEvent(
+                index=0,
+                message={
+                    "content": response.get("completion"),
+                    "role": response.get("role", "assistant"),
+                },
+                finish_reason=response.get("stop_reason"),
+            ),
+            event_logger,
+        )
+    elif response.get("content"):
+        for i, completion in enumerate(response.get("content")):
+            # Parse message
+            if completion.type == "text":
+                message = {
+                    "content": completion.text,
+                    "role": response.get("role", "assistant"),
+                }
+            elif completion.type == "thinking":
+                message = {
+                    "content": completion.thinking,
+                    "role": response.get("role", "assistant"),
+                }
+            elif completion.type == "tool_use":
+                message = {
+                    "content": None,
+                    "role": response.get("role", "assistant"),
+                }
+            else:
+                message = {
+                    "content": None,
+                    "role": response.get("role", "assistant"),
+                }
+
+            # Parse tool calls
+            if completion.type == "tool_use":
+                tool_calls = [
+                    ToolCall(
+                        id=completion.id,
+                        function={
+                            "name": completion.name,
+                            "arguments": completion.input,
+                        },
+                        type="function",
+                    )
+                ]
+            else:
+                tool_calls = None
+
+            # Emit the event
+            emit_event(
+                ChoiceEvent(
+                    index=i,
+                    message=message,
+                    finish_reason=response.get("stop_reason"),
+                    tool_calls=tool_calls,
+                ),
+                event_logger,
+            )
+
+
+def emit_streaming_response_events(
+    event_logger: Optional[EventLogger], complete_response: dict
+):
+    for message in complete_response.get("events", []):
+        emit_event(
+            ChoiceEvent(
+                index=message.get("index", 0),
+                message={
+                    "content": {
+                        "type": message.get("type"),
+                        "content": message.get("text"),
+                    },
+                    "role": message.get("role", "assistant"),
+                },
+                finish_reason=message.get("finish_reason", "unknown"),
+            ),
+            event_logger,
+        )
+
+
+def emit_event(
+    event: Union[MessageEvent, ChoiceEvent], event_logger: EventLogger
+) -> None:
+    """
+    Emit an event to the OpenTelemetry SDK.
+
+    Args:
+        event: The event to emit.
+    """
+    if not should_emit_events():
+        return
+
+    if isinstance(event, MessageEvent):
+        _emit_message_event(event, event_logger)
+    elif isinstance(event, ChoiceEvent):
+        _emit_choice_event(event, event_logger)
+    else:
+        raise TypeError("Unsupported event type")
+
+
+def _emit_message_event(event: MessageEvent, event_logger: EventLogger) -> None:
+    body = asdict(event)
+
+    if event.role in VALID_MESSAGE_ROLES:
+        name = "gen_ai.{}.message".format(event.role)
+        # According to the semantic conventions, the role is conditionally required if available
+        # and not equal to the "role" in the message name. So, remove the role from the body if
+        # it is the same as the one in the event name.
+        body.pop("role", None)
+    else:
+        name = "gen_ai.user.message"
+
+    # According to the semantic conventions, only the assistant role has tool calls
+    if event.role != Roles.ASSISTANT.value and event.tool_calls is not None:
+        del body["tool_calls"]
+    elif event.tool_calls is None:
+        del body["tool_calls"]
+
+    if not should_send_prompts():
+        del body["content"]
+        if body.get("tool_calls") is not None:
+            for tool_call in body["tool_calls"]:
+                tool_call["function"].pop("arguments", None)
+
+    event_logger.emit(Event(name=name, body=body, attributes=EVENT_ATTRIBUTES))
+
+
+def _emit_choice_event(event: ChoiceEvent, event_logger: EventLogger) -> None:
+    body = asdict(event)
+    if event.message["role"] == Roles.ASSISTANT.value:
+        # According to the semantic conventions, the role is conditionally required if available
+        # and not equal to "assistant", so remove the role from the body if it is "assistant".
+        body["message"].pop("role", None)
+
+    if event.tool_calls is None:
+        del body["tool_calls"]
+
+    if not should_send_prompts():
+        body["message"].pop("content", None)
+        if body.get("tool_calls") is not None:
+            for tool_call in body["tool_calls"]:
+                tool_call["function"].pop("arguments", None)
+
+    event_logger.emit(
+        Event(name="gen_ai.choice", body=body, attributes=EVENT_ATTRIBUTES)
+    )
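Not part of the diff, for orientation only: this emitter is normally driven by the instrumentor in the package's __init__.py, but its public surface can be exercised directly. A minimal sketch, assuming the vendored module paths shown in the file index and the stock OpenTelemetry events API; note that should_emit_events() may make this a no-op depending on the vendored Config, and that without a configured EventLoggerProvider the emitted events go nowhere:

    from opentelemetry._events import get_event_logger

    from lmnr.opentelemetry_lib.opentelemetry.instrumentation.anthropic.event_emitter import (
        emit_event,
    )
    from lmnr.opentelemetry_lib.opentelemetry.instrumentation.anthropic.event_models import (
        ChoiceEvent,
        MessageEvent,
    )

    # EventLogger from the global provider; a real setup would configure an
    # EventLoggerProvider with an exporter first.
    event_logger = get_event_logger("anthropic-demo")

    # One input message and one completion choice, mirroring the shapes that
    # emit_input_events / emit_response_events build internally.
    emit_event(MessageEvent(content="What is OpenTelemetry?", role="user"), event_logger)
    emit_event(
        ChoiceEvent(
            index=0,
            message={"content": "A set of observability APIs.", "role": "assistant"},
            finish_reason="end_turn",
        ),
        event_logger,
    )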
lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_models.py
@@ -0,0 +1,41 @@
+from dataclasses import dataclass
+from typing import Any, List, Literal, Optional, TypedDict
+
+
+class _FunctionToolCall(TypedDict):
+    function_name: str
+    arguments: Optional[dict[str, Any]]
+
+
+class ToolCall(TypedDict):
+    """Represents a tool call in the AI model."""
+
+    id: str
+    function: _FunctionToolCall
+    type: Literal["function"]
+
+
+class CompletionMessage(TypedDict):
+    """Represents a message in the AI model."""
+
+    content: Any
+    role: str = "assistant"
+
+
+@dataclass
+class MessageEvent:
+    """Represents an input event for the AI model."""
+
+    content: Any
+    role: str = "user"
+    tool_calls: Optional[List[ToolCall]] = None
+
+
+@dataclass
+class ChoiceEvent:
+    """Represents a completion event for the AI model."""
+
+    index: int
+    message: CompletionMessage
+    finish_reason: str = "unknown"
+    tool_calls: Optional[List[ToolCall]] = None
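Not part of the diff: MessageEvent and ChoiceEvent are plain dataclasses, so the event body that event_emitter.py serializes is simply dataclasses.asdict output (the TypedDicts are unchecked at runtime, which is why the emitter can pass function={"name": ..., "arguments": ...}). A quick illustration, assuming the vendored module path from the file index:

    from dataclasses import asdict

    from lmnr.opentelemetry_lib.opentelemetry.instrumentation.anthropic.event_models import (
        ChoiceEvent,
        ToolCall,
    )

    choice = ChoiceEvent(
        index=0,
        message={"content": None, "role": "assistant"},
        finish_reason="tool_use",
        tool_calls=[
            ToolCall(
                id="toolu_123",  # hypothetical tool-use id
                function={"name": "get_weather", "arguments": {"city": "Paris"}},
                type="function",
            )
        ],
    )
    # asdict recurses into the nested dicts, producing the body that
    # _emit_choice_event then prunes (role, tool_calls, content) before emitting.
    print(asdict(choice))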
lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/span_utils.py
@@ -0,0 +1,256 @@
+import json
+import logging
+from typing import Any, Dict
+
+from .config import Config
+from .utils import (
+    JSONEncoder,
+    dont_throw,
+    model_as_dict,
+    should_send_prompts,
+)
+from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import (
+    GEN_AI_RESPONSE_ID,
+)
+from opentelemetry.semconv_ai import SpanAttributes
+
+logger = logging.getLogger(__name__)
+
+
+def _is_base64_image(item: Dict[str, Any]) -> bool:
+    if not isinstance(item, dict):
+        return False
+
+    if not isinstance(item.get("source"), dict):
+        return False
+
+    if item.get("type") != "image" or item["source"].get("type") != "base64":
+        return False
+
+    return True
+
+
+async def _process_image_item(item, trace_id, span_id, message_index, content_index):
+    if not Config.upload_base64_image:
+        return item
+
+    image_format = item.get("source").get("media_type").split("/")[1]
+    image_name = f"message_{message_index}_content_{content_index}.{image_format}"
+    base64_string = item.get("source").get("data")
+    url = await Config.upload_base64_image(trace_id, span_id, image_name, base64_string)
+
+    return {"type": "image_url", "image_url": {"url": url}}
+
+
+async def _dump_content(message_index, content, span):
+    if isinstance(content, str):
+        return content
+    elif isinstance(content, list):
+        # If the content is a list of text blocks, concatenate them.
+        # This is more commonly used in prompt caching.
+        if all([model_as_dict(item).get("type") == "text" for item in content]):
+            return "".join([model_as_dict(item).get("text") for item in content])
+
+        content = [
+            (
+                await _process_image_item(
+                    model_as_dict(item),
+                    span.context.trace_id,
+                    span.context.span_id,
+                    message_index,
+                    j,
+                )
+                if _is_base64_image(model_as_dict(item))
+                else model_as_dict(item)
+            )
+            for j, item in enumerate(content)
+        ]
+
+        return json.dumps(content, cls=JSONEncoder)
+
+
+@dont_throw
+async def aset_input_attributes(span, kwargs):
+    from .utils import set_span_attribute
+
+    set_span_attribute(span, SpanAttributes.LLM_REQUEST_MODEL, kwargs.get("model"))
+    set_span_attribute(
+        span, SpanAttributes.LLM_REQUEST_MAX_TOKENS, kwargs.get("max_tokens_to_sample")
+    )
+    set_span_attribute(
+        span, SpanAttributes.LLM_REQUEST_TEMPERATURE, kwargs.get("temperature")
+    )
+    set_span_attribute(span, SpanAttributes.LLM_REQUEST_TOP_P, kwargs.get("top_p"))
+    set_span_attribute(
+        span, SpanAttributes.LLM_FREQUENCY_PENALTY, kwargs.get("frequency_penalty")
+    )
+    set_span_attribute(
+        span, SpanAttributes.LLM_PRESENCE_PENALTY, kwargs.get("presence_penalty")
+    )
+    set_span_attribute(span, SpanAttributes.LLM_IS_STREAMING, kwargs.get("stream"))
+
+    if should_send_prompts():
+        if kwargs.get("prompt") is not None:
+            set_span_attribute(
+                span, f"{SpanAttributes.LLM_PROMPTS}.0.user", kwargs.get("prompt")
+            )
+
+        elif kwargs.get("messages") is not None:
+            has_system_message = False
+            if kwargs.get("system"):
+                has_system_message = True
+                set_span_attribute(
+                    span,
+                    f"{SpanAttributes.LLM_PROMPTS}.0.content",
+                    await _dump_content(
+                        message_index=0, span=span, content=kwargs.get("system")
+                    ),
+                )
+                set_span_attribute(
+                    span,
+                    f"{SpanAttributes.LLM_PROMPTS}.0.role",
+                    "system",
+                )
+            for i, message in enumerate(kwargs.get("messages")):
+                prompt_index = i + (1 if has_system_message else 0)
+                set_span_attribute(
+                    span,
+                    f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.content",
+                    await _dump_content(
+                        message_index=i, span=span, content=message.get("content")
+                    ),
+                )
+                set_span_attribute(
+                    span,
+                    f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.role",
+                    message.get("role"),
+                )
+
+        if kwargs.get("tools") is not None:
+            for i, tool in enumerate(kwargs.get("tools")):
+                prefix = f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.{i}"
+                set_span_attribute(span, f"{prefix}.name", tool.get("name"))
+                set_span_attribute(
+                    span, f"{prefix}.description", tool.get("description")
+                )
+                input_schema = tool.get("input_schema")
+                if input_schema is not None:
+                    set_span_attribute(
+                        span, f"{prefix}.input_schema", json.dumps(input_schema)
+                    )
+
+
+def _set_span_completions(span, response):
+    if not should_send_prompts():
+        return
+    from .utils import set_span_attribute
+
+    index = 0
+    prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{index}"
+    set_span_attribute(span, f"{prefix}.finish_reason", response.get("stop_reason"))
+    if response.get("role"):
+        set_span_attribute(span, f"{prefix}.role", response.get("role"))
+
+    if response.get("completion"):
+        set_span_attribute(span, f"{prefix}.content", response.get("completion"))
+    elif response.get("content"):
+        tool_call_index = 0
+        text = ""
+        for content in response.get("content"):
+            content_block_type = content.type
+            # usually, Anthropic responds with just one text block,
+            # but the API allows for multiple text blocks, so concatenate them
+            if content_block_type == "text":
+                text += content.text
+            elif content_block_type == "thinking":
+                content = dict(content)
+                # override the role to thinking
+                set_span_attribute(
+                    span,
+                    f"{prefix}.role",
+                    "thinking",
+                )
+                set_span_attribute(
+                    span,
+                    f"{prefix}.content",
+                    content.get("thinking"),
+                )
+                # increment the index for subsequent content blocks
+                index += 1
+                prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{index}"
+                # set the role to the original role on the next completions
+                set_span_attribute(
+                    span,
+                    f"{prefix}.role",
+                    response.get("role"),
+                )
+            elif content_block_type == "tool_use":
+                content = dict(content)
+                set_span_attribute(
+                    span,
+                    f"{prefix}.tool_calls.{tool_call_index}.id",
+                    content.get("id"),
+                )
+                set_span_attribute(
+                    span,
+                    f"{prefix}.tool_calls.{tool_call_index}.name",
+                    content.get("name"),
+                )
+                tool_arguments = content.get("input")
+                if tool_arguments is not None:
+                    set_span_attribute(
+                        span,
+                        f"{prefix}.tool_calls.{tool_call_index}.arguments",
+                        json.dumps(tool_arguments),
+                    )
+                tool_call_index += 1
+        set_span_attribute(span, f"{prefix}.content", text)
+
+
+@dont_throw
+def set_response_attributes(span, response):
+    from .utils import set_span_attribute
+
+    if not isinstance(response, dict):
+        response = response.__dict__
+    set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, response.get("model"))
+    set_span_attribute(span, GEN_AI_RESPONSE_ID, response.get("id"))
+
+    if response.get("usage"):
+        prompt_tokens = response.get("usage").input_tokens
+        completion_tokens = response.get("usage").output_tokens
+        set_span_attribute(span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, prompt_tokens)
+        set_span_attribute(
+            span, SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, completion_tokens
+        )
+        set_span_attribute(
+            span,
+            SpanAttributes.LLM_USAGE_TOTAL_TOKENS,
+            prompt_tokens + completion_tokens,
+        )
+
+    _set_span_completions(span, response)
+
+
+@dont_throw
+def set_streaming_response_attributes(span, complete_response_events):
+    if not should_send_prompts():
+        return
+
+    from .utils import set_span_attribute
+
+    if not span.is_recording() or not complete_response_events:
+        return
+
+    try:
+        for event in complete_response_events:
+            index = event.get("index")
+            prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{index}"
+            set_span_attribute(
+                span, f"{prefix}.finish_reason", event.get("finish_reason")
+            )
+            role = "thinking" if event.get("type") == "thinking" else "assistant"
+            set_span_attribute(span, f"{prefix}.role", role)
+            set_span_attribute(span, f"{prefix}.content", event.get("text"))
+    except Exception as e:
+        logger.warning("Failed to set completion attributes, error: %s", str(e))
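Not part of the diff, but helpful when reading _is_base64_image and _process_image_item above: the shape they recognize is Anthropic's base64 image content block. A minimal illustration of the accepted input and the rewritten output, derived only from the code shown:

    item = {
        "type": "image",
        "source": {
            "type": "base64",
            "media_type": "image/png",  # _process_image_item derives ".png" from this
            "data": "iVBORw0KGgo...",   # base64 payload (truncated here)
        },
    }
    # _is_base64_image(item) -> True. When Config.upload_base64_image is set,
    # _process_image_item uploads the payload and substitutes the block with:
    # {"type": "image_url", "image_url": {"url": "<returned url>"}}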