lmnr 0.6.16__py3-none-any.whl → 0.7.26__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lmnr/__init__.py +6 -15
- lmnr/cli/__init__.py +270 -0
- lmnr/cli/datasets.py +371 -0
- lmnr/{cli.py → cli/evals.py} +20 -102
- lmnr/cli/rules.py +42 -0
- lmnr/opentelemetry_lib/__init__.py +9 -2
- lmnr/opentelemetry_lib/decorators/__init__.py +274 -168
- lmnr/opentelemetry_lib/litellm/__init__.py +352 -38
- lmnr/opentelemetry_lib/litellm/utils.py +82 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/__init__.py +849 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/config.py +13 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_emitter.py +211 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_models.py +41 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/span_utils.py +401 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/streaming.py +425 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/utils.py +332 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/version.py +1 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/claude_agent/__init__.py +451 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/claude_agent/proxy.py +144 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_agent/__init__.py +100 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_computer/__init__.py +476 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_computer/utils.py +12 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py +191 -129
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/schema_utils.py +26 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py +126 -41
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/__init__.py +488 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/config.py +8 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_emitter.py +143 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_models.py +41 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/span_utils.py +229 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/utils.py +92 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/version.py +1 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/kernel/__init__.py +381 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/kernel/utils.py +36 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/__init__.py +16 -16
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/__init__.py +61 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/__init__.py +472 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +1185 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +305 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/config.py +16 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +312 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_emitter.py +100 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_models.py +41 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +68 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/utils.py +197 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v0/__init__.py +176 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/__init__.py +368 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +325 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +135 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/responses_wrappers.py +786 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/version.py +1 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openhands_ai/__init__.py +388 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/opentelemetry/__init__.py +69 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/skyvern/__init__.py +59 -61
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/threading/__init__.py +197 -0
- lmnr/opentelemetry_lib/tracing/__init__.py +119 -18
- lmnr/opentelemetry_lib/tracing/_instrument_initializers.py +124 -25
- lmnr/opentelemetry_lib/tracing/attributes.py +4 -0
- lmnr/opentelemetry_lib/tracing/context.py +200 -0
- lmnr/opentelemetry_lib/tracing/exporter.py +109 -15
- lmnr/opentelemetry_lib/tracing/instruments.py +22 -5
- lmnr/opentelemetry_lib/tracing/processor.py +128 -30
- lmnr/opentelemetry_lib/tracing/span.py +398 -0
- lmnr/opentelemetry_lib/tracing/tracer.py +40 -1
- lmnr/opentelemetry_lib/tracing/utils.py +62 -0
- lmnr/opentelemetry_lib/utils/package_check.py +9 -0
- lmnr/opentelemetry_lib/utils/wrappers.py +11 -0
- lmnr/sdk/browser/background_send_events.py +158 -0
- lmnr/sdk/browser/browser_use_cdp_otel.py +100 -0
- lmnr/sdk/browser/browser_use_otel.py +12 -12
- lmnr/sdk/browser/bubus_otel.py +71 -0
- lmnr/sdk/browser/cdp_utils.py +518 -0
- lmnr/sdk/browser/inject_script.js +514 -0
- lmnr/sdk/browser/patchright_otel.py +18 -44
- lmnr/sdk/browser/playwright_otel.py +104 -187
- lmnr/sdk/browser/pw_utils.py +249 -210
- lmnr/sdk/browser/recorder/record.umd.min.cjs +84 -0
- lmnr/sdk/browser/utils.py +1 -1
- lmnr/sdk/client/asynchronous/async_client.py +47 -15
- lmnr/sdk/client/asynchronous/resources/__init__.py +2 -7
- lmnr/sdk/client/asynchronous/resources/browser_events.py +1 -0
- lmnr/sdk/client/asynchronous/resources/datasets.py +131 -0
- lmnr/sdk/client/asynchronous/resources/evals.py +122 -18
- lmnr/sdk/client/asynchronous/resources/evaluators.py +85 -0
- lmnr/sdk/client/asynchronous/resources/tags.py +4 -10
- lmnr/sdk/client/synchronous/resources/__init__.py +2 -2
- lmnr/sdk/client/synchronous/resources/datasets.py +131 -0
- lmnr/sdk/client/synchronous/resources/evals.py +83 -17
- lmnr/sdk/client/synchronous/resources/evaluators.py +85 -0
- lmnr/sdk/client/synchronous/resources/tags.py +4 -10
- lmnr/sdk/client/synchronous/sync_client.py +47 -15
- lmnr/sdk/datasets/__init__.py +94 -0
- lmnr/sdk/datasets/file_utils.py +91 -0
- lmnr/sdk/decorators.py +103 -23
- lmnr/sdk/evaluations.py +122 -33
- lmnr/sdk/laminar.py +816 -333
- lmnr/sdk/log.py +7 -2
- lmnr/sdk/types.py +124 -143
- lmnr/sdk/utils.py +115 -2
- lmnr/version.py +1 -1
- {lmnr-0.6.16.dist-info → lmnr-0.7.26.dist-info}/METADATA +71 -78
- lmnr-0.7.26.dist-info/RECORD +116 -0
- lmnr-0.7.26.dist-info/WHEEL +4 -0
- lmnr-0.7.26.dist-info/entry_points.txt +3 -0
- lmnr/opentelemetry_lib/tracing/context_properties.py +0 -65
- lmnr/sdk/browser/rrweb/rrweb.umd.min.cjs +0 -98
- lmnr/sdk/client/asynchronous/resources/agent.py +0 -329
- lmnr/sdk/client/synchronous/resources/agent.py +0 -323
- lmnr/sdk/datasets.py +0 -60
- lmnr-0.6.16.dist-info/LICENSE +0 -75
- lmnr-0.6.16.dist-info/RECORD +0 -61
- lmnr-0.6.16.dist-info/WHEEL +0 -4
- lmnr-0.6.16.dist-info/entry_points.txt +0 -3
lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py

```diff
@@ -1,6 +1,8 @@
 import base64
+from collections import defaultdict
 import logging
 import traceback
+from typing_extensions import TypedDict
 
 from .config import (
     Config,
@@ -33,10 +35,64 @@ class ProcessedContentPart(pydantic.BaseModel):
     image_url: ImageUrl | None = pydantic.Field(default=None)
 
 
-
-
-
-
+class ProcessChunkResult(TypedDict):
+    role: str
+    model_version: str | None
+
+
+def merge_text_parts(
+    parts: list[types.PartDict | types.File | types.Part | str],
+) -> list[types.Part]:
+    if not parts:
+        return []
+
+    merged_parts: list[types.Part] = []
+    accumulated_text = ""
+
+    for part in parts:
+        # Handle string input - treat as text
+        if isinstance(part, str):
+            accumulated_text += part
+        # Handle File objects - they are not text, so don't merge
+        elif isinstance(part, types.File):
+            # Flush any accumulated text first
+            if accumulated_text:
+                merged_parts.append(types.Part(text=accumulated_text))
+                accumulated_text = ""
+            # Add the File as-is (wrapped in a Part if needed)
+            # Note: File objects should be passed through as-is in the original part
+            merged_parts.append(part)
+        # Handle Part and PartDict (dicts)
+        else:
+            part_dict = to_dict(part)
+
+            # Check if this is a text part
+            if part_dict.get("text") is not None:
+                accumulated_text += part_dict.get("text")
+            else:
+                # Non-text part (inline_data, function_call, etc.)
+                # Flush any accumulated text first
+                if accumulated_text:
+                    merged_parts.append(types.Part(text=accumulated_text))
+                    accumulated_text = ""
+
+                # Add the non-text part as-is
+                if isinstance(part, types.Part):
+                    merged_parts.append(part)
+                elif isinstance(part, dict):
+                    # Convert dict to Part object
+                    merged_parts.append(types.Part(**part_dict))
+
+    # Don't forget to add any remaining accumulated text
+    if accumulated_text:
+        merged_parts.append(types.Part(text=accumulated_text))
+
+    return merged_parts
+
+
+def set_span_attribute(span: Span, name: str, value: Any):
+    if value is not None and value != "":
+        span.set_attribute(name, value)
     return
 
 
```
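The added merge_text_parts coalesces adjacent text segments into a single Part while passing non-text parts through unchanged. A minimal sketch of the intended behavior, assuming the google.genai types module and the merge_text_parts helper added above; the input values are hypothetical, not taken from the diff:

```python
from google.genai import types

# Hypothetical inputs: three adjacent text-like parts, then a function call,
# then one more string.
parts = [
    "Hello, ",
    types.Part(text="world. "),
    {"text": "How are you?"},
    types.Part(
        function_call=types.FunctionCall(name="get_weather", args={"city": "Paris"})
    ),
    "Goodbye.",
]

merged = merge_text_parts(parts)
# Expected: three parts in order:
#   Part(text="Hello, world. How are you?"),
#   the function_call Part passed through as-is,
#   Part(text="Goodbye.")
```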
```diff
@@ -84,7 +140,7 @@ def get_content(
     content: (
         ProcessedContentPart | dict | list[ProcessedContentPart | dict] | str | None
     ),
-) -> list[
+) -> dict | list[dict] | None:
     if isinstance(content, dict):
         return content.get("content") or content.get("image_url")
     if isinstance(content, ProcessedContentPart):
@@ -98,7 +154,8 @@ def get_content(
         else:
             return None
     elif isinstance(content, list):
-
+        contents_list = [get_content(item) for item in content]
+        return [item for item in contents_list if item is not None]
     elif isinstance(content, str):
         return {
             "type": "text",
@@ -110,9 +167,6 @@
 
 def process_content_union(
     content: types.ContentUnion | types.ContentUnionDict,
-    trace_id: str | None = None,
-    span_id: str | None = None,
-    message_index: int = 0,
 ) -> ProcessedContentPart | dict | list[ProcessedContentPart | dict] | None:
     if isinstance(content, types.Content):
         parts = to_dict(content).get("parts", [])
@@ -123,25 +177,16 @@ def process_content_union(
         return _process_part_union(content)
     elif isinstance(content, dict):
         if "parts" in content:
-            return [
-                _process_part_union(
-                    item, trace_id, span_id, message_index, content_index
-                )
-                for content_index, item in enumerate(content.get("parts", []))
-            ]
+            return [_process_part_union(item) for item in content.get("parts", [])]
         else:
             # Assume it's PartDict
-            return _process_part_union(content
+            return _process_part_union(content)
     else:
         return None
 
 
 def _process_part_union(
     content: types.PartDict | types.File | types.Part | str,
-    trace_id: str | None = None,
-    span_id: str | None = None,
-    message_index: int = 0,
-    content_index: int = 0,
 ) -> ProcessedContentPart | dict | None:
     if isinstance(content, str):
         return ProcessedContentPart(content=content)
```
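Per the hunks above, get_content now filters None entries out of list inputs: a dict that carries neither "content" nor "image_url" resolves to None and is dropped. A small illustration with hypothetical values, not taken from the diff:

```python
# Hypothetical list input: the second item resolves to None (no "content" or
# "image_url" key) and is filtered out of the returned list.
items = [
    {"content": {"type": "text", "text": "hello"}},
    {"unrelated": "value"},
]

result = get_content(items)
# result == [{"type": "text", "text": "hello"}]
```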
```diff
@@ -154,36 +199,31 @@ def _process_part_union(
         )
         return ProcessedContentPart(content=f"files/{name}")
     elif isinstance(content, (types.Part, dict)):
-        return _process_part(content
+        return _process_part(content)
     else:
         return None
 
 
 def _process_part(
     content: types.Part,
-    trace_id: str | None = None,
-    span_id: str | None = None,
-    message_index: int = 0,
-    content_index: int = 0,
 ) -> ProcessedContentPart | dict | None:
     part_dict = to_dict(content)
     if part_dict.get("inline_data"):
         blob = to_dict(part_dict.get("inline_data"))
-        if blob.get("mime_type").startswith("image/"):
-            return _process_image_item(
-                blob, trace_id, span_id, message_index, content_index
-            )
+        if blob.get("mime_type", "").startswith("image/"):
+            return _process_image_item(blob)
         else:
             # currently, only images are supported
             return ProcessedContentPart(
                 content=blob.get("mime_type") or "unknown_media"
             )
-    elif part_dict.get("function_call"):
+    elif function_call := part_dict.get("function_call"):
+        function_call_dict = to_dict(function_call)
         return ProcessedContentPart(
             function_call=ToolCall(
-                name=
-                id=
-                arguments=
+                name=function_call_dict.get("name"),
+                id=function_call_dict.get("id"),
+                arguments=function_call_dict.get("args", {}),
             )
         )
     elif part_dict.get("text") is not None:
```
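With the rewrite above, a Gemini function_call part is normalized into the ToolCall shape without the old tracing bookkeeping parameters. A rough illustration, assuming the google.genai types module and the ToolCall model imported by this file; the part itself is hypothetical:

```python
from google.genai import types

# Hypothetical function-call part, not taken from the diff.
part = types.Part(
    function_call=types.FunctionCall(name="get_weather", args={"city": "Paris"})
)

processed = _process_part(part)
# Roughly: ProcessedContentPart(function_call=ToolCall(
#     name="get_weather", id=None, arguments={"city": "Paris"}))
```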
```diff
@@ -220,26 +260,71 @@ def with_tracer_wrapper(func):
     return _with_tracer
 
 
-def _process_image_item(
-    blob: dict[str, Any],
-    trace_id: str,
-    span_id: str,
-    message_index: int,
-    content_index: int,
-) -> ProcessedContentPart | dict | None:
+def _process_image_item(blob: dict[str, Any]) -> ProcessedContentPart | dict | None:
     # Convert to openai format, so backends can handle it
     data = blob.get("data")
     encoded_data = (
         base64.b64encode(data).decode("utf-8") if isinstance(data, bytes) else data
     )
+    mime_type = blob.get("mime_type", "image/unknown")
+    image_type = mime_type.split("/")[1] if "/" in mime_type else "unknown"
+
     return (
         ProcessedContentPart(
             image_url=ImageUrl(
                 image_url=ImageUrlInner(
-                    url=f"data:image/{
+                    url=f"data:image/{image_type};base64,{encoded_data}",
                 )
             )
         )
         if Config.convert_image_to_openai_format
         else blob
     )
+
+
+@dont_throw
+def process_stream_chunk(
+    chunk: types.GenerateContentResponse,
+    existing_role: str,
+    existing_model_version: str | None,
+    # ============================== #
+    # mutable states, passed by reference
+    aggregated_usage_metadata: defaultdict[str, int],
+    final_parts: list[types.Part | None],
+    # ============================== #
+) -> ProcessChunkResult:
+    role = existing_role
+    model_version = existing_model_version
+
+    if chunk.model_version:
+        model_version = chunk.model_version
+
+    # Currently gemini throws an error if you pass more than one candidate
+    # with streaming
+    if chunk.candidates and len(chunk.candidates) > 0 and chunk.candidates[0].content:
+        final_parts += chunk.candidates[0].content.parts or []
+        role = chunk.candidates[0].content.role or role
+    if chunk.usage_metadata:
+        usage_dict = to_dict(chunk.usage_metadata)
+        # prompt token count is sent in every chunk
+        # (and is less by 1 in the last chunk, so we set it once);
+        # total token count in every chunk is greater by prompt token count than it should be,
+        # thus this awkward logic here
+        if aggregated_usage_metadata.get("prompt_token_count") is None:
+            # or 0, not .get(key, 0), because sometimes the value is explicitly None
+            aggregated_usage_metadata["prompt_token_count"] = (
+                usage_dict.get("prompt_token_count") or 0
+            )
+            aggregated_usage_metadata["total_token_count"] = (
+                usage_dict.get("total_token_count") or 0
+            )
+        aggregated_usage_metadata["candidates_token_count"] += (
+            usage_dict.get("candidates_token_count") or 0
+        )
+        aggregated_usage_metadata["total_token_count"] += (
+            usage_dict.get("candidates_token_count") or 0
+        )
+    return ProcessChunkResult(
+        role=role,
+        model_version=model_version,
+    )
```