lmnr 0.6.16__py3-none-any.whl → 0.7.26__py3-none-any.whl
This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- lmnr/__init__.py +6 -15
- lmnr/cli/__init__.py +270 -0
- lmnr/cli/datasets.py +371 -0
- lmnr/{cli.py → cli/evals.py} +20 -102
- lmnr/cli/rules.py +42 -0
- lmnr/opentelemetry_lib/__init__.py +9 -2
- lmnr/opentelemetry_lib/decorators/__init__.py +274 -168
- lmnr/opentelemetry_lib/litellm/__init__.py +352 -38
- lmnr/opentelemetry_lib/litellm/utils.py +82 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/__init__.py +849 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/config.py +13 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_emitter.py +211 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_models.py +41 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/span_utils.py +401 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/streaming.py +425 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/utils.py +332 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/version.py +1 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/claude_agent/__init__.py +451 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/claude_agent/proxy.py +144 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_agent/__init__.py +100 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_computer/__init__.py +476 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_computer/utils.py +12 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py +191 -129
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/schema_utils.py +26 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py +126 -41
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/__init__.py +488 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/config.py +8 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_emitter.py +143 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_models.py +41 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/span_utils.py +229 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/utils.py +92 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/version.py +1 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/kernel/__init__.py +381 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/kernel/utils.py +36 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/__init__.py +16 -16
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/__init__.py +61 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/__init__.py +472 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +1185 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +305 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/config.py +16 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +312 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_emitter.py +100 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_models.py +41 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +68 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/utils.py +197 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v0/__init__.py +176 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/__init__.py +368 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +325 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +135 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/responses_wrappers.py +786 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/version.py +1 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openhands_ai/__init__.py +388 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/opentelemetry/__init__.py +69 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/skyvern/__init__.py +59 -61
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/threading/__init__.py +197 -0
- lmnr/opentelemetry_lib/tracing/__init__.py +119 -18
- lmnr/opentelemetry_lib/tracing/_instrument_initializers.py +124 -25
- lmnr/opentelemetry_lib/tracing/attributes.py +4 -0
- lmnr/opentelemetry_lib/tracing/context.py +200 -0
- lmnr/opentelemetry_lib/tracing/exporter.py +109 -15
- lmnr/opentelemetry_lib/tracing/instruments.py +22 -5
- lmnr/opentelemetry_lib/tracing/processor.py +128 -30
- lmnr/opentelemetry_lib/tracing/span.py +398 -0
- lmnr/opentelemetry_lib/tracing/tracer.py +40 -1
- lmnr/opentelemetry_lib/tracing/utils.py +62 -0
- lmnr/opentelemetry_lib/utils/package_check.py +9 -0
- lmnr/opentelemetry_lib/utils/wrappers.py +11 -0
- lmnr/sdk/browser/background_send_events.py +158 -0
- lmnr/sdk/browser/browser_use_cdp_otel.py +100 -0
- lmnr/sdk/browser/browser_use_otel.py +12 -12
- lmnr/sdk/browser/bubus_otel.py +71 -0
- lmnr/sdk/browser/cdp_utils.py +518 -0
- lmnr/sdk/browser/inject_script.js +514 -0
- lmnr/sdk/browser/patchright_otel.py +18 -44
- lmnr/sdk/browser/playwright_otel.py +104 -187
- lmnr/sdk/browser/pw_utils.py +249 -210
- lmnr/sdk/browser/recorder/record.umd.min.cjs +84 -0
- lmnr/sdk/browser/utils.py +1 -1
- lmnr/sdk/client/asynchronous/async_client.py +47 -15
- lmnr/sdk/client/asynchronous/resources/__init__.py +2 -7
- lmnr/sdk/client/asynchronous/resources/browser_events.py +1 -0
- lmnr/sdk/client/asynchronous/resources/datasets.py +131 -0
- lmnr/sdk/client/asynchronous/resources/evals.py +122 -18
- lmnr/sdk/client/asynchronous/resources/evaluators.py +85 -0
- lmnr/sdk/client/asynchronous/resources/tags.py +4 -10
- lmnr/sdk/client/synchronous/resources/__init__.py +2 -2
- lmnr/sdk/client/synchronous/resources/datasets.py +131 -0
- lmnr/sdk/client/synchronous/resources/evals.py +83 -17
- lmnr/sdk/client/synchronous/resources/evaluators.py +85 -0
- lmnr/sdk/client/synchronous/resources/tags.py +4 -10
- lmnr/sdk/client/synchronous/sync_client.py +47 -15
- lmnr/sdk/datasets/__init__.py +94 -0
- lmnr/sdk/datasets/file_utils.py +91 -0
- lmnr/sdk/decorators.py +103 -23
- lmnr/sdk/evaluations.py +122 -33
- lmnr/sdk/laminar.py +816 -333
- lmnr/sdk/log.py +7 -2
- lmnr/sdk/types.py +124 -143
- lmnr/sdk/utils.py +115 -2
- lmnr/version.py +1 -1
- {lmnr-0.6.16.dist-info → lmnr-0.7.26.dist-info}/METADATA +71 -78
- lmnr-0.7.26.dist-info/RECORD +116 -0
- lmnr-0.7.26.dist-info/WHEEL +4 -0
- lmnr-0.7.26.dist-info/entry_points.txt +3 -0
- lmnr/opentelemetry_lib/tracing/context_properties.py +0 -65
- lmnr/sdk/browser/rrweb/rrweb.umd.min.cjs +0 -98
- lmnr/sdk/client/asynchronous/resources/agent.py +0 -329
- lmnr/sdk/client/synchronous/resources/agent.py +0 -323
- lmnr/sdk/datasets.py +0 -60
- lmnr-0.6.16.dist-info/LICENSE +0 -75
- lmnr-0.6.16.dist-info/RECORD +0 -61
- lmnr-0.6.16.dist-info/WHEEL +0 -4
- lmnr-0.6.16.dist-info/entry_points.txt +0 -3
lmnr/sdk/log.py
CHANGED
@@ -62,11 +62,16 @@ class VerboseFormatter(CustomFormatter):
         return formatter.format(record)


-def get_default_logger(
+def get_default_logger(
+    name: str, level: int = logging.INFO, propagate: bool = False, verbose: bool = True
+) -> logging.Logger:
     logger = logging.getLogger(name)
     logger.setLevel(level)
     console_log_handler = logging.StreamHandler()
-
+    if verbose:
+        console_log_handler.setFormatter(VerboseColorfulFormatter())
+    else:
+        console_log_handler.setFormatter(ColorfulFormatter())
     logger.addHandler(console_log_handler)
     logger.propagate = propagate
     return logger
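For context, `get_default_logger` now takes a `verbose` flag that selects between the two formatters shown above. A minimal usage sketch, assuming lmnr 0.7.26 is installed (the logger name and message are made up for illustration):

```python
# Minimal usage sketch of the updated helper; assumes lmnr 0.7.26 is installed.
import logging

from lmnr.sdk.log import get_default_logger

# verbose=True (the default) attaches VerboseColorfulFormatter;
# verbose=False falls back to the plain ColorfulFormatter.
logger = get_default_logger("my_app", level=logging.DEBUG, verbose=False)
logger.debug("hello from the plain formatter")
```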
lmnr/sdk/types.py
CHANGED
@@ -3,15 +3,17 @@ from __future__ import annotations  # For "Self" | str | ... type hint
 import json
 import logging
 import datetime
-import
-import pydantic.alias_generators
+from pydantic import BaseModel, Field
 import uuid

 from enum import Enum
 from opentelemetry.trace import SpanContext, TraceFlags
-from typing import Any, Awaitable, Callable,
+from typing import Any, Awaitable, Callable, Optional
+from typing_extensions import TypedDict  # compatibility with python < 3.12

-from .utils import serialize
+from .utils import serialize, json_dumps
+
+DEFAULT_DATAPOINT_MAX_DATA_LENGTH = 16_000_000  # 16MB


 Numeric = int | float
@@ -23,12 +25,24 @@ EvaluationDatapointMetadata = Any | None  # must be JSON-serializable


 # EvaluationDatapoint is a single data point in the evaluation
-class Datapoint(
+class Datapoint(BaseModel):
     # input to the executor function.
     data: EvaluationDatapointData
     # input to the evaluator function (alongside the executor output).
-    target: EvaluationDatapointTarget =
-    metadata: EvaluationDatapointMetadata =
+    target: EvaluationDatapointTarget = Field(default_factory=dict)
+    metadata: EvaluationDatapointMetadata = Field(default_factory=dict)
+    id: uuid.UUID | None = Field(default=None)
+    created_at: datetime.datetime | None = Field(default=None, alias="createdAt")
+
+
+class Dataset(BaseModel):
+    id: uuid.UUID = Field()
+    name: str = Field()
+    created_at: datetime.datetime = Field(alias="createdAt")
+
+
+class PushDatapointsResponse(BaseModel):
+    dataset_id: uuid.UUID = Field(alias="datasetId")


 ExecutorFunctionReturnType = Any
@@ -49,11 +63,16 @@ EvaluatorFunction = Callable[
 ]


-class
-
+class HumanEvaluatorOptionsEntry(TypedDict):
+    label: str
+    value: float
+

+class HumanEvaluator(BaseModel):
+    options: list[HumanEvaluatorOptionsEntry] = Field(default_factory=list)

-
+
+class InitEvaluationResponse(BaseModel):
     id: uuid.UUID
     createdAt: datetime.datetime
     groupId: str
@@ -61,34 +80,65 @@ class InitEvaluationResponse(pydantic.BaseModel):
     projectId: uuid.UUID


-class
+class EvaluationDatapointDatasetLink(BaseModel):
+    dataset_id: uuid.UUID
+    datapoint_id: uuid.UUID
+    created_at: datetime.datetime
+
+    def to_dict(self):
+        return {
+            "datasetId": str(self.dataset_id),
+            "datapointId": str(self.datapoint_id),
+            "createdAt": self.created_at.isoformat(),
+        }
+
+
+class PartialEvaluationDatapoint(BaseModel):
     id: uuid.UUID
     data: EvaluationDatapointData
     target: EvaluationDatapointTarget
     index: int
     trace_id: uuid.UUID
     executor_span_id: uuid.UUID
-    metadata: EvaluationDatapointMetadata =
+    metadata: EvaluationDatapointMetadata = Field(default=None)
+    dataset_link: EvaluationDatapointDatasetLink | None = Field(default=None)

     # uuid is not serializable by default, so we need to convert it to a string
-    def to_dict(self):
+    def to_dict(self, max_data_length: int = DEFAULT_DATAPOINT_MAX_DATA_LENGTH):
+        serialized_data = serialize(self.data)
+        serialized_target = serialize(self.target)
+        str_data = json_dumps(serialized_data)
+        str_target = json_dumps(serialized_target)
         try:
             return {
                 "id": str(self.id),
-                "data":
-
+                "data": (
+                    str_data[:max_data_length]
+                    if len(str_data) > max_data_length
+                    else serialized_data
+                ),
+                "target": (
+                    str_target[:max_data_length]
+                    if len(str_target) > max_data_length
+                    else serialized_target
+                ),
                 "index": self.index,
                 "traceId": str(self.trace_id),
                 "executorSpanId": str(self.executor_span_id),
                 "metadata": (
                     serialize(self.metadata) if self.metadata is not None else {}
                 ),
+                "datasetLink": (
+                    self.dataset_link.to_dict()
+                    if self.dataset_link is not None
+                    else None
+                ),
             }
         except Exception as e:
             raise ValueError(f"Error serializing PartialEvaluationDatapoint: {e}")


-class EvaluationResultDatapoint(
+class EvaluationResultDatapoint(BaseModel):
     id: uuid.UUID
     index: int
     data: EvaluationDatapointData
@@ -97,18 +147,37 @@ class EvaluationResultDatapoint(pydantic.BaseModel):
     scores: dict[str, Optional[Numeric]]
     trace_id: uuid.UUID
     executor_span_id: uuid.UUID
-    metadata: EvaluationDatapointMetadata =
+    metadata: EvaluationDatapointMetadata = Field(default=None)
+    dataset_link: EvaluationDatapointDatasetLink | None = Field(default=None)

     # uuid is not serializable by default, so we need to convert it to a string
-    def to_dict(self):
+    def to_dict(self, max_data_length: int = DEFAULT_DATAPOINT_MAX_DATA_LENGTH):
         try:
+            serialized_data = serialize(self.data)
+            serialized_target = serialize(self.target)
+            serialized_executor_output = serialize(self.executor_output)
+            str_data = json.dumps(serialized_data)
+            str_target = json.dumps(serialized_target)
+            str_executor_output = json.dumps(serialized_executor_output)
             return {
                 # preserve only preview of the data, target and executor output
                 # (full data is in trace)
                 "id": str(self.id),
-                "data":
-
-
+                "data": (
+                    str_data[:max_data_length]
+                    if len(str_data) > max_data_length
+                    else serialized_data
+                ),
+                "target": (
+                    str_target[:max_data_length]
+                    if len(str_target) > max_data_length
+                    else serialized_target
+                ),
+                "executorOutput": (
+                    str_executor_output[:max_data_length]
+                    if len(str_executor_output) > max_data_length
+                    else serialized_executor_output
+                ),
                 "scores": self.scores,
                 "traceId": str(self.trace_id),
                 "executorSpanId": str(self.executor_span_id),
@@ -116,6 +185,11 @@ class EvaluationResultDatapoint(pydantic.BaseModel):
                 "metadata": (
                     serialize(self.metadata) if self.metadata is not None else {}
                 ),
+                "datasetLink": (
+                    self.dataset_link.to_dict()
+                    if self.dataset_link is not None
+                    else None
+                ),
             }
         except Exception as e:
             raise ValueError(f"Error serializing EvaluationResultDatapoint: {e}")
@@ -136,24 +210,17 @@ class TraceType(Enum):
     EVALUATION = "EVALUATION"


-class GetDatapointsResponse(
+class GetDatapointsResponse(BaseModel):
     items: list[Datapoint]
-
+    total_count: int = Field(alias="totalCount")


-class
-    OFF = 0
-    META_ONLY = 1
-    ALL = 2
-
-
-class LaminarSpanContext(pydantic.BaseModel):
+class LaminarSpanContext(BaseModel):
     """
     A span context that can be used to continue a trace across services. This
     is a slightly modified version of the OpenTelemetry span context. For
-    usage examples, see `Laminar.
-    `Laminar.
-    `Laminar.deserialize_laminar_span_context`.
+    usage examples, see `Laminar.serialize_span_context`,
+    `Laminar.get_span_context`, and `Laminar.deserialize_laminar_span_context`.

     The difference between this and the OpenTelemetry span context is that
     the `trace_id` and `span_id` are stored as UUIDs instead of integers for
@@ -162,7 +229,13 @@ class LaminarSpanContext(pydantic.BaseModel):

     trace_id: uuid.UUID
     span_id: uuid.UUID
-    is_remote: bool =
+    is_remote: bool = Field(default=False)
+    span_path: list[str] = Field(default=[])
+    span_ids_path: list[str] = Field(default=[])  # stringified UUIDs
+    user_id: str | None = Field(default=None)
+    session_id: str | None = Field(default=None)
+    trace_type: TraceType | None = Field(default=None)
+    metadata: dict[str, Any] | None = Field(default=None)

     def __str__(self) -> str:
         return self.model_dump_json()
@@ -193,7 +266,7 @@ class LaminarSpanContext(pydantic.BaseModel):
                 "Please use `LaminarSpanContext` instead."
             )
             return span_context
-        elif isinstance(span_context, dict
+        elif isinstance(span_context, (dict, str)):
             try:
                 laminar_span_context = cls.deserialize(span_context)
                 return SpanContext(
@@ -215,6 +288,13 @@ class LaminarSpanContext(pydantic.BaseModel):
                 "trace_id": data.get("trace_id") or data.get("traceId"),
                 "span_id": data.get("span_id") or data.get("spanId"),
                 "is_remote": data.get("is_remote") or data.get("isRemote", False),
+                "span_path": data.get("span_path") or data.get("spanPath", []),
+                "span_ids_path": data.get("span_ids_path")
+                or data.get("spanIdsPath", []),
+                "user_id": data.get("user_id") or data.get("userId"),
+                "session_id": data.get("session_id") or data.get("sessionId"),
+                "trace_type": data.get("trace_type") or data.get("traceType"),
+                "metadata": data.get("metadata") or data.get("metadata", {}),
             }
         return cls.model_validate(converted_data)
         elif isinstance(data, str):
@@ -230,113 +310,14 @@ class ModelProvider(str, Enum):
     GEMINI = "gemini"


-class
-
-
-
-
-
-
-    agent_state: str | None = pydantic.Field(default=None)
-    parent_span_context: str | None = pydantic.Field(default=None)
-    model_provider: ModelProvider | None = pydantic.Field(default=None)
-    model: str | None = pydantic.Field(default=None)
-    stream: bool = pydantic.Field(default=False)
-    enable_thinking: bool = pydantic.Field(default=True)
-    cdp_url: str | None = pydantic.Field(default=None)
-    return_screenshots: bool = pydantic.Field(default=False)
-    return_storage_state: bool = pydantic.Field(default=False)
-    return_agent_state: bool = pydantic.Field(default=False)
-    timeout: int | None = pydantic.Field(default=None)
-    max_steps: int | None = pydantic.Field(default=None)
-    thinking_token_budget: int | None = pydantic.Field(default=None)
-    start_url: str | None = pydantic.Field(default=None)
-    disable_give_control: bool = pydantic.Field(default=False)
-    user_agent: str | None = pydantic.Field(default=None)
-
-
-class ActionResult(pydantic.BaseModel):
-    model_config = pydantic.ConfigDict(
-        alias_generator=pydantic.alias_generators.to_camel
-    )
-    is_done: bool = pydantic.Field(default=False)
-    content: str | None = pydantic.Field(default=None)
-    error: str | None = pydantic.Field(default=None)
-
-
-class AgentOutput(pydantic.BaseModel):
-    model_config = pydantic.ConfigDict(
-        alias_generator=pydantic.alias_generators.to_camel
-    )
-    result: ActionResult = pydantic.Field(default_factory=ActionResult)
-    # Browser state with data related to auth, such as cookies.
-    # A stringified JSON object.
-    # Only returned if return_storage_state is True.
-    # CAUTION: This object may become large. It also may contain sensitive data.
-    storage_state: str | None = pydantic.Field(default=None)
-    # Agent state with data related to the agent's state, such as the chat history.
-    # A stringified JSON object.
-    # Only returned if return_agent_state is True.
-    # CAUTION: This object is large.
-    agent_state: str | None = pydantic.Field(default=None)
-
-
-class StepChunkContent(pydantic.BaseModel):
-    model_config = pydantic.ConfigDict(
-        alias_generator=pydantic.alias_generators.to_camel
-    )
-    chunk_type: Literal["step"] = pydantic.Field(default="step")
-    message_id: uuid.UUID = pydantic.Field()
-    action_result: ActionResult = pydantic.Field()
-    summary: str = pydantic.Field()
-    screenshot: str | None = pydantic.Field(default=None)
-
-
-class TimeoutChunkContent(pydantic.BaseModel):
-    """Chunk content to indicate that timeout has been hit. The only difference from a regular step
-    is the chunk type. This is the last chunk in the stream.
-    """
-
-    model_config = pydantic.ConfigDict(
-        alias_generator=pydantic.alias_generators.to_camel
-    )
-    chunk_type: Literal["timeout"] = pydantic.Field(default="timeout")
-    message_id: uuid.UUID = pydantic.Field()
-    summary: str = pydantic.Field()
-    screenshot: str | None = pydantic.Field(default=None)
+class MaskInputOptions(TypedDict):
+    textarea: bool | None
+    text: bool | None
+    number: bool | None
+    select: bool | None
+    email: bool | None
+    tel: bool | None


-class
-
-    is the last chunk in the stream.
-    """
-
-    model_config = pydantic.ConfigDict(
-        alias_generator=pydantic.alias_generators.to_camel
-    )
-
-    chunk_type: Literal["finalOutput"] = pydantic.Field(default="finalOutput")
-    message_id: uuid.UUID = pydantic.Field()
-    content: AgentOutput = pydantic.Field()
-
-
-class ErrorChunkContent(pydantic.BaseModel):
-    """Chunk content to indicate that an error has occurred. Typically, this
-    is the last chunk in the stream.
-    """
-
-    model_config = pydantic.ConfigDict(
-        alias_generator=pydantic.alias_generators.to_camel
-    )
-    chunk_type: Literal["error"] = pydantic.Field(default="error")
-    message_id: uuid.UUID = pydantic.Field()
-    error: str = pydantic.Field()
-
-
-class RunAgentResponseChunk(pydantic.RootModel):
-    root: (
-        StepChunkContent
-        | FinalOutputChunkContent
-        | ErrorChunkContent
-        | TimeoutChunkContent
-    )
+class SessionRecordingOptions(TypedDict):
+    mask_input_options: MaskInputOptions | None
lmnr/sdk/utils.py
CHANGED
@@ -1,15 +1,19 @@
-import asyncio
 import datetime
 import dataclasses
 import dotenv
 import enum
 import inspect
 import os
+import orjson
 import pydantic
 import queue
 import typing
 import uuid

+from lmnr.sdk.log import get_default_logger
+
+logger = get_default_logger(__name__)
+

 def is_method(func: typing.Callable) -> bool:
     # inspect.ismethod is True for bound methods only, but in the decorator,
@@ -33,7 +37,7 @@ def is_async(func: typing.Callable) -> bool:
         return False

     # Check if the function is asynchronous
-    if
+    if inspect.iscoroutinefunction(func):
         return True

     # Fallback: check if the function's code object contains 'async'.
@@ -128,3 +132,112 @@ def is_otel_attribute_value_type(value: typing.Any) -> bool:
         )
         return True
     return False
+
+
+def get_otel_env_var(var_name: str) -> str | None:
+    """Get OTEL environment variable with priority order.
+
+    Checks in order:
+    1. OTEL_EXPORTER_OTLP_TRACES_{var_name}
+    2. OTEL_EXPORTER_OTLP_{var_name}
+    3. OTEL_{var_name}
+
+    Args:
+        var_name: The variable name (e.g., 'ENDPOINT', 'HEADERS', 'TIMEOUT')
+
+    Returns:
+        str | None: The environment variable value or None if not found
+    """
+    candidates = [
+        f"OTEL_EXPORTER_OTLP_TRACES_{var_name}",
+        f"OTEL_EXPORTER_OTLP_{var_name}",
+        f"OTEL_{var_name}",
+    ]
+
+    for candidate in candidates:
+        if value := from_env(candidate):
+            return value
+    return None
+
+
+def parse_otel_headers(headers_str: str | None) -> dict[str, str]:
+    """Parse OTEL headers string into dictionary.
+
+    Format: key1=value1,key2=value2
+    Values are URL-decoded.
+
+    Args:
+        headers_str: Headers string in OTEL format
+
+    Returns:
+        dict[str, str]: Parsed headers dictionary
+    """
+    if not headers_str:
+        return {}
+
+    headers = {}
+    for pair in headers_str.split(","):
+        if "=" in pair:
+            key, value = pair.split("=", 1)
+            import urllib.parse
+
+            headers[key.strip()] = urllib.parse.unquote(value.strip())
+    return headers
+
+
+def format_id(id_value: str | int | uuid.UUID) -> str:
+    """Format trace/span/evaluation ID to a UUID string, or return valid UUID strings as-is.
+
+    Args:
+        id_value: The ID in various formats (UUID, int, or valid UUID string)
+
+    Returns:
+        str: UUID string representation
+
+    Raises:
+        ValueError: If id_value cannot be converted to a valid UUID
+    """
+    if isinstance(id_value, uuid.UUID):
+        return str(id_value)
+    elif isinstance(id_value, int):
+        return str(uuid.UUID(int=id_value))
+    elif isinstance(id_value, str):
+        uuid.UUID(id_value)
+        return id_value
+    else:
+        raise ValueError(f"Invalid ID type: {type(id_value)}")
+
+
+DEFAULT_PLACEHOLDER = {}
+
+
+def default_json(o):
+    if isinstance(o, pydantic.BaseModel):
+        return o.model_dump()
+
+    # Handle various sequence types, but not strings or bytes
+    if isinstance(o, (list, tuple, set, frozenset)):
+        return list(o)
+
+    try:
+        return str(o)
+    except Exception:
+        logger.debug("Failed to serialize data to JSON, inner type: %s", type(o))
+        pass
+    return DEFAULT_PLACEHOLDER
+
+
+def json_dumps(data: dict) -> str:
+    try:
+        return orjson.dumps(
+            data,
+            default=default_json,
+            option=orjson.OPT_SERIALIZE_DATACLASS
+            | orjson.OPT_SERIALIZE_UUID
+            | orjson.OPT_UTC_Z
+            | orjson.OPT_NON_STR_KEYS,
+        ).decode("utf-8")
+    except Exception:
+        # Log the exception and return a placeholder if serialization completely fails
+        logger.info("Failed to serialize data to JSON, type: %s", type(data))
+        return "{}"  # Return an empty JSON object as a fallback