lmnr 0.7.12__py3-none-any.whl → 0.7.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



@@ -55,7 +55,7 @@ class TracerWrapper(object):
         exporter: SpanExporter | None = None,
         instruments: set[Instruments] | None = None,
         block_instruments: set[Instruments] | None = None,
-        base_url: str = "https://api.lmnr.ai",
+        base_url: str | None = None,
         port: int = 8443,
         http_port: int = 443,
         project_api_key: str | None = None,
@@ -69,7 +69,7 @@ class TracerWrapper(object):
         # Silence some opentelemetry warnings
         logging.getLogger("opentelemetry.trace").setLevel(otel_logger_level)
 
-        base_http_url = f"{base_url}:{http_port}"
+        base_http_url = f"{base_url}:{http_port}" if base_url else None
         with cls._lock:
             if not hasattr(cls, "instance"):
                 cls._initialize_logger(cls)
@@ -78,14 +78,18 @@ class TracerWrapper(object):
                 # Store session recording options
                 cls.session_recording_options = session_recording_options or {}
 
-                obj._client = LaminarClient(
-                    base_url=base_http_url,
-                    project_api_key=project_api_key,
-                )
-                obj._async_client = AsyncLaminarClient(
-                    base_url=base_http_url,
-                    project_api_key=project_api_key,
-                )
+                if project_api_key:
+                    obj._client = LaminarClient(
+                        base_url=base_http_url or "https://api.lmnr.ai",
+                        project_api_key=project_api_key,
+                    )
+                    obj._async_client = AsyncLaminarClient(
+                        base_url=base_http_url or "https://api.lmnr.ai",
+                        project_api_key=project_api_key,
+                    )
+                else:
+                    obj._client = None
+                    obj._async_client = None
 
                 obj._resource = Resource(attributes=TracerWrapper.resource_attributes)
 
@@ -91,6 +91,9 @@ class BrowserUseSessionInstrumentorInitializer(InstrumentorInitializer):
         if version and parse(version) >= parse("0.6.0rc1"):
             from lmnr.sdk.browser.browser_use_cdp_otel import BrowserUseInstrumentor
 
+            if async_client is None:
+                return None
+
             return BrowserUseInstrumentor(async_client)
 
         return None
@@ -348,6 +351,9 @@ class PatchrightInstrumentorInitializer(InstrumentorInitializer):
 
         from lmnr.sdk.browser.patchright_otel import PatchrightInstrumentor
 
+        if client is None and async_client is None:
+            return None
+
         return PatchrightInstrumentor(client, async_client)
 
 
@@ -372,6 +378,9 @@ class PlaywrightInstrumentorInitializer(InstrumentorInitializer):
 
         from lmnr.sdk.browser.playwright_otel import PlaywrightInstrumentor
 
+        if client is None and async_client is None:
+            return None
+
         return PlaywrightInstrumentor(client, async_client)
 
 
@@ -10,7 +10,10 @@ from opentelemetry.exporter.otlp.proto.http.trace_exporter import (
     OTLPSpanExporter as HTTPOTLPSpanExporter,
 )
 
-from lmnr.sdk.utils import from_env
+from lmnr.sdk.log import get_default_logger
+from lmnr.sdk.utils import from_env, get_otel_env_var, parse_otel_headers
+
+logger = get_default_logger(__name__)
 
 
 class LaminarSpanExporter(SpanExporter):
@@ -39,16 +42,39 @@ class LaminarSpanExporter(SpanExporter):
         final_url = f"{url}:{port or 443}"
         api_key = api_key or from_env("LMNR_PROJECT_API_KEY")
         self.endpoint = final_url
-        self.headers = (
-            {"Authorization": f"Bearer {api_key}"}
-            if force_http
-            else {"authorization": f"Bearer {api_key}"}
-        )
+        if api_key:
+            self.headers = (
+                {"Authorization": f"Bearer {api_key}"}
+                if force_http
+                else {"authorization": f"Bearer {api_key}"}
+            )
+        elif get_otel_env_var("HEADERS"):
+            self.headers = parse_otel_headers(get_otel_env_var("HEADERS"))
+        else:
+            self.headers = {}
         self.timeout = timeout_seconds
         self.force_http = force_http
-        self._init_instance()
+        if get_otel_env_var("ENDPOINT"):
+            if not base_url:
+                self.endpoint = get_otel_env_var("ENDPOINT")
+            else:
+                logger.warning(
+                    "OTEL_ENDPOINT is set, but Laminar base URL is also set. Ignoring OTEL_ENDPOINT."
+                )
+            protocol = get_otel_env_var("PROTOCOL") or "grpc/protobuf"
+            exporter_type = from_env("OTEL_EXPORTER") or "otlp_grpc"
+            self.force_http = (
+                protocol in ("http/protobuf", "http/json")
+                or exporter_type == "otlp_http"
+            )
+        if not self.endpoint:
+            raise ValueError(
+                "Laminar base URL is not set and OTEL_ENDPOINT is not set. Please either\n"
+                "- set the LMNR_BASE_URL environment variable\n"
+                "- set the OTEL_ENDPOINT environment variable\n"
+                "- pass the base_url parameter to Laminar.initialize"
+            )
 
-    def _init_instance(self):
         if self.force_http:
             self.instance = HTTPOTLPSpanExporter(
                 endpoint=self.endpoint,
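
Taken together with the `Laminar.initialize` change further down, the exporter hunk above means tracing can now be configured purely through standard OTLP environment variables, with no Laminar project API key or base URL. A minimal sketch of that setup; the endpoint, header value, and protocol below are placeholder assumptions, not values from this release:

```python
import os

# Resolved via get_otel_env_var, which checks OTEL_EXPORTER_OTLP_TRACES_*,
# then OTEL_EXPORTER_OTLP_*, then OTEL_* (see lmnr/sdk/utils.py below).
os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = "http://localhost:4318"  # placeholder
os.environ["OTEL_EXPORTER_OTLP_HEADERS"] = "authorization=Bearer%20my-token"  # placeholder
# "http/protobuf" (or "http/json") selects the HTTP OTLP exporter; anything
# else keeps gRPC, per the protocol check in the hunk above.
os.environ["OTEL_EXPORTER_OTLP_PROTOCOL"] = "http/protobuf"

from lmnr import Laminar

# With no project API key, 0.7.14 no longer raises here as long as an OTEL
# endpoint or headers are set; the Laminar HTTP clients are simply skipped.
Laminar.initialize()
```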
@@ -12,7 +12,7 @@ from lmnr.sdk.client.asynchronous.resources import (
     AsyncBrowserEvents,
     AsyncEvals,
     AsyncTags,
-    AsyncEvaluators
+    AsyncEvaluators,
 )
 from lmnr.sdk.utils import from_env
 
@@ -66,6 +66,26 @@ class AsyncLaminarClient:
         self.__client = httpx.AsyncClient(
             headers=self._headers(),
             timeout=timeout,
+            # Context: If the server responds with a 413, the connection becomes
+            # poisoned and freezes on subsequent requests, and there is no way
+            # to recover or recycle such connection.
+            # Setting max_keepalive_connections to 0 will resolve this, but is
+            # less efficient, as it will create a new connection
+            # (not client, so still better) for each request.
+            #
+            # Note: from my experiments with a simple python server, forcing the
+            # server to read/consume the request payload from the socket seems
+            # to resolve this, but I haven't figured out how to do that in our
+            # real actix-web backend server and whether it makes sense to do so.
+            #
+            # TODO: investigate if there are better ways to fix this rather than
+            # setting keepalive_expiry to 0. Other alternative: migrate to
+            # requests + aiohttp.
+            #
+            # limits=httpx.Limits(
+            #     max_keepalive_connections=0,
+            #     keepalive_expiry=0,
+            # ),
         )
 
         # Initialize resource objects
@@ -157,5 +177,3 @@ class AsyncLaminarClient:
             "Content-Type": "application/json",
             "Accept": "application/json",
         }
-
-
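
The long comment above documents an httpx keep-alive issue triggered by 413 responses; the workaround itself is left commented out in both the async and sync clients. For reference, enabling it on a plain httpx client would look like the sketch below; this mirrors the commented-out option and is not what the released code does:

```python
import httpx

# Disabling keep-alive forces a fresh connection per request, avoiding the
# "poisoned connection after a 413" behaviour described in the comment, at
# the cost of losing connection reuse.
no_keepalive = httpx.Limits(max_keepalive_connections=0, keepalive_expiry=0)

client = httpx.AsyncClient(
    headers={"Authorization": "Bearer <project-api-key>"},  # placeholder
    timeout=30,
    limits=no_keepalive,
)
```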
@@ -1,21 +1,31 @@
 """Evals resource for interacting with Laminar evaluations API."""
 
-from typing import Any
+import urllib
 import uuid
+from typing import Any
 
 from lmnr.sdk.client.asynchronous.resources.base import BaseAsyncResource
+from lmnr.sdk.log import get_default_logger
 from lmnr.sdk.types import (
+    GetDatapointsResponse,
     InitEvaluationResponse,
     EvaluationResultDatapoint,
     PartialEvaluationDatapoint,
 )
+from lmnr.sdk.utils import serialize
+
+INITIAL_EVALUATION_DATAPOINT_MAX_DATA_LENGTH = 16_000_000  # 16MB
+logger = get_default_logger(__name__)
 
 
 class AsyncEvals(BaseAsyncResource):
     """Resource for interacting with Laminar evaluations API."""
 
     async def init(
-        self, name: str | None = None, group_name: str | None = None, metadata: dict[str, Any] | None = None
+        self,
+        name: str | None = None,
+        group_name: str | None = None,
+        metadata: dict[str, Any] | None = None,
     ) -> InitEvaluationResponse:
         """Initialize a new evaluation.
 
@@ -51,7 +61,7 @@ class AsyncEvals(BaseAsyncResource):
     ) -> uuid.UUID:
         """
         Create a new evaluation and return its ID.
-
+
         Parameters:
             name (str | None, optional): Optional name of the evaluation.
             group_name (str | None, optional): An identifier to group evaluations.
@@ -60,7 +70,9 @@ class AsyncEvals(BaseAsyncResource):
         Returns:
             uuid.UUID: The evaluation ID.
         """
-        evaluation = await self.init(name=name, group_name=group_name, metadata=metadata)
+        evaluation = await self.init(
+            name=name, group_name=group_name, metadata=metadata
+        )
         return evaluation.id
 
     async def create_datapoint(
@@ -74,7 +86,7 @@ class AsyncEvals(BaseAsyncResource):
     ) -> uuid.UUID:
         """
         Create a datapoint for an evaluation.
-
+
         Parameters:
             eval_id (uuid.UUID): The evaluation ID.
             data: The input data for the executor.
@@ -82,13 +94,13 @@ class AsyncEvals(BaseAsyncResource):
             metadata (dict[str, Any] | None, optional): Optional metadata.
             index (int | None, optional): Optional index of the datapoint.
             trace_id (uuid.UUID | None, optional): Optional trace ID.
-
+
         Returns:
             uuid.UUID: The datapoint ID.
         """
-
+
         datapoint_id = uuid.uuid4()
-
+
         # Create a minimal datapoint first
         partial_datapoint = PartialEvaluationDatapoint(
             id=datapoint_id,
@@ -99,7 +111,7 @@ class AsyncEvals(BaseAsyncResource):
             executor_span_id=uuid.uuid4(),  # Will be updated when executor runs
             metadata=metadata,
         )
-
+
         await self.save_datapoints(eval_id, [partial_datapoint])
         return datapoint_id
 
@@ -119,18 +131,61 @@ class AsyncEvals(BaseAsyncResource):
         Raises:
             ValueError: If there's an error saving the datapoints.
         """
+        length = INITIAL_EVALUATION_DATAPOINT_MAX_DATA_LENGTH
+        points = [datapoint.to_dict(max_data_length=length) for datapoint in datapoints]
         response = await self._client.post(
             self._base_url + f"/v1/evals/{eval_id}/datapoints",
             json={
-                "points": [datapoint.to_dict() for datapoint in datapoints],
+                "points": points,
                 "groupName": group_name,
             },
             headers=self._headers(),
         )
+        if response.status_code == 413:
+            await self._retry_save_datapoints(eval_id, datapoints, group_name)
+            return
+
         if response.status_code != 200:
-            raise ValueError(f"Error saving evaluation datapoints: {response.text}")
-
-
+            raise ValueError(
+                f"Error saving evaluation datapoints: [{response.status_code}] {response.text}"
+            )
+
+    async def get_datapoints(
+        self,
+        dataset_name: str,
+        offset: int,
+        limit: int,
+    ) -> GetDatapointsResponse:
+        """Get datapoints from a dataset.
+
+        Args:
+            dataset_name (str): The name of the dataset.
+            offset (int): The offset to start from.
+            limit (int): The maximum number of datapoints to return.
+
+        Returns:
+            GetDatapointsResponse: The response containing the datapoints.
+
+        Raises:
+            ValueError: If there's an error fetching the datapoints.
+        """
+        params = {"name": dataset_name, "offset": offset, "limit": limit}
+        url = (
+            self._base_url + "/v1/datasets/datapoints?" + urllib.parse.urlencode(params)
+        )
+        response = await self._client.get(url, headers=self._headers())
+        if response.status_code != 200:
+            try:
+                resp_json = response.json()
+                raise ValueError(
+                    f"Error fetching datapoints: [{response.status_code}] {resp_json}"
+                )
+            except Exception:
+                raise ValueError(
+                    f"Error fetching datapoints: [{response.status_code}] {response.text}"
+                )
+        return GetDatapointsResponse.model_validate(response.json())
+
     async def update_datapoint(
         self,
         eval_id: uuid.UUID,
@@ -146,17 +201,59 @@ class AsyncEvals(BaseAsyncResource):
             executor_output (Any): The executor output.
             scores (dict[str, float | int] | None, optional): The scores. Defaults to None.
         """
-
+
         response = await self._client.post(
             self._base_url + f"/v1/evals/{eval_id}/datapoints/{datapoint_id}",
             json={
-                "executorOutput": executor_output,
+                "executorOutput": (
+                    str(serialize(executor_output))[
+                        :INITIAL_EVALUATION_DATAPOINT_MAX_DATA_LENGTH
+                    ]
+                    if executor_output is not None
+                    else None
+                ),
                 "scores": scores,
             },
             headers=self._headers(),
         )
 
         if response.status_code != 200:
-            raise ValueError(f"Error updating evaluation datapoint: {response.text}")
-
-
+            raise ValueError(
+                f"Error updating evaluation datapoint: [{response.status_code}] {response.text}"
+            )
+
+    async def _retry_save_datapoints(
+        self,
+        eval_id: uuid.UUID,
+        datapoints: list[EvaluationResultDatapoint | PartialEvaluationDatapoint],
+        group_name: str | None = None,
+        initial_length: int = INITIAL_EVALUATION_DATAPOINT_MAX_DATA_LENGTH,
+        max_retries: int = 20,
+    ):
+        retry = 0
+        length = initial_length
+        while retry < max_retries:
+            retry += 1
+            length = length // 2
+            logger.debug(
+                f"Retrying save datapoints: {retry} of {max_retries}, length: {length}"
+            )
+            if length == 0:
+                raise ValueError("Error saving evaluation datapoints")
+            points = [
+                datapoint.to_dict(max_data_length=length) for datapoint in datapoints
+            ]
+            response = await self._client.post(
+                self._base_url + f"/v1/evals/{eval_id}/datapoints",
+                json={
+                    "points": points,
+                    "groupName": group_name,
+                },
+                headers=self._headers(),
+            )
+            if response.status_code != 413:
+                break
+        if response.status_code != 200:
+            raise ValueError(
+                f"Error saving evaluation datapoints: [{response.status_code}] {response.text}"
+            )
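
The new `_retry_save_datapoints` helper (added to both the async `Evals` resource above and, further down, its synchronous counterpart) reacts to a 413 by halving the per-field `max_data_length` and re-posting, up to 20 attempts or until the limit would reach zero. A standalone sketch of that back-off rule, with a hypothetical `post_points` callable standing in for the HTTP call:

```python
def shrink_until_accepted(post_points, initial_length: int = 16_000_000,
                          max_retries: int = 20) -> int:
    """Halve the serialization limit until the server stops answering 413.

    `post_points(length)` is a hypothetical callable that posts the datapoints
    serialized with to_dict(max_data_length=length) and returns the HTTP
    status code. Mirrors the loop in _retry_save_datapoints above.
    """
    length = initial_length
    for _ in range(max_retries):
        length //= 2
        if length == 0:
            raise ValueError("Error saving evaluation datapoints")
        status = post_points(length)
        if status == 413:
            continue  # payload still too large, halve again
        if status != 200:
            raise ValueError(f"Error saving evaluation datapoints: [{status}]")
        return length
    raise ValueError("Error saving evaluation datapoints")


# Example: a fake server that accepts anything at or below 2 MB.
accepted_at = shrink_until_accepted(lambda n: 200 if n <= 2_000_000 else 413)
assert accepted_at == 2_000_000
```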
@@ -5,19 +5,27 @@ import urllib.parse
 from typing import Any
 
 from lmnr.sdk.client.synchronous.resources.base import BaseResource
+from lmnr.sdk.log import get_default_logger
 from lmnr.sdk.types import (
-    InitEvaluationResponse,
+    GetDatapointsResponse,
     EvaluationResultDatapoint,
+    InitEvaluationResponse,
     PartialEvaluationDatapoint,
-    GetDatapointsResponse,
 )
+from lmnr.sdk.utils import serialize
+
+INITIAL_EVALUATION_DATAPOINT_MAX_DATA_LENGTH = 16_000_000  # 16MB
+logger = get_default_logger(__name__)
 
 
 class Evals(BaseResource):
     """Resource for interacting with Laminar evaluations API."""
 
     def init(
-        self, name: str | None = None, group_name: str | None = None, metadata: dict[str, Any] | None = None
+        self,
+        name: str | None = None,
+        group_name: str | None = None,
+        metadata: dict[str, Any] | None = None,
     ) -> InitEvaluationResponse:
         """Initialize a new evaluation.
 
@@ -53,7 +61,7 @@ class Evals(BaseResource):
     ) -> uuid.UUID:
         """
         Create a new evaluation and return its ID.
-
+
         Parameters:
             name (str | None, optional): Optional name of the evaluation.
             group_name (str | None, optional): An identifier to group evaluations.
@@ -76,7 +84,7 @@ class Evals(BaseResource):
     ) -> uuid.UUID:
         """
         Create a datapoint for an evaluation.
-
+
         Parameters:
             eval_id (uuid.UUID): The evaluation ID.
             data: The input data for the executor.
@@ -84,13 +92,13 @@ class Evals(BaseResource):
             metadata (dict[str, Any] | None, optional): Optional metadata.
             index (int | None, optional): Optional index of the datapoint.
             trace_id (uuid.UUID | None, optional): Optional trace ID.
-
+
         Returns:
             uuid.UUID: The datapoint ID.
         """
-
+
         datapoint_id = uuid.uuid4()
-
+
         # Create a minimal datapoint first
         partial_datapoint = PartialEvaluationDatapoint(
             id=datapoint_id,
@@ -101,7 +109,7 @@ class Evals(BaseResource):
             executor_span_id=uuid.uuid4(),  # Will be updated when executor runs
             metadata=metadata,
         )
-
+
         self.save_datapoints(eval_id, [partial_datapoint])
         return datapoint_id
 
@@ -121,16 +129,24 @@ class Evals(BaseResource):
         Raises:
             ValueError: If there's an error saving the datapoints.
         """
+        length = INITIAL_EVALUATION_DATAPOINT_MAX_DATA_LENGTH
+        points = [datapoint.to_dict(max_data_length=length) for datapoint in datapoints]
         response = self._client.post(
             self._base_url + f"/v1/evals/{eval_id}/datapoints",
             json={
-                "points": [datapoint.to_dict() for datapoint in datapoints],
+                "points": points,
                 "groupName": group_name,
             },
             headers=self._headers(),
         )
+        if response.status_code == 413:
+            self._retry_save_datapoints(eval_id, datapoints, group_name)
+            return
+
         if response.status_code != 200:
-            raise ValueError(f"Error saving evaluation datapoints: {response.text}")
+            raise ValueError(
+                f"Error saving evaluation datapoints: [{response.status_code}] {response.text}"
+            )
 
     def update_datapoint(
         self,
@@ -147,11 +163,17 @@ class Evals(BaseResource):
             executor_output (Any): The executor output.
             scores (dict[str, float | int] | None, optional): The scores. Defaults to None.
         """
-
+
         response = self._client.post(
             self._base_url + f"/v1/evals/{eval_id}/datapoints/{datapoint_id}",
             json={
-                "executorOutput": executor_output,
+                "executorOutput": (
+                    str(serialize(executor_output))[
+                        :INITIAL_EVALUATION_DATAPOINT_MAX_DATA_LENGTH
+                    ]
+                    if executor_output is not None
+                    else None
+                ),
                 "scores": scores,
             },
             headers=self._headers(),
@@ -195,3 +217,39 @@ class Evals(BaseResource):
                 f"Error fetching datapoints: [{response.status_code}] {response.text}"
             )
         return GetDatapointsResponse.model_validate(response.json())
+
+    def _retry_save_datapoints(
+        self,
+        eval_id: uuid.UUID,
+        datapoints: list[EvaluationResultDatapoint | PartialEvaluationDatapoint],
+        group_name: str | None = None,
+        initial_length: int = INITIAL_EVALUATION_DATAPOINT_MAX_DATA_LENGTH,
+        max_retries: int = 20,
+    ):
+        retry = 0
+        length = initial_length
+        while retry < max_retries:
+            retry += 1
+            length = length // 2
+            logger.debug(
+                f"Retrying save datapoints: {retry} of {max_retries}, length: {length}"
+            )
+            if length == 0:
+                raise ValueError("Error saving evaluation datapoints")
+            points = [
+                datapoint.to_dict(max_data_length=length) for datapoint in datapoints
+            ]
+            response = self._client.post(
+                self._base_url + f"/v1/evals/{eval_id}/datapoints",
+                json={
+                    "points": points,
+                    "groupName": group_name,
+                },
+                headers=self._headers(),
+            )
+            if response.status_code != 413:
+                break
+        if response.status_code != 200:
+            raise ValueError(
+                f"Error saving evaluation datapoints: [{response.status_code}] {response.text}"
+            )
@@ -71,6 +71,26 @@ class LaminarClient:
         self.__client = httpx.Client(
             headers=self._headers(),
             timeout=timeout,
+            # Context: If the server responds with a 413, the connection becomes
+            # poisoned and freezes on subsequent requests, and there is no way
+            # to recover or recycle such connection.
+            # Setting max_keepalive_connections to 0 will resolve this, but is
+            # less efficient, as it will create a new connection
+            # (not client, so still better) for each request.
+            #
+            # Note: from my experiments with a simple python server, forcing the
+            # server to read/consume the request payload from the socket seems
+            # to resolve this, but I haven't figured out how to do that in our
+            # real actix-web backend server and whether it makes sense to do so.
+            #
+            # TODO: investigate if there are better ways to fix this rather than
+            # setting keepalive_expiry to 0. Other alternative: migrate to
+            # requests + aiohttp.
+            #
+            # limits=httpx.Limits(
+            #     max_keepalive_connections=0,
+            #     keepalive_expiry=0,
+            # ),
         )
 
         # Initialize resource objects
@@ -169,5 +189,3 @@ class LaminarClient:
             "Content-Type": "application/json",
             "Accept": "application/json",
         }
-
-
lmnr/sdk/laminar.py CHANGED
@@ -23,6 +23,8 @@ from lmnr.opentelemetry_lib.tracing.attributes import (
 )
 from lmnr.opentelemetry_lib import MAX_MANUAL_SPAN_PAYLOAD_SIZE
 from lmnr.opentelemetry_lib.decorators import json_dumps
+from lmnr.sdk.utils import get_otel_env_var
+
 from opentelemetry import trace
 from opentelemetry import context as context_api
 from opentelemetry.trace import INVALID_TRACE_ID, Span, Status, StatusCode, use_span
@@ -140,7 +142,12 @@ class Laminar:
             return
 
         cls.__project_api_key = project_api_key or from_env("LMNR_PROJECT_API_KEY")
-        if not cls.__project_api_key:
+
+        if (
+            not cls.__project_api_key
+            and not get_otel_env_var("ENDPOINT")
+            and not get_otel_env_var("HEADERS")
+        ):
            raise ValueError(
                "Please initialize the Laminar object with"
                " your project API key or set the LMNR_PROJECT_API_KEY"
@@ -149,14 +156,15 @@ class Laminar:
 
         cls._initialize_logger()
 
-        url = base_url or from_env("LMNR_BASE_URL") or "https://api.lmnr.ai"
-        url = url.rstrip("/")
-        if not url.startswith("http:") and not url.startswith("https:"):
-            url = f"https://{url}"
-        if match := re.search(r":(\d{1,5})$", url):
-            url = url[: -len(match.group(0))]
-            cls.__logger.info(f"Ignoring port in base URL: {match.group(1)}")
-        http_url = base_http_url or url
+        url = base_url or from_env("LMNR_BASE_URL")
+        if url:
+            url = url.rstrip("/")
+            if not url.startswith("http:") and not url.startswith("https:"):
+                url = f"https://{url}"
+            if match := re.search(r":(\d{1,5})$", url):
+                url = url[: -len(match.group(0))]
+                cls.__logger.info(f"Ignoring port in base URL: {match.group(1)}")
+        http_url = base_http_url or url or "https://api.lmnr.ai"
         if not http_url.startswith("http:") and not http_url.startswith("https:"):
             http_url = f"https://{http_url}"
         if match := re.search(r":(\d{1,5})$", http_url):
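
`Laminar.initialize` keeps the same base-URL normalization as before (strip a trailing slash, default the scheme to https, drop an explicit port) but now only applies it when a base URL is actually given, falling back to https://api.lmnr.ai otherwise. A small reimplementation of just that normalization, for illustration; the helper name is hypothetical:

```python
import re


def normalize_base_url(url: str | None) -> str:
    """Mirror the normalization in Laminar.initialize: trailing slash stripped,
    scheme defaulted to https, explicit :port dropped, cloud URL as fallback."""
    if not url:
        return "https://api.lmnr.ai"
    url = url.rstrip("/")
    if not url.startswith("http:") and not url.startswith("https:"):
        url = f"https://{url}"
    if match := re.search(r":(\d{1,5})$", url):
        url = url[: -len(match.group(0))]  # the port is configured separately
    return url


assert normalize_base_url(None) == "https://api.lmnr.ai"
assert normalize_base_url("my-host.example.com:8443/") == "https://my-host.example.com"
```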
lmnr/sdk/types.py CHANGED
@@ -14,7 +14,7 @@ from typing_extensions import TypedDict # compatibility with python < 3.12
 
 from .utils import serialize
 
-EVALUATION_DATAPOINT_MAX_DATA_LENGTH = 8_000_000  # 8MB
+DEFAULT_DATAPOINT_MAX_DATA_LENGTH = 16_000_000  # 16MB
 
 
 Numeric = int | float
@@ -79,16 +79,26 @@ class PartialEvaluationDatapoint(pydantic.BaseModel):
     metadata: EvaluationDatapointMetadata = pydantic.Field(default=None)
 
     # uuid is not serializable by default, so we need to convert it to a string
-    def to_dict(self):
+    def to_dict(self, max_data_length: int = DEFAULT_DATAPOINT_MAX_DATA_LENGTH):
+        serialized_data = serialize(self.data)
+        serialized_target = serialize(self.target)
+        # TODO: use json_dumps instead of json.dumps once we
+        # move it to utils so we can avoid circular imports
+        str_data = json.dumps(serialized_data)
+        str_target = json.dumps(serialized_target)
         try:
             return {
                 "id": str(self.id),
-                "data": str(serialize(self.data))[
-                    :EVALUATION_DATAPOINT_MAX_DATA_LENGTH
-                ],
-                "target": str(serialize(self.target))[
-                    :EVALUATION_DATAPOINT_MAX_DATA_LENGTH
-                ],
+                "data": (
+                    str_data[:max_data_length]
+                    if len(str_data) > max_data_length
+                    else serialized_data
+                ),
+                "target": (
+                    str_target[:max_data_length]
+                    if len(str_target) > max_data_length
+                    else serialized_target
+                ),
                 "index": self.index,
                 "traceId": str(self.trace_id),
                 "executorSpanId": str(self.executor_span_id),
@@ -112,21 +122,33 @@ class EvaluationResultDatapoint(pydantic.BaseModel):
     metadata: EvaluationDatapointMetadata = pydantic.Field(default=None)
 
     # uuid is not serializable by default, so we need to convert it to a string
-    def to_dict(self):
+    def to_dict(self, max_data_length: int = DEFAULT_DATAPOINT_MAX_DATA_LENGTH):
         try:
+            serialized_data = serialize(self.data)
+            serialized_target = serialize(self.target)
+            serialized_executor_output = serialize(self.executor_output)
+            str_data = json.dumps(serialized_data)
+            str_target = json.dumps(serialized_target)
+            str_executor_output = json.dumps(serialized_executor_output)
             return {
                 # preserve only preview of the data, target and executor output
                 # (full data is in trace)
                 "id": str(self.id),
-                "data": str(serialize(self.data))[
-                    :EVALUATION_DATAPOINT_MAX_DATA_LENGTH
-                ],
-                "target": str(serialize(self.target))[
-                    :EVALUATION_DATAPOINT_MAX_DATA_LENGTH
-                ],
-                "executorOutput": str(serialize(self.executor_output))[
-                    :EVALUATION_DATAPOINT_MAX_DATA_LENGTH
-                ],
+                "data": (
+                    str_data[:max_data_length]
+                    if len(str_data) > max_data_length
+                    else serialized_data
+                ),
+                "target": (
+                    str_target[:max_data_length]
+                    if len(str_target) > max_data_length
+                    else serialized_target
+                ),
+                "executorOutput": (
+                    str_executor_output[:max_data_length]
+                    if len(str_executor_output) > max_data_length
+                    else serialized_executor_output
+                ),
                 "scores": self.scores,
                 "traceId": str(self.trace_id),
                 "executorSpanId": str(self.executor_span_id),
lmnr/sdk/utils.py CHANGED
@@ -130,6 +130,57 @@ def is_otel_attribute_value_type(value: typing.Any) -> bool:
     return False
 
 
+def get_otel_env_var(var_name: str) -> str | None:
+    """Get OTEL environment variable with priority order.
+
+    Checks in order:
+    1. OTEL_EXPORTER_OTLP_TRACES_{var_name}
+    2. OTEL_EXPORTER_OTLP_{var_name}
+    3. OTEL_{var_name}
+
+    Args:
+        var_name: The variable name (e.g., 'ENDPOINT', 'HEADERS', 'TIMEOUT')
+
+    Returns:
+        str | None: The environment variable value or None if not found
+    """
+    candidates = [
+        f"OTEL_EXPORTER_OTLP_TRACES_{var_name}",
+        f"OTEL_EXPORTER_OTLP_{var_name}",
+        f"OTEL_{var_name}",
+    ]
+
+    for candidate in candidates:
+        if value := from_env(candidate):
+            return value
+    return None
+
+
+def parse_otel_headers(headers_str: str | None) -> dict[str, str]:
+    """Parse OTEL headers string into dictionary.
+
+    Format: key1=value1,key2=value2
+    Values are URL-decoded.
+
+    Args:
+        headers_str: Headers string in OTEL format
+
+    Returns:
+        dict[str, str]: Parsed headers dictionary
+    """
+    if not headers_str:
+        return {}
+
+    headers = {}
+    for pair in headers_str.split(","):
+        if "=" in pair:
+            key, value = pair.split("=", 1)
+            import urllib.parse
+
+            headers[key.strip()] = urllib.parse.unquote(value.strip())
+    return headers
+
+
 def format_id(id_value: str | int | uuid.UUID) -> str:
     """Format trace/span/evaluation ID to a UUID string, or return valid UUID strings as-is.
 
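
The two helpers added to `lmnr/sdk/utils.py` implement the usual OTLP environment-variable precedence and the comma-separated `key=value` header syntax. Based on the code above, they can be exercised like this; the endpoint and header values are placeholders:

```python
import os

from lmnr.sdk.utils import get_otel_env_var, parse_otel_headers

# Precedence: the TRACES-specific variable wins over the generic ones.
os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = "http://collector:4318"
os.environ["OTEL_EXPORTER_OTLP_TRACES_ENDPOINT"] = "http://traces-collector:4318"
assert get_otel_env_var("ENDPOINT") == "http://traces-collector:4318"

# Headers use the comma-separated key=value OTEL format; values are URL-decoded.
headers = parse_otel_headers("authorization=Bearer%20abc123,x-tenant=acme")
assert headers == {"authorization": "Bearer abc123", "x-tenant": "acme"}
```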
lmnr/version.py CHANGED
@@ -3,7 +3,7 @@ import httpx
 from packaging import version
 
 
-__version__ = "0.7.12"
+__version__ = "0.7.14"
 PYTHON_VERSION = f"{sys.version_info.major}.{sys.version_info.minor}"
 
 
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lmnr
-Version: 0.7.12
+Version: 0.7.14
 Summary: Python SDK for Laminar
 Author: lmnr.ai
 Author-email: lmnr.ai <founders@lmnr.ai>
@@ -26,52 +26,52 @@ Requires-Dist: grpcio>=1
 Requires-Dist: httpx>=0.24.0
 Requires-Dist: orjson>=3.0.0
 Requires-Dist: packaging>=22.0
-Requires-Dist: opentelemetry-instrumentation-alephalpha>=0.46.2 ; extra == 'alephalpha'
-Requires-Dist: opentelemetry-instrumentation-alephalpha>=0.46.2 ; extra == 'all'
-Requires-Dist: opentelemetry-instrumentation-bedrock>=0.46.2 ; extra == 'all'
-Requires-Dist: opentelemetry-instrumentation-chromadb>=0.46.2 ; extra == 'all'
-Requires-Dist: opentelemetry-instrumentation-cohere>=0.46.2 ; extra == 'all'
-Requires-Dist: opentelemetry-instrumentation-crewai>=0.46.2 ; extra == 'all'
-Requires-Dist: opentelemetry-instrumentation-haystack>=0.46.2 ; extra == 'all'
-Requires-Dist: opentelemetry-instrumentation-lancedb>=0.46.2 ; extra == 'all'
-Requires-Dist: opentelemetry-instrumentation-langchain>=0.46.2 ; extra == 'all'
-Requires-Dist: opentelemetry-instrumentation-llamaindex>=0.46.2 ; extra == 'all'
-Requires-Dist: opentelemetry-instrumentation-marqo>=0.46.2 ; extra == 'all'
-Requires-Dist: opentelemetry-instrumentation-mcp>=0.46.2 ; extra == 'all'
-Requires-Dist: opentelemetry-instrumentation-milvus>=0.46.2 ; extra == 'all'
-Requires-Dist: opentelemetry-instrumentation-mistralai>=0.46.2 ; extra == 'all'
-Requires-Dist: opentelemetry-instrumentation-ollama>=0.46.2 ; extra == 'all'
-Requires-Dist: opentelemetry-instrumentation-pinecone>=0.46.2 ; extra == 'all'
-Requires-Dist: opentelemetry-instrumentation-qdrant>=0.46.2 ; extra == 'all'
-Requires-Dist: opentelemetry-instrumentation-replicate>=0.46.2 ; extra == 'all'
-Requires-Dist: opentelemetry-instrumentation-sagemaker>=0.46.2 ; extra == 'all'
-Requires-Dist: opentelemetry-instrumentation-together>=0.46.2 ; extra == 'all'
-Requires-Dist: opentelemetry-instrumentation-transformers>=0.46.2 ; extra == 'all'
-Requires-Dist: opentelemetry-instrumentation-vertexai>=0.46.2 ; extra == 'all'
-Requires-Dist: opentelemetry-instrumentation-watsonx>=0.46.2 ; extra == 'all'
-Requires-Dist: opentelemetry-instrumentation-weaviate>=0.46.2 ; extra == 'all'
-Requires-Dist: opentelemetry-instrumentation-bedrock>=0.46.2 ; extra == 'bedrock'
-Requires-Dist: opentelemetry-instrumentation-chromadb>=0.46.2 ; extra == 'chromadb'
-Requires-Dist: opentelemetry-instrumentation-cohere>=0.46.2 ; extra == 'cohere'
-Requires-Dist: opentelemetry-instrumentation-crewai>=0.46.2 ; extra == 'crewai'
-Requires-Dist: opentelemetry-instrumentation-haystack>=0.46.2 ; extra == 'haystack'
-Requires-Dist: opentelemetry-instrumentation-lancedb>=0.46.2 ; extra == 'lancedb'
-Requires-Dist: opentelemetry-instrumentation-langchain>=0.46.2 ; extra == 'langchain'
-Requires-Dist: opentelemetry-instrumentation-llamaindex>=0.46.2 ; extra == 'llamaindex'
-Requires-Dist: opentelemetry-instrumentation-marqo>=0.46.2 ; extra == 'marqo'
-Requires-Dist: opentelemetry-instrumentation-mcp>=0.46.2 ; extra == 'mcp'
-Requires-Dist: opentelemetry-instrumentation-milvus>=0.46.2 ; extra == 'milvus'
-Requires-Dist: opentelemetry-instrumentation-mistralai>=0.46.2 ; extra == 'mistralai'
-Requires-Dist: opentelemetry-instrumentation-ollama>=0.46.2 ; extra == 'ollama'
-Requires-Dist: opentelemetry-instrumentation-pinecone>=0.46.2 ; extra == 'pinecone'
-Requires-Dist: opentelemetry-instrumentation-qdrant>=0.46.2 ; extra == 'qdrant'
-Requires-Dist: opentelemetry-instrumentation-replicate>=0.46.2 ; extra == 'replicate'
-Requires-Dist: opentelemetry-instrumentation-sagemaker>=0.46.2 ; extra == 'sagemaker'
-Requires-Dist: opentelemetry-instrumentation-together>=0.46.2 ; extra == 'together'
-Requires-Dist: opentelemetry-instrumentation-transformers>=0.46.2 ; extra == 'transformers'
-Requires-Dist: opentelemetry-instrumentation-vertexai>=0.46.2 ; extra == 'vertexai'
-Requires-Dist: opentelemetry-instrumentation-watsonx>=0.46.2 ; extra == 'watsonx'
-Requires-Dist: opentelemetry-instrumentation-weaviate>=0.46.2 ; extra == 'weaviate'
+Requires-Dist: opentelemetry-instrumentation-alephalpha>=0.47.1 ; extra == 'alephalpha'
+Requires-Dist: opentelemetry-instrumentation-alephalpha>=0.47.1 ; extra == 'all'
+Requires-Dist: opentelemetry-instrumentation-bedrock>=0.47.1 ; extra == 'all'
+Requires-Dist: opentelemetry-instrumentation-chromadb>=0.47.1 ; extra == 'all'
+Requires-Dist: opentelemetry-instrumentation-cohere>=0.47.1 ; extra == 'all'
+Requires-Dist: opentelemetry-instrumentation-crewai>=0.47.1 ; extra == 'all'
+Requires-Dist: opentelemetry-instrumentation-haystack>=0.47.1 ; extra == 'all'
+Requires-Dist: opentelemetry-instrumentation-lancedb>=0.47.1 ; extra == 'all'
+Requires-Dist: opentelemetry-instrumentation-langchain>=0.47.1 ; extra == 'all'
+Requires-Dist: opentelemetry-instrumentation-llamaindex>=0.47.1 ; extra == 'all'
+Requires-Dist: opentelemetry-instrumentation-marqo>=0.47.1 ; extra == 'all'
+Requires-Dist: opentelemetry-instrumentation-mcp>=0.47.1 ; extra == 'all'
+Requires-Dist: opentelemetry-instrumentation-milvus>=0.47.1 ; extra == 'all'
+Requires-Dist: opentelemetry-instrumentation-mistralai>=0.47.1 ; extra == 'all'
+Requires-Dist: opentelemetry-instrumentation-ollama>=0.47.1 ; extra == 'all'
+Requires-Dist: opentelemetry-instrumentation-pinecone>=0.47.1 ; extra == 'all'
+Requires-Dist: opentelemetry-instrumentation-qdrant>=0.47.1 ; extra == 'all'
+Requires-Dist: opentelemetry-instrumentation-replicate>=0.47.1 ; extra == 'all'
+Requires-Dist: opentelemetry-instrumentation-sagemaker>=0.47.1 ; extra == 'all'
+Requires-Dist: opentelemetry-instrumentation-together>=0.47.1 ; extra == 'all'
+Requires-Dist: opentelemetry-instrumentation-transformers>=0.47.1 ; extra == 'all'
+Requires-Dist: opentelemetry-instrumentation-vertexai>=0.47.1 ; extra == 'all'
+Requires-Dist: opentelemetry-instrumentation-watsonx>=0.47.1 ; extra == 'all'
+Requires-Dist: opentelemetry-instrumentation-weaviate>=0.47.1 ; extra == 'all'
+Requires-Dist: opentelemetry-instrumentation-bedrock>=0.47.1 ; extra == 'bedrock'
+Requires-Dist: opentelemetry-instrumentation-chromadb>=0.47.1 ; extra == 'chromadb'
+Requires-Dist: opentelemetry-instrumentation-cohere>=0.47.1 ; extra == 'cohere'
+Requires-Dist: opentelemetry-instrumentation-crewai>=0.47.1 ; extra == 'crewai'
+Requires-Dist: opentelemetry-instrumentation-haystack>=0.47.1 ; extra == 'haystack'
+Requires-Dist: opentelemetry-instrumentation-lancedb>=0.47.1 ; extra == 'lancedb'
+Requires-Dist: opentelemetry-instrumentation-langchain>=0.47.1 ; extra == 'langchain'
+Requires-Dist: opentelemetry-instrumentation-llamaindex>=0.47.1 ; extra == 'llamaindex'
+Requires-Dist: opentelemetry-instrumentation-marqo>=0.47.1 ; extra == 'marqo'
+Requires-Dist: opentelemetry-instrumentation-mcp>=0.47.1 ; extra == 'mcp'
+Requires-Dist: opentelemetry-instrumentation-milvus>=0.47.1 ; extra == 'milvus'
+Requires-Dist: opentelemetry-instrumentation-mistralai>=0.47.1 ; extra == 'mistralai'
+Requires-Dist: opentelemetry-instrumentation-ollama>=0.47.1 ; extra == 'ollama'
+Requires-Dist: opentelemetry-instrumentation-pinecone>=0.47.1 ; extra == 'pinecone'
+Requires-Dist: opentelemetry-instrumentation-qdrant>=0.47.1 ; extra == 'qdrant'
+Requires-Dist: opentelemetry-instrumentation-replicate>=0.47.1 ; extra == 'replicate'
+Requires-Dist: opentelemetry-instrumentation-sagemaker>=0.47.1 ; extra == 'sagemaker'
+Requires-Dist: opentelemetry-instrumentation-together>=0.47.1 ; extra == 'together'
+Requires-Dist: opentelemetry-instrumentation-transformers>=0.47.1 ; extra == 'transformers'
+Requires-Dist: opentelemetry-instrumentation-vertexai>=0.47.1 ; extra == 'vertexai'
+Requires-Dist: opentelemetry-instrumentation-watsonx>=0.47.1 ; extra == 'watsonx'
+Requires-Dist: opentelemetry-instrumentation-weaviate>=0.47.1 ; extra == 'weaviate'
 Requires-Python: >=3.10, <4
 Provides-Extra: alephalpha
 Provides-Extra: all
@@ -49,11 +49,11 @@ lmnr/opentelemetry_lib/opentelemetry/instrumentation/openhands_ai/__init__.py,sh
 lmnr/opentelemetry_lib/opentelemetry/instrumentation/opentelemetry/__init__.py,sha256=1f86cdf738e2f68586b0a4569bb1e40edddd85c529f511ef49945ddb7b61fab5,2648
 lmnr/opentelemetry_lib/opentelemetry/instrumentation/skyvern/__init__.py,sha256=764e4fe979fb08d7821419a3cc5c3ae89a6664b626ef928259f8f175c939eaea,6334
 lmnr/opentelemetry_lib/opentelemetry/instrumentation/threading/__init__.py,sha256=90aa8558467d7e469fe1a6c75372c113da403557715f03b522b2fab94b287c40,6320
-lmnr/opentelemetry_lib/tracing/__init__.py,sha256=6e29f4d8d7ce1b63567f18f469914c9673594cfbbfb084ebc876fc5443936994,11322
-lmnr/opentelemetry_lib/tracing/_instrument_initializers.py,sha256=4f4447f1eabf8330380b48376e297ae26a2d86f3b56e5824ac0bdbf0e0c1f25a,17166
+lmnr/opentelemetry_lib/tracing/__init__.py,sha256=a6e1913c4e80493bf0ecb49fca7bd589e297d8e1b7e52c1898792bbba2003fe8,11558
+lmnr/opentelemetry_lib/tracing/_instrument_initializers.py,sha256=865953b6ad6e72c9e852929c9695ea0ebc5614de20e6d68fbcfc84046c333c15,17386
 lmnr/opentelemetry_lib/tracing/attributes.py,sha256=a879e337ff4e8569a4454544d303ccbc3b04bd42e1cdb765eb563aeaa08f731d,1653
 lmnr/opentelemetry_lib/tracing/context.py,sha256=83f842be0fc29a96647cbf005c39ea761b0fb5913c4102f965411f47906a6135,4103
-lmnr/opentelemetry_lib/tracing/exporter.py,sha256=a5a1e7627061f30109c0b7c4c951a2a1ed2e346af33c66804b7c981dc3550468,2470
+lmnr/opentelemetry_lib/tracing/exporter.py,sha256=48ad51595b3ee6a9655643a09323b14c24120191ef980e40252007a3ddc48367,3692
 lmnr/opentelemetry_lib/tracing/instruments.py,sha256=a748249130f8610e9d9f64f90f082e643f300765ebf46899a51bd9b6d6f5a03e,6066
 lmnr/opentelemetry_lib/tracing/processor.py,sha256=74a4c6967c6b0b0672d1292c626fe294375f426a87056362f56c5c4195647d0b,4279
 lmnr/opentelemetry_lib/tracing/tracer.py,sha256=33769a9a97385f5697eb0e0a6b1813a57ed956c7a8379d7ac2523e700e7dd528,1362
@@ -73,32 +73,32 @@ lmnr/sdk/browser/playwright_otel.py,sha256=859d220d856c8fe7104863efca0c6a3ed5464
 lmnr/sdk/browser/pw_utils.py,sha256=a75769eb977d8e56c38a0eefad09b87550b872f8d4df186b36a8c4d4af2bffaf,29021
 lmnr/sdk/browser/recorder/record.umd.min.cjs,sha256=f09c09052c2fc474efb0405e63d8d26ed2184b994513ce8aee04efdac8be155d,181235
 lmnr/sdk/browser/utils.py,sha256=4a668776d2938108d25fbcecd61c8e1710a4da3e56230d5fefca5964dd09e3c1,2371
-lmnr/sdk/client/asynchronous/async_client.py,sha256=e8feae007506cd2e4b08e72706f5f1bb4ea54492b4aa6b68ef184a129de8f466,4948
+lmnr/sdk/client/asynchronous/async_client.py,sha256=4c0161d19453cf5af350d5237828b55f1f11967dfc9269a088fa59fb1ecb998c,6055
 lmnr/sdk/client/asynchronous/resources/__init__.py,sha256=993423ea462aa8ea37d8d91662341c1ca0711cb2447cd476aacc373858f76135,481
 lmnr/sdk/client/asynchronous/resources/agent.py,sha256=3a78372b62912cdeda831d7ff9a671306713fce185dff646b452e6f1a3cc6d8c,17788
 lmnr/sdk/client/asynchronous/resources/base.py,sha256=689e37435ae5b60db7210688e1e79a64a724c554e00d46c226b0a18500941281,986
 lmnr/sdk/client/asynchronous/resources/browser_events.py,sha256=76f4d7599ee54c0592198585301adc2e9c338398332eb11e445f763f4ab4c7ca,1164
-lmnr/sdk/client/asynchronous/resources/evals.py,sha256=8c6f8096916657ef269463b2d0585795d9cedad056a047abcde6365ff0b320bd,5761
+lmnr/sdk/client/asynchronous/resources/evals.py,sha256=0e13db91327e035c44b9e3a9dc9715906c24a797e0903e041aa272f440c85e32,9203
 lmnr/sdk/client/asynchronous/resources/evaluators.py,sha256=964046f5146e89032fbb701b883f4f3a7cb996aeb9ff368f86e8f967df2fef10,2918
 lmnr/sdk/client/asynchronous/resources/tags.py,sha256=14fc2e38cae2f6fe126dc8dca085d7ad02d8d7c1a09bc4b5b5b8e38a0edf7348,2314
 lmnr/sdk/client/synchronous/resources/__init__.py,sha256=685792a8c8494ea061592b86cb63d6bb0dca8d9848181aa11b7d97d5714df337,403
 lmnr/sdk/client/synchronous/resources/agent.py,sha256=9a74eeeada0dd8b6e0984850fa6759d02ccd02792b1a292caf2b34032330cf60,17809
 lmnr/sdk/client/synchronous/resources/base.py,sha256=9ded59675d1498d90cac4095bc295c1097dc1499521af697382f0aea66533dd6,971
 lmnr/sdk/client/synchronous/resources/browser_events.py,sha256=f6b1585997ac5d0a269c581b679f74b4614c4da363d0e0334fd45c1700fcabf6,1135
-lmnr/sdk/client/synchronous/resources/evals.py,sha256=415fefe234519f8affb24d858efa9d6c0735f966b6194977a96ac2ce16d066c0,7008
+lmnr/sdk/client/synchronous/resources/evals.py,sha256=50ce1051554413224f0e6fb9f9a9474af3b4ae48a38c2821ea617d1c46ecc7d2,9026
 lmnr/sdk/client/synchronous/resources/evaluators.py,sha256=3cd6a17e7a9cc0441c2d20bf6cf46ce3720131cc30053e2cd124e5668c75f49a,2879
 lmnr/sdk/client/synchronous/resources/tags.py,sha256=123deec43128662c21cb275b2df6a102372f875315b0bd36806555394c1d4b5b,2270
-lmnr/sdk/client/synchronous/sync_client.py,sha256=0bebe88e3aed689505e9ed3d32036f76df4c3496e4d659162bd41abedc026f16,5299
+lmnr/sdk/client/synchronous/sync_client.py,sha256=ac3ee2f85617c00f4ab0b934166d137dba451876d2bdd8722d7eb3607031aa42,6405
 lmnr/sdk/datasets.py,sha256=3fd851c5f97bf88eaa84b1451a053eaff23b4497cbb45eac2f9ea0e5f2886c00,1708
 lmnr/sdk/decorators.py,sha256=c709b76a814e019c919fd811591850787a2f266b7b6f46123f66ddd92e1092d5,6920
 lmnr/sdk/eval_control.py,sha256=291394ac385c653ae9b5167e871bebeb4fe8fc6b7ff2ed38e636f87015dcba86,184
 lmnr/sdk/evaluations.py,sha256=7e55cbca77fa32cb64cb77aed8076a1994258a5b652c7f1d45231928e4aefe26,23885
-lmnr/sdk/laminar.py,sha256=1bd434297a1487679093fed27113d4b031a4129d65b0517f6a668dad75f4c4bd,38106
+lmnr/sdk/laminar.py,sha256=24adfd64da01d7fd69ba9437cf9860a5c64aa6baab1bb92d8ba143db1be12e96,38313
 lmnr/sdk/log.py,sha256=9edfd83263f0d4845b1b2d1beeae2b4ed3f8628de941f371a893d72b79c348d4,2213
-lmnr/sdk/types.py,sha256=f8a8368e225c4d2f82df54d92f029065afb60c3eff494c77c6e574963ed524ff,13454
-lmnr/sdk/utils.py,sha256=0c5a81c305dcd3922f4b31c4f42cf83719c03888725838395adae167de92db76,5019
-lmnr/version.py,sha256=c8627d415d619760cd3a9ef67286da4e10a35e6bf840e9e24f3221987e222aa8,1322
-lmnr-0.7.12.dist-info/WHEEL,sha256=ab6157bc637547491fb4567cd7ddf26b04d63382916ca16c29a5c8e94c9c9ef7,79
-lmnr-0.7.12.dist-info/entry_points.txt,sha256=abdf3411b7dd2d7329a241f2da6669bab4e314a747a586ecdb9f888f3035003c,39
-lmnr-0.7.12.dist-info/METADATA,sha256=a67d7715fe3222f9824606190898cc679ca3d0215851dac36c6a966aa5bb6122,14195
-lmnr-0.7.12.dist-info/RECORD,,
+lmnr/sdk/types.py,sha256=d8061ca90dd582b408a893ebbbeb1586e8750ed30433ef4f6d63423a078511b0,14574
+lmnr/sdk/utils.py,sha256=4114559ba6ae57fcba2de2bfaa09339688ce5752c36f028a7b55e51eae624947,6307
+lmnr/version.py,sha256=fdf6db3bfdec750eb7f9e086f1383d81089cece53220f95a2462f750866164cc,1322
+lmnr-0.7.14.dist-info/WHEEL,sha256=ab6157bc637547491fb4567cd7ddf26b04d63382916ca16c29a5c8e94c9c9ef7,79
+lmnr-0.7.14.dist-info/entry_points.txt,sha256=abdf3411b7dd2d7329a241f2da6669bab4e314a747a586ecdb9f888f3035003c,39
+lmnr-0.7.14.dist-info/METADATA,sha256=6d8d77fa8ee86053d6d0796676ebcbd09160e3c639a5ff040983169a030d8ad2,14195
+lmnr-0.7.14.dist-info/RECORD,,