lmnr 0.4.56__py3-none-any.whl → 0.4.58__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -34,6 +34,7 @@ class Traceloop:
         instruments: Optional[Set[Instruments]] = None,
         base_http_url: Optional[str] = None,
         project_api_key: Optional[str] = None,
+        max_export_batch_size: Optional[int] = None,
     ) -> None:
         if not is_tracing_enabled():
             return
@@ -73,4 +74,5 @@ class Traceloop:
             instruments=instruments,
             base_http_url=base_http_url,
             project_api_key=project_api_key,
+            max_export_batch_size=max_export_batch_size,
         )
@@ -83,6 +83,7 @@ class TracerWrapper(object):
         instruments: Optional[Set[Instruments]] = None,
         base_http_url: Optional[str] = None,
        project_api_key: Optional[str] = None,
+        max_export_batch_size: Optional[int] = None,
    ) -> "TracerWrapper":
        cls._initialize_logger(cls)
        if not hasattr(cls, "instance"):
@@ -109,7 +110,8 @@ class TracerWrapper(object):
            )
        else:
            obj.__spans_processor: SpanProcessor = BatchSpanProcessor(
-                obj.__spans_exporter
+                obj.__spans_exporter,
+                max_export_batch_size=max_export_batch_size,
            )
            obj.__spans_processor_original_on_start = None
 
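Note on the change above: the new `max_export_batch_size` argument is threaded from the public `Laminar.initialize` call (see the lmnr/sdk/laminar.py diff below) through `Traceloop` and `TracerWrapper` down to OpenTelemetry's `BatchSpanProcessor`, which accepts a keyword argument of the same name. A minimal usage sketch, assuming only the signatures shown in this diff (the API key value is a placeholder):

    from lmnr import Laminar

    # Cap each export request at 64 spans; the value is forwarded to BatchSpanProcessor.
    Laminar.initialize(
        project_api_key="<project-api-key>",  # placeholder; normally read from LMNR_PROJECT_API_KEY
        max_export_batch_size=64,
    )
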
@@ -430,6 +432,10 @@ def init_openai_instrumentor(should_enrich_metrics: bool):
        instrumentor = OpenAIInstrumentor(
            enrich_assistant=should_enrich_metrics,
            enrich_token_usage=should_enrich_metrics,
+            # Default in the package provided is an empty function, which
+            # results in dropping the image data if we don't explicitly
+            # set it to None.
+            upload_base64_image=None,
        )
        if not instrumentor.is_instrumented_by_opentelemetry:
            instrumentor.instrument()
@@ -1,5 +1,6 @@
 import opentelemetry
 import uuid
+import asyncio
 
 try:
     from playwright.async_api import BrowserContext, Page
@@ -9,22 +10,19 @@ try:
     )
 except ImportError as e:
     raise ImportError(
-        f"Attempated to import {__file__}, but it is designed "
+        f"Attempted to import {__file__}, but it is designed "
        "to patch Playwright, which is not installed. Use `pip install playwright` "
        "to install Playwright or remove this import."
    ) from e
 
 _original_new_page = None
-_original_goto = None
 _original_new_page_async = None
-_original_goto_async = None
 
 INJECT_PLACEHOLDER = """
 ([baseUrl, projectApiKey]) => {
    const serverUrl = `${baseUrl}/v1/browser-sessions/events`;
-    const BATCH_SIZE = 16;
    const FLUSH_INTERVAL = 1000;
-    const HEARTBEAT_INTERVAL = 1000; // 1 second heartbeat
+    const HEARTBEAT_INTERVAL = 1000;
 
    window.rrwebEventsBatch = [];
 
@@ -36,13 +34,37 @@ INJECT_PLACEHOLDER = """
            traceId: window.traceId,
            events: window.rrwebEventsBatch
        };
-
+
        try {
-            await fetch(serverUrl, {
+            const jsonString = JSON.stringify(eventsPayload);
+            const uint8Array = new TextEncoder().encode(jsonString);
+
+            const cs = new CompressionStream('gzip');
+            const compressedStream = await new Response(
+                new Response(uint8Array).body.pipeThrough(cs)
+            ).arrayBuffer();
+
+            const compressedArray = new Uint8Array(compressedStream);
+
+            const blob = new Blob([compressedArray], { type: 'application/octet-stream' });
+
+            const response = await fetch(serverUrl, {
                method: 'POST',
-                headers: { 'Content-Type': 'application/json', 'Authorization': `Bearer ${projectApiKey}` },
-                body: JSON.stringify(eventsPayload),
+                headers: {
+                    'Content-Type': 'application/json',
+                    'Content-Encoding': 'gzip',
+                    'Authorization': `Bearer ${projectApiKey}`
+                },
+                body: blob,
+                credentials: 'omit',
+                mode: 'cors',
+                cache: 'no-cache',
            });
+
+            if (!response.ok) {
+                throw new Error(`HTTP error! status: ${response.status}`);
+            }
+
            window.rrwebEventsBatch = [];
        } catch (error) {
            console.error('Failed to send events:', error);
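The injected recorder now gzip-compresses each rrweb event batch in the browser (via `CompressionStream`) and POSTs it with a `Content-Encoding: gzip` header instead of sending plain JSON. A rough Python equivalent of the same request, shown only to illustrate the wire format; the endpoint path and headers come from the snippet above, while the use of `requests` and the payload variable names are illustrative assumptions:

    import gzip
    import json

    import requests  # illustrative only; not a dependency of this package

    def send_batch(base_url: str, project_api_key: str, trace_id: str, events: list) -> None:
        # Mirror the eventsPayload object above (only the visible fields are shown).
        payload = {"traceId": trace_id, "events": events}
        body = gzip.compress(json.dumps(payload).encode("utf-8"))
        response = requests.post(
            f"{base_url}/v1/browser-sessions/events",
            data=body,
            headers={
                "Content-Type": "application/json",
                "Content-Encoding": "gzip",
                "Authorization": f"Bearer {project_api_key}",
            },
        )
        response.raise_for_status()
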
@@ -51,10 +73,9 @@ INJECT_PLACEHOLDER = """
 
    setInterval(() => window.sendBatch(), FLUSH_INTERVAL);
 
-    // Add heartbeat event
    setInterval(() => {
        window.rrwebEventsBatch.push({
-            type: 6, // Custom event type
+            type: 6,
            data: { source: 'heartbeat' },
            timestamp: Date.now()
        });
@@ -62,15 +83,10 @@ INJECT_PLACEHOLDER = """
 
    window.rrweb.record({
        emit(event) {
-            window.rrwebEventsBatch.push(event);
-
-            if (window.rrwebEventsBatch.length >= BATCH_SIZE) {
-                window.sendBatch();
-            }
+            window.rrwebEventsBatch.push(event);
        }
    });
 
-    // Simplified beforeunload handler
    window.addEventListener('beforeunload', () => {
        window.sendBatch();
    });
@@ -79,6 +95,7 @@ INJECT_PLACEHOLDER = """
 
 
 def init_playwright_tracing(http_url: str, project_api_key: str):
+
    def inject_rrweb(page: SyncPage):
        # Get current trace ID from active span
        current_span = opentelemetry.trace.get_current_span()
@@ -95,7 +112,7 @@ def init_playwright_tracing(http_url: str, project_api_key: str):
            [trace_id, session_id],
        )
 
-        # Load rrweb and set up recording
+        # Load rrweb from CDN
        page.add_script_tag(
            url="https://cdn.jsdelivr.net/npm/rrweb@latest/dist/rrweb.min.js"
        )
@@ -107,86 +124,125 @@ def init_playwright_tracing(http_url: str, project_api_key: str):
        )
 
    async def inject_rrweb_async(page: Page):
-        # Wait for the page to be in a ready state first
-        await page.wait_for_load_state("domcontentloaded")
-
-        # Get current trace ID from active span
-        current_span = opentelemetry.trace.get_current_span()
-        current_span.set_attribute("lmnr.internal.has_browser_session", True)
-        trace_id = format(current_span.get_span_context().trace_id, "032x")
-        session_id = str(uuid.uuid4().hex)
-
-        # Wait for any existing script load to complete
-        await page.wait_for_load_state("networkidle")
-
-        # Generate UUID session ID and set trace ID
-        await page.evaluate(
-            """([traceId, sessionId]) => {
-                window.rrwebSessionId = sessionId;
-                window.traceId = traceId;
-            }""",
-            [trace_id, session_id],
-        )
-
-        # Load rrweb and set up recording
-        await page.add_script_tag(
-            url="https://cdn.jsdelivr.net/npm/rrweb@latest/dist/rrweb.min.js"
-        )
+        try:
+            # Wait for the page to be in a ready state first
+            await page.wait_for_load_state("domcontentloaded")
+
+            # Get current trace ID from active span
+            current_span = opentelemetry.trace.get_current_span()
+            current_span.set_attribute("lmnr.internal.has_browser_session", True)
+            trace_id = format(current_span.get_span_context().trace_id, "032x")
+            session_id = str(uuid.uuid4().hex)
+
+            # Generate UUID session ID and set trace ID
+            await page.evaluate(
+                """([traceId, sessionId]) => {
+                    window.rrwebSessionId = sessionId;
+                    window.traceId = traceId;
+                }""",
+                [trace_id, session_id],
+            )
+
+            # Load rrweb from CDN
+            await page.add_script_tag(
+                url="https://cdn.jsdelivr.net/npm/rrweb@latest/dist/rrweb.min.js"
+            )
+
+            await page.wait_for_function(
+                """(() => window.rrweb || 'rrweb' in window)"""
+            )
+
+            # Update the recording setup to include trace ID
+            await page.evaluate(
+                INJECT_PLACEHOLDER,
+                [http_url, project_api_key],
+            )
+        except Exception as e:
+            print(f"Error injecting rrweb: {e}")
+
+    def handle_navigation(page: SyncPage):
+        def on_load():
+            inject_rrweb(page)
+
+        page.on("load", on_load)
+        inject_rrweb(page)
 
-        await page.wait_for_function("""(() => window.rrweb || 'rrweb' in window)""")
+    async def handle_navigation_async(page: Page):
+        async def on_load():
+            await inject_rrweb_async(page)
 
-        # Update the recording setup to include trace ID
-        await page.evaluate(
-            INJECT_PLACEHOLDER,
-            [http_url, project_api_key],
-        )
+        page.on("load", lambda: asyncio.create_task(on_load()))
+        await inject_rrweb_async(page)
 
    async def patched_new_page_async(self: BrowserContext, *args, **kwargs):
-        # Call the original new_page (returns a Page object)
+        # Modify CSP to allow required domains
+        async def handle_route(route):
+            try:
+                response = await route.fetch()
+                headers = dict(response.headers)
+
+                # Find and modify CSP header
+                for header_name in headers:
+                    if header_name.lower() == "content-security-policy":
+                        csp = headers[header_name]
+                        parts = csp.split(";")
+                        for i, part in enumerate(parts):
+                            if "script-src" in part:
+                                parts[i] = f"{part.strip()} cdn.jsdelivr.net"
+                            elif "connect-src" in part:
+                                parts[i] = f"{part.strip()} " + http_url
+                        if not any("connect-src" in part for part in parts):
+                            parts.append(" connect-src 'self' " + http_url)
+                        headers[header_name] = ";".join(parts)
+
+                await route.fulfill(response=response, headers=headers)
+            except Exception:
+                await route.continue_()
+
+        await self.route("**/*", handle_route)
        page = await _original_new_page_async(self, *args, **kwargs)
-        # Inject rrweb automatically after the page is created
-        await inject_rrweb_async(page)
+        await handle_navigation_async(page)
        return page
 
-    async def patched_goto_async(self: Page, *args, **kwargs):
-        # Call the original goto
-        result = await _original_goto_async(self, *args, **kwargs)
-        # Inject rrweb after navigation
-        await inject_rrweb_async(self)
-        return result
-
    def patched_new_page(self: SyncBrowserContext, *args, **kwargs):
-        # Call the original new_page (returns a Page object)
+        # Modify CSP to allow required domains
+        def handle_route(route):
+            try:
+                response = route.fetch()
+                headers = dict(response.headers)
+
+                # Find and modify CSP header
+                for header_name in headers:
+                    if header_name.lower() == "content-security-policy":
+                        csp = headers[header_name]
+                        parts = csp.split(";")
+                        for i, part in enumerate(parts):
+                            if "script-src" in part:
+                                parts[i] = f"{part.strip()} cdn.jsdelivr.net"
+                            elif "connect-src" in part:
+                                parts[i] = f"{part.strip()} " + http_url
+                        if not any("connect-src" in part for part in parts):
+                            parts.append(" connect-src 'self' " + http_url)
+                        headers[header_name] = ";".join(parts)
+
+                route.fulfill(response=response, headers=headers)
+            except Exception:
+                # Continue with the original request without modification
+                route.continue_()
+
+        self.route("**/*", handle_route)
        page = _original_new_page(self, *args, **kwargs)
-        # Inject rrweb automatically after the page is created
-        inject_rrweb(page)
+        handle_navigation(page)
        return page
 
-    def patched_goto(self: SyncPage, *args, **kwargs):
-        # Call the original goto
-        result = _original_goto(self, *args, **kwargs)
-        # Inject rrweb after navigation
-        inject_rrweb(self)
-        return result
-
    def patch_browser():
-        """
-        Overrides BrowserContext.new_page with a patched async function
-        that injects rrweb into every new page.
-        """
-        global _original_new_page, _original_goto, _original_new_page_async, _original_goto_async
-        if _original_new_page_async is None or _original_goto_async is None:
+        global _original_new_page, _original_new_page_async
+        if _original_new_page_async is None:
            _original_new_page_async = BrowserContext.new_page
            BrowserContext.new_page = patched_new_page_async
 
-            _original_goto_async = Page.goto
-            Page.goto = patched_goto_async
-
-        if _original_new_page is None or _original_goto is None:
+        if _original_new_page is None:
            _original_new_page = SyncBrowserContext.new_page
            SyncBrowserContext.new_page = patched_new_page
 
-            _original_goto = SyncPage.goto
-            SyncPage.goto = patched_goto
-
    patch_browser()
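Beyond moving injection from `Page.goto` patches to `page.on("load", ...)` handlers, the patched `new_page` variants above install a route handler that relaxes each response's Content-Security-Policy so the rrweb script (cdn.jsdelivr.net) and the event endpoint (`http_url`) are permitted. The header-rewrite step, extracted into a standalone sketch for readability (the function name `relax_csp` is illustrative, not part of the SDK):

    def relax_csp(csp: str, http_url: str) -> str:
        # Same logic as handle_route above: allow cdn.jsdelivr.net in script-src,
        # allow the ingestion URL in connect-src, and add a connect-src directive
        # if the policy does not define one.
        parts = csp.split(";")
        for i, part in enumerate(parts):
            if "script-src" in part:
                parts[i] = f"{part.strip()} cdn.jsdelivr.net"
            elif "connect-src" in part:
                parts[i] = f"{part.strip()} {http_url}"
        if not any("connect-src" in part for part in parts):
            parts.append(f" connect-src 'self' {http_url}")
        return ";".join(parts)

    # relax_csp("default-src 'self'; script-src 'self'", "https://api.lmnr.ai")
    # -> "default-src 'self';script-src 'self' cdn.jsdelivr.net; connect-src 'self' https://api.lmnr.ai"
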
lmnr/sdk/evaluations.py CHANGED
@@ -2,7 +2,6 @@ import asyncio
 import re
 import sys
 import uuid
-
 from tqdm import tqdm
 from typing import Any, Awaitable, Optional, Set, Union
 
@@ -27,9 +26,12 @@ from .types import (
 )
 from .utils import is_async
 DEFAULT_BATCH_SIZE = 5
+MAX_EXPORT_BATCH_SIZE = 64
 
 
-def get_evaluation_url(project_id: str, evaluation_id: str, base_url: Optional[str] = None):
+def get_evaluation_url(
+    project_id: str, evaluation_id: str, base_url: Optional[str] = None
+):
    if not base_url:
        base_url = "https://www.lmnr.ai"
 
@@ -39,7 +41,7 @@ def get_evaluation_url(project_id: str, evaluation_id: str, base_url: Optional[s
    if url.endswith("localhost") or url.endswith("127.0.0.1"):
        # We best effort assume that the frontend is running on port 3000
        # TODO: expose the frontend port?
-        url = url + ":3000"
+        url = url + ":5667"
    return f"{url}/project/{project_id}/evaluations/{evaluation_id}"
 
 
@@ -97,13 +99,14 @@ class Evaluation:
        evaluators: dict[str, EvaluatorFunction],
        human_evaluators: list[HumanEvaluator] = [],
        name: Optional[str] = None,
-        group_id: Optional[str] = None,
-        batch_size: int = DEFAULT_BATCH_SIZE,
+        group_name: Optional[str] = None,
+        concurrency_limit: int = DEFAULT_BATCH_SIZE,
        project_api_key: Optional[str] = None,
        base_url: Optional[str] = None,
        http_port: Optional[int] = None,
        grpc_port: Optional[int] = None,
        instruments: Optional[Set[Instruments]] = None,
+        max_export_batch_size: Optional[int] = MAX_EXPORT_BATCH_SIZE,
    ):
        """
        Initializes an instance of the Evaluations class.
@@ -131,12 +134,12 @@ class Evaluation:
            Used to identify the evaluation in the group.\
            If not provided, a random name will be generated.
            Defaults to None.
-        group_id (Optional[str], optional): an identifier to group\
-            evaluations. Only evaluations within the same group_id can be\
+        group_name (Optional[str], optional): an identifier to group\
+            evaluations. Only evaluations within the same group_name can be\
            visually compared. If not provided, "default" is assigned.
            Defaults to None
-        batch_size (int, optional): The batch size for evaluation. This many\
-            data points will be evaluated in parallel.
+        concurrency_limit (int, optional): The concurrency limit for evaluation. This many\
+            data points will be evaluated in parallel with a pool of workers.
            Defaults to DEFAULT_BATCH_SIZE.
        project_api_key (Optional[str], optional): The project API key.\
            If not provided, LMNR_PROJECT_API_KEY environment variable is\
@@ -180,17 +183,20 @@ class Evaluation:
        self.data = data
        self.executor = executor
        self.evaluators = evaluators
-        self.group_id = group_id
+        self.group_name = group_name
        self.name = name
-        self.batch_size = batch_size
+        self.concurrency_limit = concurrency_limit
+        self.batch_size = concurrency_limit
        self._logger = get_default_logger(self.__class__.__name__)
        self.human_evaluators = human_evaluators
+        self.upload_tasks = []  # Add this line to track upload tasks
        L.initialize(
            project_api_key=project_api_key,
            base_url=base_url,
            http_port=http_port,
            grpc_port=grpc_port,
            instruments=instruments,
+            max_export_batch_size=max_export_batch_size,
        )
 
    async def run(self) -> Awaitable[None]:
@@ -200,49 +206,57 @@
 
    async def _run(self) -> None:
        self.reporter.start(len(self.data))
-
        try:
-            result_datapoints = await self._evaluate_in_batches()
+            evaluation = await L.init_eval(name=self.name, group_name=self.group_name)
+            result_datapoints = await self._evaluate_in_batches(evaluation.id)
+
+            # Wait for all background upload tasks to complete
+            if self.upload_tasks:
+                self._logger.debug(
+                    f"Waiting for {len(self.upload_tasks)} upload tasks to complete"
+                )
+                await asyncio.gather(*self.upload_tasks)
+                self._logger.debug("All upload tasks completed")
        except Exception as e:
            self.reporter.stopWithError(e)
            self.is_finished = True
            return
 
-        # For now add all human evaluators to all result datapoints
-        # In the future, we will add ways to specify which human evaluators
-        # to add to which result datapoints, e.g. sample some randomly
-        for result_datapoint in result_datapoints:
-            result_datapoint.human_evaluators = self.human_evaluators or {}
-
-        evaluation = await L.create_evaluation(
-            data=result_datapoints, group_id=self.group_id, name=self.name
-        )
        average_scores = get_average_scores(result_datapoints)
        self.reporter.stop(average_scores, evaluation.projectId, evaluation.id)
        self.is_finished = True
 
-    async def _evaluate_in_batches(self) -> list[EvaluationResultDatapoint]:
-        result_datapoints = []
-        for i in range(0, len(self.data), self.batch_size):
-            batch = (
-                self.data[i : i + self.batch_size]
-                if isinstance(self.data, list)
-                else self.data.slice(i, i + self.batch_size)
-            )
-            batch_datapoints = await self._evaluate_batch(batch)
-            result_datapoints.extend(batch_datapoints)
-            self.reporter.update(len(batch))
-        return result_datapoints
-
-    async def _evaluate_batch(
-        self, batch: list[Datapoint]
+    async def _evaluate_in_batches(
+        self, eval_id: uuid.UUID
    ) -> list[EvaluationResultDatapoint]:
-        batch_promises = [self._evaluate_datapoint(datapoint) for datapoint in batch]
-        results = await asyncio.gather(*batch_promises)
-        return results
+
+        semaphore = asyncio.Semaphore(self.concurrency_limit)
+        tasks = []
+        data_iter = self.data if isinstance(self.data, list) else range(len(self.data))
+
+        async def evaluate_task(datapoint, index):
+            try:
+                result = await self._evaluate_datapoint(eval_id, datapoint, index)
+                self.reporter.update(1)
+                return index, result
+            finally:
+                semaphore.release()
+
+        # Create tasks only after acquiring semaphore
+        for idx, item in enumerate(data_iter):
+            await semaphore.acquire()
+            datapoint = item if isinstance(self.data, list) else self.data[item]
+            task = asyncio.create_task(evaluate_task(datapoint, idx))
+            tasks.append(task)
+
+        # Wait for all tasks to complete and preserve order
+        results = await asyncio.gather(*tasks)
+        ordered_results = [result for _, result in sorted(results, key=lambda x: x[0])]
+
+        return ordered_results
 
    async def _evaluate_datapoint(
-        self, datapoint: Datapoint
+        self, eval_id: uuid.UUID, datapoint: Datapoint, index: int
    ) -> EvaluationResultDatapoint:
        with L.start_as_current_span("evaluation") as evaluation_span:
            L._set_trace_type(trace_type=TraceType.EVALUATION)
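`_evaluate_in_batches` above drops the fixed-size batch loop in favour of a worker-pool pattern: an `asyncio.Semaphore` sized by `concurrency_limit` gates task creation, the progress reporter is updated per datapoint, and results are re-ordered by index after `gather`. The pattern in isolation, as a self-contained sketch (names are illustrative, not SDK API):

    import asyncio

    async def bounded_gather(items, worker, limit: int) -> list:
        # At most `limit` workers run at once: the semaphore is acquired before
        # each task is created and released when that task finishes, and results
        # are returned in input order.
        semaphore = asyncio.Semaphore(limit)

        async def run(index, item):
            try:
                return index, await worker(item)
            finally:
                semaphore.release()

        tasks = []
        for index, item in enumerate(items):
            await semaphore.acquire()
            tasks.append(asyncio.create_task(run(index, item)))

        results = await asyncio.gather(*tasks)
        return [value for _, value in sorted(results, key=lambda pair: pair[0])]
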
@@ -251,11 +265,15 @@
                "executor", input={"data": datapoint.data}
            ) as executor_span:
                executor_span.set_attribute(SPAN_TYPE, SpanType.EXECUTOR.value)
-                output = (
-                    await self.executor(datapoint.data)
-                    if is_async(self.executor)
-                    else self.executor(datapoint.data)
-                )
+                # Run synchronous executors in a thread pool to avoid blocking
+                if not is_async(self.executor):
+                    loop = asyncio.get_event_loop()
+                    output = await loop.run_in_executor(
+                        None, self.executor, datapoint.data
+                    )
+                else:
+                    output = await self.executor(datapoint.data)
+
            L.set_span_output(output)
            executor_span_id = uuid.UUID(
                int=executor_span.get_span_context().span_id
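Synchronous executors are now pushed onto the default thread pool with `loop.run_in_executor`, so a blocking executor no longer stalls the event loop that drives the other concurrent datapoints. The dispatch decision in isolation (a sketch; the SDK's `is_async` helper is approximated here with `inspect.iscoroutinefunction`):

    import asyncio
    import inspect

    async def call_executor(executor, data):
        # Await async executors directly; run blocking ones in the default
        # thread pool so the event loop stays responsive.
        if inspect.iscoroutinefunction(executor):
            return await executor(data)
        loop = asyncio.get_event_loop()
        return await loop.run_in_executor(None, executor, data)
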
@@ -283,14 +301,28 @@ class Evaluation:
                scores.update(value)
 
            trace_id = uuid.UUID(int=evaluation_span.get_span_context().trace_id)
-            return EvaluationResultDatapoint(
-                data=datapoint.data,
-                target=target,
-                executor_output=output,
-                scores=scores,
-                trace_id=trace_id,
-                executor_span_id=executor_span_id,
-            )
+
+            datapoint = EvaluationResultDatapoint(
+                data=datapoint.data,
+                target=target,
+                executor_output=output,
+                scores=scores,
+                trace_id=trace_id,
+                # For now add all human evaluators to all result datapoints
+                # In the future, we will add ways to specify which human evaluators
+                # to add to which result datapoints, e.g. sample some randomly
+                human_evaluators=self.human_evaluators,
+                executor_span_id=executor_span_id,
+                index=index,
+            )
+
+            # Create background upload task without awaiting it
+            upload_task = asyncio.create_task(
+                L.save_eval_datapoints(eval_id, [datapoint], self.group_name)
+            )
+            self.upload_tasks.append(upload_task)
+
+            return datapoint
 
 
 def evaluate(
@@ -299,8 +331,9 @@ def evaluate(
    evaluators: dict[str, EvaluatorFunction],
    human_evaluators: list[HumanEvaluator] = [],
    name: Optional[str] = None,
-    group_id: Optional[str] = None,
-    batch_size: int = DEFAULT_BATCH_SIZE,
+    group_id: Optional[str] = None,  # Deprecated
+    group_name: Optional[str] = None,
+    concurrency_limit: int = DEFAULT_BATCH_SIZE,
    project_api_key: Optional[str] = None,
    base_url: Optional[str] = None,
    http_port: Optional[int] = None,
@@ -318,12 +351,12 @@ def evaluate(
 
    Parameters:
        data (Union[list[EvaluationDatapoint|dict]], EvaluationDataset]):\
-            List of data points to evaluate or an evaluation dataset.
-            `data` is the input to the executor function,
-            `target` is the input to the evaluator function.
+            List of data points to evaluate or an evaluation dataset.
+            `data` is the input to the executor function,
+            `target` is the input to the evaluator function.
        executor (Callable[..., Any]): The executor function.\
-            Takes the data point + any additional arguments\
-            and returns the output to evaluate.
+            Takes the data point + any additional arguments\
+            and returns the output to evaluate.
        evaluators (List[Callable[..., Any]]):
        evaluators (dict[str, Callable[..., Any]]): Evaluator functions and\
            names. Each evaluator function takes the output of the executor\
@@ -337,14 +370,19 @@ def evaluate(
            evaluator only holds the queue name.
            Defaults to an empty list.
        name (Optional[str], optional): Optional name of the evaluation.\
-            Used to identify the evaluation in the group.\
-            If not provided, a random name will be generated.
-            Defaults to None.
-        group_id (Optional[str], optional): an identifier to group evaluations.\
+            Used to identify the evaluation in the group. If not provided, a\
+            random name will be generated.
+            Defaults to None.
+        group_id (Optional[str], optional): [DEPRECATED] Use group_name instead.
+            An identifier to group evaluations.\
            Only evaluations within the same group_id can be\
            visually compared. If not provided, set to "default".
            Defaults to None
-        batch_size (int, optional): The batch size for evaluation.
+        group_name (Optional[str], optional): An identifier to group evaluations.\
+            Only evaluations within the same group_name can be visually compared.\
+            If not provided, set to "default".
+            Defaults to None
+        concurrency_limit (int, optional): The concurrency limit for evaluation.
            Defaults to DEFAULT_BATCH_SIZE.
        project_api_key (Optional[str], optional): The project API key.
            Defaults to None.
@@ -363,15 +401,19 @@ def evaluate(
            will be used.
            Defaults to None.
    """
+    if group_id:
+        raise DeprecationWarning("group_id is deprecated. Use group_name instead.")
+
+    group_name = group_name or group_id
 
    evaluation = Evaluation(
        data=data,
        executor=executor,
        evaluators=evaluators,
-        group_id=group_id,
+        group_name=group_name,
        human_evaluators=human_evaluators,
        name=name,
-        batch_size=batch_size,
+        concurrency_limit=concurrency_limit,
        project_api_key=project_api_key,
        base_url=base_url,
        http_port=http_port,
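Call sites that passed `group_id` or `batch_size` to `evaluate` need to move to `group_name` and `concurrency_limit`; passing `group_id` now raises a `DeprecationWarning`. A hedged usage sketch with placeholder data, executor, and evaluator (only the parameter names come from this diff):

    from lmnr import evaluate  # assumes evaluate is exported at the package top level

    evaluate(
        data=[{"data": "What is 2 + 2?", "target": "4"}],
        executor=lambda data: "4",  # placeholder executor
        evaluators={"exact_match": lambda output, target: int(output == target)},
        name="arithmetic-check",
        group_name="math",       # replaces the deprecated group_id
        concurrency_limit=5,     # replaces batch_size: datapoints evaluated in parallel
    )
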
lmnr/sdk/laminar.py CHANGED
@@ -47,7 +47,8 @@ from lmnr.openllmetry_sdk.tracing.tracing import (
 from .log import VerboseColorfulFormatter
 
 from .types import (
-    CreateEvaluationResponse,
+    HumanEvaluator,
+    InitEvaluationResponse,
    EvaluationResultDatapoint,
    GetDatapointsResponse,
    PipelineRunError,
@@ -78,6 +79,7 @@ class Laminar:
        grpc_port: Optional[int] = None,
        instruments: Optional[Set[Instruments]] = None,
        disable_batch: bool = False,
+        max_export_batch_size: Optional[int] = None,
    ):
        """Initialize Laminar context across the application.
        This method must be called before using any other Laminar methods or
@@ -150,6 +152,7 @@ class Laminar:
            ),
            instruments=instruments,
            disable_batch=disable_batch,
+            max_export_batch_size=max_export_batch_size,
        )
 
    @classmethod
@@ -689,33 +692,42 @@ class Laminar:
        set_association_properties(props)
 
    @classmethod
-    async def create_evaluation(
-        cls,
-        data: list[EvaluationResultDatapoint],
-        group_id: Optional[str] = None,
-        name: Optional[str] = None,
-    ) -> CreateEvaluationResponse:
+    async def init_eval(
+        cls, name: Optional[str] = None, group_name: Optional[str] = None
+    ) -> InitEvaluationResponse:
        async with aiohttp.ClientSession() as session:
            async with session.post(
-                cls.__base_http_url + "/v1/evaluations",
+                cls.__base_http_url + "/v1/evals",
                json={
-                    "groupId": group_id,
                    "name": name,
-                    "points": [datapoint.to_dict() for datapoint in data],
+                    "groupName": group_name,
                },
                headers=cls._headers(),
            ) as response:
-                if response.status != 200:
-                    try:
-                        resp_json = await response.json()
-                        raise ValueError(
-                            f"Error creating evaluation {json.dumps(resp_json)}"
-                        )
-                    except aiohttp.ClientError:
-                        text = await response.text()
-                        raise ValueError(f"Error creating evaluation {text}")
                resp_json = await response.json()
-                return CreateEvaluationResponse.model_validate(resp_json)
+                return InitEvaluationResponse.model_validate(resp_json)
+
+    @classmethod
+    async def save_eval_datapoints(
+        cls,
+        eval_id: uuid.UUID,
+        datapoints: list[EvaluationResultDatapoint],
+        groupName: Optional[str] = None,
+    ):
+        async with aiohttp.ClientSession() as session:
+
+            async with session.post(
+                cls.__base_http_url + f"/v1/evals/{eval_id}/datapoints",
+                json={
+                    "points": [datapoint.to_dict() for datapoint in datapoints],
+                    "groupName": groupName,
+                },
+                headers=cls._headers(),
+            ) as response:
+                if response.status != 200:
+                    raise ValueError(
+                        f"Error saving evaluation datapoints: {response.text}"
+                    )
 
    @classmethod
    def get_datapoints(
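The single `create_evaluation` call (all datapoints in one POST to /v1/evaluations) is replaced by a two-step flow: `init_eval` registers the evaluation via POST /v1/evals, and `save_eval_datapoints` uploads results incrementally via POST /v1/evals/{eval_id}/datapoints; the evaluation runner in lmnr/sdk/evaluations.py schedules the latter as background tasks. A minimal sketch of the flow (assumes `Laminar` has already been initialized; the evaluation name and loop variable are illustrative):

    from lmnr import Laminar as L

    async def upload_results(finished_datapoints, group_name="default"):
        # Step 1: register the evaluation and get its id.
        evaluation = await L.init_eval(name="my-eval", group_name=group_name)
        # Step 2: stream datapoints to the new endpoint as they become available.
        for point in finished_datapoints:
            await L.save_eval_datapoints(evaluation.id, [point], group_name)
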
lmnr/sdk/types.py CHANGED
@@ -141,11 +141,8 @@ EvaluatorFunction = Callable[
 class HumanEvaluator(pydantic.BaseModel):
    queueName: str
 
-    def __init__(self, queue_name: str):
-        super().__init__(queueName=queue_name)
 
-
-class CreateEvaluationResponse(pydantic.BaseModel):
+class InitEvaluationResponse(pydantic.BaseModel):
    id: uuid.UUID
    createdAt: datetime.datetime
    groupId: str
@@ -161,14 +158,17 @@ class EvaluationResultDatapoint(pydantic.BaseModel):
    human_evaluators: list[HumanEvaluator] = pydantic.Field(default_factory=list)
    trace_id: uuid.UUID
    executor_span_id: uuid.UUID
+    index: int
 
    # uuid is not serializable by default, so we need to convert it to a string
    def to_dict(self):
        try:
            return {
-                "data": serialize(self.data),
-                "target": serialize(self.target),
-                "executorOutput": serialize(self.executor_output),
+                # preserve only preview of the data, target and executor output
+                # (full data is in trace)
+                "data": str(serialize(self.data))[:100],
+                "target": str(serialize(self.target))[:100],
+                "executorOutput": str(serialize(self.executor_output))[:100],
                "scores": self.scores,
                "traceId": str(self.trace_id),
                "humanEvaluators": [
@@ -180,6 +180,7 @@ class EvaluationResultDatapoint(pydantic.BaseModel):
                    for v in self.human_evaluators
                ],
                "executorSpanId": str(self.executor_span_id),
+                "index": self.index,
            }
        except Exception as e:
            raise ValueError(f"Error serializing EvaluationResultDatapoint: {e}")
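`to_dict` now uploads only a 100-character preview of `data`, `target`, and `executorOutput`; the full values remain on the trace. An illustrative check of the truncation behaviour:

    # Illustrative only: a long executor output is cut to a 100-character preview
    # in the uploaded datapoint, while the complete value stays in the trace.
    long_output = "x" * 500
    preview = str(long_output)[:100]   # what "executorOutput" now carries
    assert len(preview) == 100
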
lmnr/sdk/utils.py CHANGED
@@ -49,7 +49,7 @@ def is_iterator(o: typing.Any) -> bool:
    return hasattr(o, "__iter__") and hasattr(o, "__next__")
 
 
-def serialize(obj: typing.Any) -> dict[str, typing.Any]:
+def serialize(obj: typing.Any) -> typing.Union[str, dict[str, typing.Any]]:
    def serialize_inner(o: typing.Any):
        if isinstance(o, (datetime.datetime, datetime.date)):
            return o.strftime("%Y-%m-%dT%H:%M:%S.%f%z")
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: lmnr
-Version: 0.4.56
+Version: 0.4.58
 Summary: Python SDK for Laminar
 License: Apache-2.0
 Author: lmnr.ai
@@ -41,65 +41,64 @@ Provides-Extra: watsonx
 Provides-Extra: weaviate
 Requires-Dist: aiohttp (>=3.0)
 Requires-Dist: argparse (>=1.0)
-Requires-Dist: deprecated (>=1.0)
 Requires-Dist: grpcio (<1.68.0)
 Requires-Dist: opentelemetry-api (>=1.28.0)
 Requires-Dist: opentelemetry-exporter-otlp-proto-grpc (>=1.28.0)
 Requires-Dist: opentelemetry-exporter-otlp-proto-http (>=1.28.0)
-Requires-Dist: opentelemetry-instrumentation-alephalpha (>=0.36.1) ; extra == "alephalpha"
-Requires-Dist: opentelemetry-instrumentation-alephalpha (>=0.36.1) ; extra == "all"
-Requires-Dist: opentelemetry-instrumentation-anthropic (>=0.36.1) ; extra == "all"
-Requires-Dist: opentelemetry-instrumentation-anthropic (>=0.36.1) ; extra == "anthropic"
-Requires-Dist: opentelemetry-instrumentation-bedrock (>=0.36.1) ; extra == "all"
-Requires-Dist: opentelemetry-instrumentation-bedrock (>=0.36.1) ; extra == "bedrock"
-Requires-Dist: opentelemetry-instrumentation-chromadb (>=0.36.1) ; extra == "all"
-Requires-Dist: opentelemetry-instrumentation-chromadb (>=0.36.1) ; extra == "chromadb"
-Requires-Dist: opentelemetry-instrumentation-cohere (>=0.36.1) ; extra == "all"
-Requires-Dist: opentelemetry-instrumentation-cohere (>=0.36.1) ; extra == "cohere"
-Requires-Dist: opentelemetry-instrumentation-google-generativeai (>=0.36.1) ; extra == "all"
-Requires-Dist: opentelemetry-instrumentation-google-generativeai (>=0.36.1) ; extra == "google-generativeai"
-Requires-Dist: opentelemetry-instrumentation-groq (>=0.36.1) ; extra == "all"
-Requires-Dist: opentelemetry-instrumentation-groq (>=0.36.1) ; extra == "groq"
-Requires-Dist: opentelemetry-instrumentation-haystack (>=0.36.1) ; extra == "all"
-Requires-Dist: opentelemetry-instrumentation-haystack (>=0.36.1) ; extra == "haystack"
-Requires-Dist: opentelemetry-instrumentation-lancedb (>=0.36.1) ; extra == "all"
-Requires-Dist: opentelemetry-instrumentation-lancedb (>=0.36.1) ; extra == "lancedb"
-Requires-Dist: opentelemetry-instrumentation-langchain (>=0.36.1) ; extra == "all"
-Requires-Dist: opentelemetry-instrumentation-langchain (>=0.36.1) ; extra == "langchain"
-Requires-Dist: opentelemetry-instrumentation-llamaindex (>=0.36.1) ; extra == "all"
-Requires-Dist: opentelemetry-instrumentation-llamaindex (>=0.36.1) ; extra == "llamaindex"
-Requires-Dist: opentelemetry-instrumentation-marqo (>=0.36.1) ; extra == "all"
-Requires-Dist: opentelemetry-instrumentation-marqo (>=0.36.1) ; extra == "marqo"
-Requires-Dist: opentelemetry-instrumentation-milvus (>=0.36.1) ; extra == "all"
-Requires-Dist: opentelemetry-instrumentation-milvus (>=0.36.1) ; extra == "milvus"
-Requires-Dist: opentelemetry-instrumentation-mistralai (>=0.36.1) ; extra == "all"
-Requires-Dist: opentelemetry-instrumentation-mistralai (>=0.36.1) ; extra == "mistralai"
-Requires-Dist: opentelemetry-instrumentation-ollama (>=0.36.1) ; extra == "all"
-Requires-Dist: opentelemetry-instrumentation-ollama (>=0.36.1) ; extra == "ollama"
-Requires-Dist: opentelemetry-instrumentation-openai (>=0.36.1) ; extra == "all"
-Requires-Dist: opentelemetry-instrumentation-openai (>=0.36.1) ; extra == "openai"
-Requires-Dist: opentelemetry-instrumentation-pinecone (>=0.36.1) ; extra == "all"
-Requires-Dist: opentelemetry-instrumentation-pinecone (>=0.36.1) ; extra == "pinecone"
-Requires-Dist: opentelemetry-instrumentation-qdrant (>=0.36.1) ; extra == "all"
-Requires-Dist: opentelemetry-instrumentation-qdrant (>=0.36.1) ; extra == "qdrant"
-Requires-Dist: opentelemetry-instrumentation-replicate (>=0.36.1) ; extra == "all"
-Requires-Dist: opentelemetry-instrumentation-replicate (>=0.36.1) ; extra == "replicate"
+Requires-Dist: opentelemetry-instrumentation-alephalpha (>=0.37.1) ; extra == "alephalpha"
+Requires-Dist: opentelemetry-instrumentation-alephalpha (>=0.37.1) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-anthropic (>=0.37.1) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-anthropic (>=0.37.1) ; extra == "anthropic"
+Requires-Dist: opentelemetry-instrumentation-bedrock (>=0.37.1) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-bedrock (>=0.37.1) ; extra == "bedrock"
+Requires-Dist: opentelemetry-instrumentation-chromadb (>=0.37.1) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-chromadb (>=0.37.1) ; extra == "chromadb"
+Requires-Dist: opentelemetry-instrumentation-cohere (>=0.37.1) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-cohere (>=0.37.1) ; extra == "cohere"
+Requires-Dist: opentelemetry-instrumentation-google-generativeai (>=0.37.1) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-google-generativeai (>=0.37.1) ; extra == "google-generativeai"
+Requires-Dist: opentelemetry-instrumentation-groq (>=0.37.1) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-groq (>=0.37.1) ; extra == "groq"
+Requires-Dist: opentelemetry-instrumentation-haystack (>=0.37.1) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-haystack (>=0.37.1) ; extra == "haystack"
+Requires-Dist: opentelemetry-instrumentation-lancedb (>=0.37.1) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-lancedb (>=0.37.1) ; extra == "lancedb"
+Requires-Dist: opentelemetry-instrumentation-langchain (>=0.37.1) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-langchain (>=0.37.1) ; extra == "langchain"
+Requires-Dist: opentelemetry-instrumentation-llamaindex (>=0.37.1) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-llamaindex (>=0.37.1) ; extra == "llamaindex"
+Requires-Dist: opentelemetry-instrumentation-marqo (>=0.37.1) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-marqo (>=0.37.1) ; extra == "marqo"
+Requires-Dist: opentelemetry-instrumentation-milvus (>=0.37.1) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-milvus (>=0.37.1) ; extra == "milvus"
+Requires-Dist: opentelemetry-instrumentation-mistralai (>=0.37.1) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-mistralai (>=0.37.1) ; extra == "mistralai"
+Requires-Dist: opentelemetry-instrumentation-ollama (>=0.37.1) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-ollama (>=0.37.1) ; extra == "ollama"
+Requires-Dist: opentelemetry-instrumentation-openai (>=0.37.1) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-openai (>=0.37.1) ; extra == "openai"
+Requires-Dist: opentelemetry-instrumentation-pinecone (>=0.37.1) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-pinecone (>=0.37.1) ; extra == "pinecone"
+Requires-Dist: opentelemetry-instrumentation-qdrant (>=0.37.1) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-qdrant (>=0.37.1) ; extra == "qdrant"
+Requires-Dist: opentelemetry-instrumentation-replicate (>=0.37.1) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-replicate (>=0.37.1) ; extra == "replicate"
 Requires-Dist: opentelemetry-instrumentation-requests (>=0.50b0)
-Requires-Dist: opentelemetry-instrumentation-sagemaker (>=0.36.1) ; extra == "all"
-Requires-Dist: opentelemetry-instrumentation-sagemaker (>=0.36.1) ; extra == "sagemaker"
+Requires-Dist: opentelemetry-instrumentation-sagemaker (>=0.37.1) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-sagemaker (>=0.37.1) ; extra == "sagemaker"
 Requires-Dist: opentelemetry-instrumentation-sqlalchemy (>=0.50b0)
 Requires-Dist: opentelemetry-instrumentation-threading (>=0.50b0)
-Requires-Dist: opentelemetry-instrumentation-together (>=0.36.1) ; extra == "all"
-Requires-Dist: opentelemetry-instrumentation-together (>=0.36.1) ; extra == "together"
-Requires-Dist: opentelemetry-instrumentation-transformers (>=0.36.1) ; extra == "all"
-Requires-Dist: opentelemetry-instrumentation-transformers (>=0.36.1) ; extra == "transformers"
+Requires-Dist: opentelemetry-instrumentation-together (>=0.37.1) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-together (>=0.37.1) ; extra == "together"
+Requires-Dist: opentelemetry-instrumentation-transformers (>=0.37.1) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-transformers (>=0.37.1) ; extra == "transformers"
 Requires-Dist: opentelemetry-instrumentation-urllib3 (>=0.50b0)
-Requires-Dist: opentelemetry-instrumentation-vertexai (>=0.36.1) ; extra == "all"
-Requires-Dist: opentelemetry-instrumentation-vertexai (>=0.36.1) ; extra == "vertexai"
-Requires-Dist: opentelemetry-instrumentation-watsonx (>=0.36.1) ; extra == "all"
-Requires-Dist: opentelemetry-instrumentation-watsonx (>=0.36.1) ; extra == "watsonx"
-Requires-Dist: opentelemetry-instrumentation-weaviate (>=0.36.1) ; extra == "all"
-Requires-Dist: opentelemetry-instrumentation-weaviate (>=0.36.1) ; extra == "weaviate"
+Requires-Dist: opentelemetry-instrumentation-vertexai (>=0.37.1) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-vertexai (>=0.37.1) ; extra == "vertexai"
+Requires-Dist: opentelemetry-instrumentation-watsonx (>=0.37.1) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-watsonx (>=0.37.1) ; extra == "watsonx"
+Requires-Dist: opentelemetry-instrumentation-weaviate (>=0.37.1) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-weaviate (>=0.37.1) ; extra == "weaviate"
 Requires-Dist: opentelemetry-sdk (>=1.28.0)
 Requires-Dist: opentelemetry-semantic-conventions-ai (>=0.4.2)
 Requires-Dist: pydantic (>=2.0.3)
@@ -2,7 +2,7 @@ lmnr/__init__.py,sha256=Bqxs-8Mh4h69pOHURgBCgo9EW1GwChebxP6wUX2-bsU,452
 lmnr/cli.py,sha256=4J2RZQhHM3jJcjFvBC4PChQTS-ukxykVvI0X6lTkK-o,2918
 lmnr/openllmetry_sdk/.flake8,sha256=bCxuDlGx3YQ55QHKPiGJkncHanh9qGjQJUujcFa3lAU,150
 lmnr/openllmetry_sdk/.python-version,sha256=9OLQBQVbD4zE4cJsPePhnAfV_snrPSoqEQw-PXgPMOs,6
-lmnr/openllmetry_sdk/__init__.py,sha256=Z-SwgbFRgNRRk2rcO2AY6HpK7nXR0WSscYQ8n0S9xmM,2533
+lmnr/openllmetry_sdk/__init__.py,sha256=TpFNPrRosz-BUpWdfT9ROiZPTGA_JshNwqOfiXlR0MU,2643
 lmnr/openllmetry_sdk/config/__init__.py,sha256=DliMGp2NjYAqRFLKpWQPUKjGMHRO8QsVfazBA1qENQ8,248
 lmnr/openllmetry_sdk/decorators/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 lmnr/openllmetry_sdk/decorators/base.py,sha256=BhfTJHjGnKXZRyug41wnmvjbg2UDq2p7eLEak7RsCXI,5779
@@ -11,7 +11,7 @@ lmnr/openllmetry_sdk/tracing/__init__.py,sha256=xT73L1t2si2CM6QmMiTZ7zn-dKKYBLNr
 lmnr/openllmetry_sdk/tracing/attributes.py,sha256=B_4KVYWAUu-6DQmsm2eCJQcTxm8pG1EByCBK3uOPkuI,1293
 lmnr/openllmetry_sdk/tracing/content_allow_list.py,sha256=3feztm6PBWNelc8pAZUcQyEGyeSpNiVKjOaDk65l2ps,846
 lmnr/openllmetry_sdk/tracing/context_manager.py,sha256=rdSus-p-TaevQ8hIAhfbnZr5dTqRvACDkzXGDpflncY,306
-lmnr/openllmetry_sdk/tracing/tracing.py,sha256=QZEB5K1bJ2GmfSLSIa22kAll4iD-TXN5hlRND-2JO0E,32838
+lmnr/openllmetry_sdk/tracing/tracing.py,sha256=nKBP7KpfZE70EpfhJ9yPmtXtjy_331O0s3XwLSG2U0c,33191
 lmnr/openllmetry_sdk/utils/__init__.py,sha256=pNhf0G3vTd5ccoc03i1MXDbricSaiqCbi1DLWhSekK8,604
 lmnr/openllmetry_sdk/utils/in_memory_span_exporter.py,sha256=H_4TRaThMO1H6vUQ0OpQvzJk_fZH0OOsRAM1iZQXsR8,2112
 lmnr/openllmetry_sdk/utils/json_encoder.py,sha256=dK6b_axr70IYL7Vv-bu4wntvDDuyntoqsHaddqX7P58,463
@@ -19,17 +19,17 @@ lmnr/openllmetry_sdk/utils/package_check.py,sha256=Da4WoTX6J9naODs99DnY9BA-2MxH2
 lmnr/openllmetry_sdk/version.py,sha256=OlatFEFA4ttqSSIiV8jdE-sq3KG5zu2hnC4B4mzWF3s,23
 lmnr/sdk/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 lmnr/sdk/browser/__init__.py,sha256=NSP5sB-dm-f0FP70_GMvVrNFwc5rHf7SW0_Oisyo3cE,343
-lmnr/sdk/browser/playwright_patch.py,sha256=6E6eNQmts-dn1qNNB4CAJwpmr6UFxqEuvh1Gh-o8bWI,6487
+lmnr/sdk/browser/playwright_patch.py,sha256=Hu0O_gJghlKeDHo5RiKoEEWnzbyO6AvoX00ITiiSmIA,8838
 lmnr/sdk/datasets.py,sha256=hJcQcwTJbtA4COoVG3god4xll9TBSDMfvrhKmMfanjg,1567
 lmnr/sdk/decorators.py,sha256=ja2EUWUWvFOp28ER0k78PRuxNahwCVyH0TdM3U-xY7U,1856
 lmnr/sdk/eval_control.py,sha256=G6Fg3Xx_KWv72iBaWlNMdyRTF2bZFQnwJ68sJNSpIcY,177
-lmnr/sdk/evaluations.py,sha256=dUIMEmKUzkOmHZ3nxlddk9kKm518C6xvElpgtNsql10,16344
-lmnr/sdk/laminar.py,sha256=B4vx8WGtfAYF-NLWXJj7KZLR6KZIkC02GMtoPqalwiU,31545
+lmnr/sdk/evaluations.py,sha256=didZ1TqZKASqwaJsgXqibnGGRM37bpMjCPt66FO7tI4,18172
+lmnr/sdk/laminar.py,sha256=COF0bnaU8xSZAV5tRDuP7U4RMpB8AgiadbU-rz_FRq8,31846
 lmnr/sdk/log.py,sha256=nt_YMmPw1IRbGy0b7q4rTtP4Yo3pQfNxqJPXK3nDSNQ,2213
-lmnr/sdk/types.py,sha256=FCNoFoa0ingOvpXGfbiETVsakYyq9Zpoc56MXJ1YDzQ,6390
-lmnr/sdk/utils.py,sha256=Uk8y15x-sd5tP2ERONahElLDJVEy_3dA_1_5g9A6auY,3358
-lmnr-0.4.56.dist-info/LICENSE,sha256=67b_wJHVV1CBaWkrKFWU1wyqTPSdzH77Ls-59631COg,10411
-lmnr-0.4.56.dist-info/METADATA,sha256=lcz6sK6hDBN-7vto1rSB_zUxoQmqWw3z-5iTHSDJgBM,13861
-lmnr-0.4.56.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
-lmnr-0.4.56.dist-info/entry_points.txt,sha256=K1jE20ww4jzHNZLnsfWBvU3YKDGBgbOiYG5Y7ivQcq4,37
-lmnr-0.4.56.dist-info/RECORD,,
+lmnr/sdk/types.py,sha256=Y4msdSM_IvQ5LOfV2jvk4R0-6skW5Ilml466a6swul4,6506
+lmnr/sdk/utils.py,sha256=sD1YEqhdPaHweY2VGmjMF9MC-X7Ikdc49E01D-HF77E,3377
+lmnr-0.4.58.dist-info/LICENSE,sha256=67b_wJHVV1CBaWkrKFWU1wyqTPSdzH77Ls-59631COg,10411
+lmnr-0.4.58.dist-info/METADATA,sha256=2lASt_slr6vqL-AMM9JgOCqWTrycELl0t_XzaASytw4,13827
+lmnr-0.4.58.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
+lmnr-0.4.58.dist-info/entry_points.txt,sha256=K1jE20ww4jzHNZLnsfWBvU3YKDGBgbOiYG5Y7ivQcq4,37
+lmnr-0.4.58.dist-info/RECORD,,