lmnr 0.5.0__py3-none-any.whl → 0.5.1a0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,9 +2,7 @@ import logging
 import uuid
 
 from lmnr.sdk.browser.pw_utils import handle_navigation_async, handle_navigation_sync
-from lmnr.sdk.browser.utils import with_tracer_and_client_wrapper
-from lmnr.sdk.client.asynchronous.async_client import AsyncLaminarClient
-from lmnr.sdk.client.synchronous.sync_client import LaminarClient
+from lmnr.sdk.browser.utils import with_tracer_wrapper
 from lmnr.version import __version__
 
 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
@@ -18,12 +16,15 @@ from opentelemetry.trace import (
     set_span_in_context,
 )
 from opentelemetry.context import get_current
-from typing import Collection
+from typing import Collection, Optional
 from wrapt import wrap_function_wrapper
 
 try:
     from playwright.async_api import Browser
-    from playwright.sync_api import Browser as SyncBrowser
+    from playwright.sync_api import (
+        Browser as SyncBrowser,
+        BrowserContext as SyncBrowserContext,
+    )
 except ImportError as e:
     raise ImportError(
         f"Attempted to import {__file__}, but it is designed "
@@ -36,12 +37,12 @@ _instruments = ("playwright >= 1.9.0",)
 logger = logging.getLogger(__name__)
 
 _context_spans: dict[str, Span] = {}
+_project_api_key: Optional[str] = None
+_base_http_url: Optional[str] = None
 
 
-@with_tracer_and_client_wrapper
-def _wrap_new_page(
-    tracer: Tracer, client: LaminarClient, to_wrap, wrapped, instance, args, kwargs
-):
+@with_tracer_wrapper
+def _wrap_new_page(tracer: Tracer, to_wrap, wrapped, instance, args, kwargs):
     with tracer.start_as_current_span(
         f"{to_wrap.get('object')}.{to_wrap.get('method')}"
     ) as span:
@@ -49,13 +50,15 @@ def _wrap_new_page(
         session_id = str(uuid.uuid4().hex)
         trace_id = format(get_current_span().get_span_context().trace_id, "032x")
         span.set_attribute("lmnr.internal.has_browser_session", True)
-        handle_navigation_sync(page, session_id, trace_id, client)
+        handle_navigation_sync(
+            page, session_id, trace_id, _project_api_key, _base_http_url
+        )
         return page
 
 
-@with_tracer_and_client_wrapper
+@with_tracer_wrapper
 async def _wrap_new_page_async(
-    tracer: Tracer, client: AsyncLaminarClient, to_wrap, wrapped, instance, args, kwargs
+    tracer: Tracer, to_wrap, wrapped, instance, args, kwargs
 ):
     with tracer.start_as_current_span(
         f"{to_wrap.get('object')}.{to_wrap.get('method')}"
@@ -64,14 +67,14 @@ async def _wrap_new_page_async(
         session_id = str(uuid.uuid4().hex)
         trace_id = format(span.get_span_context().trace_id, "032x")
         span.set_attribute("lmnr.internal.has_browser_session", True)
-        await handle_navigation_async(page, session_id, trace_id, client)
+        await handle_navigation_async(
+            page, session_id, trace_id, _project_api_key, _base_http_url
+        )
         return page
 
 
-@with_tracer_and_client_wrapper
-def _wrap_new_browser_sync(
-    tracer: Tracer, client: LaminarClient, to_wrap, wrapped, instance, args, kwargs
-):
+@with_tracer_wrapper
+def _wrap_new_browser_sync(tracer: Tracer, to_wrap, wrapped, instance, args, kwargs):
     global _context_spans
     browser: SyncBrowser = wrapped(*args, **kwargs)
     session_id = str(uuid.uuid4().hex)
@@ -84,15 +87,23 @@ def _wrap_new_browser_sync(
             set_span_in_context(span, get_current())
         _context_spans[id(context)] = span
         span.set_attribute("lmnr.internal.has_browser_session", True)
+        trace_id = format(span.get_span_context().trace_id, "032x")
+        context.on(
+            "page",
+            lambda page: handle_navigation_sync(
+                page, session_id, trace_id, _project_api_key, _base_http_url
+            ),
+        )
         for page in context.pages:
-            trace_id = format(span.get_span_context().trace_id, "032x")
-            handle_navigation_sync(page, session_id, trace_id, client)
+            handle_navigation_sync(
+                page, session_id, trace_id, _project_api_key, _base_http_url
+            )
     return browser
 
 
-@with_tracer_and_client_wrapper
+@with_tracer_wrapper
 async def _wrap_new_browser_async(
-    tracer: Tracer, client: AsyncLaminarClient, to_wrap, wrapped, instance, args, kwargs
+    tracer: Tracer, to_wrap, wrapped, instance, args, kwargs
 ):
     global _context_spans
     browser: Browser = await wrapped(*args, **kwargs)
@@ -106,16 +117,80 @@ async def _wrap_new_browser_async(
             set_span_in_context(span, get_current())
         _context_spans[id(context)] = span
         span.set_attribute("lmnr.internal.has_browser_session", True)
+        trace_id = format(span.get_span_context().trace_id, "032x")
+
+        async def handle_page_navigation(page):
+            return await handle_navigation_async(
+                page, session_id, trace_id, _project_api_key, _base_http_url
+            )
+
+        context.on("page", handle_page_navigation)
         for page in context.pages:
-            trace_id = format(span.get_span_context().trace_id, "032x")
-            await handle_navigation_async(page, session_id, trace_id, client)
+            await handle_navigation_async(
+                page, session_id, trace_id, _project_api_key, _base_http_url
+            )
     return browser
 
 
-@with_tracer_and_client_wrapper
+@with_tracer_wrapper
+def _wrap_new_context_sync(tracer: Tracer, to_wrap, wrapped, instance, args, kwargs):
+    context: SyncBrowserContext = wrapped(*args, **kwargs)
+    session_id = str(uuid.uuid4().hex)
+    span = get_current_span()
+    if span == INVALID_SPAN:
+        span = tracer.start_span(
+            name=f"{to_wrap.get('object')}.{to_wrap.get('method')}"
+        )
+        set_span_in_context(span, get_current())
+    _context_spans[id(context)] = span
+    span.set_attribute("lmnr.internal.has_browser_session", True)
+    trace_id = format(span.get_span_context().trace_id, "032x")
+
+    context.on(
+        "page",
+        lambda page: handle_navigation_sync(
+            page, session_id, trace_id, _project_api_key, _base_http_url
+        ),
+    )
+    for page in context.pages:
+        handle_navigation_sync(
+            page, session_id, trace_id, _project_api_key, _base_http_url
+        )
+    return context
+
+
+@with_tracer_wrapper
+async def _wrap_new_context_async(
+    tracer: Tracer, to_wrap, wrapped, instance, args, kwargs
+):
+    context: SyncBrowserContext = await wrapped(*args, **kwargs)
+    session_id = str(uuid.uuid4().hex)
+    span = get_current_span()
+    if span == INVALID_SPAN:
+        span = tracer.start_span(
+            name=f"{to_wrap.get('object')}.{to_wrap.get('method')}"
+        )
+        set_span_in_context(span, get_current())
+    _context_spans[id(context)] = span
+    span.set_attribute("lmnr.internal.has_browser_session", True)
+    trace_id = format(span.get_span_context().trace_id, "032x")
+
+    async def handle_page_navigation(page):
+        return await handle_navigation_async(
+            page, session_id, trace_id, _project_api_key, _base_http_url
+        )
+
+    context.on("page", handle_page_navigation)
+    for page in context.pages:
+        await handle_navigation_async(
+            page, session_id, trace_id, _project_api_key, _base_http_url
+        )
+    return context
+
+
+@with_tracer_wrapper
 def _wrap_close_browser_sync(
     tracer: Tracer,
-    client: LaminarClient,
     to_wrap,
     wrapped,
     instance: SyncBrowser,
@@ -133,10 +208,9 @@ def _wrap_close_browser_sync(
     return wrapped(*args, **kwargs)
 
 
-@with_tracer_and_client_wrapper
+@with_tracer_wrapper
 async def _wrap_close_browser_async(
     tracer: Tracer,
-    client: AsyncLaminarClient,
     to_wrap,
     wrapped,
     instance: Browser,
@@ -191,6 +265,18 @@ WRAPPED_METHODS = [
         "method": "close",
         "wrapper": _wrap_close_browser_sync,
     },
+    {
+        "package": "playwright.sync_api",
+        "object": "Browser",
+        "method": "new_context",
+        "wrapper": _wrap_new_context_sync,
+    },
+    {
+        "package": "playwright.sync_api",
+        "object": "BrowserType",
+        "method": "launch_persistent_context",
+        "wrapper": _wrap_new_context_sync,
+    },
 ]
 
 WRAPPED_METHODS_ASYNC = [
@@ -230,22 +316,40 @@ WRAPPED_METHODS_ASYNC = [
         "method": "close",
         "wrapper": _wrap_close_browser_async,
     },
+    {
+        "package": "playwright.async_api",
+        "object": "Browser",
+        "method": "new_context",
+        "wrapper": _wrap_new_context_async,
+    },
+    {
+        "package": "playwright.sync_api",
+        "object": "BrowserType",
+        "method": "launch_persistent_context",
+        "wrapper": _wrap_new_context_sync,
+    },
 ]
 
 
 class PlaywrightInstrumentor(BaseInstrumentor):
-    def __init__(self, client: LaminarClient, async_client: AsyncLaminarClient):
+    def __init__(self):
         super().__init__()
-        self.client = client
-        self.async_client = async_client
 
     def instrumentation_dependencies(self) -> Collection[str]:
         return _instruments
 
     def _instrument(self, **kwargs):
+        global _project_api_key, _base_http_url
+
         tracer_provider = kwargs.get("tracer_provider")
         tracer = get_tracer(__name__, __version__, tracer_provider)
 
+        if kwargs.get("project_api_key"):
+            _project_api_key = kwargs.get("project_api_key")
+
+        if kwargs.get("base_http_url"):
+            _base_http_url = kwargs.get("base_http_url")
+
         for wrapped_method in WRAPPED_METHODS:
             wrap_package = wrapped_method.get("package")
             wrap_object = wrapped_method.get("object")
@@ -256,7 +360,6 @@ class PlaywrightInstrumentor(BaseInstrumentor):
                 f"{wrap_object}.{wrap_method}",
                 wrapped_method.get("wrapper")(
                     tracer,
-                    self.client,
                     wrapped_method,
                 ),
             )
@@ -274,7 +377,6 @@ class PlaywrightInstrumentor(BaseInstrumentor):
                 f"{wrap_object}.{wrap_method}",
                 wrapped_method.get("wrapper")(
                     tracer,
-                    self.async_client,
                     wrapped_method,
                 ),
            )
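Taken together, these hunks drop the Laminar client objects from the instrumentor and instead thread a project API key and base HTTP URL through module-level globals. A minimal sketch of how the new entry point might be wired up, assuming PlaywrightInstrumentor is importable from the instrumentation module above; the tracer provider and the key/URL values are illustrative only:

    from opentelemetry.sdk.trace import TracerProvider

    provider = TracerProvider()
    instrumentor = PlaywrightInstrumentor()  # no longer takes LaminarClient/AsyncLaminarClient
    instrumentor.instrument(
        tracer_provider=provider,
        project_api_key="lmnr-project-api-key",  # placeholder value
        base_http_url="https://api.lmnr.ai",  # assumed backend URL
    )

The keyword names follow the kwargs.get(...) calls in _instrument above; BaseInstrumentor.instrument() forwards its keyword arguments to _instrument.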
@@ -3,6 +3,7 @@ import logging
 import os
 import time
 import threading
+from typing import Optional
 
 from opentelemetry import trace
 
@@ -26,6 +27,8 @@ logger = logging.getLogger(__name__)
 # Track pages we've already instrumented to avoid double-instrumentation
 instrumented_pages = set()
 async_instrumented_pages = set()
+client: Optional[LaminarClient] = None
+async_client: Optional[AsyncLaminarClient] = None
 
 
 current_dir = os.path.dirname(os.path.abspath(__file__))
@@ -36,7 +39,7 @@ INJECT_PLACEHOLDER = """
 () => {
     const BATCH_SIZE = 1000; // Maximum events to store in memory
 
-    window.lmnrRrwebEventsBatch = [];
+    window.lmnrRrwebEventsBatch = new Set();
 
     // Utility function to compress individual event data
     async function compressEventData(data) {
@@ -50,8 +53,8 @@ INJECT_PLACEHOLDER = """
 
     window.lmnrGetAndClearEvents = () => {
         const events = window.lmnrRrwebEventsBatch;
-        window.lmnrRrwebEventsBatch = [];
-        return events;
+        window.lmnrRrwebEventsBatch = new Set();
+        return Array.from(events);
     };
 
     // Add heartbeat events
@@ -62,11 +65,11 @@ INJECT_PLACEHOLDER = """
             timestamp: Date.now()
         };
 
-        window.lmnrRrwebEventsBatch.push(heartbeat);
+        window.lmnrRrwebEventsBatch.add(heartbeat);
 
         // Prevent memory issues by limiting batch size
-        if (window.lmnrRrwebEventsBatch.length > BATCH_SIZE) {
-            window.lmnrRrwebEventsBatch = window.lmnrRrwebEventsBatch.slice(-BATCH_SIZE);
+        if (window.lmnrRrwebEventsBatch.size > BATCH_SIZE) {
+            window.lmnrRrwebEventsBatch = new Set(Array.from(window.lmnrRrwebEventsBatch).slice(-BATCH_SIZE));
         }
     }, 1000);
 
@@ -81,7 +84,7 @@ INJECT_PLACEHOLDER = """
                 ...event,
                 data: await compressEventData(event.data)
             };
-            window.lmnrRrwebEventsBatch.push(compressedEvent);
+            window.lmnrRrwebEventsBatch.add(compressedEvent);
         }
     });
 }
@@ -220,8 +223,21 @@ async def inject_rrweb_async(page: Page):
 
 @observe(name="playwright.page", ignore_input=True, ignore_output=True)
 def handle_navigation_sync(
-    page: SyncPage, session_id: str, trace_id: str, client: LaminarClient
+    page: SyncPage,
+    session_id: str,
+    trace_id: str,
+    project_api_key: Optional[str] = None,
+    base_http_url: Optional[str] = None,
 ):
+    global client
+    if client is None:
+        client = LaminarClient(base_url=base_http_url, project_api_key=project_api_key)
+    if (base_http_url is not None and base_http_url != client.base_url) or (
+        project_api_key is not None and project_api_key != client.project_api_key
+    ):
+        if client is not None:
+            client.close()
+        client = LaminarClient(base_url=base_http_url, project_api_key=project_api_key)
     trace.get_current_span().set_attribute("lmnr.internal.has_browser_session", True)
     # Check if we've already instrumented this page
     page_id = id(page)
@@ -239,11 +255,10 @@ def handle_navigation_sync(
     inject_rrweb_sync(page)
 
     def collection_loop():
-        while not page.is_closed():  # Stop when page closes
+        while not page.is_closed():
            send_events_sync(page, session_id, trace_id, client)
            time.sleep(2)
 
-        # Clean up when page closes
         if page_id in instrumented_pages:
             instrumented_pages.remove(page_id)
 
@@ -253,8 +268,25 @@ def handle_navigation_sync(
 
 @observe(name="playwright.page", ignore_input=True, ignore_output=True)
 async def handle_navigation_async(
-    page: Page, session_id: str, trace_id: str, client: AsyncLaminarClient
+    page: Page,
+    session_id: str,
+    trace_id: str,
+    project_api_key: Optional[str] = None,
+    base_http_url: Optional[str] = None,
 ):
+    global async_client
+    if async_client is None:
+        async_client = AsyncLaminarClient(
+            base_url=base_http_url, project_api_key=project_api_key
+        )
+    if (base_http_url is not None and base_http_url != async_client.base_url) or (
+        project_api_key is not None and project_api_key != async_client.project_api_key
+    ):
+        if async_client is not None:
+            await async_client.close()
+        async_client = AsyncLaminarClient(
+            base_url=base_http_url, project_api_key=project_api_key
+        )
     trace.get_current_span().set_attribute("lmnr.internal.has_browser_session", True)
     # Check if we've already instrumented this page
     page_id = id(page)
@@ -271,19 +303,19 @@ async def handle_navigation_async(
     page.on("load", lambda: asyncio.create_task(on_load()))
     await inject_rrweb_async(page)
 
-    async def collection_loop():
+    async def collection_loop(client: AsyncLaminarClient):
         try:
-            while not page.is_closed():  # Stop when page closes
+            while not page.is_closed():
                 await send_events_async(page, session_id, trace_id, client)
                 await asyncio.sleep(2)
-            # Clean up when page closes
+
             async_instrumented_pages.remove(page_id)
             logger.info("Event collection stopped")
         except Exception as e:
             logger.error(f"Event collection stopped: {e}")
 
     # Create and store task
-    task = asyncio.create_task(collection_loop())
+    task = asyncio.create_task(collection_loop(async_client))
 
     # Clean up task when page closes
     page.on("close", lambda: task.cancel())
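The handlers above now lazily create and cache a module-level client, rebuilding it only when the API key or base URL changes. A simplified, self-contained sketch of that caching pattern, using a stand-in Client class rather than the real LaminarClient:

    from typing import Optional

    class Client:
        # Stand-in for LaminarClient, only to illustrate the caching logic.
        def __init__(self, base_url: Optional[str], project_api_key: Optional[str]):
            self.base_url = base_url
            self.project_api_key = project_api_key

        def close(self) -> None:
            pass

    _client: Optional[Client] = None

    def get_client(base_url: Optional[str] = None, project_api_key: Optional[str] = None) -> Client:
        global _client
        if _client is None:
            _client = Client(base_url, project_api_key)
        elif (base_url is not None and base_url != _client.base_url) or (
            project_api_key is not None and project_api_key != _client.project_api_key
        ):
            # Settings changed since the last call: drop the old client and rebuild.
            _client.close()
            _client = Client(base_url, project_api_key)
        return _client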
@@ -137,6 +137,14 @@ class AsyncLaminarClient:
         """
         await self.__client.aclose()
 
+    @property
+    def base_url(self) -> str:
+        return self.__base_url
+
+    @property
+    def project_api_key(self) -> str:
+        return self.__project_api_key
+
     async def __aenter__(self: _T) -> _T:
         return self
 
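These read-only accessors are what the browser instrumentation above compares against before deciding whether to rebuild its cached client. A hedged usage sketch; the constructor keywords mirror the calls in the navigation handlers, and the endpoint and key values are placeholders:

    import asyncio

    async def main():
        client = AsyncLaminarClient(
            base_url="https://api.lmnr.ai",  # assumed default endpoint
            project_api_key="lmnr-project-api-key",  # placeholder
        )
        print(client.base_url, client.project_api_key)  # new read-only properties
        await client.close()

    asyncio.run(main())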
@@ -35,6 +35,7 @@ class AsyncAgent(BaseAsyncResource):
         model_provider: Optional[ModelProvider] = None,
         model: Optional[str] = None,
         enable_thinking: bool = True,
+        return_screenshots: bool = False,
     ) -> AsyncIterator[RunAgentResponseChunk]:
         """Run Laminar index agent in streaming mode.
 
@@ -45,7 +46,7 @@ class AsyncAgent(BaseAsyncResource):
             model_provider (Optional[ModelProvider], optional): LLM model provider
             model (Optional[str], optional): LLM model name
             enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
-
+            return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
         Returns:
             AsyncIterator[RunAgentResponseChunk]: a generator of response chunks
         """
@@ -59,6 +60,7 @@ class AsyncAgent(BaseAsyncResource):
         model_provider: Optional[ModelProvider] = None,
         model: Optional[str] = None,
         enable_thinking: bool = True,
+        return_screenshots: bool = False,
     ) -> AgentOutput:
         """Run Laminar index agent.
 
@@ -68,7 +70,7 @@ class AsyncAgent(BaseAsyncResource):
             model_provider (Optional[ModelProvider], optional): LLM model provider
             model (Optional[str], optional): LLM model name
             enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
-
+            return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
         Returns:
             AgentOutput: agent output
         """
@@ -83,6 +85,7 @@ class AsyncAgent(BaseAsyncResource):
         model: Optional[str] = None,
         stream: Literal[False] = False,
         enable_thinking: bool = True,
+        return_screenshots: bool = False,
     ) -> AgentOutput:
         """Run Laminar index agent.
 
@@ -93,7 +96,7 @@ class AsyncAgent(BaseAsyncResource):
             model (Optional[str], optional): LLM model name
             stream (Literal[False], optional): whether to stream the agent's response
             enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
-
+            return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
         Returns:
             AgentOutput: agent output
         """
@@ -107,6 +110,7 @@ class AsyncAgent(BaseAsyncResource):
         model: Optional[str] = None,
         stream: bool = False,
         enable_thinking: bool = True,
+        return_screenshots: bool = False,
     ) -> Union[AgentOutput, Awaitable[AsyncIterator[RunAgentResponseChunk]]]:
         """Run Laminar index agent.
 
@@ -117,7 +121,7 @@ class AsyncAgent(BaseAsyncResource):
             model (Optional[str], optional): LLM model name
             stream (bool, optional): whether to stream the agent's response
             enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
-
+            return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
         Returns:
             Union[AgentOutput, AsyncIterator[RunAgentResponseChunk]]: agent output or a generator of response chunks
         """
@@ -146,6 +150,7 @@ class AsyncAgent(BaseAsyncResource):
             # https://aws.amazon.com/blogs/networking-and-content-delivery/introducing-nlb-tcp-configurable-idle-timeout/
             stream=True,
             enable_thinking=enable_thinking,
+            return_screenshots=return_screenshots,
         )
 
         # For streaming case, use a generator function
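A hedged sketch of the new return_screenshots flag on the async agent resource; only the flag itself comes from this diff, while the client.agent accessor, the prompt argument, and the other setup are assumptions about the surrounding API:

    import asyncio

    async def main():
        client = AsyncLaminarClient(
            base_url="https://api.lmnr.ai",  # assumed default endpoint
            project_api_key="lmnr-project-api-key",  # placeholder
        )
        result = await client.agent.run(
            "open example.com and describe the page",  # assumed prompt argument
            return_screenshots=True,  # new in 0.5.1a0
        )
        print(result)
        await client.close()

    asyncio.run(main())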
@@ -27,6 +27,7 @@ class Agent(BaseResource):
         model_provider: Optional[ModelProvider] = None,
         model: Optional[str] = None,
         enable_thinking: bool = True,
+        return_screenshots: bool = False,
     ) -> Generator[RunAgentResponseChunk, None, None]:
         """Run Laminar index agent in streaming mode.
 
@@ -37,7 +38,7 @@ class Agent(BaseResource):
             model_provider (Optional[ModelProvider], optional): LLM model provider
             model (Optional[str], optional): LLM model name
             enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
-
+            return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
         Returns:
             Generator[RunAgentResponseChunk, None, None]: a generator of response chunks
         """
@@ -51,6 +52,7 @@ class Agent(BaseResource):
         model_provider: Optional[ModelProvider] = None,
         model: Optional[str] = None,
         enable_thinking: bool = True,
+        return_screenshots: bool = False,
     ) -> AgentOutput:
         """Run Laminar index agent.
 
@@ -60,6 +62,7 @@ class Agent(BaseResource):
             model_provider (Optional[ModelProvider], optional): LLM model provider
             model (Optional[str], optional): LLM model name
             enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
+            return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
 
         Returns:
             AgentOutput: agent output
@@ -75,6 +78,7 @@ class Agent(BaseResource):
         model: Optional[str] = None,
         stream: Literal[False] = False,
         enable_thinking: bool = True,
+        return_screenshots: bool = False,
     ) -> AgentOutput:
         """Run Laminar index agent.
 
@@ -85,7 +89,7 @@ class Agent(BaseResource):
             model (Optional[str], optional): LLM model name
             stream (Literal[False], optional): whether to stream the agent's response
             enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
-            cdp_url (Optional[str], optional): CDP URL to connect to an existing browser session.
+            return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
 
         Returns:
             AgentOutput: agent output
@@ -100,6 +104,7 @@ class Agent(BaseResource):
         model: Optional[str] = None,
         stream: bool = False,
         enable_thinking: bool = True,
+        return_screenshots: bool = False,
     ) -> Union[AgentOutput, Generator[RunAgentResponseChunk, None, None]]:
         """Run Laminar index agent.
 
@@ -110,7 +115,7 @@ class Agent(BaseResource):
             model (Optional[str], optional): LLM model name
             stream (bool, optional): whether to stream the agent's response
             enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
-            cdp_url (Optional[str], optional): CDP URL to connect to an existing browser session.
+            return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
 
         Returns:
             Union[AgentOutput, Generator[RunAgentResponseChunk, None, None]]: agent output or a generator of response chunks
@@ -140,6 +145,7 @@ class Agent(BaseResource):
             # https://aws.amazon.com/blogs/networking-and-content-delivery/introducing-nlb-tcp-configurable-idle-timeout/
             stream=True,
             enable_thinking=enable_thinking,
+            return_screenshots=return_screenshots,
         )
 
         # For streaming case, use a generator function
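The synchronous resource gains the same flag; a hedged streaming sketch, again assuming the client.agent accessor and the prompt argument, with placeholder endpoint and key values:

    client = LaminarClient(
        base_url="https://api.lmnr.ai",  # assumed default endpoint
        project_api_key="lmnr-project-api-key",  # placeholder
    )
    for chunk in client.agent.run(
        "open example.com and describe the page",  # assumed prompt argument
        stream=True,
        return_screenshots=True,  # new flag: screenshots of the agent's state at every step
    ):
        print(type(chunk).__name__)
    client.close()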
@@ -150,6 +150,14 @@ class LaminarClient:
         if hasattr(self, "_client"):
             self.__client.close()
 
+    @property
+    def base_url(self) -> str:
+        return self.__base_url
+
+    @property
+    def project_api_key(self) -> str:
+        return self.__project_api_key
+
     def __enter__(self: _T) -> _T:
         return self
 
lmnr/sdk/evaluations.py CHANGED
@@ -35,15 +35,13 @@ MAX_EXPORT_BATCH_SIZE = 64
 def get_evaluation_url(
     project_id: str, evaluation_id: str, base_url: Optional[str] = None
 ):
-    if not base_url:
+    if not base_url or base_url == "https://api.lmnr.ai":
         base_url = "https://www.lmnr.ai"
 
     url = base_url
-    if url.endswith("/"):
-        url = url[:-1]
+    url = re.sub(r"\/$", "", url)
     if url.endswith("localhost") or url.endswith("127.0.0.1"):
-        # We best effort assume that the frontend is running on port 3000
-        # TODO: expose the frontend port?
+        # We best effort assume that the frontend is running on port 5667
         url = url + ":5667"
     return f"{url}/project/{project_id}/evaluations/{evaluation_id}"
 
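Worked examples of the rewritten URL logic, following the branches visible in this hunk (the project and evaluation IDs are placeholders):

    # No base_url, or the default API base, is rewritten to the public frontend:
    get_evaluation_url("proj-123", "eval-456")
    # -> "https://www.lmnr.ai/project/proj-123/evaluations/eval-456"
    get_evaluation_url("proj-123", "eval-456", "https://api.lmnr.ai")
    # -> "https://www.lmnr.ai/project/proj-123/evaluations/eval-456"

    # A local backend is assumed to serve the frontend on port 5667:
    get_evaluation_url("proj-123", "eval-456", "http://localhost")
    # -> "http://localhost:5667/project/proj-123/evaluations/eval-456"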
@@ -408,8 +406,8 @@ def evaluate(
 
     If there is no event loop, creates it and runs the evaluation until
     completion.
-    If there is an event loop, schedules the evaluation as a task in the
-    event loop and returns an awaitable handle.
+    If there is an event loop, returns an awaitable handle immediately. IMPORTANT:
+    You must await the call to `evaluate`.
 
     Parameters:
         data (Union[list[EvaluationDatapoint|dict]], EvaluationDataset]):\
@@ -482,6 +480,6 @@ def evaluate(
     else:
         loop = asyncio.get_event_loop()
         if loop.is_running():
-            return loop.run_until_complete(evaluation.run())
+            return evaluation.run()
         else:
             return asyncio.run(evaluation.run())
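With this change, calling evaluate from inside a running event loop hands back the coroutine instead of blocking on it, so it has to be awaited. A hedged sketch; the executor and evaluators arguments are assumptions about the rest of the evaluate signature, and only the awaiting requirement follows from this hunk:

    import asyncio

    async def main():
        # Inside a running loop, evaluate() now returns an awaitable handle.
        result = await evaluate(
            data=[{"data": {"question": "What is 2 + 2?"}, "target": "4"}],
            executor=lambda datapoint: "4",
            evaluators={"exact_match": lambda output, target: int(output == target)},
        )
        return result

    asyncio.run(main())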
lmnr/sdk/laminar.py CHANGED
@@ -2,7 +2,7 @@ from contextlib import contextmanager
 from contextvars import Context
 from lmnr.openllmetry_sdk import TracerManager
 from lmnr.openllmetry_sdk.instruments import Instruments
-from lmnr.openllmetry_sdk.tracing import get_tracer
+from lmnr.openllmetry_sdk import get_tracer
 from lmnr.openllmetry_sdk.tracing.attributes import (
     ASSOCIATION_PROPERTIES,
     Attributes,
@@ -638,9 +638,22 @@ class Laminar:
     ) -> LaminarSpanContext:
         return LaminarSpanContext.deserialize(span_context)
 
+    @classmethod
+    def flush(cls) -> bool:
+        """Flush the internal tracer.
+
+        Returns:
+            bool: True if the tracer was flushed, False otherwise
+            (e.g. no tracer or timeout).
+        """
+        if not cls.is_initialized():
+            return False
+        return TracerManager.flush()
+
     @classmethod
     def shutdown(cls):
-        TracerManager.flush()
+        cls.__initialized = False
+        return TracerManager.shutdown()
 
     @classmethod
     def set_session(
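The new classmethod separates flushing from shutdown, and shutdown now also clears the initialized flag. A short usage sketch; the initialize call and the key value are assumptions about the rest of the SDK surface:

    from lmnr import Laminar

    Laminar.initialize(project_api_key="lmnr-project-api-key")  # assumed setup call, placeholder key
    # ... run instrumented application code ...
    if Laminar.flush():  # new: explicit flush that reports whether it succeeded
        print("trace data flushed")
    Laminar.shutdown()  # now shuts the tracer down and marks the SDK uninitialized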