lmnr 0.5.0__tar.gz → 0.5.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {lmnr-0.5.0 → lmnr-0.5.1}/PKG-INFO +2 -2
- {lmnr-0.5.0 → lmnr-0.5.1}/README.md +1 -1
- {lmnr-0.5.0 → lmnr-0.5.1}/pyproject.toml +1 -1
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/openllmetry_sdk/__init__.py +2 -3
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/openllmetry_sdk/tracing/tracing.py +6 -1
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/browser/playwright_otel.py +90 -4
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/browser/pw_utils.py +7 -7
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/client/asynchronous/resources/agent.py +9 -4
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/client/synchronous/resources/agent.py +9 -3
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/evaluations.py +6 -8
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/laminar.py +14 -1
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/types.py +10 -7
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/version.py +1 -1
- {lmnr-0.5.0 → lmnr-0.5.1}/LICENSE +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/__init__.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/cli.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/openllmetry_sdk/.flake8 +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/openllmetry_sdk/config/__init__.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/openllmetry_sdk/decorators/__init__.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/openllmetry_sdk/decorators/base.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/openllmetry_sdk/instruments.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/openllmetry_sdk/tracing/__init__.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/openllmetry_sdk/tracing/attributes.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/openllmetry_sdk/tracing/content_allow_list.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/openllmetry_sdk/tracing/context_manager.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/openllmetry_sdk/utils/__init__.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/openllmetry_sdk/utils/in_memory_span_exporter.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/openllmetry_sdk/utils/json_encoder.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/openllmetry_sdk/utils/package_check.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/py.typed +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/__init__.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/browser/__init__.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/browser/browser_use_otel.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/browser/rrweb/rrweb.min.js +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/browser/utils.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/client/asynchronous/async_client.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/client/asynchronous/resources/__init__.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/client/asynchronous/resources/base.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/client/asynchronous/resources/browser_events.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/client/asynchronous/resources/evals.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/client/asynchronous/resources/pipeline.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/client/asynchronous/resources/semantic_search.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/client/synchronous/resources/__init__.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/client/synchronous/resources/base.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/client/synchronous/resources/browser_events.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/client/synchronous/resources/evals.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/client/synchronous/resources/pipeline.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/client/synchronous/resources/semantic_search.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/client/synchronous/sync_client.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/datasets.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/decorators.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/eval_control.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/log.py +0 -0
- {lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/utils.py +0 -0
{lmnr-0.5.0 → lmnr-0.5.1}/PKG-INFO

````diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: lmnr
-Version: 0.5.0
+Version: 0.5.1
 Summary: Python SDK for Laminar
 License: Apache-2.0
 Author: lmnr.ai
@@ -373,7 +373,7 @@ async for chunk in client.agent.run(
 ):
     if chunk.chunkType == 'step':
         print(chunk.summary)
-    if chunk.chunkType == 'finalOutput':
+    elif chunk.chunkType == 'finalOutput':
         print(chunk.content.result.content)
 ```
 
````
{lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/openllmetry_sdk/tracing/tracing.py

```diff
@@ -236,8 +236,13 @@ class TracerWrapper(object):
         cls.__span_id_to_path = {}
         cls.__span_id_lists = {}
 
-    def flush(self):
+    def shutdown(self):
         self.__spans_processor.force_flush()
+        self.__spans_processor.shutdown()
+        self.__tracer_provider.shutdown()
+
+    def flush(self):
+        return self.__spans_processor.force_flush()
 
     def get_tracer(self):
         return self.__tracer_provider.get_tracer(TRACER_NAME)
```
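The split gives `TracerWrapper` the standard OpenTelemetry lifecycle: `flush` pushes buffered spans to the exporter and reports whether that succeeded, while `shutdown` flushes once and then tears the pipeline down for good. A minimal sketch of the same distinction against the plain OpenTelemetry SDK (the console-exporter wiring is illustrative, not lmnr's own setup):

```python
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter

provider = TracerProvider()
processor = BatchSpanProcessor(ConsoleSpanExporter())
provider.add_span_processor(processor)

# flush: export what is buffered, keep tracing alive; returns False on timeout
flushed = processor.force_flush()

# shutdown: flush once more, then release the processor and provider permanently
processor.force_flush()
processor.shutdown()
provider.shutdown()
```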
{lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/browser/playwright_otel.py

```diff
@@ -22,8 +22,11 @@ from typing import Collection
 from wrapt import wrap_function_wrapper
 
 try:
-    from playwright.async_api import Browser
-    from playwright.sync_api import Browser as SyncBrowser
+    from playwright.async_api import Browser, BrowserContext
+    from playwright.sync_api import (
+        Browser as SyncBrowser,
+        BrowserContext as SyncBrowserContext,
+    )
 except ImportError as e:
     raise ImportError(
         f"Attempted to import {__file__}, but it is designed "
```
```diff
@@ -84,8 +87,12 @@ def _wrap_new_browser_sync(
     set_span_in_context(span, get_current())
     _context_spans[id(context)] = span
     span.set_attribute("lmnr.internal.has_browser_session", True)
+    trace_id = format(span.get_span_context().trace_id, "032x")
+    context.on(
+        "page",
+        lambda page: handle_navigation_sync(page, session_id, trace_id, client),
+    )
     for page in context.pages:
-        trace_id = format(span.get_span_context().trace_id, "032x")
         handle_navigation_sync(page, session_id, trace_id, client)
     return browser
 
```
```diff
@@ -106,12 +113,67 @@ async def _wrap_new_browser_async(
     set_span_in_context(span, get_current())
     _context_spans[id(context)] = span
     span.set_attribute("lmnr.internal.has_browser_session", True)
+    trace_id = format(span.get_span_context().trace_id, "032x")
+
+    async def handle_page_navigation(page):
+        return await handle_navigation_async(page, session_id, trace_id, client)
+
+    context.on("page", handle_page_navigation)
     for page in context.pages:
-        trace_id = format(span.get_span_context().trace_id, "032x")
         await handle_navigation_async(page, session_id, trace_id, client)
     return browser
 
 
+@with_tracer_and_client_wrapper
+def _wrap_new_context_sync(
+    tracer: Tracer, client: LaminarClient, to_wrap, wrapped, instance, args, kwargs
+):
+    context: SyncBrowserContext = wrapped(*args, **kwargs)
+    session_id = str(uuid.uuid4().hex)
+    span = get_current_span()
+    if span == INVALID_SPAN:
+        span = tracer.start_span(
+            name=f"{to_wrap.get('object')}.{to_wrap.get('method')}"
+        )
+    set_span_in_context(span, get_current())
+    _context_spans[id(context)] = span
+    span.set_attribute("lmnr.internal.has_browser_session", True)
+    trace_id = format(span.get_span_context().trace_id, "032x")
+
+    context.on(
+        "page",
+        lambda page: handle_navigation_sync(page, session_id, trace_id, client),
+    )
+    for page in context.pages:
+        handle_navigation_sync(page, session_id, trace_id, client)
+    return context
+
+
+@with_tracer_and_client_wrapper
+async def _wrap_new_context_async(
+    tracer: Tracer, client: AsyncLaminarClient, to_wrap, wrapped, instance, args, kwargs
+):
+    context: SyncBrowserContext = await wrapped(*args, **kwargs)
+    session_id = str(uuid.uuid4().hex)
+    span = get_current_span()
+    if span == INVALID_SPAN:
+        span = tracer.start_span(
+            name=f"{to_wrap.get('object')}.{to_wrap.get('method')}"
+        )
+    set_span_in_context(span, get_current())
+    _context_spans[id(context)] = span
+    span.set_attribute("lmnr.internal.has_browser_session", True)
+    trace_id = format(span.get_span_context().trace_id, "032x")
+
+    async def handle_page_navigation(page):
+        return await handle_navigation_async(page, session_id, trace_id, client)
+
+    context.on("page", handle_page_navigation)
+    for page in context.pages:
+        await handle_navigation_async(page, session_id, trace_id, client)
+    return context
+
+
 @with_tracer_and_client_wrapper
 def _wrap_close_browser_sync(
     tracer: Tracer,
```
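The two method tables that follow register these wrappers. For context, entries of this shape are typically applied with `wrapt` roughly as sketched below; the registration loop and the curried `wrapper(tracer, client, method)` application are assumptions about how `with_tracer_and_client_wrapper` is consumed, since the diff only shows the table entries:

```python
from wrapt import wrap_function_wrapper

for method in WRAPPED_METHODS:
    # e.g. patches playwright.sync_api's Browser.new_context so that
    # _wrap_new_context_sync runs around the original call
    wrap_function_wrapper(
        method["package"],
        f"{method['object']}.{method['method']}",
        method["wrapper"](tracer, client, method),  # assumed partial application
    )
```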
```diff
@@ -191,6 +253,18 @@ WRAPPED_METHODS = [
         "method": "close",
         "wrapper": _wrap_close_browser_sync,
     },
+    {
+        "package": "playwright.sync_api",
+        "object": "Browser",
+        "method": "new_context",
+        "wrapper": _wrap_new_context_sync,
+    },
+    {
+        "package": "playwright.sync_api",
+        "object": "BrowserType",
+        "method": "launch_persistent_context",
+        "wrapper": _wrap_new_context_sync,
+    },
 ]
 
 WRAPPED_METHODS_ASYNC = [
```
```diff
@@ -230,6 +304,18 @@ WRAPPED_METHODS_ASYNC = [
         "method": "close",
         "wrapper": _wrap_close_browser_async,
     },
+    {
+        "package": "playwright.async_api",
+        "object": "Browser",
+        "method": "new_context",
+        "wrapper": _wrap_new_context_async,
+    },
+    {
+        "package": "playwright.sync_api",
+        "object": "BrowserType",
+        "method": "launch_persistent_context",
+        "wrapper": _wrap_new_context_sync,
+    },
 ]
 
 
```
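With `new_context` and `launch_persistent_context` now wrapped, contexts the user creates directly get a session span and the `"page"` navigation handler too, not just pages found on an existing browser. A hypothetical end-to-end sketch (the API key is a placeholder; `Laminar.initialize` is the documented entry point):

```python
from lmnr import Laminar
from playwright.sync_api import sync_playwright

Laminar.initialize(project_api_key="...")  # placeholder key

with sync_playwright() as p:
    browser = p.chromium.launch()
    context = browser.new_context()   # hits _wrap_new_context_sync
    page = context.new_page()         # "page" event -> handle_navigation_sync
    page.goto("https://example.com")
    context.close()
    browser.close()
```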
{lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/browser/pw_utils.py

```diff
@@ -36,7 +36,7 @@ INJECT_PLACEHOLDER = """
 () => {
     const BATCH_SIZE = 1000; // Maximum events to store in memory
 
-    window.lmnrRrwebEventsBatch = [];
+    window.lmnrRrwebEventsBatch = new Set();
 
     // Utility function to compress individual event data
     async function compressEventData(data) {
```
```diff
@@ -50,8 +50,8 @@ INJECT_PLACEHOLDER = """
 
     window.lmnrGetAndClearEvents = () => {
         const events = window.lmnrRrwebEventsBatch;
-        window.lmnrRrwebEventsBatch = [];
-        return events;
+        window.lmnrRrwebEventsBatch = new Set();
+        return Array.from(events);
     };
 
     // Add heartbeat events
```
```diff
@@ -62,11 +62,11 @@ INJECT_PLACEHOLDER = """
             timestamp: Date.now()
         };
 
-        window.lmnrRrwebEventsBatch.push(heartbeat);
+        window.lmnrRrwebEventsBatch.add(heartbeat);
 
         // Prevent memory issues by limiting batch size
-        if (window.lmnrRrwebEventsBatch.length > BATCH_SIZE) {
-            window.lmnrRrwebEventsBatch = window.lmnrRrwebEventsBatch.slice(-BATCH_SIZE);
+        if (window.lmnrRrwebEventsBatch.size > BATCH_SIZE) {
+            window.lmnrRrwebEventsBatch = new Set(Array.from(window.lmnrRrwebEventsBatch).slice(-BATCH_SIZE));
         }
     }, 1000);
 
```
```diff
@@ -81,7 +81,7 @@ INJECT_PLACEHOLDER = """
                 ...event,
                 data: await compressEventData(event.data)
             };
-            window.lmnrRrwebEventsBatch.push(compressedEvent);
+            window.lmnrRrwebEventsBatch.add(compressedEvent);
         }
     });
 }
```
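Switching the batch from an array to a `Set` guards against the same event object being queued twice, and `lmnrGetAndClearEvents` converts back with `Array.from` so the Python side still receives a JSON-serializable list. Roughly how such a batch is drained over Playwright's evaluate bridge (a sketch; the real injection and polling live in pw_utils):

```python
from playwright.sync_api import sync_playwright

with sync_playwright() as p:
    browser = p.chromium.launch()
    page = browser.new_page()
    page.goto("https://example.com")
    # The injected script defines window.lmnrGetAndClearEvents; the optional
    # call below only matters in this standalone sketch, where nothing injected it.
    events = page.evaluate("() => window.lmnrGetAndClearEvents?.() ?? []")
    print(len(events), "rrweb events collected")
    browser.close()
```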
{lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/client/asynchronous/resources/agent.py

```diff
@@ -35,6 +35,7 @@ class AsyncAgent(BaseAsyncResource):
         model_provider: Optional[ModelProvider] = None,
         model: Optional[str] = None,
         enable_thinking: bool = True,
+        return_screenshots: bool = False,
     ) -> AsyncIterator[RunAgentResponseChunk]:
         """Run Laminar index agent in streaming mode.
 
@@ -45,7 +46,7 @@ class AsyncAgent(BaseAsyncResource):
             model_provider (Optional[ModelProvider], optional): LLM model provider
             model (Optional[str], optional): LLM model name
             enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
-
+            return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
         Returns:
             AsyncIterator[RunAgentResponseChunk]: a generator of response chunks
         """
@@ -59,6 +60,7 @@ class AsyncAgent(BaseAsyncResource):
         model_provider: Optional[ModelProvider] = None,
         model: Optional[str] = None,
         enable_thinking: bool = True,
+        return_screenshots: bool = False,
     ) -> AgentOutput:
         """Run Laminar index agent.
 
@@ -68,7 +70,7 @@ class AsyncAgent(BaseAsyncResource):
             model_provider (Optional[ModelProvider], optional): LLM model provider
             model (Optional[str], optional): LLM model name
             enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
-
+            return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
         Returns:
             AgentOutput: agent output
         """
@@ -83,6 +85,7 @@ class AsyncAgent(BaseAsyncResource):
         model: Optional[str] = None,
         stream: Literal[False] = False,
         enable_thinking: bool = True,
+        return_screenshots: bool = False,
     ) -> AgentOutput:
         """Run Laminar index agent.
 
@@ -93,7 +96,7 @@ class AsyncAgent(BaseAsyncResource):
             model (Optional[str], optional): LLM model name
             stream (Literal[False], optional): whether to stream the agent's response
             enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
-
+            return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
         Returns:
             AgentOutput: agent output
         """
@@ -107,6 +110,7 @@ class AsyncAgent(BaseAsyncResource):
         model: Optional[str] = None,
         stream: bool = False,
         enable_thinking: bool = True,
+        return_screenshots: bool = False,
     ) -> Union[AgentOutput, Awaitable[AsyncIterator[RunAgentResponseChunk]]]:
         """Run Laminar index agent.
 
@@ -117,7 +121,7 @@ class AsyncAgent(BaseAsyncResource):
             model (Optional[str], optional): LLM model name
             stream (bool, optional): whether to stream the agent's response
             enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
-
+            return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
         Returns:
             Union[AgentOutput, AsyncIterator[RunAgentResponseChunk]]: agent output or a generator of response chunks
         """
@@ -146,6 +150,7 @@ class AsyncAgent(BaseAsyncResource):
             # https://aws.amazon.com/blogs/networking-and-content-delivery/introducing-nlb-tcp-configurable-idle-timeout/
             stream=True,
            enable_thinking=enable_thinking,
+            return_screenshots=return_screenshots,
         )
 
         # For streaming case, use a generator function
```
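A hypothetical streaming call showing the new flag from the async client; the import path and constructor arguments are assumptions, the loop shape mirrors the README above, and the screenshot is read defensively off the step chunk since the field lands on `StepChunkContent` in types.py:

```python
import asyncio
from lmnr import AsyncLaminarClient  # import path assumed

async def main():
    client = AsyncLaminarClient(project_api_key="...")  # placeholder key
    async for chunk in client.agent.run(
        prompt="Open the docs and find the quickstart",
        stream=True,
        return_screenshots=True,
    ):
        if chunk.chunkType == "step":
            print(chunk.summary)
            shot = getattr(chunk, "screenshot", None)  # base64 image when requested
            if shot:
                print(f"step screenshot: {len(shot)} base64 chars")

asyncio.run(main())
```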
{lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/client/synchronous/resources/agent.py

```diff
@@ -27,6 +27,7 @@ class Agent(BaseResource):
         model_provider: Optional[ModelProvider] = None,
         model: Optional[str] = None,
         enable_thinking: bool = True,
+        return_screenshots: bool = False,
     ) -> Generator[RunAgentResponseChunk, None, None]:
         """Run Laminar index agent in streaming mode.
 
@@ -37,7 +38,7 @@ class Agent(BaseResource):
             model_provider (Optional[ModelProvider], optional): LLM model provider
             model (Optional[str], optional): LLM model name
             enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
-
+            return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
         Returns:
             Generator[RunAgentResponseChunk, None, None]: a generator of response chunks
         """
@@ -51,6 +52,7 @@ class Agent(BaseResource):
         model_provider: Optional[ModelProvider] = None,
         model: Optional[str] = None,
         enable_thinking: bool = True,
+        return_screenshots: bool = False,
     ) -> AgentOutput:
         """Run Laminar index agent.
 
@@ -60,6 +62,7 @@ class Agent(BaseResource):
             model_provider (Optional[ModelProvider], optional): LLM model provider
             model (Optional[str], optional): LLM model name
             enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
+            return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
 
         Returns:
             AgentOutput: agent output
@@ -75,6 +78,7 @@ class Agent(BaseResource):
         model: Optional[str] = None,
         stream: Literal[False] = False,
         enable_thinking: bool = True,
+        return_screenshots: bool = False,
     ) -> AgentOutput:
         """Run Laminar index agent.
 
@@ -85,7 +89,7 @@ class Agent(BaseResource):
             model (Optional[str], optional): LLM model name
             stream (Literal[False], optional): whether to stream the agent's response
             enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
-
+            return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
 
         Returns:
             AgentOutput: agent output
@@ -100,6 +104,7 @@ class Agent(BaseResource):
         model: Optional[str] = None,
         stream: bool = False,
         enable_thinking: bool = True,
+        return_screenshots: bool = False,
     ) -> Union[AgentOutput, Generator[RunAgentResponseChunk, None, None]]:
         """Run Laminar index agent.
 
@@ -110,7 +115,7 @@ class Agent(BaseResource):
             model (Optional[str], optional): LLM model name
             stream (bool, optional): whether to stream the agent's response
             enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
-
+            return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
 
         Returns:
             Union[AgentOutput, Generator[RunAgentResponseChunk, None, None]]: agent output or a generator of response chunks
@@ -140,6 +145,7 @@ class Agent(BaseResource):
             # https://aws.amazon.com/blogs/networking-and-content-delivery/introducing-nlb-tcp-configurable-idle-timeout/
             stream=True,
             enable_thinking=enable_thinking,
+            return_screenshots=return_screenshots,
         )
 
         # For streaming case, use a generator function
```
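The synchronous client mirrors the same flag. A sketch of a non-streaming call; the constructor arguments and the output field access are assumptions extrapolated from the README's streaming example:

```python
from lmnr import LaminarClient  # import path assumed

client = LaminarClient(project_api_key="...")  # placeholder key
output = client.agent.run(
    prompt="Summarize the pricing page",
    return_screenshots=True,
)
print(output.result.content)  # field access assumed, mirroring the README
```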
{lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/evaluations.py

```diff
@@ -35,15 +35,13 @@ MAX_EXPORT_BATCH_SIZE = 64
 def get_evaluation_url(
     project_id: str, evaluation_id: str, base_url: Optional[str] = None
 ):
-    if not base_url:
+    if not base_url or base_url == "https://api.lmnr.ai":
         base_url = "https://www.lmnr.ai"
 
     url = base_url
-    if url.endswith("/"):
-        url = url[:-1]
+    url = re.sub(r"\/$", "", url)
     if url.endswith("localhost") or url.endswith("127.0.0.1"):
-        # We best effort assume that the frontend is running on port
-        # TODO: expose the frontend port?
+        # We best effort assume that the frontend is running on port 5667
         url = url + ":5667"
     return f"{url}/project/{project_id}/evaluations/{evaluation_id}"
```
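The rewritten helper now remaps the API host to the frontend host and trims at most one trailing slash. Tracing the diffed code by hand on illustrative values:

```python
from lmnr.sdk.evaluations import get_evaluation_url

print(get_evaluation_url("proj-123", "eval-456"))
# https://www.lmnr.ai/project/proj-123/evaluations/eval-456

print(get_evaluation_url("proj-123", "eval-456", base_url="https://api.lmnr.ai"))
# api host remapped: https://www.lmnr.ai/project/proj-123/evaluations/eval-456

print(get_evaluation_url("proj-123", "eval-456", base_url="http://localhost/"))
# http://localhost:5667/project/proj-123/evaluations/eval-456
```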
```diff
@@ -408,8 +406,8 @@ def evaluate(
 
     If there is no event loop, creates it and runs the evaluation until
     completion.
-    If there is an event loop,
-
+    If there is an event loop, returns an awaitable handle immediately. IMPORTANT:
+    You must await the call to `evaluate`.
 
     Parameters:
         data (Union[list[EvaluationDatapoint|dict]], EvaluationDataset]):\
```
```diff
@@ -482,6 +480,6 @@ def evaluate(
     else:
         loop = asyncio.get_event_loop()
         if loop.is_running():
-            return
+            return evaluation.run()
         else:
             return asyncio.run(evaluation.run())
```
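With this fix, calling `evaluate` under an already-running event loop (a notebook, an async web handler) hands back `evaluation.run()` to be awaited instead of silently returning `None`; plain scripts still get a blocking `asyncio.run` internally. A simplified sketch with abbreviated datapoint and evaluator shapes:

```python
from lmnr import evaluate

# In a plain script this blocks and runs the evaluation to completion.
evaluate(
    data=[{"data": {"question": "What is 2+2?"}, "target": {"answer": "4"}}],
    executor=lambda data: "4",  # stand-in for a real model call
    evaluators={"exact": lambda output, target: int(output == target["answer"])},
)

# In a notebook or other running loop, the same call must be awaited:
#   await evaluate(...)
```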
{lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/laminar.py

```diff
@@ -638,9 +638,22 @@ class Laminar:
     ) -> LaminarSpanContext:
         return LaminarSpanContext.deserialize(span_context)
 
+    @classmethod
+    def flush(cls) -> bool:
+        """Flush the internal tracer.
+
+        Returns:
+            bool: True if the tracer was flushed, False otherwise
+            (e.g. no tracer or timeout).
+        """
+        if not cls.is_initialized():
+            return False
+        return TracerManager.flush()
+
     @classmethod
     def shutdown(cls):
-
+        # other shutdown logic could be added here
+        cls.flush()
 
     @classmethod
     def set_session(
```
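Typical use of the new pair: `flush` at checkpoints while the process keeps running, `shutdown` once on exit. A short sketch (the API key is a placeholder):

```python
from lmnr import Laminar

Laminar.initialize(project_api_key="...")  # placeholder key

# ... traced work ...

ok = Laminar.flush()   # False if the SDK was never initialized or the flush timed out
Laminar.shutdown()     # final flush; call once when the process exits
```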
{lmnr-0.5.0 → lmnr-0.5.1}/src/lmnr/sdk/types.py

```diff
@@ -367,19 +367,21 @@ class ModelProvider(str, Enum):
 
 class RunAgentRequest(pydantic.BaseModel):
     prompt: str
-    state: Optional[str] = None
-    parent_span_context: Optional[str] = None
-    model_provider: Optional[ModelProvider] = None
-    model: Optional[str] = None
-    stream: bool = False
-    enable_thinking: bool = True
-    cdp_url: Optional[str] = None
+    state: Optional[str] = pydantic.Field(default=None)
+    parent_span_context: Optional[str] = pydantic.Field(default=None)
+    model_provider: Optional[ModelProvider] = pydantic.Field(default=None)
+    model: Optional[str] = pydantic.Field(default=None)
+    stream: bool = pydantic.Field(default=False)
+    enable_thinking: bool = pydantic.Field(default=True)
+    cdp_url: Optional[str] = pydantic.Field(default=None)
+    return_screenshots: bool = pydantic.Field(default=False)
 
     def to_dict(self):
         result = {
             "prompt": self.prompt,
             "stream": self.stream,
             "enableThinking": self.enable_thinking,
+            "returnScreenshots": self.return_screenshots,
         }
         if self.state:
             result["state"] = self.state
@@ -409,6 +411,7 @@ class StepChunkContent(pydantic.BaseModel):
     messageId: uuid.UUID
     actionResult: ActionResult
     summary: str
+    screenshot: Optional[str] = pydantic.Field(default=None)
 
 
 class FinalOutputChunkContent(pydantic.BaseModel):
```
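Because `to_dict` always emits the flag, the camelCase wire payload now carries `returnScreenshots` even when it is False, while the optional fields are still added only when set. Derived directly from the diffed code:

```python
from lmnr.sdk.types import RunAgentRequest

req = RunAgentRequest(prompt="hi", return_screenshots=True)
print(req.to_dict())
# {'prompt': 'hi', 'stream': False, 'enableThinking': True, 'returnScreenshots': True}
```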