lmnr 0.5.1a0__py3-none-any.whl → 0.5.2__py3-none-any.whl
This diff shows the contents of publicly released package versions as they appear in their public registries. It is provided for informational purposes only.
- lmnr/__init__.py +0 -8
- lmnr/openllmetry_sdk/__init__.py +5 -33
- lmnr/openllmetry_sdk/decorators/base.py +24 -17
- lmnr/openllmetry_sdk/instruments.py +1 -0
- lmnr/openllmetry_sdk/opentelemetry/instrumentation/google_genai/__init__.py +454 -0
- lmnr/openllmetry_sdk/opentelemetry/instrumentation/google_genai/config.py +9 -0
- lmnr/openllmetry_sdk/opentelemetry/instrumentation/google_genai/utils.py +216 -0
- lmnr/openllmetry_sdk/tracing/__init__.py +1 -0
- lmnr/openllmetry_sdk/tracing/context_manager.py +13 -0
- lmnr/openllmetry_sdk/tracing/tracing.py +230 -252
- lmnr/sdk/browser/playwright_otel.py +42 -58
- lmnr/sdk/browser/pw_utils.py +8 -40
- lmnr/sdk/client/asynchronous/async_client.py +0 -34
- lmnr/sdk/client/asynchronous/resources/__init__.py +0 -4
- lmnr/sdk/client/asynchronous/resources/agent.py +96 -6
- lmnr/sdk/client/synchronous/resources/__init__.py +1 -3
- lmnr/sdk/client/synchronous/resources/agent.py +94 -8
- lmnr/sdk/client/synchronous/sync_client.py +0 -36
- lmnr/sdk/decorators.py +16 -2
- lmnr/sdk/laminar.py +3 -3
- lmnr/sdk/types.py +84 -170
- lmnr/sdk/utils.py +8 -1
- lmnr/version.py +1 -1
- {lmnr-0.5.1a0.dist-info → lmnr-0.5.2.dist-info}/METADATA +57 -57
- lmnr-0.5.2.dist-info/RECORD +54 -0
- lmnr/sdk/client/asynchronous/resources/pipeline.py +0 -89
- lmnr/sdk/client/asynchronous/resources/semantic_search.py +0 -60
- lmnr/sdk/client/synchronous/resources/pipeline.py +0 -89
- lmnr/sdk/client/synchronous/resources/semantic_search.py +0 -60
- lmnr-0.5.1a0.dist-info/RECORD +0 -54
- {lmnr-0.5.1a0.dist-info → lmnr-0.5.2.dist-info}/LICENSE +0 -0
- {lmnr-0.5.1a0.dist-info → lmnr-0.5.2.dist-info}/WHEEL +0 -0
- {lmnr-0.5.1a0.dist-info → lmnr-0.5.2.dist-info}/entry_points.txt +0 -0
lmnr/sdk/client/synchronous/resources/agent.py
CHANGED
@@ -27,7 +27,16 @@ class Agent(BaseResource):
         model_provider: Optional[ModelProvider] = None,
         model: Optional[str] = None,
         enable_thinking: bool = True,
+        agent_state: Optional[str] = None,
+        storage_state: Optional[str] = None,
         return_screenshots: bool = False,
+        return_agent_state: bool = False,
+        return_storage_state: bool = False,
+        timeout: Optional[int] = None,
+        cdp_url: Optional[str] = None,
+        max_steps: Optional[int] = None,
+        thinking_token_budget: Optional[int] = None,
+        start_url: Optional[str] = None,
     ) -> Generator[RunAgentResponseChunk, None, None]:
         """Run Laminar index agent in streaming mode.
 
@@ -38,7 +47,16 @@ class Agent(BaseResource):
             model_provider (Optional[ModelProvider], optional): LLM model provider
             model (Optional[str], optional): LLM model name
             enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
+            agent_state (Optional[str], optional): the agent's state as returned by the previous agent run. Default to None.
+            storage_state (Optional[str], optional): the browser's storage state as returned by the previous agent run. Default to None.
             return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
+            return_agent_state (bool, optional): whether to return the agent's state. Default to False.
+            return_storage_state (bool, optional): whether to return the storage state. Default to False.
+            timeout (Optional[int], optional): timeout seconds for the agent's response. Default to None.
+            cdp_url (Optional[str], optional): Chrome DevTools Protocol URL of an existing browser session. Default to None.
+            max_steps (Optional[int], optional): maximum number of steps the agent can take. If not set, the backend will use a default value (currently 100). Default to None.
+            thinking_token_budget (Optional[int], optional): maximum number of tokens the underlying LLM can spend on thinking in each step, if supported by the model. Default to None.
+            start_url (Optional[str], optional): the URL to start the agent on. Must be a valid URL - refer to https://playwright.dev/docs/api/class-page#page-goto. If not specified, the agent infers this from the prompt. Default to None.
         Returns:
             Generator[RunAgentResponseChunk, None, None]: a generator of response chunks
         """
@@ -52,7 +70,16 @@ class Agent(BaseResource):
         model_provider: Optional[ModelProvider] = None,
         model: Optional[str] = None,
         enable_thinking: bool = True,
+        agent_state: Optional[str] = None,
+        storage_state: Optional[str] = None,
         return_screenshots: bool = False,
+        return_agent_state: bool = False,
+        return_storage_state: bool = False,
+        timeout: Optional[int] = None,
+        cdp_url: Optional[str] = None,
+        max_steps: Optional[int] = None,
+        thinking_token_budget: Optional[int] = None,
+        start_url: Optional[str] = None,
     ) -> AgentOutput:
         """Run Laminar index agent.
 
@@ -62,8 +89,16 @@ class Agent(BaseResource):
             model_provider (Optional[ModelProvider], optional): LLM model provider
             model (Optional[str], optional): LLM model name
             enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
+            agent_state (Optional[str], optional): the agent's state as returned by the previous agent run. Default to None.
+            storage_state (Optional[str], optional): the browser's storage state as returned by the previous agent run. Default to None.
             return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
-
+            return_agent_state (bool, optional): whether to return the agent's state. Default to False.
+            return_storage_state (bool, optional): whether to return the storage state. Default to False.
+            timeout (Optional[int], optional): timeout seconds for the agent's response. Default to None.
+            cdp_url (Optional[str], optional): Chrome DevTools Protocol URL of an existing browser session. Default to None.
+            max_steps (Optional[int], optional): maximum number of steps the agent can take. If not set, the backend will use a default value (currently 100). Default to None.
+            thinking_token_budget (Optional[int], optional): maximum number of tokens the underlying LLM can spend on thinking in each step, if supported by the model. Default to None.
+            start_url (Optional[str], optional): the URL to start the agent on. Must be a valid URL - refer to https://playwright.dev/docs/api/class-page#page-goto. If not specified, the agent infers this from the prompt. Default to None.
         Returns:
             AgentOutput: agent output
         """
@@ -78,7 +113,16 @@ class Agent(BaseResource):
         model: Optional[str] = None,
         stream: Literal[False] = False,
         enable_thinking: bool = True,
+        agent_state: Optional[str] = None,
+        storage_state: Optional[str] = None,
         return_screenshots: bool = False,
+        return_agent_state: bool = False,
+        return_storage_state: bool = False,
+        timeout: Optional[int] = None,
+        cdp_url: Optional[str] = None,
+        max_steps: Optional[int] = None,
+        thinking_token_budget: Optional[int] = None,
+        start_url: Optional[str] = None,
     ) -> AgentOutput:
         """Run Laminar index agent.
 
@@ -89,8 +133,16 @@ class Agent(BaseResource):
             model (Optional[str], optional): LLM model name
             stream (Literal[False], optional): whether to stream the agent's response
             enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
+            agent_state (Optional[str], optional): the agent's state as returned by the previous agent run. Default to None.
+            storage_state (Optional[str], optional): the browser's storage state as returned by the previous agent run. Default to None.
             return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
-
+            return_agent_state (bool, optional): whether to return the agent's state. Default to False.
+            return_storage_state (bool, optional): whether to return the storage state. Default to False.
+            timeout (Optional[int], optional): timeout seconds for the agent's response. Default to None.
+            cdp_url (Optional[str], optional): Chrome DevTools Protocol URL of an existing browser session. Default to None.
+            max_steps (Optional[int], optional): maximum number of steps the agent can take. If not set, the backend will use a default value (currently 100). Default to None.
+            thinking_token_budget (Optional[int], optional): maximum number of tokens the underlying LLM can spend on thinking in each step, if supported by the model. Default to None.
+            start_url (Optional[str], optional): the URL to start the agent on. Must be a valid URL - refer to https://playwright.dev/docs/api/class-page#page-goto. If not specified, the agent infers this from the prompt. Default to None.
         Returns:
             AgentOutput: agent output
         """
@@ -104,7 +156,16 @@ class Agent(BaseResource):
         model: Optional[str] = None,
         stream: bool = False,
         enable_thinking: bool = True,
+        agent_state: Optional[str] = None,
+        storage_state: Optional[str] = None,
         return_screenshots: bool = False,
+        return_agent_state: bool = False,
+        return_storage_state: bool = False,
+        timeout: Optional[int] = None,
+        cdp_url: Optional[str] = None,
+        max_steps: Optional[int] = None,
+        thinking_token_budget: Optional[int] = None,
+        start_url: Optional[str] = None,
     ) -> Union[AgentOutput, Generator[RunAgentResponseChunk, None, None]]:
         """Run Laminar index agent.
 
@@ -115,7 +176,16 @@ class Agent(BaseResource):
             model (Optional[str], optional): LLM model name
             stream (bool, optional): whether to stream the agent's response
             enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
+            agent_state (Optional[str], optional): the agent's state as returned by the previous agent run. Default to None.
+            storage_state (Optional[str], optional): the browser's storage state as returned by the previous agent run. Default to None.
             return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
+            return_agent_state (bool, optional): whether to return the agent's state. Default to False.
+            return_storage_state (bool, optional): whether to return the storage state. Default to False.
+            timeout (Optional[int], optional): timeout seconds for the agent's response. Default to None.
+            cdp_url (Optional[str], optional): Chrome DevTools Protocol URL of an existing browser session. Default to None.
+            max_steps (Optional[int], optional): maximum number of steps the agent can take. If not set, the backend will use a default value (currently 100). Default to None.
+            thinking_token_budget (Optional[int], optional): maximum number of tokens the underlying LLM can spend on thinking in each step, if supported by the model. Default to None.
+            start_url (Optional[str], optional): the URL to start the agent on. Must be a valid URL - refer to https://playwright.dev/docs/api/class-page#page-goto. If not specified, the agent infers this from the prompt. Default to None.
 
         Returns:
             Union[AgentOutput, Generator[RunAgentResponseChunk, None, None]]: agent output or a generator of response chunks
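Together, these signature and docstring hunks let a caller persist state across runs and bound execution. A minimal sketch of a non-streaming call with the new parameters; the client construction and the prompt argument are illustrative assumptions, not part of this diff:

    from lmnr import LaminarClient

    client = LaminarClient(project_api_key="...")  # hypothetical setup

    output = client.agent.run(
        prompt="Find the pricing page and summarize the plans",
        start_url="https://example.com",  # must be a valid URL per the docstring
        max_steps=25,                     # backend default is currently 100
        timeout=300,                      # seconds
        thinking_token_budget=2048,       # per-step thinking cap, model permitting
        return_agent_state=True,          # opt in: chat-history state (large)
        return_storage_state=True,        # opt in: cookies/auth state (sensitive)
    )
    # output.agent_state / output.storage_state can seed the next run via the
    # agent_state= and storage_state= parameters.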
@@ -137,15 +207,23 @@ class Agent(BaseResource):
             parent_span_context=parent_span_context,
             model_provider=model_provider,
             model=model,
-
-
+            agent_state=agent_state,
+            storage_state=storage_state,
+            # We always connect to stream, because our network configuration
+            # has a hard fixed idle timeout of 350 seconds.
             # This means that if we don't stream, the connection will be closed.
             # For now, we just return the content of the final chunk if `stream` is
             # `False`.
-            # https://aws.amazon.com/blogs/networking-and-content-delivery/introducing-nlb-tcp-configurable-idle-timeout/
             stream=True,
             enable_thinking=enable_thinking,
             return_screenshots=return_screenshots,
+            return_agent_state=return_agent_state,
+            return_storage_state=return_storage_state,
+            timeout=timeout,
+            cdp_url=cdp_url,
+            max_steps=max_steps,
+            thinking_token_budget=thinking_token_budget,
+            start_url=start_url,
         )
 
         # For streaming case, use a generator function
@@ -169,7 +247,7 @@ class Agent(BaseResource):
         with self._client.stream(
             "POST",
             self._base_url + "/v1/agent/run",
-            json=request.to_dict(),
+            json=request.model_dump(by_alias=True),
             headers=self._headers(),
         ) as response:
             for line in response.iter_lines():
@@ -182,6 +260,8 @@ class Agent(BaseResource):
                 if line:
                     chunk = RunAgentResponseChunk.model_validate_json(line)
                     yield chunk.root
+                    if chunk.root.chunk_type in ["finalOutput", "error"]:
+                        break
 
     def __run_non_streaming(self, request: RunAgentRequest) -> AgentOutput:
         """Run agent in non-streaming mode.
@@ -197,9 +277,11 @@ class Agent(BaseResource):
         with self._client.stream(
             "POST",
             self._base_url + "/v1/agent/run",
-            json=request.to_dict(),
+            json=request.model_dump(by_alias=True),
             headers=self._headers(),
         ) as response:
+            if response.status_code != 200:
+                raise RuntimeError(response.read())
             for line in response.iter_lines():
                 line = str(line)
                 if line.startswith("[DONE]"):
@@ -209,7 +291,11 @@ class Agent(BaseResource):
                 line = line[6:]
                 if line:
                     chunk = RunAgentResponseChunk.model_validate_json(line)
-                    if chunk.root.
+                    if chunk.root.chunk_type == "finalOutput":
                         final_chunk = chunk.root
+                    elif chunk.root.chunk_type == "error":
+                        raise RuntimeError(chunk.root.error)
+                    elif chunk.root.chunk_type == "timeout":
+                        raise TimeoutError("Agent timed out")
 
         return final_chunk.content if final_chunk is not None else AgentOutput()
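With these hunks, the streaming generator now stops itself after a finalOutput or error chunk, and the non-streaming path converts error and timeout chunks into RuntimeError and TimeoutError. A sketch of consuming the stream under the new semantics (prompt and client setup are illustrative):

    for chunk in client.agent.run(prompt="...", stream=True, return_screenshots=True):
        if chunk.chunk_type == "step":
            print(chunk.summary)                   # one summary per agent step
        elif chunk.chunk_type == "timeout":
            print("timed out at:", chunk.summary)  # last chunk in the stream
        elif chunk.chunk_type == "error":
            print("agent failed:", chunk.error)    # generator ends after this
        elif chunk.chunk_type == "finalOutput":
            print(chunk.content.result.content)    # generator ends after this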
lmnr/sdk/client/synchronous/sync_client.py
CHANGED
@@ -11,8 +11,6 @@ from lmnr.sdk.client.synchronous.resources import (
     Agent,
     BrowserEvents,
     Evals,
-    Pipeline,
-    SemanticSearch,
 )
 from lmnr.sdk.utils import from_env
 
@@ -25,8 +23,6 @@ class LaminarClient:
     __client: httpx.Client = None
 
     # Resource properties
-    __pipeline: Optional[Pipeline] = None
-    __semantic_search: Optional[SemanticSearch] = None
     __agent: Optional[Agent] = None
     __evals: Optional[Evals] = None
 
@@ -71,36 +67,12 @@ class LaminarClient:
         )
 
         # Initialize resource objects
-        self.__pipeline = Pipeline(
-            self.__client, self.__base_url, self.__project_api_key
-        )
-        self.__semantic_search = SemanticSearch(
-            self.__client, self.__base_url, self.__project_api_key
-        )
         self.__agent = Agent(self.__client, self.__base_url, self.__project_api_key)
         self.__evals = Evals(self.__client, self.__base_url, self.__project_api_key)
         self.__browser_events = BrowserEvents(
             self.__client, self.__base_url, self.__project_api_key
         )
 
-    @property
-    def pipeline(self) -> Pipeline:
-        """Get the Pipeline resource.
-
-        Returns:
-            Pipeline: The Pipeline resource instance.
-        """
-        return self.__pipeline
-
-    @property
-    def semantic_search(self) -> SemanticSearch:
-        """Get the SemanticSearch resource.
-
-        Returns:
-            SemanticSearch: The SemanticSearch resource instance.
-        """
-        return self.__semantic_search
-
     @property
     def agent(self) -> Agent:
         """Get the Agent resource.
@@ -150,14 +122,6 @@ class LaminarClient:
         if hasattr(self, "_client"):
             self.__client.close()
 
-    @property
-    def base_url(self) -> str:
-        return self.__base_url
-
-    @property
-    def project_api_key(self) -> str:
-        return self.__project_api_key
-
     def __enter__(self: _T) -> _T:
         return self
 
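Code that reached for client.pipeline or client.semantic_search must migrate off those resources; agent, evals, and browser_events remain, and the base_url/project_api_key accessors are gone as well. The context-manager protocol is unchanged; a sketch, assuming __exit__ (not shown in this diff) still delegates to close():

    with LaminarClient(project_api_key="...") as client:  # hypothetical setup
        result = client.agent.run(prompt="...")
    # on exit, the underlying httpx.Client is closed via close()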
lmnr/sdk/decorators.py
CHANGED
@@ -24,6 +24,7 @@ def observe(
     ignore_input: bool = False,
     ignore_output: bool = False,
     span_type: Union[Literal["DEFAULT"], Literal["LLM"], Literal["TOOL"]] = "DEFAULT",
+    ignore_inputs: Optional[list[str]] = None,
 ) -> Callable[[Callable[P, R]], Callable[P, R]]:
     """The main decorator entrypoint for Laminar. This is used to wrap
     functions and methods to create spans.
@@ -32,9 +33,20 @@ def observe(
         name (Optional[str], optional): Name of the span. Function
             name is used if not specified.
            Defaults to None.
-        session_id (Optional[str], optional): Session ID to associate with the
+        session_id (Optional[str], optional): Session ID to associate with the\
            span and the following context. Defaults to None.
-
+        ignore_input (bool, optional): Whether to ignore ALL input of the\
+            wrapped function. Defaults to False.
+        ignore_output (bool, optional): Whether to ignore ALL output of the\
+            wrapped function. Defaults to False.
+        span_type (Union[Literal["DEFAULT"], Literal["LLM"], Literal["TOOL"]], optional): Type of the span.
+            Defaults to "DEFAULT".
+        ignore_inputs (Optional[list[str]], optional): List of input keys to ignore.
+            For example, if the wrapped function takes three arguments,\
+            def foo(a, b, `sensitive_data`), and you want to ignore the\
+            `sensitive_data` argument, you can pass ["sensitive_data"] to\
+            this argument.
+            Defaults to None.
     Raises:
         Exception: re-raises the exception if the wrapped function raises
             an exception
@@ -58,6 +70,7 @@ def observe(
             ignore_input=ignore_input,
             ignore_output=ignore_output,
             span_type=span_type,
+            ignore_inputs=ignore_inputs,
         )(func)
         if is_async(func)
         else entity_method(
@@ -65,6 +78,7 @@ def observe(
             ignore_input=ignore_input,
             ignore_output=ignore_output,
             span_type=span_type,
+            ignore_inputs=ignore_inputs,
         )(func)
     )
 
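The new ignore_inputs parameter allows redacting individual arguments while still recording the rest, unlike the all-or-nothing ignore_input flag. A sketch built on the docstring's own example; process() stands in for arbitrary application code:

    from lmnr import observe

    @observe(ignore_inputs=["sensitive_data"])
    def foo(a, b, sensitive_data):
        # a and b are recorded as span input; sensitive_data is omitted
        return process(a, b, sensitive_data)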
lmnr/sdk/laminar.py
CHANGED
@@ -2,7 +2,7 @@ from contextlib import contextmanager
 from contextvars import Context
 from lmnr.openllmetry_sdk import TracerManager
 from lmnr.openllmetry_sdk.instruments import Instruments
-from lmnr.openllmetry_sdk import get_tracer
+from lmnr.openllmetry_sdk.tracing import get_tracer
 from lmnr.openllmetry_sdk.tracing.attributes import (
     ASSOCIATION_PROPERTIES,
     Attributes,
@@ -652,8 +652,8 @@ class Laminar:
 
     @classmethod
     def shutdown(cls):
-        cls.
-
+        if cls.is_initialized():
+            TracerManager.shutdown()
 
     @classmethod
     def set_session(
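The reworked shutdown() delegates to TracerManager.shutdown() only when tracing was actually initialized, making the call a safe no-op otherwise:

    from lmnr import Laminar

    # Safe even if Laminar.initialize(...) was never called in this process.
    Laminar.shutdown()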
lmnr/sdk/types.py
CHANGED
@@ -1,118 +1,20 @@
 import logging
 import datetime
 from enum import Enum
-import httpx
 import json
 from opentelemetry.trace import SpanContext, TraceFlags
 import pydantic
 from typing import Any, Awaitable, Callable, Literal, Optional, Union
 import uuid
 
-
-
-
-class ChatMessage(pydantic.BaseModel):
-    role: str
-    content: str
-
+import pydantic.alias_generators
 
-class ConditionedValue(pydantic.BaseModel):
-    condition: str
-    value: "NodeInput"
+from .utils import serialize
 
 
 Numeric = Union[int, float]
 NumericTypes = (int, float)  # for use with isinstance
 
-NodeInput = Union[str, list[ChatMessage], ConditionedValue, Numeric, bool]
-PipelineOutput = Union[NodeInput]
-
-
-class PipelineRunRequest(pydantic.BaseModel):
-    inputs: dict[str, NodeInput]
-    pipeline: str
-    env: dict[str, str] = pydantic.Field(default_factory=dict)
-    metadata: dict[str, str] = pydantic.Field(default_factory=dict)
-    stream: bool = pydantic.Field(default=False)
-    parent_span_id: Optional[uuid.UUID] = pydantic.Field(default=None)
-    trace_id: Optional[uuid.UUID] = pydantic.Field(default=None)
-
-    # uuid is not serializable by default, so we need to convert it to a string
-    def to_dict(self):
-        return {
-            "inputs": {
-                k: v.model_dump() if isinstance(v, pydantic.BaseModel) else serialize(v)
-                for k, v in self.inputs.items()
-            },
-            "pipeline": self.pipeline,
-            "env": self.env,
-            "metadata": self.metadata,
-            "stream": self.stream,
-            "parentSpanId": str(self.parent_span_id) if self.parent_span_id else None,
-            "traceId": str(self.trace_id) if self.trace_id else None,
-        }
-
-
-class PipelineRunResponse(pydantic.BaseModel):
-    outputs: dict[str, dict[str, PipelineOutput]]
-    run_id: str
-
-
-class SemanticSearchRequest(pydantic.BaseModel):
-    query: str
-    dataset_id: uuid.UUID
-    limit: Optional[int] = pydantic.Field(default=None)
-    threshold: Optional[float] = pydantic.Field(default=None, ge=0.0, le=1.0)
-
-    def to_dict(self):
-        res = {
-            "query": self.query,
-            "datasetId": str(self.dataset_id),
-        }
-        if self.limit is not None:
-            res["limit"] = self.limit
-        if self.threshold is not None:
-            res["threshold"] = self.threshold
-        return res
-
-
-class SemanticSearchResult(pydantic.BaseModel):
-    dataset_id: uuid.UUID
-    score: float
-    data: dict[str, Any]
-    content: str
-
-
-class SemanticSearchResponse(pydantic.BaseModel):
-    results: list[SemanticSearchResult]
-
-
-class PipelineRunError(Exception):
-    error_code: str
-    error_message: str
-
-    def __init__(self, response: httpx.Response):
-        try:
-            resp_json = response.json()
-            try:
-                resp_dict = dict(resp_json)
-            except Exception:
-                resp_dict = {}
-            self.error_code = resp_dict.get("error_code")
-            self.error_message = resp_dict.get("error_message")
-            super().__init__(self.error_message)
-        except Exception:
-            super().__init__(response.text)
-
-    def __str__(self) -> str:
-        try:
-            return str(
-                {"error_code": self.error_code, "error_message": self.error_message}
-            )
-        except Exception:
-            return super().__str__()
-
-
 EvaluationDatapointData = Any  # non-null, must be JSON-serializable
 EvaluationDatapointTarget = Optional[Any]  # must be JSON-serializable
 EvaluationDatapointMetadata = Optional[Any]  # must be JSON-serializable
@@ -322,52 +224,18 @@ class LaminarSpanContext(pydantic.BaseModel):
 class ModelProvider(str, Enum):
     ANTHROPIC = "anthropic"
     BEDROCK = "bedrock"
-
-
-# class AgentChatMessageContentTextBlock(pydantic.BaseModel):
-#     type: Literal["text"]
-#     text: str
-
-
-# class AgentChatMessageImageUrlBlock(pydantic.BaseModel):
-#     type: Literal["image"]
-#     imageUrl: str
-
-
-# class AgentChatMessageImageBase64Block(pydantic.BaseModel):
-#     type: Literal["image"]
-#     imageB64: str
-
-
-# class AgentChatMessageImageBlock(pydantic.RootModel):
-#     root: Union[AgentChatMessageImageUrlBlock, AgentChatMessageImageBase64Block]
-
-
-# class AgentChatMessageContentBlock(pydantic.RootModel):
-#     root: Union[AgentChatMessageContentTextBlock, AgentChatMessageImageBlock]
-
-
-# class AgentChatMessageContent(pydantic.RootModel):
-#     root: Union[str, list[AgentChatMessageContentBlock]]
-
-
-# class AgentChatMessage(pydantic.BaseModel):
-#     role: str
-#     content: AgentChatMessageContent
-#     name: Optional[str] = None
-#     toolCallId: Optional[str] = None
-#     isStateMessage: bool = False
-
-
-# class AgentState(pydantic.BaseModel):
-#     messages: str = pydantic.Field(default="")
-#     messages: list[AgentChatMessage] = pydantic.Field(default_factory=list)
-#     browser_state: Optional[BrowserState] = None
+    OPENAI = "openai"
+    GEMINI = "gemini"
 
 
 class RunAgentRequest(pydantic.BaseModel):
+    model_config = pydantic.ConfigDict(
+        alias_generator=pydantic.alias_generators.to_camel,
+        populate_by_name=True,
+    )
     prompt: str
-
+    storage_state: Optional[str] = pydantic.Field(default=None)
+    agent_state: Optional[str] = pydantic.Field(default=None)
     parent_span_context: Optional[str] = pydantic.Field(default=None)
     model_provider: Optional[ModelProvider] = pydantic.Field(default=None)
     model: Optional[str] = pydantic.Field(default=None)
@@ -375,50 +243,96 @@ class RunAgentRequest(pydantic.BaseModel):
     enable_thinking: bool = pydantic.Field(default=True)
     cdp_url: Optional[str] = pydantic.Field(default=None)
     return_screenshots: bool = pydantic.Field(default=False)
-
-
-
-
-
-
-            "returnScreenshots": self.return_screenshots,
-        }
-        if self.state:
-            result["state"] = self.state
-        if self.parent_span_context:
-            result["parentSpanContext"] = self.parent_span_context
-        if self.model_provider:
-            result["modelProvider"] = self.model_provider.value
-        if self.model:
-            result["model"] = self.model
-        if self.cdp_url:
-            result["cdpUrl"] = self.cdp_url
-        return result
+    return_storage_state: bool = pydantic.Field(default=False)
+    return_agent_state: bool = pydantic.Field(default=False)
+    timeout: Optional[int] = pydantic.Field(default=None)
+    max_steps: Optional[int] = pydantic.Field(default=None)
+    thinking_token_budget: Optional[int] = pydantic.Field(default=None)
+    start_url: Optional[str] = pydantic.Field(default=None)
 
 
 class ActionResult(pydantic.BaseModel):
-
+    model_config = pydantic.ConfigDict(
+        alias_generator=pydantic.alias_generators.to_camel
+    )
+    is_done: bool = pydantic.Field(default=False)
     content: Optional[str] = pydantic.Field(default=None)
     error: Optional[str] = pydantic.Field(default=None)
 
 
 class AgentOutput(pydantic.BaseModel):
+    model_config = pydantic.ConfigDict(
+        alias_generator=pydantic.alias_generators.to_camel
+    )
     result: ActionResult = pydantic.Field(default_factory=ActionResult)
+    # Browser state with data related to auth, such as cookies.
+    # A stringified JSON object.
+    # Only returned if return_storage_state is True.
+    # CAUTION: This object may become large. It also may contain sensitive data.
+    storage_state: Optional[str] = pydantic.Field(default=None)
+    # Agent state with data related to the agent's state, such as the chat history.
+    # A stringified JSON object.
+    # Only returned if return_agent_state is True.
+    # CAUTION: This object is large.
+    agent_state: Optional[str] = pydantic.Field(default=None)
 
 
 class StepChunkContent(pydantic.BaseModel):
-
-
-
-
+    model_config = pydantic.ConfigDict(
+        alias_generator=pydantic.alias_generators.to_camel
+    )
+    chunk_type: Literal["step"] = pydantic.Field(default="step")
+    message_id: uuid.UUID = pydantic.Field()
+    action_result: ActionResult = pydantic.Field()
+    summary: str = pydantic.Field()
+    screenshot: Optional[str] = pydantic.Field(default=None)
+
+
+class TimeoutChunkContent(pydantic.BaseModel):
+    """Chunk content to indicate that timeout has been hit. The only difference from a regular step
+    is the chunk type. This is the last chunk in the stream.
+    """
+
+    model_config = pydantic.ConfigDict(
+        alias_generator=pydantic.alias_generators.to_camel
+    )
+    chunk_type: Literal["timeout"] = pydantic.Field(default="timeout")
+    message_id: uuid.UUID = pydantic.Field()
+    summary: str = pydantic.Field()
     screenshot: Optional[str] = pydantic.Field(default=None)
 
 
 class FinalOutputChunkContent(pydantic.BaseModel):
-
-
-
+    """Chunk content to indicate that the agent has finished executing. This
+    is the last chunk in the stream.
+    """
+
+    model_config = pydantic.ConfigDict(
+        alias_generator=pydantic.alias_generators.to_camel
+    )
+
+    chunk_type: Literal["finalOutput"] = pydantic.Field(default="finalOutput")
+    message_id: uuid.UUID = pydantic.Field()
+    content: AgentOutput = pydantic.Field()
+
+
+class ErrorChunkContent(pydantic.BaseModel):
+    """Chunk content to indicate that an error has occurred. Typically, this
+    is the last chunk in the stream.
+    """
+
+    model_config = pydantic.ConfigDict(
+        alias_generator=pydantic.alias_generators.to_camel
+    )
+    chunk_type: Literal["error"] = pydantic.Field(default="error")
+    message_id: uuid.UUID = pydantic.Field()
+    error: str = pydantic.Field()
 
 
 class RunAgentResponseChunk(pydantic.RootModel):
-    root: Union[
+    root: Union[
+        StepChunkContent,
+        FinalOutputChunkContent,
+        ErrorChunkContent,
+        TimeoutChunkContent,
+    ]
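With the to_camel alias generators in place, model_dump(by_alias=True) replaces the deleted hand-written to_dict() methods, and model_validate_json accepts the camelCase wire format directly. A small round-trip sketch with illustrative values:

    from lmnr.sdk.types import RunAgentRequest, RunAgentResponseChunk

    req = RunAgentRequest(prompt="hi", cdp_url="ws://localhost:9222", max_steps=5)
    # populate_by_name=True lets us construct with snake_case names above;
    # the dump below emits camelCase keys such as cdpUrl and maxSteps.
    print(req.model_dump(by_alias=True))

    chunk = RunAgentResponseChunk.model_validate_json(
        '{"chunkType": "error",'
        ' "messageId": "c6fdb56e-a47b-4a9c-958f-2d3b4bd21bc0",'
        ' "error": "boom"}'
    )
    print(type(chunk.root).__name__)  # ErrorChunkContent, matched via its chunkType literal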
lmnr/sdk/utils.py
CHANGED
@@ -88,13 +88,20 @@ def get_input_from_func_args(
     is_method: bool = False,
     func_args: list[typing.Any] = [],
     func_kwargs: dict[str, typing.Any] = {},
+    ignore_inputs: typing.Optional[list[str]] = None,
 ) -> dict[str, typing.Any]:
     # Remove implicitly passed "self" or "cls" argument for
     # instance or class methods
-    res =
+    res = {
+        k: v
+        for k, v in func_kwargs.items()
+        if not (ignore_inputs and k in ignore_inputs)
+    }
     for i, k in enumerate(inspect.signature(func).parameters.keys()):
         if is_method and k in ["self", "cls"]:
             continue
+        if ignore_inputs and k in ignore_inputs:
+            continue
         # If param has default value, then it's not present in func args
         if i < len(func_args):
             res[k] = func_args[i]
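The kwargs comprehension and the extra continue mean an ignored name is dropped whether it arrived positionally or as a keyword. A sketch, assuming the function's leading parameter (outside this hunk) is the wrapped callable itself, as the inspect.signature(func) call suggests:

    from lmnr.sdk.utils import get_input_from_func_args

    def foo(a, b, sensitive_data=None):
        ...

    inputs = get_input_from_func_args(
        foo,
        func_args=[1, 2],
        func_kwargs={"sensitive_data": "secret"},
        ignore_inputs=["sensitive_data"],
    )
    print(inputs)  # {'a': 1, 'b': 2} -- 'sensitive_data' is filtered out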