lmnr 0.4.66__py3-none-any.whl → 0.5.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lmnr/__init__.py +30 -0
- lmnr/openllmetry_sdk/__init__.py +4 -16
- lmnr/openllmetry_sdk/tracing/attributes.py +0 -1
- lmnr/openllmetry_sdk/tracing/tracing.py +30 -10
- lmnr/sdk/browser/browser_use_otel.py +4 -4
- lmnr/sdk/browser/playwright_otel.py +299 -228
- lmnr/sdk/browser/pw_utils.py +289 -0
- lmnr/sdk/browser/utils.py +18 -53
- lmnr/sdk/client/asynchronous/async_client.py +157 -0
- lmnr/sdk/client/asynchronous/resources/__init__.py +13 -0
- lmnr/sdk/client/asynchronous/resources/agent.py +220 -0
- lmnr/sdk/client/asynchronous/resources/base.py +32 -0
- lmnr/sdk/client/asynchronous/resources/browser_events.py +40 -0
- lmnr/sdk/client/asynchronous/resources/evals.py +64 -0
- lmnr/sdk/client/asynchronous/resources/pipeline.py +89 -0
- lmnr/sdk/client/asynchronous/resources/semantic_search.py +60 -0
- lmnr/sdk/client/synchronous/resources/__init__.py +7 -0
- lmnr/sdk/client/synchronous/resources/agent.py +215 -0
- lmnr/sdk/client/synchronous/resources/base.py +32 -0
- lmnr/sdk/client/synchronous/resources/browser_events.py +40 -0
- lmnr/sdk/client/synchronous/resources/evals.py +102 -0
- lmnr/sdk/client/synchronous/resources/pipeline.py +89 -0
- lmnr/sdk/client/synchronous/resources/semantic_search.py +60 -0
- lmnr/sdk/client/synchronous/sync_client.py +170 -0
- lmnr/sdk/datasets.py +7 -2
- lmnr/sdk/evaluations.py +59 -35
- lmnr/sdk/laminar.py +34 -174
- lmnr/sdk/types.py +124 -23
- lmnr/sdk/utils.py +10 -0
- lmnr/version.py +6 -6
- {lmnr-0.4.66.dist-info → lmnr-0.5.1.dist-info}/METADATA +88 -38
- lmnr-0.5.1.dist-info/RECORD +55 -0
- {lmnr-0.4.66.dist-info → lmnr-0.5.1.dist-info}/WHEEL +1 -1
- lmnr/sdk/client.py +0 -313
- lmnr-0.4.66.dist-info/RECORD +0 -39
- {lmnr-0.4.66.dist-info → lmnr-0.5.1.dist-info}/LICENSE +0 -0
- {lmnr-0.4.66.dist-info → lmnr-0.5.1.dist-info}/entry_points.txt +0 -0
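
The largest structural change in 0.5.x is the removal of the monolithic `lmnr/sdk/client.py` in favor of per-resource modules under `lmnr/sdk/client/synchronous/` and `lmnr/sdk/client/asynchronous/`, all built on a small `BaseResource`. For illustration only (not part of the diff), a minimal sketch of how the synchronous resources shown below could be composed over a shared `httpx.Client`; the base URL and the direct-construction pattern are assumptions — the released `sync_client.py`/`async_client.py` presumably do this wiring internally.

```python
# Illustrative sketch, not part of the released diff.
import os
import httpx

from lmnr.sdk.client.synchronous.resources.agent import Agent
from lmnr.sdk.client.synchronous.resources.evals import Evals

client = httpx.Client(timeout=60)
base_url = "https://api.lmnr.ai"  # assumed default; the SDK makes this configurable
api_key = os.environ["LMNR_PROJECT_API_KEY"]

# Each resource takes (client, base_url, project_api_key), per BaseResource below.
agent = Agent(client, base_url, api_key)
evals = Evals(client, base_url, api_key)
```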
lmnr/sdk/client/synchronous/resources/agent.py (new file)
@@ -0,0 +1,215 @@
"""Agent resource for interacting with Laminar agents."""

from typing import Generator, Literal, Optional, Union, overload
import uuid

from lmnr.sdk.client.synchronous.resources.base import BaseResource
from opentelemetry import trace

from lmnr.sdk.types import (
    AgentOutput,
    LaminarSpanContext,
    ModelProvider,
    RunAgentRequest,
    RunAgentResponseChunk,
)


class Agent(BaseResource):
    """Resource for interacting with Laminar agents."""

    @overload
    def run(
        self,
        prompt: str,
        stream: Literal[True],
        parent_span_context: Optional[Union[LaminarSpanContext, str]] = None,
        model_provider: Optional[ModelProvider] = None,
        model: Optional[str] = None,
        enable_thinking: bool = True,
        return_screenshots: bool = False,
    ) -> Generator[RunAgentResponseChunk, None, None]:
        """Run Laminar index agent in streaming mode.

        Args:
            prompt (str): prompt for the agent
            stream (Literal[True]): whether to stream the agent's response
            parent_span_context (Optional[Union[LaminarSpanContext, str]], optional): span context if the agent is part of a trace
            model_provider (Optional[ModelProvider], optional): LLM model provider
            model (Optional[str], optional): LLM model name
            enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
            return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
        Returns:
            Generator[RunAgentResponseChunk, None, None]: a generator of response chunks
        """
        pass

    @overload
    def run(
        self,
        prompt: str,
        parent_span_context: Optional[Union[LaminarSpanContext, str]] = None,
        model_provider: Optional[ModelProvider] = None,
        model: Optional[str] = None,
        enable_thinking: bool = True,
        return_screenshots: bool = False,
    ) -> AgentOutput:
        """Run Laminar index agent.

        Args:
            prompt (str): prompt for the agent
            parent_span_context (Optional[Union[LaminarSpanContext, str]], optional): span context if the agent is part of a trace
            model_provider (Optional[ModelProvider], optional): LLM model provider
            model (Optional[str], optional): LLM model name
            enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
            return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.

        Returns:
            AgentOutput: agent output
        """
        pass

    @overload
    def run(
        self,
        prompt: str,
        parent_span_context: Optional[Union[LaminarSpanContext, str]] = None,
        model_provider: Optional[ModelProvider] = None,
        model: Optional[str] = None,
        stream: Literal[False] = False,
        enable_thinking: bool = True,
        return_screenshots: bool = False,
    ) -> AgentOutput:
        """Run Laminar index agent.

        Args:
            prompt (str): prompt for the agent
            parent_span_context (Optional[Union[LaminarSpanContext, str]], optional): span context if the agent is part of a trace
            model_provider (Optional[ModelProvider], optional): LLM model provider
            model (Optional[str], optional): LLM model name
            stream (Literal[False], optional): whether to stream the agent's response
            enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
            return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.

        Returns:
            AgentOutput: agent output
        """
        pass

    def run(
        self,
        prompt: str,
        parent_span_context: Optional[Union[LaminarSpanContext, str]] = None,
        model_provider: Optional[ModelProvider] = None,
        model: Optional[str] = None,
        stream: bool = False,
        enable_thinking: bool = True,
        return_screenshots: bool = False,
    ) -> Union[AgentOutput, Generator[RunAgentResponseChunk, None, None]]:
        """Run Laminar index agent.

        Args:
            prompt (str): prompt for the agent
            parent_span_context (Optional[Union[LaminarSpanContext, str]], optional): span context if the agent is part of a trace
            model_provider (Optional[ModelProvider], optional): LLM model provider
            model (Optional[str], optional): LLM model name
            stream (bool, optional): whether to stream the agent's response
            enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
            return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.

        Returns:
            Union[AgentOutput, Generator[RunAgentResponseChunk, None, None]]: agent output or a generator of response chunks
        """
        if parent_span_context is None:
            span = trace.get_current_span()
            if span != trace.INVALID_SPAN:
                parent_span_context = LaminarSpanContext(
                    trace_id=uuid.UUID(int=span.get_span_context().trace_id),
                    span_id=uuid.UUID(int=span.get_span_context().span_id),
                    is_remote=span.get_span_context().is_remote,
                )
        if parent_span_context is not None and isinstance(
            parent_span_context, LaminarSpanContext
        ):
            parent_span_context = str(parent_span_context)
        request = RunAgentRequest(
            prompt=prompt,
            parent_span_context=parent_span_context,
            model_provider=model_provider,
            model=model,
            # We always connect to stream, because our TLS listeners on AWS
            # Network load balancers have a hard fixed idle timeout of 350 seconds.
            # This means that if we don't stream, the connection will be closed.
            # For now, we just return the content of the final chunk if `stream` is
            # `False`.
            # https://aws.amazon.com/blogs/networking-and-content-delivery/introducing-nlb-tcp-configurable-idle-timeout/
            stream=True,
            enable_thinking=enable_thinking,
            return_screenshots=return_screenshots,
        )

        # For streaming case, use a generator function
        if stream:
            return self.__run_streaming(request)
        else:
            # For non-streaming case, process all chunks and return the final result
            return self.__run_non_streaming(request)

    def __run_streaming(
        self, request: RunAgentRequest
    ) -> Generator[RunAgentResponseChunk, None, None]:
        """Run agent in streaming mode.

        Args:
            request (RunAgentRequest): The request to run the agent with.

        Yields:
            RunAgentResponseChunk: Chunks of the agent's response.
        """
        with self._client.stream(
            "POST",
            self._base_url + "/v1/agent/run",
            json=request.to_dict(),
            headers=self._headers(),
        ) as response:
            for line in response.iter_lines():
                line = str(line)
                if line.startswith("[DONE]"):
                    break
                if not line.startswith("data: "):
                    continue
                line = line[6:]
                if line:
                    chunk = RunAgentResponseChunk.model_validate_json(line)
                    yield chunk.root

    def __run_non_streaming(self, request: RunAgentRequest) -> AgentOutput:
        """Run agent in non-streaming mode.

        Args:
            request (RunAgentRequest): The request to run the agent with.

        Returns:
            AgentOutput: The agent's output.
        """
        final_chunk = None

        with self._client.stream(
            "POST",
            self._base_url + "/v1/agent/run",
            json=request.to_dict(),
            headers=self._headers(),
        ) as response:
            for line in response.iter_lines():
                line = str(line)
                if line.startswith("[DONE]"):
                    break
                if not line.startswith("data: "):
                    continue
                line = line[6:]
                if line:
                    chunk = RunAgentResponseChunk.model_validate_json(line)
                    if chunk.root.chunkType == "finalOutput":
                        final_chunk = chunk.root

        return final_chunk.content if final_chunk is not None else AgentOutput()
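
The `run()` overloads above resolve to either a full `AgentOutput` (non-streaming) or a generator of `RunAgentResponseChunk` objects, depending on `stream`. A usage sketch, assuming `agent` is constructed as in the earlier illustrative snippet:

```python
# Illustrative only. Non-streaming: blocks until the "finalOutput" chunk and
# returns its content as an AgentOutput.
output = agent.run(prompt="Find the latest release notes for lmnr")

# Streaming: iterate over RunAgentResponseChunk objects as they arrive.
for chunk in agent.run(prompt="Find the latest release notes for lmnr", stream=True):
    print(type(chunk).__name__)
```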
lmnr/sdk/client/synchronous/resources/base.py (new file)
@@ -0,0 +1,32 @@
"""Base class for resource objects."""

import httpx


class BaseResource:
    """Base class for all API resources."""

    def __init__(self, client: httpx.Client, base_url: str, project_api_key: str):
        """Initialize the resource.

        Args:
            client (httpx.Client): HTTP client instance
            base_url (str): Base URL for the API
            project_api_key (str): Project API key
        """
        self._client = client
        self._base_url = base_url
        self._project_api_key = project_api_key

    def _headers(self) -> dict[str, str]:
        """Generate request headers with authentication.

        Returns:
            dict[str, str]: Headers dictionary
        """
        assert self._project_api_key is not None, "Project API key is not set"
        return {
            "Authorization": "Bearer " + self._project_api_key,
            "Content-Type": "application/json",
            "Accept": "application/json",
        }
lmnr/sdk/client/synchronous/resources/browser_events.py (new file)
@@ -0,0 +1,40 @@
"""Resource for sending browser events."""

import gzip
import json

from lmnr.sdk.client.synchronous.resources.base import BaseResource

from lmnr.version import PYTHON_VERSION, __version__


class BrowserEvents(BaseResource):
    """Resource for sending browser events."""

    def send(
        self,
        session_id: str,
        trace_id: str,
        events: list[dict],
    ):
        url = self._base_url + "/v1/browser-sessions/events"
        payload = {
            "sessionId": session_id,
            "traceId": trace_id,
            "events": events,
            "source": f"python@{PYTHON_VERSION}",
            "sdkVersion": __version__,
        }
        compressed_payload = gzip.compress(json.dumps(payload).encode("utf-8"))
        response = self._client.post(
            url,
            content=compressed_payload,
            headers={
                **self._headers(),
                "Content-Encoding": "gzip",
            },
        )
        if response.status_code != 200:
            raise ValueError(
                f"Failed to send events: [{response.status_code}] {response.text}"
            )
lmnr/sdk/client/synchronous/resources/evals.py (new file)
@@ -0,0 +1,102 @@
"""Evals resource for interacting with Laminar evaluations API."""

import uuid
import urllib.parse
from typing import Optional

from lmnr.sdk.client.synchronous.resources.base import BaseResource
from lmnr.sdk.types import (
    InitEvaluationResponse,
    EvaluationResultDatapoint,
    GetDatapointsResponse,
)


class Evals(BaseResource):
    """Resource for interacting with Laminar evaluations API."""

    def init(
        self, name: Optional[str] = None, group_name: Optional[str] = None
    ) -> InitEvaluationResponse:
        """Initialize a new evaluation.

        Args:
            name (Optional[str], optional): Name of the evaluation. Defaults to None.
            group_name (Optional[str], optional): Group name for the evaluation. Defaults to None.

        Returns:
            InitEvaluationResponse: The response from the initialization request.
        """
        response = self._client.post(
            self._base_url + "/v1/evals",
            json={
                "name": name,
                "groupName": group_name,
            },
            headers=self._headers(),
        )
        resp_json = response.json()
        return InitEvaluationResponse.model_validate(resp_json)

    def save_datapoints(
        self,
        eval_id: uuid.UUID,
        datapoints: list[EvaluationResultDatapoint],
        group_name: Optional[str] = None,
    ):
        """Save evaluation datapoints.

        Args:
            eval_id (uuid.UUID): The evaluation ID.
            datapoints (list[EvaluationResultDatapoint]): The datapoints to save.
            group_name (Optional[str], optional): Group name for the datapoints. Defaults to None.

        Raises:
            ValueError: If there's an error saving the datapoints.
        """
        response = self._client.post(
            self._base_url + f"/v1/evals/{eval_id}/datapoints",
            json={
                "points": [datapoint.to_dict() for datapoint in datapoints],
                "groupName": group_name,
            },
            headers=self._headers(),
        )
        if response.status_code != 200:
            raise ValueError(f"Error saving evaluation datapoints: {response.text}")

    def get_datapoints(
        self,
        dataset_name: str,
        offset: int,
        limit: int,
    ) -> GetDatapointsResponse:
        """Get datapoints from a dataset.

        Args:
            dataset_name (str): The name of the dataset.
            offset (int): The offset to start from.
            limit (int): The maximum number of datapoints to return.

        Returns:
            GetDatapointsResponse: The response containing the datapoints.

        Raises:
            ValueError: If there's an error fetching the datapoints.
        """
        params = {"name": dataset_name, "offset": offset, "limit": limit}
        url = (
            self._base_url + "/v1/datasets/datapoints?" + urllib.parse.urlencode(params)
        )
        response = self._client.get(url, headers=self._headers())
        if response.status_code != 200:
            try:
                resp_json = response.json()
                raise ValueError(
                    f"Error fetching datapoints: [{response.status_code}] {resp_json}"
                )
            except Exception:
                raise ValueError(
                    f"Error fetching datapoints: [{response.status_code}] {response.text}"
                )
        return GetDatapointsResponse.model_validate(response.json())
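
A short illustrative flow for the Evals resource (not part of the diff; `evals` constructed as in the first sketch, and the `id` field on `InitEvaluationResponse` is assumed here):

```python
# Illustrative only.
evaluation = evals.init(name="my-eval", group_name="default")

# Fetch a page of datapoints from a dataset created in the Laminar UI.
page = evals.get_datapoints(dataset_name="my-dataset", offset=0, limit=10)

# Results of an evaluation run would then be saved back, e.g.:
# evals.save_datapoints(evaluation.id, datapoints)  # datapoints: list[EvaluationResultDatapoint]
```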
lmnr/sdk/client/synchronous/resources/pipeline.py (new file)
@@ -0,0 +1,89 @@
"""Pipeline resource for running Laminar pipelines."""

import uuid
from typing import Optional
from opentelemetry import trace

from lmnr.sdk.client.synchronous.resources.base import BaseResource
from lmnr.sdk.types import (
    NodeInput,
    PipelineRunError,
    PipelineRunRequest,
    PipelineRunResponse,
)


class Pipeline(BaseResource):
    """Resource for interacting with Laminar pipelines."""

    def run(
        self,
        pipeline: str,
        inputs: dict[str, NodeInput],
        env: dict[str, str] = {},
        metadata: dict[str, str] = {},
        parent_span_id: Optional[uuid.UUID] = None,
        trace_id: Optional[uuid.UUID] = None,
    ) -> PipelineRunResponse:
        """Run a pipeline with the given inputs and environment variables.

        Args:
            pipeline (str): pipeline name
            inputs (dict[str, NodeInput]): input values for the pipeline
            env (dict[str, str], optional): environment variables for the pipeline
            metadata (dict[str, str], optional): metadata for the pipeline run
            parent_span_id (Optional[uuid.UUID], optional): parent span id for the pipeline
            trace_id (Optional[uuid.UUID], optional): trace id for the pipeline

        Raises:
            ValueError: if the project API key is not set
            PipelineRunError: if the pipeline run fails

        Returns:
            PipelineRunResponse: response from the pipeline run
        """
        if self._project_api_key is None:
            raise ValueError(
                "Please initialize the Laminar object with your project "
                "API key or set the LMNR_PROJECT_API_KEY environment variable"
            )

        current_span = trace.get_current_span()
        if current_span != trace.INVALID_SPAN:
            parent_span_id = parent_span_id or uuid.UUID(
                int=current_span.get_span_context().span_id
            )
            trace_id = trace_id or uuid.UUID(
                int=current_span.get_span_context().trace_id
            )

        request = PipelineRunRequest(
            inputs=inputs,
            pipeline=pipeline,
            env=env or {},
            metadata=metadata,
            parent_span_id=parent_span_id,
            trace_id=trace_id,
        )

        response = self._client.post(
            self._base_url + "/v1/pipeline/run",
            json=request.to_dict(),
            headers=self._headers(),
        )

        if response.status_code != 200:
            raise PipelineRunError(response)

        try:
            from pydantic.alias_generators import to_snake

            resp_json = response.json()
            keys = list(resp_json.keys())
            for key in keys:
                value = resp_json[key]
                del resp_json[key]
                resp_json[to_snake(key)] = value
            return PipelineRunResponse(**resp_json)
        except Exception:
            raise PipelineRunError(response)
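
For reference, a hedged usage sketch of the Pipeline resource (not part of the diff; the pipeline name and input key are hypothetical, and `client`/`base_url`/`api_key` come from the first sketch — string values are assumed to be valid `NodeInput`s):

```python
# Illustrative only.
from lmnr.sdk.client.synchronous.resources.pipeline import Pipeline

pipelines = Pipeline(client, base_url, api_key)
result = pipelines.run(
    pipeline="my-pipeline",  # hypothetical pipeline name from the Laminar UI
    inputs={"user_query": "What changed in lmnr 0.5.1?"},
    env={},  # provider API keys the pipeline's nodes need, if any
)
# result is a PipelineRunResponse (raises PipelineRunError on non-200 responses)
```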
lmnr/sdk/client/synchronous/resources/semantic_search.py (new file)
@@ -0,0 +1,60 @@
"""SemanticSearch resource for interacting with Laminar semantic search API."""

import uuid
from typing import Optional

from lmnr.sdk.client.synchronous.resources.base import BaseResource
from lmnr.sdk.types import (
    SemanticSearchRequest,
    SemanticSearchResponse,
)


class SemanticSearch(BaseResource):
    """Resource for interacting with Laminar semantic search API."""

    def search(
        self,
        query: str,
        dataset_id: uuid.UUID,
        limit: Optional[int] = None,
        threshold: Optional[float] = None,
    ) -> SemanticSearchResponse:
        """Perform a semantic search on the given dataset.

        Args:
            query (str): query to search for
            dataset_id (uuid.UUID): dataset ID created in the UI
            limit (Optional[int], optional): maximum number of results to return
            threshold (Optional[float], optional): lowest similarity score to return

        Raises:
            ValueError: if an error happens while performing the semantic search

        Returns:
            SemanticSearchResponse: response from the semantic search
        """
        request = SemanticSearchRequest(
            query=query,
            dataset_id=dataset_id,
            limit=limit,
            threshold=threshold,
        )
        response = self._client.post(
            self._base_url + "/v1/semantic-search",
            json=request.to_dict(),
            headers=self._headers(),
        )
        if response.status_code != 200:
            raise ValueError(
                f"Error performing semantic search: [{response.status_code}] {response.text}"
            )
        try:
            resp_json = response.json()
            for result in resp_json["results"]:
                result["dataset_id"] = uuid.UUID(result["datasetId"])
            return SemanticSearchResponse(**resp_json)
        except Exception as e:
            raise ValueError(
                f"Error parsing semantic search response: status={response.status_code} error={e}"
            )
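
Finally, a hedged usage sketch of the SemanticSearch resource (not part of the diff; the dataset UUID is a placeholder, and `client`/`base_url`/`api_key` come from the first sketch):

```python
# Illustrative only.
import uuid
from lmnr.sdk.client.synchronous.resources.semantic_search import SemanticSearch

search = SemanticSearch(client, base_url, api_key)
results = search.search(
    query="how do I configure tracing?",
    dataset_id=uuid.UUID("00000000-0000-0000-0000-000000000000"),  # placeholder dataset ID from the UI
    limit=5,
    threshold=0.5,
)
# results is a SemanticSearchResponse; a ValueError is raised on non-200 responses
```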