lmnr 0.4.66__py3-none-any.whl → 0.5.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. lmnr/__init__.py +30 -0
  2. lmnr/openllmetry_sdk/__init__.py +4 -16
  3. lmnr/openllmetry_sdk/tracing/attributes.py +0 -1
  4. lmnr/openllmetry_sdk/tracing/tracing.py +30 -10
  5. lmnr/sdk/browser/browser_use_otel.py +4 -4
  6. lmnr/sdk/browser/playwright_otel.py +299 -228
  7. lmnr/sdk/browser/pw_utils.py +289 -0
  8. lmnr/sdk/browser/utils.py +18 -53
  9. lmnr/sdk/client/asynchronous/async_client.py +157 -0
  10. lmnr/sdk/client/asynchronous/resources/__init__.py +13 -0
  11. lmnr/sdk/client/asynchronous/resources/agent.py +220 -0
  12. lmnr/sdk/client/asynchronous/resources/base.py +32 -0
  13. lmnr/sdk/client/asynchronous/resources/browser_events.py +40 -0
  14. lmnr/sdk/client/asynchronous/resources/evals.py +64 -0
  15. lmnr/sdk/client/asynchronous/resources/pipeline.py +89 -0
  16. lmnr/sdk/client/asynchronous/resources/semantic_search.py +60 -0
  17. lmnr/sdk/client/synchronous/resources/__init__.py +7 -0
  18. lmnr/sdk/client/synchronous/resources/agent.py +215 -0
  19. lmnr/sdk/client/synchronous/resources/base.py +32 -0
  20. lmnr/sdk/client/synchronous/resources/browser_events.py +40 -0
  21. lmnr/sdk/client/synchronous/resources/evals.py +102 -0
  22. lmnr/sdk/client/synchronous/resources/pipeline.py +89 -0
  23. lmnr/sdk/client/synchronous/resources/semantic_search.py +60 -0
  24. lmnr/sdk/client/synchronous/sync_client.py +170 -0
  25. lmnr/sdk/datasets.py +7 -2
  26. lmnr/sdk/evaluations.py +59 -35
  27. lmnr/sdk/laminar.py +34 -174
  28. lmnr/sdk/types.py +124 -23
  29. lmnr/sdk/utils.py +10 -0
  30. lmnr/version.py +6 -6
  31. {lmnr-0.4.66.dist-info → lmnr-0.5.1.dist-info}/METADATA +88 -38
  32. lmnr-0.5.1.dist-info/RECORD +55 -0
  33. {lmnr-0.4.66.dist-info → lmnr-0.5.1.dist-info}/WHEEL +1 -1
  34. lmnr/sdk/client.py +0 -313
  35. lmnr-0.4.66.dist-info/RECORD +0 -39
  36. {lmnr-0.4.66.dist-info → lmnr-0.5.1.dist-info}/LICENSE +0 -0
  37. {lmnr-0.4.66.dist-info → lmnr-0.5.1.dist-info}/entry_points.txt +0 -0
lmnr/sdk/client/asynchronous/resources/agent.py (new file, +220)
@@ -0,0 +1,220 @@
+"""Agent resource for interacting with Laminar agents."""
+
+from typing import (
+    AsyncGenerator,
+    AsyncIterator,
+    Awaitable,
+    Literal,
+    Optional,
+    Union,
+    overload,
+)
+import uuid
+
+from lmnr.sdk.client.asynchronous.resources.base import BaseAsyncResource
+from lmnr.sdk.types import (
+    AgentOutput,
+    LaminarSpanContext,
+    ModelProvider,
+    RunAgentRequest,
+    RunAgentResponseChunk,
+)
+
+from opentelemetry import trace
+
+
+class AsyncAgent(BaseAsyncResource):
+    """Resource for interacting with Laminar agents."""
+
+    @overload
+    async def run(
+        self,
+        prompt: str,
+        stream: Literal[True],
+        parent_span_context: Optional[Union[LaminarSpanContext, str]] = None,
+        model_provider: Optional[ModelProvider] = None,
+        model: Optional[str] = None,
+        enable_thinking: bool = True,
+        return_screenshots: bool = False,
+    ) -> AsyncIterator[RunAgentResponseChunk]:
+        """Run Laminar Index agent in streaming mode.
+
+        Args:
+            prompt (str): prompt for the agent
+            stream (Literal[True]): whether to stream the agent's response
+            parent_span_context (Optional[Union[LaminarSpanContext, str]], optional): span context if the agent is part of a trace
+            model_provider (Optional[ModelProvider], optional): LLM model provider
+            model (Optional[str], optional): LLM model name
+            enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Defaults to True.
+            return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Defaults to False.
+
+        Returns:
+            AsyncIterator[RunAgentResponseChunk]: a generator of response chunks
+        """
+        pass
+
+    @overload
+    async def run(
+        self,
+        prompt: str,
+        parent_span_context: Optional[Union[LaminarSpanContext, str]] = None,
+        model_provider: Optional[ModelProvider] = None,
+        model: Optional[str] = None,
+        enable_thinking: bool = True,
+        return_screenshots: bool = False,
+    ) -> AgentOutput:
+        """Run Laminar Index agent.
+
+        Args:
+            prompt (str): prompt for the agent
+            parent_span_context (Optional[Union[LaminarSpanContext, str]], optional): span context if the agent is part of a trace
+            model_provider (Optional[ModelProvider], optional): LLM model provider
+            model (Optional[str], optional): LLM model name
+            enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Defaults to True.
+            return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Defaults to False.
+
+        Returns:
+            AgentOutput: agent output
+        """
+        pass
+
+    @overload
+    async def run(
+        self,
+        prompt: str,
+        parent_span_context: Optional[Union[LaminarSpanContext, str]] = None,
+        model_provider: Optional[ModelProvider] = None,
+        model: Optional[str] = None,
+        stream: Literal[False] = False,
+        enable_thinking: bool = True,
+        return_screenshots: bool = False,
+    ) -> AgentOutput:
+        """Run Laminar Index agent.
+
+        Args:
+            prompt (str): prompt for the agent
+            parent_span_context (Optional[Union[LaminarSpanContext, str]], optional): span context if the agent is part of a trace
+            model_provider (Optional[ModelProvider], optional): LLM model provider
+            model (Optional[str], optional): LLM model name
+            stream (Literal[False], optional): whether to stream the agent's response
+            enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Defaults to True.
+            return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Defaults to False.
+
+        Returns:
+            AgentOutput: agent output
+        """
+        pass
+
+    async def run(
+        self,
+        prompt: str,
+        parent_span_context: Optional[Union[LaminarSpanContext, str]] = None,
+        model_provider: Optional[ModelProvider] = None,
+        model: Optional[str] = None,
+        stream: bool = False,
+        enable_thinking: bool = True,
+        return_screenshots: bool = False,
+    ) -> Union[AgentOutput, Awaitable[AsyncIterator[RunAgentResponseChunk]]]:
+        """Run Laminar Index agent.
+
+        Args:
+            prompt (str): prompt for the agent
+            parent_span_context (Optional[Union[LaminarSpanContext, str]], optional): span context if the agent is part of a trace
+            model_provider (Optional[ModelProvider], optional): LLM model provider
+            model (Optional[str], optional): LLM model name
+            stream (bool, optional): whether to stream the agent's response
+            enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Defaults to True.
+            return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Defaults to False.
+
+        Returns:
+            Union[AgentOutput, AsyncIterator[RunAgentResponseChunk]]: agent output or a generator of response chunks
+        """
+        if parent_span_context is None:
+            span = trace.get_current_span()
+            if span != trace.INVALID_SPAN:
+                parent_span_context = LaminarSpanContext(
+                    trace_id=uuid.UUID(int=span.get_span_context().trace_id),
+                    span_id=uuid.UUID(int=span.get_span_context().span_id),
+                    is_remote=span.get_span_context().is_remote,
+                )
+        if parent_span_context is not None and isinstance(
+            parent_span_context, LaminarSpanContext
+        ):
+            parent_span_context = str(parent_span_context)
+        request = RunAgentRequest(
+            prompt=prompt,
+            parent_span_context=parent_span_context,
+            model_provider=model_provider,
+            model=model,
+            # We always connect to stream, because our TLS listeners on AWS
+            # Network Load Balancers have a hard fixed idle timeout of 350 seconds.
+            # This means that if we don't stream, the connection will be closed.
+            # For now, we just return the content of the final chunk if `stream` is
+            # `False`.
+            # https://aws.amazon.com/blogs/networking-and-content-delivery/introducing-nlb-tcp-configurable-idle-timeout/
+            stream=True,
+            enable_thinking=enable_thinking,
+            return_screenshots=return_screenshots,
+        )
+
+        # For the streaming case, return the generator function
+        if stream:
+            return self.__run_streaming(request)
+        else:
+            # For the non-streaming case, process all chunks and return the final result
+            return await self.__run_non_streaming(request)
+
+    async def __run_streaming(
+        self, request: RunAgentRequest
+    ) -> AsyncGenerator[RunAgentResponseChunk, None]:
+        """Run agent in streaming mode.
+
+        Args:
+            request (RunAgentRequest): The request to run the agent with.
+
+        Yields:
+            RunAgentResponseChunk: Chunks of the agent's response.
+        """
+        async with self._client.stream(
+            "POST",
+            self._base_url + "/v1/agent/run",
+            json=request.to_dict(),
+            headers=self._headers(),
+        ) as response:
+            async for line in response.aiter_lines():
+                line = str(line)
+                if line.startswith("[DONE]"):
+                    break
+                if not line.startswith("data: "):
+                    continue
+                line = line[6:]
+                if line:
+                    chunk = RunAgentResponseChunk.model_validate_json(line)
+                    yield chunk.root
+
+    async def __run_non_streaming(self, request: RunAgentRequest) -> AgentOutput:
+        """Run agent in non-streaming mode.
+
+        Args:
+            request (RunAgentRequest): The request to run the agent with.
+
+        Returns:
+            AgentOutput: The agent's output.
+        """
+        final_chunk = None
+
+        async with self._client.stream(
+            "POST",
+            self._base_url + "/v1/agent/run",
+            json=request.to_dict(),
+            headers=self._headers(),
+        ) as response:
+            async for line in response.aiter_lines():
+                line = str(line)
+                if line.startswith("[DONE]"):
+                    break
+                if not line.startswith("data: "):
+                    continue
+                line = line[6:]
+                if line:
+                    chunk = RunAgentResponseChunk.model_validate_json(line)
+                    if chunk.root.chunkType == "finalOutput":
+                        final_chunk = chunk.root
+
+        return final_chunk.content if final_chunk is not None else AgentOutput()
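Even the non-streaming path consumes the SSE stream internally; only the public return type changes. A minimal usage sketch of both modes, assuming the async client from async_client.py is exported as `AsyncLaminarClient` and exposes this resource as an `agent` attribute (both names are assumptions, not shown in this hunk):

import asyncio

from lmnr import AsyncLaminarClient  # assumed export

async def main():
    client = AsyncLaminarClient(project_api_key="<project-api-key>")

    # Non-streaming: resolves to the content of the "finalOutput" chunk.
    output = await client.agent.run(prompt="Find the docs page of lmnr.ai")
    print(output)

    # Streaming: run() is awaited as usual, and the awaited value is an
    # async generator of RunAgentResponseChunk.
    chunks = await client.agent.run(
        prompt="Find the docs page of lmnr.ai",
        stream=True,
    )
    async for chunk in chunks:
        print(chunk)

asyncio.run(main())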
lmnr/sdk/client/asynchronous/resources/base.py (new file, +32)
@@ -0,0 +1,32 @@
+"""Base class for resource objects."""
+
+import httpx
+
+
+class BaseAsyncResource:
+    """Base class for all API resources."""
+
+    def __init__(self, client: httpx.AsyncClient, base_url: str, project_api_key: str):
+        """Initialize the resource.
+
+        Args:
+            client (httpx.AsyncClient): HTTP client instance
+            base_url (str): Base URL for the API
+            project_api_key (str): Project API key
+        """
+        self._client = client
+        self._base_url = base_url
+        self._project_api_key = project_api_key
+
+    def _headers(self) -> dict[str, str]:
+        """Generate request headers with authentication.
+
+        Returns:
+            dict[str, str]: Headers dictionary
+        """
+        assert self._project_api_key is not None, "Project API key is not set"
+        return {
+            "Authorization": "Bearer " + self._project_api_key,
+            "Content-Type": "application/json",
+            "Accept": "application/json",
+        }
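Every resource in this package inherits the shared `httpx.AsyncClient`, base URL, and bearer-token headers from this class. A hypothetical subclass, purely to illustrate the pattern (the `/health` endpoint is invented and not part of this diff):

from lmnr.sdk.client.asynchronous.resources.base import BaseAsyncResource

class AsyncHealth(BaseAsyncResource):  # hypothetical, for illustration only
    async def check(self) -> bool:
        # _client, _base_url, and _headers() all come from BaseAsyncResource.
        response = await self._client.get(
            self._base_url + "/health",  # invented endpoint
            headers=self._headers(),
        )
        return response.status_code == 200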
lmnr/sdk/client/asynchronous/resources/browser_events.py (new file, +40)
@@ -0,0 +1,40 @@
+"""Resource for sending browser events."""
+
+import gzip
+import json
+
+from lmnr.sdk.client.asynchronous.resources.base import BaseAsyncResource
+
+from lmnr.version import PYTHON_VERSION, __version__
+
+
+class AsyncBrowserEvents(BaseAsyncResource):
+    """Resource for sending browser events."""
+
+    async def send(
+        self,
+        session_id: str,
+        trace_id: str,
+        events: list[dict],
+    ):
+        url = self._base_url + "/v1/browser-sessions/events"
+        payload = {
+            "sessionId": session_id,
+            "traceId": trace_id,
+            "events": events,
+            "source": f"python@{PYTHON_VERSION}",
+            "sdkVersion": __version__,
+        }
+        compressed_payload = gzip.compress(json.dumps(payload).encode("utf-8"))
+        response = await self._client.post(
+            url,
+            content=compressed_payload,
+            headers={
+                **self._headers(),
+                "Content-Encoding": "gzip",
+            },
+        )
+        if response.status_code != 200:
+            raise ValueError(
+                f"Failed to send events: [{response.status_code}] {response.text}"
+            )
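Note that the payload is gzip-compressed before POSTing, with Content-Encoding set to match. A sketch of calling it with the resource constructed by hand; the base URL is an assumption, and in normal use the async client wires this up:

import asyncio
import httpx

from lmnr.sdk.client.asynchronous.resources.browser_events import AsyncBrowserEvents

async def main():
    http = httpx.AsyncClient()
    browser_events = AsyncBrowserEvents(
        http, "https://api.lmnr.ai", "<project-api-key>"  # base URL assumed
    )
    await browser_events.send(
        session_id="<browser-session-id>",
        trace_id="<trace-id>",
        events=[{"type": 4, "data": {}, "timestamp": 0}],  # illustrative event dict
    )
    await http.aclose()

asyncio.run(main())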
lmnr/sdk/client/asynchronous/resources/evals.py (new file, +64)
@@ -0,0 +1,64 @@
+"""Evals resource for interacting with Laminar evaluations API."""
+
+import uuid
+from typing import Optional
+
+from lmnr.sdk.client.asynchronous.resources.base import BaseAsyncResource
+from lmnr.sdk.types import (
+    InitEvaluationResponse,
+    EvaluationResultDatapoint,
+)
+
+
+class AsyncEvals(BaseAsyncResource):
+    """Resource for interacting with Laminar evaluations API."""
+
+    async def init(
+        self, name: Optional[str] = None, group_name: Optional[str] = None
+    ) -> InitEvaluationResponse:
+        """Initialize a new evaluation.
+
+        Args:
+            name (Optional[str], optional): Name of the evaluation. Defaults to None.
+            group_name (Optional[str], optional): Group name for the evaluation. Defaults to None.
+
+        Returns:
+            InitEvaluationResponse: The response from the initialization request.
+        """
+        response = await self._client.post(
+            self._base_url + "/v1/evals",
+            json={
+                "name": name,
+                "groupName": group_name,
+            },
+            headers=self._headers(),
+        )
+        resp_json = response.json()
+        return InitEvaluationResponse.model_validate(resp_json)
+
+    async def save_datapoints(
+        self,
+        eval_id: uuid.UUID,
+        datapoints: list[EvaluationResultDatapoint],
+        group_name: Optional[str] = None,
+    ):
+        """Save evaluation datapoints.
+
+        Args:
+            eval_id (uuid.UUID): The evaluation ID.
+            datapoints (list[EvaluationResultDatapoint]): The datapoints to save.
+            group_name (Optional[str], optional): Group name for the datapoints. Defaults to None.
+
+        Raises:
+            ValueError: If there's an error saving the datapoints.
+        """
+        response = await self._client.post(
+            self._base_url + f"/v1/evals/{eval_id}/datapoints",
+            json={
+                "points": [datapoint.to_dict() for datapoint in datapoints],
+                "groupName": group_name,
+            },
+            headers=self._headers(),
+        )
+        if response.status_code != 200:
+            raise ValueError(f"Error saving evaluation datapoints: {response.text}")
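The two methods are meant to be called in sequence: `init` registers the evaluation and `save_datapoints` appends results to it. A sketch, inside the same kind of async context as the agent example above, assuming the async client exposes this resource as `evals` and that `InitEvaluationResponse` carries the new evaluation's `id` (both are assumptions):

evaluation = await client.evals.init(name="summarizer-eval", group_name="baseline")
await client.evals.save_datapoints(
    eval_id=evaluation.id,  # assumed field on InitEvaluationResponse
    datapoints=datapoints,  # a list[EvaluationResultDatapoint] built elsewhere
    group_name="baseline",
)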
lmnr/sdk/client/asynchronous/resources/pipeline.py (new file, +89)
@@ -0,0 +1,89 @@
+"""Pipeline resource for running Laminar pipelines."""
+
+import uuid
+from typing import Optional
+from opentelemetry import trace
+
+from lmnr.sdk.client.asynchronous.resources.base import BaseAsyncResource
+from lmnr.sdk.types import (
+    NodeInput,
+    PipelineRunError,
+    PipelineRunRequest,
+    PipelineRunResponse,
+)
+
+
+class AsyncPipeline(BaseAsyncResource):
+    """Resource for interacting with Laminar pipelines."""
+
+    async def run(
+        self,
+        pipeline: str,
+        inputs: dict[str, NodeInput],
+        env: dict[str, str] = {},
+        metadata: dict[str, str] = {},
+        parent_span_id: Optional[uuid.UUID] = None,
+        trace_id: Optional[uuid.UUID] = None,
+    ) -> PipelineRunResponse:
+        """Run a pipeline with the given inputs and environment variables.
+
+        Args:
+            pipeline (str): pipeline name
+            inputs (dict[str, NodeInput]): input values for the pipeline
+            env (dict[str, str], optional): environment variables for the pipeline
+            metadata (dict[str, str], optional): metadata for the pipeline run
+            parent_span_id (Optional[uuid.UUID], optional): parent span id for the pipeline
+            trace_id (Optional[uuid.UUID], optional): trace id for the pipeline
+
+        Raises:
+            ValueError: if the project API key is not set
+            PipelineRunError: if the pipeline run fails
+
+        Returns:
+            PipelineRunResponse: response from the pipeline run
+        """
+        if self._project_api_key is None:
+            raise ValueError(
+                "Please initialize the Laminar object with your project "
+                "API key or set the LMNR_PROJECT_API_KEY environment variable"
+            )
+
+        current_span = trace.get_current_span()
+        if current_span != trace.INVALID_SPAN:
+            parent_span_id = parent_span_id or uuid.UUID(
+                int=current_span.get_span_context().span_id
+            )
+            trace_id = trace_id or uuid.UUID(
+                int=current_span.get_span_context().trace_id
+            )
+
+        request = PipelineRunRequest(
+            inputs=inputs,
+            pipeline=pipeline,
+            env=env or {},
+            metadata=metadata,
+            parent_span_id=parent_span_id,
+            trace_id=trace_id,
+        )
+
+        response = await self._client.post(
+            self._base_url + "/v1/pipeline/run",
+            json=request.to_dict(),
+            headers=self._headers(),
+        )
+
+        if response.status_code != 200:
+            raise PipelineRunError(response)
+
+        try:
+            from pydantic.alias_generators import to_snake
+
+            resp_json = response.json()
+            keys = list(resp_json.keys())
+            for key in keys:
+                value = resp_json[key]
+                del resp_json[key]
+                resp_json[to_snake(key)] = value
+            return PipelineRunResponse(**resp_json)
+        except Exception:
+            raise PipelineRunError(response)
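Two details worth noting: if a span is active, the run is attached to the current trace unless explicit ids are passed, and the response keys are rewritten from the backend's camelCase to snake_case before `PipelineRunResponse` is constructed. A sketch of a call, again assuming a `pipeline` attribute on the async client:

result = await client.pipeline.run(
    pipeline="my-pipeline",
    inputs={"question": "What does Laminar do?"},
    env={"OPENAI_API_KEY": "<key>"},  # provider keys the pipeline's nodes need
    metadata={"requested_by": "docs-example"},
)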
lmnr/sdk/client/asynchronous/resources/semantic_search.py (new file, +60)
@@ -0,0 +1,60 @@
+"""SemanticSearch resource for interacting with Laminar semantic search API."""
+
+import uuid
+from typing import Optional
+
+from lmnr.sdk.client.asynchronous.resources.base import BaseAsyncResource
+from lmnr.sdk.types import (
+    SemanticSearchRequest,
+    SemanticSearchResponse,
+)
+
+
+class AsyncSemanticSearch(BaseAsyncResource):
+    """Resource for interacting with Laminar semantic search API."""
+
+    async def search(
+        self,
+        query: str,
+        dataset_id: uuid.UUID,
+        limit: Optional[int] = None,
+        threshold: Optional[float] = None,
+    ) -> SemanticSearchResponse:
+        """Perform a semantic search on the given dataset.
+
+        Args:
+            query (str): query to search for
+            dataset_id (uuid.UUID): dataset ID created in the UI
+            limit (Optional[int], optional): maximum number of results to return
+            threshold (Optional[float], optional): lowest similarity score to return
+
+        Raises:
+            ValueError: if an error happens while performing the semantic search
+
+        Returns:
+            SemanticSearchResponse: response from the semantic search
+        """
+        request = SemanticSearchRequest(
+            query=query,
+            dataset_id=dataset_id,
+            limit=limit,
+            threshold=threshold,
+        )
+        response = await self._client.post(
+            self._base_url + "/v1/semantic-search",
+            json=request.to_dict(),
+            headers=self._headers(),
+        )
+        if response.status_code != 200:
+            raise ValueError(
+                f"Error performing semantic search: [{response.status_code}] {response.text}"
+            )
+        try:
+            resp_json = response.json()
+            for result in resp_json["results"]:
+                result["dataset_id"] = uuid.UUID(result["datasetId"])
+            return SemanticSearchResponse(**resp_json)
+        except Exception as e:
+            raise ValueError(
+                f"Error parsing semantic search response: status={response.status_code} error={e}"
+            )
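A sketch of a call, assuming a `semantic_search` attribute on the async client; the dataset UUID is a placeholder for an ID created in the Laminar UI. The `results` field on the response is grounded in the parsing code above, which iterates `resp_json["results"]`:

import uuid

response = await client.semantic_search.search(
    query="How do refunds work?",
    dataset_id=uuid.UUID("00000000-0000-0000-0000-000000000000"),  # placeholder
    limit=5,
    threshold=0.5,
)
for result in response.results:
    print(result)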
lmnr/sdk/client/synchronous/resources/__init__.py (new file, +7)
@@ -0,0 +1,7 @@
+from lmnr.sdk.client.synchronous.resources.agent import Agent
+from lmnr.sdk.client.synchronous.resources.browser_events import BrowserEvents
+from lmnr.sdk.client.synchronous.resources.evals import Evals
+from lmnr.sdk.client.synchronous.resources.pipeline import Pipeline
+from lmnr.sdk.client.synchronous.resources.semantic_search import SemanticSearch
+
+__all__ = ["Pipeline", "SemanticSearch", "Agent", "Evals", "BrowserEvents"]