lmnr 0.6.3__py3-none-any.whl → 0.6.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. lmnr/cli.py +1 -2
  2. lmnr/opentelemetry_lib/__init__.py +6 -7
  3. lmnr/opentelemetry_lib/decorators/__init__.py +9 -9
  4. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py +2 -2
  5. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/config.py +4 -4
  6. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py +19 -19
  7. lmnr/opentelemetry_lib/tracing/__init__.py +6 -7
  8. lmnr/opentelemetry_lib/tracing/_instrument_initializers.py +32 -33
  9. lmnr/opentelemetry_lib/tracing/context_properties.py +2 -3
  10. lmnr/opentelemetry_lib/tracing/exporter.py +4 -5
  11. lmnr/opentelemetry_lib/tracing/instruments.py +5 -6
  12. lmnr/opentelemetry_lib/tracing/processor.py +6 -7
  13. lmnr/sdk/browser/utils.py +4 -3
  14. lmnr/sdk/client/asynchronous/async_client.py +26 -11
  15. lmnr/sdk/client/asynchronous/resources/__init__.py +2 -0
  16. lmnr/sdk/client/asynchronous/resources/agent.py +89 -91
  17. lmnr/sdk/client/asynchronous/resources/evals.py +7 -8
  18. lmnr/sdk/client/asynchronous/resources/tags.py +89 -0
  19. lmnr/sdk/client/synchronous/resources/__init__.py +2 -1
  20. lmnr/sdk/client/synchronous/resources/agent.py +91 -91
  21. lmnr/sdk/client/synchronous/resources/evals.py +7 -8
  22. lmnr/sdk/client/synchronous/resources/tags.py +89 -0
  23. lmnr/sdk/client/synchronous/sync_client.py +28 -13
  24. lmnr/sdk/decorators.py +27 -29
  25. lmnr/sdk/evaluations.py +49 -46
  26. lmnr/sdk/laminar.py +67 -59
  27. lmnr/sdk/types.py +45 -39
  28. lmnr/sdk/utils.py +3 -3
  29. lmnr/version.py +1 -1
  30. {lmnr-0.6.3.dist-info → lmnr-0.6.4.dist-info}/METADATA +1 -1
  31. lmnr-0.6.4.dist-info/RECORD +56 -0
  32. lmnr-0.6.3.dist-info/RECORD +0 -54
  33. {lmnr-0.6.3.dist-info → lmnr-0.6.4.dist-info}/LICENSE +0 -0
  34. {lmnr-0.6.3.dist-info → lmnr-0.6.4.dist-info}/WHEEL +0 -0
  35. {lmnr-0.6.3.dist-info → lmnr-0.6.4.dist-info}/entry_points.txt +0 -0
lmnr/sdk/browser/utils.py CHANGED
@@ -1,7 +1,6 @@
  import asyncio
  import logging
  import time
- from typing import Union

  from lmnr.sdk.client.asynchronous.async_client import AsyncLaminarClient
  from lmnr.sdk.client.synchronous.sync_client import LaminarClient
@@ -24,13 +23,15 @@ def with_tracer_wrapper(func):
  def with_tracer_and_client_wrapper(func):
  """Helper for providing tracer and client for wrapper functions."""

- def _with_tracer(tracer, client: Union[LaminarClient, AsyncLaminarClient], to_wrap):
+ def _with_tracer_and_client(
+ tracer, client: LaminarClient | AsyncLaminarClient, to_wrap
+ ):
  def wrapper(wrapped, instance, args, kwargs):
  return func(tracer, client, to_wrap, wrapped, instance, args, kwargs)

  return wrapper

- return _with_tracer
+ return _with_tracer_and_client


  def retry_sync(func, retries=5, delay=0.5, error_message="Operation failed"):
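The renamed factory keeps the usual wrapt-style shape: decorating a function with `with_tracer_and_client_wrapper` yields a callable that takes `(tracer, client, to_wrap)` and returns a `(wrapped, instance, args, kwargs)` wrapper. A minimal illustrative sketch (the wrapped target and span name below are made up, not taken from the package):

```python
from lmnr.sdk.browser.utils import with_tracer_and_client_wrapper

@with_tracer_and_client_wrapper
def _wrap_example_call(tracer, client, to_wrap, wrapped, instance, args, kwargs):
    # Open a span around the original call; the name comes from the to_wrap config dict.
    with tracer.start_as_current_span(to_wrap.get("span_name", "example.call")):
        return wrapped(*args, **kwargs)

# _wrap_example_call(tracer, client, {"span_name": "example.call"}) now returns a
# wrapt-compatible wrapper that an instrumentor could register around a target method.
```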

lmnr/sdk/client/asynchronous/async_client.py CHANGED
@@ -4,13 +4,14 @@ Laminar HTTP client. Used to send data to/from the Laminar API.
  import httpx
  import re
- from typing import Optional, TypeVar
+ from typing import TypeVar
  from types import TracebackType

  from lmnr.sdk.client.asynchronous.resources import (
  AsyncAgent,
  AsyncBrowserEvents,
  AsyncEvals,
+ AsyncTags,
  )
  from lmnr.sdk.utils import from_env

@@ -24,17 +25,20 @@ class AsyncLaminarClient:

  def __init__(
  self,
- base_url: Optional[str] = None,
- project_api_key: Optional[str] = None,
- port: Optional[int] = None,
+ base_url: str | None = None,
+ project_api_key: str | None = None,
+ port: int | None = None,
  timeout: int = 3600,
  ):
  """Initializer for the Laminar HTTP client.

  Args:
- base_url (str): base URL of the Laminar API.
- project_api_key (str): Laminar project API key
- port (Optional[int], optional): port of the Laminar API HTTP server.\
+ base_url (str | None): base URL of the Laminar API. If not
+ provided, the LMNR_BASE_URL environment variable is used or we
+ default to "https://api.lmnr.ai".
+ project_api_key (str | None): Laminar project API key. If not
+ provided, the LMNR_PROJECT_API_KEY environment variable is used.
+ port (int | None, optional): port of the Laminar API HTTP server.\
  Overrides any port in the base URL.
  Defaults to None. If none is provided, the default port (443) will
  be used.
@@ -73,6 +77,7 @@ class AsyncLaminarClient:
  self.__browser_events = AsyncBrowserEvents(
  self.__client, self.__base_url, self.__project_api_key
  )
+ self.__tags = AsyncTags(self.__client, self.__base_url, self.__project_api_key)

  @property
  def agent(self) -> AsyncAgent:
@@ -101,6 +106,15 @@ class AsyncLaminarClient:
  """
  return self.__browser_events

+ @property
+ def tags(self) -> AsyncTags:
+ """Get the Tags resource.
+
+ Returns:
+ AsyncTags: The Tags resource instance.
+ """
+ return self.__tags
+
  def is_closed(self) -> bool:
  return self.__client.is_closed

@@ -109,16 +123,17 @@

  The client will *not* be usable after this.
  """
- await self.__client.aclose()
+ if hasattr(self, "__client"):
+ await self.__client.aclose()

  async def __aenter__(self: _T) -> _T:
  return self

  async def __aexit__(
  self,
- exc_type: Optional[type[BaseException]],
- exc: Optional[BaseException],
- exc_tb: Optional[TracebackType],
+ exc_type: type[BaseException] | None,
+ exc: BaseException | None,
+ exc_tb: TracebackType | None,
  ) -> None:
  await self.close()

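Taken together, the async client changes are a typing-style migration (`Optional[X]`/`Union[...]` to PEP 604 `X | None`), an attribute check in `close()` before calling `aclose()`, and a new `tags` property backed by the `AsyncTags` resource added in this release. A minimal usage sketch based on the signatures above (the API key and trace id are placeholders):

```python
import asyncio
from lmnr import AsyncLaminarClient

async def main():
    # base_url/project_api_key fall back to LMNR_BASE_URL / LMNR_PROJECT_API_KEY when omitted.
    async with AsyncLaminarClient(project_api_key="lmnr-...") as client:
        # Tags an already-ended trace; returns [] with a warning if the trace is not found.
        await client.tags.tag("00000000-0000-0000-0000-000000000000", ["smoke-test"])

asyncio.run(main())
```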

lmnr/sdk/client/asynchronous/resources/__init__.py CHANGED
@@ -1,9 +1,11 @@
  from lmnr.sdk.client.asynchronous.resources.agent import AsyncAgent
  from lmnr.sdk.client.asynchronous.resources.browser_events import AsyncBrowserEvents
  from lmnr.sdk.client.asynchronous.resources.evals import AsyncEvals
+ from lmnr.sdk.client.asynchronous.resources.tags import AsyncTags

  __all__ = [
  "AsyncAgent",
  "AsyncEvals",
  "AsyncBrowserEvents",
+ "AsyncTags",
  ]

lmnr/sdk/client/asynchronous/resources/agent.py CHANGED
@@ -5,8 +5,6 @@ from typing import (
  AsyncIterator,
  Awaitable,
  Literal,
- Optional,
- Union,
  overload,
  )
  import uuid
@@ -31,44 +29,44 @@ class AsyncAgent(BaseAsyncResource):
  self,
  prompt: str,
  stream: Literal[True],
- parent_span_context: Optional[Union[LaminarSpanContext, str]] = None,
- model_provider: Optional[ModelProvider] = None,
- model: Optional[str] = None,
+ parent_span_context: LaminarSpanContext | str | None = None,
+ model_provider: ModelProvider | None = None,
+ model: str | None = None,
  enable_thinking: bool = True,
- agent_state: Optional[str] = None,
- storage_state: Optional[str] = None,
+ agent_state: str | None = None,
+ storage_state: str | None = None,
  return_screenshots: bool = False,
  return_agent_state: bool = False,
  return_storage_state: bool = False,
  disable_give_control: bool = False,
- timeout: Optional[int] = None,
- cdp_url: Optional[str] = None,
- max_steps: Optional[int] = None,
- thinking_token_budget: Optional[int] = None,
- start_url: Optional[str] = None,
- user_agent: Optional[str] = None,
+ timeout: int | None = None,
+ cdp_url: str | None = None,
+ max_steps: int | None = None,
+ thinking_token_budget: int | None = None,
+ start_url: str | None = None,
+ user_agent: str | None = None,
  ) -> AsyncIterator[RunAgentResponseChunk]:
  """Run Laminar index agent in streaming mode.

  Args:
  prompt (str): prompt for the agent
  stream (Literal[True]): whether to stream the agent's response
- parent_span_context (Optional[Union[LaminarSpanContext, str]], optional): span context if the agent is part of a trace
- model_provider (Optional[ModelProvider], optional): LLM model provider
- model (Optional[str], optional): LLM model name
+ parent_span_context (LaminarSpanContext | str | None, optional): span context if the agent is part of a trace
+ model_provider (ModelProvider | None, optional): LLM model provider
+ model (str | None, optional): LLM model name
  enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
- agent_state (Optional[str], optional): the agent's state as returned by the previous agent run. Default to None.
- storage_state (Optional[str], optional): the browser's storage state as returned by the previous agent run. Default to None.
+ agent_state (str | None, optional): the agent's state as returned by the previous agent run. Default to None.
+ storage_state (str | None, optional): the browser's storage state as returned by the previous agent run. Default to None.
  return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
  return_agent_state (bool, optional): whether to return the agent's state in the final chunk. Default to False.
  return_storage_state (bool, optional): whether to return the storage state in the final chunk. Default to False.
  disable_give_control (bool, optional): whether to NOT give the agent additional direction to give control to the user for tasks such as login. Default to False.
- timeout (Optional[int], optional): timeout seconds for the agent's response. Default to None.
- cdp_url (Optional[str], optional): Chrome DevTools Protocol URL of an existing browser session. Default to None.
- max_steps (Optional[int], optional): maximum number of steps the agent can take. If not set, the backend will use a default value (currently 100). Default to None.
- thinking_token_budget (Optional[int], optional): maximum number of tokens the underlying LLM can spend on thinking in each step, if supported by the model. Default to None.
- start_url (Optional[str], optional): the URL to start the agent on. Must be a valid URL - refer to https://playwright.dev/docs/api/class-page#page-goto. If not specified, the agent infers this from the prompt. Default to None.
- user_agent (Optional[str], optional): the user to be sent to the browser. If not specified, Laminar uses the default user agent. Default to None.
+ timeout (int | None, optional): timeout seconds for the agent's response. Default to None.
+ cdp_url (str | None, optional): Chrome DevTools Protocol URL of an existing browser session. Default to None.
+ max_steps (int | None, optional): maximum number of steps the agent can take. If not set, the backend will use a default value (currently 100). Default to None.
+ thinking_token_budget (int | None, optional): maximum number of tokens the underlying LLM can spend on thinking in each step, if supported by the model. Default to None.
+ start_url (str | None, optional): the URL to start the agent on. Must be a valid URL - refer to https://playwright.dev/docs/api/class-page#page-goto. If not specified, the agent infers this from the prompt. Default to None.
+ user_agent (str | None, optional): the user to be sent to the browser. If not specified, Laminar uses the default user agent. Default to None.

  Returns:
  AsyncIterator[RunAgentResponseChunk]: a generator of response chunks
@@ -79,43 +77,43 @@ class AsyncAgent(BaseAsyncResource):
  async def run(
  self,
  prompt: str,
- parent_span_context: Optional[Union[LaminarSpanContext, str]] = None,
- model_provider: Optional[ModelProvider] = None,
- model: Optional[str] = None,
+ parent_span_context: LaminarSpanContext | str | None = None,
+ model_provider: ModelProvider | None = None,
+ model: str | None = None,
  enable_thinking: bool = True,
- agent_state: Optional[str] = None,
- storage_state: Optional[str] = None,
+ agent_state: str | None = None,
+ storage_state: str | None = None,
  return_screenshots: bool = False,
  return_agent_state: bool = False,
  return_storage_state: bool = False,
  disable_give_control: bool = False,
- timeout: Optional[int] = None,
- cdp_url: Optional[str] = None,
- max_steps: Optional[int] = None,
- thinking_token_budget: Optional[int] = None,
- start_url: Optional[str] = None,
- user_agent: Optional[str] = None,
+ timeout: int | None = None,
+ cdp_url: str | None = None,
+ max_steps: int | None = None,
+ thinking_token_budget: int | None = None,
+ start_url: str | None = None,
+ user_agent: str | None = None,
  ) -> AgentOutput:
  """Run Laminar index agent.

  Args:
  prompt (str): prompt for the agent
- parent_span_context (Optional[LaminarSpanContext], optional): span context if the agent is part of a trace
- model_provider (Optional[ModelProvider], optional): LLM model provider
- model (Optional[str], optional): LLM model name
+ parent_span_context (LaminarSpanContext | str | None, optional): span context if the agent is part of a trace
+ model_provider (ModelProvider | None, optional): LLM model provider
+ model (str | None, optional): LLM model name
  enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
- agent_state (Optional[str], optional): the agent's state as returned by the previous agent run. Default to None.
- storage_state (Optional[str], optional): the browser's storage state as returned by the previous agent run. Default to None.
+ agent_state (str | None, optional): the agent's state as returned by the previous agent run. Default to None.
+ storage_state (str | None, optional): the browser's storage state as returned by the previous agent run. Default to None.
  return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
  return_agent_state (bool, optional): whether to return the agent's state. Default to False.
  return_storage_state (bool, optional): whether to return the storage state. Default to False.
  disable_give_control (bool, optional): whether to NOT give the agent additional direction to give control to the user for tasks such as login. Default to False.
- timeout (Optional[int], optional): timeout seconds for the agent's response. Default to None.
- cdp_url (Optional[str], optional): Chrome DevTools Protocol URL of an existing browser session. Default to None.
- max_steps (Optional[int], optional): maximum number of steps the agent can take. If not set, the backend will use a default value (currently 100). Default to None.
- thinking_token_budget (Optional[int], optional): maximum number of tokens the underlying LLM can spend on thinking in each step, if supported by the model. Default to None.
- start_url (Optional[str], optional): the URL to start the agent on. Must be a valid URL - refer to https://playwright.dev/docs/api/class-page#page-goto. If not specified, the agent infers this from the prompt. Default to None.
- user_agent (Optional[str], optional): the user to be sent to the browser. If not specified, Laminar uses the default user agent. Default to None.
+ timeout (int | None, optional): timeout seconds for the agent's response. Default to None.
+ cdp_url (str | None, optional): Chrome DevTools Protocol URL of an existing browser session. Default to None.
+ max_steps (int | None, optional): maximum number of steps the agent can take. If not set, the backend will use a default value (currently 100). Default to None.
+ thinking_token_budget (int | None, optional): maximum number of tokens the underlying LLM can spend on thinking in each step, if supported by the model. Default to None.
+ start_url (str | None, optional): the URL to start the agent on. Must be a valid URL - refer to https://playwright.dev/docs/api/class-page#page-goto. If not specified, the agent infers this from the prompt. Default to None.
+ user_agent (str | None, optional): the user to be sent to the browser. If not specified, Laminar uses the default user agent. Default to None.

  Returns:
  AgentOutput: agent output
@@ -126,44 +124,44 @@ class AsyncAgent(BaseAsyncResource):
  async def run(
  self,
  prompt: str,
- parent_span_context: Optional[Union[LaminarSpanContext, str]] = None,
- model_provider: Optional[ModelProvider] = None,
- model: Optional[str] = None,
+ parent_span_context: LaminarSpanContext | str | None = None,
+ model_provider: ModelProvider | None = None,
+ model: str | None = None,
  stream: Literal[False] = False,
  enable_thinking: bool = True,
- agent_state: Optional[str] = None,
- storage_state: Optional[str] = None,
+ agent_state: str | None = None,
+ storage_state: str | None = None,
  return_screenshots: bool = False,
  return_agent_state: bool = False,
  return_storage_state: bool = False,
  disable_give_control: bool = False,
- timeout: Optional[int] = None,
- max_steps: Optional[int] = None,
- thinking_token_budget: Optional[int] = None,
- start_url: Optional[str] = None,
- user_agent: Optional[str] = None,
+ timeout: int | None = None,
+ max_steps: int | None = None,
+ thinking_token_budget: int | None = None,
+ start_url: str | None = None,
+ user_agent: str | None = None,
  ) -> AgentOutput:
  """Run Laminar index agent.

  Args:
  prompt (str): prompt for the agent
- parent_span_context (Optional[Union[LaminarSpanContext, str]], optional): span context if the agent is part of a trace
- model_provider (Optional[ModelProvider], optional): LLM model provider
- model (Optional[str], optional): LLM model name
+ parent_span_context (LaminarSpanContext | str | None, optional): span context if the agent is part of a trace
+ model_provider (ModelProvider | None, optional): LLM model provider
+ model (str | None, optional): LLM model name
  stream (Literal[False], optional): whether to stream the agent's response
  enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
- agent_state (Optional[str], optional): the agent's state as returned by the previous agent run. Default to None.
- storage_state (Optional[str], optional): the browser's storage state as returned by the previous agent run. Default to None.
+ agent_state (str | None, optional): the agent's state as returned by the previous agent run. Default to None.
+ storage_state (str | None, optional): the browser's storage state as returned by the previous agent run. Default to None.
  return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
  return_agent_state (bool, optional): whether to return the agent's state. Default to False.
  return_storage_state (bool, optional): whether to return the storage state. Default to False.
  disable_give_control (bool, optional): whether to NOT give the agent additional direction to give control to the user for tasks such as login. Default to False.
- timeout (Optional[int], optional): timeout seconds for the agent's response. Default to None.
- cdp_url (Optional[str], optional): Chrome DevTools Protocol URL of an existing browser session. Default to None.
- max_steps (Optional[int], optional): maximum number of steps the agent can take. If not set, the backend will use a default value (currently 100). Default to None.
- thinking_token_budget (Optional[int], optional): maximum number of tokens the underlying LLM can spend on thinking in each step, if supported by the model. Default to None.
- start_url (Optional[str], optional): the URL to start the agent on. Must be a valid URL - refer to https://playwright.dev/docs/api/class-page#page-goto. If not specified, the agent infers this from the prompt. Default to None.
- user_agent (Optional[str], optional): the user to be sent to the browser. If not specified, Laminar uses the default user agent. Default to None.
+ timeout (int | None, optional): timeout seconds for the agent's response. Default to None.
+ cdp_url (str | None, optional): Chrome DevTools Protocol URL of an existing browser session. Default to None.
+ max_steps (int | None, optional): maximum number of steps the agent can take. If not set, the backend will use a default value (currently 100). Default to None.
+ thinking_token_budget (int | None, optional): maximum number of tokens the underlying LLM can spend on thinking in each step, if supported by the model. Default to None.
+ start_url (str | None, optional): the URL to start the agent on. Must be a valid URL - refer to https://playwright.dev/docs/api/class-page#page-goto. If not specified, the agent infers this from the prompt. Default to None.
+ user_agent (str | None, optional): the user to be sent to the browser. If not specified, Laminar uses the default user agent. Default to None.

  Returns:
  AgentOutput: agent output
@@ -173,48 +171,48 @@ class AsyncAgent(BaseAsyncResource):
  async def run(
  self,
  prompt: str,
- parent_span_context: Optional[Union[LaminarSpanContext, str]] = None,
- model_provider: Optional[ModelProvider] = None,
- model: Optional[str] = None,
+ parent_span_context: LaminarSpanContext | str | None = None,
+ model_provider: ModelProvider | None = None,
+ model: str | None = None,
  stream: bool = False,
  enable_thinking: bool = True,
- agent_state: Optional[str] = None,
- storage_state: Optional[str] = None,
+ agent_state: str | None = None,
+ storage_state: str | None = None,
  return_screenshots: bool = False,
  return_agent_state: bool = False,
  return_storage_state: bool = False,
  disable_give_control: bool = False,
- timeout: Optional[int] = None,
- cdp_url: Optional[str] = None,
- max_steps: Optional[int] = None,
- thinking_token_budget: Optional[int] = None,
- start_url: Optional[str] = None,
- user_agent: Optional[str] = None,
- ) -> Union[AgentOutput, Awaitable[AsyncIterator[RunAgentResponseChunk]]]:
+ timeout: int | None = None,
+ cdp_url: str | None = None,
+ max_steps: int | None = None,
+ thinking_token_budget: int | None = None,
+ start_url: str | None = None,
+ user_agent: str | None = None,
+ ) -> AgentOutput | Awaitable[AsyncIterator[RunAgentResponseChunk]]:
  """Run Laminar index agent.

  Args:
  prompt (str): prompt for the agent
- parent_span_context (Optional[Union[LaminarSpanContext, str]], optional): span context if the agent is part of a trace
- model_provider (Optional[ModelProvider], optional): LLM model provider
- model (Optional[str], optional): LLM model name
+ parent_span_context (LaminarSpanContext | str | None, optional): span context if the agent is part of a trace
+ model_provider (ModelProvider | None, optional): LLM model provider
+ model (str | None, optional): LLM model name
  stream (bool, optional): whether to stream the agent's response
  enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
- agent_state (Optional[str], optional): the agent's state as returned by the previous agent run. Default to None.
- storage_state (Optional[str], optional): the browser's storage state as returned by the previous agent run. Default to None.
+ agent_state (str | None, optional): the agent's state as returned by the previous agent run. Default to None.
+ storage_state (str | None, optional): the browser's storage state as returned by the previous agent run. Default to None.
  return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
  return_agent_state (bool, optional): whether to return the agent's state. Default to False.
  return_storage_state (bool, optional): whether to return the storage state. Default to False.
  disable_give_control (bool, optional): whether to NOT give the agent additional direction to give control to the user for tasks such as login. Default to False.
- timeout (Optional[int], optional): timeout seconds for the agent's response. Default to None.
- cdp_url (Optional[str], optional): Chrome DevTools Protocol URL of an existing browser session. Default to None.
- max_steps (Optional[int], optional): maximum number of steps the agent can take. If not set, the backend will use a default value (currently 100). Default to None.
- thinking_token_budget (Optional[int], optional): maximum number of tokens the underlying LLM can spend on thinking in each step, if supported by the model. Default to None.
- start_url (Optional[str], optional): the URL to start the agent on. Must be a valid URL - refer to https://playwright.dev/docs/api/class-page#page-goto. If not specified, the agent infers this from the prompt. Default to None.
- user_agent (Optional[str], optional): the user to be sent to the browser. If not specified, Laminar uses the default user agent. Default to None.
+ timeout (int | None, optional): timeout seconds for the agent's response. Default to None.
+ cdp_url (str | None, optional): Chrome DevTools Protocol URL of an existing browser session. Default to None.
+ max_steps (int | None, optional): maximum number of steps the agent can take. If not set, the backend will use a default value (currently 100). Default to None.
+ thinking_token_budget (int | None, optional): maximum number of tokens the underlying LLM can spend on thinking in each step, if supported by the model. Default to None.
+ start_url (str | None, optional): the URL to start the agent on. Must be a valid URL - refer to https://playwright.dev/docs/api/class-page#page-goto. If not specified, the agent infers this from the prompt. Default to None.
+ user_agent (str | None, optional): the user to be sent to the browser. If not specified, Laminar uses the default user agent. Default to None.

  Returns:
- Union[AgentOutput, AsyncIterator[RunAgentResponseChunk]]: agent output or a generator of response chunks
+ AgentOutput | AsyncIterator[RunAgentResponseChunk]: agent output or a generator of response chunks
  """
  if parent_span_context is None:
  span = trace.get_current_span()
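All five hunks in this file are the same typing-style migration; parameter names and defaults are unchanged. A minimal non-streaming call based on the documented signature (the prompt and values are placeholders):

```python
from lmnr import AsyncLaminarClient

async def run_agent():
    client = AsyncLaminarClient()  # reads LMNR_PROJECT_API_KEY from the environment
    output = await client.agent.run(
        prompt="Open example.com and report the page title",
        max_steps=10,             # int | None; backend default (currently 100) if omitted
        return_screenshots=False,
    )
    return output  # AgentOutput
```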

lmnr/sdk/client/asynchronous/resources/evals.py CHANGED
@@ -1,7 +1,6 @@
  """Evals resource for interacting with Laminar evaluations API."""

  import uuid
- from typing import Optional, Union

  from lmnr.sdk.client.asynchronous.resources.base import BaseAsyncResource
  from lmnr.sdk.types import (
@@ -15,13 +14,13 @@ class AsyncEvals(BaseAsyncResource):
  """Resource for interacting with Laminar evaluations API."""

  async def init(
- self, name: Optional[str] = None, group_name: Optional[str] = None
+ self, name: str | None = None, group_name: str | None = None
  ) -> InitEvaluationResponse:
  """Initialize a new evaluation.

  Args:
- name (Optional[str], optional): Name of the evaluation. Defaults to None.
- group_name (Optional[str], optional): Group name for the evaluation. Defaults to None.
+ name (str | None, optional): Name of the evaluation. Defaults to None.
+ group_name (str | None, optional): Group name for the evaluation. Defaults to None.

  Returns:
  InitEvaluationResponse: The response from the initialization request.
@@ -40,15 +39,15 @@ class AsyncEvals(BaseAsyncResource):
  async def save_datapoints(
  self,
  eval_id: uuid.UUID,
- datapoints: list[Union[EvaluationResultDatapoint, PartialEvaluationDatapoint]],
- group_name: Optional[str] = None,
+ datapoints: list[EvaluationResultDatapoint | PartialEvaluationDatapoint],
+ group_name: str | None = None,
  ):
  """Save evaluation datapoints.

  Args:
  eval_id (uuid.UUID): The evaluation ID.
- datapoints (list[Union[EvaluationResultDatapoint, PartialEvaluationDatapoint]]): The datapoints to save.
- group_name (Optional[str], optional): Group name for the datapoints. Defaults to None.
+ datapoints (list[EvaluationResultDatapoint | PartialEvaluationDatapoint]): The datapoints to save.
+ group_name (str | None, optional): Group name for the datapoints. Defaults to None.

  Raises:
  ValueError: If there's an error saving the datapoints.
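These hunks are again signature-only changes. Assuming the async client exposes this resource as `client.evals` (the property itself is not part of this diff), initializing an evaluation would look like:

```python
from lmnr import AsyncLaminarClient

async def start_eval():
    client = AsyncLaminarClient()
    # Both arguments are str | None; the backend picks defaults when they are omitted.
    init_resp = await client.evals.init(name="regression-suite", group_name="nightly")
    return init_resp  # InitEvaluationResponse
```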

lmnr/sdk/client/asynchronous/resources/tags.py ADDED
@@ -0,0 +1,89 @@
+ """Resource for tagging traces."""
+
+ import json
+ import uuid
+
+ from lmnr.sdk.client.asynchronous.resources.base import BaseAsyncResource
+ from lmnr.sdk.log import get_default_logger
+
+ logger = get_default_logger(__name__)
+
+
+ class AsyncTags(BaseAsyncResource):
+ """Resource for tagging traces."""
+
+ async def tag(
+ self,
+ trace_id: str | int | uuid.UUID,
+ tags: list[str] | str,
+ ):
+ """Tag a trace with a list of tags. Note that the trace must be ended
+ before tagging it. You may want to call `Laminar.flush()` after the
+ trace that you want to tag.
+
+ Args:
+ trace_id (str | int | uuid.UUID): The trace id to tag.
+ tags (list[str] | str): The tag or list of tags to add to the trace.
+
+ Raises:
+ ValueError: If the trace id is not a valid UUID.
+
+ Returns:
+ list[dict]: The response from the server.
+
+ Example:
+ ```python
+ from lmnr import Laminar, AsyncLaminarClient, observe
+
+ Laminar.initialize()
+ client = AsyncLaminarClient()
+ trace_id = None
+
+ @observe()
+ def foo():
+ trace_id = Laminar.get_trace_id()
+ pass
+
+ # make sure `foo` is called outside a trace context
+ foo()
+
+ # or make sure the trace is ended by this point
+ Laminar.flush()
+
+ await client.tags.tag(trace_id, "my_tag")
+ ```
+ """
+ trace_tags = tags if isinstance(tags, list) else [tags]
+ if isinstance(trace_id, uuid.UUID):
+ trace_id = str(trace_id)
+ elif isinstance(trace_id, int):
+ trace_id = str(uuid.UUID(int=trace_id))
+ elif isinstance(trace_id, str):
+ uuid.UUID(trace_id)  # Will raise ValueError if invalid
+ else:
+ raise ValueError(f"Invalid trace id: {trace_id}")
+
+ url = self._base_url + "/v1/tag"
+ payload = {
+ "traceId": trace_id,
+ "names": trace_tags,
+ }
+ response = await self._client.post(
+ url,
+ content=json.dumps(payload),
+ headers={
+ **self._headers(),
+ },
+ )
+
+ if response.status_code == 404:
+ logger.warning(
+ f"Trace {trace_id} not found. The trace may have not been ended yet."
+ )
+ return []
+
+ if response.status_code != 200:
+ raise ValueError(
+ f"Failed to tag trace: [{response.status_code}] {response.text}"
+ )
+ return response.json()
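A detail worth noting in the new resource: `tag()` accepts the trace id as `str`, `int`, or `uuid.UUID` and normalizes all three to the canonical UUID string before posting `{"traceId": ..., "names": [...]}` to `/v1/tag`. The stdlib round-trip it relies on:

```python
import uuid

tid = uuid.uuid4()
# All three accepted forms normalize to the same canonical string.
assert str(tid) == str(uuid.UUID(int=tid.int)) == str(uuid.UUID(str(tid)))
```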

lmnr/sdk/client/synchronous/resources/__init__.py CHANGED
@@ -1,5 +1,6 @@
  from lmnr.sdk.client.synchronous.resources.agent import Agent
  from lmnr.sdk.client.synchronous.resources.browser_events import BrowserEvents
  from lmnr.sdk.client.synchronous.resources.evals import Evals
+ from lmnr.sdk.client.synchronous.resources.tags import Tags

- __all__ = ["Agent", "Evals", "BrowserEvents"]
+ __all__ = ["Agent", "Evals", "BrowserEvents", "Tags"]