lmnr 0.6.16__py3-none-any.whl → 0.7.26__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (113) hide show
  1. lmnr/__init__.py +6 -15
  2. lmnr/cli/__init__.py +270 -0
  3. lmnr/cli/datasets.py +371 -0
  4. lmnr/{cli.py → cli/evals.py} +20 -102
  5. lmnr/cli/rules.py +42 -0
  6. lmnr/opentelemetry_lib/__init__.py +9 -2
  7. lmnr/opentelemetry_lib/decorators/__init__.py +274 -168
  8. lmnr/opentelemetry_lib/litellm/__init__.py +352 -38
  9. lmnr/opentelemetry_lib/litellm/utils.py +82 -0
  10. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/__init__.py +849 -0
  11. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/config.py +13 -0
  12. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_emitter.py +211 -0
  13. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_models.py +41 -0
  14. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/span_utils.py +401 -0
  15. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/streaming.py +425 -0
  16. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/utils.py +332 -0
  17. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/version.py +1 -0
  18. lmnr/opentelemetry_lib/opentelemetry/instrumentation/claude_agent/__init__.py +451 -0
  19. lmnr/opentelemetry_lib/opentelemetry/instrumentation/claude_agent/proxy.py +144 -0
  20. lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_agent/__init__.py +100 -0
  21. lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_computer/__init__.py +476 -0
  22. lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_computer/utils.py +12 -0
  23. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py +191 -129
  24. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/schema_utils.py +26 -0
  25. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py +126 -41
  26. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/__init__.py +488 -0
  27. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/config.py +8 -0
  28. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_emitter.py +143 -0
  29. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_models.py +41 -0
  30. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/span_utils.py +229 -0
  31. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/utils.py +92 -0
  32. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/version.py +1 -0
  33. lmnr/opentelemetry_lib/opentelemetry/instrumentation/kernel/__init__.py +381 -0
  34. lmnr/opentelemetry_lib/opentelemetry/instrumentation/kernel/utils.py +36 -0
  35. lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/__init__.py +16 -16
  36. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/__init__.py +61 -0
  37. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/__init__.py +472 -0
  38. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +1185 -0
  39. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +305 -0
  40. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/config.py +16 -0
  41. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +312 -0
  42. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_emitter.py +100 -0
  43. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_models.py +41 -0
  44. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +68 -0
  45. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/utils.py +197 -0
  46. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v0/__init__.py +176 -0
  47. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/__init__.py +368 -0
  48. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +325 -0
  49. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +135 -0
  50. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/responses_wrappers.py +786 -0
  51. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/version.py +1 -0
  52. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openhands_ai/__init__.py +388 -0
  53. lmnr/opentelemetry_lib/opentelemetry/instrumentation/opentelemetry/__init__.py +69 -0
  54. lmnr/opentelemetry_lib/opentelemetry/instrumentation/skyvern/__init__.py +59 -61
  55. lmnr/opentelemetry_lib/opentelemetry/instrumentation/threading/__init__.py +197 -0
  56. lmnr/opentelemetry_lib/tracing/__init__.py +119 -18
  57. lmnr/opentelemetry_lib/tracing/_instrument_initializers.py +124 -25
  58. lmnr/opentelemetry_lib/tracing/attributes.py +4 -0
  59. lmnr/opentelemetry_lib/tracing/context.py +200 -0
  60. lmnr/opentelemetry_lib/tracing/exporter.py +109 -15
  61. lmnr/opentelemetry_lib/tracing/instruments.py +22 -5
  62. lmnr/opentelemetry_lib/tracing/processor.py +128 -30
  63. lmnr/opentelemetry_lib/tracing/span.py +398 -0
  64. lmnr/opentelemetry_lib/tracing/tracer.py +40 -1
  65. lmnr/opentelemetry_lib/tracing/utils.py +62 -0
  66. lmnr/opentelemetry_lib/utils/package_check.py +9 -0
  67. lmnr/opentelemetry_lib/utils/wrappers.py +11 -0
  68. lmnr/sdk/browser/background_send_events.py +158 -0
  69. lmnr/sdk/browser/browser_use_cdp_otel.py +100 -0
  70. lmnr/sdk/browser/browser_use_otel.py +12 -12
  71. lmnr/sdk/browser/bubus_otel.py +71 -0
  72. lmnr/sdk/browser/cdp_utils.py +518 -0
  73. lmnr/sdk/browser/inject_script.js +514 -0
  74. lmnr/sdk/browser/patchright_otel.py +18 -44
  75. lmnr/sdk/browser/playwright_otel.py +104 -187
  76. lmnr/sdk/browser/pw_utils.py +249 -210
  77. lmnr/sdk/browser/recorder/record.umd.min.cjs +84 -0
  78. lmnr/sdk/browser/utils.py +1 -1
  79. lmnr/sdk/client/asynchronous/async_client.py +47 -15
  80. lmnr/sdk/client/asynchronous/resources/__init__.py +2 -7
  81. lmnr/sdk/client/asynchronous/resources/browser_events.py +1 -0
  82. lmnr/sdk/client/asynchronous/resources/datasets.py +131 -0
  83. lmnr/sdk/client/asynchronous/resources/evals.py +122 -18
  84. lmnr/sdk/client/asynchronous/resources/evaluators.py +85 -0
  85. lmnr/sdk/client/asynchronous/resources/tags.py +4 -10
  86. lmnr/sdk/client/synchronous/resources/__init__.py +2 -2
  87. lmnr/sdk/client/synchronous/resources/datasets.py +131 -0
  88. lmnr/sdk/client/synchronous/resources/evals.py +83 -17
  89. lmnr/sdk/client/synchronous/resources/evaluators.py +85 -0
  90. lmnr/sdk/client/synchronous/resources/tags.py +4 -10
  91. lmnr/sdk/client/synchronous/sync_client.py +47 -15
  92. lmnr/sdk/datasets/__init__.py +94 -0
  93. lmnr/sdk/datasets/file_utils.py +91 -0
  94. lmnr/sdk/decorators.py +103 -23
  95. lmnr/sdk/evaluations.py +122 -33
  96. lmnr/sdk/laminar.py +816 -333
  97. lmnr/sdk/log.py +7 -2
  98. lmnr/sdk/types.py +124 -143
  99. lmnr/sdk/utils.py +115 -2
  100. lmnr/version.py +1 -1
  101. {lmnr-0.6.16.dist-info → lmnr-0.7.26.dist-info}/METADATA +71 -78
  102. lmnr-0.7.26.dist-info/RECORD +116 -0
  103. lmnr-0.7.26.dist-info/WHEEL +4 -0
  104. lmnr-0.7.26.dist-info/entry_points.txt +3 -0
  105. lmnr/opentelemetry_lib/tracing/context_properties.py +0 -65
  106. lmnr/sdk/browser/rrweb/rrweb.umd.min.cjs +0 -98
  107. lmnr/sdk/client/asynchronous/resources/agent.py +0 -329
  108. lmnr/sdk/client/synchronous/resources/agent.py +0 -323
  109. lmnr/sdk/datasets.py +0 -60
  110. lmnr-0.6.16.dist-info/LICENSE +0 -75
  111. lmnr-0.6.16.dist-info/RECORD +0 -61
  112. lmnr-0.6.16.dist-info/WHEEL +0 -4
  113. lmnr-0.6.16.dist-info/entry_points.txt +0 -3
@@ -1,329 +0,0 @@
1
- """Agent resource for interacting with Laminar agents."""
2
-
3
- from typing import (
4
- AsyncGenerator,
5
- AsyncIterator,
6
- Awaitable,
7
- Literal,
8
- overload,
9
- )
10
- import uuid
11
-
12
- from lmnr.sdk.client.asynchronous.resources.base import BaseAsyncResource
13
- from lmnr.sdk.types import (
14
- AgentOutput,
15
- LaminarSpanContext,
16
- ModelProvider,
17
- RunAgentRequest,
18
- RunAgentResponseChunk,
19
- )
20
-
21
- from opentelemetry import trace
22
-
23
-
24
- class AsyncAgent(BaseAsyncResource):
25
- """Resource for interacting with Laminar agents."""
26
-
27
- @overload
28
- async def run(
29
- self,
30
- prompt: str,
31
- stream: Literal[True],
32
- parent_span_context: LaminarSpanContext | str | None = None,
33
- model_provider: ModelProvider | None = None,
34
- model: str | None = None,
35
- enable_thinking: bool = True,
36
- agent_state: str | None = None,
37
- storage_state: str | None = None,
38
- return_screenshots: bool = False,
39
- return_agent_state: bool = False,
40
- return_storage_state: bool = False,
41
- disable_give_control: bool = False,
42
- timeout: int | None = None,
43
- cdp_url: str | None = None,
44
- max_steps: int | None = None,
45
- thinking_token_budget: int | None = None,
46
- start_url: str | None = None,
47
- user_agent: str | None = None,
48
- ) -> AsyncIterator[RunAgentResponseChunk]:
49
- """Run Laminar index agent in streaming mode.
50
-
51
- Args:
52
- prompt (str): prompt for the agent
53
- stream (Literal[True]): whether to stream the agent's response
54
- parent_span_context (LaminarSpanContext | str | None, optional): span context if the agent is part of a trace
55
- model_provider (ModelProvider | None, optional): LLM model provider
56
- model (str | None, optional): LLM model name
57
- enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
58
- agent_state (str | None, optional): the agent's state as returned by the previous agent run. Default to None.
59
- storage_state (str | None, optional): the browser's storage state as returned by the previous agent run. Default to None.
60
- return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
61
- return_agent_state (bool, optional): whether to return the agent's state in the final chunk. Default to False.
62
- return_storage_state (bool, optional): whether to return the storage state in the final chunk. Default to False.
63
- disable_give_control (bool, optional): whether to NOT give the agent additional direction to give control to the user for tasks such as login. Default to False.
64
- timeout (int | None, optional): timeout seconds for the agent's response. Default to None.
65
- cdp_url (str | None, optional): Chrome DevTools Protocol URL of an existing browser session. Default to None.
66
- max_steps (int | None, optional): maximum number of steps the agent can take. If not set, the backend will use a default value (currently 100). Default to None.
67
- thinking_token_budget (int | None, optional): maximum number of tokens the underlying LLM can spend on thinking in each step, if supported by the model. Default to None.
68
- start_url (str | None, optional): the URL to start the agent on. Must be a valid URL - refer to https://playwright.dev/docs/api/class-page#page-goto. If not specified, the agent infers this from the prompt. Default to None.
69
- user_agent (str | None, optional): the user to be sent to the browser. If not specified, Laminar uses the default user agent. Default to None.
70
-
71
- Returns:
72
- AsyncIterator[RunAgentResponseChunk]: a generator of response chunks
73
- """
74
- pass
75
-
76
- @overload
77
- async def run(
78
- self,
79
- prompt: str,
80
- parent_span_context: LaminarSpanContext | str | None = None,
81
- model_provider: ModelProvider | None = None,
82
- model: str | None = None,
83
- enable_thinking: bool = True,
84
- agent_state: str | None = None,
85
- storage_state: str | None = None,
86
- return_screenshots: bool = False,
87
- return_agent_state: bool = False,
88
- return_storage_state: bool = False,
89
- disable_give_control: bool = False,
90
- timeout: int | None = None,
91
- cdp_url: str | None = None,
92
- max_steps: int | None = None,
93
- thinking_token_budget: int | None = None,
94
- start_url: str | None = None,
95
- user_agent: str | None = None,
96
- ) -> AgentOutput:
97
- """Run Laminar index agent.
98
-
99
- Args:
100
- prompt (str): prompt for the agent
101
- parent_span_context (LaminarSpanContext | str | None, optional): span context if the agent is part of a trace
102
- model_provider (ModelProvider | None, optional): LLM model provider
103
- model (str | None, optional): LLM model name
104
- enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
105
- agent_state (str | None, optional): the agent's state as returned by the previous agent run. Default to None.
106
- storage_state (str | None, optional): the browser's storage state as returned by the previous agent run. Default to None.
107
- return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
108
- return_agent_state (bool, optional): whether to return the agent's state. Default to False.
109
- return_storage_state (bool, optional): whether to return the storage state. Default to False.
110
- disable_give_control (bool, optional): whether to NOT give the agent additional direction to give control to the user for tasks such as login. Default to False.
111
- timeout (int | None, optional): timeout seconds for the agent's response. Default to None.
112
- cdp_url (str | None, optional): Chrome DevTools Protocol URL of an existing browser session. Default to None.
113
- max_steps (int | None, optional): maximum number of steps the agent can take. If not set, the backend will use a default value (currently 100). Default to None.
114
- thinking_token_budget (int | None, optional): maximum number of tokens the underlying LLM can spend on thinking in each step, if supported by the model. Default to None.
115
- start_url (str | None, optional): the URL to start the agent on. Must be a valid URL - refer to https://playwright.dev/docs/api/class-page#page-goto. If not specified, the agent infers this from the prompt. Default to None.
116
- user_agent (str | None, optional): the user to be sent to the browser. If not specified, Laminar uses the default user agent. Default to None.
117
-
118
- Returns:
119
- AgentOutput: agent output
120
- """
121
- pass
122
-
123
- @overload
124
- async def run(
125
- self,
126
- prompt: str,
127
- parent_span_context: LaminarSpanContext | str | None = None,
128
- model_provider: ModelProvider | None = None,
129
- model: str | None = None,
130
- stream: Literal[False] = False,
131
- enable_thinking: bool = True,
132
- agent_state: str | None = None,
133
- storage_state: str | None = None,
134
- return_screenshots: bool = False,
135
- return_agent_state: bool = False,
136
- return_storage_state: bool = False,
137
- disable_give_control: bool = False,
138
- timeout: int | None = None,
139
- max_steps: int | None = None,
140
- thinking_token_budget: int | None = None,
141
- start_url: str | None = None,
142
- user_agent: str | None = None,
143
- ) -> AgentOutput:
144
- """Run Laminar index agent.
145
-
146
- Args:
147
- prompt (str): prompt for the agent
148
- parent_span_context (LaminarSpanContext | str | None, optional): span context if the agent is part of a trace
149
- model_provider (ModelProvider | None, optional): LLM model provider
150
- model (str | None, optional): LLM model name
151
- stream (Literal[False], optional): whether to stream the agent's response
152
- enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
153
- agent_state (str | None, optional): the agent's state as returned by the previous agent run. Default to None.
154
- storage_state (str | None, optional): the browser's storage state as returned by the previous agent run. Default to None.
155
- return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
156
- return_agent_state (bool, optional): whether to return the agent's state. Default to False.
157
- return_storage_state (bool, optional): whether to return the storage state. Default to False.
158
- disable_give_control (bool, optional): whether to NOT give the agent additional direction to give control to the user for tasks such as login. Default to False.
159
- timeout (int | None, optional): timeout seconds for the agent's response. Default to None.
160
- cdp_url (str | None, optional): Chrome DevTools Protocol URL of an existing browser session. Default to None.
161
- max_steps (int | None, optional): maximum number of steps the agent can take. If not set, the backend will use a default value (currently 100). Default to None.
162
- thinking_token_budget (int | None, optional): maximum number of tokens the underlying LLM can spend on thinking in each step, if supported by the model. Default to None.
163
- start_url (str | None, optional): the URL to start the agent on. Must be a valid URL - refer to https://playwright.dev/docs/api/class-page#page-goto. If not specified, the agent infers this from the prompt. Default to None.
164
- user_agent (str | None, optional): the user to be sent to the browser. If not specified, Laminar uses the default user agent. Default to None.
165
-
166
- Returns:
167
- AgentOutput: agent output
168
- """
169
- pass
170
-
171
- async def run(
172
- self,
173
- prompt: str,
174
- parent_span_context: LaminarSpanContext | str | None = None,
175
- model_provider: ModelProvider | None = None,
176
- model: str | None = None,
177
- stream: bool = False,
178
- enable_thinking: bool = True,
179
- agent_state: str | None = None,
180
- storage_state: str | None = None,
181
- return_screenshots: bool = False,
182
- return_agent_state: bool = False,
183
- return_storage_state: bool = False,
184
- disable_give_control: bool = False,
185
- timeout: int | None = None,
186
- cdp_url: str | None = None,
187
- max_steps: int | None = None,
188
- thinking_token_budget: int | None = None,
189
- start_url: str | None = None,
190
- user_agent: str | None = None,
191
- ) -> AgentOutput | Awaitable[AsyncIterator[RunAgentResponseChunk]]:
192
- """Run Laminar index agent.
193
-
194
- Args:
195
- prompt (str): prompt for the agent
196
- parent_span_context (LaminarSpanContext | str | None, optional): span context if the agent is part of a trace
197
- model_provider (ModelProvider | None, optional): LLM model provider
198
- model (str | None, optional): LLM model name
199
- stream (bool, optional): whether to stream the agent's response
200
- enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
201
- agent_state (str | None, optional): the agent's state as returned by the previous agent run. Default to None.
202
- storage_state (str | None, optional): the browser's storage state as returned by the previous agent run. Default to None.
203
- return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
204
- return_agent_state (bool, optional): whether to return the agent's state. Default to False.
205
- return_storage_state (bool, optional): whether to return the storage state. Default to False.
206
- disable_give_control (bool, optional): whether to NOT give the agent additional direction to give control to the user for tasks such as login. Default to False.
207
- timeout (int | None, optional): timeout seconds for the agent's response. Default to None.
208
- cdp_url (str | None, optional): Chrome DevTools Protocol URL of an existing browser session. Default to None.
209
- max_steps (int | None, optional): maximum number of steps the agent can take. If not set, the backend will use a default value (currently 100). Default to None.
210
- thinking_token_budget (int | None, optional): maximum number of tokens the underlying LLM can spend on thinking in each step, if supported by the model. Default to None.
211
- start_url (str | None, optional): the URL to start the agent on. Must be a valid URL - refer to https://playwright.dev/docs/api/class-page#page-goto. If not specified, the agent infers this from the prompt. Default to None.
212
-
213
-
214
- Returns:
215
- AgentOutput | AsyncIterator[RunAgentResponseChunk]: agent output or a generator of response chunks
216
- """
217
- if parent_span_context is None:
218
- span = trace.get_current_span()
219
- if span != trace.INVALID_SPAN:
220
- parent_span_context = LaminarSpanContext(
221
- trace_id=uuid.UUID(int=span.get_span_context().trace_id),
222
- span_id=uuid.UUID(int=span.get_span_context().span_id),
223
- is_remote=span.get_span_context().is_remote,
224
- )
225
- if parent_span_context is not None and isinstance(
226
- parent_span_context, LaminarSpanContext
227
- ):
228
- parent_span_context = str(parent_span_context)
229
- request = RunAgentRequest(
230
- prompt=prompt,
231
- parent_span_context=parent_span_context,
232
- model_provider=model_provider,
233
- model=model,
234
- agent_state=agent_state,
235
- storage_state=storage_state,
236
- # We always connect to stream, because our network configuration
237
- # has a hard fixed idle timeout of 350 seconds.
238
- # This means that if we don't stream, the connection will be closed.
239
- # For now, we just return the content of the final chunk if `stream` is
240
- # `False`.
241
- stream=True,
242
- enable_thinking=enable_thinking,
243
- return_screenshots=return_screenshots,
244
- return_agent_state=return_agent_state,
245
- return_storage_state=return_storage_state,
246
- disable_give_control=disable_give_control,
247
- user_agent=user_agent,
248
- timeout=timeout,
249
- cdp_url=cdp_url,
250
- max_steps=max_steps,
251
- thinking_token_budget=thinking_token_budget,
252
- start_url=start_url,
253
- )
254
-
255
- # For streaming case, use a generator function
256
- if stream:
257
- return self.__run_streaming(request)
258
- else:
259
- # For non-streaming case, process all chunks and return the final result
260
- return await self.__run_non_streaming(request)
261
-
262
- async def __run_streaming(
263
- self, request: RunAgentRequest
264
- ) -> AsyncGenerator[RunAgentResponseChunk, None]:
265
- """Run agent in streaming mode.
266
-
267
- Args:
268
- request (RunAgentRequest): The request to run the agent with.
269
-
270
- Yields:
271
- RunAgentResponseChunk: Chunks of the agent's response.
272
- """
273
- async with self._client.stream(
274
- "POST",
275
- self._base_url + "/v1/agent/run",
276
- json=request.model_dump(by_alias=True),
277
- headers=self._headers(),
278
- ) as response:
279
- if response.status_code != 200:
280
- raise RuntimeError(await response.read())
281
- async for line in response.aiter_lines():
282
- line = str(line)
283
- if line.startswith("[DONE]"):
284
- break
285
- if not line.startswith("data: "):
286
- continue
287
- line = line[6:]
288
- if line:
289
- chunk = RunAgentResponseChunk.model_validate_json(line)
290
- yield chunk.root
291
- if chunk.root.chunk_type in ["finalOutput", "error", "timeout"]:
292
- break
293
-
294
- async def __run_non_streaming(self, request: RunAgentRequest) -> AgentOutput:
295
- """Run agent in non-streaming mode.
296
-
297
- Args:
298
- request (RunAgentRequest): The request to run the agent with.
299
-
300
- Returns:
301
- AgentOutput: The agent's output.
302
- """
303
- final_chunk = None
304
-
305
- async with self._client.stream(
306
- "POST",
307
- self._base_url + "/v1/agent/run",
308
- json=request.model_dump(by_alias=True),
309
- headers=self._headers(),
310
- ) as response:
311
- if response.status_code != 200:
312
- raise RuntimeError(await response.read())
313
- async for line in response.aiter_lines():
314
- line = str(line)
315
- if line.startswith("[DONE]"):
316
- break
317
- if not line.startswith("data: "):
318
- continue
319
- line = line[6:]
320
- if line:
321
- chunk = RunAgentResponseChunk.model_validate_json(line)
322
- if chunk.root.chunk_type == "finalOutput":
323
- final_chunk = chunk.root
324
- elif chunk.root.chunk_type == "error":
325
- raise RuntimeError(chunk.root.error)
326
- elif chunk.root.chunk_type == "timeout":
327
- raise TimeoutError("Agent timed out")
328
-
329
- return final_chunk.content if final_chunk is not None else AgentOutput()
@@ -1,323 +0,0 @@
1
- """Agent resource for interacting with Laminar agents."""
2
-
3
- from typing import Generator, Literal, overload
4
- import uuid
5
-
6
- from lmnr.sdk.client.synchronous.resources.base import BaseResource
7
- from opentelemetry import trace
8
-
9
- from lmnr.sdk.types import (
10
- AgentOutput,
11
- LaminarSpanContext,
12
- ModelProvider,
13
- RunAgentRequest,
14
- RunAgentResponseChunk,
15
- )
16
-
17
-
18
- class Agent(BaseResource):
19
- """Resource for interacting with Laminar agents."""
20
-
21
- @overload
22
- def run(
23
- self,
24
- prompt: str,
25
- stream: Literal[True],
26
- parent_span_context: LaminarSpanContext | str | None = None,
27
- model_provider: ModelProvider | None = None,
28
- model: str | None = None,
29
- enable_thinking: bool = True,
30
- agent_state: str | None = None,
31
- storage_state: str | None = None,
32
- return_screenshots: bool = False,
33
- return_agent_state: bool = False,
34
- return_storage_state: bool = False,
35
- disable_give_control: bool = False,
36
- timeout: int | None = None,
37
- cdp_url: str | None = None,
38
- max_steps: int | None = None,
39
- thinking_token_budget: int | None = None,
40
- start_url: str | None = None,
41
- user_agent: str | None = None,
42
- ) -> Generator[RunAgentResponseChunk, None, None]:
43
- """Run Laminar index agent in streaming mode.
44
-
45
- Args:
46
- prompt (str): prompt for the agent
47
- stream (Literal[True]): whether to stream the agent's response
48
- parent_span_context (LaminarSpanContext | str | None, optional): span context if the agent is part of a trace
49
- model_provider (ModelProvider | None, optional): LLM model provider
50
- model (str | None, optional): LLM model name
51
- enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
52
- agent_state (str | None, optional): the agent's state as returned by the previous agent run. Default to None.
53
- storage_state (str | None, optional): the browser's storage state as returned by the previous agent run. Default to None.
54
- return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
55
- return_agent_state (bool, optional): whether to return the agent's state. Default to False.
56
- return_storage_state (bool, optional): whether to return the storage state. Default to False.
57
- disable_give_control (bool, optional): whether to NOT give the agent additional direction to give control to the user for tasks such as login. Default to False.
58
- timeout (int | None, optional): timeout seconds for the agent's response. Default to None.
59
- cdp_url (str | None, optional): Chrome DevTools Protocol URL of an existing browser session. Default to None.
60
- max_steps (int | None, optional): maximum number of steps the agent can take. If not set, the backend will use a default value (currently 100). Default to None.
61
- thinking_token_budget (int | None, optional): maximum number of tokens the underlying LLM can spend on thinking in each step, if supported by the model. Default to None.
62
- start_url (str | None, optional): the URL to start the agent on. Must be a valid URL - refer to https://playwright.dev/docs/api/class-page#page-goto. If not specified, the agent infers this from the prompt. Default to None.
63
- user_agent (str | None, optional): the user to be sent to the browser. If not specified, Laminar uses the default user agent. Default to None.
64
- Returns:
65
- Generator[RunAgentResponseChunk, None, None]: a generator of response chunks
66
- """
67
- pass
68
-
69
- @overload
70
- def run(
71
- self,
72
- prompt: str,
73
- parent_span_context: LaminarSpanContext | str | None = None,
74
- model_provider: ModelProvider | None = None,
75
- model: str | None = None,
76
- enable_thinking: bool = True,
77
- agent_state: str | None = None,
78
- storage_state: str | None = None,
79
- return_screenshots: bool = False,
80
- return_agent_state: bool = False,
81
- disable_give_control: bool = False,
82
- return_storage_state: bool = False,
83
- timeout: int | None = None,
84
- cdp_url: str | None = None,
85
- max_steps: int | None = None,
86
- thinking_token_budget: int | None = None,
87
- start_url: str | None = None,
88
- user_agent: str | None = None,
89
- ) -> AgentOutput:
90
- """Run Laminar index agent.
91
-
92
- Args:
93
- prompt (str): prompt for the agent
94
- parent_span_context (LaminarSpanContext | str | None, optional): span context if the agent is part of a trace
95
- model_provider (ModelProvider | None, optional): LLM model provider
96
- model (str | None, optional): LLM model name
97
- enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
98
- agent_state (str | None, optional): the agent's state as returned by the previous agent run. Default to None.
99
- storage_state (str | None, optional): the browser's storage state as returned by the previous agent run. Default to None.
100
- return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
101
- return_agent_state (bool, optional): whether to return the agent's state. Default to False.
102
- return_storage_state (bool, optional): whether to return the storage state. Default to False.
103
- disable_give_control (bool, optional): whether to NOT give the agent additional direction to give control to the user for tasks such as login. Default to False.
104
- timeout (int | None, optional): timeout seconds for the agent's response. Default to None.
105
- cdp_url (str | None, optional): Chrome DevTools Protocol URL of an existing browser session. Default to None.
106
- max_steps (int | None, optional): maximum number of steps the agent can take. If not set, the backend will use a default value (currently 100). Default to None.
107
- thinking_token_budget (int | None, optional): maximum number of tokens the underlying LLM can spend on thinking in each step, if supported by the model. Default to None.
108
- start_url (str | None, optional): the URL to start the agent on. Must be a valid URL - refer to https://playwright.dev/docs/api/class-page#page-goto. If not specified, the agent infers this from the prompt. Default to None.
109
- user_agent (str | None, optional): the user to be sent to the browser. If not specified, Laminar uses the default user agent. Default to None.
110
-
111
- Returns:
112
- AgentOutput: agent output
113
- """
114
- pass
115
-
116
@overload
def run(
    self,
    prompt: str,
    parent_span_context: LaminarSpanContext | str | None = None,
    model_provider: ModelProvider | None = None,
    model: str | None = None,
    stream: Literal[False] = False,
    enable_thinking: bool = True,
    agent_state: str | None = None,
    storage_state: str | None = None,
    return_screenshots: bool = False,
    return_agent_state: bool = False,
    return_storage_state: bool = False,
    disable_give_control: bool = False,
    timeout: int | None = None,
    cdp_url: str | None = None,
    max_steps: int | None = None,
    thinking_token_budget: int | None = None,
    start_url: str | None = None,
    user_agent: str | None = None,
) -> AgentOutput:
    """Run the Laminar Index agent without streaming.

    With ``stream=False`` (the default), the call blocks until the agent
    finishes and the complete result is returned.

    Args:
        prompt (str): prompt for the agent.
        parent_span_context (LaminarSpanContext | str | None, optional):
            span context if the agent run is part of an existing trace.
        model_provider (ModelProvider | None, optional): LLM model provider.
        model (str | None, optional): LLM model name.
        stream (Literal[False], optional): must be False for this overload;
            the agent's response is returned as a single value.
        enable_thinking (bool, optional): whether to enable thinking on the
            underlying LLM. Default to True.
        agent_state (str | None, optional): the agent's state as returned by
            a previous agent run. Default to None.
        storage_state (str | None, optional): the browser's storage state as
            returned by a previous agent run. Default to None.
        return_screenshots (bool, optional): whether to return screenshots of
            the agent's states at every step. Default to False.
        return_agent_state (bool, optional): whether to return the agent's
            state. Default to False.
        return_storage_state (bool, optional): whether to return the storage
            state. Default to False.
        disable_give_control (bool, optional): whether to NOT give the agent
            additional direction to give control to the user for tasks such
            as login. Default to False.
        timeout (int | None, optional): timeout seconds for the agent's
            response. Default to None.
        cdp_url (str | None, optional): Chrome DevTools Protocol URL of an
            existing browser session. Default to None.
        max_steps (int | None, optional): maximum number of steps the agent
            can take. If not set, the backend uses a default value
            (currently 100). Default to None.
        thinking_token_budget (int | None, optional): maximum number of
            tokens the underlying LLM can spend on thinking in each step, if
            supported by the model. Default to None.
        start_url (str | None, optional): the URL to start the agent on.
            Must be a valid URL - refer to
            https://playwright.dev/docs/api/class-page#page-goto. If not
            specified, the agent infers this from the prompt. Default to None.
        user_agent (str | None, optional): the user agent string to be sent
            to the browser. If not specified, Laminar uses the default user
            agent. Default to None.

    Returns:
        AgentOutput: agent output.
    """
    ...
165
def run(
    self,
    prompt: str,
    parent_span_context: LaminarSpanContext | str | None = None,
    model_provider: ModelProvider | None = None,
    model: str | None = None,
    stream: bool = False,
    enable_thinking: bool = True,
    agent_state: str | None = None,
    storage_state: str | None = None,
    return_screenshots: bool = False,
    return_agent_state: bool = False,
    return_storage_state: bool = False,
    disable_give_control: bool = False,
    timeout: int | None = None,
    cdp_url: str | None = None,
    max_steps: int | None = None,
    thinking_token_budget: int | None = None,
    start_url: str | None = None,
    user_agent: str | None = None,
) -> AgentOutput | Generator[RunAgentResponseChunk, None, None]:
    """Run the Laminar Index agent.

    Resolves the parent span context (falling back to the currently active
    OpenTelemetry span, if any), builds the request, and dispatches to the
    streaming or non-streaming execution path based on ``stream``.

    Args:
        prompt (str): prompt for the agent.
        parent_span_context (LaminarSpanContext | str | None, optional):
            span context if the agent run is part of an existing trace.
        model_provider (ModelProvider | None, optional): LLM model provider.
        model (str | None, optional): LLM model name.
        stream (bool, optional): whether to stream the agent's response.
        enable_thinking (bool, optional): whether to enable thinking on the
            underlying LLM. Default to True.
        agent_state (str | None, optional): the agent's state as returned by
            a previous agent run. Default to None.
        storage_state (str | None, optional): the browser's storage state as
            returned by a previous agent run. Default to None.
        return_screenshots (bool, optional): whether to return screenshots of
            the agent's states at every step. Default to False.
        return_agent_state (bool, optional): whether to return the agent's
            state. Default to False.
        return_storage_state (bool, optional): whether to return the storage
            state. Default to False.
        disable_give_control (bool, optional): whether to NOT give the agent
            additional direction to give control to the user for tasks such
            as login. Default to False.
        timeout (int | None, optional): timeout seconds for the agent's
            response. Default to None.
        cdp_url (str | None, optional): Chrome DevTools Protocol URL of an
            existing browser session. Default to None.
        max_steps (int | None, optional): maximum number of steps the agent
            can take. If not set, the backend uses a default value
            (currently 100). Default to None.
        thinking_token_budget (int | None, optional): maximum number of
            tokens the underlying LLM can spend on thinking in each step, if
            supported by the model. Default to None.
        start_url (str | None, optional): the URL to start the agent on.
            Must be a valid URL - refer to
            https://playwright.dev/docs/api/class-page#page-goto. If not
            specified, the agent infers this from the prompt. Default to None.
        user_agent (str | None, optional): the user agent string to be sent
            to the browser. If not specified, Laminar uses the default user
            agent. Default to None.

    Returns:
        AgentOutput | Generator[RunAgentResponseChunk, None, None]: agent
            output, or a generator of response chunks when streaming.
    """
    # No explicit parent given: inherit from the currently active span.
    if parent_span_context is None:
        current_span = trace.get_current_span()
        if current_span != trace.INVALID_SPAN:
            span_ctx = current_span.get_span_context()
            parent_span_context = LaminarSpanContext(
                trace_id=uuid.UUID(int=span_ctx.trace_id),
                span_id=uuid.UUID(int=span_ctx.span_id),
                is_remote=span_ctx.is_remote,
            )
    # The wire format expects a serialized span context.
    if isinstance(parent_span_context, LaminarSpanContext):
        parent_span_context = str(parent_span_context)

    agent_request = RunAgentRequest(
        prompt=prompt,
        parent_span_context=parent_span_context,
        model_provider=model_provider,
        model=model,
        agent_state=agent_state,
        storage_state=storage_state,
        # We always connect to stream, because our network configuration
        # has a hard fixed idle timeout of 350 seconds.
        # This means that if we don't stream, the connection will be closed.
        # For now, we just return the content of the final chunk if `stream` is
        # `False`.
        stream=True,
        enable_thinking=enable_thinking,
        return_screenshots=return_screenshots,
        return_agent_state=return_agent_state,
        return_storage_state=return_storage_state,
        timeout=timeout,
        cdp_url=cdp_url,
        max_steps=max_steps,
        thinking_token_budget=thinking_token_budget,
        start_url=start_url,
        disable_give_control=disable_give_control,
        user_agent=user_agent,
    )

    # Streaming callers get a generator; otherwise drain the stream and
    # hand back only the final result.
    if stream:
        return self.__run_streaming(agent_request)
    return self.__run_non_streaming(agent_request)
255
-
256
def __run_streaming(
    self, request: RunAgentRequest
) -> Generator[RunAgentResponseChunk, None, None]:
    """Run the agent and yield response chunks as they arrive.

    Parses the backend's SSE-style stream: lines prefixed with ``data: ``
    carry JSON chunks; ``[DONE]`` or a terminal chunk type ends the stream.

    Args:
        request (RunAgentRequest): The request to run the agent with.

    Yields:
        RunAgentResponseChunk: Chunks of the agent's response.
    """
    with self._client.stream(
        "POST",
        self._base_url + "/v1/agent/run",
        json=request.model_dump(by_alias=True),
        headers=self._headers(),
    ) as response:
        if response.status_code != 200:
            raise RuntimeError(response.read())
        for raw_line in response.iter_lines():
            text = str(raw_line)
            if text.startswith("[DONE]"):
                break
            if not text.startswith("data: "):
                continue
            payload = text[6:]  # strip the "data: " prefix
            if not payload:
                continue
            chunk = RunAgentResponseChunk.model_validate_json(payload)
            yield chunk.root
            # Terminal chunk types: nothing meaningful follows them.
            if chunk.root.chunk_type in ("finalOutput", "error", "timeout"):
                break
287
-
288
def __run_non_streaming(self, request: RunAgentRequest) -> AgentOutput:
    """Run the agent and return only the final output.

    The connection is always streamed (to avoid the backend's idle
    timeout), so this method drains the SSE-style stream and extracts the
    terminal chunk.

    Args:
        request (RunAgentRequest): The request to run the agent with.

    Returns:
        AgentOutput: content of the ``finalOutput`` chunk, or an empty
            ``AgentOutput()`` if the stream ended without one.

    Raises:
        RuntimeError: if the HTTP status is not 200, or the agent reports
            an error chunk.
        TimeoutError: if the agent reports a timeout chunk.
    """
    final_chunk = None

    with self._client.stream(
        "POST",
        self._base_url + "/v1/agent/run",
        json=request.model_dump(by_alias=True),
        headers=self._headers(),
    ) as response:
        if response.status_code != 200:
            raise RuntimeError(response.read())
        for line in response.iter_lines():
            line = str(line)
            if line.startswith("[DONE]"):
                break
            if not line.startswith("data: "):
                continue
            line = line[6:]  # strip the "data: " prefix
            if line:
                chunk = RunAgentResponseChunk.model_validate_json(line)
                if chunk.root.chunk_type == "finalOutput":
                    final_chunk = chunk.root
                    # Consistent with __run_streaming: finalOutput is
                    # terminal, so stop reading instead of idling until
                    # the server sends [DONE].
                    break
                elif chunk.root.chunk_type == "error":
                    raise RuntimeError(chunk.root.error)
                elif chunk.root.chunk_type == "timeout":
                    raise TimeoutError("Agent timed out")

    return final_chunk.content if final_chunk is not None else AgentOutput()