lmnr 0.5.1__py3-none-any.whl → 0.5.2__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
@@ -0,0 +1,216 @@
+ import logging
+ import traceback
+ import json
+
+ from .config import (
+     Config,
+ )
+ from google.genai import types
+ from google.genai._common import BaseModel
+ import pydantic
+ from opentelemetry.trace import Span
+ from typing import Any, Optional, Union
+
+
+ def set_span_attribute(span: Span, name: str, value: str):
+     if value is not None:
+         if value != "":
+             span.set_attribute(name, value)
+     return
+
+
+ def dont_throw(func):
+     """
+     A decorator that wraps the passed in function and logs exceptions instead of throwing them.
+
+     @param func: The function to wrap
+     @return: The wrapper function
+     """
+     # Obtain a logger specific to the function's module
+     logger = logging.getLogger(func.__module__)
+
+     def wrapper(*args, **kwargs):
+         try:
+             return func(*args, **kwargs)
+         except Exception as e:
+             logger.debug(
+                 "OpenLLMetry failed to trace in %s, error: %s",
+                 func.__name__,
+                 traceback.format_exc(),
+             )
+             if Config.exception_logger:
+                 Config.exception_logger(e)
+
+     return wrapper
+
+
+ def to_dict(obj: Union[BaseModel, pydantic.BaseModel, dict]) -> dict[str, Any]:
+     try:
+         if isinstance(obj, BaseModel):
+             return obj.model_dump()
+         elif isinstance(obj, pydantic.BaseModel):
+             return obj.model_dump()
+         elif isinstance(obj, dict):
+             return obj
+         else:
+             return dict(obj)
+     except Exception:
+         return dict(obj)
+
+
+ def process_content_union(
+     content: Union[types.ContentUnion, types.ContentUnionDict],
+     trace_id: Optional[str] = None,
+     span_id: Optional[str] = None,
+     message_index: int = 0,
+ ) -> Optional[str]:
+     parts = _process_content_union(content, trace_id, span_id, message_index)
+     if parts is None:
+         return None
+     if isinstance(parts, str):
+         return parts
+     elif isinstance(parts, list):
+         if len(parts) == 1 and isinstance(parts[0], str):
+             return parts[0]
+         return json.dumps(
+             [
+                 {"type": "text", "text": part} if isinstance(part, str) else part
+                 for part in parts
+             ]
+         )
+     else:
+         return None
+
+
+ def _process_content_union(
+     content: Union[types.ContentUnion, types.ContentUnionDict],
+     trace_id: Optional[str] = None,
+     span_id: Optional[str] = None,
+     message_index: int = 0,
+ ) -> Union[str, list[str], None]:
+     if isinstance(content, types.Content):
+         parts = to_dict(content).get("parts", [])
+         return [_process_part(part) for part in parts]
+     elif isinstance(content, list):
+         return [_process_part_union(item) for item in content]
+     elif isinstance(content, (types.Part, types.File, str)):
+         return _process_part_union(content)
+     elif isinstance(content, dict):
+         if "parts" in content:
+             return [
+                 _process_part_union(
+                     item, trace_id, span_id, message_index, content_index
+                 )
+                 for content_index, item in enumerate(content.get("parts", []))
+             ]
+         else:
+             # Assume it's PartDict
+             return _process_part_union(content, trace_id, span_id, message_index)
+     else:
+         return None
+
+
+ def _process_part_union(
+     content: Union[types.PartDict, types.File, types.Part, str],
+     trace_id: Optional[str] = None,
+     span_id: Optional[str] = None,
+     message_index: int = 0,
+     content_index: int = 0,
+ ) -> Optional[str]:
+     if isinstance(content, str):
+         return content
+     elif isinstance(content, types.File):
+         content_dict = to_dict(content)
+         name = (
+             content_dict.get("name")
+             or content_dict.get("display_name")
+             or content_dict.get("uri")
+         )
+         return f"files/{name}"
+     elif isinstance(content, (types.Part, dict)):
+         return _process_part(content, trace_id, span_id, message_index, content_index)
+     else:
+         return None
+
+
+ def _process_part(
+     content: types.Part,
+     trace_id: Optional[str] = None,
+     span_id: Optional[str] = None,
+     message_index: int = 0,
+     content_index: int = 0,
+ ) -> Optional[str]:
+     part_dict = to_dict(content)
+     if part_dict.get("text") is not None:
+         return part_dict.get("text")
+     elif part_dict.get("inline_data"):
+         blob = to_dict(part_dict.get("inline_data"))
+         if blob.get("mime_type").startswith("image/"):
+             return _process_image_item(
+                 blob, trace_id, span_id, message_index, content_index
+             )
+         else:
+             # currently, only images are supported
+             return blob.get("mime_type") or "unknown_media"
+     else:
+         return None
+
+
+ def role_from_content_union(
+     content: Union[types.ContentUnion, types.ContentUnionDict],
+ ) -> Optional[str]:
+     if isinstance(content, types.Content):
+         return to_dict(content).get("role")
+     elif isinstance(content, list) and len(content) > 0:
+         return role_from_content_union(content[0])
+     else:
+         return None
+
+
+ def with_tracer_wrapper(func):
+     """Helper for providing tracer for wrapper functions."""
+
+     def _with_tracer(tracer, to_wrap):
+         def wrapper(wrapped, instance, args, kwargs):
+             return func(tracer, to_wrap, wrapped, instance, args, kwargs)
+
+         return wrapper
+
+     return _with_tracer
+
+
+ def _run_async(method):
+     import asyncio
+     import threading
+
+     try:
+         loop = asyncio.get_running_loop()
+     except RuntimeError:
+         loop = None
+
+     if loop and loop.is_running():
+         thread = threading.Thread(target=lambda: asyncio.run(method))
+         thread.start()
+         thread.join()
+     else:
+         asyncio.run(method)
+
+
+ def _process_image_item(
+     blob: dict[str, Any],
+     trace_id: str,
+     span_id: str,
+     message_index: int,
+     content_index: int,
+ ):
+     # Convert to openai format, so backends can handle it
+     return (
+         {
+             "type": "image_url",
+             "image_url": {
+                 "url": f"data:image/{blob.get('mime_type').split('/')[1]};base64,{blob.get('data')}",
+             },
+         }
+         if Config.convert_image_to_openai_format
+         else blob
+     )
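Taken together, these helpers flatten a google-genai content value into the string recorded on a span: text parts pass through, multi-part content is JSON-encoded as OpenAI-style parts, and inline images are rewritten as data URLs when `Config.convert_image_to_openai_format` is set. A minimal sketch of the expected behavior, assuming the helpers are imported from this new module (its path is not shown in the diff) and using google-genai's `types.Content` and `types.Part.from_text` constructors:

    from google.genai import types

    # A bare string passes through unchanged.
    assert process_content_union("hello") == "hello"

    # Content with a single text part collapses to that text,
    # and the role is read off the Content object.
    content = types.Content(role="user", parts=[types.Part.from_text(text="hi")])
    assert process_content_union(content) == "hi"
    assert role_from_content_union(content) == "user"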
@@ -84,6 +84,8 @@ class TracerWrapper(object):
      __span_id_lists: dict[int, list[str]] = {}
      __client: LaminarClient = None
      __async_client: AsyncLaminarClient = None
+     __spans_processor: SpanProcessor = None
+     __spans_exporter: SpanExporter = None

      def __new__(
          cls,
@@ -115,10 +117,10 @@ class TracerWrapper(object):
          obj.__resource = Resource(attributes=TracerWrapper.resource_attributes)
          obj.__tracer_provider = init_tracer_provider(resource=obj.__resource)
          if processor:
-             obj.__spans_processor: SpanProcessor = processor
+             obj.__spans_processor = processor
              obj.__spans_processor_original_on_start = processor.on_start
          else:
-             obj.__spans_exporter: SpanExporter = (
+             obj.__spans_exporter = (
                  exporter
                  if exporter
                  else init_spans_exporter(
@@ -126,11 +128,9 @@ class TracerWrapper(object):
                  )
              )
              if disable_batch or is_notebook():
-                 obj.__spans_processor: SpanProcessor = SimpleSpanProcessor(
-                     obj.__spans_exporter
-                 )
+                 obj.__spans_processor = SimpleSpanProcessor(obj.__spans_exporter)
              else:
-                 obj.__spans_processor: SpanProcessor = BatchSpanProcessor(
+                 obj.__spans_processor = BatchSpanProcessor(
                      obj.__spans_exporter,
                      max_export_batch_size=max_export_batch_size,
                  )
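The branch above encodes a deployment concern: `SimpleSpanProcessor` exports every span synchronously on the calling thread (safer in notebooks, where background threads may never get a chance to flush), while `BatchSpanProcessor` queues spans and exports them in batches. A standalone sketch of the same decision in plain OpenTelemetry terms, with `pick_processor` as a hypothetical helper name:

    from typing import Optional

    from opentelemetry.sdk.trace.export import (
        BatchSpanProcessor,
        SimpleSpanProcessor,
        SpanExporter,
    )

    def pick_processor(
        exporter: SpanExporter,
        disable_batch: bool,
        max_export_batch_size: Optional[int] = None,
    ):
        # Synchronous export when batching is disabled; otherwise buffer
        # spans and export them in background batches.
        if disable_batch:
            return SimpleSpanProcessor(exporter)
        return BatchSpanProcessor(exporter, max_export_batch_size=max_export_batch_size)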
@@ -237,8 +237,6 @@ class TracerWrapper(object):
          cls.__span_id_lists = {}

      def shutdown(self):
-         self.__spans_processor.force_flush()
-         self.__spans_processor.shutdown()
          self.__tracer_provider.shutdown()

      def flush(self):
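Dropping the explicit flush and shutdown should be safe as long as the processor is registered with the tracer provider, which the surrounding code suggests: in the OpenTelemetry SDK, `TracerProvider.shutdown()` shuts down its registered span processors, and those processors flush as part of their own shutdown. A minimal sketch of that behavior:

    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor

    provider = TracerProvider()
    provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
    # Shutting down the provider also shuts down the registered processor,
    # so a separate processor.shutdown() call beforehand is redundant.
    provider.shutdown()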
@@ -382,6 +380,9 @@ def init_instrumentations(
        elif instrument == Instruments.GOOGLE_GENERATIVEAI:
            if init_google_generativeai_instrumentor():
                instrument_set = True
+       elif instrument == Instruments.GOOGLE_GENAI:
+           if init_google_genai_instrumentor():
+               instrument_set = True
        elif instrument == Instruments.GROQ:
            if init_groq_instrumentor():
                instrument_set = True
@@ -622,6 +623,27 @@ def init_google_generativeai_instrumentor():
      return False


+ def init_google_genai_instrumentor():
+     try:
+         if is_package_installed("google-genai"):
+             # TODO: uncomment this once we migrate to the contrib package
+             # and is_package_installed(
+             #     "opentelemetry-instrumentation-google-genai"
+             # ):
+             # from opentelemetry.instrumentation.google_genai import (
+             from ..opentelemetry.instrumentation.google_genai import (
+                 GoogleGenAiSdkInstrumentor,
+             )
+
+             instrumentor = GoogleGenAiSdkInstrumentor()
+             if not instrumentor.is_instrumented_by_opentelemetry:
+                 instrumentor.instrument()
+             return True
+     except Exception as e:
+         module_logger.error(f"Error initializing Google GenAI instrumentor: {e}")
+     return False
+
+
  def init_haystack_instrumentor():
      try:
          if is_package_installed("haystack") and is_package_installed(
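With the new `Instruments.GOOGLE_GENAI` member wired into `init_instrumentations`, the instrumentor can be opted into at startup. A sketch, assuming the SDK's usual `Laminar.initialize(..., instruments=...)` entry point (the enum member name comes from the hunk above; the initialize signature itself is not shown in this diff):

    from lmnr import Instruments, Laminar

    # Enable only the google-genai instrumentation; the initializer is a
    # no-op for this instrument when the google-genai package is absent.
    Laminar.initialize(
        project_api_key="...",
        instruments={Instruments.GOOGLE_GENAI},
    )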
@@ -11,8 +11,6 @@ from lmnr.sdk.client.asynchronous.resources import (
      AsyncAgent,
      AsyncBrowserEvents,
      AsyncEvals,
-     AsyncPipeline,
-     AsyncSemanticSearch,
  )
  from lmnr.sdk.utils import from_env

@@ -66,12 +64,6 @@ class AsyncLaminarClient:
          )

          # Initialize resource objects
-         self.__pipeline = AsyncPipeline(
-             self.__client, self.__base_url, self.__project_api_key
-         )
-         self.__semantic_search = AsyncSemanticSearch(
-             self.__client, self.__base_url, self.__project_api_key
-         )
          self.__agent = AsyncAgent(
              self.__client, self.__base_url, self.__project_api_key
          )
@@ -82,24 +74,6 @@ class AsyncLaminarClient:
              self.__client, self.__base_url, self.__project_api_key
          )

-     @property
-     def pipeline(self) -> AsyncPipeline:
-         """Get the Pipeline resource.
-
-         Returns:
-             Pipeline: The Pipeline resource instance.
-         """
-         return self.__pipeline
-
-     @property
-     def semantic_search(self) -> AsyncSemanticSearch:
-         """Get the SemanticSearch resource.
-
-         Returns:
-             SemanticSearch: The SemanticSearch resource instance.
-         """
-         return self.__semantic_search
-
      @property
      def agent(self) -> AsyncAgent:
          """Get the Agent resource.
@@ -1,12 +1,8 @@
  from lmnr.sdk.client.asynchronous.resources.agent import AsyncAgent
  from lmnr.sdk.client.asynchronous.resources.browser_events import AsyncBrowserEvents
  from lmnr.sdk.client.asynchronous.resources.evals import AsyncEvals
- from lmnr.sdk.client.asynchronous.resources.pipeline import AsyncPipeline
- from lmnr.sdk.client.asynchronous.resources.semantic_search import AsyncSemanticSearch

  __all__ = [
-     "AsyncPipeline",
-     "AsyncSemanticSearch",
      "AsyncAgent",
      "AsyncEvals",
      "AsyncBrowserEvents",
@@ -35,7 +35,16 @@ class AsyncAgent(BaseAsyncResource):
          model_provider: Optional[ModelProvider] = None,
          model: Optional[str] = None,
          enable_thinking: bool = True,
+         agent_state: Optional[str] = None,
+         storage_state: Optional[str] = None,
          return_screenshots: bool = False,
+         return_agent_state: bool = False,
+         return_storage_state: bool = False,
+         timeout: Optional[int] = None,
+         cdp_url: Optional[str] = None,
+         max_steps: Optional[int] = None,
+         thinking_token_budget: Optional[int] = None,
+         start_url: Optional[str] = None,
      ) -> AsyncIterator[RunAgentResponseChunk]:
          """Run Laminar index agent in streaming mode.

@@ -46,7 +55,17 @@ class AsyncAgent(BaseAsyncResource):
              model_provider (Optional[ModelProvider], optional): LLM model provider
              model (Optional[str], optional): LLM model name
              enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
+             agent_state (Optional[str], optional): the agent's state as returned by the previous agent run. Default to None.
+             storage_state (Optional[str], optional): the browser's storage state as returned by the previous agent run. Default to None.
              return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
+             return_agent_state (bool, optional): whether to return the agent's state in the final chunk. Default to False.
+             return_storage_state (bool, optional): whether to return the storage state in the final chunk. Default to False.
+             timeout (Optional[int], optional): timeout seconds for the agent's response. Default to None.
+             cdp_url (Optional[str], optional): Chrome DevTools Protocol URL of an existing browser session. Default to None.
+             max_steps (Optional[int], optional): maximum number of steps the agent can take. If not set, the backend will use a default value (currently 100). Default to None.
+             thinking_token_budget (Optional[int], optional): maximum number of tokens the underlying LLM can spend on thinking in each step, if supported by the model. Default to None.
+             start_url (Optional[str], optional): the URL to start the agent on. Must be a valid URL - refer to https://playwright.dev/docs/api/class-page#page-goto. If not specified, the agent infers this from the prompt. Default to None.
+
          Returns:
              AsyncIterator[RunAgentResponseChunk]: a generator of response chunks
          """
@@ -60,7 +79,16 @@ class AsyncAgent(BaseAsyncResource):
          model_provider: Optional[ModelProvider] = None,
          model: Optional[str] = None,
          enable_thinking: bool = True,
+         agent_state: Optional[str] = None,
+         storage_state: Optional[str] = None,
          return_screenshots: bool = False,
+         return_agent_state: bool = False,
+         return_storage_state: bool = False,
+         timeout: Optional[int] = None,
+         cdp_url: Optional[str] = None,
+         max_steps: Optional[int] = None,
+         thinking_token_budget: Optional[int] = None,
+         start_url: Optional[str] = None,
      ) -> AgentOutput:
          """Run Laminar index agent.

@@ -70,7 +98,17 @@ class AsyncAgent(BaseAsyncResource):
              model_provider (Optional[ModelProvider], optional): LLM model provider
              model (Optional[str], optional): LLM model name
              enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
+             agent_state (Optional[str], optional): the agent's state as returned by the previous agent run. Default to None.
+             storage_state (Optional[str], optional): the browser's storage state as returned by the previous agent run. Default to None.
              return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
+             return_agent_state (bool, optional): whether to return the agent's state. Default to False.
+             return_storage_state (bool, optional): whether to return the storage state. Default to False.
+             timeout (Optional[int], optional): timeout seconds for the agent's response. Default to None.
+             cdp_url (Optional[str], optional): Chrome DevTools Protocol URL of an existing browser session. Default to None.
+             max_steps (Optional[int], optional): maximum number of steps the agent can take. If not set, the backend will use a default value (currently 100). Default to None.
+             thinking_token_budget (Optional[int], optional): maximum number of tokens the underlying LLM can spend on thinking in each step, if supported by the model. Default to None.
+             start_url (Optional[str], optional): the URL to start the agent on. Must be a valid URL - refer to https://playwright.dev/docs/api/class-page#page-goto. If not specified, the agent infers this from the prompt. Default to None.
+
          Returns:
              AgentOutput: agent output
          """
@@ -85,7 +123,15 @@ class AsyncAgent(BaseAsyncResource):
          model: Optional[str] = None,
          stream: Literal[False] = False,
          enable_thinking: bool = True,
+         agent_state: Optional[str] = None,
+         storage_state: Optional[str] = None,
          return_screenshots: bool = False,
+         return_agent_state: bool = False,
+         return_storage_state: bool = False,
+         timeout: Optional[int] = None,
+         max_steps: Optional[int] = None,
+         thinking_token_budget: Optional[int] = None,
+         start_url: Optional[str] = None,
      ) -> AgentOutput:
          """Run Laminar index agent.

@@ -96,7 +142,16 @@ class AsyncAgent(BaseAsyncResource):
              model (Optional[str], optional): LLM model name
              stream (Literal[False], optional): whether to stream the agent's response
              enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
+             agent_state (Optional[str], optional): the agent's state as returned by the previous agent run. Default to None.
+             storage_state (Optional[str], optional): the browser's storage state as returned by the previous agent run. Default to None.
              return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
+             return_agent_state (bool, optional): whether to return the agent's state. Default to False.
+             return_storage_state (bool, optional): whether to return the storage state. Default to False.
+             timeout (Optional[int], optional): timeout seconds for the agent's response. Default to None.
+             cdp_url (Optional[str], optional): Chrome DevTools Protocol URL of an existing browser session. Default to None.
+             max_steps (Optional[int], optional): maximum number of steps the agent can take. If not set, the backend will use a default value (currently 100). Default to None.
+             thinking_token_budget (Optional[int], optional): maximum number of tokens the underlying LLM can spend on thinking in each step, if supported by the model. Default to None.
+             start_url (Optional[str], optional): the URL to start the agent on. Must be a valid URL - refer to https://playwright.dev/docs/api/class-page#page-goto. If not specified, the agent infers this from the prompt. Default to None.
          Returns:
              AgentOutput: agent output
          """
@@ -110,7 +165,16 @@ class AsyncAgent(BaseAsyncResource):
          model: Optional[str] = None,
          stream: bool = False,
          enable_thinking: bool = True,
+         agent_state: Optional[str] = None,
+         storage_state: Optional[str] = None,
          return_screenshots: bool = False,
+         return_agent_state: bool = False,
+         return_storage_state: bool = False,
+         timeout: Optional[int] = None,
+         cdp_url: Optional[str] = None,
+         max_steps: Optional[int] = None,
+         thinking_token_budget: Optional[int] = None,
+         start_url: Optional[str] = None,
      ) -> Union[AgentOutput, Awaitable[AsyncIterator[RunAgentResponseChunk]]]:
          """Run Laminar index agent.

@@ -121,7 +185,17 @@ class AsyncAgent(BaseAsyncResource):
              model (Optional[str], optional): LLM model name
              stream (bool, optional): whether to stream the agent's response
              enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
+             agent_state (Optional[str], optional): the agent's state as returned by the previous agent run. Default to None.
+             storage_state (Optional[str], optional): the browser's storage state as returned by the previous agent run. Default to None.
              return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
+             return_agent_state (bool, optional): whether to return the agent's state. Default to False.
+             return_storage_state (bool, optional): whether to return the storage state. Default to False.
+             timeout (Optional[int], optional): timeout seconds for the agent's response. Default to None.
+             cdp_url (Optional[str], optional): Chrome DevTools Protocol URL of an existing browser session. Default to None.
+             max_steps (Optional[int], optional): maximum number of steps the agent can take. If not set, the backend will use a default value (currently 100). Default to None.
+             thinking_token_budget (Optional[int], optional): maximum number of tokens the underlying LLM can spend on thinking in each step, if supported by the model. Default to None.
+             start_url (Optional[str], optional): the URL to start the agent on. Must be a valid URL - refer to https://playwright.dev/docs/api/class-page#page-goto. If not specified, the agent infers this from the prompt. Default to None.
+
          Returns:
              Union[AgentOutput, AsyncIterator[RunAgentResponseChunk]]: agent output or a generator of response chunks
          """
@@ -142,15 +216,23 @@ class AsyncAgent(BaseAsyncResource):
              parent_span_context=parent_span_context,
              model_provider=model_provider,
              model=model,
-             # We always connect to stream, because our TLS listeners on AWS
-             # Network load balancers have a hard fixed idle timeout of 350 seconds.
+             agent_state=agent_state,
+             storage_state=storage_state,
+             # We always connect to stream, because our network configuration
+             # has a hard fixed idle timeout of 350 seconds.
              # This means that if we don't stream, the connection will be closed.
              # For now, we just return the content of the final chunk if `stream` is
              # `False`.
-             # https://aws.amazon.com/blogs/networking-and-content-delivery/introducing-nlb-tcp-configurable-idle-timeout/
              stream=True,
              enable_thinking=enable_thinking,
              return_screenshots=return_screenshots,
+             return_agent_state=return_agent_state,
+             return_storage_state=return_storage_state,
+             timeout=timeout,
+             cdp_url=cdp_url,
+             max_steps=max_steps,
+             thinking_token_budget=thinking_token_budget,
+             start_url=start_url,
          )

          # For streaming case, use a generator function
@@ -174,7 +256,7 @@ class AsyncAgent(BaseAsyncResource):
          async with self._client.stream(
              "POST",
              self._base_url + "/v1/agent/run",
-             json=request.to_dict(),
+             json=request.model_dump(by_alias=True),
              headers=self._headers(),
          ) as response:
              async for line in response.aiter_lines():
@@ -187,6 +269,8 @@ class AsyncAgent(BaseAsyncResource):
                  if line:
                      chunk = RunAgentResponseChunk.model_validate_json(line)
                      yield chunk.root
+                     if chunk.root.chunk_type in ["finalOutput", "error"]:
+                         break

      async def __run_non_streaming(self, request: RunAgentRequest) -> AgentOutput:
          """Run agent in non-streaming mode.
@@ -202,9 +286,11 @@ class AsyncAgent(BaseAsyncResource):
          async with self._client.stream(
              "POST",
              self._base_url + "/v1/agent/run",
-             json=request.to_dict(),
+             json=request.model_dump(by_alias=True),
              headers=self._headers(),
          ) as response:
+             if response.status_code != 200:
+                 raise RuntimeError(await response.read())
              async for line in response.aiter_lines():
                  line = str(line)
                  if line.startswith("[DONE]"):
@@ -214,7 +300,11 @@ class AsyncAgent(BaseAsyncResource):
                  line = line[6:]
                  if line:
                      chunk = RunAgentResponseChunk.model_validate_json(line)
-                     if chunk.root.chunkType == "finalOutput":
+                     if chunk.root.chunk_type == "finalOutput":
                          final_chunk = chunk.root
+                     elif chunk.root.chunk_type == "error":
+                         raise RuntimeError(chunk.root.error)
+                     elif chunk.root.chunk_type == "timeout":
+                         raise TimeoutError("Agent timed out")

          return final_chunk.content if final_chunk is not None else AgentOutput()
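In the non-streaming path, errors and timeouts now surface as exceptions (`RuntimeError` / `TimeoutError`) rather than falling through to an empty `AgentOutput()`. A sketch of a bounded run using the new parameters, reusing a `client` constructed as in the previous sketch, inside an async context:

    # Cap the run at 25 steps and 300 seconds, start from a known page,
    # and request the browser storage state back for a follow-up run.
    output = await client.agent.run(
        prompt="Find the documentation page and summarize it",
        start_url="https://example.com",
        max_steps=25,
        timeout=300,
        return_storage_state=True,
    )
    print(output)  # AgentOutput; failures raise RuntimeError or TimeoutError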
@@ -1,7 +1,5 @@
  from lmnr.sdk.client.synchronous.resources.agent import Agent
  from lmnr.sdk.client.synchronous.resources.browser_events import BrowserEvents
  from lmnr.sdk.client.synchronous.resources.evals import Evals
- from lmnr.sdk.client.synchronous.resources.pipeline import Pipeline
- from lmnr.sdk.client.synchronous.resources.semantic_search import SemanticSearch

- __all__ = ["Pipeline", "SemanticSearch", "Agent", "Evals", "BrowserEvents"]
+ __all__ = ["Agent", "Evals", "BrowserEvents"]