grasp_agents 0.4.6__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. grasp_agents/cloud_llm.py +191 -218
  2. grasp_agents/comm_processor.py +101 -100
  3. grasp_agents/errors.py +69 -9
  4. grasp_agents/litellm/__init__.py +106 -0
  5. grasp_agents/litellm/completion_chunk_converters.py +68 -0
  6. grasp_agents/litellm/completion_converters.py +72 -0
  7. grasp_agents/litellm/converters.py +138 -0
  8. grasp_agents/litellm/lite_llm.py +210 -0
  9. grasp_agents/litellm/message_converters.py +66 -0
  10. grasp_agents/llm.py +84 -49
  11. grasp_agents/llm_agent.py +136 -120
  12. grasp_agents/llm_agent_memory.py +3 -3
  13. grasp_agents/llm_policy_executor.py +167 -174
  14. grasp_agents/memory.py +4 -0
  15. grasp_agents/openai/__init__.py +24 -9
  16. grasp_agents/openai/completion_chunk_converters.py +6 -6
  17. grasp_agents/openai/completion_converters.py +12 -14
  18. grasp_agents/openai/content_converters.py +1 -3
  19. grasp_agents/openai/converters.py +6 -8
  20. grasp_agents/openai/message_converters.py +21 -3
  21. grasp_agents/openai/openai_llm.py +155 -103
  22. grasp_agents/openai/tool_converters.py +4 -6
  23. grasp_agents/packet.py +5 -2
  24. grasp_agents/packet_pool.py +14 -13
  25. grasp_agents/printer.py +234 -72
  26. grasp_agents/processor.py +228 -88
  27. grasp_agents/prompt_builder.py +2 -2
  28. grasp_agents/run_context.py +11 -20
  29. grasp_agents/runner.py +42 -0
  30. grasp_agents/typing/completion.py +16 -9
  31. grasp_agents/typing/completion_chunk.py +51 -22
  32. grasp_agents/typing/events.py +95 -19
  33. grasp_agents/typing/message.py +25 -1
  34. grasp_agents/typing/tool.py +2 -0
  35. grasp_agents/usage_tracker.py +31 -37
  36. grasp_agents/utils.py +95 -84
  37. grasp_agents/workflow/looped_workflow.py +60 -11
  38. grasp_agents/workflow/sequential_workflow.py +43 -11
  39. grasp_agents/workflow/workflow_processor.py +25 -24
  40. {grasp_agents-0.4.6.dist-info → grasp_agents-0.5.0.dist-info}/METADATA +7 -6
  41. grasp_agents-0.5.0.dist-info/RECORD +57 -0
  42. grasp_agents-0.4.6.dist-info/RECORD +0 -50
  43. {grasp_agents-0.4.6.dist-info → grasp_agents-0.5.0.dist-info}/WHEEL +0 -0
  44. {grasp_agents-0.4.6.dist-info → grasp_agents-0.5.0.dist-info}/licenses/LICENSE.md +0 -0
grasp_agents/llm_agent.py CHANGED
@@ -1,12 +1,12 @@
1
1
  from collections.abc import AsyncIterator, Sequence
2
2
  from pathlib import Path
3
- from typing import Any, ClassVar, Generic, Protocol, TypeVar
3
+ from typing import Any, ClassVar, Generic, Protocol, TypeVar, cast
4
4
 
5
5
  from pydantic import BaseModel
6
6
 
7
7
  from .comm_processor import CommProcessor
8
8
  from .llm import LLM, LLMSettings
9
- from .llm_agent_memory import LLMAgentMemory, MakeMemoryHandler
9
+ from .llm_agent_memory import LLMAgentMemory, PrepareMemoryHandler
10
10
  from .llm_policy_executor import (
11
11
  ExitToolCallLoopHandler,
12
12
  LLMPolicyExecutor,
@@ -21,7 +21,12 @@ from .prompt_builder import (
21
21
  from .run_context import CtxT, RunContext
22
22
  from .typing.content import Content, ImageData
23
23
  from .typing.converters import Converters
24
- from .typing.events import Event, ProcOutputEvent, SystemMessageEvent, UserMessageEvent
24
+ from .typing.events import (
25
+ Event,
26
+ ProcPayloadOutputEvent,
27
+ SystemMessageEvent,
28
+ UserMessageEvent,
29
+ )
25
30
  from .typing.io import InT, LLMPrompt, LLMPromptArgs, OutT_co, ProcName
26
31
  from .typing.message import Message, Messages, SystemMessage, UserMessage
27
32
  from .typing.tool import BaseTool
@@ -75,7 +80,7 @@ class LLMAgent(
75
80
  # Agent memory management
76
81
  reset_memory_on_run: bool = False,
77
82
  # Retries
78
- num_par_run_retries: int = 0,
83
+ max_retries: int = 0,
79
84
  # Multi-agent routing
80
85
  packet_pool: PacketPool[CtxT] | None = None,
81
86
  recipients: list[ProcName] | None = None,
@@ -84,7 +89,7 @@ class LLMAgent(
84
89
  name=name,
85
90
  packet_pool=packet_pool,
86
91
  recipients=recipients,
87
- num_par_run_retries=num_par_run_retries,
92
+ max_retries=max_retries,
88
93
  )
89
94
 
90
95
  # Agent memory
@@ -94,19 +99,28 @@ class LLMAgent(
94
99
 
95
100
  # LLM policy executor
96
101
 
97
- self._used_default_llm_response_format: bool = False
98
- if llm.response_format is None and tools is None:
99
- llm.response_format = self.out_type
100
- self._used_default_llm_response_format = True
102
+ self._used_default_llm_response_schema: bool = False
103
+ if llm.response_schema is None and tools is None:
104
+ llm.response_schema = self.out_type
105
+ self._used_default_llm_response_schema = True
106
+
107
+ if issubclass(self._out_type, BaseModel):
108
+ final_answer_type = self._out_type
109
+ elif not final_answer_as_tool_call:
110
+ final_answer_type = BaseModel
111
+ else:
112
+ raise TypeError(
113
+ "Final answer type must be a subclass of BaseModel if "
114
+ "final_answer_as_tool_call is True."
115
+ )
101
116
 
102
- self._policy_executor: LLMPolicyExecutor[OutT_co, CtxT] = LLMPolicyExecutor[
103
- self.out_type, CtxT
104
- ](
117
+ self._policy_executor: LLMPolicyExecutor[CtxT] = LLMPolicyExecutor[CtxT](
105
118
  agent_name=self.name,
106
119
  llm=llm,
107
120
  tools=tools,
108
121
  max_turns=max_turns,
109
122
  react_mode=react_mode,
123
+ final_answer_type=final_answer_type,
110
124
  final_answer_as_tool_call=final_answer_as_tool_call,
111
125
  )
112
126
 
@@ -124,9 +138,7 @@ class LLMAgent(
124
138
  usr_args_schema=usr_args_schema,
125
139
  )
126
140
 
127
- # self.no_tqdm = getattr(llm, "no_tqdm", False)
128
-
129
- self._make_memory_impl: MakeMemoryHandler | None = None
141
+ self._prepare_memory_impl: PrepareMemoryHandler | None = None
130
142
  self._parse_output_impl: ParseOutputHandler[InT, OutT_co, CtxT] | None = None
131
143
  self._register_overridden_handlers()
132
144
 
@@ -158,6 +170,18 @@ class LLMAgent(
158
170
  def in_prompt(self) -> LLMPrompt | None:
159
171
  return self._prompt_builder.in_prompt_template
160
172
 
173
+ def _prepare_memory(
174
+ self,
175
+ memory: LLMAgentMemory,
176
+ in_args: InT | None = None,
177
+ sys_prompt: LLMPrompt | None = None,
178
+ ctx: RunContext[Any] | None = None,
179
+ ) -> None:
180
+ if self._prepare_memory_impl:
181
+ return self._prepare_memory_impl(
182
+ memory=memory, in_args=in_args, sys_prompt=sys_prompt, ctx=ctx
183
+ )
184
+
161
185
  def _memorize_inputs(
162
186
  self,
163
187
  chat_inputs: LLMPrompt | Sequence[str | ImageData] | None = None,
@@ -165,7 +189,7 @@ class LLMAgent(
165
189
  in_args: InT | None = None,
166
190
  memory: LLMAgentMemory,
167
191
  ctx: RunContext[CtxT] | None = None,
168
- ) -> tuple[SystemMessage | None, UserMessage | None, LLMAgentMemory]:
192
+ ) -> tuple[SystemMessage | None, UserMessage | None]:
169
193
  # 1. Get run arguments
170
194
  sys_args: LLMPromptArgs | None = None
171
195
  usr_args: LLMPromptArgs | None = None
@@ -187,24 +211,24 @@ class LLMAgent(
187
211
  if self._reset_memory_on_run or memory.is_empty:
188
212
  memory.reset(formatted_sys_prompt)
189
213
  if formatted_sys_prompt is not None:
190
- system_message = memory.message_history[0] # type: ignore[union-attr]
191
- elif self._make_memory_impl:
192
- memory = self._make_memory_impl(
193
- prev_memory=memory,
214
+ system_message = cast("SystemMessage", memory.message_history[0])
215
+ else:
216
+ self._prepare_memory(
217
+ memory=memory,
194
218
  in_args=in_args,
195
219
  sys_prompt=formatted_sys_prompt,
196
220
  ctx=ctx,
197
221
  )
198
222
 
199
- # 3. Make and add user messages
223
+ # 3. Make and add input messages
200
224
 
201
- user_message = self._prompt_builder.make_user_message(
225
+ input_message = self._prompt_builder.make_input_message(
202
226
  chat_inputs=chat_inputs, in_args=in_args, usr_args=usr_args, ctx=ctx
203
227
  )
204
- if user_message:
205
- memory.update([user_message])
228
+ if input_message:
229
+ memory.update([input_message])
206
230
 
207
- return system_message, user_message, memory
231
+ return system_message, input_message
208
232
 
209
233
  def _parse_output(
210
234
  self,
@@ -220,8 +244,9 @@ class LLMAgent(
220
244
 
221
245
  return validate_obj_from_json_or_py_string(
222
246
  str(conversation[-1].content or ""),
223
- adapter=self._out_type_adapter,
224
- from_substring=True,
247
+ schema=self._out_type,
248
+ from_substring=False,
249
+ strip_language_markdown=True,
225
250
  )
226
251
 
227
252
  async def _process(
@@ -230,18 +255,18 @@ class LLMAgent(
230
255
  *,
231
256
  in_args: InT | None = None,
232
257
  memory: LLMAgentMemory,
233
- run_id: str,
258
+ call_id: str,
234
259
  ctx: RunContext[CtxT] | None = None,
235
260
  ) -> Sequence[OutT_co]:
236
- system_message, user_message, memory = self._memorize_inputs(
261
+ system_message, input_message = self._memorize_inputs(
237
262
  chat_inputs=chat_inputs, in_args=in_args, memory=memory, ctx=ctx
238
263
  )
239
264
  if system_message:
240
- self._print_messages([system_message], run_id=run_id, ctx=ctx)
241
- if user_message:
242
- self._print_messages([user_message], run_id=run_id, ctx=ctx)
265
+ self._print_messages([system_message], call_id=call_id, ctx=ctx)
266
+ if input_message:
267
+ self._print_messages([input_message], call_id=call_id, ctx=ctx)
243
268
 
244
- await self._policy_executor.execute(memory, run_id=run_id, ctx=ctx)
269
+ await self._policy_executor.execute(memory, call_id=call_id, ctx=ctx)
245
270
 
246
271
  return [
247
272
  self._parse_output(
@@ -255,83 +280,41 @@ class LLMAgent(
255
280
  *,
256
281
  in_args: InT | None = None,
257
282
  memory: LLMAgentMemory,
258
- run_id: str,
283
+ call_id: str,
259
284
  ctx: RunContext[CtxT] | None = None,
260
285
  ) -> AsyncIterator[Event[Any]]:
261
- system_message, user_message, memory = self._memorize_inputs(
286
+ system_message, input_message = self._memorize_inputs(
262
287
  chat_inputs=chat_inputs, in_args=in_args, memory=memory, ctx=ctx
263
288
  )
264
289
  if system_message:
265
- yield SystemMessageEvent(data=system_message)
266
- if user_message:
267
- yield UserMessageEvent(data=user_message)
290
+ self._print_messages([system_message], call_id=call_id, ctx=ctx)
291
+ yield SystemMessageEvent(
292
+ data=system_message, proc_name=self.name, call_id=call_id
293
+ )
294
+ if input_message:
295
+ self._print_messages([input_message], call_id=call_id, ctx=ctx)
296
+ yield UserMessageEvent(
297
+ data=input_message, proc_name=self.name, call_id=call_id
298
+ )
268
299
 
269
- # 4. Run tool call loop (new messages are added to the message
270
- # history inside the loop)
271
300
  async for event in self._policy_executor.execute_stream(
272
- memory, run_id=run_id, ctx=ctx
301
+ memory, call_id=call_id, ctx=ctx
273
302
  ):
274
303
  yield event
275
304
 
276
305
  output = self._parse_output(
277
306
  conversation=memory.message_history, in_args=in_args, ctx=ctx
278
307
  )
279
- yield ProcOutputEvent(data=output, name=self.name)
308
+ yield ProcPayloadOutputEvent(data=output, proc_name=self.name, call_id=call_id)
280
309
 
281
310
  def _print_messages(
282
311
  self,
283
312
  messages: Sequence[Message],
284
- run_id: str,
313
+ call_id: str,
285
314
  ctx: RunContext[CtxT] | None = None,
286
315
  ) -> None:
287
- if ctx:
288
- ctx.printer.print_llm_messages(
289
- messages, agent_name=self.name, run_id=run_id
290
- )
291
-
292
- # -- Decorators for custom implementations --
293
-
294
- def make_system_prompt(
295
- self, func: MakeSystemPromptHandler[CtxT]
296
- ) -> MakeSystemPromptHandler[CtxT]:
297
- self._prompt_builder.make_system_prompt_impl = func
298
-
299
- return func
300
-
301
- def make_input_content(
302
- self, func: MakeInputContentHandler[InT, CtxT]
303
- ) -> MakeInputContentHandler[InT, CtxT]:
304
- self._prompt_builder.make_input_content_impl = func
305
-
306
- return func
307
-
308
- def parse_output(
309
- self, func: ParseOutputHandler[InT, OutT_co, CtxT]
310
- ) -> ParseOutputHandler[InT, OutT_co, CtxT]:
311
- if self._used_default_llm_response_format:
312
- self._policy_executor.llm.response_format = None
313
- self._parse_output_impl = func
314
-
315
- return func
316
-
317
- def make_memory(self, func: MakeMemoryHandler) -> MakeMemoryHandler:
318
- self._make_memory_impl = func
319
-
320
- return func
321
-
322
- def manage_memory(
323
- self, func: ManageMemoryHandler[CtxT]
324
- ) -> ManageMemoryHandler[CtxT]:
325
- self._policy_executor.manage_memory_impl = func
326
-
327
- return func
328
-
329
- def exit_tool_call_loop(
330
- self, func: ExitToolCallLoopHandler[CtxT]
331
- ) -> ExitToolCallLoopHandler[CtxT]:
332
- self._policy_executor.exit_tool_call_loop_impl = func
333
-
334
- return func
316
+ if ctx and ctx.printer:
317
+ ctx.printer.print_messages(messages, agent_name=self.name, call_id=call_id)
335
318
 
336
319
  # -- Override these methods in subclasses if needed --
337
320
 
@@ -339,36 +322,36 @@ class LLMAgent(
339
322
  cur_cls = type(self)
340
323
  base_cls = LLMAgent[Any, Any, Any]
341
324
 
325
+ # Prompt builder
326
+
342
327
  if cur_cls._make_system_prompt is not base_cls._make_system_prompt: # noqa: SLF001
343
328
  self._prompt_builder.make_system_prompt_impl = self._make_system_prompt
344
329
 
345
330
  if cur_cls._make_input_content is not base_cls._make_input_content: # noqa: SLF001
346
331
  self._prompt_builder.make_input_content_impl = self._make_input_content
347
332
 
348
- if cur_cls._make_memory is not base_cls._make_memory: # noqa: SLF001
349
- self._make_memory_impl = self._make_memory
350
-
351
- if cur_cls._manage_memory is not base_cls._manage_memory: # noqa: SLF001
352
- self._policy_executor.manage_memory_impl = self._manage_memory
333
+ # Policy executor
353
334
 
354
335
  if (
355
336
  cur_cls._exit_tool_call_loop is not base_cls._exit_tool_call_loop # noqa: SLF001
356
337
  ):
357
338
  self._policy_executor.exit_tool_call_loop_impl = self._exit_tool_call_loop
358
339
 
340
+ if cur_cls._manage_memory is not base_cls._manage_memory: # noqa: SLF001
341
+ self._policy_executor.manage_memory_impl = self._manage_memory
342
+
343
+ # Make sure default LLM response schema is not used when custom output
344
+ # parsing is provided
359
345
  if (
360
346
  cur_cls._parse_output is not base_cls._parse_output # noqa: SLF001
361
- and self._used_default_llm_response_format
347
+ and self._used_default_llm_response_schema
362
348
  ):
363
- self._policy_executor.llm.response_format = None
349
+ self._policy_executor.llm.response_schema = None
364
350
 
365
351
  def _make_system_prompt(
366
352
  self, sys_args: LLMPromptArgs | None, *, ctx: RunContext[CtxT] | None = None
367
- ) -> str:
368
- raise NotImplementedError(
369
- "LLMAgent._format_sys_args must be overridden by a subclass "
370
- "if it's intended to be used as the system arguments formatter."
371
- )
353
+ ) -> str | None:
354
+ return self._prompt_builder.make_system_prompt(sys_args=sys_args, ctx=ctx)
372
355
 
373
356
  def _make_input_content(
374
357
  self,
@@ -377,19 +360,8 @@ class LLMAgent(
377
360
  usr_args: LLMPromptArgs | None = None,
378
361
  ctx: RunContext[CtxT] | None = None,
379
362
  ) -> Content:
380
- raise NotImplementedError(
381
- "LLMAgent._format_in_args must be overridden by a subclass"
382
- )
383
-
384
- def _make_memory(
385
- self,
386
- prev_memory: LLMAgentMemory,
387
- in_args: Sequence[InT] | None = None,
388
- sys_prompt: LLMPrompt | None = None,
389
- ctx: RunContext[Any] | None = None,
390
- ) -> LLMAgentMemory:
391
- raise NotImplementedError(
392
- "LLMAgent._make_memory must be overridden by a subclass"
363
+ return self._prompt_builder.make_input_content(
364
+ in_args=in_args, usr_args=usr_args, ctx=ctx
393
365
  )
394
366
 
395
367
  def _exit_tool_call_loop(
@@ -399,8 +371,8 @@ class LLMAgent(
399
371
  ctx: RunContext[CtxT] | None = None,
400
372
  **kwargs: Any,
401
373
  ) -> bool:
402
- raise NotImplementedError(
403
- "LLMAgent._exit_tool_call_loop must be overridden by a subclass"
374
+ return self._policy_executor._exit_tool_call_loop( # type: ignore[return-value]
375
+ conversation=conversation, ctx=ctx, **kwargs
404
376
  )
405
377
 
406
378
  def _manage_memory(
@@ -410,6 +382,50 @@ class LLMAgent(
410
382
  ctx: RunContext[CtxT] | None = None,
411
383
  **kwargs: Any,
412
384
  ) -> None:
413
- raise NotImplementedError(
414
- "LLMAgent._manage_memory must be overridden by a subclass"
385
+ return self._policy_executor._manage_memory( # type: ignore[return-value]
386
+ memory=memory, ctx=ctx, **kwargs
415
387
  )
388
+
389
+ # Decorators for custom implementations as an alternative to overriding methods
390
+
391
+ def make_system_prompt(
392
+ self, func: MakeSystemPromptHandler[CtxT]
393
+ ) -> MakeSystemPromptHandler[CtxT]:
394
+ self._prompt_builder.make_system_prompt_impl = func
395
+
396
+ return func
397
+
398
+ def make_input_content(
399
+ self, func: MakeInputContentHandler[InT, CtxT]
400
+ ) -> MakeInputContentHandler[InT, CtxT]:
401
+ self._prompt_builder.make_input_content_impl = func
402
+
403
+ return func
404
+
405
+ def parse_output(
406
+ self, func: ParseOutputHandler[InT, OutT_co, CtxT]
407
+ ) -> ParseOutputHandler[InT, OutT_co, CtxT]:
408
+ if self._used_default_llm_response_schema:
409
+ self._policy_executor.llm.response_schema = None
410
+ self._parse_output_impl = func
411
+
412
+ return func
413
+
414
+ def prepare_memory(self, func: PrepareMemoryHandler) -> PrepareMemoryHandler:
415
+ self._prepare_memory_impl = func
416
+
417
+ return func
418
+
419
+ def manage_memory(
420
+ self, func: ManageMemoryHandler[CtxT]
421
+ ) -> ManageMemoryHandler[CtxT]:
422
+ self._policy_executor.manage_memory_impl = func
423
+
424
+ return func
425
+
426
+ def exit_tool_call_loop(
427
+ self, func: ExitToolCallLoopHandler[CtxT]
428
+ ) -> ExitToolCallLoopHandler[CtxT]:
429
+ self._policy_executor.exit_tool_call_loop_impl = func
430
+
431
+ return func
@@ -9,14 +9,14 @@ from .typing.io import LLMPrompt
9
9
  from .typing.message import Message, Messages, SystemMessage
10
10
 
11
11
 
12
- class MakeMemoryHandler(Protocol):
12
+ class PrepareMemoryHandler(Protocol):
13
13
  def __call__(
14
14
  self,
15
- prev_memory: "LLMAgentMemory",
15
+ memory: "LLMAgentMemory",
16
16
  in_args: Any | None,
17
17
  sys_prompt: LLMPrompt | None,
18
18
  ctx: RunContext[Any] | None,
19
- ) -> "LLMAgentMemory": ...
19
+ ) -> None: ...
20
20
 
21
21
 
22
22
  class LLMAgentMemory(Memory):