grasp_agents 0.3.8.tar.gz → 0.3.9.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/PKG-INFO +1 -1
  2. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/pyproject.toml +1 -1
  3. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/llm_policy_executor.py +21 -22
  4. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/usage_tracker.py +14 -4
  5. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/.gitignore +0 -0
  6. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/LICENSE.md +0 -0
  7. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/README.md +0 -0
  8. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/__init__.py +0 -0
  9. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/cloud_llm.py +0 -0
  10. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/comm_processor.py +0 -0
  11. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/costs_dict.yaml +0 -0
  12. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/generics_utils.py +0 -0
  13. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/grasp_logging.py +0 -0
  14. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/http_client.py +0 -0
  15. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/llm.py +0 -0
  16. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/llm_agent.py +0 -0
  17. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/llm_agent_memory.py +1 -1
  18. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/memory.py +0 -0
  19. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/message_history.py +0 -0
  20. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/openai/__init__.py +0 -0
  21. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/openai/completion_chunk_converters.py +0 -0
  22. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/openai/completion_converters.py +0 -0
  23. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/openai/content_converters.py +0 -0
  24. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/openai/converters.py +0 -0
  25. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/openai/message_converters.py +0 -0
  26. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/openai/openai_llm.py +0 -0
  27. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/openai/tool_converters.py +0 -0
  28. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/packet.py +0 -0
  29. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/packet_pool.py +0 -0
  30. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/printer.py +0 -0
  31. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/processor.py +0 -0
  32. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/prompt_builder.py +0 -0
  33. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/rate_limiting/__init__.py +0 -0
  34. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/rate_limiting/rate_limiter_chunked.py +0 -0
  35. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/rate_limiting/types.py +0 -0
  36. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/rate_limiting/utils.py +0 -0
  37. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/run_context.py +0 -0
  38. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/typing/__init__.py +0 -0
  39. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/typing/completion.py +0 -0
  40. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/typing/completion_chunk.py +0 -0
  41. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/typing/content.py +0 -0
  42. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/typing/converters.py +0 -0
  43. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/typing/events.py +0 -0
  44. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/typing/io.py +0 -0
  45. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/typing/message.py +0 -0
  46. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/typing/tool.py +0 -0
  47. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/utils.py +0 -0
  48. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/workflow/__init__.py +0 -0
  49. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/workflow/looped_workflow.py +0 -0
  50. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/workflow/parallel_processor.py +0 -0
  51. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/workflow/sequential_workflow.py +0 -0
  52. {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/workflow/workflow_processor.py +0 -0
{grasp_agents-0.3.8 → grasp_agents-0.3.9}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: grasp_agents
-Version: 0.3.8
+Version: 0.3.9
 Summary: Grasp Agents Library
 License-File: LICENSE.md
 Requires-Python: <4,>=3.11.4
{grasp_agents-0.3.8 → grasp_agents-0.3.9}/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "grasp_agents"
-version = "0.3.8"
+version = "0.3.9"
 description = "Grasp Agents Library"
 readme = "README.md"
 requires-python = ">=3.11.4,<4"
{grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/llm_policy_executor.py
@@ -1,7 +1,7 @@
 import asyncio
 import json
 from collections.abc import AsyncIterator, Coroutine, Sequence
-from itertools import starmap
+from itertools import chain, starmap
 from logging import getLogger
 from typing import Any, ClassVar, Generic, Protocol, TypeVar
 
@@ -141,19 +141,14 @@ class LLMPolicyExecutor(AutoInstanceAttributesMixin, Generic[_FinalAnswerT, CtxT
         completion_batch = await self.llm.generate_completion_batch(
             memory.message_history, tool_choice=tool_choice
         )
-        if (
-            len(completion_batch[0].messages) > 1
-            and memory.message_history.batch_size > 1
-        ):
-            raise ValueError(
-                "Batch size must be 1 when generating completions with n>1."
-            )
-        message_batch = [c.messages[0] for c in completion_batch]
+        message_batch = list(
+            chain.from_iterable([c.messages for c in completion_batch])
+        )
         memory.update(message_batch=message_batch)
 
         if ctx is not None:
             ctx.completions[self.agent_name].extend(completion_batch)
-            self._track_usage(completion_batch, ctx=ctx)
+            self._track_usage(self.agent_name, completion_batch, ctx=ctx)
             self._print_completions(completion_batch, ctx=ctx)
 
         return message_batch
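For illustration, a minimal sketch of the new batching behavior: with n > 1 candidates per completion, every candidate message is now flattened into the batch instead of keeping only the first and rejecting the rest. The nested lists below stand in for each Completion.messages; the names are illustrative, not the library's API.

from itertools import chain

# Stand-ins for Completion.messages: two completions, generated with n=2 and n=1.
completion_messages = [["m1a", "m1b"], ["m2a"]]

# 0.3.8 kept only messages[0] and raised for n>1 when batch_size > 1;
# 0.3.9 flattens all candidates into a single message batch:
message_batch = list(chain.from_iterable(completion_messages))
print(message_batch)  # ['m1a', 'm1b', 'm2a']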
@@ -179,16 +174,14 @@ class LLMPolicyExecutor(AutoInstanceAttributesMixin, Generic[_FinalAnswerT, CtxT
 
         if completion is None:
             raise RuntimeError("No completion generated during stream.")
-        if len(completion.messages) > 1:
-            raise ValueError("Streaming completion must have n=1")
 
-        message = completion.messages[0]
-        memory.update(message_batch=[message])
+        memory.update(message_batch=completion.messages)
 
-        yield GenMessageEvent(name=self.agent_name, data=message)
+        for message in completion.messages:
+            yield GenMessageEvent(name=self.agent_name, data=message)
 
         if ctx is not None:
-            self._track_usage([completion], ctx=ctx)
+            self._track_usage(self.agent_name, [completion], ctx=ctx)
             ctx.completions[self.agent_name].append(completion)
 
     async def call_tools(
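The streaming path gets the same treatment: instead of rejecting n > 1 and yielding a single event, the executor now yields one GenMessageEvent per generated message. A self-contained sketch of that pattern, where the event class is a hypothetical stand-in for the library's type:

import asyncio
from dataclasses import dataclass

@dataclass
class GenMessageEvent:  # stand-in for the grasp_agents event type
    name: str
    data: str

async def emit_messages(agent_name, messages):
    # 0.3.9 behavior: one event per candidate message
    for message in messages:
        yield GenMessageEvent(name=agent_name, data=message)

async def main():
    async for event in emit_messages("agent", ["candidate-1", "candidate-2"]):
        print(event)

asyncio.run(main())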
@@ -207,7 +200,7 @@ class LLMPolicyExecutor(AutoInstanceAttributesMixin, Generic[_FinalAnswerT, CtxT
         tool_messages = list(
             starmap(ToolMessage.from_tool_output, zip(outs, calls, strict=False))
         )
-        memory.update(message_list=tool_messages)
+        memory.update(tool_messages)
 
         if ctx is not None:
             ctx.printer.print_llm_messages(tool_messages, agent_name=self.agent_name)
@@ -234,7 +227,7 @@ class LLMPolicyExecutor(AutoInstanceAttributesMixin, Generic[_FinalAnswerT, CtxT
                 name=self.agent_name, content=tool_call.tool_arguments
             )
             gen_message.tool_calls = None
-            memory.update(message_list=[final_answer_message])
+            memory.update([final_answer_message])
             return final_answer_message
 
         return final_answer_message
@@ -247,7 +240,7 @@ class LLMPolicyExecutor(AutoInstanceAttributesMixin, Generic[_FinalAnswerT, CtxT
         user_message = UserMessage.from_text(
             "Exceeded the maximum number of turns: provide a final answer now!"
         )
-        memory.update(message_list=[user_message])
+        memory.update([user_message])
         if ctx is not None:
             ctx.printer.print_llm_messages([user_message], agent_name=self.agent_name)
 
@@ -263,6 +256,7 @@ class LLMPolicyExecutor(AutoInstanceAttributesMixin, Generic[_FinalAnswerT, CtxT
             raise RuntimeError(
                 "Final answer tool call did not return a final answer message."
             )
+
         return final_answer_message
 
     async def _generate_final_answer_stream(
  async def _generate_final_answer_stream(
@@ -273,7 +267,7 @@ class LLMPolicyExecutor(AutoInstanceAttributesMixin, Generic[_FinalAnswerT, CtxT
273
267
  user_message = UserMessage.from_text(
274
268
  "Exceeded the maximum number of turns: provide a final answer now!",
275
269
  )
276
- memory.update(message_list=[user_message])
270
+ memory.update([user_message])
277
271
  yield UserMessageEvent(name=self.agent_name, data=user_message)
278
272
 
279
273
  tool_choice = NamedToolChoice(name=self._final_answer_tool_name)
@@ -447,10 +441,15 @@ class LLMPolicyExecutor(AutoInstanceAttributesMixin, Generic[_FinalAnswerT, CtxT
             turns += 1
 
     def _track_usage(
-        self, completion_batch: Sequence[Completion], ctx: RunContext[CtxT]
+        self,
+        agent_name: str,
+        completion_batch: Sequence[Completion],
+        ctx: RunContext[CtxT],
     ) -> None:
         ctx.usage_tracker.update(
-            completions=completion_batch, model_name=self.llm.model_name
+            agent_name=agent_name,
+            completions=completion_batch,
+            model_name=self.llm.model_name,
         )
 
     def get_final_answer_tool(self) -> BaseTool[BaseModel, None, Any]:
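_track_usage now threads the agent name through to the tracker, so usage can be attributed per agent when several executors share one RunContext. A stub mirroring the new call shape; the class below is a hypothetical stand-in, not the real UsageTracker, and the model name is only an example:

from collections.abc import Sequence

class StubTracker:  # stand-in mirroring UsageTracker.update's new signature
    def update(
        self,
        agent_name: str,
        completions: Sequence[object],
        model_name: str | None = None,
    ) -> None:
        print(f"{agent_name}: {len(completions)} completion(s) from {model_name}")

# Mirrors the executor's new call: self._track_usage(self.agent_name, batch, ctx=ctx)
StubTracker().update(agent_name="planner", completions=[object()], model_name="gpt-4o")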
{grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/usage_tracker.py
@@ -1,4 +1,5 @@
 import logging
+from collections import defaultdict
 from collections.abc import Sequence
 from pathlib import Path
 from typing import Any, TypeAlias
@@ -23,7 +24,7 @@ class UsageTracker(BaseModel):
     source_id: str
     costs_dict_path: str | Path = COSTS_DICT_PATH
     costs_dict: CostsDict | None = None
-    total_usage: Usage = Field(default_factory=Usage)
+    usages: dict[str, Usage] = Field(default_factory=dict)
 
     def __init__(self, **kwargs: Any) -> None:
         super().__init__(**kwargs)
@@ -58,7 +59,10 @@ class UsageTracker(BaseModel):
         usage.cost = (input_cost + output_cost + reasoning_cost + cached_cost) / 1e6
 
     def update(
-        self, completions: Sequence[Completion], model_name: str | None = None
+        self,
+        agent_name: str,
+        completions: Sequence[Completion],
+        model_name: str | None = None,
     ) -> None:
         if model_name is not None and self.costs_dict is not None:
             model_costs_dict = self.costs_dict.get(model_name.split(":", 1)[-1])
@@ -71,10 +75,16 @@ class UsageTracker(BaseModel):
             self._add_cost_to_usage(
                 usage=completion.usage, model_costs_dict=model_costs_dict
             )
-            self.total_usage += completion.usage
+            if agent_name not in self.usages:
+                self.usages[agent_name] = Usage()
+            self.usages[agent_name] += completion.usage
+
+    @property
+    def total_usage(self) -> Usage:
+        return sum((usage for usage in self.usages.values()), Usage())
 
     def reset(self) -> None:
-        self.total_usage = Usage()
+        self.usages = defaultdict(Usage)
 
     def print_usage(self) -> None:
         usage = self.total_usage
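The tracker's accounting is now keyed by agent name, with total_usage derived on demand instead of stored. A runnable sketch of the same scheme, assuming only that Usage supports "+"; a simple dataclass stands in here for the library's pydantic model:

from collections import defaultdict
from dataclasses import dataclass

@dataclass
class Usage:  # stand-in; the real Usage is a pydantic model with more fields
    input_tokens: int = 0
    output_tokens: int = 0

    def __add__(self, other: "Usage") -> "Usage":
        return Usage(
            self.input_tokens + other.input_tokens,
            self.output_tokens + other.output_tokens,
        )

usages: dict[str, Usage] = defaultdict(Usage)
usages["planner"] += Usage(120, 40)
usages["planner"] += Usage(80, 25)
usages["critic"] += Usage(60, 15)

# total_usage is computed from the per-agent entries rather than stored:
total = sum(usages.values(), Usage())
print(total)  # Usage(input_tokens=260, output_tokens=80)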
{grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/llm_agent_memory.py
@@ -30,9 +30,9 @@ class LLMAgentMemory(Memory):
 
     def update(
         self,
+        message_list: Sequence[Message] | None = None,
         *,
         message_batch: Sequence[Message] | None = None,
-        message_list: Sequence[Message] | None = None,
         ctx: RunContext[Any] | None = None,
     ):
         if message_batch is not None and message_list is not None:
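This is the memory API change the executor's call sites rely on: message_list moved ahead of the "*", so it can be passed positionally (memory.update(tool_messages) instead of memory.update(message_list=tool_messages)). A stripped-down sketch of the reordered signature, with Message reduced to a plain alias for illustration:

from collections.abc import Sequence
from typing import Any

Message = Any  # stand-in for the grasp_agents Message type

def update(
    message_list: Sequence[Message] | None = None,
    *,
    message_batch: Sequence[Message] | None = None,
) -> None:
    if message_batch is not None and message_list is not None:
        raise ValueError("Provide message_list or message_batch, not both.")

update(["tool message"])            # new in 0.3.9: positional call
update(message_batch=["m1", "m2"])  # keyword-only, as before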