grasp_agents 0.3.8__tar.gz → 0.3.9__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/PKG-INFO +1 -1
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/pyproject.toml +1 -1
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/llm_policy_executor.py +21 -22
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/usage_tracker.py +14 -4
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/.gitignore +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/LICENSE.md +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/README.md +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/__init__.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/cloud_llm.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/comm_processor.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/costs_dict.yaml +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/generics_utils.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/grasp_logging.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/http_client.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/llm.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/llm_agent.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/llm_agent_memory.py +1 -1
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/memory.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/message_history.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/openai/__init__.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/openai/completion_chunk_converters.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/openai/completion_converters.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/openai/content_converters.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/openai/converters.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/openai/message_converters.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/openai/openai_llm.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/openai/tool_converters.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/packet.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/packet_pool.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/printer.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/processor.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/prompt_builder.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/rate_limiting/__init__.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/rate_limiting/rate_limiter_chunked.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/rate_limiting/types.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/rate_limiting/utils.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/run_context.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/typing/__init__.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/typing/completion.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/typing/completion_chunk.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/typing/content.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/typing/converters.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/typing/events.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/typing/io.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/typing/message.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/typing/tool.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/utils.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/workflow/__init__.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/workflow/looped_workflow.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/workflow/parallel_processor.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/workflow/sequential_workflow.py +0 -0
- {grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/workflow/workflow_processor.py +0 -0
{grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/llm_policy_executor.py

@@ -1,7 +1,7 @@
 import asyncio
 import json
 from collections.abc import AsyncIterator, Coroutine, Sequence
-from itertools import starmap
+from itertools import chain, starmap
 from logging import getLogger
 from typing import Any, ClassVar, Generic, Protocol, TypeVar
 
@@ -141,19 +141,14 @@ class LLMPolicyExecutor(AutoInstanceAttributesMixin, Generic[_FinalAnswerT, CtxT
         completion_batch = await self.llm.generate_completion_batch(
             memory.message_history, tool_choice=tool_choice
         )
-
-
-
-        ):
-            raise ValueError(
-                "Batch size must be 1 when generating completions with n>1."
-            )
-        message_batch = [c.messages[0] for c in completion_batch]
+        message_batch = list(
+            chain.from_iterable([c.messages for c in completion_batch])
+        )
         memory.update(message_batch=message_batch)
 
         if ctx is not None:
             ctx.completions[self.agent_name].extend(completion_batch)
-            self._track_usage(completion_batch, ctx=ctx)
+            self._track_usage(self.agent_name, completion_batch, ctx=ctx)
             self._print_completions(completion_batch, ctx=ctx)
 
         return message_batch
@@ -179,16 +174,14 @@ class LLMPolicyExecutor(AutoInstanceAttributesMixin, Generic[_FinalAnswerT, CtxT
 
         if completion is None:
             raise RuntimeError("No completion generated during stream.")
-        if len(completion.messages) > 1:
-            raise ValueError("Streaming completion must have n=1")
 
-
-        memory.update(message_batch=[message])
+        memory.update(message_batch=completion.messages)
 
-
+        for message in completion.messages:
+            yield GenMessageEvent(name=self.agent_name, data=message)
 
         if ctx is not None:
-            self._track_usage([completion], ctx=ctx)
+            self._track_usage(self.agent_name, [completion], ctx=ctx)
             ctx.completions[self.agent_name].append(completion)
 
     async def call_tools(
@@ -207,7 +200,7 @@ class LLMPolicyExecutor(AutoInstanceAttributesMixin, Generic[_FinalAnswerT, CtxT
         tool_messages = list(
             starmap(ToolMessage.from_tool_output, zip(outs, calls, strict=False))
         )
-        memory.update(
+        memory.update(tool_messages)
 
         if ctx is not None:
             ctx.printer.print_llm_messages(tool_messages, agent_name=self.agent_name)
@@ -234,7 +227,7 @@ class LLMPolicyExecutor(AutoInstanceAttributesMixin, Generic[_FinalAnswerT, CtxT
                 name=self.agent_name, content=tool_call.tool_arguments
             )
             gen_message.tool_calls = None
-            memory.update(
+            memory.update([final_answer_message])
             return final_answer_message
 
         return final_answer_message
@@ -247,7 +240,7 @@ class LLMPolicyExecutor(AutoInstanceAttributesMixin, Generic[_FinalAnswerT, CtxT
         user_message = UserMessage.from_text(
             "Exceeded the maximum number of turns: provide a final answer now!"
         )
-        memory.update(
+        memory.update([user_message])
         if ctx is not None:
             ctx.printer.print_llm_messages([user_message], agent_name=self.agent_name)
 
@@ -263,6 +256,7 @@ class LLMPolicyExecutor(AutoInstanceAttributesMixin, Generic[_FinalAnswerT, CtxT
             raise RuntimeError(
                 "Final answer tool call did not return a final answer message."
            )
+
         return final_answer_message
 
     async def _generate_final_answer_stream(
@@ -273,7 +267,7 @@ class LLMPolicyExecutor(AutoInstanceAttributesMixin, Generic[_FinalAnswerT, CtxT
         user_message = UserMessage.from_text(
             "Exceeded the maximum number of turns: provide a final answer now!",
         )
-        memory.update(
+        memory.update([user_message])
         yield UserMessageEvent(name=self.agent_name, data=user_message)
 
         tool_choice = NamedToolChoice(name=self._final_answer_tool_name)
@@ -447,10 +441,15 @@ class LLMPolicyExecutor(AutoInstanceAttributesMixin, Generic[_FinalAnswerT, CtxT
             turns += 1
 
     def _track_usage(
-        self,
+        self,
+        agent_name: str,
+        completion_batch: Sequence[Completion],
+        ctx: RunContext[CtxT],
     ) -> None:
         ctx.usage_tracker.update(
-
+            agent_name=agent_name,
+            completions=completion_batch,
+            model_name=self.llm.model_name,
        )
 
    def get_final_answer_tool(self) -> BaseTool[BaseModel, None, Any]:
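The first hunk above drops the old n=1 guard and instead flattens every choice returned by each completion into the message batch, while `_track_usage` now forwards the agent name to the usage tracker. A minimal sketch of the flattening pattern, using hypothetical stand-in dataclasses in place of grasp_agents' real `Completion` and `Message` types:

```python
from dataclasses import dataclass, field
from itertools import chain


# Hypothetical stand-ins, only to illustrate the shape of the data;
# the real types live in grasp_agents.typing and carry more fields.
@dataclass
class Message:
    content: str


@dataclass
class Completion:
    messages: list[Message] = field(default_factory=list)


completion_batch = [
    Completion([Message("a1"), Message("a2")]),  # n=2 choices in one completion
    Completion([Message("b1")]),
]

# 0.3.8 kept only the first message per completion (and rejected n>1);
# 0.3.9 keeps every message, in completion order.
message_batch = list(chain.from_iterable([c.messages for c in completion_batch]))
print([m.content for m in message_batch])  # ['a1', 'a2', 'b1']
```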
{grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/usage_tracker.py

@@ -1,4 +1,5 @@
 import logging
+from collections import defaultdict
 from collections.abc import Sequence
 from pathlib import Path
 from typing import Any, TypeAlias
@@ -23,7 +24,7 @@ class UsageTracker(BaseModel):
     source_id: str
     costs_dict_path: str | Path = COSTS_DICT_PATH
     costs_dict: CostsDict | None = None
-
+    usages: dict[str, Usage] = Field(default_factory=dict)
 
     def __init__(self, **kwargs: Any) -> None:
         super().__init__(**kwargs)
@@ -58,7 +59,10 @@ class UsageTracker(BaseModel):
         usage.cost = (input_cost + output_cost + reasoning_cost + cached_cost) / 1e6
 
     def update(
-        self,
+        self,
+        agent_name: str,
+        completions: Sequence[Completion],
+        model_name: str | None = None,
     ) -> None:
         if model_name is not None and self.costs_dict is not None:
             model_costs_dict = self.costs_dict.get(model_name.split(":", 1)[-1])
@@ -71,10 +75,16 @@ class UsageTracker(BaseModel):
             self._add_cost_to_usage(
                 usage=completion.usage, model_costs_dict=model_costs_dict
             )
-
+            if agent_name not in self.usages:
+                self.usages[agent_name] = Usage()
+            self.usages[agent_name] += completion.usage
+
+    @property
+    def total_usage(self) -> Usage:
+        return sum((usage for usage in self.usages.values()), Usage())
 
     def reset(self) -> None:
-        self.
+        self.usages = defaultdict(Usage)
 
     def print_usage(self) -> None:
         usage = self.total_usage
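With this change, `UsageTracker.update` keys token usage by agent name and the new `total_usage` property sums across agents. A small sketch of that aggregation pattern, assuming a simplified `Usage` stand-in that supports `+` (the real class is a pydantic model with token and cost fields):

```python
from collections import defaultdict
from dataclasses import dataclass


# Simplified stand-in for grasp_agents' Usage model; assumption: the real
# model supports addition the same way, since the diff sums Usage objects.
@dataclass
class Usage:
    input_tokens: int = 0
    output_tokens: int = 0

    def __add__(self, other: "Usage") -> "Usage":
        return Usage(
            self.input_tokens + other.input_tokens,
            self.output_tokens + other.output_tokens,
        )


# Per-agent accumulation, as in update(agent_name=..., completions=...)
usages: dict[str, Usage] = defaultdict(Usage)
usages["planner"] += Usage(100, 20)
usages["planner"] += Usage(50, 10)
usages["critic"] += Usage(30, 5)

# total_usage sums every agent's usage, starting from an empty Usage()
total = sum((u for u in usages.values()), Usage())
print(total)  # Usage(input_tokens=180, output_tokens=35)
```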
{grasp_agents-0.3.8 → grasp_agents-0.3.9}/src/grasp_agents/llm_agent_memory.py

@@ -30,9 +30,9 @@ class LLMAgentMemory(Memory):
 
     def update(
         self,
+        message_list: Sequence[Message] | None = None,
         *,
         message_batch: Sequence[Message] | None = None,
-        message_list: Sequence[Message] | None = None,
         ctx: RunContext[Any] | None = None,
     ):
         if message_batch is not None and message_list is not None:
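This hunk moves `message_list` in front of the keyword-only marker, which is what lets the `memory.update(tool_messages)` and `memory.update([user_message])` call sites in `llm_policy_executor.py` pass messages positionally. A signature-only sketch of the before/after calling convention (the body here is illustrative, not the library's actual implementation):

```python
from collections.abc import Sequence


# 0.3.9 signature shape: message_list is positional-or-keyword,
# message_batch stays keyword-only. Message is simplified to str here.
def update(
    message_list: Sequence[str] | None = None,
    *,
    message_batch: Sequence[str] | None = None,
) -> None:
    # Illustrative body: the real method also guards against passing both.
    if message_batch is not None and message_list is not None:
        raise ValueError("Provide either message_list or message_batch, not both.")
    print("list:", message_list, "batch:", message_batch)


update(["tool result"])             # positional call, new in 0.3.9
update(message_batch=["m1", "m2"])  # keyword-only, unchanged
```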