grasp_agents 0.2.3__tar.gz → 0.2.4__tar.gz
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their public registries.
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/PKG-INFO +3 -4
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/README.md +2 -3
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/pyproject.toml +1 -1
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/base_agent.py +1 -1
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/cloud_llm.py +10 -2
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/comm_agent.py +1 -1
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/llm_agent.py +43 -17
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/openai/completion_converters.py +7 -2
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/openai/message_converters.py +13 -8
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/openai/openai_llm.py +4 -6
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/openai/tool_converters.py +9 -8
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/printer.py +1 -1
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/prompt_builder.py +13 -12
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/run_context.py +1 -1
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/tool_orchestrator.py +2 -2
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/typing/message.py +1 -1
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/usage_tracker.py +2 -1
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/workflow/looped_agent.py +8 -4
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/workflow/sequential_agent.py +6 -3
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/workflow/workflow_agent.py +2 -1
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/.gitignore +0 -0
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/LICENSE.md +0 -0
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/__init__.py +0 -0
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/agent_message.py +0 -0
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/agent_message_pool.py +0 -0
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/costs_dict.yaml +0 -0
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/generics_utils.py +0 -0
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/grasp_logging.py +0 -0
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/http_client.py +0 -0
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/llm.py +0 -0
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/llm_agent_state.py +0 -0
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/memory.py +0 -0
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/openai/__init__.py +0 -0
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/openai/content_converters.py +0 -0
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/openai/converters.py +0 -0
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/rate_limiting/__init__.py +0 -0
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/rate_limiting/rate_limiter_chunked.py +0 -0
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/rate_limiting/types.py +0 -0
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/rate_limiting/utils.py +0 -0
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/typing/__init__.py +0 -0
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/typing/completion.py +0 -0
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/typing/content.py +0 -0
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/typing/converters.py +0 -0
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/typing/io.py +0 -0
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/typing/tool.py +0 -0
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/utils.py +0 -0
- {grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/workflow/__init__.py +0 -0

{grasp_agents-0.2.3 → grasp_agents-0.2.4}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: grasp_agents
-Version: 0.2.3
+Version: 0.2.4
 Summary: Grasp Agents Library
 License-File: LICENSE.md
 Requires-Python: <4,>=3.11.4

@@ -168,9 +168,8 @@ Problem = str
 teacher = LLMAgent[Any, Problem, None](
     agent_id="teacher",
     llm=OpenAILLM(
-        model_name="gpt-4.1",
-        api_provider="openai",
-        llm_settings=OpenAILLMSettings(temperature=0.1),
+        model_name="openai:gpt-4.1",
+        llm_settings=OpenAILLMSettings(temperature=0.1)
     ),
     tools=[AskStudentTool()],
     max_turns=20,

{grasp_agents-0.2.3 → grasp_agents-0.2.4}/README.md

@@ -152,9 +152,8 @@ Problem = str
 teacher = LLMAgent[Any, Problem, None](
     agent_id="teacher",
     llm=OpenAILLM(
-        model_name="gpt-4.1",
-        api_provider="openai",
-        llm_settings=OpenAILLMSettings(temperature=0.1),
+        model_name="openai:gpt-4.1",
+        llm_settings=OpenAILLMSettings(temperature=0.1)
     ),
     tools=[AskStudentTool()],
     max_turns=20,
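
In practice the migration is one line: drop the `api_provider` keyword and prefix the model name instead. A before/after sketch; the import path is an assumption, not taken from the README excerpt shown here:

    from grasp_agents.openai.openai_llm import OpenAILLM, OpenAILLMSettings  # path assumed

    # 0.2.3 (old)
    llm = OpenAILLM(
        model_name="gpt-4.1",
        api_provider="openai",
        llm_settings=OpenAILLMSettings(temperature=0.1),
    )

    # 0.2.4 (new)
    llm = OpenAILLM(
        model_name="openai:gpt-4.1",
        llm_settings=OpenAILLMSettings(temperature=0.1),
    )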

{grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/cloud_llm.py

@@ -106,7 +106,6 @@ class CloudLLM(LLM[SettingsT, ConvertT], Generic[SettingsT, ConvertT]):
         tools: list[BaseTool[BaseModel, Any, Any]] | None = None,
         response_format: type | None = None,
         # Connection settings
-        api_provider: APIProvider = "openai",
         async_http_client_params: (
             dict[str, Any] | AsyncHTTPClientParams | None
         ) = None,

@@ -134,7 +133,16 @@ class CloudLLM(LLM[SettingsT, ConvertT], Generic[SettingsT, ConvertT]):
         )

         self._model_name = model_name
+
+        api_provider = model_name.split(":", 1)[0]
+        api_model_name = model_name.split(":", 1)[-1]
+        if api_provider not in PROVIDERS:
+            raise ValueError(
+                f"API provider '{api_provider}' is not supported. "
+                f"Supported providers are: {', '.join(PROVIDERS.keys())}"
+            )
         self._api_provider: APIProvider = api_provider
+        self._api_model_name: str = api_model_name

         self._struct_output_support: bool = any(
             fnmatch.fnmatch(self._model_name, pat)
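
The provider now rides along in the model name as a `provider:model` prefix instead of a separate `api_provider` argument. A minimal standalone sketch of the parsing rule; the `PROVIDERS` table here is a stand-in, not the library's actual registry:

    # Sketch of the "provider:model" parsing now done in CloudLLM.__init__.
    # PROVIDERS is a stand-in; the library keeps its own provider registry.
    PROVIDERS: dict[str, object] = {"openai": object()}

    def parse_model_name(model_name: str) -> tuple[str, str]:
        # split(":", 1) keeps any further colons inside the model part intact
        api_provider = model_name.split(":", 1)[0]
        api_model_name = model_name.split(":", 1)[-1]
        if api_provider not in PROVIDERS:
            # A bare name like "gpt-4.1" has no prefix, so the whole string
            # is treated as the provider and fails this check.
            raise ValueError(
                f"API provider '{api_provider}' is not supported. "
                f"Supported providers are: {', '.join(PROVIDERS)}"
            )
        return api_provider, api_model_name

    assert parse_model_name("openai:gpt-4.1") == ("openai", "gpt-4.1")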

@@ -284,7 +292,7 @@ class CloudLLM(LLM[SettingsT, ConvertT], Generic[SettingsT, ConvertT]):
             and not message.tool_calls
         ):
             validate_obj_from_json_or_py_string(
-                message.content,
+                message.content or "",
                 adapter=self._response_format_pyd,
                 from_substring=True,
             )

{grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/comm_agent.py

@@ -99,7 +99,7 @@ class CommunicatingAgent(
     @abstractmethod
     async def run(
         self,
-
+        chat_inputs: Any | None = None,
         *,
         ctx: RunContextWrapper[CtxT] | None = None,
         rcv_message: AgentMessage[InT, AgentState] | None = None,

{grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/llm_agent.py

@@ -186,19 +186,42 @@ class LLMAgent(
         )

         return validate_obj_from_json_or_py_string(
-            str(conversation[-1].content),
+            str(conversation[-1].content or ""),
             adapter=self._out_type_adapter,
             from_substring=True,
         )

+    @staticmethod
+    def _validate_run_inputs(
+        chat_inputs: LLMPrompt | Sequence[str | ImageData] | None = None,
+        rcv_args: InT | Sequence[InT] | None = None,
+        rcv_message: AgentMessage[InT, AgentState] | None = None,
+        entry_point: bool = False,
+    ) -> None:
+        multiple_inputs_err_message = (
+            "Only one of chat_inputs, rcv_args, or rcv_message must be provided."
+        )
+        if chat_inputs is not None and rcv_args is not None:
+            raise ValueError(multiple_inputs_err_message)
+        if chat_inputs is not None and rcv_message is not None:
+            raise ValueError(multiple_inputs_err_message)
+        if rcv_args is not None and rcv_message is not None:
+            raise ValueError(multiple_inputs_err_message)
+
+        if entry_point and rcv_message is not None:
+            raise ValueError(
+                "Entry point agent cannot receive messages from other agents."
+            )
+
     @final
     async def run(
         self,
-
+        chat_inputs: LLMPrompt | Sequence[str | ImageData] | None = None,
         *,
-        ctx: RunContextWrapper[CtxT] | None = None,
         rcv_message: AgentMessage[InT, AgentState] | None = None,
+        rcv_args: InT | Sequence[InT] | None = None,
         entry_point: bool = False,
+        ctx: RunContextWrapper[CtxT] | None = None,
         forbid_state_change: bool = False,
         **gen_kwargs: Any,  # noqa: ARG002
     ) -> AgentMessage[OutT, LLMAgentState]:
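
`run` now takes three alternative input channels (`chat_inputs`, `rcv_args`, `rcv_message`), and `_validate_run_inputs` rejects any combination of them. A condensed sketch of the same rule, equivalent to the three pairwise checks above:

    # Condensed equivalent of _validate_run_inputs: at most one input channel
    # per run, and an entry-point run cannot also receive an agent message.
    from typing import Any

    def check_run_inputs(
        chat_inputs: Any = None,
        rcv_args: Any = None,
        rcv_message: Any = None,
        entry_point: bool = False,
    ) -> None:
        if sum(x is not None for x in (chat_inputs, rcv_args, rcv_message)) > 1:
            raise ValueError(
                "Only one of chat_inputs, rcv_args, or rcv_message must be provided."
            )
        if entry_point and rcv_message is not None:
            raise ValueError(
                "Entry point agent cannot receive messages from other agents."
            )

    check_run_inputs(chat_inputs="Solve 2 + 2")  # ok
    check_run_inputs(rcv_args={"problem": "2 + 2"})  # ok
    try:
        check_run_inputs(chat_inputs="hi", rcv_args={"problem": "2 + 2"})
    except ValueError as err:
        print(err)  # two channels at once -> rejected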

@@ -211,16 +234,12 @@ class LLMAgent(
         sys_args = run_args.sys
         usr_args = run_args.usr

-
-
-
-
-
-
-                "There must be no received message with user inputs"
-            )
-
-        cur_state = self.state.model_copy(deep=True)
+        self._validate_run_inputs(
+            chat_inputs=chat_inputs,
+            rcv_args=rcv_args,
+            rcv_message=rcv_message,
+            entry_point=entry_point,
+        )

         # 1. Make system prompt (can be None)
         formatted_sys_prompt = self._prompt_builder.make_sys_prompt(
|
@@ -229,6 +248,7 @@ class LLMAgent(
|
|
229
248
|
|
230
249
|
# 2. Set agent state
|
231
250
|
|
251
|
+
cur_state = self.state.model_copy(deep=True)
|
232
252
|
rcv_state = rcv_message.sender_state if rcv_message else None
|
233
253
|
prev_mh_len = len(cur_state.message_history)
|
234
254
|
|

@@ -244,10 +264,16 @@ class LLMAgent(
         self._print_sys_msg(state=state, prev_mh_len=prev_mh_len, ctx=ctx)

         # 3. Make and add user messages (can be empty)
+        _rcv_args_batch: Sequence[InT] | None = None
+        if rcv_message is not None:
+            _rcv_args_batch = rcv_message.payloads
+        elif rcv_args is not None:
+            _rcv_args_batch = rcv_args if isinstance(rcv_args, Sequence) else [rcv_args]  # type: ignore[assignment]
+
         user_message_batch = self._prompt_builder.make_user_messages(
-
+            chat_inputs=chat_inputs,
             usr_args=usr_args,
-            rcv_args_batch=
+            rcv_args_batch=_rcv_args_batch,
             entry_point=entry_point,
             ctx=ctx,
         )

@@ -257,7 +283,7 @@ class LLMAgent(

         if not self.tools:
             # 4. Generate messages without tools
-            await self._tool_orchestrator.generate_once(
+            await self._tool_orchestrator.generate_once(state=state, ctx=ctx)
         else:
             # 4. Run tool call loop (new messages are added to the message
             # history inside the loop)

@@ -285,7 +311,7 @@ class LLMAgent(
         interaction_record = InteractionRecord(
             source_id=self.agent_id,
             recipient_ids=recipient_ids,
-
+            chat_inputs=chat_inputs,
             sys_prompt=self.sys_prompt,
             inp_prompt=self.inp_prompt,
             sys_args=sys_args,

{grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/openai/completion_converters.py

@@ -14,17 +14,22 @@ def from_api_completion(
 ) -> Completion:
     choices: list[CompletionChoice] = []
     if api_completion.choices is None:  # type: ignore
-        #
+        # Some providers return None for the choices when there is an error
         # TODO: add custom error types
         raise RuntimeError(
             f"Completion API error: {getattr(api_completion, 'error', None)}"
         )
     for api_choice in api_completion.choices:
         # TODO: currently no way to assign individual message usages when len(choices) > 1
+        finish_reason = api_choice.finish_reason
+        # Some providers return None for the message when finish_reason is other than "stop"
+        if api_choice.message is None:  # type: ignore
+            raise RuntimeError(
+                f"API returned None for message with finish_reason: {finish_reason}"
+            )
         message = from_api_assistant_message(
             api_choice.message, api_completion.usage, model_id=model_id
         )
-        finish_reason = api_choice.finish_reason
         choices.append(CompletionChoice(message=message, finish_reason=finish_reason))

     return Completion(choices=choices, model_id=model_id)
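
The converter now reads `finish_reason` before dereferencing the message, so a provider that returns `None` for `message` (e.g. on a non-"stop" finish) produces a clear error instead of a failure deeper in the conversion. A sketch of the same defensive pattern with stand-in types:

    # Sketch of the defensive order-of-operations above, with stand-in types.
    from dataclasses import dataclass

    @dataclass
    class ApiChoice:
        message: str | None
        finish_reason: str

    def convert_choice(api_choice: ApiChoice) -> tuple[str, str]:
        finish_reason = api_choice.finish_reason  # captured before touching message
        if api_choice.message is None:
            raise RuntimeError(
                f"API returned None for message with finish_reason: {finish_reason}"
            )
        return api_choice.message, finish_reason

    print(convert_choice(ApiChoice(message="4", finish_reason="stop")))  # ('4', 'stop')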

{grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/openai/message_converters.py

@@ -51,11 +51,6 @@ def from_api_assistant_message(
     api_usage: ChatCompletionUsage | None = None,
     model_id: str | None = None,
 ) -> AssistantMessage:
-    content = api_message.content or ""
-    assert isinstance(content, str), (
-        "Only string content is currently supported in assistant messages"
-    )
-
     usage = None
     if api_usage is not None:
         reasoning_tokens = None

@@ -88,7 +83,7 @@ from_api_assistant_message(
     ]

     return AssistantMessage(
-        content=content,
+        content=api_message.content,
         usage=usage,
         tool_calls=tool_calls,
         refusal=api_message.refusal,

@@ -113,12 +108,22 @@ def to_api_assistant_message(
         for tool_call in message.tool_calls
     ]

-
+    api_message = ChatCompletionAssistantMessageParam(
         role="assistant",
         content=message.content,
-        tool_calls=api_tool_calls
+        tool_calls=api_tool_calls or [],
         refusal=message.refusal,
     )
+    if message.content is None and not api_tool_calls:
+        # Some API providers return None in the generated content without errors,
+        # even though None in the input content is not accepted.
+        api_message["content"] = "<empty>"
+    if api_tool_calls is None:
+        api_message.pop("tool_calls")
+    if message.refusal is None:
+        api_message.pop("refusal")
+
+    return api_message


 def from_api_system_message(

{grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/openai/openai_llm.py

@@ -7,7 +7,7 @@ from openai import AsyncOpenAI
 from openai._types import NOT_GIVEN  # type: ignore[import]
 from pydantic import BaseModel

-from ..cloud_llm import
+from ..cloud_llm import CloudLLM, CloudLLMSettings
 from ..http_client import AsyncHTTPClientParams
 from ..rate_limiting.rate_limiter_chunked import RateLimiterC
 from ..typing.message import AssistantMessage, Conversation

@@ -69,7 +69,6 @@ class OpenAILLM(CloudLLM[OpenAILLMSettings, OpenAIConverters]):
         tools: list[BaseTool[BaseModel, Any, Any]] | None = None,
         response_format: type | None = None,
         # Connection settings
-        api_provider: APIProvider = "openai",
         async_http_client_params: (
             dict[str, Any] | AsyncHTTPClientParams | None
         ) = None,

@@ -92,7 +91,6 @@ class OpenAILLM(CloudLLM[OpenAILLMSettings, OpenAIConverters]):
             converters=OpenAIConverters(),
             tools=tools,
             response_format=response_format,
-            api_provider=api_provider,
             async_http_client_params=async_http_client_params,
             rate_limiter=rate_limiter,
             rate_limiter_rpm=rate_limiter_rpm,

@@ -125,7 +123,7 @@ class OpenAILLM(CloudLLM[OpenAILLMSettings, OpenAIConverters]):
         tool_choice = api_tool_choice or NOT_GIVEN

         return await self._client.chat.completions.create(
-            model=self.
+            model=self._api_model_name,
             messages=api_messages,
             tools=tools,
             tool_choice=tool_choice,

@@ -146,7 +144,7 @@ class OpenAILLM(CloudLLM[OpenAILLMSettings, OpenAIConverters]):
         response_format = api_response_format or NOT_GIVEN

         return await self._client.beta.chat.completions.parse(
-            model=self.
+            model=self._api_model_name,
             messages=api_messages,
             tools=tools,
             tool_choice=tool_choice,

@@ -167,7 +165,7 @@ class OpenAILLM(CloudLLM[OpenAILLMSettings, OpenAIConverters]):
         tool_choice = api_tool_choice or NOT_GIVEN

         return await self._client.chat.completions.create(
-            model=self.
+            model=self._api_model_name,
             messages=api_messages,
             tools=tools,
             tool_choice=tool_choice,

{grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/openai/tool_converters.py

@@ -15,15 +15,16 @@ from . import (
 def to_api_tool(
     tool: BaseTool[BaseModel, Any, Any],
 ) -> ChatCompletionToolParam:
-
-
-
-
-
-            parameters=tool.in_schema.model_json_schema(),
-            strict=tool.strict,
-        ),
+    function = ChatCompletionFunctionDefinition(
+        name=tool.name,
+        description=tool.description,
+        parameters=tool.in_schema.model_json_schema(),
+        strict=tool.strict,
     )
+    if tool.strict is None:
+        function.pop("strict")
+
+    return ChatCompletionToolParam(type="function", function=function)


 def to_api_tool_choice(
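
`to_api_tool` now builds the function definition explicitly and drops `strict` when unset rather than sending a null value. A sketch with a toy pydantic schema; plain dicts stand in for the OpenAI TypedDicts:

    # Toy-schema sketch of the rebuilt to_api_tool logic.
    from pydantic import BaseModel

    class AskStudentInput(BaseModel):
        question: str

    def to_api_tool_sketch(
        name: str,
        description: str,
        in_schema: type[BaseModel],
        strict: bool | None = None,
    ) -> dict:
        function = {
            "name": name,
            "description": description,
            "parameters": in_schema.model_json_schema(),
            "strict": strict,
        }
        if strict is None:
            function.pop("strict")  # omit rather than send null
        return {"type": "function", "function": function}

    tool = to_api_tool_sketch("ask_student", "Ask the student a question", AskStudentInput)
    assert "strict" not in tool["function"]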
|
@@ -88,7 +88,7 @@ class Printer:
|
|
88
88
|
|
89
89
|
role = message.role
|
90
90
|
usage = message.usage if isinstance(message, AssistantMessage) else None
|
91
|
-
content_str = self.content_to_str(message.content, message.role)
|
91
|
+
content_str = self.content_to_str(message.content or "", message.role)
|
92
92
|
|
93
93
|
if self.color_by == "agent_id":
|
94
94
|
color = self.get_agent_color(agent_id)
|

{grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/prompt_builder.py

@@ -118,13 +118,13 @@ class PromptBuilder(AutoInstanceAttributesMixin, Generic[InT, CtxT]):
         return [UserMessage.from_text(text, model_id=self._agent_id)]

     def _usr_messages_from_content_parts(
-        self, content_parts:
-    ) ->
+        self, content_parts: Sequence[str | ImageData]
+    ) -> Sequence[UserMessage]:
         return [UserMessage.from_content_parts(content_parts, model_id=self._agent_id)]

     def _usr_messages_from_rcv_args(
         self, rcv_args_batch: Sequence[InT]
-    ) ->
+    ) -> Sequence[UserMessage]:
         return [
             UserMessage.from_text(
                 self._rcv_args_type_adapter.dump_json(

@@ -173,28 +173,28 @@ class PromptBuilder(AutoInstanceAttributesMixin, Generic[InT, CtxT]):

     def make_user_messages(
         self,
-
+        chat_inputs: LLMPrompt | Sequence[str | ImageData] | None = None,
         usr_args: UserRunArgs | None = None,
         rcv_args_batch: Sequence[InT] | None = None,
         entry_point: bool = False,
         ctx: RunContextWrapper[CtxT] | None = None,
     ) -> Sequence[UserMessage]:
         # 1) Direct user input (e.g. chat input)
-        if
+        if chat_inputs is not None or entry_point:
             """
-            * If
+            * If chat inputs are provided, use them instead of the predefined
               input prompt template
             * In a multi-agent system, the predefined input prompt is used to
               construct agent inputs using the combination of received
               and user arguments.
               However, the first agent run (entry point) has no received
-              messages, so we use the
+              messages, so we use the chat inputs directly, if provided.
             """
-            if isinstance(
-                return self._usr_messages_from_text(
-
-
-
+            if isinstance(chat_inputs, LLMPrompt):
+                return self._usr_messages_from_text(chat_inputs)
+
+            if isinstance(chat_inputs, Sequence) and chat_inputs:
+                return self._usr_messages_from_content_parts(chat_inputs)

         # 2) No input prompt template + received args → raw JSON messages
         if self.inp_prompt is None and rcv_args_batch:

@@ -208,6 +208,7 @@ class PromptBuilder(AutoInstanceAttributesMixin, Generic[InT, CtxT]):
                 rcv_args_batch=rcv_args_batch,
                 ctx=ctx,
             )
+
         return []

     def _make_batched(

{grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/run_context.py

@@ -32,7 +32,7 @@ class InteractionRecord(BaseModel, Generic[InT, OutT, StateT]):
     source_id: str
     recipient_ids: Sequence[AgentID]
     state: StateT
-
+    chat_inputs: LLMPrompt | Sequence[str | ImageData] | None = None
     sys_prompt: LLMPrompt | None = None
     inp_prompt: LLMPrompt | None = None
     sys_args: SystemRunArgs | None = None

{grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/tool_orchestrator.py

@@ -103,11 +103,11 @@ class ToolOrchestrator(Generic[CtxT]):

     async def generate_once(
         self,
-
+        state: LLMAgentState,
         tool_choice: ToolChoice | None = None,
         ctx: RunContextWrapper[CtxT] | None = None,
     ) -> Sequence[AssistantMessage]:
-        message_history =
+        message_history = state.message_history
         message_batch = await self.llm.generate_message_batch(
             message_history, tool_choice=tool_choice
         )

{grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/typing/message.py

@@ -61,7 +61,7 @@ class MessageBase(BaseModel):

 class AssistantMessage(MessageBase):
     role: Literal[Role.ASSISTANT] = Role.ASSISTANT
-    content: str
+    content: str | None
     usage: Usage | None = None
     tool_calls: Sequence[ToolCall] | None = None
     refusal: str | None = None

{grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/usage_tracker.py

@@ -19,6 +19,7 @@ CostsDict: TypeAlias = dict[str, ModelCostsDict]


 class UsageTracker(BaseModel):
+    # TODO: specify different costs per provider:model, not just per model
     source_id: str
     costs_dict_path: str | Path = COSTS_DICT_PATH
     costs_dict: CostsDict | None = None

@@ -60,7 +61,7 @@ class UsageTracker(BaseModel):
         self, messages: Sequence[Message], model_name: str | None = None
     ) -> None:
         if model_name is not None and self.costs_dict is not None:
-            model_costs_dict = self.costs_dict.get(model_name)
+            model_costs_dict = self.costs_dict.get(model_name.split(":", 1)[-1])
         else:
             model_costs_dict = None


{grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/workflow/looped_agent.py

@@ -67,7 +67,8 @@ class LoopedWorkflowAgent(WorkflowAgent[InT, OutT, CtxT], Generic[InT, OutT, CtxT]):
     def _exit_workflow_loop(
         self,
         output_message: AgentMessage[OutT, Any],
-
+        *,
+        ctx: RunContextWrapper[CtxT] | None = None,
         **kwargs: Any,
     ) -> bool:
         if self._exit_workflow_loop_impl:

@@ -78,8 +79,9 @@ class LoopedWorkflowAgent(WorkflowAgent[InT, OutT, CtxT], Generic[InT, OutT, CtxT]):
     @final
     async def run(
         self,
-
+        chat_inputs: Any | None = None,
         *,
+        rcv_args: InT | Sequence[InT] | None = None,
         rcv_message: AgentMessage[InT, Any] | None = None,
         ctx: RunContextWrapper[CtxT] | None = None,
         entry_point: bool = False,

@@ -93,7 +95,8 @@ class LoopedWorkflowAgent(WorkflowAgent[InT, OutT, CtxT], Generic[InT, OutT, CtxT]):
         while True:
             for subagent in self.subagents:
                 agent_message = await subagent.run(
-
+                    chat_inputs=chat_inputs,
+                    rcv_args=rcv_args,
                     rcv_message=agent_message,
                     entry_point=entry_point,
                     forbid_state_change=forbid_state_change,

@@ -112,5 +115,6 @@ class LoopedWorkflowAgent(WorkflowAgent[InT, OutT, CtxT], Generic[InT, OutT, CtxT]):
                 )
                 return exit_message

-
+            chat_inputs = None
+            rcv_args = None
             entry_point = False

{grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/workflow/sequential_agent.py

@@ -36,8 +36,9 @@ class SequentialWorkflowAgent(WorkflowAgent[InT, OutT, CtxT], Generic[InT, OutT, CtxT]):
     @final
     async def run(
         self,
-
+        chat_inputs: Any | None = None,
         *,
+        rcv_args: InT | Sequence[InT] | None = None,
         rcv_message: AgentMessage[InT, Any] | None = None,
         ctx: RunContextWrapper[CtxT] | None = None,
         entry_point: bool = False,

@@ -47,14 +48,16 @@ class SequentialWorkflowAgent(WorkflowAgent[InT, OutT, CtxT], Generic[InT, OutT, CtxT]):
         agent_message = rcv_message
         for subagent in self.subagents:
             agent_message = await subagent.run(
-
+                chat_inputs=chat_inputs,
+                rcv_args=rcv_args,
                 rcv_message=agent_message,
                 entry_point=entry_point,
                 forbid_state_change=forbid_state_change,
                 ctx=ctx,
                 **kwargs,
             )
-
+            chat_inputs = None
+            rcv_args = None
             entry_point = False

         return cast("AgentMessage[OutT, Any]", agent_message)
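
Both workflow wrappers hand `chat_inputs` and `rcv_args` to the first subagent only, then reset them to `None` so every later subagent consumes just its predecessor's message. A control-flow sketch of that forwarding rule:

    # Control-flow sketch of input forwarding in the sequential workflow:
    # external inputs reach only the first subagent in the chain.
    def run_sequence(subagents, chat_inputs=None, rcv_args=None):
        message = None
        for subagent in subagents:
            message = subagent(
                chat_inputs=chat_inputs, rcv_args=rcv_args, rcv_message=message
            )
            chat_inputs = None  # consumed; later subagents see only rcv_message
            rcv_args = None
        return message

    trace = []

    def make_agent(i):
        def _run(chat_inputs, rcv_args, rcv_message):
            trace.append((i, chat_inputs, rcv_message))
            return f"out{i}"
        return _run

    run_sequence([make_agent(1), make_agent(2)], chat_inputs="hi")
    assert trace == [(1, "hi", None), (2, None, "out1")]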

{grasp_agents-0.2.3 → grasp_agents-0.2.4}/src/grasp_agents/workflow/workflow_agent.py

@@ -61,8 +61,9 @@ class WorkflowAgent(
     @abstractmethod
     async def run(
         self,
-
+        chat_inputs: Any | None = None,
         *,
+        rcv_args: InT | Sequence[InT] | None = None,
         rcv_message: AgentMessage[InT, Any] | None = None,
         ctx: RunContextWrapper[CtxT] | None = None,
         entry_point: bool = False,

The remaining files are unchanged between 0.2.3 and 0.2.4.