grasp_agents 0.2.4__tar.gz → 0.2.5__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/PKG-INFO +1 -1
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/pyproject.toml +1 -1
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/comm_agent.py +9 -9
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/llm_agent.py +43 -43
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/llm_agent_state.py +9 -9
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/prompt_builder.py +50 -45
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/run_context.py +2 -2
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/workflow/looped_agent.py +6 -6
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/workflow/sequential_agent.py +6 -6
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/workflow/workflow_agent.py +2 -2
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/.gitignore +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/LICENSE.md +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/README.md +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/__init__.py +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/agent_message.py +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/agent_message_pool.py +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/base_agent.py +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/cloud_llm.py +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/costs_dict.yaml +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/generics_utils.py +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/grasp_logging.py +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/http_client.py +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/llm.py +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/memory.py +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/openai/__init__.py +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/openai/completion_converters.py +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/openai/content_converters.py +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/openai/converters.py +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/openai/message_converters.py +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/openai/openai_llm.py +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/openai/tool_converters.py +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/printer.py +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/rate_limiting/__init__.py +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/rate_limiting/rate_limiter_chunked.py +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/rate_limiting/types.py +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/rate_limiting/utils.py +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/tool_orchestrator.py +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/typing/__init__.py +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/typing/completion.py +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/typing/content.py +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/typing/converters.py +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/typing/io.py +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/typing/message.py +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/typing/tool.py +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/usage_tracker.py +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/utils.py +0 -0
- {grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/workflow/__init__.py +0 -0
{grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/comm_agent.py

@@ -51,7 +51,7 @@ class CommunicatingAgent(
         self._in_type: type[InT]
         super().__init__(agent_id=agent_id, **kwargs)

-        self.
+        self._in_args_type_adapter: TypeAdapter[InT] = TypeAdapter(self._in_type)
         self.recipient_ids = recipient_ids or []

         self._message_pool = message_pool or AgentMessagePool()

@@ -102,7 +102,7 @@ class CommunicatingAgent(
         chat_inputs: Any | None = None,
         *,
         ctx: RunContextWrapper[CtxT] | None = None,
-
+        in_message: AgentMessage[InT, AgentState] | None = None,
         entry_point: bool = False,
         forbid_state_change: bool = False,
         **kwargs: Any,

@@ -113,7 +113,7 @@ class CommunicatingAgent(
         self, ctx: RunContextWrapper[CtxT] | None = None, **run_kwargs: Any
     ) -> None:
         output_message = await self.run(
-            ctx=ctx,
+            ctx=ctx, in_message=None, entry_point=True, **run_kwargs
         )
         await self.post_message(output_message)

@@ -140,8 +140,8 @@ class CommunicatingAgent(
         ctx: RunContextWrapper[CtxT] | None = None,
         **run_kwargs: Any,
     ) -> None:
-
-        out_message = await self.run(ctx=ctx,
+        in_message = cast("AgentMessage[InT, AgentState]", message)
+        out_message = await self.run(ctx=ctx, in_message=in_message, **run_kwargs)

         if self._exit_condition(output_message=out_message, ctx=ctx):
             await self._message_pool.stop_all()

@@ -199,14 +199,14 @@ class CommunicatingAgent(
             inp: InT,
             ctx: RunContextWrapper[CtxT] | None = None,
         ) -> OutT:
-
-
-                payloads=[
+            in_args = in_type.model_validate(inp)
+            in_message = AgentMessage[in_type, AgentState](
+                payloads=[in_args],
                 sender_id="<tool_user>",
                 recipient_ids=[agent_instance.agent_id],
             )
             agent_result = await agent_instance.run(
-
+                in_message=in_message,
                 entry_point=False,
                 forbid_state_change=True,
                 ctx=ctx,
{grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/llm_agent.py

@@ -52,7 +52,7 @@ class ParseOutputHandler(Protocol[InT, OutT, CtxT]):
         self,
         conversation: Conversation,
         *,
-
+        in_args: InT | None,
         batch_idx: int,
         ctx: RunContextWrapper[CtxT] | None,
     ) -> OutT: ...

@@ -74,8 +74,8 @@ class LLMAgent(
         # LLM
         llm: LLM[LLMSettings, Converters],
         # Input prompt template (combines user and received arguments)
-
-
+        in_prompt: LLMPrompt | None = None,
+        in_prompt_path: str | Path | None = None,
         # System prompt template
         sys_prompt: LLMPrompt | None = None,
         sys_prompt_path: str | Path | None = None,

@@ -119,13 +119,13 @@ class LLMAgent(

         # Prompt builder
         sys_prompt = get_prompt(prompt_text=sys_prompt, prompt_path=sys_prompt_path)
-
+        in_prompt = get_prompt(prompt_text=in_prompt, prompt_path=in_prompt_path)
         self._prompt_builder: PromptBuilder[InT, CtxT] = PromptBuilder[
             self.in_type, CtxT
         ](
             agent_id=self._agent_id,
             sys_prompt=sys_prompt,
-
+            in_prompt=in_prompt,
             sys_args_schema=sys_args_schema,
             usr_args_schema=usr_args_schema,
         )

@@ -159,14 +159,14 @@ class LLMAgent(
         return self._prompt_builder.sys_prompt

     @property
-    def
-        return self._prompt_builder.
+    def in_prompt(self) -> LLMPrompt | None:
+        return self._prompt_builder.in_prompt

     def _parse_output(
         self,
         conversation: Conversation,
         *,
-
+        in_args: InT | None = None,
         batch_idx: int = 0,
         ctx: RunContextWrapper[CtxT] | None = None,
     ) -> OutT:

@@ -180,7 +180,7 @@ class LLMAgent(

         return self._parse_output_impl(
             conversation=conversation,
-
+            in_args=in_args,
             batch_idx=batch_idx,
             ctx=ctx,
         )

@@ -194,21 +194,21 @@ class LLMAgent(
     @staticmethod
     def _validate_run_inputs(
         chat_inputs: LLMPrompt | Sequence[str | ImageData] | None = None,
-
-
+        in_args: InT | Sequence[InT] | None = None,
+        in_message: AgentMessage[InT, AgentState] | None = None,
         entry_point: bool = False,
     ) -> None:
         multiple_inputs_err_message = (
-            "Only one of chat_inputs,
+            "Only one of chat_inputs, in_args, or in_message must be provided."
         )
-        if chat_inputs is not None and
+        if chat_inputs is not None and in_args is not None:
             raise ValueError(multiple_inputs_err_message)
-        if chat_inputs is not None and
+        if chat_inputs is not None and in_message is not None:
             raise ValueError(multiple_inputs_err_message)
-        if
+        if in_args is not None and in_message is not None:
             raise ValueError(multiple_inputs_err_message)

-        if entry_point and
+        if entry_point and in_message is not None:
             raise ValueError(
                 "Entry point agent cannot receive messages from other agents."
             )

@@ -218,8 +218,8 @@ class LLMAgent(
         self,
         chat_inputs: LLMPrompt | Sequence[str | ImageData] | None = None,
         *,
-
-
+        in_message: AgentMessage[InT, AgentState] | None = None,
+        in_args: InT | Sequence[InT] | None = None,
         entry_point: bool = False,
         ctx: RunContextWrapper[CtxT] | None = None,
         forbid_state_change: bool = False,
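The two hunks above rename the keyword-only run inputs to in_message and in_args, and _validate_run_inputs rejects any call that mixes them with chat_inputs. A minimal calling sketch under stated assumptions — MyInput, the agent instance, and its field names are hypothetical, not taken from this diff:

from pydantic import BaseModel


class MyInput(BaseModel):
    # Hypothetical input schema; the real InT is whatever model the agent was
    # parameterized with.
    question: str


async def ask(agent, question: str):
    # Exactly one of chat_inputs / in_args / in_message may be passed (0.2.5 names);
    # passing two of them raises ValueError in _validate_run_inputs.
    out_message = await agent.run(in_args=MyInput(question=question))
    # The returned AgentMessage carries the parsed outputs in its payloads.
    return out_message.payloads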
@@ -236,8 +236,8 @@ class LLMAgent(

         self._validate_run_inputs(
             chat_inputs=chat_inputs,
-
-
+            in_args=in_args,
+            in_message=in_message,
             entry_point=entry_point,
         )

@@ -249,12 +249,12 @@ class LLMAgent(
         # 2. Set agent state

         cur_state = self.state.model_copy(deep=True)
-
+        in_state = in_message.sender_state if in_message else None
         prev_mh_len = len(cur_state.message_history)

-        state = LLMAgentState.
+        state = LLMAgentState.from_cur_and_in_states(
             cur_state=cur_state,
-
+            in_state=in_state,
             sys_prompt=formatted_sys_prompt,
             strategy=self.set_state_strategy,
             set_agent_state_impl=self._set_agent_state_impl,

@@ -264,16 +264,16 @@ class LLMAgent(
         self._print_sys_msg(state=state, prev_mh_len=prev_mh_len, ctx=ctx)

         # 3. Make and add user messages (can be empty)
-
-        if
-
-        elif
-
+        _in_args_batch: Sequence[InT] | None = None
+        if in_message is not None:
+            _in_args_batch = in_message.payloads
+        elif in_args is not None:
+            _in_args_batch = in_args if isinstance(in_args, Sequence) else [in_args]  # type: ignore[assignment]

         user_message_batch = self._prompt_builder.make_user_messages(
             chat_inputs=chat_inputs,
             usr_args=usr_args,
-
+            in_args_batch=_in_args_batch,
             entry_point=entry_point,
             ctx=ctx,
         )

@@ -291,14 +291,14 @@ class LLMAgent(

         # 5. Parse outputs
         batch_size = state.message_history.batch_size
-
+        in_args_batch = in_message.payloads if in_message else batch_size * [None]
         val_output_batch = [
             self._out_type_adapter.validate_python(
-                self._parse_output(conversation=conv,
+                self._parse_output(conversation=conv, in_args=in_args, ctx=ctx)
             )
-            for conv,
+            for conv, in_args in zip(
                 state.message_history.batched_conversations,
-
+                in_args_batch,
                 strict=False,
             )
         ]

@@ -313,10 +313,10 @@ class LLMAgent(
             recipient_ids=recipient_ids,
             chat_inputs=chat_inputs,
             sys_prompt=self.sys_prompt,
-
+            in_prompt=self.in_prompt,
             sys_args=sys_args,
             usr_args=usr_args,
-
+            in_args=(in_message.payloads if in_message is not None else None),
             outputs=val_output_batch,
             state=state,
         )

@@ -369,10 +369,10 @@ class LLMAgent(

         return func

-    def
+    def format_in_args_handler(
         self, func: FormatInputArgsHandler[InT, CtxT]
     ) -> FormatInputArgsHandler[InT, CtxT]:
-        self._prompt_builder.
+        self._prompt_builder.format_in_args_impl = func

         return func

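The format_in_args_handler decorator above installs a custom formatter as format_in_args_impl on the prompt builder and returns the function unchanged. A hedged sketch of how it might be attached, assuming an already-constructed LLMAgent instance and a hypothetical question field on its input model:

def register_formatter(agent):
    # `agent` is assumed to be an LLMAgent instance (an assumption, not shown
    # in this diff); the decorator installs the handler and returns it.
    @agent.format_in_args_handler
    def format_in_args(*, usr_args, in_args, batch_idx, ctx):
        # Return the template variables used to render in_prompt; here the user
        # args are merged with one field taken from the received args
        # (`question` is a hypothetical field name).
        return usr_args.model_dump(exclude_unset=True) | {"question": in_args.question}

    return format_in_args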
@@ -411,8 +411,8 @@ class LLMAgent(
         if cur_cls._format_sys_args is not base_cls._format_sys_args:  # noqa: SLF001
             self._prompt_builder.format_sys_args_impl = self._format_sys_args

-        if cur_cls.
-            self._prompt_builder.
+        if cur_cls._format_in_args is not base_cls._format_in_args:  # noqa: SLF001
+            self._prompt_builder.format_in_args_impl = self._format_in_args

         if cur_cls._set_agent_state is not base_cls._set_agent_state:  # noqa: SLF001
             self._set_agent_state_impl = self._set_agent_state

@@ -438,23 +438,23 @@ class LLMAgent(
             "if it's intended to be used as the system arguments formatter."
         )

-    def
+    def _format_in_args(
         self,
         *,
         usr_args: LLMPromptArgs,
-
+        in_args: InT,
         batch_idx: int = 0,
         ctx: RunContextWrapper[CtxT] | None = None,
     ) -> LLMFormattedArgs:
         raise NotImplementedError(
-            "LLMAgent.
+            "LLMAgent._format_in_args must be overridden by a subclass"
         )

     def _set_agent_state(
         self,
         cur_state: LLMAgentState,
         *,
-
+        in_state: AgentState | None,
         sys_prompt: LLMPrompt | None,
         ctx: RunContextWrapper[Any] | None,
     ) -> LLMAgentState:
{grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/llm_agent_state.py

@@ -15,7 +15,7 @@ class SetAgentState(Protocol):
         self,
         cur_state: "LLMAgentState",
         *,
-
+        in_state: AgentState | None,
         sys_prompt: LLMPrompt | None,
         ctx: RunContextWrapper[Any] | None,
     ) -> "LLMAgentState": ...

@@ -29,11 +29,11 @@ class LLMAgentState(AgentState):
         return self.message_history.batch_size

     @classmethod
-    def
+    def from_cur_and_in_states(
         cls,
         cur_state: "LLMAgentState",
         *,
-
+        in_state: Optional["AgentState"] = None,
         sys_prompt: LLMPrompt | None = None,
         strategy: SetAgentStateStrategy = "from_sender",
         set_agent_state_impl: SetAgentState | None = None,

@@ -50,13 +50,13 @@ class LLMAgentState(AgentState):
             upd_mh.reset(sys_prompt)

         elif strategy == "from_sender":
-
-
-            if
+            in_mh = (
+                in_state.message_history
+                if in_state and isinstance(in_state, "LLMAgentState")
                 else None
             )
-            if
-
+            if in_mh:
+                in_mh = deepcopy(in_mh)
             else:
                 upd_mh.reset(sys_prompt)

@@ -66,7 +66,7 @@ class LLMAgentState(AgentState):
         )
         return set_agent_state_impl(
             cur_state=cur_state,
-
+            in_state=in_state,
             sys_prompt=sys_prompt,
             ctx=ctx,
         )
{grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/prompt_builder.py

@@ -35,7 +35,7 @@ class FormatInputArgsHandler(Protocol[InT, CtxT]):
         self,
         *,
         usr_args: LLMPromptArgs,
-
+        in_args: InT,
         batch_idx: int,
         ctx: RunContextWrapper[CtxT] | None,
     ) -> LLMFormattedArgs: ...

@@ -48,7 +48,7 @@ class PromptBuilder(AutoInstanceAttributesMixin, Generic[InT, CtxT]):
         self,
         agent_id: str,
         sys_prompt: LLMPrompt | None,
-
+        in_prompt: LLMPrompt | None,
         sys_args_schema: type[LLMPromptArgs],
         usr_args_schema: type[LLMPromptArgs],
     ):

@@ -57,13 +57,13 @@ class PromptBuilder(AutoInstanceAttributesMixin, Generic[InT, CtxT]):

         self._agent_id = agent_id
         self.sys_prompt = sys_prompt
-        self.
+        self.in_prompt = in_prompt
         self.sys_args_schema = sys_args_schema
         self.usr_args_schema = usr_args_schema
         self.format_sys_args_impl: FormatSystemArgsHandler[CtxT] | None = None
-        self.
+        self.format_in_args_impl: FormatInputArgsHandler[InT, CtxT] | None = None

-        self.
+        self._in_args_type_adapter: TypeAdapter[InT] = TypeAdapter(self._in_type)

     def _format_sys_args(
         self,

@@ -75,31 +75,31 @@ class PromptBuilder(AutoInstanceAttributesMixin, Generic[InT, CtxT]):

         return sys_args.model_dump(exclude_unset=True)

-    def
+    def _format_in_args(
         self,
         *,
         usr_args: LLMPromptArgs,
-
+        in_args: InT,
         batch_idx: int = 0,
         ctx: RunContextWrapper[CtxT] | None = None,
     ) -> LLMFormattedArgs:
-        if self.
-            return self.
-                usr_args=usr_args,
+        if self.format_in_args_impl:
+            return self.format_in_args_impl(
+                usr_args=usr_args, in_args=in_args, batch_idx=batch_idx, ctx=ctx
             )

-        if not isinstance(
+        if not isinstance(in_args, BaseModel) and in_args is not None:
             raise TypeError(
                 "Cannot apply default formatting to non-BaseModel received arguments."
             )

         usr_args_ = usr_args
-
+        in_args_ = DummySchema() if in_args is None else in_args

         usr_args_dump = usr_args_.model_dump(exclude_unset=True)
-
+        in_args_dump = in_args_.model_dump(exclude={"selected_recipient_ids"})

-        return usr_args_dump |
+        return usr_args_dump | in_args_dump

     def make_sys_prompt(
         self,

@@ -122,13 +122,13 @@ class PromptBuilder(AutoInstanceAttributesMixin, Generic[InT, CtxT]):
     ) -> Sequence[UserMessage]:
         return [UserMessage.from_content_parts(content_parts, model_id=self._agent_id)]

-    def
-        self,
+    def _usr_messages_from_in_args(
+        self, in_args_batch: Sequence[InT]
     ) -> Sequence[UserMessage]:
         return [
             UserMessage.from_text(
-                self.
-
+                self._in_args_type_adapter.dump_json(
+                    inp,
                     exclude_unset=True,
                     indent=2,
                     exclude={"selected_recipient_ids"},

@@ -136,46 +136,46 @@ class PromptBuilder(AutoInstanceAttributesMixin, Generic[InT, CtxT]):
                 ).decode("utf-8"),
                 model_id=self._agent_id,
             )
-            for
+            for inp in in_args_batch
         ]

     def _usr_messages_from_prompt_template(
         self,
-
+        in_prompt: LLMPrompt,
         usr_args: UserRunArgs | None = None,
-
+        in_args_batch: Sequence[InT] | None = None,
         ctx: RunContextWrapper[CtxT] | None = None,
     ) -> Sequence[UserMessage]:
-        usr_args_batch_,
+        usr_args_batch_, in_args_batch_ = self._make_batched(usr_args, in_args_batch)

         val_usr_args_batch_ = [
             self.usr_args_schema.model_validate(u) for u in usr_args_batch_
         ]
-
-            self.
+        val_in_args_batch_ = [
+            self._in_args_type_adapter.validate_python(inp) for inp in in_args_batch_
         ]

-
-            self.
-                usr_args=val_usr_args,
+        formatted_in_args_batch = [
+            self._format_in_args(
+                usr_args=val_usr_args, in_args=val_in_args, batch_idx=i, ctx=ctx
             )
-            for i, (val_usr_args,
-                zip(val_usr_args_batch_,
+            for i, (val_usr_args, val_in_args) in enumerate(
+                zip(val_usr_args_batch_, val_in_args_batch_, strict=False)
             )
         ]

         return [
             UserMessage.from_formatted_prompt(
-                prompt_template=
+                prompt_template=in_prompt, prompt_args=in_args
             )
-            for
+            for in_args in formatted_in_args_batch
         ]

     def make_user_messages(
         self,
         chat_inputs: LLMPrompt | Sequence[str | ImageData] | None = None,
         usr_args: UserRunArgs | None = None,
-
+        in_args_batch: Sequence[InT] | None = None,
         entry_point: bool = False,
         ctx: RunContextWrapper[CtxT] | None = None,
     ) -> Sequence[UserMessage]:

@@ -197,15 +197,20 @@ class PromptBuilder(AutoInstanceAttributesMixin, Generic[InT, CtxT]):
             return self._usr_messages_from_content_parts(chat_inputs)

         # 2) No input prompt template + received args → raw JSON messages
-        if self.
-            return self.
+        if self.in_prompt is None and in_args_batch:
+            return self._usr_messages_from_in_args(in_args_batch)

         # 3) Input prompt template + any args → batch & format
-        if self.
+        if self.in_prompt is not None:
+            if in_args_batch and not isinstance(in_args_batch[0], BaseModel):
+                raise TypeError(
+                    "Cannot use the input prompt template with "
+                    "non-BaseModel received arguments."
+                )
             return self._usr_messages_from_prompt_template(
-
+                in_prompt=self.in_prompt,
                 usr_args=usr_args,
-
+                in_args_batch=in_args_batch,
                 ctx=ctx,
             )

@@ -214,19 +219,19 @@ class PromptBuilder(AutoInstanceAttributesMixin, Generic[InT, CtxT]):
     def _make_batched(
         self,
         usr_args: UserRunArgs | None = None,
-
+        in_args_batch: Sequence[InT] | None = None,
     ) -> tuple[Sequence[LLMPromptArgs | DummySchema], Sequence[InT | DummySchema]]:
         usr_args_batch_ = (
             usr_args if isinstance(usr_args, list) else [usr_args or DummySchema()]
         )
-
+        in_args_batch_ = in_args_batch or [DummySchema()]

         # Broadcast singleton → match lengths
-        if len(usr_args_batch_) == 1 and len(
-            usr_args_batch_ = [deepcopy(usr_args_batch_[0]) for _ in
-        if len(
-
-        if len(usr_args_batch_) != len(
+        if len(usr_args_batch_) == 1 and len(in_args_batch_) > 1:
+            usr_args_batch_ = [deepcopy(usr_args_batch_[0]) for _ in in_args_batch_]
+        if len(in_args_batch_) == 1 and len(usr_args_batch_) > 1:
+            in_args_batch_ = [deepcopy(in_args_batch_[0]) for _ in usr_args_batch_]
+        if len(usr_args_batch_) != len(in_args_batch_):
             raise ValueError("User args and received args must have the same length")

-        return usr_args_batch_,
+        return usr_args_batch_, in_args_batch_
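The _make_batched hunk above broadcasts a length-one batch on either side so user args and received args pair up one-to-one, and raises on a genuine length mismatch. A standalone sketch of the same broadcasting rule, written with plain lists for illustration rather than the library's types:

from copy import deepcopy


def broadcast_batches(usr_args_batch, in_args_batch):
    # Mirror of the rule in _make_batched: a length-1 batch is repeated
    # (deep-copied) to match the other side; unequal lengths are an error.
    if len(usr_args_batch) == 1 and len(in_args_batch) > 1:
        usr_args_batch = [deepcopy(usr_args_batch[0]) for _ in in_args_batch]
    if len(in_args_batch) == 1 and len(usr_args_batch) > 1:
        in_args_batch = [deepcopy(in_args_batch[0]) for _ in usr_args_batch]
    if len(usr_args_batch) != len(in_args_batch):
        raise ValueError("User args and received args must have the same length")
    return usr_args_batch, in_args_batch


# broadcast_batches([{"style": "brief"}], [{"q": "a"}, {"q": "b"}])
# -> the single user-arg dict is copied twice to pair with both received-arg entries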
{grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/run_context.py

@@ -34,10 +34,10 @@ class InteractionRecord(BaseModel, Generic[InT, OutT, StateT]):
     state: StateT
     chat_inputs: LLMPrompt | Sequence[str | ImageData] | None = None
     sys_prompt: LLMPrompt | None = None
-
+    in_prompt: LLMPrompt | None = None
     sys_args: SystemRunArgs | None = None
     usr_args: UserRunArgs | None = None
-
+    in_args: Sequence[InT] | None = None
     outputs: Sequence[OutT]

     model_config = ConfigDict(extra="forbid", frozen=True)
{grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/workflow/looped_agent.py

@@ -81,14 +81,14 @@ class LoopedWorkflowAgent(WorkflowAgent[InT, OutT, CtxT], Generic[InT, OutT, Ctx
         self,
         chat_inputs: Any | None = None,
         *,
-
-
+        in_args: InT | Sequence[InT] | None = None,
+        in_message: AgentMessage[InT, Any] | None = None,
         ctx: RunContextWrapper[CtxT] | None = None,
         entry_point: bool = False,
         forbid_state_change: bool = False,
         **kwargs: Any,
     ) -> AgentMessage[OutT, AgentState]:
-        agent_message =
+        agent_message = in_message
         num_iterations = 0
         exit_message: AgentMessage[OutT, Any] | None = None

@@ -96,8 +96,8 @@ class LoopedWorkflowAgent(WorkflowAgent[InT, OutT, CtxT], Generic[InT, OutT, Ctx
         for subagent in self.subagents:
             agent_message = await subagent.run(
                 chat_inputs=chat_inputs,
-
-
+                in_args=in_args,
+                in_message=agent_message,
                 entry_point=entry_point,
                 forbid_state_change=forbid_state_change,
                 ctx=ctx,

@@ -116,5 +116,5 @@ class LoopedWorkflowAgent(WorkflowAgent[InT, OutT, CtxT], Generic[InT, OutT, Ctx
             return exit_message

         chat_inputs = None
-
+        in_args = None
         entry_point = False
{grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/workflow/sequential_agent.py

@@ -38,26 +38,26 @@ class SequentialWorkflowAgent(WorkflowAgent[InT, OutT, CtxT], Generic[InT, OutT,
         self,
         chat_inputs: Any | None = None,
         *,
-
-
+        in_args: InT | Sequence[InT] | None = None,
+        in_message: AgentMessage[InT, Any] | None = None,
         ctx: RunContextWrapper[CtxT] | None = None,
         entry_point: bool = False,
         forbid_state_change: bool = False,
         **kwargs: Any,
     ) -> AgentMessage[OutT, Any]:
-        agent_message =
+        agent_message = in_message
         for subagent in self.subagents:
             agent_message = await subagent.run(
                 chat_inputs=chat_inputs,
-
-
+                in_args=in_args,
+                in_message=agent_message,
                 entry_point=entry_point,
                 forbid_state_change=forbid_state_change,
                 ctx=ctx,
                 **kwargs,
             )
             chat_inputs = None
-
+            in_args = None
             entry_point = False

         return cast("AgentMessage[OutT, Any]", agent_message)
{grasp_agents-0.2.4 → grasp_agents-0.2.5}/src/grasp_agents/workflow/workflow_agent.py

@@ -63,8 +63,8 @@ class WorkflowAgent(
         self,
         chat_inputs: Any | None = None,
         *,
-
-
+        in_args: InT | Sequence[InT] | None = None,
+        in_message: AgentMessage[InT, Any] | None = None,
         ctx: RunContextWrapper[CtxT] | None = None,
         entry_point: bool = False,
         forbid_state_change: bool = False,