grasp_agents 0.5.9__py3-none-any.whl → 0.5.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- grasp_agents/cloud_llm.py +87 -109
- grasp_agents/litellm/converters.py +4 -2
- grasp_agents/litellm/lite_llm.py +72 -83
- grasp_agents/llm.py +35 -68
- grasp_agents/llm_agent.py +76 -52
- grasp_agents/llm_agent_memory.py +4 -2
- grasp_agents/llm_policy_executor.py +91 -55
- grasp_agents/openai/converters.py +4 -2
- grasp_agents/openai/openai_llm.py +61 -88
- grasp_agents/openai/tool_converters.py +6 -4
- grasp_agents/processors/base_processor.py +18 -10
- grasp_agents/processors/parallel_processor.py +8 -6
- grasp_agents/processors/processor.py +10 -6
- grasp_agents/prompt_builder.py +38 -28
- grasp_agents/run_context.py +1 -1
- grasp_agents/runner.py +1 -1
- grasp_agents/typing/converters.py +3 -1
- grasp_agents/typing/tool.py +15 -5
- grasp_agents/workflow/workflow_processor.py +4 -4
- {grasp_agents-0.5.9.dist-info → grasp_agents-0.5.11.dist-info}/METADATA +4 -5
- {grasp_agents-0.5.9.dist-info → grasp_agents-0.5.11.dist-info}/RECORD +23 -23
- {grasp_agents-0.5.9.dist-info → grasp_agents-0.5.11.dist-info}/WHEEL +0 -0
- {grasp_agents-0.5.9.dist-info → grasp_agents-0.5.11.dist-info}/licenses/LICENSE.md +0 -0
grasp_agents/openai/tool_converters.py
CHANGED
@@ -13,8 +13,10 @@ from . import (
 )
 
 
-def to_api_tool(tool: BaseTool[BaseModel, Any, Any]) -> OpenAIToolParam:
-    if tool.strict:
+def to_api_tool(
+    tool: BaseTool[BaseModel, Any, Any], strict: bool | None = None
+) -> OpenAIToolParam:
+    if strict:
         return pydantic_function_tool(
             model=tool.in_type, name=tool.name, description=tool.description
         )
@@ -23,9 +25,9 @@ def to_api_tool(tool: BaseTool[BaseModel, Any, Any]) -> OpenAIToolParam:
         name=tool.name,
         description=tool.description,
         parameters=tool.in_type.model_json_schema(),
-        strict=tool.strict,
+        strict=strict,
     )
-    if tool.strict is None:
+    if strict is None:
         function.pop("strict")
 
     return OpenAIToolParam(type="function", function=function)
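A minimal sketch of the new call site, assuming the import paths below; `AddArgs`/`AddTool` are illustrative and not part of the package. Strictness is now chosen by the caller of `to_api_tool` instead of being read from a `strict` field on the tool.

```python
from typing import Any

from pydantic import BaseModel

from grasp_agents.openai.tool_converters import to_api_tool
from grasp_agents.typing.tool import BaseTool


class AddArgs(BaseModel):
    a: int
    b: int


class AddTool(BaseTool[AddArgs, int, Any]):
    name: str = "add"
    description: str = "Add two integers."

    async def run(self, inp: AddArgs, **kwargs: Any) -> int:
        return inp.a + inp.b


strict_param = to_api_tool(AddTool(), strict=True)  # strict JSON schema via pydantic_function_tool
default_param = to_api_tool(AddTool())  # strict omitted, so the "strict" key is dropped
```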
grasp_agents/processors/base_processor.py
CHANGED
@@ -1,6 +1,6 @@
 import logging
 from abc import ABC, abstractmethod
-from collections.abc import AsyncIterator, Callable, Coroutine
+from collections.abc import AsyncIterator, Callable, Coroutine, Sequence
 from functools import wraps
 from typing import (
     Any,
@@ -37,7 +37,6 @@ from ..typing.tool import BaseTool
 
 logger = logging.getLogger(__name__)
 
-_OutT_contra = TypeVar("_OutT_contra", contravariant=True)
 
 F = TypeVar("F", bound=Callable[..., Coroutine[Any, Any, Packet[Any]]])
 F_stream = TypeVar("F_stream", bound=Callable[..., AsyncIterator[Event[Any]]])
@@ -102,10 +101,13 @@ def with_retry_stream(func: F_stream) -> F_stream:
     return cast("F_stream", wrapper)
 
 
+_OutT_contra = TypeVar("_OutT_contra", contravariant=True)
+
+
 class RecipientSelector(Protocol[_OutT_contra, CtxT]):
     def __call__(
-        self, output: _OutT_contra, ctx: RunContext[CtxT]
-    ) ->
+        self, output: _OutT_contra, *, ctx: RunContext[CtxT]
+    ) -> Sequence[ProcName] | None: ...
 
 
 class BaseProcessor(AutoInstanceAttributesMixin, ABC, Generic[InT, OutT, MemT, CtxT]):
@@ -118,7 +120,7 @@ class BaseProcessor(AutoInstanceAttributesMixin, ABC, Generic[InT, OutT, MemT, CtxT]):
         self,
         name: ProcName,
         max_retries: int = 0,
-        recipients:
+        recipients: Sequence[ProcName] | None = None,
         **kwargs: Any,
     ) -> None:
         self._in_type: type[InT]
@@ -239,7 +241,7 @@ class BaseProcessor(AutoInstanceAttributesMixin, ABC, Generic[InT, OutT, MemT, CtxT]):
            ) from err
 
     def _validate_recipients(
-        self, recipients:
+        self, recipients: Sequence[ProcName] | None, call_id: str
     ) -> None:
         for r in recipients or []:
             if r not in (self.recipients or []):
@@ -252,8 +254,8 @@ class BaseProcessor(AutoInstanceAttributesMixin, ABC, Generic[InT, OutT, MemT, CtxT]):
 
     @final
     def _select_recipients(
-        self, output: OutT, ctx: RunContext[CtxT]
-    ) ->
+        self, output: OutT, ctx: RunContext[CtxT]
+    ) -> Sequence[ProcName] | None:
         if self.recipient_selector:
             return self.recipient_selector(output=output, ctx=ctx)
 
@@ -310,9 +312,15 @@ class BaseProcessor(AutoInstanceAttributesMixin, ABC, Generic[InT, OutT, MemT, CtxT]):
            name: str = tool_name
            description: str = tool_description
 
-            async def run(
+            async def run(
+                self,
+                inp: InT,
+                *,
+                call_id: str | None = None,
+                ctx: RunContext[CtxT] | None = None,
+            ) -> OutT:
                result = await processor_instance.run(
-                    in_args=inp, forgetful=True, ctx=ctx
+                    in_args=inp, forgetful=True, call_id=call_id, ctx=ctx
                )
 
                return result.payloads[0]
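For a custom recipient selector, the new protocol makes `ctx` keyword-only and the return type a `Sequence[ProcName] | None`. A minimal sketch, assuming `ProcName` is a plain string alias; the `Answer` model and the processor names are illustrative only.

```python
from collections.abc import Sequence
from typing import Any

from pydantic import BaseModel

from grasp_agents.run_context import RunContext


class Answer(BaseModel):
    is_final: bool
    text: str


def select_recipients(output: Answer, *, ctx: RunContext[Any]) -> Sequence[str] | None:
    # Route final answers to a summarizer, everything else back to the solver.
    # "summarizer" and "solver" stand for processor names registered elsewhere.
    return ["summarizer"] if output.is_final else ["solver"]
```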
grasp_agents/processors/parallel_processor.py
CHANGED
@@ -30,7 +30,7 @@ class ParallelProcessor(
         in_args: InT | None = None,
         memory: MemT,
         call_id: str,
-        ctx: RunContext[CtxT]
+        ctx: RunContext[CtxT],
     ) -> OutT:
         return cast("OutT", in_args)
 
@@ -41,7 +41,7 @@ class ParallelProcessor(
         in_args: InT | None = None,
         memory: MemT,
         call_id: str,
-        ctx: RunContext[CtxT]
+        ctx: RunContext[CtxT],
     ) -> AsyncIterator[Event[Any]]:
         output = cast("OutT", in_args)
         yield ProcPayloadOutputEvent(data=output, proc_name=self.name, call_id=call_id)
@@ -67,7 +67,7 @@ class ParallelProcessor(
         in_args: InT | None = None,
         forgetful: bool = False,
         call_id: str,
-        ctx: RunContext[CtxT]
+        ctx: RunContext[CtxT],
     ) -> Packet[OutT]:
         memory = self.memory.model_copy(deep=True) if forgetful else self.memory
 
@@ -86,7 +86,7 @@ class ParallelProcessor(
         return Packet(payloads=[val_output], sender=self.name, recipients=recipients)
 
     async def _run_parallel(
-        self, in_args: list[InT], call_id: str, ctx: RunContext[CtxT]
+        self, in_args: list[InT], call_id: str, ctx: RunContext[CtxT]
     ) -> Packet[OutT]:
         tasks = [
             self._run_single(
@@ -114,6 +114,7 @@ class ParallelProcessor(
         ctx: RunContext[CtxT] | None = None,
     ) -> Packet[OutT]:
         call_id = self._generate_call_id(call_id)
+        ctx = ctx or RunContext[CtxT](state=None)  # type: ignore
 
         val_in_args = self._validate_inputs(
             call_id=call_id,
@@ -143,7 +144,7 @@ class ParallelProcessor(
         in_args: InT | None = None,
         forgetful: bool = False,
         call_id: str,
-        ctx: RunContext[CtxT]
+        ctx: RunContext[CtxT],
     ) -> AsyncIterator[Event[Any]]:
         memory = self.memory.model_copy(deep=True) if forgetful else self.memory
 
@@ -178,7 +179,7 @@ class ParallelProcessor(
         self,
         in_args: list[InT],
         call_id: str,
-        ctx: RunContext[CtxT]
+        ctx: RunContext[CtxT],
     ) -> AsyncIterator[Event[Any]]:
         streams = [
             self._run_single_stream(
@@ -222,6 +223,7 @@ class ParallelProcessor(
         ctx: RunContext[CtxT] | None = None,
     ) -> AsyncIterator[Event[Any]]:
         call_id = self._generate_call_id(call_id)
+        ctx = ctx or RunContext[CtxT](state=None)  # type: ignore
 
         val_in_args = self._validate_inputs(
             call_id=call_id,
grasp_agents/processors/processor.py
CHANGED
@@ -1,5 +1,5 @@
 import logging
-from collections.abc import AsyncIterator
+from collections.abc import AsyncIterator, Sequence
 from typing import Any, ClassVar, Generic, cast
 
 from ..memory import MemT
@@ -25,7 +25,7 @@ class Processor(BaseProcessor[InT, OutT, MemT, CtxT], Generic[InT, OutT, MemT, CtxT]):
         in_args: list[InT] | None = None,
         memory: MemT,
         call_id: str,
-        ctx: RunContext[CtxT]
+        ctx: RunContext[CtxT],
     ) -> list[OutT]:
         return cast("list[OutT]", in_args)
 
@@ -36,7 +36,7 @@ class Processor(BaseProcessor[InT, OutT, MemT, CtxT], Generic[InT, OutT, MemT, CtxT]):
         in_args: list[InT] | None = None,
         memory: MemT,
         call_id: str,
-        ctx: RunContext[CtxT]
+        ctx: RunContext[CtxT],
     ) -> AsyncIterator[Event[Any]]:
         outputs = await self._process(
             chat_inputs=chat_inputs,
@@ -58,7 +58,7 @@ class Processor(BaseProcessor[InT, OutT, MemT, CtxT], Generic[InT, OutT, MemT, CtxT]):
         in_args: InT | list[InT] | None = None,
         forgetful: bool = False,
         call_id: str | None = None,
-        ctx: RunContext[CtxT]
+        ctx: RunContext[CtxT],
     ) -> tuple[list[InT] | None, MemT, str]:
         call_id = self._generate_call_id(call_id)
 
@@ -74,10 +74,10 @@ class Processor(BaseProcessor[InT, OutT, MemT, CtxT], Generic[InT, OutT, MemT, CtxT]):
         return val_in_args, memory, call_id
 
     def _postprocess(
-        self, outputs: list[OutT], call_id: str, ctx: RunContext[CtxT]
+        self, outputs: list[OutT], call_id: str, ctx: RunContext[CtxT]
     ) -> Packet[OutT]:
         payloads: list[OutT] = []
-        routing: dict[int,
+        routing: dict[int, Sequence[ProcName] | None] = {}
         for idx, output in enumerate(outputs):
             val_output = self._validate_output(output, call_id=call_id)
             recipients = self._select_recipients(output=val_output, ctx=ctx)
@@ -105,6 +105,8 @@ class Processor(BaseProcessor[InT, OutT, MemT, CtxT], Generic[InT, OutT, MemT, CtxT]):
         call_id: str | None = None,
         ctx: RunContext[CtxT] | None = None,
     ) -> Packet[OutT]:
+        ctx = ctx or RunContext[CtxT](state=None)  # type: ignore
+
         val_in_args, memory, call_id = self._preprocess(
             chat_inputs=chat_inputs,
             in_packet=in_packet,
@@ -134,6 +136,8 @@ class Processor(BaseProcessor[InT, OutT, MemT, CtxT], Generic[InT, OutT, MemT, CtxT]):
         call_id: str | None = None,
         ctx: RunContext[CtxT] | None = None,
     ) -> AsyncIterator[Event[Any]]:
+        ctx = ctx or RunContext[CtxT](state=None)  # type: ignore
+
         val_in_args, memory, call_id = self._preprocess(
             chat_inputs=chat_inputs,
             in_packet=in_packet,
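One practical effect of the hunks above: `run` and `run_stream` now build a default `RunContext` when `ctx` is omitted. A minimal call-site sketch, assuming the import paths below; `proc` stands for any already-configured `Processor`.

```python
from typing import Any

from grasp_agents.packet import Packet
from grasp_agents.processors.processor import Processor


async def run_without_ctx(proc: Processor[str, str, Any, Any], task: str) -> Packet[str]:
    # Before 0.5.11, callers typically had to construct a RunContext themselves;
    # now ctx can simply be left out and a default one is created internally.
    return await proc.run(in_args=task)
```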
grasp_agents/prompt_builder.py
CHANGED
@@ -1,6 +1,6 @@
 import json
 from collections.abc import Sequence
-from typing import ClassVar, Generic, Protocol, TypeAlias, TypeVar, final
+from typing import ClassVar, Generic, Protocol, TypeAlias, TypeVar, cast, final
 
 from pydantic import BaseModel, TypeAdapter
 
@@ -15,12 +15,21 @@ _InT_contra = TypeVar("_InT_contra", contravariant=True)
 
 
 class SystemPromptBuilder(Protocol[CtxT]):
-    def __call__(
+    def __call__(
+        self,
+        *,
+        ctx: RunContext[CtxT],
+        call_id: str,
+    ) -> str | None: ...
 
 
 class InputContentBuilder(Protocol[_InT_contra, CtxT]):
     def __call__(
-        self,
+        self,
+        in_args: _InT_contra,
+        *,
+        ctx: RunContext[CtxT],
+        call_id: str,
     ) -> Content: ...
 
 
@@ -45,9 +54,9 @@ class PromptBuilder(AutoInstanceAttributesMixin, Generic[InT, CtxT]):
         self._in_args_type_adapter: TypeAdapter[InT] = TypeAdapter(self._in_type)
 
     @final
-    def build_system_prompt(self, ctx: RunContext[CtxT]
+    def build_system_prompt(self, ctx: RunContext[CtxT], call_id: str) -> str | None:
         if self.system_prompt_builder:
-            return self.system_prompt_builder(ctx=ctx)
+            return self.system_prompt_builder(ctx=ctx, call_id=call_id)
 
         return self.sys_prompt
 
@@ -73,22 +82,20 @@ class PromptBuilder(AutoInstanceAttributesMixin, Generic[InT, CtxT]):
 
     @final
     def _build_input_content(
-        self,
-        in_args: InT | None = None,
-        ctx: RunContext[CtxT] | None = None,
+        self, in_args: InT | None, ctx: RunContext[CtxT], call_id: str
     ) -> Content:
-
-        if in_args is not None:
-            val_in_args = self._validate_input_args(in_args=in_args)
-
-        if self.input_content_builder:
-            return self.input_content_builder(in_args=val_in_args, ctx=ctx)
-
-        if val_in_args is None:
+        if in_args is None and self._in_type is not type(None):
             raise InputPromptBuilderError(
                 proc_name=self._agent_name,
-                message="
-                f"
+                message="Either chat inputs or input arguments must be provided "
+                f"when input type is not None [agent_name={self._agent_name}]",
+            )
+        in_args = cast("InT", in_args)
+
+        val_in_args = self._validate_input_args(in_args)
+        if self.input_content_builder:
+            return self.input_content_builder(
+                in_args=val_in_args, ctx=ctx, call_id=call_id
            )
 
         if issubclass(self._in_type, BaseModel) and isinstance(val_in_args, BaseModel):
@@ -106,23 +113,26 @@ class PromptBuilder(AutoInstanceAttributesMixin, Generic[InT, CtxT]):
     def build_input_message(
         self,
         chat_inputs: LLMPrompt | Sequence[str | ImageData] | None = None,
+        *,
         in_args: InT | None = None,
-
+        call_id: str,
+        ctx: RunContext[CtxT],
     ) -> UserMessage | None:
-        if chat_inputs is not None
-
-
-
-
-
-
-        if chat_inputs:
+        if chat_inputs is not None:
+            if in_args is not None:
+                raise InputPromptBuilderError(
+                    proc_name=self._agent_name,
+                    message="Cannot use both chat inputs and input arguments "
+                    f"at the same time [agent_name={self._agent_name}]",
+                )
             if isinstance(chat_inputs, LLMPrompt):
                 return UserMessage.from_text(chat_inputs, name=self._agent_name)
             return UserMessage.from_content_parts(chat_inputs, name=self._agent_name)
 
         return UserMessage(
-            content=self._build_input_content(
+            content=self._build_input_content(
+                in_args=in_args, ctx=ctx, call_id=call_id
+            ),
             name=self._agent_name,
         )
 
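Custom prompt builders therefore gain a `call_id` keyword, and everything besides `in_args` is keyword-only. A minimal sketch of a system prompt builder under the new protocol (the prompt text is illustrative); an input content builder changes in the same way.

```python
from typing import Any

from grasp_agents.run_context import RunContext


def build_system_prompt(*, ctx: RunContext[Any], call_id: str) -> str | None:
    # call_id is newly available here, e.g. for logging or per-call prompt variants.
    return f"You are a patient math teacher. [call_id={call_id}]"
```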
grasp_agents/run_context.py
CHANGED
grasp_agents/runner.py
CHANGED
grasp_agents/typing/converters.py
CHANGED
@@ -69,7 +69,9 @@ class Converters(ABC):
 
     @staticmethod
     @abstractmethod
-    def to_tool(
+    def to_tool(
+        tool: BaseTool[BaseModel, Any, Any], strict: bool | None = None, **kwargs: Any
+    ) -> Any:
         pass
 
     @staticmethod
grasp_agents/typing/tool.py
CHANGED
@@ -48,8 +48,6 @@ class BaseTool(
     name: str
     description: str
 
-    strict: bool | None = None
-
     _in_type: type[_InT] = PrivateAttr()
     _out_type: type[_OutT_co] = PrivateAttr()
 
@@ -62,14 +60,26 @@ class BaseTool(
         return self._out_type
 
     @abstractmethod
-    async def run(
+    async def run(
+        self,
+        inp: _InT,
+        *,
+        ctx: RunContext[CtxT] | None = None,
+        call_id: str | None = None,
+    ) -> _OutT_co:
         pass
 
     async def __call__(
-        self,
+        self,
+        *,
+        ctx: RunContext[CtxT] | None = None,
+        call_id: str | None = None,
+        **kwargs: Any,
     ) -> _OutT_co:
+        # NOTE: validation is probably redundant here when tool inputs have been
+        # validated by the LLM already
         input_args = TypeAdapter(self._in_type).validate_python(kwargs)
-        output = await self.run(input_args, ctx=ctx)
+        output = await self.run(input_args, ctx=ctx, call_id=call_id)
 
         return TypeAdapter(self._out_type).validate_python(output)
 
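A tool written against the new abstract signature looks like the sketch below: `ctx` and `call_id` are optional keyword-only parameters, and the removed `strict` field is now handled at conversion time (see `to_api_tool` above). The `LookupTool` itself and the import paths are assumptions for illustration.

```python
from typing import Any

from pydantic import BaseModel

from grasp_agents.run_context import RunContext
from grasp_agents.typing.tool import BaseTool


class LookupArgs(BaseModel):
    key: str


class LookupTool(BaseTool[LookupArgs, str, Any]):
    name: str = "lookup"
    description: str = "Look up a canned answer by key."

    async def run(
        self,
        inp: LookupArgs,
        *,
        ctx: RunContext[Any] | None = None,
        call_id: str | None = None,
    ) -> str:
        # call_id is forwarded by BaseTool.__call__ and can be used for tracing.
        return {"meaning_of_life": "42"}.get(inp.key, "unknown")
```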
grasp_agents/workflow/workflow_processor.py
CHANGED
@@ -21,7 +21,7 @@ class WorkflowProcessor(
         subprocs: Sequence[BaseProcessor[Any, Any, Any, CtxT]],
         start_proc: BaseProcessor[InT, Any, Any, CtxT],
         end_proc: BaseProcessor[Any, OutT, Any, CtxT],
-        recipients:
+        recipients: Sequence[ProcName] | None = None,
         max_retries: int = 0,
     ) -> None:
         super().__init__(name=name, recipients=recipients, max_retries=max_retries)
@@ -57,11 +57,11 @@ class WorkflowProcessor(
         return func
 
     @property
-    def recipients(self) ->
+    def recipients(self) -> Sequence[ProcName] | None:
         return self._end_proc.recipients
 
     @recipients.setter
-    def recipients(self, value:
+    def recipients(self, value: Sequence[ProcName] | None) -> None:
         if hasattr(self, "_end_proc"):
             self._end_proc.recipients = value
 
@@ -96,7 +96,7 @@ class WorkflowProcessor(
         pass
 
     @abstractmethod
-    async def run_stream(
+    async def run_stream(
         self,
         chat_inputs: Any | None = None,
         *,
{grasp_agents-0.5.9.dist-info → grasp_agents-0.5.11.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: grasp_agents
-Version: 0.5.9
+Version: 0.5.11
 Summary: Grasp Agents Library
 License-File: LICENSE.md
 Requires-Python: <4,>=3.11.4
@@ -166,9 +166,7 @@ class AskStudentTool(BaseTool[TeacherQuestion, StudentReply, Any]):
     name: str = "ask_student"
     description: str = ask_student_tool_description
 
-    async def run(
-        self, inp: TeacherQuestion, ctx: RunContext[Any] | None = None
-    ) -> StudentReply:
+    async def run(self, inp: TeacherQuestion, **kwargs: Any) -> StudentReply:
         return input(inp.question)
 
 
@@ -180,7 +178,8 @@ teacher = LLMAgent[None, Problem, None](
     name="teacher",
     llm=LiteLLM(
         model_name="gpt-4.1",
-
+        # model_name="claude-sonnet-4-20250514",
+        # llm_settings=LiteLLMSettings(reasoning_effort="low"),
     ),
     tools=[AskStudentTool()],
     react_mode=True,
{grasp_agents-0.5.9.dist-info → grasp_agents-0.5.11.dist-info}/RECORD
CHANGED
@@ -1,40 +1,40 @@
 grasp_agents/__init__.py,sha256=Z3a_j2Etiap9H6lvE8-PQP_OIGMUcHNPeJAJO12B8kY,1031
-grasp_agents/cloud_llm.py,sha256=
+grasp_agents/cloud_llm.py,sha256=vwI6gpLOsFqN4KtaTOo75xw8t7uRtdVrYGjopEDmQBw,13091
 grasp_agents/costs_dict.yaml,sha256=2MFNWtkv5W5WSCcv1Cj13B1iQLVv5Ot9pS_KW2Gu2DA,2510
 grasp_agents/errors.py,sha256=K-22TCM1Klhsej47Rg5eTqnGiGPaXgKOpdOZZ7cPipw,4633
 grasp_agents/generics_utils.py,sha256=5Pw3I9dlnKC2VGqYKC4ZZUO3Z_vTNT-NPFovNfPkl6I,6542
 grasp_agents/grasp_logging.py,sha256=H1GYhXdQvVkmauFDZ-KDwvVmPQHZUUm9sRqX_ObK2xI,1111
 grasp_agents/http_client.py,sha256=Es8NXGDkp4Nem7g24-jW0KFGA9Hp_o2Cv3cOvjup-iU,859
-grasp_agents/llm.py,sha256=
-grasp_agents/llm_agent.py,sha256=
-grasp_agents/llm_agent_memory.py,sha256=
-grasp_agents/llm_policy_executor.py,sha256=
+grasp_agents/llm.py,sha256=IeV2QpR4AldVP3THzSETEnsaDx3DYz5HM6dkikSpy4o,10684
+grasp_agents/llm_agent.py,sha256=F_ou0pfdztqZzd2yU1jZZZVzcyhsLXfE_i0c4y2fZIQ,14123
+grasp_agents/llm_agent_memory.py,sha256=XmOT2G8RG5AHd0LR3WuK7VbD-KFFfThmJnuZK2iU3Fs,1856
+grasp_agents/llm_policy_executor.py,sha256=r0UxwjnVzTBQqLlwvZZ_JL0wl6ZebCgxkcz6I4GdmrM,18136
 grasp_agents/memory.py,sha256=keHuNEZNSxHT9FKpMohHOCNi7UAz_oRIc91IQEuzaWE,1162
 grasp_agents/packet.py,sha256=EmE-W4ZSMVZoqClECGFe7OGqrT4FSJ8IVGICrdjtdEY,1462
 grasp_agents/packet_pool.py,sha256=AF7ZMYY1U6ppNLEn6o0R8QXyWmcLQGcju7_TYQpAudg,4443
 grasp_agents/printer.py,sha256=wVNCaR9mbFKyzYdT8YpYD1JQqRqHdLtdfiZrwYxaM6Y,11132
-grasp_agents/prompt_builder.py,sha256=
-grasp_agents/run_context.py,sha256=
-grasp_agents/runner.py,sha256=
+grasp_agents/prompt_builder.py,sha256=wNPphkW8RL8501jV4Z7ncsN_sxBDR9Ax7eILLHr-OYg,6110
+grasp_agents/run_context.py,sha256=7qVs0T5rLvINmtlXqOoyy2Hu9xPzuFDbcVR6R93NF-0,951
+grasp_agents/runner.py,sha256=JL2wSKahbPYVd56NRB09cwco43sjhZPI4XYFCZyOXOA,5173
 grasp_agents/usage_tracker.py,sha256=ZQfVUUpG0C89hyPWT_JgXnjQOxoYmumcQ9t-aCfcMo8,3561
 grasp_agents/utils.py,sha256=qKmGBwrQHw1-BgqRLuGTPKGs3J_zbrpk3nxnP1iZBiQ,6152
 grasp_agents/litellm/__init__.py,sha256=wD8RZBYokFDfbS9Cs7nO_zKb3w7RIVwEGj7g2D5CJH0,4510
 grasp_agents/litellm/completion_chunk_converters.py,sha256=J5PPxzoTBqkvKQnCoBxQxJo7Q8Xfl9cbv2GRZox8Cjo,2689
 grasp_agents/litellm/completion_converters.py,sha256=JQ7XvQwwc-biFqVMcRO61SL5VGs_SkUvAhUz1QD7EmU,2516
-grasp_agents/litellm/converters.py,sha256=
-grasp_agents/litellm/lite_llm.py,sha256=
+grasp_agents/litellm/converters.py,sha256=XjePHii578sXP26Fyhnv0XfwJ3cNTp5PraggTsvcBXo,4778
+grasp_agents/litellm/lite_llm.py,sha256=2XsPB-BbM-Y2xNxsKmO0JOJOD_UYj6ndGMjfLkGPAK4,8279
 grasp_agents/litellm/message_converters.py,sha256=PsGLIJEcAeEoluHIh-utEufJ_9WeMYzXkwnR-8jyULQ,2037
 grasp_agents/openai/__init__.py,sha256=xaRnblUskiLvypIhMe4NRp9dxCG-gNR7dPiugUbPbhE,4717
 grasp_agents/openai/completion_chunk_converters.py,sha256=3MnMskdlp7ycsggc1ok1XpCHaP4Us2rLYaxImPLw1eI,2573
 grasp_agents/openai/completion_converters.py,sha256=UlDeQSl0AEFUS-QI5e8rrjfmXZojSYksJGnrXA7DmIk,2528
 grasp_agents/openai/content_converters.py,sha256=sMsZhoatuL_8t0IdVaGWIVZLB4nyi1ajD61GewQmeY4,2503
-grasp_agents/openai/converters.py,sha256=
+grasp_agents/openai/converters.py,sha256=RKOfMbIJmfFQ7ot0RGR6wrdMbR6_L7PB0UZwxwgM88g,4691
 grasp_agents/openai/message_converters.py,sha256=fhSN81uK51EGbLyM2-f0MvPX_UBrMy7SF3JQPo-dkXg,4686
-grasp_agents/openai/openai_llm.py,sha256=
-grasp_agents/openai/tool_converters.py,sha256=
-grasp_agents/processors/base_processor.py,sha256=
-grasp_agents/processors/parallel_processor.py,sha256=
-grasp_agents/processors/processor.py,sha256=
+grasp_agents/openai/openai_llm.py,sha256=QjxrZ4fM_FX3ncBjehUjWPCCiI62u_W2XDi7nth1WrY,9737
+grasp_agents/openai/tool_converters.py,sha256=rNH5t2Wir9nuy8Ei0jaxNuzDaXGqTLmLz3VyrnJhyn0,1196
+grasp_agents/processors/base_processor.py,sha256=BQ2k8dJY0jTMmidXZdK7JLO2YIQkmkp5boF1fT1o6uQ,10838
+grasp_agents/processors/parallel_processor.py,sha256=BOXRlPaZ-hooz0hHctqiW_5ldR-yDPYjFxuP7fAbZCI,7911
+grasp_agents/processors/processor.py,sha256=35MtYKrKtCZZMhV-U1DXBXtCNbCvZGaiiXo_5a3tI6s,5249
 grasp_agents/rate_limiting/__init__.py,sha256=KRgtF_E7R3YfA2cpYcFcZ7wycV0pWVJ0xRQC7YhiIEQ,158
 grasp_agents/rate_limiting/rate_limiter_chunked.py,sha256=BPgkUXvhmZhTpZs2T6uujNFuxH_kYHiISuf6_-eNhUc,5544
 grasp_agents/rate_limiting/types.py,sha256=PbnNhEAcYedQdIpPJWud8HUVcxa_xZS2RDZu4c5jr40,1003
@@ -43,16 +43,16 @@ grasp_agents/typing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSu
 grasp_agents/typing/completion.py,sha256=PHJ01m7WI2KYQL8w7W2ti6hMsKEZnzYGaxbNcBCc_IE,2782
 grasp_agents/typing/completion_chunk.py,sha256=eiOcpyMrH4Ws2XnY3_jj2_g396MqA3zV3lHygHfXt4o,17883
 grasp_agents/typing/content.py,sha256=XFmLpNWkGhkw5JujO6UsYwhzTHkU67PfhzaXH2waLcQ,3659
-grasp_agents/typing/converters.py,sha256=
+grasp_agents/typing/converters.py,sha256=VrsqjuC_1IMj9rTOAMPBJ1N0hHY3Z9fx0zySo4Z-xLQ,3020
 grasp_agents/typing/events.py,sha256=vFq6qRGofY8NuxOG9ZIN2_CnhAqsAodYLD4b4KtAq2U,12620
 grasp_agents/typing/io.py,sha256=MGEoUjAwKH1AHYglFkKNpHiielw-NFf13Epg3B4Q7Iw,139
 grasp_agents/typing/message.py,sha256=o7bN84AgrC5Fm3Wx20gqL9ArAMcEtYvnHnXbb04ngCs,3224
-grasp_agents/typing/tool.py,sha256=
+grasp_agents/typing/tool.py,sha256=qwC5baRratcyJWLMQ923IMGHH1hmj9eUtYLnNBcbwUU,2033
 grasp_agents/workflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 grasp_agents/workflow/looped_workflow.py,sha256=WHp9O3Za2sBVfY_BLOdvPvtY20XsjZQaWSO2-oAFvOY,6806
 grasp_agents/workflow/sequential_workflow.py,sha256=e3BIWzy_2novmEWNwIteyMbrzvl1-evHrTBE3r3SpU8,3648
-grasp_agents/workflow/workflow_processor.py,sha256=
-grasp_agents-0.5.9.dist-info/METADATA,sha256=
-grasp_agents-0.5.9.dist-info/WHEEL,sha256=
-grasp_agents-0.5.9.dist-info/licenses/LICENSE.md,sha256=
-grasp_agents-0.5.9.dist-info/RECORD,,
+grasp_agents/workflow/workflow_processor.py,sha256=DwHz70UOTp9dkbtzH9KE5LkGcT1RdHV7Hdiby0Bu9tw,3535
+grasp_agents-0.5.11.dist-info/METADATA,sha256=BkVyEN63RzGsCIJCnm5S38EI2ua9NcbPmr3lRCmWPGs,7021
+grasp_agents-0.5.11.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+grasp_agents-0.5.11.dist-info/licenses/LICENSE.md,sha256=-nNNdWqGB8gJ2O-peFQ2Irshv5tW5pHKyTcYkwvH7CE,1201
+grasp_agents-0.5.11.dist-info/RECORD,,
{grasp_agents-0.5.9.dist-info → grasp_agents-0.5.11.dist-info}/WHEEL
File without changes
{grasp_agents-0.5.9.dist-info → grasp_agents-0.5.11.dist-info}/licenses/LICENSE.md
File without changes