grasp_agents 0.2.10__py3-none-any.whl → 0.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- grasp_agents/__init__.py +15 -14
- grasp_agents/cloud_llm.py +118 -131
- grasp_agents/comm_processor.py +201 -0
- grasp_agents/generics_utils.py +15 -7
- grasp_agents/llm.py +60 -31
- grasp_agents/llm_agent.py +229 -278
- grasp_agents/llm_agent_memory.py +58 -0
- grasp_agents/llm_policy_executor.py +482 -0
- grasp_agents/memory.py +20 -134
- grasp_agents/message_history.py +140 -0
- grasp_agents/openai/__init__.py +54 -36
- grasp_agents/openai/completion_chunk_converters.py +78 -0
- grasp_agents/openai/completion_converters.py +53 -30
- grasp_agents/openai/content_converters.py +13 -14
- grasp_agents/openai/converters.py +44 -68
- grasp_agents/openai/message_converters.py +58 -72
- grasp_agents/openai/openai_llm.py +101 -42
- grasp_agents/openai/tool_converters.py +24 -19
- grasp_agents/packet.py +24 -0
- grasp_agents/packet_pool.py +91 -0
- grasp_agents/printer.py +29 -15
- grasp_agents/processor.py +194 -0
- grasp_agents/prompt_builder.py +173 -176
- grasp_agents/run_context.py +21 -41
- grasp_agents/typing/completion.py +58 -12
- grasp_agents/typing/completion_chunk.py +173 -0
- grasp_agents/typing/converters.py +8 -12
- grasp_agents/typing/events.py +86 -0
- grasp_agents/typing/io.py +4 -13
- grasp_agents/typing/message.py +12 -50
- grasp_agents/typing/tool.py +52 -26
- grasp_agents/usage_tracker.py +6 -6
- grasp_agents/utils.py +3 -3
- grasp_agents/workflow/looped_workflow.py +132 -0
- grasp_agents/workflow/parallel_processor.py +95 -0
- grasp_agents/workflow/sequential_workflow.py +66 -0
- grasp_agents/workflow/workflow_processor.py +78 -0
- {grasp_agents-0.2.10.dist-info → grasp_agents-0.3.1.dist-info}/METADATA +41 -50
- grasp_agents-0.3.1.dist-info/RECORD +51 -0
- grasp_agents/agent_message.py +0 -27
- grasp_agents/agent_message_pool.py +0 -92
- grasp_agents/base_agent.py +0 -51
- grasp_agents/comm_agent.py +0 -217
- grasp_agents/llm_agent_state.py +0 -79
- grasp_agents/tool_orchestrator.py +0 -203
- grasp_agents/workflow/looped_agent.py +0 -120
- grasp_agents/workflow/sequential_agent.py +0 -63
- grasp_agents/workflow/workflow_agent.py +0 -73
- grasp_agents-0.2.10.dist-info/RECORD +0 -46
- {grasp_agents-0.2.10.dist-info → grasp_agents-0.3.1.dist-info}/WHEEL +0 -0
- {grasp_agents-0.2.10.dist-info → grasp_agents-0.3.1.dist-info}/licenses/LICENSE.md +0 -0
grasp_agents/workflow/looped_workflow.py (new file)
@@ -0,0 +1,132 @@
+from collections.abc import Sequence
+from itertools import pairwise
+from logging import getLogger
+from typing import Any, ClassVar, Generic, Protocol, TypeVar, cast, final
+
+from ..packet_pool import Packet, PacketPool
+from ..processor import Processor
+from ..run_context import CtxT, RunContext
+from ..typing.io import InT_contra, OutT_co, ProcName
+from .workflow_processor import WorkflowProcessor
+
+logger = getLogger(__name__)
+
+_OutT_contra = TypeVar("_OutT_contra", contravariant=True)
+
+
+class ExitWorkflowLoopHandler(Protocol[_OutT_contra, CtxT]):
+    def __call__(
+        self,
+        out_packet: Packet[_OutT_contra],
+        ctx: RunContext[CtxT] | None,
+        **kwargs: Any,
+    ) -> bool: ...
+
+
+class LoopedWorkflow(
+    WorkflowProcessor[InT_contra, OutT_co, CtxT], Generic[InT_contra, OutT_co, CtxT]
+):
+    _generic_arg_to_instance_attr_map: ClassVar[dict[int, str]] = {
+        0: "_in_type",
+        1: "_out_type",
+    }
+
+    def __init__(
+        self,
+        name: ProcName,
+        subprocs: Sequence[Processor[Any, Any, Any, CtxT]],
+        exit_proc: Processor[Any, OutT_co, Any, CtxT],
+        packet_pool: PacketPool[CtxT] | None = None,
+        recipients: list[ProcName] | None = None,
+        max_iterations: int = 10,
+    ) -> None:
+        super().__init__(
+            subprocs=subprocs,
+            name=name,
+            start_proc=subprocs[0],
+            end_proc=exit_proc,
+            packet_pool=packet_pool,
+            recipients=recipients,
+        )
+
+        for prev_proc, proc in pairwise(subprocs):
+            if prev_proc.out_type != proc.in_type:
+                raise ValueError(
+                    f"Output type {prev_proc.out_type} of subprocessor "
+                    f"{prev_proc.name} does not match input type {proc.in_type} of "
+                    f"subprocessor {proc.name}"
+                )
+        if subprocs[-1].out_type != subprocs[0].in_type:
+            raise ValueError(
+                "Looped workflow's last subprocessor output type "
+                f"{subprocs[-1].out_type} does not match first subprocessor input "
+                f"type {subprocs[0].in_type}"
+            )
+
+        self._max_iterations = max_iterations
+
+        self._exit_workflow_loop_impl: ExitWorkflowLoopHandler[OutT_co, CtxT] | None = (
+            None
+        )
+
+    @property
+    def max_iterations(self) -> int:
+        return self._max_iterations
+
+    def exit_workflow_loop(
+        self, func: ExitWorkflowLoopHandler[OutT_co, CtxT]
+    ) -> ExitWorkflowLoopHandler[OutT_co, CtxT]:
+        self._exit_workflow_loop_impl = func
+
+        return func
+
+    def _exit_workflow_loop_fn(
+        self,
+        out_packet: Packet[OutT_co],
+        *,
+        ctx: RunContext[CtxT] | None = None,
+        **kwargs: Any,
+    ) -> bool:
+        if self._exit_workflow_loop_impl:
+            return self._exit_workflow_loop_impl(out_packet, ctx=ctx, **kwargs)
+
+        return False
+
+    @final
+    async def run(
+        self,
+        chat_inputs: Any | None = None,
+        *,
+        in_packet: Packet[InT_contra] | None = None,
+        in_args: InT_contra | Sequence[InT_contra] | None = None,
+        ctx: RunContext[CtxT] | None = None,
+        forgetful: bool = False,
+    ) -> Packet[OutT_co]:
+        packet = in_packet
+        num_iterations = 0
+        exit_packet: Packet[OutT_co] | None = None
+
+        while True:
+            for subproc in self.subprocs:
+                packet = await subproc.run(
+                    chat_inputs=chat_inputs,
+                    in_packet=packet,
+                    in_args=in_args,
+                    forgetful=forgetful,
+                    ctx=ctx,
+                )
+
+                if subproc is self._end_proc:
+                    num_iterations += 1
+                    exit_packet = cast("Packet[OutT_co]", packet)
+                    if self._exit_workflow_loop_fn(exit_packet, ctx=ctx):
+                        return exit_packet
+                    if num_iterations >= self._max_iterations:
+                        logger.info(
+                            f"Max iterations reached ({self._max_iterations}). "
+                            "Exiting loop."
+                        )
+                        return exit_packet
+
+            chat_inputs = None
+            in_args = None
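A `LoopedWorkflow` keeps cycling through its subprocessors until the handler registered via the `exit_workflow_loop` decorator returns `True`, or until `max_iterations` passes over the exit processor have completed. A minimal usage sketch, assuming `draft` and `review` are already-constructed `Processor` instances with compatible input/output types (the names, type parameters, and stop condition are illustrative, not part of the package):

```python
from typing import Any

from grasp_agents.packet import Packet
from grasp_agents.run_context import RunContext
from grasp_agents.workflow.looped_workflow import LoopedWorkflow

# `draft` and `review` are assumed, pre-built Processor instances whose
# out/in types line up; `review` also closes the loop back to `draft`.
loop = LoopedWorkflow[str, str, None](
    name="draft_review_loop",
    subprocs=[draft, review],
    exit_proc=review,        # its output packet is checked after every pass
    max_iterations=5,
)


@loop.exit_workflow_loop
def stop_when_approved(
    out_packet: Packet[str], ctx: RunContext[None] | None, **kwargs: Any
) -> bool:
    # Illustrative condition: stop as soon as any payload contains "APPROVED".
    return any("APPROVED" in payload for payload in out_packet.payloads)
```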
grasp_agents/workflow/parallel_processor.py (new file)
@@ -0,0 +1,95 @@
+import asyncio
+from collections.abc import Sequence
+from copy import deepcopy
+from typing import Any, Generic, cast
+
+from ..comm_processor import CommProcessor
+from ..packet import Packet
+from ..packet_pool import PacketPool
+from ..processor import Processor
+from ..run_context import CtxT
+from ..typing.io import InT_contra, OutT_co, ProcName
+
+
+class ParallelCommProcessor(
+    CommProcessor[InT_contra, OutT_co, Any, CtxT],
+    Generic[InT_contra, OutT_co, CtxT],
+):
+    def __init__(
+        self,
+        name: ProcName,
+        processor_type: type[Processor[InT_contra, OutT_co, Any, CtxT]],
+        packet_pool: PacketPool[CtxT] | None = None,
+        recipients: Sequence[ProcName] | None = None,
+        **subproc_init_kwargs: Any,
+    ) -> None:
+        self._processor_type = processor_type
+        self._subproc_init_kwargs = deepcopy(subproc_init_kwargs)
+
+        # NOTE: If the processor is an LLMAgent, the parallel subprocessors will share
+        # the same LLM and tools instances. Make sure their state is managed correctly.
+
+        super().__init__(name=name, packet_pool=packet_pool, recipients=recipients)
+
+    @property
+    def processor_type(self) -> type[Processor[InT_contra, OutT_co, Any, CtxT]]:
+        return self._processor_type
+
+    async def _run_subprocessor(
+        self, in_args: InT_contra, name_suffix: str, **subproc_run_kwargs: Any
+    ) -> Packet[OutT_co]:
+        subproc_name = f"{self.name}_{name_suffix}"
+        subproc = self._processor_type(name=subproc_name, **self._subproc_init_kwargs)
+
+        return await subproc.run(in_args=in_args, **subproc_run_kwargs)
+
+    def _validate_par_inputs(
+        self,
+        chat_inputs: Any | None,
+        in_packet: Packet[InT_contra] | None,
+        in_args: InT_contra | Sequence[InT_contra] | None,
+    ) -> Sequence[InT_contra]:
+        if chat_inputs is not None:
+            raise ValueError(
+                "chat_inputs are not supported in ParallelCommProcessor. "
+                "Use in_packet or in_args."
+            )
+        if in_packet is not None:
+            if not in_packet.payloads:
+                raise ValueError(
+                    "ParallelCommProcessor requires at least one input payload in "
+                    "in_packet."
+                )
+            return in_packet.payloads
+        if in_args is not None:
+            if not isinstance(in_args, Sequence) or not in_args:
+                raise ValueError("in_args must be a non-empty sequence of input data.")
+            return cast("Sequence[InT_contra]", in_args)
+        raise ValueError(
+            "ParallelCommProcessor requires either in_packet or in_args to be provided."
+        )
+
+    async def run(
+        self,
+        chat_inputs: Any | None = None,
+        *,
+        in_packet: Packet[InT_contra] | None = None,
+        in_args: InT_contra | Sequence[InT_contra] | None = None,
+        **subproc_run_kwargs: Any,
+    ) -> Packet[OutT_co]:
+        par_inputs = self._validate_par_inputs(
+            chat_inputs=chat_inputs, in_packet=in_packet, in_args=in_args
+        )
+        tasks = [
+            self._run_subprocessor(
+                in_args=inp, name_suffix=str(n), **subproc_run_kwargs
+            )
+            for n, inp in enumerate(par_inputs)
+        ]
+        out_packets = await asyncio.gather(*tasks)
+
+        return Packet(
+            payloads=[out_packet.payloads[0] for out_packet in out_packets],
+            sender=self.name,
+            recipients=(self.recipients or []),
+        )
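`ParallelCommProcessor` does not wrap a fixed subprocessor: it instantiates a fresh processor of `processor_type` per input payload (named `<name>_0`, `<name>_1`, ...), runs them concurrently with `asyncio.gather`, and packs the first payload of each result into one output `Packet`. A rough usage sketch, assuming `ReviewAgent` is some existing `Processor` subclass (the class name and the inputs are illustrative only):

```python
import asyncio

from grasp_agents.workflow.parallel_processor import ParallelCommProcessor

# ReviewAgent is an assumed Processor subclass; any extra keyword arguments
# passed here (none in this sketch) would be deep-copied and forwarded to
# every per-input instance.
fanout = ParallelCommProcessor[str, str, None](
    name="parallel_review",
    processor_type=ReviewAgent,
)


async def main() -> None:
    # One ReviewAgent ("parallel_review_0", "parallel_review_1", ...) is
    # created per input, and all of them run concurrently.
    out = await fanout.run(in_args=["paper A", "paper B", "paper C"])
    print(out.payloads)  # first payload of each subprocessor's output packet


asyncio.run(main())
```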
grasp_agents/workflow/sequential_workflow.py (new file)
@@ -0,0 +1,66 @@
+from collections.abc import Sequence
+from itertools import pairwise
+from typing import Any, ClassVar, Generic, cast, final
+
+from ..packet_pool import Packet, PacketPool
+from ..processor import Processor
+from ..run_context import CtxT, RunContext
+from ..typing.io import InT_contra, OutT_co, ProcName
+from .workflow_processor import WorkflowProcessor
+
+
+class SequentialWorkflow(
+    WorkflowProcessor[InT_contra, OutT_co, CtxT], Generic[InT_contra, OutT_co, CtxT]
+):
+    _generic_arg_to_instance_attr_map: ClassVar[dict[int, str]] = {
+        0: "_in_type",
+        1: "_out_type",
+    }
+
+    def __init__(
+        self,
+        name: ProcName,
+        subprocs: Sequence[Processor[Any, Any, Any, CtxT]],
+        packet_pool: PacketPool[CtxT] | None = None,
+        recipients: list[ProcName] | None = None,
+    ) -> None:
+        super().__init__(
+            subprocs=subprocs,
+            start_proc=subprocs[0],
+            end_proc=subprocs[-1],
+            name=name,
+            packet_pool=packet_pool,
+            recipients=recipients,
+        )
+
+        for prev_proc, proc in pairwise(subprocs):
+            if prev_proc.out_type != proc.in_type:
+                raise ValueError(
+                    f"Output type {prev_proc.out_type} of subprocessor {prev_proc.name}"
+                    f" does not match input type {proc.in_type} of subprocessor"
+                    f" {proc.name}"
+                )
+
+    @final
+    async def run(
+        self,
+        chat_inputs: Any | None = None,
+        *,
+        in_packet: Packet[InT_contra] | None = None,
+        in_args: InT_contra | Sequence[InT_contra] | None = None,
+        ctx: RunContext[CtxT] | None = None,
+        forgetful: bool = False,
+    ) -> Packet[OutT_co]:
+        packet = in_packet
+        for subproc in self.subprocs:
+            packet = await subproc.run(
+                chat_inputs=chat_inputs,
+                in_packet=packet,
+                in_args=in_args,
+                forgetful=forgetful,
+                ctx=ctx,
+            )
+            chat_inputs = None
+            in_args = None
+
+        return cast("Packet[OutT_co]", packet)
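`SequentialWorkflow` simply chains its subprocessors: the first one receives the external input, each later one receives the previous output packet, and `chat_inputs`/`in_args` are cleared after the first step so only the first subprocessor sees them. A minimal sketch, assuming `extract` and `summarize` are existing `Processor` instances whose types chain (names, type parameters, and the way `in_args` is passed are illustrative assumptions):

```python
from grasp_agents.run_context import RunContext
from grasp_agents.workflow.sequential_workflow import SequentialWorkflow

# `extract` and `summarize` are assumed Processor instances where
# extract.out_type matches summarize.in_type (checked at construction time).
pipeline = SequentialWorkflow[str, str, None](
    name="extract_then_summarize",
    subprocs=[extract, summarize],
)


async def run_pipeline(text: str) -> str:
    ctx = RunContext[None]()
    # Only `extract` sees `in_args`; `summarize` gets extract's output packet.
    out_packet = await pipeline.run(in_args=text, ctx=ctx)
    return out_packet.payloads[0]
```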
grasp_agents/workflow/workflow_processor.py (new file)
@@ -0,0 +1,78 @@
+from abc import ABC, abstractmethod
+from collections.abc import Sequence
+from typing import Any, ClassVar, Generic
+
+from ..comm_processor import CommProcessor
+from ..packet import Packet
+from ..packet_pool import PacketPool
+from ..processor import Processor
+from ..run_context import CtxT, RunContext
+from ..typing.io import InT_contra, OutT_co, ProcName
+
+
+class WorkflowProcessor(
+    CommProcessor[InT_contra, OutT_co, Any, CtxT],
+    ABC,
+    Generic[InT_contra, OutT_co, CtxT],
+):
+    _generic_arg_to_instance_attr_map: ClassVar[dict[int, str]] = {
+        0: "_in_type",
+        1: "_out_type",
+    }
+
+    def __init__(
+        self,
+        name: ProcName,
+        subprocs: Sequence[Processor[Any, Any, Any, CtxT]],
+        start_proc: Processor[InT_contra, Any, Any, CtxT],
+        end_proc: Processor[Any, OutT_co, Any, CtxT],
+        packet_pool: PacketPool[CtxT] | None = None,
+        recipients: list[ProcName] | None = None,
+    ) -> None:
+        super().__init__(name=name, packet_pool=packet_pool, recipients=recipients)
+
+        if len(subprocs) < 2:
+            raise ValueError("At least two subprocessors are required")
+        if start_proc not in subprocs:
+            raise ValueError("Start subprocessor must be in the subprocessors list")
+        if end_proc not in subprocs:
+            raise ValueError("End subprocessor must be in the subprocessors list")
+
+        if start_proc.in_type != self.in_type:
+            raise ValueError(
+                f"Start subprocessor's input type {start_proc.in_type} does not "
+                f"match workflow's input type {self._in_type}"
+            )
+        if end_proc.out_type != self.out_type:
+            raise ValueError(
+                f"End subprocessor's output type {end_proc.out_type} does not "
+                f"match workflow's output type {self._out_type}"
+            )
+
+        self._subprocs = subprocs
+        self._start_proc = start_proc
+        self._end_proc = end_proc
+
+    @property
+    def subprocs(self) -> Sequence[Processor[Any, Any, Any, CtxT]]:
+        return self._subprocs
+
+    @property
+    def start_proc(self) -> Processor[InT_contra, Any, Any, CtxT]:
+        return self._start_proc
+
+    @property
+    def end_proc(self) -> Processor[Any, OutT_co, Any, CtxT]:
+        return self._end_proc
+
+    @abstractmethod
+    async def run(
+        self,
+        chat_inputs: Any | None = None,
+        *,
+        in_packet: Packet[InT_contra] | None = None,
+        in_args: InT_contra | Sequence[InT_contra] | None = None,
+        ctx: RunContext[CtxT] | None = None,
+        forgetful: bool = False,
+    ) -> Packet[OutT_co]:
+        pass
{grasp_agents-0.2.10.dist-info → grasp_agents-0.3.1.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: grasp_agents
-Version: 0.2.10
+Version: 0.3.1
 Summary: Grasp Agents Library
 License-File: LICENSE.md
 Requires-Python: <4,>=3.11.4
@@ -38,7 +38,7 @@ Description-Content-Type: text/markdown
 
 - Clean formulation of agents as generic entities over:
   - I/O schemas
--
+  - Memory
   - Shared context
 - Transparent implementation of common agentic patterns:
   - Single-agent loops with an optional "ReAct mode" to enforce reasoning between the tool calls
@@ -50,16 +50,15 @@ Description-Content-Type: text/markdown
 
 ## Project Structure
 
-- `
-- `
-- `
-- `tool_orchestrator.py`: Orchestration of tools used by agents.
+- `processor.py`, `comm_processor.py`, `llm_agent.py`: Core processor and agent class implementations.
+- `packet.py`, `packet_pool.py`: Communication management.
+- `llm_policy_executor.py`: LLM actions and tool call loops.
 - `prompt_builder.py`: Tools for constructing prompts.
-- `workflow/`: Modules for defining and managing agent workflows.
-- `
+- `workflow/`: Modules for defining and managing static agent workflows.
+- `llm.py`, `cloud_llm.py`: LLM integration and base LLM functionalities.
 - `openai/`: Modules specific to OpenAI API integration.
-- `memory.py`: Memory management
-- `run_context.py`:
+- `memory.py`, `llm_agent_memory.py`: Memory management.
+- `run_context.py`: Shared context management for agent runs.
 - `usage_tracker.py`: Tracking of API usage and costs.
 - `costs_dict.yaml`: Dictionary for cost tracking (update if needed).
 - `rate_limiting/`: Basic rate limiting tools.
@@ -111,7 +110,6 @@ Create a script, e.g., `problem_recommender.py`:
 
 ```python
 import asyncio
-import re
 from pathlib import Path
 from typing import Any
 
@@ -119,11 +117,8 @@ from dotenv import load_dotenv
 from pydantic import BaseModel, Field
 
 from grasp_agents.grasp_logging import setup_logging
-from grasp_agents.
-from grasp_agents
-from grasp_agents.run_context import RunContextWrapper
-from grasp_agents.typing.message import Conversation
-from grasp_agents.typing.tool import BaseTool
+from grasp_agents.openai import OpenAILLM, OpenAILLMSettings
+from grasp_agents import LLMAgent, BaseTool, RunContext
 
 load_dotenv()
 
@@ -135,70 +130,66 @@ setup_logging(
 )
 
 sys_prompt_react = """
-Your task is to suggest an exciting stats problem to
-
+Your task is to suggest an exciting stats problem to the student.
+You should first ask the student about their education, interests, and preferences, then suggest a problem tailored specifically to them.
 
 # Instructions
+* Use the provided tool to ask questions.
 * Ask questions one by one.
 * Provide your thinking before asking a question and after receiving a reply.
-*
+* Do not include your exact question as part of your thinking.
+* The problem must have all the necessary data.
 """
 
-
+# Tool input must be a Pydantic model to infer the JSON schema used by the LLM APIs
 class TeacherQuestion(BaseModel):
-    question: str
+    question: str
 
 
 StudentReply = str
 
 
+ask_student_tool_description = """
+"Ask the student a question and get their reply."
+
+Args:
+    question: str
+        The question to ask the student.
+Returns:
+    reply: str
+        The student's reply to the question.
+"""
+
+
 class AskStudentTool(BaseTool[TeacherQuestion, StudentReply, Any]):
-    name: str = "
-    description: str =
+    name: str = "ask_student"
+    description: str = ask_student_tool_description
 
     async def run(
-        self, inp: TeacherQuestion, ctx:
+        self, inp: TeacherQuestion, ctx: RunContext[Any] | None = None
     ) -> StudentReply:
         return input(inp.question)
 
 
-Problem
+class Problem(BaseModel):
+    problem: str
 
 
-teacher = LLMAgent[
-
+teacher = LLMAgent[None, Problem, None](
+    name="teacher",
     llm=OpenAILLM(
         model_name="openai:gpt-4.1",
-        llm_settings=OpenAILLMSettings(temperature=0.
+        llm_settings=OpenAILLMSettings(temperature=0.5),
     ),
     tools=[AskStudentTool()],
-    max_turns=20,
     react_mode=True,
+    final_answer_as_tool_call=True,
    sys_prompt=sys_prompt_react,
-    set_state_strategy="reset",
 )
 
-
-@teacher.exit_tool_call_loop_handler
-def exit_tool_call_loop(
-    conversation: Conversation, ctx: RunContextWrapper[Any] | None, **kwargs: Any
-) -> bool:
-    return r"<PROBLEM>" in str(conversation[-1].content)
-
-
-@teacher.parse_output_handler
-def parse_output(
-    conversation: Conversation, ctx: RunContextWrapper[Any] | None, **kwargs: Any
-) -> Problem:
-    message = str(conversation[-1].content)
-    matches = re.findall(r"<PROBLEM>(.*?)</PROBLEM>", message, re.DOTALL)
-
-    return matches[0]
-
-
 async def main():
-    ctx =
-    out = await teacher.run(ctx=ctx)
+    ctx = RunContext[None](print_messages=True)
+    out = await teacher.run("start", ctx=ctx)
     print(out.payloads[0])
     print(ctx.usage_tracker.total_usage)
 
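The README hunks above also document the API rename between 0.2.10 and 0.3.1. A rough mapping of old to new imports, limited to what is visible in this diff (anything not shown in the hunks is left out rather than guessed):

```python
# 0.2.10 example imports (removed):
#   from grasp_agents.run_context import RunContextWrapper
#   from grasp_agents.typing.message import Conversation
#   from grasp_agents.typing.tool import BaseTool
# 0.3.1 example imports (added):
from grasp_agents import LLMAgent, BaseTool, RunContext
from grasp_agents.openai import OpenAILLM, OpenAILLMSettings

# Agent options that disappear from the example: max_turns, set_state_strategy,
# and the @teacher.exit_tool_call_loop_handler / @teacher.parse_output_handler
# decorators; the new example passes final_answer_as_tool_call=True instead.
```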
grasp_agents-0.3.1.dist-info/RECORD (new file)
@@ -0,0 +1,51 @@
+grasp_agents/__init__.py,sha256=CIsyUasb9HBC3M4olg6ATAwKXtVNmmtpyGJrt7hpZW4,947
+grasp_agents/cloud_llm.py,sha256=Psta1FH0YpBUxLnQJ55KUS5o7-eMR-uxrAP_AEUbupk,13427
+grasp_agents/comm_processor.py,sha256=YzvvHgrSrgn1oXEpkGXNrAtogFyq6gpS0UrVMBO7LBY,6722
+grasp_agents/costs_dict.yaml,sha256=2MFNWtkv5W5WSCcv1Cj13B1iQLVv5Ot9pS_KW2Gu2DA,2510
+grasp_agents/generics_utils.py,sha256=5Pw3I9dlnKC2VGqYKC4ZZUO3Z_vTNT-NPFovNfPkl6I,6542
+grasp_agents/grasp_logging.py,sha256=H1GYhXdQvVkmauFDZ-KDwvVmPQHZUUm9sRqX_ObK2xI,1111
+grasp_agents/http_client.py,sha256=KZva2MjJjuI5ohUeU8RdTAImUnQYaqBrV2jDH8smbJw,738
+grasp_agents/llm.py,sha256=5Yn4jkBPSbGVMf1sEuYXX6YrJlYZiH7oqKLY0yQIn-U,5192
+grasp_agents/llm_agent.py,sha256=Ez6_SQ3yq5X0hs7-zjqgYB0RvxoIJsaf4zzS8awrDZI,14851
+grasp_agents/llm_agent_memory.py,sha256=kD_UIF8xVgbSgW6xN87TzkdQcbTWLB-C5ZQu1_2HLx8,1770
+grasp_agents/llm_policy_executor.py,sha256=ODyxTgqtbBnTRbCanMKjcQ7ET1fYy28_f2Jh_xXxY0Y,17876
+grasp_agents/memory.py,sha256=gPkVIIF6dI_xXzarIAw9kSEnSJcfW_teUsWA2JAih94,671
+grasp_agents/message_history.py,sha256=-ZNy3C1z0yQeahjqR0oIoWDMySJ7vPS19jdutibW7OE,5408
+grasp_agents/packet.py,sha256=PZ1EpclniAoLk7z4ieZbWzgYH3JSRgnlTe_WfbJYG_4,707
+grasp_agents/packet_pool.py,sha256=9umHbi5FwuUYYhhodSR-Z-fRR6OYiZyYEzq5d4nZFK4,3036
+grasp_agents/printer.py,sha256=eVpSZMVk4ZLkV78Sgfg1euzkaS3XBCb30yJcwLMqI0w,5464
+grasp_agents/processor.py,sha256=bfkDJn_zZHLbCQzKj8DcPKpBj9leV0pvmqYuoGn-U9g,6744
+grasp_agents/prompt_builder.py,sha256=eW_TOVarvh5niZcNg6EpiUrR_xv_xKZ0QT04RnzCLxc,8454
+grasp_agents/run_context.py,sha256=0y1JDbz3hJbGiqd3EjUYagkMbEK8YlCzdErp3R2MCr0,1647
+grasp_agents/usage_tracker.py,sha256=SPwv6RpdoHRuMIKE2hCAWAvDbtR3uXuhr2jpHQuKWhI,3438
+grasp_agents/utils.py,sha256=uOUt4LDnErp3WQ2aQesH6mkMzon1-NIsft-o6aVznJE,4522
+grasp_agents/openai/__init__.py,sha256=wpTeew6EjhM6esHCKrEKUpwq0kygMN2QQDxYtmbRG8Y,4201
+grasp_agents/openai/completion_chunk_converters.py,sha256=i-1SvIWhRKtL0K8p5pb3jjACSnyHuJHTCoZovEERpxs,2628
+grasp_agents/openai/completion_converters.py,sha256=vzPEkUOX4l2hobKxZjEk_dyWfzeYesO0DlvWvNVb-Sg,2656
+grasp_agents/openai/content_converters.py,sha256=r1D5uci5x7sbDyl0XN27y-l_jVigCauJruvSdZSnZcc,2510
+grasp_agents/openai/converters.py,sha256=ncscVyPnPMMbyxAfFX3U73lnr_BZU-I89HA5Ld8BuxI,4691
+grasp_agents/openai/message_converters.py,sha256=_fG4vI42rBzoajuC5iYgnUBalg8cQ1ckSt8xFBOuWVY,4111
+grasp_agents/openai/openai_llm.py,sha256=sd1nE5eLUMA8NBqUkSwo_BTfyEBmp9j7Gpuf_s0abw4,7961
+grasp_agents/openai/tool_converters.py,sha256=d_7edJbnUhfSs2-F4J20D71UjVJWIslsXwMyac7-v2Q,1246
+grasp_agents/rate_limiting/__init__.py,sha256=KRgtF_E7R3YfA2cpYcFcZ7wycV0pWVJ0xRQC7YhiIEQ,158
+grasp_agents/rate_limiting/rate_limiter_chunked.py,sha256=BPgkUXvhmZhTpZs2T6uujNFuxH_kYHiISuf6_-eNhUc,5544
+grasp_agents/rate_limiting/types.py,sha256=PbnNhEAcYedQdIpPJWud8HUVcxa_xZS2RDZu4c5jr40,1003
+grasp_agents/rate_limiting/utils.py,sha256=oEDWDNHYMUdxOOG49PlAJochkZq8nnVBCo6JxPc1iSo,2007
+grasp_agents/typing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+grasp_agents/typing/completion.py,sha256=KrvdFZqOcw5GePdMhDoFuwCkMJ86nqRdz9mT9s2lFv4,2445
+grasp_agents/typing/completion_chunk.py,sha256=b1d2a_8h21Uoif_FCS05XDMKap2nZWYV9G4goLKkoL0,5866
+grasp_agents/typing/content.py,sha256=VdnHdW8PHDCtX_ffvcwQz-7ypPUQNSGqHa3txFE_72Y,3676
+grasp_agents/typing/converters.py,sha256=kHlocHQS8QnduZOzNPbj3aRD8JpvJd53oudYqWdOxKE,2978
+grasp_agents/typing/events.py,sha256=QbrvXnDmXFr9_cSsdqL9f35eQLOfZ2O0h3a6yCRtKwY,2625
+grasp_agents/typing/io.py,sha256=_9G_pfEc41mHCLZ5lJ4N4qn7I-QDSOrdK8EFnQrSP18,315
+grasp_agents/typing/message.py,sha256=jTdN6-wVftfzplRjAz9zEWCpx_wPOBgcaMl0fcnsioU,2415
+grasp_agents/typing/tool.py,sha256=AfC6dsFiNAhM80yI9eh1Qm5U5WzJXsTHogXK-Zn4_cE,2495
+grasp_agents/workflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+grasp_agents/workflow/looped_workflow.py,sha256=QqXclXYxsW6C8Rxkf3dRaMHi-DfCvCbjvFZr4nBl1u0,4299
+grasp_agents/workflow/parallel_processor.py,sha256=Xyzs2UR_mRe2GFgzzadHOhqgMu3rFjd3GUjvmZimt_k,3505
+grasp_agents/workflow/sequential_workflow.py,sha256=Pl7jl9ZVDu-rC5UMfympEaQN8iG3kZurVF5eIPG62XA,2130
+grasp_agents/workflow/workflow_processor.py,sha256=2-iaDIlgNXgj-ClGbiE3fYfSv-N_qRC49Gf_dF6M_40,2640
+grasp_agents-0.3.1.dist-info/METADATA,sha256=Oa-Dmx94NHXOLj03LCM6kBe2EtdVcR70IaSoGKTWemg,6806
+grasp_agents-0.3.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+grasp_agents-0.3.1.dist-info/licenses/LICENSE.md,sha256=-nNNdWqGB8gJ2O-peFQ2Irshv5tW5pHKyTcYkwvH7CE,1201
+grasp_agents-0.3.1.dist-info/RECORD,,
grasp_agents/agent_message.py (deleted)
@@ -1,27 +0,0 @@
-from collections.abc import Sequence
-from typing import Generic, TypeVar
-from uuid import uuid4
-
-from pydantic import BaseModel, ConfigDict, Field
-
-from .typing.io import AgentID, AgentState
-
-_PayloadT = TypeVar("_PayloadT", covariant=True)  # noqa: PLC0105
-_StateT = TypeVar("_StateT", bound=AgentState, covariant=True)  # noqa: PLC0105
-
-
-class AgentMessage(BaseModel, Generic[_PayloadT, _StateT]):
-    payloads: Sequence[_PayloadT]
-    sender_id: AgentID
-    sender_state: _StateT | None = None
-    recipient_ids: Sequence[AgentID] = Field(default_factory=list)
-
-    message_id: str = Field(default_factory=lambda: str(uuid4())[:8])
-
-    model_config = ConfigDict(extra="forbid", frozen=True)
-
-    def __repr__(self) -> str:
-        return (
-            f"From: {self.sender_id}, To: {', '.join(self.recipient_ids)}, "
-            f"Payloads: {len(self.payloads)}"
-        )