grasp_agents 0.1.18__py3-none-any.whl → 0.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- grasp_agents/agent_message.py +2 -2
- grasp_agents/agent_message_pool.py +6 -8
- grasp_agents/base_agent.py +15 -36
- grasp_agents/cloud_llm.py +10 -6
- grasp_agents/comm_agent.py +39 -43
- grasp_agents/generics_utils.py +159 -0
- grasp_agents/llm.py +4 -0
- grasp_agents/llm_agent.py +126 -46
- grasp_agents/llm_agent_state.py +18 -12
- grasp_agents/prompt_builder.py +55 -28
- grasp_agents/rate_limiting/rate_limiter_chunked.py +49 -48
- grasp_agents/rate_limiting/types.py +19 -40
- grasp_agents/rate_limiting/utils.py +24 -27
- grasp_agents/run_context.py +2 -15
- grasp_agents/tool_orchestrator.py +34 -12
- grasp_agents/typing/content.py +2 -2
- grasp_agents/typing/converters.py +3 -1
- grasp_agents/typing/io.py +7 -11
- grasp_agents/typing/message.py +2 -2
- grasp_agents/typing/tool.py +26 -14
- grasp_agents/utils.py +90 -96
- grasp_agents/workflow/looped_agent.py +12 -9
- grasp_agents/workflow/sequential_agent.py +9 -6
- grasp_agents/workflow/workflow_agent.py +16 -11
- {grasp_agents-0.1.18.dist-info → grasp_agents-0.2.1.dist-info}/METADATA +37 -33
- grasp_agents-0.2.1.dist-info/RECORD +45 -0
- {grasp_agents-0.1.18.dist-info → grasp_agents-0.2.1.dist-info}/licenses/LICENSE.md +1 -1
- grasp_agents-0.1.18.dist-info/RECORD +0 -44
- {grasp_agents-0.1.18.dist-info → grasp_agents-0.2.1.dist-info}/WHEEL +0 -0
grasp_agents/utils.py
CHANGED
@@ -1,85 +1,82 @@
 import ast
 import asyncio
-import functools
 import json
 import re
-from collections.abc import
-from copy import deepcopy
+from collections.abc import Coroutine, Mapping
 from datetime import datetime
 from logging import getLogger
 from pathlib import Path
-from typing import Any, TypeVar
+from typing import Any, TypeVar

-from pydantic import
-
+from pydantic import (
+    GetCoreSchemaHandler,
+    TypeAdapter,
+    ValidationError,
+)
 from pydantic_core import core_schema
 from tqdm.autonotebook import tqdm

 logger = getLogger(__name__)

+_JSON_START_RE = re.compile(r"[{\[]")

-
-    fields_dict: dict[str, FieldInfo] = {}
-    for model in models:
-        for field_name, field_info in model.model_fields.items():
-            if field_name in fields_dict:
-                raise ValueError(
-                    f"Field conflict detected: '{field_name}' exists in multiple models"
-                )
-            fields_dict[field_name] = field_info
+T = TypeVar("T")

-    return create_model("MergedModel", __module__=__name__, **fields_dict)  # type: ignore
-
-
-def filter_fields(data: dict[str, Any], model: type[BaseModel]) -> dict[str, Any]:
-    return {key: data[key] for key in model.model_fields if key in data}

+def extract_json_substring(text: str) -> str | None:
+    decoder = json.JSONDecoder()
+    for match in _JSON_START_RE.finditer(text):
+        start = match.start()
+        try:
+            _, end = decoder.raw_decode(text, idx=start)
+            return text[start:end]
+        except ValueError:
+            continue

-
-    return Path(file_path).read_text()
+    return None


-def
-
-    text = text.replace("\n", "")
-    length = len(text)
-    i = 0
-    while i < length:
-        ch = text[i]
-        if ch in "{[":
-            try:
-                _, end = decoder.raw_decode(text[i:])
-                return text[i : i + end]
-            except ValueError:
-                pass
-        i += 1
-
-    return text
-
-
-def read_json_string(
-    json_str: str, return_none_on_failure: bool = False
+def parse_json_or_py_string(
+    s: str, return_none_on_failure: bool = False
 ) -> dict[str, Any] | list[Any] | None:
+    s_fmt = re.sub(r"```[a-zA-Z0-9]*\n|```", "", s).strip()
     try:
-
+        return ast.literal_eval(s_fmt)
     except (ValueError, SyntaxError):
         try:
-
+            return json.loads(s_fmt)
         except json.JSONDecodeError as exc:
             if return_none_on_failure:
                 return None
             raise ValueError(
-                "Invalid JSON - Both ast.literal_eval and json.loads "
-                f"failed to parse the following response:\n{
+                "Invalid JSON/Python string - Both ast.literal_eval and json.loads "
+                f"failed to parse the following response:\n{s}"
             ) from exc

-    return json_response

-
-def extract_json(
+def parse_json_or_py_substring(
     json_str: str, return_none_on_failure: bool = False
 ) -> dict[str, Any] | list[Any] | None:
-    return
+    return parse_json_or_py_string(
+        extract_json_substring(json_str) or "", return_none_on_failure
+    )
+
+
+def validate_obj_from_json_or_py_string(
+    s: str, adapter: TypeAdapter[T], from_substring: bool = False
+) -> T:
+    try:
+        if from_substring:
+            parsed = parse_json_or_py_substring(s, return_none_on_failure=True)
+        else:
+            parsed = parse_json_or_py_string(s, return_none_on_failure=True)
+        if parsed is None:
+            parsed = s
+        return adapter.validate_python(parsed)
+    except (json.JSONDecodeError, ValidationError) as exc:
+        raise ValueError(
+            f"Invalid JSON or Python string:\n{s}\nExpected type: {adapter._type}",  # type: ignore[arg-type]
+        ) from exc


 def extract_xml_list(text: str) -> list[str]:

@@ -92,16 +89,26 @@ def extract_xml_list(text: str) -> list[str]:
     return chunks


-def
-
+def build_marker_json_parser_type(
+    marker_to_model: Mapping[str, type],
 ) -> type:
-
-
-
-
-
-
-
+    """
+    Return a Pydantic-compatible *type* that, when given a **str**, searches for
+    the first marker substring and validates the JSON that follows with the
+    corresponding Pydantic model.
+
+    If no marker is found, the raw string is returned unchanged.
+
+    Example:
+    -------
+    >>> Todo = build_marker_json_parser_type({'```json': MyModel})
+    >>> Todo.validate('```json {"a": 1}')
+    MyModel(a=1)
+
+    """
+
+    class MarkerParsedOutput:
+        """String → (Model | str) parser generated by build_marker_json_parser_type."""

         @classmethod
         def __get_pydantic_core_schema__(

@@ -109,58 +116,49 @@ def make_conditional_parsed_output_type(
             _source_type: Any,
             _handler: GetCoreSchemaHandler,
         ) -> core_schema.CoreSchema:
-            def
-                if isinstance(
-
-                    response_format_adapter = TypeAdapter[Any](response_format)
+            def _validate(value: Any) -> Any:
+                if not isinstance(value, str):
+                    raise TypeError("MarkerParsedOutput expects a string")

-
+                for marker, model in marker_to_model.items():
+                    if marker in value:
+                        adapter = TypeAdapter[Any](model)
+                        return validate_obj_from_json_or_py_string(
+                            value, adapter=adapter, from_substring=True
+                        )

-                return
+                return value

             return core_schema.no_info_after_validator_function(
-
+                _validate, core_schema.any_schema()
             )

         @classmethod
         def __get_pydantic_json_schema__(
-            cls,
+            cls,
+            schema: core_schema.CoreSchema,
+            handler: GetCoreSchemaHandler,
         ):
-            return handler(
+            return handler(schema)

-
+    unique_suffix = "_".join(sorted(marker_to_model))[:40]
+    MarkerParsedOutput.__name__ = f"MarkerParsedOutput_{unique_suffix}"

+    return MarkerParsedOutput

-T = TypeVar("T", bound=Callable[..., Any])

-
-
-    @functools.wraps(method)
-    def wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
-        before = deepcopy(self.__dict__)
-        result = method(self, *args, **kwargs)
-        after = self.__dict__
-        if before != after:
-            raise RuntimeError(
-                f"Method '{method.__name__}' modified the instance state."
-            )
-        return result
-
-    return cast("T", wrapper)
+def read_txt(file_path: str | Path, encoding: str = "utf-8") -> str:
+    return Path(file_path).read_text(encoding=encoding)


 def read_contents_from_file(
     file_path: str | Path,
     binary_mode: bool = False,
 ) -> str | bytes:
-    """Reads and returns contents of file"""
     try:
         if binary_mode:
-
-
-        else:
-            with open(file_path) as file:
-                return file.read()
+            return Path(file_path).read_bytes()
+        return Path(file_path).read_text()
     except FileNotFoundError:
         logger.error(f"File {file_path} not found.")
         return ""

@@ -168,13 +166,9 @@ def read_contents_from_file(

 def get_prompt(prompt_text: str | None, prompt_path: str | Path | None) -> str | None:
     if prompt_text is None:
-
-            read_contents_from_file(prompt_path) if prompt_path is not None else None
-        )
-    else:
-        prompt = prompt_text
+        return read_contents_from_file(prompt_path) if prompt_path is not None else None  # type: ignore[arg-type]

-    return
+    return prompt_text


 async def asyncio_gather_with_pbar(
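The helpers above replace the removed `read_json_string`/`extract_json` pair and accept both strict JSON and Python-literal payloads, stripping Markdown code fences before parsing. A minimal usage sketch against grasp-agents 0.2.1; the `Answer` model and the example strings are illustrative, not part of the package:

```python
from pydantic import BaseModel, TypeAdapter

from grasp_agents.utils import (
    parse_json_or_py_string,
    validate_obj_from_json_or_py_string,
)


class Answer(BaseModel):
    score: int


# Python literals are accepted as well as strict JSON (ast.literal_eval runs first).
assert parse_json_or_py_string("{'score': 7}") == {"score": 7}

# from_substring=True locates the first JSON/Python object embedded in free-form
# text (via extract_json_substring) and validates it against the adapter's type.
noisy = 'Sure, here is the result: {"score": 7}. Anything else?'
answer = validate_obj_from_json_or_py_string(
    noisy, adapter=TypeAdapter(Answer), from_substring=True
)
print(answer)  # score=7
```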
grasp_agents/workflow/looped_agent.py
CHANGED
@@ -1,16 +1,16 @@
 from collections.abc import Sequence
 from logging import getLogger
-from typing import Any, Generic, Protocol, TypeVar, cast, final
+from typing import Any, ClassVar, Generic, Protocol, TypeVar, cast, final

 from ..agent_message_pool import AgentMessage, AgentMessagePool
 from ..comm_agent import CommunicatingAgent
 from ..run_context import CtxT, RunContextWrapper
-from ..typing.io import AgentID,
+from ..typing.io import AgentID, AgentState, InT, OutT
 from .workflow_agent import WorkflowAgent

 logger = getLogger(__name__)

-_EH_OutT = TypeVar("_EH_OutT",
+_EH_OutT = TypeVar("_EH_OutT", contravariant=True)  # noqa: PLC0105


 class WorkflowLoopExitHandler(Protocol[_EH_OutT, CtxT]):

@@ -23,13 +23,16 @@ class WorkflowLoopExitHandler(Protocol[_EH_OutT, CtxT]):


 class LoopedWorkflowAgent(WorkflowAgent[InT, OutT, CtxT], Generic[InT, OutT, CtxT]):
+    _generic_arg_to_instance_attr_map: ClassVar[dict[int, str]] = {
+        0: "_in_type",
+        1: "_out_type",
+    }
+
     def __init__(
         self,
         agent_id: AgentID,
-        subagents: Sequence[
-
-        ],
-        exit_agent: CommunicatingAgent[AgentPayload, OutT, AgentState, CtxT],
+        subagents: Sequence[CommunicatingAgent[Any, Any, AgentState, CtxT]],
+        exit_agent: CommunicatingAgent[Any, OutT, AgentState, CtxT],
         message_pool: AgentMessagePool[CtxT] | None = None,
         recipient_ids: list[AgentID] | None = None,
         dynamic_routing: bool = False,

@@ -61,7 +64,7 @@ class LoopedWorkflowAgent(WorkflowAgent[InT, OutT, CtxT], Generic[InT, OutT, Ctx

         return func

-    def
+    def _exit_workflow_loop(
         self,
         output_message: AgentMessage[OutT, AgentState],
         ctx: RunContextWrapper[CtxT] | None,

@@ -101,7 +104,7 @@ class LoopedWorkflowAgent(WorkflowAgent[InT, OutT, CtxT], Generic[InT, OutT, Ctx
             if subagent is self._end_agent:
                 num_iterations += 1
                 exit_message = cast("AgentMessage[OutT, AgentState]", agent_message)
-                if self.
+                if self._exit_workflow_loop(exit_message, ctx=ctx):
                     return exit_message
                 if num_iterations >= self._max_iterations:
                     logger.info(
grasp_agents/workflow/sequential_agent.py
CHANGED
@@ -1,20 +1,23 @@
 from collections.abc import Sequence
-from typing import Any, Generic, cast, final
+from typing import Any, ClassVar, Generic, cast, final

 from ..agent_message_pool import AgentMessage, AgentMessagePool
 from ..comm_agent import CommunicatingAgent
 from ..run_context import CtxT, RunContextWrapper
-from ..typing.io import AgentID,
+from ..typing.io import AgentID, AgentState, InT, OutT
 from .workflow_agent import WorkflowAgent


 class SequentialWorkflowAgent(WorkflowAgent[InT, OutT, CtxT], Generic[InT, OutT, CtxT]):
+    _generic_arg_to_instance_attr_map: ClassVar[dict[int, str]] = {
+        0: "_in_type",
+        1: "_out_type",
+    }
+
     def __init__(
         self,
         agent_id: AgentID,
-        subagents: Sequence[
-            CommunicatingAgent[AgentPayload, AgentPayload, AgentState, CtxT]
-        ],
+        subagents: Sequence[CommunicatingAgent[Any, Any, AgentState, CtxT]],
         message_pool: AgentMessagePool[CtxT] | None = None,
         recipient_ids: list[AgentID] | None = None,
         dynamic_routing: bool = False,

@@ -23,7 +26,7 @@ class SequentialWorkflowAgent(WorkflowAgent[InT, OutT, CtxT], Generic[InT, OutT,
         super().__init__(
             subagents=subagents,
             start_agent=subagents[0],
-            end_agent=subagents[-1],
+            end_agent=subagents[-1],
             agent_id=agent_id,
             message_pool=message_pool,
             recipient_ids=recipient_ids,
grasp_agents/workflow/workflow_agent.py
CHANGED
@@ -1,11 +1,11 @@
 from abc import ABC, abstractmethod
 from collections.abc import Sequence
-from typing import Any, Generic
+from typing import Any, ClassVar, Generic

 from ..agent_message_pool import AgentMessage, AgentMessagePool
 from ..comm_agent import CommunicatingAgent
 from ..run_context import CtxT, RunContextWrapper
-from ..typing.io import AgentID,
+from ..typing.io import AgentID, AgentState, InT, OutT


 class WorkflowAgent(

@@ -13,14 +13,17 @@ class WorkflowAgent(
     ABC,
     Generic[InT, OutT, CtxT],
 ):
+    _generic_arg_to_instance_attr_map: ClassVar[dict[int, str]] = {
+        0: "_in_type",
+        1: "_out_type",
+    }
+
     def __init__(
         self,
         agent_id: AgentID,
-        subagents: Sequence[
-
-        ],
-        start_agent: CommunicatingAgent[InT, AgentPayload, AgentState, CtxT],
-        end_agent: CommunicatingAgent[AgentPayload, OutT, AgentState, CtxT],
+        subagents: Sequence[CommunicatingAgent[Any, Any, AgentState, CtxT]],
+        start_agent: CommunicatingAgent[InT, Any, AgentState, CtxT],
+        end_agent: CommunicatingAgent[Any, OutT, AgentState, CtxT],
         message_pool: AgentMessagePool[CtxT] | None = None,
         recipient_ids: list[AgentID] | None = None,
         dynamic_routing: bool = False,

@@ -28,6 +31,10 @@ class WorkflowAgent(
     ) -> None:
         if not subagents:
             raise ValueError("At least one step is required")
+        if start_agent not in subagents:
+            raise ValueError("Start agent must be in the subagents list")
+        if end_agent not in subagents:
+            raise ValueError("End agent must be in the subagents list")

         self.subagents = subagents


@@ -36,8 +43,6 @@ class WorkflowAgent(

         super().__init__(
             agent_id=agent_id,
-            out_schema=end_agent.out_schema,
-            rcv_args_schema=start_agent.rcv_args_schema,
             message_pool=message_pool,
             recipient_ids=recipient_ids,
             dynamic_routing=dynamic_routing,

@@ -48,11 +53,11 @@ class WorkflowAgent(
         )

     @property
-    def start_agent(self) -> CommunicatingAgent[InT,
+    def start_agent(self) -> CommunicatingAgent[InT, Any, AgentState, CtxT]:
         return self._start_agent

     @property
-    def end_agent(self) -> CommunicatingAgent[
+    def end_agent(self) -> CommunicatingAgent[Any, OutT, AgentState, CtxT]:
         return self._end_agent

     @abstractmethod
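All three workflow classes now declare a `_generic_arg_to_instance_attr_map` ClassVar, presumably consumed by the new `generics_utils.py` (added in 0.2.1 but not shown in this diff) to bind subscripted type arguments such as `WorkflowAgent[InT, OutT, CtxT]` to the `_in_type`/`_out_type` attributes. A rough, self-contained sketch of that pattern; the resolver below is hypothetical and is not the package's actual implementation:

```python
from typing import Any, ClassVar, Generic, TypeVar

InT = TypeVar("InT")
OutT = TypeVar("OutT")


class AutoGenericBase:
    # Position of the generic argument -> name of the attribute it populates.
    _generic_arg_to_instance_attr_map: ClassVar[dict[int, str]] = {}

    @classmethod
    def __class_getitem__(cls, item: Any) -> type:
        args = item if isinstance(item, tuple) else (item,)
        # Hypothetical resolver: bake the concrete type args into a subclass so
        # they are available at runtime under the mapped attribute names.
        attrs = {
            attr: args[pos]
            for pos, attr in cls._generic_arg_to_instance_attr_map.items()
            if pos < len(args)
        }
        return type(cls.__name__, (cls,), attrs)


class Agent(AutoGenericBase, Generic[InT, OutT]):
    _generic_arg_to_instance_attr_map: ClassVar[dict[int, str]] = {
        0: "_in_type",
        1: "_out_type",
    }


Specialized = Agent[str, int]
print(Specialized._in_type, Specialized._out_type)  # <class 'str'> <class 'int'>
```

The real resolver in `generics_utils.py` may differ substantially; the sketch only illustrates why a positional map to attribute names is enough to recover the I/O types at runtime.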
{grasp_agents-0.1.18.dist-info → grasp_agents-0.2.1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: grasp_agents
-Version: 0.1
+Version: 0.2.1
 Summary: Grasp Agents Library
 License-File: LICENSE.md
 Requires-Python: <4,>=3.11.4

@@ -17,7 +17,10 @@ Description-Content-Type: text/markdown
 # Grasp Agents

 <br/>
-<
+<picture>
+  <source srcset="./.assets/grasp-dark.svg" media="(prefers-color-scheme: dark)">
+  <img src="./.assets/grasp.svg" alt="Grasp Agents"/>
+</picture>
 <br/>
 <br/>

@@ -34,14 +37,14 @@ Description-Content-Type: text/markdown
 ## Features

 - Clean formulation of agents as generic entities over:
-
-
-
+  - I/O schemas
+  - Agent state
+  - Shared context
 - Transparent implementation of common agentic patterns:
   * Single-agent loops with an optional "ReAct mode" to enforce reasoning between the tool calls
   * Workflows (static communication topology), including loops
   * Agents-as-tools for task delegation
-  * Freeform A2A communication via in-process
+  * Freeform A2A communication via the in-process actor model
 - Batch processing support outside of agentic loops
 - Simple logging and usage/cost tracking

@@ -54,7 +57,7 @@ Description-Content-Type: text/markdown
 - `prompt_builder.py`: Tools for constructing prompts.
 - `workflow/`: Modules for defining and managing agent workflows.
 - `cloud_llm.py`, `llm.py`: LLM integration and base LLM functionalities.
-- `openai/`: Modules specific to OpenAI API integration.
+- `openai/`: Modules specific to OpenAI API integration.
 - `memory.py`: Memory management for agents (currently only message history).
 - `run_context.py`: Context management for agent runs.
 - `usage_tracker.py`: Tracking of API usage and costs.

@@ -107,18 +110,20 @@ GOOGLE_AI_STUDIO_API_KEY=your_google_ai_studio_api_key
 Create a script, e.g., `problem_recommender.py`:

 ```python
+import asyncio
 import re
-from typing import Any
 from pathlib import Path
-from
+from typing import Any
+
 from dotenv import load_dotenv
-from
-
-from grasp_agents.run_context import RunContextWrapper
-from grasp_agents.openai.openai_llm import OpenAILLM, OpenAILLMSettings
-from grasp_agents.llm_agent import LLMAgent
+from pydantic import BaseModel, Field
+
 from grasp_agents.grasp_logging import setup_logging
+from grasp_agents.llm_agent import LLMAgent
+from grasp_agents.openai.openai_llm import OpenAILLM, OpenAILLMSettings
+from grasp_agents.run_context import RunContextWrapper
 from grasp_agents.typing.message import Conversation
+from grasp_agents.typing.tool import BaseTool

 load_dotenv()

@@ -130,8 +135,8 @@ setup_logging(
 )

 sys_prompt_react = """
-Your task is to suggest an exciting stats problem to a student.
-Ask the student about their education, interests, and preferences, then suggest a problem tailored to them.
+Your task is to suggest an exciting stats problem to a student.
+Ask the student about their education, interests, and preferences, then suggest a problem tailored to them.

 # Instructions
 * Ask questions one by one.

@@ -143,14 +148,13 @@ Ask the student about their education, interests, and preferences, then suggest
 class TeacherQuestion(BaseModel):
     question: str = Field(..., description="The question to ask the student.")

+
 StudentReply = str


 class AskStudentTool(BaseTool[TeacherQuestion, StudentReply, Any]):
     name: str = "ask_student_tool"
     description: str = "Ask the student a question and get their reply."
-    in_schema: type[TeacherQuestion] = TeacherQuestion
-    out_schema: type[StudentReply] = StudentReply

     async def run(
         self, inp: TeacherQuestion, ctx: RunContextWrapper[Any] | None = None

@@ -158,11 +162,10 @@ class AskStudentTool(BaseTool[TeacherQuestion, StudentReply, Any]):
         return input(inp.question)


-
-    problem: str
+Problem = str


-teacher = LLMAgent[Any,
+teacher = LLMAgent[Any, Problem, None](
     agent_id="teacher",
     llm=OpenAILLM(
         model_name="gpt-4.1",

@@ -173,30 +176,31 @@ teacher = LLMAgent[Any, FinalResponse, None](
     max_turns=20,
     react_mode=True,
     sys_prompt=sys_prompt_react,
-    out_schema=FinalResponse,
     set_state_strategy="reset",
 )


-@teacher.
-def exit_tool_call_loop(
-
-
-    return
+@teacher.exit_tool_call_loop_handler
+def exit_tool_call_loop(
+    conversation: Conversation, ctx: RunContextWrapper[Any] | None, **kwargs: Any
+) -> bool:
+    return r"<PROBLEM>" in str(conversation[-1].content)


 @teacher.parse_output_handler
-def parse_output(
-
+def parse_output(
+    conversation: Conversation, ctx: RunContextWrapper[Any] | None, **kwargs: Any
+) -> Problem:
+    message = str(conversation[-1].content)
+    matches = re.findall(r"<PROBLEM>(.*?)</PROBLEM>", message, re.DOTALL)

-    return
+    return matches[0]


 async def main():
-    ctx = RunContextWrapper(print_messages=True)
+    ctx = RunContextWrapper[None](print_messages=True)
     out = await teacher.run(ctx=ctx)
-    print(out.payloads[0]
+    print(out.payloads[0])
     print(ctx.usage_tracker.total_usage)

grasp_agents-0.2.1.dist-info/RECORD
ADDED
@@ -0,0 +1,45 @@
+grasp_agents/agent_message.py,sha256=eJV5n44t8EIE6M3jl48Ld7pmaW9dDhBX_FWm_u9yGWE,877
+grasp_agents/agent_message_pool.py,sha256=OKTXNEo9LAJTQJkzxmJ3TQgWw7WJKOzrKCJjeHpln6o,3158
+grasp_agents/base_agent.py,sha256=BOLYxS_cSisOR4qupUYIVn2FW15svit3jbNNfjw_cT8,1347
+grasp_agents/cloud_llm.py,sha256=D3iWYb0F-6cG4rXgVoP4bWm2u2RtnEh_xG3MufFaFDo,13157
+grasp_agents/comm_agent.py,sha256=e2IsatGLxdDkSZpPxQm6s1ha6w0Z9XAzRU1L4qb9wNY,7280
+grasp_agents/costs_dict.yaml,sha256=EW6XxRXLZobMwQEEiUNYALbDzfbZFb2zEVCaTSAqYjw,2334
+grasp_agents/generics_utils.py,sha256=kw4Odte6Nvl4c9U7-mKPgXCavWZXo009zYDHAA0BR3g,6234
+grasp_agents/grasp_logging.py,sha256=H1GYhXdQvVkmauFDZ-KDwvVmPQHZUUm9sRqX_ObK2xI,1111
+grasp_agents/http_client.py,sha256=KZva2MjJjuI5ohUeU8RdTAImUnQYaqBrV2jDH8smbJw,738
+grasp_agents/llm.py,sha256=n67lXbB8spr_i3Xz0Plw7oeykfjQmVHHkSiveqBB5Lw,3150
+grasp_agents/llm_agent.py,sha256=C57T2UlsGSebkLIdk96si06b1h5IVIGj1ZIUxV2tX2I,14989
+grasp_agents/llm_agent_state.py,sha256=lLdYni2f3TA5zJLf_jqR5DSWqVI_zP2YfNrwEGqZnvg,2402
+grasp_agents/memory.py,sha256=X1YtVX8XxP5KnGPMW8BqjID8QK4hTG2obxoyhnnZ4pU,5575
+grasp_agents/printer.py,sha256=Jk6OJExio53gbKBod5Dd8Y3CWYrVb4K5q4UJ8i9cQvo,5024
+grasp_agents/prompt_builder.py,sha256=JQ269tvRokWBdibJIkv1ZBkEYNR-AWppZpj2Fx7c9VY,8208
+grasp_agents/run_context.py,sha256=M4w_HXl5aiz-18CDlfNCRNZm3m5UIQMrjKkhurFTtkY,2229
+grasp_agents/tool_orchestrator.py,sha256=--E-ue7Z8nK6NwqGbWeCQWfTjWIbPxEe5X54bjPe62M,6107
+grasp_agents/usage_tracker.py,sha256=5YuN6hpg6HASdg-hOylgWzhCiORmDMnZuQtbISfhm_4,3378
+grasp_agents/utils.py,sha256=gKUtJ6__HB7yHBUPWY5tkdSAfgj3_R3--s2J5B5fBPE,5739
+grasp_agents/openai/__init__.py,sha256=qN8HMAatSJKOsA6v-JwakMYguwkswCVHqrmK1gFy9wI,3096
+grasp_agents/openai/completion_converters.py,sha256=lX9h1kaGAo5ttsl-4V7l4x8IpjxJaJJtyU2cKu3-EOc,1871
+grasp_agents/openai/content_converters.py,sha256=6GI0D7xJalzsiawAJOyCUzTJTo0NQdpv87YKmfN0LYQ,2631
+grasp_agents/openai/converters.py,sha256=DBXBxow9oRG6pc8inpZBLiuUqHzVfpscmHFpN9bAdvc,5276
+grasp_agents/openai/message_converters.py,sha256=KjF6FbXzwlWdM-1YT3cswUV-74sjiwOhLFPMY4sJ5Xk,4593
+grasp_agents/openai/openai_llm.py,sha256=dscGlVhH4v1yAw4NRPgJdww9toOoMRpIqA6HD4IGWOs,6132
+grasp_agents/openai/tool_converters.py,sha256=KhWRETkjhjocISUo_HBZ8QfBiyTOoC5WurPNAR4BYxc,1027
+grasp_agents/rate_limiting/__init__.py,sha256=KRgtF_E7R3YfA2cpYcFcZ7wycV0pWVJ0xRQC7YhiIEQ,158
+grasp_agents/rate_limiting/rate_limiter_chunked.py,sha256=BPgkUXvhmZhTpZs2T6uujNFuxH_kYHiISuf6_-eNhUc,5544
+grasp_agents/rate_limiting/types.py,sha256=PbnNhEAcYedQdIpPJWud8HUVcxa_xZS2RDZu4c5jr40,1003
+grasp_agents/rate_limiting/utils.py,sha256=oEDWDNHYMUdxOOG49PlAJochkZq8nnVBCo6JxPc1iSo,2007
+grasp_agents/typing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+grasp_agents/typing/completion.py,sha256=_KDLx3Gtz7o-pEZrvAFgCZwDmkr2oQkxrL-2LSXHHsw,657
+grasp_agents/typing/content.py,sha256=VdnHdW8PHDCtX_ffvcwQz-7ypPUQNSGqHa3txFE_72Y,3676
+grasp_agents/typing/converters.py,sha256=yORIljRsVoKz7oj38pHLD6luIelM1RcYL_PqG_D4nWM,3086
+grasp_agents/typing/io.py,sha256=uxSvbD05UK5nIhPfDvXIoGuU6xRMW4USZq_4IgBeGCY,609
+grasp_agents/typing/message.py,sha256=XgPjXeh47e2GG1AYslhxaNw1Ax6Ozatga_7X2SFFKMA,3826
+grasp_agents/typing/tool.py,sha256=e0pTMnRcpMpGNVQ8muE9wnh7LdIgh92AqXDo9hMDxf0,1960
+grasp_agents/workflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+grasp_agents/workflow/looped_agent.py,sha256=8NVy6dAwEs7f6JgYHZUI1h-N8SabpND4n-3rSy5mh70,3945
+grasp_agents/workflow/sequential_agent.py,sha256=Ral6Bvsl5-NdO-uKMGiWuz5EE9rNcYb1lXhY8CcQw4w,2054
+grasp_agents/workflow/workflow_agent.py,sha256=LadvEJTsV6YEGRb_eaYgu5r7k1aa8N-2FHXMYJpbBVU,2460
+grasp_agents-0.2.1.dist-info/METADATA,sha256=UY4lcgpBi7u0wqNU1Ep5Wp_13azdTzcFXsncASIdYXA,6889
+grasp_agents-0.2.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+grasp_agents-0.2.1.dist-info/licenses/LICENSE.md,sha256=-nNNdWqGB8gJ2O-peFQ2Irshv5tW5pHKyTcYkwvH7CE,1201
+grasp_agents-0.2.1.dist-info/RECORD,,
{grasp_agents-0.1.18.dist-info → grasp_agents-0.2.1.dist-info}/licenses/LICENSE.md
CHANGED
@@ -8,6 +8,6 @@ Package production dependencies are licensed under the following terms:
 | dotenv | 0.9.9 | BSD-3-Clause license | https://github.com/pedroburon/dotenv |
 | httpx | 0.28.1 | BSD License | https://github.com/encode/httpx |
 | openai | 1.77.0 | Apache Software License | https://github.com/openai/openai-python |
-| tenacity |
+| tenacity | 8.5.0 | Apache Software License | https://github.com/jd/tenacity |
 | termcolor | 2.5.0 | MIT License | https://github.com/termcolor/termcolor |
 | tqdm | 4.67.1 | MIT License; Mozilla Public License 2.0 (MPL 2.0) | https://tqdm.github.io |