grasp_agents 0.2.10.tar.gz → 0.3.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. {grasp_agents-0.2.10 → grasp_agents-0.3.1}/PKG-INFO +41 -50
  2. {grasp_agents-0.2.10 → grasp_agents-0.3.1}/README.md +40 -49
  3. {grasp_agents-0.2.10 → grasp_agents-0.3.1}/pyproject.toml +1 -1
  4. grasp_agents-0.3.1/src/grasp_agents/__init__.py +39 -0
  5. {grasp_agents-0.2.10 → grasp_agents-0.3.1}/src/grasp_agents/cloud_llm.py +118 -131
  6. grasp_agents-0.3.1/src/grasp_agents/comm_processor.py +201 -0
  7. {grasp_agents-0.2.10 → grasp_agents-0.3.1}/src/grasp_agents/generics_utils.py +15 -7
  8. grasp_agents-0.3.1/src/grasp_agents/llm.py +151 -0
  9. grasp_agents-0.3.1/src/grasp_agents/llm_agent.py +436 -0
  10. grasp_agents-0.3.1/src/grasp_agents/llm_agent_memory.py +58 -0
  11. grasp_agents-0.3.1/src/grasp_agents/llm_policy_executor.py +482 -0
  12. grasp_agents-0.3.1/src/grasp_agents/memory.py +30 -0
  13. grasp_agents-0.2.10/src/grasp_agents/memory.py → grasp_agents-0.3.1/src/grasp_agents/message_history.py +20 -24
  14. grasp_agents-0.3.1/src/grasp_agents/openai/__init__.py +105 -0
  15. grasp_agents-0.3.1/src/grasp_agents/openai/completion_chunk_converters.py +78 -0
  16. grasp_agents-0.3.1/src/grasp_agents/openai/completion_converters.py +78 -0
  17. {grasp_agents-0.2.10 → grasp_agents-0.3.1}/src/grasp_agents/openai/content_converters.py +13 -14
  18. grasp_agents-0.3.1/src/grasp_agents/openai/converters.py +140 -0
  19. grasp_agents-0.3.1/src/grasp_agents/openai/message_converters.py +141 -0
  20. {grasp_agents-0.2.10 → grasp_agents-0.3.1}/src/grasp_agents/openai/openai_llm.py +101 -42
  21. grasp_agents-0.3.1/src/grasp_agents/openai/tool_converters.py +43 -0
  22. grasp_agents-0.3.1/src/grasp_agents/packet.py +24 -0
  23. grasp_agents-0.3.1/src/grasp_agents/packet_pool.py +91 -0
  24. {grasp_agents-0.2.10 → grasp_agents-0.3.1}/src/grasp_agents/printer.py +29 -15
  25. grasp_agents-0.3.1/src/grasp_agents/processor.py +194 -0
  26. grasp_agents-0.3.1/src/grasp_agents/prompt_builder.py +234 -0
  27. grasp_agents-0.3.1/src/grasp_agents/run_context.py +57 -0
  28. grasp_agents-0.3.1/src/grasp_agents/typing/completion.py +76 -0
  29. grasp_agents-0.3.1/src/grasp_agents/typing/completion_chunk.py +173 -0
  30. {grasp_agents-0.2.10 → grasp_agents-0.3.1}/src/grasp_agents/typing/converters.py +8 -12
  31. grasp_agents-0.3.1/src/grasp_agents/typing/events.py +86 -0
  32. grasp_agents-0.3.1/src/grasp_agents/typing/io.py +16 -0
  33. grasp_agents-0.3.1/src/grasp_agents/typing/message.py +94 -0
  34. grasp_agents-0.3.1/src/grasp_agents/typing/tool.py +97 -0
  35. {grasp_agents-0.2.10 → grasp_agents-0.3.1}/src/grasp_agents/usage_tracker.py +6 -6
  36. {grasp_agents-0.2.10 → grasp_agents-0.3.1}/src/grasp_agents/utils.py +3 -3
  37. grasp_agents-0.3.1/src/grasp_agents/workflow/looped_workflow.py +132 -0
  38. grasp_agents-0.3.1/src/grasp_agents/workflow/parallel_processor.py +95 -0
  39. grasp_agents-0.3.1/src/grasp_agents/workflow/sequential_workflow.py +66 -0
  40. grasp_agents-0.3.1/src/grasp_agents/workflow/workflow_processor.py +78 -0
  41. grasp_agents-0.2.10/src/grasp_agents/__init__.py +0 -38
  42. grasp_agents-0.2.10/src/grasp_agents/agent_message.py +0 -27
  43. grasp_agents-0.2.10/src/grasp_agents/agent_message_pool.py +0 -92
  44. grasp_agents-0.2.10/src/grasp_agents/base_agent.py +0 -51
  45. grasp_agents-0.2.10/src/grasp_agents/comm_agent.py +0 -217
  46. grasp_agents-0.2.10/src/grasp_agents/llm.py +0 -122
  47. grasp_agents-0.2.10/src/grasp_agents/llm_agent.py +0 -485
  48. grasp_agents-0.2.10/src/grasp_agents/llm_agent_state.py +0 -79
  49. grasp_agents-0.2.10/src/grasp_agents/openai/__init__.py +0 -87
  50. grasp_agents-0.2.10/src/grasp_agents/openai/completion_converters.py +0 -55
  51. grasp_agents-0.2.10/src/grasp_agents/openai/converters.py +0 -164
  52. grasp_agents-0.2.10/src/grasp_agents/openai/message_converters.py +0 -155
  53. grasp_agents-0.2.10/src/grasp_agents/openai/tool_converters.py +0 -38
  54. grasp_agents-0.2.10/src/grasp_agents/prompt_builder.py +0 -237
  55. grasp_agents-0.2.10/src/grasp_agents/run_context.py +0 -77
  56. grasp_agents-0.2.10/src/grasp_agents/tool_orchestrator.py +0 -203
  57. grasp_agents-0.2.10/src/grasp_agents/typing/completion.py +0 -30
  58. grasp_agents-0.2.10/src/grasp_agents/typing/io.py +0 -25
  59. grasp_agents-0.2.10/src/grasp_agents/typing/message.py +0 -132
  60. grasp_agents-0.2.10/src/grasp_agents/typing/tool.py +0 -71
  61. grasp_agents-0.2.10/src/grasp_agents/workflow/looped_agent.py +0 -120
  62. grasp_agents-0.2.10/src/grasp_agents/workflow/sequential_agent.py +0 -63
  63. grasp_agents-0.2.10/src/grasp_agents/workflow/workflow_agent.py +0 -73
  64. {grasp_agents-0.2.10 → grasp_agents-0.3.1}/.gitignore +0 -0
  65. {grasp_agents-0.2.10 → grasp_agents-0.3.1}/LICENSE.md +0 -0
  66. {grasp_agents-0.2.10 → grasp_agents-0.3.1}/src/grasp_agents/costs_dict.yaml +0 -0
  67. {grasp_agents-0.2.10 → grasp_agents-0.3.1}/src/grasp_agents/grasp_logging.py +0 -0
  68. {grasp_agents-0.2.10 → grasp_agents-0.3.1}/src/grasp_agents/http_client.py +0 -0
  69. {grasp_agents-0.2.10 → grasp_agents-0.3.1}/src/grasp_agents/rate_limiting/__init__.py +0 -0
  70. {grasp_agents-0.2.10 → grasp_agents-0.3.1}/src/grasp_agents/rate_limiting/rate_limiter_chunked.py +0 -0
  71. {grasp_agents-0.2.10 → grasp_agents-0.3.1}/src/grasp_agents/rate_limiting/types.py +0 -0
  72. {grasp_agents-0.2.10 → grasp_agents-0.3.1}/src/grasp_agents/rate_limiting/utils.py +0 -0
  73. {grasp_agents-0.2.10 → grasp_agents-0.3.1}/src/grasp_agents/typing/__init__.py +0 -0
  74. {grasp_agents-0.2.10 → grasp_agents-0.3.1}/src/grasp_agents/typing/content.py +0 -0
  75. {grasp_agents-0.2.10 → grasp_agents-0.3.1}/src/grasp_agents/workflow/__init__.py +0 -0
{grasp_agents-0.2.10 → grasp_agents-0.3.1}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: grasp_agents
- Version: 0.2.10
+ Version: 0.3.1
  Summary: Grasp Agents Library
  License-File: LICENSE.md
  Requires-Python: <4,>=3.11.4
@@ -38,7 +38,7 @@ Description-Content-Type: text/markdown

  - Clean formulation of agents as generic entities over:
    - I/O schemas
-   - Agent state
+   - Memory
    - Shared context
  - Transparent implementation of common agentic patterns:
    - Single-agent loops with an optional "ReAct mode" to enforce reasoning between the tool calls
@@ -50,16 +50,15 @@ Description-Content-Type: text/markdown

  ## Project Structure

- - `base_agent.py`, `llm_agent.py`, `comm_agent.py`: Core agent class implementations.
- - `agent_message.py`, `agent_message_pool.py`: Messaging and message pool management.
- - `llm_agent_state.py`: State management for LLM agents.
- - `tool_orchestrator.py`: Orchestration of tools used by agents.
+ - `processor.py`, `comm_processor.py`, `llm_agent.py`: Core processor and agent class implementations.
+ - `packet.py`, `packet_pool.py`: Communication management.
+ - `llm_policy_executor.py`: LLM actions and tool call loops.
  - `prompt_builder.py`: Tools for constructing prompts.
- - `workflow/`: Modules for defining and managing agent workflows.
- - `cloud_llm.py`, `llm.py`: LLM integration and base LLM functionalities.
+ - `workflow/`: Modules for defining and managing static agent workflows.
+ - `llm.py`, `cloud_llm.py`: LLM integration and base LLM functionalities.
  - `openai/`: Modules specific to OpenAI API integration.
- - `memory.py`: Memory management for agents (currently only message history).
- - `run_context.py`: Context management for agent runs.
+ - `memory.py`, `llm_agent_memory.py`: Memory management.
+ - `run_context.py`: Shared context management for agent runs.
  - `usage_tracker.py`: Tracking of API usage and costs.
  - `costs_dict.yaml`: Dictionary for cost tracking (update if needed).
  - `rate_limiting/`: Basic rate limiting tools.
@@ -111,7 +110,6 @@ Create a script, e.g., `problem_recommender.py`:

  ```python
  import asyncio
- import re
  from pathlib import Path
  from typing import Any

@@ -119,11 +117,8 @@ from dotenv import load_dotenv
  from pydantic import BaseModel, Field

  from grasp_agents.grasp_logging import setup_logging
- from grasp_agents.llm_agent import LLMAgent
- from grasp_agents.openai.openai_llm import OpenAILLM, OpenAILLMSettings
- from grasp_agents.run_context import RunContextWrapper
- from grasp_agents.typing.message import Conversation
- from grasp_agents.typing.tool import BaseTool
+ from grasp_agents.openai import OpenAILLM, OpenAILLMSettings
+ from grasp_agents import LLMAgent, BaseTool, RunContext

  load_dotenv()

@@ -135,70 +130,66 @@ setup_logging(
  )

  sys_prompt_react = """
- Your task is to suggest an exciting stats problem to a student.
- Ask the student about their education, interests, and preferences, then suggest a problem tailored to them.
+ Your task is to suggest an exciting stats problem to the student.
+ You should first ask the student about their education, interests, and preferences, then suggest a problem tailored specifically to them.

  # Instructions
+ * Use the provided tool to ask questions.
  * Ask questions one by one.
  * Provide your thinking before asking a question and after receiving a reply.
- * The problem must be enclosed in <PROBLEM> tags.
+ * Do not include your exact question as part of your thinking.
+ * The problem must have all the necessary data.
  """

-
+ # Tool input must be a Pydantic model to infer the JSON schema used by the LLM APIs
  class TeacherQuestion(BaseModel):
-     question: str = Field(..., description="The question to ask the student.")
+     question: str


  StudentReply = str


+ ask_student_tool_description = """
+ "Ask the student a question and get their reply."
+
+ Args:
+     question: str
+         The question to ask the student.
+ Returns:
+     reply: str
+         The student's reply to the question.
+ """
+
+
  class AskStudentTool(BaseTool[TeacherQuestion, StudentReply, Any]):
-     name: str = "ask_student_tool"
-     description: str = "Ask the student a question and get their reply."
+     name: str = "ask_student"
+     description: str = ask_student_tool_description

      async def run(
-         self, inp: TeacherQuestion, ctx: RunContextWrapper[Any] | None = None
+         self, inp: TeacherQuestion, ctx: RunContext[Any] | None = None
      ) -> StudentReply:
          return input(inp.question)


- Problem = str
+ class Problem(BaseModel):
+     problem: str


- teacher = LLMAgent[Any, Problem, None](
-     agent_id="teacher",
+ teacher = LLMAgent[None, Problem, None](
+     name="teacher",
      llm=OpenAILLM(
          model_name="openai:gpt-4.1",
-         llm_settings=OpenAILLMSettings(temperature=0.1)
+         llm_settings=OpenAILLMSettings(temperature=0.5),
      ),
      tools=[AskStudentTool()],
-     max_turns=20,
      react_mode=True,
+     final_answer_as_tool_call=True,
      sys_prompt=sys_prompt_react,
-     set_state_strategy="reset",
  )

-
- @teacher.exit_tool_call_loop_handler
- def exit_tool_call_loop(
-     conversation: Conversation, ctx: RunContextWrapper[Any] | None, **kwargs: Any
- ) -> bool:
-     return r"<PROBLEM>" in str(conversation[-1].content)
-
-
- @teacher.parse_output_handler
- def parse_output(
-     conversation: Conversation, ctx: RunContextWrapper[Any] | None, **kwargs: Any
- ) -> Problem:
-     message = str(conversation[-1].content)
-     matches = re.findall(r"<PROBLEM>(.*?)</PROBLEM>", message, re.DOTALL)
-
-     return matches[0]
-
-
  async def main():
-     ctx = RunContextWrapper[None](print_messages=True)
-     out = await teacher.run(ctx=ctx)
+     ctx = RunContext[None](print_messages=True)
+     out = await teacher.run("start", ctx=ctx)
      print(out.payloads[0])
      print(ctx.usage_tracker.total_usage)

{grasp_agents-0.2.10 → grasp_agents-0.3.1}/README.md

@@ -22,7 +22,7 @@

  - Clean formulation of agents as generic entities over:
    - I/O schemas
-   - Agent state
+   - Memory
    - Shared context
  - Transparent implementation of common agentic patterns:
    - Single-agent loops with an optional "ReAct mode" to enforce reasoning between the tool calls
@@ -34,16 +34,15 @@

  ## Project Structure

- - `base_agent.py`, `llm_agent.py`, `comm_agent.py`: Core agent class implementations.
- - `agent_message.py`, `agent_message_pool.py`: Messaging and message pool management.
- - `llm_agent_state.py`: State management for LLM agents.
- - `tool_orchestrator.py`: Orchestration of tools used by agents.
+ - `processor.py`, `comm_processor.py`, `llm_agent.py`: Core processor and agent class implementations.
+ - `packet.py`, `packet_pool.py`: Communication management.
+ - `llm_policy_executor.py`: LLM actions and tool call loops.
  - `prompt_builder.py`: Tools for constructing prompts.
- - `workflow/`: Modules for defining and managing agent workflows.
- - `cloud_llm.py`, `llm.py`: LLM integration and base LLM functionalities.
+ - `workflow/`: Modules for defining and managing static agent workflows.
+ - `llm.py`, `cloud_llm.py`: LLM integration and base LLM functionalities.
  - `openai/`: Modules specific to OpenAI API integration.
- - `memory.py`: Memory management for agents (currently only message history).
- - `run_context.py`: Context management for agent runs.
+ - `memory.py`, `llm_agent_memory.py`: Memory management.
+ - `run_context.py`: Shared context management for agent runs.
  - `usage_tracker.py`: Tracking of API usage and costs.
  - `costs_dict.yaml`: Dictionary for cost tracking (update if needed).
  - `rate_limiting/`: Basic rate limiting tools.
@@ -95,7 +94,6 @@ Create a script, e.g., `problem_recommender.py`:

  ```python
  import asyncio
- import re
  from pathlib import Path
  from typing import Any

@@ -103,11 +101,8 @@ from dotenv import load_dotenv
  from pydantic import BaseModel, Field

  from grasp_agents.grasp_logging import setup_logging
- from grasp_agents.llm_agent import LLMAgent
- from grasp_agents.openai.openai_llm import OpenAILLM, OpenAILLMSettings
- from grasp_agents.run_context import RunContextWrapper
- from grasp_agents.typing.message import Conversation
- from grasp_agents.typing.tool import BaseTool
+ from grasp_agents.openai import OpenAILLM, OpenAILLMSettings
+ from grasp_agents import LLMAgent, BaseTool, RunContext

  load_dotenv()

@@ -119,70 +114,66 @@ setup_logging(
  )

  sys_prompt_react = """
- Your task is to suggest an exciting stats problem to a student.
- Ask the student about their education, interests, and preferences, then suggest a problem tailored to them.
+ Your task is to suggest an exciting stats problem to the student.
+ You should first ask the student about their education, interests, and preferences, then suggest a problem tailored specifically to them.

  # Instructions
+ * Use the provided tool to ask questions.
  * Ask questions one by one.
  * Provide your thinking before asking a question and after receiving a reply.
- * The problem must be enclosed in <PROBLEM> tags.
+ * Do not include your exact question as part of your thinking.
+ * The problem must have all the necessary data.
  """

-
+ # Tool input must be a Pydantic model to infer the JSON schema used by the LLM APIs
  class TeacherQuestion(BaseModel):
-     question: str = Field(..., description="The question to ask the student.")
+     question: str


  StudentReply = str


+ ask_student_tool_description = """
+ "Ask the student a question and get their reply."
+
+ Args:
+     question: str
+         The question to ask the student.
+ Returns:
+     reply: str
+         The student's reply to the question.
+ """
+
+
  class AskStudentTool(BaseTool[TeacherQuestion, StudentReply, Any]):
-     name: str = "ask_student_tool"
-     description: str = "Ask the student a question and get their reply."
+     name: str = "ask_student"
+     description: str = ask_student_tool_description

      async def run(
-         self, inp: TeacherQuestion, ctx: RunContextWrapper[Any] | None = None
+         self, inp: TeacherQuestion, ctx: RunContext[Any] | None = None
      ) -> StudentReply:
          return input(inp.question)


- Problem = str
+ class Problem(BaseModel):
+     problem: str


- teacher = LLMAgent[Any, Problem, None](
-     agent_id="teacher",
+ teacher = LLMAgent[None, Problem, None](
+     name="teacher",
      llm=OpenAILLM(
          model_name="openai:gpt-4.1",
-         llm_settings=OpenAILLMSettings(temperature=0.1)
+         llm_settings=OpenAILLMSettings(temperature=0.5),
      ),
      tools=[AskStudentTool()],
-     max_turns=20,
      react_mode=True,
+     final_answer_as_tool_call=True,
      sys_prompt=sys_prompt_react,
-     set_state_strategy="reset",
  )

-
- @teacher.exit_tool_call_loop_handler
- def exit_tool_call_loop(
-     conversation: Conversation, ctx: RunContextWrapper[Any] | None, **kwargs: Any
- ) -> bool:
-     return r"<PROBLEM>" in str(conversation[-1].content)
-
-
- @teacher.parse_output_handler
- def parse_output(
-     conversation: Conversation, ctx: RunContextWrapper[Any] | None, **kwargs: Any
- ) -> Problem:
-     message = str(conversation[-1].content)
-     matches = re.findall(r"<PROBLEM>(.*?)</PROBLEM>", message, re.DOTALL)
-
-     return matches[0]
-
-
  async def main():
-     ctx = RunContextWrapper[None](print_messages=True)
-     out = await teacher.run(ctx=ctx)
+     ctx = RunContext[None](print_messages=True)
+     out = await teacher.run("start", ctx=ctx)
      print(out.payloads[0])
      print(ctx.usage_tracker.total_usage)

{grasp_agents-0.2.10 → grasp_agents-0.3.1}/pyproject.toml

@@ -1,6 +1,6 @@
  [project]
  name = "grasp_agents"
- version = "0.2.10"
+ version = "0.3.1"
  description = "Grasp Agents Library"
  readme = "README.md"
  requires-python = ">=3.11.4,<4"
grasp_agents-0.3.1/src/grasp_agents/__init__.py

@@ -0,0 +1,39 @@
+ # pyright: reportUnusedImport=false
+
+
+ from .comm_processor import CommProcessor
+ from .llm import LLM, LLMSettings
+ from .llm_agent import LLMAgent
+ from .llm_agent_memory import LLMAgentMemory
+ from .memory import Memory
+ from .packet import Packet
+ from .processor import Processor
+ from .run_context import RunArgs, RunContext
+ from .typing.completion import Completion
+ from .typing.content import Content, ImageData
+ from .typing.io import LLMPrompt, LLMPromptArgs, ProcName
+ from .typing.message import AssistantMessage, Messages, SystemMessage, UserMessage
+ from .typing.tool import BaseTool
+
+ __all__ = [
+     "LLM",
+     "AssistantMessage",
+     "BaseTool",
+     "CommProcessor",
+     "Completion",
+     "Content",
+     "ImageData",
+     "LLMAgent",
+     "LLMPrompt",
+     "LLMPromptArgs",
+     "LLMSettings",
+     "Messages",
+     "Packet",
+     "Packet",
+     "ProcName",
+     "Processor",
+     "RunArgs",
+     "RunContext",
+     "SystemMessage",
+     "UserMessage",
+ ]
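For code migrating from 0.2.10, the re-exports above back the flatter import style used in the updated README. A minimal before/after sketch of the import change, taken from the removed and added lines in the README diff (assumes grasp_agents 0.3.1 is installed):

```python
# 0.2.10 imports (removed in the README diff):
# from grasp_agents.llm_agent import LLMAgent
# from grasp_agents.openai.openai_llm import OpenAILLM, OpenAILLMSettings
# from grasp_agents.run_context import RunContextWrapper
# from grasp_agents.typing.message import Conversation
# from grasp_agents.typing.tool import BaseTool

# 0.3.1 imports (added in the README diff, backed by the new __init__.py above):
from grasp_agents import LLMAgent, BaseTool, RunContext
from grasp_agents.openai import OpenAILLM, OpenAILLMSettings

# Quick smoke check that the root-level names resolve.
print(LLMAgent, BaseTool, RunContext, OpenAILLM, OpenAILLMSettings)
```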