meshagent-agents 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of meshagent-agents might be problematic.
- meshagent/agents/__init__.py +5 -0
- meshagent/agents/adapter.py +39 -0
- meshagent/agents/agent.py +427 -0
- meshagent/agents/chat.py +316 -0
- meshagent/agents/context.py +90 -0
- meshagent/agents/development.py +32 -0
- meshagent/agents/hosting.py +117 -0
- meshagent/agents/indexer.py +593 -0
- meshagent/agents/listener.py +155 -0
- meshagent/agents/planning.py +603 -0
- meshagent/agents/prompt.py +49 -0
- meshagent/agents/pydantic.py +137 -0
- meshagent/agents/schema.py +50 -0
- meshagent/agents/single_shot_writer.py +92 -0
- meshagent/agents/version.py +1 -0
- meshagent/agents/worker.py +126 -0
- meshagent/agents/writer.py +82 -0
- meshagent_agents-0.0.1.dist-info/LICENSE +201 -0
- meshagent_agents-0.0.1.dist-info/METADATA +29 -0
- meshagent_agents-0.0.1.dist-info/RECORD +22 -0
- meshagent_agents-0.0.1.dist-info/WHEEL +5 -0
- meshagent_agents-0.0.1.dist-info/top_level.txt +1 -0
meshagent/agents/planning.py

@@ -0,0 +1,603 @@
from meshagent.agents.agent import AgentCallContext, AgentChatContext, AgentException
from meshagent.api import WebSocketClientProtocol, RequiredToolkit, RequiredSchema, Requirement
from meshagent.api.schema_document import Document
from meshagent.api.room_server_client import RoomClient
from meshagent.tools.toolkit import Toolkit, TextResponse, Tool, ToolContext
from meshagent.api.schema import MeshSchema
from meshagent.agents.writer import Writer, WriterContext
from meshagent.agents.adapter import LLMAdapter, ToolResponseAdapter
from meshagent.api.schema import MeshSchema, ElementType, ChildProperty, ValueProperty
from meshagent.agents.schema import merge
from meshagent.tools.document_tools import build_tools, DocumentAuthoringToolkit
from meshagent.agents import TaskRunner
from copy import deepcopy
from typing import Optional

reasoning_rules = [
    "If an ask_user tool call is available, plans should include a series of questions to ask the user to help refine the plan.",
    "If an ask_user tool call is available, ask a maximum of one question per step",
    "If an ask_user tool call is not available, you may not ask the user any questions",
    "You will be given a task. First formulate a plan for the task. Then execute the steps until you have a final answer.",
    "Do not use tool calls to write estimates of progress or plans",
    "You are a document generation service",
    "The user is asking for a document to be created",
    "You must use tool calls to generate the answer to the user's question as a document"
]

goto_next_step_message = """
execute the next step, and provide the result of the step.
"""

def is_reasoning_done(*, context: AgentCallContext, response: dict) -> bool:
    parsed = response["response"]["data"][0]
    if "abort" in parsed:
        abort = parsed["abort"]
        reason = abort["reason"]
        raise AgentException(reason)

    elif "progress" in parsed:
        result = parsed["progress"]

        if "done" not in result or result["done"]:
            logger.info("Done generating response %s", result)
            return True
        else:
            context.chat.append_user_message(goto_next_step_message)
            return False

    elif "plan" in parsed:
        plan = parsed["plan"]
        context.chat.append_user_message(goto_next_step_message)
        return False
    else:
        logger.info("received invalid response, %s", parsed)
        context.chat.append_user_message("this response did not conform to the schema")
        return False
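
# A conforming completion, as parsed by is_reasoning_done, is an envelope like
# the minimal sketch below (the field values are illustrative assumptions, not
# output from this package):
example_response = {
    "response": {
        "data": [
            {"progress": {"percentage": "50%", "next": "draft the summary section", "done": False}}
        ]
    }
}
# is_reasoning_done(context=..., response=example_response) would append
# goto_next_step_message to the chat and return False, because done is False.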

def reasoning_schema(*, description: str, elements: Optional[list[ElementType]] = None, has_done_property: bool = True, has_abort: bool = True) -> MeshSchema:

    if elements is None:
        elements = []

    progress_properties = [
        ValueProperty(name="percentage", description="an estimate for how complete the task is so far", type="string"),
        ValueProperty(name="next", description="a very short description of the next step", type="string"),
    ]

    if has_done_property:
        progress_properties.append(ValueProperty(name="done", description="whether there is more work to do. the program will continue to send messages to the LLM to refine the answer until done is set to true.", type="boolean"))

    elements = elements.copy()

    if has_abort:
        elements.append(ElementType(
            tag_name="abort",
            description="return if the task cannot be completed because the user cancelled a request or errors could not be resolved",
            properties=[
                ValueProperty(name="reason", description="the reason the task was aborted", type="string")
            ]
        ))

    return MeshSchema(
        root_tag_name="response",
        elements=[
            ElementType(
                tag_name="response",
                description="a response for a task",
                properties=[
                    ChildProperty(name="data", description="the response for a task, should contain a single item", child_tag_names=[
                        "plan", "progress",
                        *map(lambda x: x.tag_name, elements)
                    ])
                ]
            ),
            ElementType(
                tag_name="plan",
                description="a plan will be output for each task to describe the work that will be done, the work will be performed using tool calls.",
                properties=[
                    ChildProperty(name="steps", description="the steps for the plan", child_tag_names=["step"])
                ]
            ),
            ElementType(
                tag_name="step",
                description="a step in the plan",
                properties=[
                    ValueProperty(name="description", description="a short sentence description of the work that will be performed to complete the user's request.", type="string")
                ],
            ),
            ElementType(
                tag_name="progress",
                description="the progress of the task",
                properties=progress_properties
            ),
            ElementType(
                tag_name="thinking",
                description="use to log information that will not be included in the final answer",
                properties=[
                    ValueProperty(name="text", description="used to log thoughts or progress", type="string"),
                ],
            ),
            *elements
        ])
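
# The planners below pass this schema to the LLM adapter as a structured output
# contract; a minimal sketch, mirroring the calls made in PlanningWriter.write
# and PlanningResponder.ask further down:
example_rs = reasoning_schema(description="uses tools", elements=[]).to_json()
# example_rs is the JSON form handed to LLMAdapter.next(..., output_schema=...);
# has_done_property/has_abort can be disabled when a terminate tool is in use.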

from .schema import prompt_schema
import logging
logging.basicConfig()
logger = logging.getLogger("planning_agent")
logger.setLevel(logging.INFO)


class PlanningWriter(Writer):

    def __init__(self, *, name: str, llm_adapter: LLMAdapter, tool_adapter: Optional[ToolResponseAdapter] = None, max_iterations: int = 100, toolkits: Optional[list[Toolkit]] = None, title: Optional[str] = None, description: Optional[str] = None, rules: Optional[list[str]] = None, requires: Optional[list[Requirement]] = None, supports_tools: Optional[bool] = None):

        super().__init__(
            name=name,
            description=description,
            title=title,
            input_schema=merge(
                schema=prompt_schema(description="use a prompt to generate content"),
                additional_properties={
                    "path": {"type": "string"}
                }),
            output_schema={
                "type": "object",
                "additionalProperties": False,
                "required": [],
                "properties": {
                }
            },
            requires=requires,
            supports_tools=supports_tools
        )

        if rules is None:
            rules = []

        self._rules = rules

        self._llm_adapter = llm_adapter
        self._tool_adapter = tool_adapter
        self._max_iterations = max_iterations
        if toolkits is None:
            toolkits = []
        self.toolkits = toolkits

        self._planning_rules: list[str] = [
            *reasoning_rules,
            *rules
        ]

    async def init_chat_context(self):
        chat = await super().init_chat_context()

        all_rules = self._planning_rules.copy()
        chat.append_rules(rules=all_rules)
        return chat

    async def write(self, writer_context: WriterContext, arguments: dict):

        writer_context.call_context.chat.append_rules(rules=[f"you are writing to the document at the path {writer_context.path}"])

        arguments = arguments.copy()
        self.pop_path(arguments=arguments)

        execute = goto_next_step_message

        react = """
        based on what you now know, either execute the next task or formulate a new plan. If you have sufficient information to complete the task, return a final answer.
        """

        prompt = arguments["prompt"]

        writer_context.call_context.chat.append_user_message(message=prompt)

        rs = reasoning_schema(description="uses tools", elements=[]).to_json()

        i = 0
        while i < self._max_iterations:
            i += 1

            try:
                logger.info("COMPLETION STARTING: Step %s", i)

                base_args = arguments.copy()
                base_args.pop("path", None)

                toolkits = [
                    DocumentAuthoringToolkit(),
                    Toolkit(
                        name="meshagent.planning-writer.tools",
                        tools=build_tools(document_type="document", schema=writer_context.document.schema, documents={writer_context.path: writer_context.document})),
                    *self.toolkits,
                    *writer_context.call_context.toolkits
                ]

                responses = await self._llm_adapter.next(context=writer_context.call_context.chat, room=writer_context.room, toolkits=toolkits, tool_adapter=self._tool_adapter, output_schema=rs)

                logger.info("COMPLETION RESPONSE %s", responses)

            except Exception as e:
                logger.error("Unable to execute reasoning completion task", exc_info=e)
                # retry
                raise e

            parsed = responses["response"]["data"][0]
            if "abort" in parsed:
                abort = parsed["abort"]
                reason = abort["reason"]
                raise AgentException(reason)

            elif "progress" in parsed:
                result = parsed["progress"]

                if "done" not in result or result["done"]:
                    logger.info("Done generating response %s", result)
                    return {}
                else:
                    writer_context.call_context.chat.append_user_message(execute)
                    continue
            elif "plan" in parsed:
                plan = parsed["plan"]
                writer_context.call_context.chat.append_user_message(execute)
                continue
            else:
                logger.info("received invalid response, %s", parsed)
                writer_context.call_context.chat.append_user_message("this response did not conform to the schema")
                continue
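
# --- usage sketch (illustrative; `my_llm_adapter` stands in for a concrete
# LLMAdapter implementation supplied by the host application) ---
#
#     writer = PlanningWriter(
#         name="planning-writer",
#         llm_adapter=my_llm_adapter,
#         rules=["write in a formal tone"],
#     )
#
# The Writer base class drives write() with a WriterContext whose arguments
# include the "prompt" and the "path" of the target document.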


class PlanningResponder(TaskRunner):

    def __init__(self, *, name: str, llm_adapter: LLMAdapter, tool_adapter: Optional[ToolResponseAdapter] = None, output_schema: dict, max_iterations: int = 100, toolkits: Optional[list[Toolkit]] = None, title: Optional[str] = None, description: Optional[str] = None, requires: Optional[list[Requirement]] = None, supports_tools: bool = True, input_prompt: bool = True, use_terminate_tool: bool = False, rules: Optional[list[str]] = None, labels: Optional[list[str]] = None):
        if not isinstance(output_schema, dict):
            raise Exception("schema must be a dict, got: {type}".format(type=type(output_schema)))

        self._input_prompt = input_prompt

        if rules is None:
            rules = []

        if input_prompt:
            input_schema = prompt_schema(description="use a prompt to generate content")
        else:
            input_schema = {
                "type": "object",
                "additionalProperties": False,
                "required": [],
                "properties": {
                }
            }

        super().__init__(
            name=name,
            title=title,
            description=description,
            input_schema=input_schema,
            output_schema=output_schema,
            requires=requires,
            supports_tools=supports_tools,
            labels=labels
        )

        self._max_iterations = max_iterations

        self._planning_rules: list[str] = [
            *rules,
            *reasoning_rules
        ]

        self._responses = dict()

        self._llm_adapter = llm_adapter
        self._tool_adapter = tool_adapter

        if toolkits is None:
            toolkits = []

        self.toolkits = toolkits

        self._use_terminate_tool = use_terminate_tool

    async def init_chat_context(self):
        chat = self._llm_adapter.create_chat_context()

        all_rules = self._planning_rules.copy()
        chat.append_rules(rules=all_rules)
        return chat

    async def ask(self, context: AgentCallContext, arguments: dict):

        class ResponseTool(Tool):
            def __init__(self, output_schema: dict, context: AgentCallContext, parent: "PlanningResponder"):
                super().__init__(
                    name="respond",
                    title="respond",
                    description="send the response to the user",
                    input_schema=output_schema
                )
                self.parent = parent
                self.context = context

            async def execute(self, *, context: ToolContext, **kwargs):
                self.parent._responses[self.context] = kwargs
                return TextResponse(text="the response was sent")

        terminated = False

        class ExitTool(Tool):
            def __init__(self):
                super().__init__(
                    name="terminate",
                    title="terminate",
                    description="terminates the agent, the agent will no longer be available",
                    input_schema={
                        "type": "object",
                        "required": [],
                        "additionalProperties": False,
                        "properties": {}
                    }
                )

            async def execute(self, *, context: ToolContext, **kwargs):
                nonlocal terminated
                terminated = True
                return TextResponse(text="the process was terminated")

        use_terminate_tool = self._use_terminate_tool

        class ResponseToolkit(Toolkit):
            def __init__(self, output_schema, context, parent):

                tools = [
                    ResponseTool(output_schema=output_schema, context=context, parent=parent)
                ]

                if use_terminate_tool:
                    tools.append(ExitTool())

                super().__init__(
                    name="meshagent.responder",
                    title="responder",
                    description="tools for responding",
                    tools=tools
                )

        context.toolkits.append(ResponseToolkit(output_schema=self.output_schema, context=context, parent=self))

        execute = goto_next_step_message

        react = """
        based on what you now know, either execute the next task or formulate a new plan. If you have sufficient information to complete the task, return a final answer.
        """

        rs = reasoning_schema(description="uses tools", elements=[], has_done_property=not self._use_terminate_tool, has_abort=not self._use_terminate_tool).to_json()

        if self._input_prompt:
            prompt = arguments["prompt"]
            context.chat.append_user_message(message=prompt)

        room = context.room
        i = 0
        while i < self._max_iterations and not terminated:
            i += 1

            try:
                logger.info("COMPLETION STARTING: Step %s", i)

                toolkits = [
                    *self.toolkits,
                    *context.toolkits
                ]

                responses = await self._llm_adapter.next(context=context.chat, room=room, toolkits=toolkits, tool_adapter=self._tool_adapter, output_schema=rs)

                logger.info("COMPLETION RESPONSE %s", responses)

            except Exception as e:
                logger.error("Unable to execute reasoning completion task", exc_info=e)
                # retry
                raise e

            parsed = responses["response"]["data"][0]

            if "abort" in parsed:
                abort = parsed["abort"]
                reason = abort["reason"]
                raise AgentException(reason)

            elif "progress" in parsed:
                result = parsed["progress"]

                if "done" in result and result["done"]:
                    if context not in self._responses:
                        context.chat.append_user_message("you must call the respond tool")
                        continue

                    final_answer = self._responses.pop(context)
                    logger.info("Done generating response %s", final_answer)
                    return final_answer

                else:
                    context.chat.append_user_message(execute)
                    continue
            elif "plan" in parsed:
                plan = parsed["plan"]
                context.chat.append_user_message(execute)
                continue
            else:
                logger.info("received invalid response, %s", parsed)
                context.chat.append_user_message("this response did not conform to the schema")
                continue

        return {}
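
# --- usage sketch (illustrative; the schema is an assumption supplied by the
# caller and `my_llm_adapter` stands in for a concrete LLMAdapter) ---
#
#     responder = PlanningResponder(
#         name="planning-responder",
#         llm_adapter=my_llm_adapter,
#         output_schema={
#             "type": "object",
#             "additionalProperties": False,
#             "required": ["answer"],
#             "properties": {"answer": {"type": "string"}},
#         },
#     )
#
# ask() loops until the model reports progress with done set (or the terminate
# tool fires) and the injected "respond" tool has been called; the kwargs passed
# to that tool become the task's return value.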

class DynamicPlanningResponder(TaskRunner):

    def __init__(self, *, name: str, llm_adapter: LLMAdapter, tool_adapter: Optional[ToolResponseAdapter] = None, max_iterations: int = 100, toolkits: Optional[list[Toolkit]] = None, title: Optional[str] = None, description: Optional[str] = None):

        super().__init__(
            name=name,
            title=title,
            description=description,
            input_schema=merge(
                schema=prompt_schema(
                    description="use a prompt to generate content"),
                additional_properties={
                    "output_schema": {"type": "object"}
                }
            ),
            output_schema=None)

        self._max_iterations = max_iterations

        self._planning_rules: list[str] = [
            *reasoning_rules
        ]

        self._responses = dict()

        self._llm_adapter = llm_adapter
        self._tool_adapter = tool_adapter

        if toolkits is None:
            toolkits = []

        self.toolkits = toolkits

    async def init_chat_context(self):
        chat = self._llm_adapter.create_chat_context()

        all_rules = self._planning_rules.copy()
        chat.append_rules(rules=all_rules)
        return chat

    async def ask(self, context: AgentCallContext, arguments: dict):

        dynamic_schema = arguments["output_schema"]

        class ResponseTool(Tool):
            def __init__(self, output_schema: dict, context: AgentCallContext, parent: "DynamicPlanningResponder"):

                output_schema = deepcopy(output_schema)

                schema = {
                    "type": "object",
                    "additionalProperties": False,
                    "required": ["summary", "data"],
                    "properties": {
                        "summary": {
                            "type": "string",
                            "description": "a summary of the data structure"
                        },
                        "data": output_schema
                    }
                }

                if "$defs" in output_schema:
                    schema["$defs"] = output_schema["$defs"]
                    del output_schema["$defs"]

                super().__init__(
                    name="respond",
                    title="respond",
                    input_schema=schema,
                    description="send the response to the user",
                )
                self.parent = parent
                self.context = context

            async def execute(self, *, context: ToolContext, **kwargs):
                self.parent._responses[self.context] = kwargs
                return TextResponse(text="the response was sent")

        class ResponseToolkit(Toolkit):
            def __init__(self, output_schema, context, parent):
                super().__init__(
                    name="meshagent.dynamic_response",
                    tools=[
                        ResponseTool(output_schema=output_schema, context=context, parent=parent)
                    ]
                )

        context.toolkits.append(ResponseToolkit(output_schema=dynamic_schema, context=context, parent=self))

        execute = goto_next_step_message

        react = """
        based on what you now know, either execute the next task or formulate a new plan. If you have sufficient information to complete the task, return a final answer.
        """

        rs = reasoning_schema(description="uses tools", elements=[]).to_json()

        prompt = arguments["prompt"]

        context.chat.append_user_message(message=prompt)

        room = context.room

        i = 0
        while i < self._max_iterations:
            i += 1

            try:
                logger.info("COMPLETION STARTING: Step %s", i)

                toolkits = [
                    *self.toolkits,
                    *context.toolkits
                ]

                responses = await self._llm_adapter.next(context=context.chat, room=room, toolkits=toolkits, tool_adapter=self._tool_adapter, output_schema=rs)

                logger.info("COMPLETION RESPONSE %s", responses)

            except Exception as e:
                logger.error("Unable to execute reasoning completion task", exc_info=e)
                # retry
                raise e

            parsed = responses["response"]["data"][0]

            if "abort" in parsed:
                abort = parsed["abort"]
                reason = abort["reason"]
                raise AgentException(reason)

            elif "progress" in parsed:
                result = parsed["progress"]

                if "done" not in result or result["done"]:
                    if context not in self._responses:
                        context.chat.append_user_message("you must call the respond tool")
                        continue

                    final_answer = self._responses.pop(context)

                    logger.info("Done generating response %s", final_answer)
                    return final_answer["data"]

                else:
                    context.chat.append_user_message(execute)
                    continue
            elif "plan" in parsed:
                plan = parsed["plan"]
                context.chat.append_user_message(execute)
                continue
            else:
                logger.info("received invalid response, %s", parsed)
                context.chat.append_user_message("this response did not conform to the schema")
                continue
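
# --- usage sketch (illustrative; ask() is normally driven by the TaskRunner
# machinery, `responder` is an assumed DynamicPlanningResponder instance, and
# `call_context` an assumed AgentCallContext) ---
#
#     result = await responder.ask(context=call_context, arguments={
#         "prompt": "summarize the quarterly report",
#         "output_schema": {"type": "object", "properties": {"summary": {"type": "string"}}},
#     })
#
# Unlike PlanningResponder, the output schema arrives per request, and only the
# "data" portion of the respond tool call is returned.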
meshagent/agents/prompt.py

@@ -0,0 +1,49 @@

from .adapter import LLMAdapter, Toolkit, ToolResponseAdapter
from .schema import prompt_schema
from .agent import AgentCallContext
from typing import Optional
from meshagent.agents import TaskRunner

# An agent that takes a simple prompt and gets the result
class PromptAgent(TaskRunner):
    def __init__(self,
        *,
        name: str,
        output_schema: dict,
        llm_adapter: LLMAdapter,
        tool_adapter: Optional[ToolResponseAdapter] = None,
        tools: Optional[list[Toolkit]] = None,
        rules: Optional[list[str]] = None,
        title: Optional[str] = None,
        description: Optional[str] = None
    ):
        super().__init__(
            name=name,
            description=description,
            title=title,
            input_schema=prompt_schema(
                description=description
            ),
            output_schema=output_schema,
        )
        self.rules = rules if rules is not None else []
        self.tools = tools if tools is not None else []
        self.llm_adapter = llm_adapter
        self.tool_adapter = tool_adapter

    async def init_chat_context(self):
        chat = self.llm_adapter.create_chat_context()
        chat.append_rules(self.rules)
        return chat

    async def ask(self, *, context: AgentCallContext, arguments: dict):
        context.chat.append_user_message(arguments["prompt"])

        toolkits = [
            *self.tools,
            *context.toolkits
        ]

        return await self.llm_adapter.next(context=context.chat, room=context.room, toolkits=toolkits, tool_adapter=self.tool_adapter, output_schema=self.output_schema)
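
# --- usage sketch (illustrative; `my_llm_adapter` stands in for a concrete
# LLMAdapter implementation, and the schema is an assumption) ---
#
#     agent = PromptAgent(
#         name="prompt-agent",
#         description="answers a prompt in a single completion",
#         output_schema={
#             "type": "object",
#             "additionalProperties": False,
#             "required": ["answer"],
#             "properties": {"answer": {"type": "string"}},
#         },
#         llm_adapter=my_llm_adapter,
#     )
#
# Unlike the planners in planning.py, ask() makes a single LLMAdapter.next call
# and returns its structured output directly, with no plan/execute loop.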