universal-mcp-agents 0.1.23__py3-none-any.whl → 0.1.24rc3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35) hide show
  1. universal_mcp/agents/__init__.py +11 -2
  2. universal_mcp/agents/base.py +3 -6
  3. universal_mcp/agents/codeact0/agent.py +14 -17
  4. universal_mcp/agents/codeact0/prompts.py +9 -3
  5. universal_mcp/agents/codeact0/sandbox.py +2 -2
  6. universal_mcp/agents/codeact0/tools.py +2 -2
  7. universal_mcp/agents/codeact0/utils.py +48 -0
  8. universal_mcp/agents/codeact00/__init__.py +3 -0
  9. universal_mcp/agents/codeact00/__main__.py +26 -0
  10. universal_mcp/agents/codeact00/agent.py +578 -0
  11. universal_mcp/agents/codeact00/config.py +77 -0
  12. universal_mcp/agents/codeact00/langgraph_agent.py +14 -0
  13. universal_mcp/agents/codeact00/llm_tool.py +25 -0
  14. universal_mcp/agents/codeact00/prompts.py +364 -0
  15. universal_mcp/agents/codeact00/sandbox.py +135 -0
  16. universal_mcp/agents/codeact00/state.py +66 -0
  17. universal_mcp/agents/codeact00/tools.py +525 -0
  18. universal_mcp/agents/codeact00/utils.py +678 -0
  19. universal_mcp/agents/codeact01/__init__.py +3 -0
  20. universal_mcp/agents/codeact01/__main__.py +26 -0
  21. universal_mcp/agents/codeact01/agent.py +413 -0
  22. universal_mcp/agents/codeact01/config.py +77 -0
  23. universal_mcp/agents/codeact01/langgraph_agent.py +14 -0
  24. universal_mcp/agents/codeact01/llm_tool.py +25 -0
  25. universal_mcp/agents/codeact01/prompts.py +246 -0
  26. universal_mcp/agents/codeact01/sandbox.py +162 -0
  27. universal_mcp/agents/codeact01/state.py +58 -0
  28. universal_mcp/agents/codeact01/tools.py +648 -0
  29. universal_mcp/agents/codeact01/utils.py +552 -0
  30. universal_mcp/agents/llm.py +7 -3
  31. universal_mcp/applications/llm/app.py +66 -15
  32. {universal_mcp_agents-0.1.23.dist-info → universal_mcp_agents-0.1.24rc3.dist-info}/METADATA +1 -1
  33. universal_mcp_agents-0.1.24rc3.dist-info/RECORD +66 -0
  34. universal_mcp_agents-0.1.23.dist-info/RECORD +0 -44
  35. {universal_mcp_agents-0.1.23.dist-info → universal_mcp_agents-0.1.24rc3.dist-info}/WHEEL +0 -0
@@ -0,0 +1,578 @@
1
+ import copy
2
+ import json
3
+ import re
4
+ import uuid
5
+ from typing import Literal, cast
6
+
7
+ from langchain_anthropic import ChatAnthropic
8
+ from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
9
+ from langgraph.checkpoint.base import BaseCheckpointSaver
10
+ from langgraph.graph import START, StateGraph
11
+ from langgraph.types import Command, RetryPolicy, StreamWriter
12
+ from universal_mcp.tools.registry import ToolRegistry
13
+ from universal_mcp.types import ToolFormat
14
+
15
+ from universal_mcp.agents.base import BaseAgent
16
+ from universal_mcp.agents.codeact00.llm_tool import smart_print
17
+ from universal_mcp.agents.codeact00.prompts import (
18
+ AGENT_BUILDER_GENERATING_PROMPT,
19
+ AGENT_BUILDER_META_PROMPT,
20
+ AGENT_BUILDER_PLANNING_PROMPT,
21
+ AGENT_BUILDER_PLANNER_PROMPT,
22
+ AGENT_BUILDER_PLAN_PATCH_PROMPT,
23
+ AGENT_BUILDER_CODE_PATCH_PROMPT,
24
+ build_tool_definitions,
25
+ create_default_prompt,
26
+ )
27
+ from universal_mcp.agents.codeact00.sandbox import eval_unsafe, execute_ipython_cell, handle_execute_ipython_cell
28
+ from universal_mcp.agents.codeact00.state import (
29
+ AgentBuilderCode,
30
+ AgentBuilderMeta,
31
+ AgentBuilderPlan,
32
+ AgentBuilderPatch,
33
+ CodeActState,
34
+ )
35
+ from universal_mcp.agents.codeact00.tools import (
36
+ create_meta_tools,
37
+ enter_agent_builder_mode,
38
+ finalize_planning,
39
+ cancel_planning,
40
+ )
41
+ from universal_mcp.agents.codeact00.utils import (
42
+ build_anthropic_cache_message,
43
+ extract_plan_parameters,
44
+ get_connected_apps_string,
45
+ strip_thinking,
46
+ apply_patch_or_use_proposed,
47
+ )
48
+ from universal_mcp.agents.llm import load_chat_model
49
+ from universal_mcp.agents.utils import convert_tool_ids_to_dict, filter_retry_on, get_message_text
50
+
51
+
52
class CodeActPlaybookAgent(BaseAgent):
    """CodeAct agent with an integrated "agent builder" mode.

    Normal mode lets the LLM call four meta-tools (execute code in a sandbox,
    search/load functions, enter builder mode). Builder mode walks through
    planning -> confirming -> generating states to create or update a saved
    agent (plan + generated script) via an agent-builder registry.
    """

    def __init__(
        self,
        name: str,
        instructions: str,
        model: str,
        memory: BaseCheckpointSaver | None = None,
        registry: ToolRegistry | None = None,
        agent_builder_registry: object | None = None,
        sandbox_timeout: int = 20,
        **kwargs,
    ):
        super().__init__(
            name=name,
            instructions=instructions,
            model=model,
            memory=memory,
            **kwargs,
        )
        self.model_instance = load_chat_model(model)
        # Dedicated (non-streaming, "quiet"-tagged) model for structured builder outputs.
        self.agent_builder_model_instance = load_chat_model(
            "anthropic:claude-sonnet-4-5-20250929", thinking=False, disable_streaming=True, tags=("quiet",)
        )
        self.registry = registry
        self.agent_builder_registry = agent_builder_registry
        # If a builder registry is supplied we may be updating an existing agent.
        self.agent = agent_builder_registry.get_agent() if agent_builder_registry else None

        self.tools_config = self.agent.tools if self.agent else {}
        self.eval_fn = eval_unsafe
        self.sandbox_timeout = sandbox_timeout
        self.default_tools_config = {
            "llm": ["generate_text", "classify_data", "extract_data", "call_llm"],
        }
        self.final_instructions = ""
        self.tools_context = {}
        self.eval_mode = kwargs.get("eval_mode", False)
        # BUGFIX: previously only set in the "confirming" branch; the
        # "generating" branch (reached directly in eval_mode) read it and
        # raised AttributeError. Initialize up front.
        self.meta_id: str | None = None

    async def _build_graph(self):  # noqa: PLR0915
        """Build the graph for the CodeAct Playbook Agent."""
        meta_tools = create_meta_tools(self.registry)
        self.additional_tools = [
            smart_print,
            meta_tools["web_search"],
            meta_tools["read_file"],
            meta_tools["save_file"],
            meta_tools["upload_file"],
        ]

        if self.tools_config:
            await self.registry.load_tools(self.tools_config)  # Load provided tools
        if self.default_tools_config:
            await self.registry.load_tools(self.default_tools_config)  # Load default tools

        async def call_model(state: CodeActState) -> Command[Literal["execute_tools"]]:
            """This node now only ever binds the four meta-tools to the LLM."""
            messages = build_anthropic_cache_message(self.final_instructions) + state["messages"]

            agent_facing_tools = [
                execute_ipython_cell,
                enter_agent_builder_mode,
                meta_tools["search_functions"],
                meta_tools["load_functions"],
            ]

            if isinstance(self.model_instance, ChatAnthropic):
                model_with_tools = self.model_instance.bind_tools(
                    tools=agent_facing_tools,
                    tool_choice="auto",
                    cache_control={"type": "ephemeral", "ttl": "1h"},
                )
                if isinstance(messages[-1].content, str):
                    pass
                else:
                    # Mark the last content block cacheable without mutating shared state.
                    last = copy.deepcopy(messages[-1])
                    last.content[-1]["cache_control"] = {"type": "ephemeral", "ttl": "5m"}
                    messages[-1] = last
            else:
                model_with_tools = self.model_instance.bind_tools(
                    tools=agent_facing_tools,
                    tool_choice="auto",
                )
            response = cast(AIMessage, await model_with_tools.ainvoke(messages))
            if response.tool_calls:
                return Command(goto="execute_tools", update={"messages": [response]})
            else:
                return Command(update={"messages": [response], "model_with_tools": model_with_tools})

        async def execute_tools(state: CodeActState) -> Command[Literal["call_model", "agent_builder"]]:
            """Execute tool calls"""
            last_message = state["messages"][-1]
            tool_calls = last_message.tool_calls if isinstance(last_message, AIMessage) else []

            tool_messages = []
            new_tool_ids = []
            tool_result = ""
            ask_user = False
            ai_msg = ""
            effective_previous_add_context = state.get("add_context", {})
            effective_existing_context = state.get("context", {})

            for tool_call in tool_calls:
                tool_name = tool_call["name"]
                tool_args = tool_call["args"]
                try:
                    if tool_name == "enter_agent_builder_mode":
                        tool_message = ToolMessage(
                            content=json.dumps("Entered Agent Builder Mode."),
                            name=tool_call["name"],
                            tool_call_id=tool_call["id"],
                        )
                        return Command(
                            goto="agent_builder",
                            update={
                                "agent_builder_mode": "planning",
                                "messages": [tool_message],
                            },  # Entered Agent Builder mode
                        )
                    elif tool_name == "execute_ipython_cell":
                        code = tool_call["args"]["snippet"]
                        output, new_context, new_add_context = await handle_execute_ipython_cell(
                            code,
                            self.tools_context,  # Uses the dynamically updated context
                            self.eval_fn,
                            effective_previous_add_context,
                            effective_existing_context,
                        )
                        # Carry sandbox state forward across multiple calls in one batch.
                        effective_existing_context = new_context
                        effective_previous_add_context = new_add_context
                        tool_result = output
                    elif tool_name == "load_functions":
                        # The tool now does all the work of validation and formatting.
                        tool_result, new_context_for_sandbox, valid_tools, unconnected_links = await meta_tools[
                            "load_functions"
                        ].ainvoke(tool_args)
                        # We still need to update the sandbox context for `execute_ipython_cell`
                        new_tool_ids.extend(valid_tools)
                        if new_tool_ids:
                            self.tools_context.update(new_context_for_sandbox)
                        if unconnected_links:
                            ask_user = True
                            ai_msg = f"Please login to the following app(s) using the following links and let me know in order to proceed:\n {unconnected_links} "

                    elif tool_name == "search_functions":
                        tool_result = await meta_tools["search_functions"].ainvoke(tool_args)
                    else:
                        raise Exception(
                            f"Unexpected tool call: {tool_call['name']}. "
                            "tool calls must be one of 'enter_agent_builder_mode', 'execute_ipython_cell', 'load_functions', or 'search_functions'. For using functions, call them in code using 'execute_ipython_cell'."
                        )
                except Exception as e:
                    # Surface the error back to the model as the tool result.
                    tool_result = str(e)

                tool_message = ToolMessage(
                    content=json.dumps(tool_result),
                    name=tool_call["name"],
                    tool_call_id=tool_call["id"],
                )
                tool_messages.append(tool_message)

            if ask_user:
                # Stop the loop and ask the user to connect the missing apps.
                tool_messages.append(AIMessage(content=ai_msg))
                return Command(
                    update={
                        "messages": tool_messages,
                        "selected_tool_ids": new_tool_ids,
                        "context": effective_existing_context,
                        "add_context": effective_previous_add_context,
                    }
                )

            return Command(
                goto="call_model",
                update={
                    "messages": tool_messages,
                    "selected_tool_ids": new_tool_ids,
                    "context": effective_existing_context,
                    "add_context": effective_previous_add_context,
                },
            )

        async def agent_builder(state: CodeActState, writer: StreamWriter) -> Command[Literal["call_model"]]:
            """State machine for building/updating a saved agent.

            Modes: "planning" (draft/patch a step plan), "confirming" (route on
            exact UI button text), "generating" (emit/patch the script and save
            it through the agent-builder registry).
            """
            agent_builder_mode = state.get("agent_builder_mode")
            if agent_builder_mode == "planning":
                planner_instructions = self.instructions + AGENT_BUILDER_PLANNER_PROMPT + self.preloaded_defs
                existing_plan_steps = None
                if self.agent and getattr(self.agent, "instructions", None):
                    existing_plan_steps = self.agent.instructions.get("plan")

                messages = [{"role": "system", "content": planner_instructions}]
                if existing_plan_steps:
                    plan_block = "\n".join(str(s) for s in existing_plan_steps)
                    messages.append({
                        "role": "system",
                        "content": f"Existing plan steps:\n{plan_block}"
                    })
                messages += state["messages"]

                # Bind planning tool so the builder can decide when to finalize
                model_with_tools = self.model_instance.bind_tools(
                    tools=[finalize_planning, cancel_planning],
                    tool_choice="auto",
                )
                response = cast(AIMessage, await model_with_tools.ainvoke(messages))

                if response.tool_calls:
                    # If exit was requested, route back to main agent
                    for tc in response.tool_calls:
                        if tc.get("name") == "cancel_planning":
                            return Command(goto="call_model", update={"agent_builder_mode": "inactive"})

                    plan_id = str(uuid.uuid4())
                    # BUGFIX: key was the builtin `id` function, not the string "id".
                    writer({"type": "custom", "id": plan_id, "name": "planning", "data": {"update": bool(self.agent)}})

                    existing_plan_steps = None
                    if self.agent and getattr(self.agent, "instructions", None):
                        existing_plan_steps = self.agent.instructions.get("plan")
                    if not existing_plan_steps:
                        existing_plan_steps = state.get("plan")

                    if existing_plan_steps:
                        # Update flow: ask for a patch against the current plan text.
                        current_plan_text = "\n".join(str(s) for s in (existing_plan_steps or []))
                        plan_patch_instructions = self.instructions + "\n" + AGENT_BUILDER_PLAN_PATCH_PROMPT + self.preloaded_defs
                        base_messages = [{"role": "system", "content": plan_patch_instructions}] + strip_thinking(state["messages"])
                        base_messages.append(HumanMessage(content=f"Current plan (one step per line):\n{current_plan_text}"))
                        model_with_structured_output = self.agent_builder_model_instance.with_structured_output(AgentBuilderPatch)
                        patch_response = await model_with_structured_output.ainvoke(base_messages)
                        proposed_text = cast(AgentBuilderPatch, patch_response).patch
                        updated_plan_text = apply_patch_or_use_proposed(current_plan_text, proposed_text)
                        plan_steps = [line for line in updated_plan_text.splitlines() if line.strip()]
                        plan = AgentBuilderPlan(steps=plan_steps)
                    else:
                        # Creation flow: generate a fresh structured plan.
                        model_with_structured_output = self.agent_builder_model_instance.with_structured_output(
                            AgentBuilderPlan
                        )
                        structured_plan_instructions = self.instructions + AGENT_BUILDER_PLANNING_PROMPT + self.preloaded_defs
                        so_base_messages = [{"role": "system", "content": structured_plan_instructions}] + strip_thinking(
                            state["messages"]
                        )
                        so_response = await model_with_structured_output.ainvoke(
                            so_base_messages
                        )
                        plan = cast(AgentBuilderPlan, so_response)

                    # BUGFIX: "id" key (was builtin `id`).
                    writer({"type": "custom", "id": plan_id, "name": "planning", "data": {"plan": plan.steps}})
                    ai_msg = AIMessage(
                        content=json.dumps(plan.model_dump()),
                        additional_kwargs={
                            "type": "planning",
                            "plan": plan.steps,
                            "update": bool(self.agent),
                        },
                        # BUGFIX: ("quiet") is a plain string; a one-element tuple needs a comma.
                        metadata={"tags": ("quiet",)}
                    )

                    if self.eval_mode:
                        # Skip the human confirmation step during evaluation.
                        mock_user_message = HumanMessage(content="yes, this is great")
                        return Command(
                            goto="agent_builder",
                            update={
                                "messages": [ai_msg, mock_user_message],
                                "agent_builder_mode": "generating",
                                "plan": plan.steps,
                            },
                        )

                    return Command(
                        update={
                            "messages": [ai_msg],
                            "agent_builder_mode": "confirming",
                            "plan": plan.steps,
                        }
                    )

                # The builder is asking clarifying questions. Stay in planning mode.
                return Command(update={"messages": [response]})

            elif agent_builder_mode == "confirming":
                # Deterministic routing based on three exact button inputs from UI
                user_text = ""
                for m in reversed(state["messages"]):
                    try:
                        if getattr(m, "type", "") in {"human", "user"}:
                            user_text = (get_message_text(m) or "").strip()
                            if user_text:
                                break
                    except Exception:
                        continue

                t = user_text.lower()
                if t == "yes, this is great":
                    self.meta_id = str(uuid.uuid4())
                    name, description = None, None
                    if self.agent:
                        # Update flow: use existing name/description and do not re-generate
                        name = getattr(self.agent, "name", None)
                        description = getattr(self.agent, "description", None)
                        writer(
                            {
                                "type": "custom",
                                "id": self.meta_id,  # BUGFIX: was builtin `id`
                                "name": "generating",
                                "data": {
                                    "update": True,
                                    "name": name,
                                    "description": description,
                                },
                            }
                        )
                    else:
                        writer({"type": "custom", "id": self.meta_id, "name": "generating", "data": {"update": False}})

                        meta_instructions = self.instructions + AGENT_BUILDER_META_PROMPT
                        messages = [{"role": "system", "content": meta_instructions}] + strip_thinking(state["messages"])

                        model_with_structured_output = self.agent_builder_model_instance.with_structured_output(
                            AgentBuilderMeta
                        )
                        meta_response = await model_with_structured_output.ainvoke(messages)
                        meta = cast(AgentBuilderMeta, meta_response)
                        name, description = meta.name, meta.description

                        # Emit intermediary UI update with created name/description
                        writer(
                            {
                                "type": "custom",
                                "id": self.meta_id,  # BUGFIX: was builtin `id`
                                "name": "generating",
                                "data": {"update": False, "name": name, "description": description},
                            }
                        )

                    return Command(
                        goto="agent_builder",
                        update={
                            "agent_builder_mode": "generating",
                            "agent_name": name,
                            "agent_description": description,
                        },
                    )
                if t == "i would like to modify the plan":
                    prompt_ai = AIMessage(
                        content="What would you like to change about the plan? Let me know and I'll update the plan accordingly.",
                    )
                    return Command(update={"agent_builder_mode": "planning", "messages": [prompt_ai]})
                if t == "let's do something else":
                    return Command(goto="call_model", update={"agent_builder_mode": "inactive"})

                # Fallback safe default
                return Command(goto="call_model", update={"agent_builder_mode": "inactive"})

            elif agent_builder_mode == "generating":
                existing_code = None
                if self.agent and getattr(self.agent, "instructions", None):
                    existing_code = self.agent.instructions.get("script")

                if existing_code:
                    # Update flow: patch the saved script rather than regenerating it.
                    generating_instructions = self.instructions + AGENT_BUILDER_CODE_PATCH_PROMPT + self.preloaded_defs
                    messages = [{"role": "system", "content": generating_instructions}] + strip_thinking(state["messages"])
                    if state.get("plan"):
                        plan_text = "\n".join(str(s) for s in state["plan"])
                        messages.append(HumanMessage(content=f"Confirmed plan (one step per line):\n{plan_text}"))
                    messages.append(HumanMessage(content=f"Current code to update:\n```python\n{existing_code}\n```"))
                    model_with_structured_output = self.agent_builder_model_instance.with_structured_output(AgentBuilderPatch)
                    ai_resp = await model_with_structured_output.ainvoke(messages)
                    proposed = cast(AgentBuilderPatch, ai_resp).patch
                    func_code = apply_patch_or_use_proposed(existing_code, proposed)
                else:
                    generating_instructions = self.instructions + AGENT_BUILDER_GENERATING_PROMPT + self.preloaded_defs
                    messages = [{"role": "system", "content": generating_instructions}] + strip_thinking(state["messages"])
                    model_with_structured_output = self.agent_builder_model_instance.with_structured_output(
                        AgentBuilderCode
                    )
                    response = await model_with_structured_output.ainvoke(messages)
                    func_code = cast(AgentBuilderCode, response).code

                # Extract function name (handle both regular and async functions)
                match = re.search(r"^\s*(?:async\s+)?def\s+([a-zA-Z_][a-zA-Z0-9_]*)\s*\(", func_code, re.MULTILINE)
                if match:
                    function_name = match.group(1)
                else:
                    function_name = "generated_agent"

                # Use generated metadata if available
                final_name = state.get("agent_name") or function_name
                final_description = state.get("agent_description") or f"Generated agent: {function_name}"
                add_context = state.get("add_context", {})
                if "functions" not in add_context:
                    add_context["functions"] = []
                add_context["functions"].append(func_code)

                # Save or update an Agent using the helper registry
                try:
                    if not self.agent_builder_registry:
                        raise ValueError("AgentBuilder registry is not configured")

                    plan_params = extract_plan_parameters(state["plan"])

                    # Build instructions payload embedding the plan and function code
                    instructions_payload = {
                        "plan": state["plan"],
                        "script": func_code,
                        "params": plan_params,
                    }

                    # Convert tool ids list to dict
                    tool_dict = convert_tool_ids_to_dict(state["selected_tool_ids"])

                    res = self.agent_builder_registry.upsert_agent(
                        name=final_name,
                        description=final_description,
                        instructions=instructions_payload,
                        tools=tool_dict,
                    )
                except Exception:
                    # In case of error, add the code to the exit message content

                    mock_exit_tool_call = {"name": "exit_agent_builder_mode", "args": {}, "id": "exit_builder_1"}

                    # Create a minimal assistant message to maintain flow
                    mock_assistant_message = AIMessage(
                        content=json.dumps({"code": func_code}),
                        tool_calls=[mock_exit_tool_call],
                        additional_kwargs={
                            "type": "generating",
                            "id": "ignore",
                            "update": bool(self.agent),
                            "name": final_name.replace(" ", "_"),
                            "description": final_description,
                        },
                        # BUGFIX: one-element tuple needs a trailing comma.
                        metadata={"tags": ("quiet",)}
                    )
                    mock_exit_tool_response = ToolMessage(
                        content=json.dumps(
                            f"An error occurred. Displaying the function code:\n\n{func_code}\nFinal Name: {final_name}\nDescription: {final_description}"
                        ),
                        name="exit_agent_builder_mode",
                        tool_call_id="exit_builder_1",
                    )
                    if self.eval_mode:
                        human_msg = HumanMessage(content="Call the generated agent function (without redeclaring it) and check whether it works as expected")
                        return Command(goto="call_model", update={"messages": [mock_assistant_message, mock_exit_tool_response, human_msg], "agent_builder_mode": "normal", "add_context": add_context})
                    else:
                        return Command(update={"messages": [mock_assistant_message, mock_exit_tool_response], "agent_builder_mode": "normal", "add_context": add_context})

                writer(
                    {
                        "type": "custom",
                        "id": self.meta_id,  # BUGFIX: was builtin `id`
                        "name": "generating",
                        "data": {
                            "id": str(res.id),
                            "update": bool(self.agent),
                            "name": final_name,
                            "description": final_description,
                            "add_context": add_context,
                        },
                    }
                )
                mock_exit_tool_call = {"name": "exit_agent_builder_mode", "args": {}, "id": "exit_builder_1"}
                mock_assistant_message = AIMessage(
                    content=json.dumps({"code": func_code}),
                    tool_calls=[mock_exit_tool_call],
                    additional_kwargs={
                        "type": "generating",
                        "id": str(res.id),
                        "update": bool(self.agent),
                        "name": final_name.replace(" ", "_"),
                        "description": final_description,
                    },
                    # BUGFIX: one-element tuple needs a trailing comma.
                    metadata={"tags": ("quiet",)}
                )

                mock_exit_tool_response = ToolMessage(
                    content=json.dumps(
                        "Exited Agent Builder Mode. Enter this mode again if you need to modify the saved agent."
                    ),
                    name="exit_agent_builder_mode",
                    tool_call_id="exit_builder_1",
                )

                return Command(
                    update={
                        "messages": [mock_assistant_message, mock_exit_tool_response],
                        "agent_builder_mode": "normal",
                        "add_context": add_context,
                    }
                )

        async def route_entry(state: CodeActState) -> Command[Literal["call_model", "agent_builder", "execute_tools"]]:
            """Route to either normal mode or agent builder creation"""
            pre_tools = await self.registry.export_tools(format=ToolFormat.NATIVE)

            # Create the initial system prompt and tools_context in one go
            self.final_instructions, self.tools_context = create_default_prompt(
                pre_tools,
                self.additional_tools,
                self.instructions,
                await get_connected_apps_string(self.registry),
                self.agent,
                is_initial_prompt=True,
            )
            self.preloaded_defs, _ = build_tool_definitions(pre_tools)
            self.preloaded_defs = "\n".join(self.preloaded_defs)
            await self.registry.load_tools(state["selected_tool_ids"])
            exported_tools = await self.registry.export_tools(
                state["selected_tool_ids"], ToolFormat.NATIVE
            )  # Get definition for only the new tools
            _, loaded_tools_context = build_tool_definitions(exported_tools)
            self.tools_context.update(loaded_tools_context)

            if (
                len(state["messages"]) == 1 and self.agent
            ):  # Inject the agent's script function into add_context for execution
                script = self.agent.instructions.get("script")
                add_context = {"functions": [script]}
                return Command(goto="call_model", update={"add_context": add_context})

            if state.get("agent_builder_mode") in ["planning", "confirming", "generating"]:
                return Command(goto="agent_builder")
            return Command(goto="call_model")

        agent = StateGraph(state_schema=CodeActState)
        agent.add_node(call_model, retry_policy=RetryPolicy(max_attempts=3, retry_on=filter_retry_on))
        agent.add_node(agent_builder)
        agent.add_node(execute_tools)
        agent.add_node(route_entry)
        agent.add_edge(START, "route_entry")
        return agent.compile(checkpointer=self.memory)
@@ -0,0 +1,77 @@
1
+ from typing import Annotated, Literal
2
+
3
+ from pydantic import BaseModel, Field
4
+
5
# Literal type for all available usecase filenames.
# " " (a single space) is the sentinel meaning "no JSON prompt selected";
# it matches the defaults used in ContextSchema below.
UseCaseName = Literal[
    " ",
    "1-unsubscribe",
    "2-reddit",
    "2.1-reddit",
    "3-earnings",
    "4-maps",
    "4.1-maps",
    "5-gmailreply",
    "6-contract",
    "7-overnight",
    "8-sheets_chart",
    "9-learning",
    "10-reddit2",
    "11-github",
]
22
+
23
+
24
class ContextSchema(BaseModel):
    """The configuration for the agent."""

    # Free-form system prompt; a single space means "unset" (use json_prompt_name instead).
    base_prompt: str = Field(
        default=" ",
        description="The base prompt to use for the agent's interactions. Leave blank if using a JSON prompt from the dropdown.",
    )
    # Provider id for the chat model; the Annotated metadata dict is read by the
    # hosting UI to render a provider dropdown — do not remove it.
    model_provider: Annotated[
        Literal[
            "openai",
            "anthropic",
            "azure_openai",
            "azure_ai",
            "google_vertexai",
            "google_genai",
            "bedrock",
            "bedrock_converse",
            "cohere",
            "fireworks",
            "together",
            "mistralai",
            "huggingface",
            "groq",
            "ollama",
            "google_anthropic_vertex",
            "deepseek",
            "ibm",
            "nvidia",
            "xai",
            "perplexity",
        ],
        {"__template_metadata__": {"kind": "provider"}},
    ] = Field(
        default="anthropic",
        description="The name of the model provider to use for the agent's main interactions. ",
    )
    # Model name within the selected provider; same UI-metadata convention as above.
    model: Annotated[
        Literal[
            "claude-4-sonnet-20250514",
            "claude-sonnet-4@20250514",
        ],
        {"__template_metadata__": {"kind": "llm"}},
    ] = Field(
        default="claude-4-sonnet-20250514",
        description="The name of the language model to use for the agent's main interactions. ",
    )
    # Explicit tool list; ignored when a JSON prompt is selected.
    tool_names: list[str] = Field(
        default=[],
        description="The names of the tools to use for the agent's main interactions. Leave blank if using a JSON prompt from the dropdown.",
    )
    # Selects one of the bundled usecase prompts (see UseCaseName); " " = none.
    json_prompt_name: UseCaseName = Field(
        default=" ",
        description="The name of the JSON prompt to use for the agent's main interactions, instead of providing a base prompt and tool names. ",
    )
@@ -0,0 +1,14 @@
1
+ from universal_mcp.agentr.registry import AgentrRegistry
2
+
3
+ from universal_mcp.agents.codeact00 import CodeActPlaybookAgent
4
+
5
+
6
async def agent():
    """Build a default CodeActPlaybookAgent and return its compiled graph.

    Entry point used by the LangGraph runtime: constructs the agent with the
    AgentR registry and concise default instructions, then compiles it.
    """
    playbook_agent = CodeActPlaybookAgent(
        name="CodeAct Agent",
        instructions="Be very concise in your answers.",
        model="anthropic:claude-4-sonnet-20250514",
        tools=[],
        registry=AgentrRegistry(),
    )
    graph = await playbook_agent._build_graph()
    return graph